aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWolfram Sang <wsa@the-dreams.de>2019-05-03 09:20:04 -0400
committerWolfram Sang <wsa@the-dreams.de>2019-05-03 09:20:58 -0400
commitd00afd5ede1c29a6dc59be2d7fb7d6ef28eb85c5 (patch)
treee194b1968e54380a6654abf7d3a037ca0a010280
parent9a51b86a61214a297cdfc1bb705b7267f9455ae6 (diff)
parentd5984d2a312144bedccf32aea2298f8df05bb617 (diff)
Merge branch 'i2c-mux/for-next' of https://github.com/peda-r/i2c-mux into i2c/for-5.2
Mainly some pca954x work, i.e. removal of unused platform data support and added support for sysfs interface for manipulating/examining the idle state. And then a mechanical cocci-style patch.
-rw-r--r--.mailmap2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x20
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-xscale.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-mtk.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-stu300.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt)0
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-wmt.txt (renamed from Documentation/devicetree/bindings/i2c/i2c-vt8500.txt)0
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt1
-rw-r--r--Documentation/devicetree/bindings/net/dsa/qca8k.txt73
-rw-r--r--Documentation/devicetree/bindings/serial/mtk-uart.txt1
-rw-r--r--Documentation/filesystems/mount_api.txt367
-rw-r--r--Documentation/i2c/busses/i2c-i8011
-rw-r--r--Documentation/networking/msg_zerocopy.rst2
-rw-r--r--Documentation/networking/netdev-FAQ.rst13
-rw-r--r--Documentation/networking/nf_flowtable.txt8
-rw-r--r--Documentation/networking/snmp_counter.rst12
-rw-r--r--Documentation/virtual/kvm/api.txt77
-rw-r--r--Documentation/virtual/kvm/mmu.txt11
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile49
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/arc/Kconfig21
-rw-r--r--arch/arc/Makefile6
-rw-r--r--arch/arc/boot/dts/abilis_tb100.dtsi58
-rw-r--r--arch/arc/boot/dts/abilis_tb100_dvk.dts14
-rw-r--r--arch/arc/boot/dts/abilis_tb101.dtsi58
-rw-r--r--arch/arc/boot/dts/abilis_tb101_dvk.dts14
-rw-r--r--arch/arc/boot/dts/abilis_tb10x.dtsi60
-rw-r--r--arch/arc/boot/dts/axc001.dtsi6
-rw-r--r--arch/arc/boot/dts/axc003.dtsi16
-rw-r--r--arch/arc/boot/dts/axc003_idu.dtsi16
-rw-r--r--arch/arc/boot/dts/axs10x_mb.dtsi22
-rw-r--r--arch/arc/boot/dts/hsdk.dts33
-rw-r--r--arch/arc/boot/dts/vdk_axc003.dtsi4
-rw-r--r--arch/arc/boot/dts/vdk_axc003_idu.dtsi4
-rw-r--r--arch/arc/boot/dts/vdk_axs10x_mb.dtsi18
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/include/asm/Kbuild1
-rw-r--r--arch/arc/include/asm/arcregs.h12
-rw-r--r--arch/arc/include/asm/irqflags-arcv2.h8
-rw-r--r--arch/arc/include/asm/perf_event.h2
-rw-r--r--arch/arc/include/asm/spinlock.h49
-rw-r--r--arch/arc/include/uapi/asm/Kbuild1
-rw-r--r--arch/arc/kernel/head.S6
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/setup.c211
-rw-r--r--arch/arc/kernel/troubleshoot.c5
-rw-r--r--arch/arc/lib/Makefile8
-rw-r--r--arch/arc/lib/memcpy-archs-unaligned.S47
-rw-r--r--arch/arc/plat-eznps/Kconfig12
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts2
-rw-r--r--arch/arm/boot/dts/imx6dl-yapp4-common.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi1
-rw-r--r--arch/arm/boot/dts/imx6ull-pinfunc-snvs.h2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts9
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig3
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/include/asm/kvm_mmu.h11
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h2
-rw-r--r--arch/arm/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/arm/mach-cns3xxx/core.c2
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c27
-rw-r--r--arch/arm/mach-imx/mach-imx51.c1
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a774c0.dtsi7
-rw-r--r--arch/arm64/boot/dts/renesas/r8a77990.dtsi7
-rw-r--r--arch/arm64/include/asm/cputype.h6
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h11
-rw-r--r--arch/arm64/kernel/cpufeature.c1
-rw-r--r--arch/arm64/kernel/probes/kprobes.c56
-rw-r--r--arch/arm64/kernel/setup.c2
-rw-r--r--arch/arm64/kernel/stacktrace.c1
-rw-r--r--arch/arm64/kvm/reset.c6
-rw-r--r--arch/c6x/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/uapi/asm/Kbuild1
-rw-r--r--arch/h8300/include/asm/Kbuild1
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild1
-rw-r--r--arch/hexagon/include/asm/Kbuild1
-rw-r--r--arch/hexagon/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/uapi/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/bcm47xx/workarounds.c1
-rw-r--r--arch/mips/include/asm/jump_label.h8
-rw-r--r--arch/mips/include/uapi/asm/posix_types.h7
-rw-r--r--arch/mips/kernel/vmlinux.lds.S12
-rw-r--r--arch/mips/loongson64/lemote-2f/irq.c2
-rw-r--r--arch/nios2/include/asm/Kbuild1
-rw-r--r--arch/nios2/include/uapi/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/uapi/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/mmu.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h2
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h8
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S3
-rw-r--r--arch/powerpc/kernel/head_32.S6
-rw-r--r--arch/powerpc/kernel/security.c23
-rw-r--r--arch/powerpc/kernel/vdso64/gettimeofday.S4
-rw-r--r--arch/powerpc/lib/memcmp_64.S17
-rw-r--r--arch/powerpc/mm/hash_low_32.S8
-rw-r--r--arch/powerpc/net/bpf_jit.h17
-rw-r--r--arch/powerpc/net/bpf_jit32.h4
-rw-r--r--arch/powerpc/net/bpf_jit64.h20
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c12
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c27
-rw-r--r--arch/powerpc/platforms/pseries/ras.c1
-rw-r--r--arch/s390/include/asm/ap.h11
-rw-r--r--arch/s390/include/asm/elf.h11
-rw-r--r--arch/s390/include/asm/lowcore.h61
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c19
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/s390/kernel/vtime.c19
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/uapi/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/unicore32/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/uapi/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/compressed/misc.h4
-rw-r--r--arch/x86/boot/string.c3
-rw-r--r--arch/x86/hyperv/hv_init.c6
-rw-r--r--arch/x86/include/asm/cpu_device_id.h31
-rw-r--r--arch/x86/include/asm/cpufeature.h5
-rw-r--r--arch/x86/include/asm/kvm_host.h10
-rw-r--r--arch/x86/include/asm/processor-cyrix.h21
-rw-r--r--arch/x86/include/asm/realmode.h6
-rw-r--r--arch/x86/kernel/aperture_64.c20
-rw-r--r--arch/x86/kernel/cpu/cyrix.c14
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c3
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c1
-rw-r--r--arch/x86/kernel/mpparse.c4
-rw-r--r--arch/x86/kvm/hyperv.c9
-rw-r--r--arch/x86/kvm/mmu.c54
-rw-r--r--arch/x86/kvm/mmutrace.h4
-rw-r--r--arch/x86/kvm/svm.c32
-rw-r--r--arch/x86/kvm/vmx/nested.c5
-rw-r--r--arch/x86/kvm/vmx/vmx.c19
-rw-r--r--arch/x86/kvm/vmx/vmx.h1
-rw-r--r--arch/x86/kvm/x86.c59
-rw-r--r--arch/x86/lib/csum-partial_64.c2
-rw-r--r--arch/x86/mm/mmap.c2
-rw-r--r--arch/x86/mm/pti.c4
-rw-r--r--arch/x86/platform/efi/quirks.c2
-rw-r--r--arch/x86/realmode/init.c11
-rw-r--r--arch/xtensa/include/asm/Kbuild1
-rw-r--r--arch/xtensa/include/uapi/asm/Kbuild1
-rw-r--r--block/bio.c43
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-flush.c4
-rw-r--r--block/blk-iolatency.c1
-rw-r--r--block/blk-mq.c24
-rw-r--r--block/blk-mq.h11
-rw-r--r--block/blk-sysfs.c12
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/cppc_acpi.c9
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/android/binder.c3
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/libata-zpodd.c34
-rw-r--r--drivers/auxdisplay/Kconfig38
-rw-r--r--drivers/auxdisplay/Makefile2
-rw-r--r--drivers/auxdisplay/charlcd.c55
-rw-r--r--drivers/auxdisplay/hd44780.c4
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/power/domain.c13
-rw-r--r--drivers/base/swnode.c4
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/paride/pcd.c6
-rw-r--r--drivers/block/paride/pf.c16
-rw-r--r--drivers/block/rbd.c28
-rw-r--r--drivers/block/zram/zram_drv.c32
-rw-r--r--drivers/clocksource/clps711x-timer.c44
-rw-r--r--drivers/clocksource/mips-gic-timer.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/clocksource/timer-riscv.c5
-rw-r--r--drivers/clocksource/timer-ti-dm.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/gpio/gpio-adnp.c6
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-mockup.c10
-rw-r--r--drivers/gpio/gpiolib-of.c17
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4
-rw-r--r--drivers/gpu/drm/drm_drv.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_file.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c110
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c18
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c72
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c2
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c8
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c106
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c12
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c34
-rw-r--r--drivers/infiniband/hw/mlx5/main.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/iommu/amd_iommu.c16
-rw-r--r--drivers/iommu/amd_iommu_init.c7
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/intel-iommu.c5
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c19
-rw-r--r--drivers/iommu/iommu.c8
-rw-r--r--drivers/iommu/iova.c5
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic.c45
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c8
-rw-r--r--drivers/irqchip/irq-mbigen.c3
-rw-r--r--drivers/irqchip/irq-mmp.c2
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c3
-rw-r--r--drivers/leds/leds-pca9532.c8
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/misc/habanalabs/command_submission.c6
-rw-r--r--drivers/misc/habanalabs/debugfs.c7
-rw-r--r--drivers/misc/habanalabs/device.c71
-rw-r--r--drivers/misc/habanalabs/goya/goya.c65
-rw-r--r--drivers/misc/habanalabs/habanalabs.h21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/memory.c38
-rw-r--r--drivers/misc/habanalabs/mmu.c6
-rw-r--r--drivers/mmc/host/alcor.c25
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c16
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c8
-rw-r--r--drivers/mmc/host/sdhci-omap.c3
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/dsa/qca8k.c174
-rw-r--r--drivers/net/dsa/qca8k.h13
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c42
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h93
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c317
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c8
-rw-r--r--drivers/net/ethernet/sis/sis900.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c14
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/broadcom.c13
-rw-r--r--drivers/net/phy/dp83822.c34
-rw-r--r--drivers/net/phy/meson-gxl.c6
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c4
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/tcp.c2
-rw-r--r--drivers/nvme/target/core.c4
-rw-r--r--drivers/nvme/target/io-cmd-file.c20
-rw-r--r--drivers/parport/daisy.c32
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/bw_notification.c23
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c5
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c10
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c2
-rw-r--r--drivers/s390/cio/chsc.c13
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c8
-rw-r--r--drivers/s390/crypto/ap_bus.c19
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c26
-rw-r--r--drivers/s390/crypto/zcrypt_api.c30
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_erp.c17
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c21
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/scsi/aacraid/aacraid.h7
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c6
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c39
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c23
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_lib.c15
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/soc/bcm/bcm2835-power.c49
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/axis-fifo/Kconfig1
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers.c33
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c10
-rw-r--r--drivers/staging/erofs/dir.c45
-rw-r--r--drivers/staging/erofs/unzip_vle.c45
-rw-r--r--drivers/staging/erofs/unzip_vle_lz4.c7
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts29
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi73
-rw-r--r--drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt48
-rw-r--r--drivers/staging/mt7621-eth/Kconfig39
-rw-r--r--drivers/staging/mt7621-eth/Makefile14
-rw-r--r--drivers/staging/mt7621-eth/TODO13
-rw-r--r--drivers/staging/mt7621-eth/ethtool.c250
-rw-r--r--drivers/staging/mt7621-eth/ethtool.h15
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7620.h277
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7621.c297
-rw-r--r--drivers/staging/mt7621-eth/mdio.c275
-rw-r--r--drivers/staging/mt7621-eth/mdio.h27
-rw-r--r--drivers/staging/mt7621-eth/mdio_mt7620.c173
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c2176
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.h716
-rw-r--r--drivers/staging/mt7621-eth/soc_mt7621.c161
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/ethernet.c40
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/speakup/speakup_soft.c16
-rw-r--r--drivers/staging/speakup/spk_priv.h1
-rw-r--r--drivers/staging/speakup/synth.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vt6655/device_main.c11
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c9
-rw-r--r--drivers/thermal/cpu_cooling.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c21
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/mtk_thermal.c7
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c24
-rw-r--r--drivers/tty/serial/atmel_serial.c52
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c3
-rw-r--r--drivers/tty/serial/mxs-auart.c4
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c12
-rw-r--r--drivers/tty/serial/sh-sci.c12
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/common.c2
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/xhci-dbgcap.c5
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-rcar.c1
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.h8
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/mtu3/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h4
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c27
-rw-r--r--drivers/usb/typec/tcpm/wcove.c9
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c106
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h15
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c26
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c32
-rw-r--r--drivers/virt/vboxguest/vboxguest_version.h9
-rw-r--r--drivers/virt/vboxguest/vmmdev.h8
-rw-r--r--fs/afs/fsclient.c6
-rw-r--r--fs/afs/yfsclient.c2
-rw-r--r--fs/block_dev.c12
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/btrfs/qgroup.c4
-rw-r--r--fs/btrfs/raid56.c3
-rw-r--r--fs/btrfs/transaction.c49
-rw-r--r--fs/btrfs/tree-log.c33
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/file.c148
-rw-r--r--fs/cifs/smb2maperror.c3
-rw-r--r--fs/cifs/smb2pdu.c11
-rw-r--r--fs/cifs/trace.h6
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/indirect.c43
-rw-r--r--fs/ext4/inode.c30
-rw-r--r--fs/ext4/ioctl.c7
-rw-r--r--fs/ext4/resize.c17
-rw-r--r--fs/ext4/super.c16
-rw-r--r--fs/fs_parser.c2
-rw-r--r--fs/io_uring.c465
-rw-r--r--fs/iomap.c12
-rw-r--r--fs/lockd/host.c3
-rw-r--r--fs/locks.c5
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c5
-rw-r--r--fs/nfs/nfs4proc.c5
-rw-r--r--fs/notify/fanotify/fanotify_user.c12
-rw-r--r--fs/notify/inotify/inotify_user.c7
-rw-r--r--fs/ocfs2/refcounttree.c42
-rw-r--r--fs/open.c6
-rw-r--r--fs/proc/kcore.c29
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/udf/truncate.c8
-rw-r--r--fs/udf/udfdecl.h2
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c15
-rw-r--r--fs/xfs/scrub/btree.c11
-rw-r--r--fs/xfs/scrub/dabtree.c5
-rw-r--r--fs/xfs/xfs_discard.c8
-rw-r--r--fs/xfs/xfs_file.c27
-rw-r--r--include/acpi/acoutput.h3
-rw-r--r--include/acpi/platform/aclinux.h5
-rw-r--r--include/linux/atalk.h2
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blk_types.h1
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/bpf.h1
-rw-r--r--include/linux/bpf_verifier.h40
-rw-r--r--include/linux/brcmphy.h16
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/kcore.h13
-rw-r--r--include/linux/list.h2
-rw-r--r--include/linux/mlx5/qp.h3
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/page-isolation.h10
-rw-r--r--include/linux/parport.h13
-rw-r--r--include/linux/platform_data/gpio/gpio-amd-fch.h2
-rw-r--r--include/linux/platform_data/pca954x.h48
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/sched/signal.h18
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/socket.h12
-rw-r--r--include/linux/uio.h24
-rw-r--r--include/linux/vbox_utils.h12
-rw-r--r--include/misc/charlcd.h1
-rw-r--r--include/net/act_api.h9
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sctp/checksum.h2
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tc_act/tc_gact.h2
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/bpf.h188
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h60
-rw-r--r--kernel/bpf/syscall.c22
-rw-r--r--kernel/bpf/verifier.c154
-rw-r--r--kernel/cpu.c20
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/futex.c4
-rw-r--r--kernel/irq/devres.c2
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/ptrace.c15
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/cpufreq_schedutil.c59
-rw-r--r--kernel/sched/fair.c84
-rw-r--r--kernel/time/jiffies.c2
-rw-r--r--kernel/trace/ftrace.c12
-rw-r--r--kernel/trace/trace_dynevent.c2
-rw-r--r--kernel/trace/trace_events_hist.c1
-rw-r--r--kernel/watchdog.c10
-rw-r--r--kernel/workqueue.c5
-rw-r--r--lib/rhashtable.c8
-rw-r--r--lib/sbitmap.c11
-rw-r--r--mm/debug.c4
-rw-r--r--mm/kasan/kasan.h5
-rw-r--r--mm/memory.c11
-rw-r--r--mm/memory_hotplug.c19
-rw-r--r--mm/mempolicy.c40
-rw-r--r--mm/migrate.c11
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/page_isolation.c51
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slab.h3
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/slub.c5
-rw-r--r--mm/sparse.c2
-rw-r--r--net/appletalk/aarp.c15
-rw-r--r--net/appletalk/ddp.c20
-rw-r--r--net/bridge/br_netfilter_hooks.c1
-rw-r--r--net/bridge/br_netfilter_ipv6.c2
-rw-r--r--net/ceph/ceph_common.c18
-rw-r--r--net/ceph/messenger.c8
-rw-r--r--net/ceph/mon_client.c9
-rw-r--r--net/core/devlink.c5
-rw-r--r--net/core/filter.c27
-rw-r--r--net/core/net-sysfs.c20
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/ipv6/netfilter/ip6t_srh.c6
-rw-r--r--net/ipv6/route.c18
-rw-r--r--net/ipv6/tcp_ipv6.c8
-rw-r--r--net/mpls/mpls_iptunnel.c12
-rw-r--r--net/ncsi/ncsi-netlink.c4
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/nf_conntrack_sip.c37
-rw-r--r--net/netfilter/nf_tables_api.c5
-rw-r--r--net/netfilter/nft_objref.c19
-rw-r--r--net/netfilter/nft_redir.c2
-rw-r--r--net/netfilter/nft_set_rbtree.c7
-rw-r--r--net/netlink/genetlink.c3
-rw-r--r--net/nfc/llcp_sock.c9
-rw-r--r--net/openvswitch/datapath.c12
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rose/rose_subr.c21
-rw-r--r--net/rxrpc/output.c11
-rw-r--r--net/sched/Kconfig3
-rw-r--r--net/sched/act_api.c101
-rw-r--r--net/sched/act_bpf.c25
-rw-r--r--net/sched/act_connmark.c22
-rw-r--r--net/sched/act_csum.c22
-rw-r--r--net/sched/act_gact.c15
-rw-r--r--net/sched/act_ife.c35
-rw-r--r--net/sched/act_ipt.c11
-rw-r--r--net/sched/act_mirred.c25
-rw-r--r--net/sched/act_nat.c15
-rw-r--r--net/sched/act_pedit.c18
-rw-r--r--net/sched/act_police.c13
-rw-r--r--net/sched/act_sample.c21
-rw-r--r--net/sched/act_simple.c54
-rw-r--r--net/sched/act_skbedit.c20
-rw-r--r--net/sched/act_skbmod.c20
-rw-r--r--net/sched/act_tunnel_key.c19
-rw-r--r--net/sched/act_vlan.c22
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/sch_cake.c25
-rw-r--r--net/sctp/socket.c54
-rw-r--r--net/socket.c277
-rw-r--r--net/strparser/strparser.c2
-rw-r--r--net/sunrpc/clnt.c12
-rw-r--r--net/sunrpc/xprtsock.c4
-rw-r--r--net/tipc/group.c3
-rw-r--r--net/tipc/net.c5
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c22
-rw-r--r--net/tipc/topsrv.c1
-rw-r--r--net/xdp/xdp_umem.c19
-rw-r--r--scripts/Makefile.build7
-rwxr-xr-xscripts/checkpatch.pl2
-rw-r--r--scripts/coccinelle/free/put_device.cocci1
-rw-r--r--scripts/coccinelle/misc/badty.cocci2
-rw-r--r--scripts/kconfig/lxdialog/inputbox.c3
-rw-r--r--scripts/kconfig/nconf.c2
-rw-r--r--scripts/kconfig/nconf.gui.c3
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--security/Kconfig38
-rw-r--r--security/selinux/ss/policydb.c13
-rw-r--r--security/yama/yama_lsm.c8
-rw-r--r--sound/core/oss/pcm_oss.c43
-rw-r--r--sound/core/pcm_native.c9
-rw-r--r--sound/core/rawmidi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c7
-rw-r--r--sound/drivers/opl3/opl3_voice.h2
-rw-r--r--sound/firewire/motu/motu.c20
-rw-r--r--sound/isa/sb/sb8.c4
-rw-r--r--sound/pci/echoaudio/echoaudio.c5
-rw-r--r--sound/pci/hda/hda_codec.c20
-rw-r--r--sound/pci/hda/hda_intel.c14
-rw-r--r--sound/pci/hda/patch_ca0132.c20
-rw-r--r--sound/pci/hda/patch_realtek.c52
-rw-r--r--tools/arch/alpha/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h2
-rw-r--r--tools/arch/mips/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/parisc/include/uapi/asm/mman.h2
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h1
-rw-r--r--tools/arch/xtensa/include/uapi/asm/mman.h2
-rw-r--r--tools/bpf/bpftool/prog.c266
-rw-r--r--tools/build/Makefile.feature6
-rw-r--r--tools/build/feature/test-all.c5
-rw-r--r--tools/build/feature/test-libopencsd.c4
-rw-r--r--tools/include/uapi/asm-generic/mman-common-tools.h23
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h4
-rw-r--r--tools/include/uapi/asm-generic/mman.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h158
-rw-r--r--tools/include/uapi/drm/i915_drm.h64
-rw-r--r--tools/include/uapi/linux/bpf.h188
-rw-r--r--tools/include/uapi/linux/fcntl.h1
-rw-r--r--tools/include/uapi/linux/in.h9
-rw-r--r--tools/include/uapi/linux/mman.h4
-rw-r--r--tools/lib/bpf/Makefile42
-rw-r--r--tools/lib/bpf/README.rst1
-rw-r--r--tools/lib/bpf/btf.c51
-rw-r--r--tools/lib/bpf/libbpf.c266
-rw-r--r--tools/lib/bpf/libbpf.h64
-rw-r--r--tools/lib/bpf/libbpf.map3
-rw-r--r--tools/lib/bpf/xsk.c15
-rw-r--r--tools/objtool/Makefile7
-rw-r--r--tools/objtool/check.c3
-rw-r--r--tools/perf/Documentation/Build.txt24
-rw-r--r--tools/perf/Documentation/perf-config.txt16
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-report.txt13
-rw-r--r--tools/perf/Documentation/perf-script.txt3
-rw-r--r--tools/perf/Documentation/perf-stat.txt5
-rw-r--r--tools/perf/Documentation/tips.txt7
-rw-r--r--tools/perf/Makefile.config15
-rw-r--r--tools/perf/Makefile.perf4
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl10
-rw-r--r--tools/perf/arch/x86/util/Build1
-rw-r--r--tools/perf/arch/x86/util/archinsn.c26
-rw-r--r--tools/perf/bench/epoll-ctl.c2
-rw-r--r--tools/perf/bench/epoll-wait.c2
-rw-r--r--tools/perf/builtin-list.c2
-rw-r--r--tools/perf/builtin-record.c54
-rw-r--r--tools/perf/builtin-report.c50
-rw-r--r--tools/perf/builtin-script.c129
-rw-r--r--tools/perf/builtin-stat.c3
-rw-r--r--tools/perf/builtin-top.c62
-rw-r--r--tools/perf/builtin.h3
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json594
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/branch.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/cache.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/core.json134
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json168
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/memory.json162
-rw-r--r--tools/perf/pmu-events/arch/x86/amdfam17h/other.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv1
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py61
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py26
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py119
-rw-r--r--tools/perf/tests/attr/test-record-C02
-rw-r--r--tools/perf/tests/attr/test-record-basic2
-rw-r--r--tools/perf/tests/attr/test-record-branch-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-any_ret2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-hv2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-ind_call2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-k2
-rw-r--r--tools/perf/tests/attr/test-record-branch-filter-u2
-rw-r--r--tools/perf/tests/attr/test-record-count2
-rw-r--r--tools/perf/tests/attr/test-record-data2
-rw-r--r--tools/perf/tests/attr/test-record-freq2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default2
-rw-r--r--tools/perf/tests/attr/test-record-graph-dwarf2
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp2
-rw-r--r--tools/perf/tests/attr/test-record-group2
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling2
-rw-r--r--tools/perf/tests/attr/test-record-group12
-rw-r--r--tools/perf/tests/attr/test-record-no-buffering2
-rw-r--r--tools/perf/tests/attr/test-record-no-inherit2
-rw-r--r--tools/perf/tests/attr/test-record-no-samples2
-rw-r--r--tools/perf/tests/attr/test-record-period2
-rw-r--r--tools/perf/tests/attr/test-record-raw2
-rw-r--r--tools/perf/tests/backward-ring-buffer.c2
-rw-r--r--tools/perf/tests/evsel-tp-sched.c1
-rw-r--r--tools/perf/tests/expr.c5
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c4
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh14
-rw-r--r--tools/perf/ui/browser.c10
-rw-r--r--tools/perf/ui/browsers/Build1
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rw-r--r--tools/perf/ui/browsers/hists.c141
-rw-r--r--tools/perf/ui/browsers/res_sample.c91
-rw-r--r--tools/perf/ui/browsers/scripts.c274
-rw-r--r--tools/perf/util/annotate.c163
-rw-r--r--tools/perf/util/annotate.h1
-rw-r--r--tools/perf/util/archinsn.h12
-rw-r--r--tools/perf/util/bpf-event.c425
-rw-r--r--tools/perf/util/bpf-event.h42
-rw-r--r--tools/perf/util/build-id.c1
-rw-r--r--tools/perf/util/config.c3
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c1
-rw-r--r--tools/perf/util/data.c107
-rw-r--r--tools/perf/util/data.h14
-rw-r--r--tools/perf/util/dso.c43
-rw-r--r--tools/perf/util/dso.h8
-rw-r--r--tools/perf/util/env.c155
-rw-r--r--tools/perf/util/env.h24
-rw-r--r--tools/perf/util/evlist.c148
-rw-r--r--tools/perf/util/evlist.h14
-rw-r--r--tools/perf/util/evsel.c80
-rw-r--r--tools/perf/util/evsel.h6
-rw-r--r--tools/perf/util/header.c295
-rw-r--r--tools/perf/util/header.h7
-rw-r--r--tools/perf/util/hist.c54
-rw-r--r--tools/perf/util/hist.h31
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c20
-rw-r--r--tools/perf/util/machine.c32
-rw-r--r--tools/perf/util/map.c18
-rw-r--r--tools/perf/util/ordered-events.c2
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/pmu.c10
-rw-r--r--tools/perf/util/probe-event.c6
-rw-r--r--tools/perf/util/session.c28
-rw-r--r--tools/perf/util/sort.c91
-rw-r--r--tools/perf/util/sort.h12
-rw-r--r--tools/perf/util/stat.c12
-rw-r--r--tools/perf/util/symbol.c5
-rw-r--r--tools/perf/util/symbol_conf.h3
-rw-r--r--tools/perf/util/time-utils.c8
-rw-r--r--tools/perf/util/time-utils.h1
-rw-r--r--tools/power/x86/turbostat/turbostat.c3
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields_kern.c88
-rw-r--r--tools/testing/selftests/bpf/test_btf.c44
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c134
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c25
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c168
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c4
-rw-r--r--tools/testing/selftests/kvm/Makefile4
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h1
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c16
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c18
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json51
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/sample.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json25
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c4
-rw-r--r--virt/kvm/arm/mmu.c125
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c31
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c4
-rw-r--r--virt/kvm/arm/vgic/vgic.c14
-rw-r--r--virt/kvm/eventfd.c6
-rw-r--r--virt/kvm/kvm_main.c3
841 files changed, 11586 insertions, 9858 deletions
diff --git a/.mailmap b/.mailmap
index 37e1847c7988..b2cde8668dcc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -224,3 +224,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
224Yusuke Goda <goda.yusuke@renesas.com> 224Yusuke Goda <goda.yusuke@renesas.com>
225Gustavo Padovan <gustavo@las.ic.unicamp.br> 225Gustavo Padovan <gustavo@las.ic.unicamp.br>
226Gustavo Padovan <padovan@profusion.mobi> 226Gustavo Padovan <padovan@profusion.mobi>
227Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
228Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x b/Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x
new file mode 100644
index 000000000000..0b0de8cd0d13
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-pca954x
@@ -0,0 +1,20 @@
1What: /sys/bus/i2c/.../idle_state
2Date: January 2019
3KernelVersion: 5.2
4Contact: Robert Shearman <robert.shearman@att.com>
5Description:
6 Value that exists only for mux devices that can be
7 written to control the behaviour of the multiplexer on
8 idle. Possible values:
9 -2 - disconnect on idle, i.e. deselect the last used
10 channel, which is useful when there is a device
11 with an address that conflicts with another
12 device on another mux on the same parent bus.
13 -1 - leave the mux as-is, which is the most optimal
14 setting in terms of I2C operations and is the
15 default mode.
16 0..<nchans> - set the mux to a predetermined channel,
17 which is useful if there is one channel that is
18 used almost always, and you want to reduce the
19 latency for normal operations after rare
20 transactions on other channels
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
index dcc8390e0d24..dcc8390e0d24 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
index 68f6d73a8b73..68f6d73a8b73 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
index bd81a482634f..bd81a482634f 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
index 49df0053347a..49df0053347a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
index 94a425eaa6c7..94a425eaa6c7 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index 8de96a4fb2d5..f977ea7617f6 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -16,6 +16,7 @@ Required properties:
16 - "renesas,irqc-r8a7793" (R-Car M2-N) 16 - "renesas,irqc-r8a7793" (R-Car M2-N)
17 - "renesas,irqc-r8a7794" (R-Car E2) 17 - "renesas,irqc-r8a7794" (R-Car E2)
18 - "renesas,intc-ex-r8a774a1" (RZ/G2M) 18 - "renesas,intc-ex-r8a774a1" (RZ/G2M)
19 - "renesas,intc-ex-r8a774c0" (RZ/G2E)
19 - "renesas,intc-ex-r8a7795" (R-Car H3) 20 - "renesas,intc-ex-r8a7795" (R-Car H3)
20 - "renesas,intc-ex-r8a7796" (R-Car M3-W) 21 - "renesas,intc-ex-r8a7796" (R-Car M3-W)
21 - "renesas,intc-ex-r8a77965" (R-Car M3-N) 22 - "renesas,intc-ex-r8a77965" (R-Car M3-N)
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
index bbcb255c3150..93a7469e70d4 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -12,10 +12,15 @@ Required properties:
12Subnodes: 12Subnodes:
13 13
14The integrated switch subnode should be specified according to the binding 14The integrated switch subnode should be specified according to the binding
15described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of 15described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external
16port and PHY id, each subnode describing a port needs to have a valid phandle 16mdio-bus each subnode describing a port needs to have a valid phandle
17referencing the internal PHY connected to it. The CPU port of this switch is 17referencing the internal PHY it is connected to. This is because there's no
18always port 0. 18N:N mapping of port and PHY id.
19
20Don't use mixed external and internal mdio-bus configurations, as this is
21not supported by the hardware.
22
23The CPU port of this switch is always port 0.
19 24
20A CPU port node has the following optional node: 25A CPU port node has the following optional node:
21 26
@@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
31- 'full-duplex' (boolean, optional), to indicate that full duplex is 36- 'full-duplex' (boolean, optional), to indicate that full duplex is
32 used. When absent, half duplex is assumed. 37 used. When absent, half duplex is assumed.
33 38
34Example: 39Examples:
35 40
41for the external mdio-bus configuration:
36 42
37 &mdio0 { 43 &mdio0 {
38 phy_port1: phy@0 { 44 phy_port1: phy@0 {
@@ -55,12 +61,12 @@ Example:
55 reg = <4>; 61 reg = <4>;
56 }; 62 };
57 63
58 switch0@0 { 64 switch@10 {
59 compatible = "qca,qca8337"; 65 compatible = "qca,qca8337";
60 #address-cells = <1>; 66 #address-cells = <1>;
61 #size-cells = <0>; 67 #size-cells = <0>;
62 68
63 reg = <0>; 69 reg = <0x10>;
64 70
65 ports { 71 ports {
66 #address-cells = <1>; 72 #address-cells = <1>;
@@ -108,3 +114,56 @@ Example:
108 }; 114 };
109 }; 115 };
110 }; 116 };
117
118for the internal master mdio-bus configuration:
119
120 &mdio0 {
121 switch@10 {
122 compatible = "qca,qca8337";
123 #address-cells = <1>;
124 #size-cells = <0>;
125
126 reg = <0x10>;
127
128 ports {
129 #address-cells = <1>;
130 #size-cells = <0>;
131
132 port@0 {
133 reg = <0>;
134 label = "cpu";
135 ethernet = <&gmac1>;
136 phy-mode = "rgmii";
137 fixed-link {
138 speed = 1000;
139 full-duplex;
140 };
141 };
142
143 port@1 {
144 reg = <1>;
145 label = "lan1";
146 };
147
148 port@2 {
149 reg = <2>;
150 label = "lan2";
151 };
152
153 port@3 {
154 reg = <3>;
155 label = "lan3";
156 };
157
158 port@4 {
159 reg = <4>;
160 label = "lan4";
161 };
162
163 port@5 {
164 reg = <5>;
165 label = "wan";
166 };
167 };
168 };
169 };
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 742cb470595b..bcfb13194f16 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -16,6 +16,7 @@ Required properties:
16 * "mediatek,mt8127-uart" for MT8127 compatible UARTS 16 * "mediatek,mt8127-uart" for MT8127 compatible UARTS
17 * "mediatek,mt8135-uart" for MT8135 compatible UARTS 17 * "mediatek,mt8135-uart" for MT8135 compatible UARTS
18 * "mediatek,mt8173-uart" for MT8173 compatible UARTS 18 * "mediatek,mt8173-uart" for MT8173 compatible UARTS
19 * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
19 * "mediatek,mt6577-uart" for MT6577 and all of the above 20 * "mediatek,mt6577-uart" for MT6577 and all of the above
20 21
21- reg: The base address of the UART register bank. 22- reg: The base address of the UART register bank.
diff --git a/Documentation/filesystems/mount_api.txt b/Documentation/filesystems/mount_api.txt
index 944d1965e917..00ff0cfccfa7 100644
--- a/Documentation/filesystems/mount_api.txt
+++ b/Documentation/filesystems/mount_api.txt
@@ -12,11 +12,13 @@ CONTENTS
12 12
13 (4) Filesystem context security. 13 (4) Filesystem context security.
14 14
15 (5) VFS filesystem context operations. 15 (5) VFS filesystem context API.
16 16
17 (6) Parameter description. 17 (6) Superblock creation helpers.
18 18
19 (7) Parameter helper functions. 19 (7) Parameter description.
20
21 (8) Parameter helper functions.
20 22
21 23
22======== 24========
@@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
41 43
42 (7) Destroy the context. 44 (7) Destroy the context.
43 45
44To support this, the file_system_type struct gains a new field: 46To support this, the file_system_type struct gains two new fields:
45 47
46 int (*init_fs_context)(struct fs_context *fc); 48 int (*init_fs_context)(struct fs_context *fc);
49 const struct fs_parameter_description *parameters;
47 50
48which is invoked to set up the filesystem-specific parts of a filesystem 51The first is invoked to set up the filesystem-specific parts of a filesystem
49context, including the additional space. 52context, including the additional space, and the second points to the
53parameter description for validation at registration time and querying by a
54future system call.
50 55
51Note that security initialisation is done *after* the filesystem is called so 56Note that security initialisation is done *after* the filesystem is called so
52that the namespaces may be adjusted first. 57that the namespaces may be adjusted first.
@@ -73,9 +78,9 @@ context. This is represented by the fs_context structure:
73 void *s_fs_info; 78 void *s_fs_info;
74 unsigned int sb_flags; 79 unsigned int sb_flags;
75 unsigned int sb_flags_mask; 80 unsigned int sb_flags_mask;
81 unsigned int s_iflags;
82 unsigned int lsm_flags;
76 enum fs_context_purpose purpose:8; 83 enum fs_context_purpose purpose:8;
77 bool sloppy:1;
78 bool silent:1;
79 ... 84 ...
80 }; 85 };
81 86
@@ -141,6 +146,10 @@ The fs_context fields are as follows:
141 146
142 Which bits SB_* flags are to be set/cleared in super_block::s_flags. 147 Which bits SB_* flags are to be set/cleared in super_block::s_flags.
143 148
149 (*) unsigned int s_iflags
150
151 These will be bitwise-OR'd with s->s_iflags when a superblock is created.
152
144 (*) enum fs_context_purpose 153 (*) enum fs_context_purpose
145 154
146 This indicates the purpose for which the context is intended. The 155 This indicates the purpose for which the context is intended. The
@@ -150,17 +159,6 @@ The fs_context fields are as follows:
150 FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount 159 FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount
151 FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount 160 FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount
152 161
153 (*) bool sloppy
154 (*) bool silent
155
156 These are set if the sloppy or silent mount options are given.
157
158 [NOTE] sloppy is probably unnecessary when userspace passes over one
159 option at a time since the error can just be ignored if userspace deems it
160 to be unimportant.
161
162 [NOTE] silent is probably redundant with sb_flags & SB_SILENT.
163
164The mount context is created by calling vfs_new_fs_context() or 162The mount context is created by calling vfs_new_fs_context() or
165vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the 163vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the
166structure is not refcounted. 164structure is not refcounted.
@@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
342 It should return 0 on success or a negative error code on failure. 340 It should return 0 on success or a negative error code on failure.
343 341
344 342
345================================= 343==========================
346VFS FILESYSTEM CONTEXT OPERATIONS 344VFS FILESYSTEM CONTEXT API
347================================= 345==========================
348 346
349There are four operations for creating a filesystem context and 347There are four operations for creating a filesystem context and one for
350one for destroying a context: 348destroying a context:
351 349
352 (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type, 350 (*) struct fs_context *fs_context_for_mount(
353 struct dentry *reference, 351 struct file_system_type *fs_type,
354 unsigned int sb_flags, 352 unsigned int sb_flags);
355 unsigned int sb_flags_mask,
356 enum fs_context_purpose purpose);
357 353
358 Create a filesystem context for a given filesystem type and purpose. This 354 Allocate a filesystem context for the purpose of setting up a new mount,
359 allocates the filesystem context, sets the superblock flags, initialises 355 whether that be with a new superblock or sharing an existing one. This
360 the security and calls fs_type->init_fs_context() to initialise the 356 sets the superblock flags, initialises the security and calls
361 filesystem private data. 357 fs_type->init_fs_context() to initialise the filesystem private data.
362 358
363 reference can be NULL or it may indicate the root dentry of a superblock 359 fs_type specifies the filesystem type that will manage the context and
364 that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or 360 sb_flags presets the superblock flags stored therein.
365 the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT). 361
366 This is provided as a source of namespace information. 362 (*) struct fs_context *fs_context_for_reconfigure(
363 struct dentry *dentry,
364 unsigned int sb_flags,
365 unsigned int sb_flags_mask);
366
367 Allocate a filesystem context for the purpose of reconfiguring an
368 existing superblock. dentry provides a reference to the superblock to be
369 configured. sb_flags and sb_flags_mask indicate which superblock flags
370 need changing and to what.
371
372 (*) struct fs_context *fs_context_for_submount(
373 struct file_system_type *fs_type,
374 struct dentry *reference);
375
376 Allocate a filesystem context for the purpose of creating a new mount for
377 an automount point or other derived superblock. fs_type specifies the
378 filesystem type that will manage the context and the reference dentry
379 supplies the parameters. Namespaces are propagated from the reference
380 dentry's superblock also.
381
382 Note that it's not a requirement that the reference dentry be of the same
383 filesystem type as fs_type.
367 384
368 (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc); 385 (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
369 386
@@ -390,20 +407,6 @@ context pointer or a negative error code.
390For the remaining operations, if an error occurs, a negative error code will be 407For the remaining operations, if an error occurs, a negative error code will be
391returned. 408returned.
392 409
393 (*) int vfs_get_tree(struct fs_context *fc);
394
395 Get or create the mountable root and superblock, using the parameters in
396 the filesystem context to select/configure the superblock. This invokes
397 the ->validate() op and then the ->get_tree() op.
398
399 [NOTE] ->validate() could perhaps be rolled into ->get_tree() and
400 ->reconfigure().
401
402 (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
403
404 Create a mount given the parameters in the specified filesystem context.
405 Note that this does not attach the mount to anything.
406
407 (*) int vfs_parse_fs_param(struct fs_context *fc, 410 (*) int vfs_parse_fs_param(struct fs_context *fc,
408 struct fs_parameter *param); 411 struct fs_parameter *param);
409 412
@@ -432,17 +435,80 @@ returned.
432 clear the pointer, but then becomes responsible for disposing of the 435 clear the pointer, but then becomes responsible for disposing of the
433 object. 436 object.
434 437
435 (*) int vfs_parse_fs_string(struct fs_context *fc, char *key, 438 (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
436 const char *value, size_t v_size); 439 const char *value, size_t v_size);
437 440
438 A wrapper around vfs_parse_fs_param() that just passes a constant string. 441 A wrapper around vfs_parse_fs_param() that copies the value string it is
442 passed.
439 443
440 (*) int generic_parse_monolithic(struct fs_context *fc, void *data); 444 (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
441 445
442 Parse a sys_mount() data page, assuming the form to be a text list 446 Parse a sys_mount() data page, assuming the form to be a text list
443 consisting of key[=val] options separated by commas. Each item in the 447 consisting of key[=val] options separated by commas. Each item in the
444 list is passed to vfs_mount_option(). This is the default when the 448 list is passed to vfs_mount_option(). This is the default when the
445 ->parse_monolithic() operation is NULL. 449 ->parse_monolithic() method is NULL.
450
451 (*) int vfs_get_tree(struct fs_context *fc);
452
453 Get or create the mountable root and superblock, using the parameters in
454 the filesystem context to select/configure the superblock. This invokes
455 the ->get_tree() method.
456
457 (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
458
459 Create a mount given the parameters in the specified filesystem context.
460 Note that this does not attach the mount to anything.
461
462
463===========================
464SUPERBLOCK CREATION HELPERS
465===========================
466
467A number of VFS helpers are available for use by filesystems for the creation
468or looking up of superblocks.
469
470 (*) struct super_block *
471 sget_fc(struct fs_context *fc,
472 int (*test)(struct super_block *sb, struct fs_context *fc),
473 int (*set)(struct super_block *sb, struct fs_context *fc));
474
475 This is the core routine. If test is non-NULL, it searches for an
476 existing superblock matching the criteria held in the fs_context, using
477 the test function to match them. If no match is found, a new superblock
478 is created and the set function is called to set it up.
479
480 Prior to the set function being called, fc->s_fs_info will be transferred
481 to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
482 success (ie. 0).
483
484The following helpers all wrap sget_fc():
485
486 (*) int vfs_get_super(struct fs_context *fc,
487 enum vfs_get_super_keying keying,
488 int (*fill_super)(struct super_block *sb,
489 struct fs_context *fc))
490
491 This creates/looks up a deviceless superblock. The keying indicates how
492 many superblocks of this type may exist and in what manner they may be
493 shared:
494
495 (1) vfs_get_single_super
496
497 Only one such superblock may exist in the system. Any further
498 attempt to get a new superblock gets this one (and any parameter
499 differences are ignored).
500
501 (2) vfs_get_keyed_super
502
503 Multiple superblocks of this type may exist and they're keyed on
504 their s_fs_info pointer (for example this may refer to a
505 namespace).
506
507 (3) vfs_get_independent_super
508
509 Multiple independent superblocks of this type may exist. This
510 function never matches an existing one and always creates a new
511 one.
446 512
447 513
448===================== 514=====================
@@ -454,35 +520,22 @@ There's a core description struct that links everything together:
454 520
455 struct fs_parameter_description { 521 struct fs_parameter_description {
456 const char name[16]; 522 const char name[16];
457 u8 nr_params;
458 u8 nr_alt_keys;
459 u8 nr_enums;
460 bool ignore_unknown;
461 bool no_source;
462 const char *const *keys;
463 const struct constant_table *alt_keys;
464 const struct fs_parameter_spec *specs; 523 const struct fs_parameter_spec *specs;
465 const struct fs_parameter_enum *enums; 524 const struct fs_parameter_enum *enums;
466 }; 525 };
467 526
468For example: 527For example:
469 528
470 enum afs_param { 529 enum {
471 Opt_autocell, 530 Opt_autocell,
472 Opt_bar, 531 Opt_bar,
473 Opt_dyn, 532 Opt_dyn,
474 Opt_foo, 533 Opt_foo,
475 Opt_source, 534 Opt_source,
476 nr__afs_params
477 }; 535 };
478 536
479 static const struct fs_parameter_description afs_fs_parameters = { 537 static const struct fs_parameter_description afs_fs_parameters = {
480 .name = "kAFS", 538 .name = "kAFS",
481 .nr_params = nr__afs_params,
482 .nr_alt_keys = ARRAY_SIZE(afs_param_alt_keys),
483 .nr_enums = ARRAY_SIZE(afs_param_enums),
484 .keys = afs_param_keys,
485 .alt_keys = afs_param_alt_keys,
486 .specs = afs_param_specs, 539 .specs = afs_param_specs,
487 .enums = afs_param_enums, 540 .enums = afs_param_enums,
488 }; 541 };
@@ -494,28 +547,24 @@ The members are as follows:
494 The name to be used in error messages generated by the parse helper 547 The name to be used in error messages generated by the parse helper
495 functions. 548 functions.
496 549
497 (2) u8 nr_params; 550 (2) const struct fs_parameter_specification *specs;
498
499 The number of discrete parameter identifiers. This indicates the number
500 of elements in the ->types[] array and also limits the values that may be
501 used in the values that the ->keys[] array maps to.
502
503 It is expected that, for example, two parameters that are related, say
504 "acl" and "noacl" with have the same ID, but will be flagged to indicate
505 that one is the inverse of the other. The value can then be picked out
506 from the parse result.
507 551
508 (3) const struct fs_parameter_specification *specs; 552 Table of parameter specifications, terminated with a null entry, where the
553 entries are of type:
509 554
510 Table of parameter specifications, where the entries are of type: 555 struct fs_parameter_spec {
511 556 const char *name;
512 struct fs_parameter_type { 557 u8 opt;
513 enum fs_parameter_spec type:8; 558 enum fs_parameter_type type:8;
514 u8 flags; 559 unsigned short flags;
515 }; 560 };
516 561
517 and the parameter identifier is the index to the array. 'type' indicates 562 The 'name' field is a string to match exactly to the parameter key (no
518 the desired value type and must be one of: 563 wildcards, patterns and no case-independence) and 'opt' is the value that
564 will be returned by the fs_parser() function in the case of a successful
565 match.
566
567 The 'type' field indicates the desired value type and must be one of:
519 568
520 TYPE NAME EXPECTED VALUE RESULT IN 569 TYPE NAME EXPECTED VALUE RESULT IN
521 ======================= ======================= ===================== 570 ======================= ======================= =====================
@@ -525,85 +574,65 @@ The members are as follows:
525 fs_param_is_u32_octal 32-bit octal int result->uint_32 574 fs_param_is_u32_octal 32-bit octal int result->uint_32
526 fs_param_is_u32_hex 32-bit hex int result->uint_32 575 fs_param_is_u32_hex 32-bit hex int result->uint_32
527 fs_param_is_s32 32-bit signed int result->int_32 576 fs_param_is_s32 32-bit signed int result->int_32
577 fs_param_is_u64 64-bit unsigned int result->uint_64
528 fs_param_is_enum Enum value name result->uint_32 578 fs_param_is_enum Enum value name result->uint_32
529 fs_param_is_string Arbitrary string param->string 579 fs_param_is_string Arbitrary string param->string
530 fs_param_is_blob Binary blob param->blob 580 fs_param_is_blob Binary blob param->blob
531 fs_param_is_blockdev Blockdev path * Needs lookup 581 fs_param_is_blockdev Blockdev path * Needs lookup
532 fs_param_is_path Path * Needs lookup 582 fs_param_is_path Path * Needs lookup
533 fs_param_is_fd File descriptor param->file 583 fs_param_is_fd File descriptor result->int_32
534
535 And each parameter can be qualified with 'flags':
536
537 fs_param_v_optional The value is optional
538 fs_param_neg_with_no If key name is prefixed with "no", it is false
539 fs_param_neg_with_empty If value is "", it is false
540 fs_param_deprecated The parameter is deprecated.
541
542 For example:
543
544 static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
545 [Opt_autocell] = { fs_param_is flag },
546 [Opt_bar] = { fs_param_is_enum },
547 [Opt_dyn] = { fs_param_is flag },
548 [Opt_foo] = { fs_param_is_bool, fs_param_neg_with_no },
549 [Opt_source] = { fs_param_is_string },
550 };
551 584
552 Note that if the value is of fs_param_is_bool type, fs_parse() will try 585 Note that if the value is of fs_param_is_bool type, fs_parse() will try
553 to match any string value against "0", "1", "no", "yes", "false", "true". 586 to match any string value against "0", "1", "no", "yes", "false", "true".
554 587
555 [!] NOTE that the table must be sorted according to primary key name so 588 Each parameter can also be qualified with 'flags':
556 that ->keys[] is also sorted.
557
558 (4) const char *const *keys;
559
560 Table of primary key names for the parameters. There must be one entry
561 per defined parameter. The table is optional if ->nr_params is 0. The
562 table is just an array of names e.g.:
563 589
564 static const char *const afs_param_keys[nr__afs_params] = { 590 fs_param_v_optional The value is optional
565 [Opt_autocell] = "autocell", 591 fs_param_neg_with_no result->negated set if key is prefixed with "no"
566 [Opt_bar] = "bar", 592 fs_param_neg_with_empty result->negated set if value is ""
567 [Opt_dyn] = "dyn", 593 fs_param_deprecated The parameter is deprecated.
568 [Opt_foo] = "foo",
569 [Opt_source] = "source",
570 };
571
572 [!] NOTE that the table must be sorted such that the table can be searched
573 with bsearch() using strcmp(). This means that the Opt_* values must
574 correspond to the entries in this table.
575
576 (5) const struct constant_table *alt_keys;
577 u8 nr_alt_keys;
578
579 Table of additional key names and their mappings to parameter ID plus the
580 number of elements in the table. This is optional. The table is just an
581 array of { name, integer } pairs, e.g.:
582 594
583 static const struct constant_table afs_param_keys[] = { 595 These are wrapped with a number of convenience wrappers:
584 { "baz", Opt_bar }, 596
585 { "dynamic", Opt_dyn }, 597 MACRO SPECIFIES
598 ======================= ===============================================
599 fsparam_flag() fs_param_is_flag
600 fsparam_flag_no() fs_param_is_flag, fs_param_neg_with_no
601 fsparam_bool() fs_param_is_bool
602 fsparam_u32() fs_param_is_u32
603 fsparam_u32oct() fs_param_is_u32_octal
604 fsparam_u32hex() fs_param_is_u32_hex
605 fsparam_s32() fs_param_is_s32
606 fsparam_u64() fs_param_is_u64
607 fsparam_enum() fs_param_is_enum
608 fsparam_string() fs_param_is_string
609 fsparam_blob() fs_param_is_blob
610 fsparam_bdev() fs_param_is_blockdev
611 fsparam_path() fs_param_is_path
612 fsparam_fd() fs_param_is_fd
613
614 all of which take two arguments, name string and option number - for
615 example:
616
617 static const struct fs_parameter_spec afs_param_specs[] = {
618 fsparam_flag ("autocell", Opt_autocell),
619 fsparam_flag ("dyn", Opt_dyn),
620 fsparam_string ("source", Opt_source),
621 fsparam_flag_no ("foo", Opt_foo),
622 {}
586 }; 623 };
587 624
588 [!] NOTE that the table must be sorted such that strcmp() can be used with 625 An addition macro, __fsparam() is provided that takes an additional pair
589 bsearch() to search the entries. 626 of arguments to specify the type and the flags for anything that doesn't
590 627 match one of the above macros.
591 The parameter ID can also be fs_param_key_removed to indicate that a
592 deprecated parameter has been removed and that an error will be given.
593 This differs from fs_param_deprecated where the parameter may still have
594 an effect.
595
596 Further, the behaviour of the parameter may differ when an alternate name
597 is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
598 628
599 (6) const struct fs_parameter_enum *enums; 629 (6) const struct fs_parameter_enum *enums;
600 u8 nr_enums;
601 630
602 Table of enum value names to integer mappings and the number of elements 631 Table of enum value names to integer mappings, terminated with a null
603 stored therein. This is of type: 632 entry. This is of type:
604 633
605 struct fs_parameter_enum { 634 struct fs_parameter_enum {
606 u8 param_id; 635 u8 opt;
607 char name[14]; 636 char name[14];
608 u8 value; 637 u8 value;
609 }; 638 };
@@ -621,11 +650,6 @@ The members are as follows:
621 try to look the value up in the enum table and the result will be stored 650 try to look the value up in the enum table and the result will be stored
622 in the parse result. 651 in the parse result.
623 652
624 (7) bool no_source;
625
626 If this is set, fs_parse() will ignore any "source" parameter and not
627 pass it to the filesystem.
628
629The parser should be pointed to by the parser pointer in the file_system_type 653The parser should be pointed to by the parser pointer in the file_system_type
630struct as this will provide validation on registration (if 654struct as this will provide validation on registration (if
631CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from 655CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@@ -650,9 +674,8 @@ process the parameters it is given.
650 int value; 674 int value;
651 }; 675 };
652 676
653 and it must be sorted such that it can be searched using bsearch() using 677 If a match is found, the corresponding value is returned. If a match
654 strcmp(). If a match is found, the corresponding value is returned. If a 678 isn't found, the not_found value is returned instead.
655 match isn't found, the not_found value is returned instead.
656 679
657 (*) bool validate_constant_table(const struct constant_table *tbl, 680 (*) bool validate_constant_table(const struct constant_table *tbl,
658 size_t tbl_size, 681 size_t tbl_size,
@@ -665,36 +688,36 @@ process the parameters it is given.
665 should just be set to lie inside the low-to-high range. 688 should just be set to lie inside the low-to-high range.
666 689
667 If all is good, true is returned. If the table is invalid, errors are 690 If all is good, true is returned. If the table is invalid, errors are
668 logged to dmesg, the stack is dumped and false is returned. 691 logged to dmesg and false is returned.
692
693 (*) bool fs_validate_description(const struct fs_parameter_description *desc);
694
695 This performs some validation checks on a parameter description. It
696 returns true if the description is good and false if it is not. It will
697 log errors to dmesg if validation fails.
669 698
670 (*) int fs_parse(struct fs_context *fc, 699 (*) int fs_parse(struct fs_context *fc,
671 const struct fs_param_parser *parser, 700 const struct fs_parameter_description *desc,
672 struct fs_parameter *param, 701 struct fs_parameter *param,
673 struct fs_param_parse_result *result); 702 struct fs_parse_result *result);
674 703
675 This is the main interpreter of parameters. It uses the parameter 704 This is the main interpreter of parameters. It uses the parameter
676 description (parser) to look up the name of the parameter to use and to 705 description to look up a parameter by key name and to convert that to an
677 convert that to a parameter ID (stored in result->key). 706 option number (which it returns).
678 707
679 If successful, and if the parameter type indicates the result is a 708 If successful, and if the parameter type indicates the result is a
680 boolean, integer or enum type, the value is converted by this function and 709 boolean, integer or enum type, the value is converted by this function and
681 the result stored in result->{boolean,int_32,uint_32}. 710 the result stored in result->{boolean,int_32,uint_32,uint_64}.
682 711
683 If a match isn't initially made, the key is prefixed with "no" and no 712 If a match isn't initially made, the key is prefixed with "no" and no
684 value is present then an attempt will be made to look up the key with the 713 value is present then an attempt will be made to look up the key with the
685 prefix removed. If this matches a parameter for which the type has flag 714 prefix removed. If this matches a parameter for which the type has flag
686 fs_param_neg_with_no set, then a match will be made and the value will be 715 fs_param_neg_with_no set, then a match will be made and result->negated
687 set to false/0/NULL. 716 will be set to true.
688
689 If the parameter is successfully matched and, optionally, parsed
690 correctly, 1 is returned. If the parameter isn't matched and
691 parser->ignore_unknown is set, then 0 is returned. Otherwise -EINVAL is
692 returned.
693
694 (*) bool fs_validate_description(const struct fs_parameter_description *desc);
695 717
696 This is validates the parameter description. It returns true if the 718 If the parameter isn't matched, -ENOPARAM will be returned; if the
697 description is good and false if it is not. 719 parameter is matched, but the value is erroneous, -EINVAL will be
720 returned; otherwise the parameter's option number will be returned.
698 721
699 (*) int fs_lookup_param(struct fs_context *fc, 722 (*) int fs_lookup_param(struct fs_context *fc,
700 struct fs_parameter *value, 723 struct fs_parameter *value,
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index d1ee484a787d..ee9984f35868 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
36 * Intel Cannon Lake (PCH) 36 * Intel Cannon Lake (PCH)
37 * Intel Cedar Fork (PCH) 37 * Intel Cedar Fork (PCH)
38 * Intel Ice Lake (PCH) 38 * Intel Ice Lake (PCH)
39 * Intel Comet Lake (PCH)
39 Datasheets: Publicly available at the Intel website 40 Datasheets: Publicly available at the Intel website
40 41
41On Intel Patsburg and later chipsets, both the normal host SMBus controller 42On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst
index 18c1415e7bfa..ace56204dd03 100644
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
50 50
51 patchset 51 patchset
52 [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY 52 [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
53 http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com 53 https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
54 54
55 55
56Interface 56Interface
diff --git a/Documentation/networking/netdev-FAQ.rst b/Documentation/networking/netdev-FAQ.rst
index 0ac5fa77f501..8c7a713cf657 100644
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
131version that should be applied. If there is any doubt, the maintainer 131version that should be applied. If there is any doubt, the maintainer
132will reply and ask what should be done. 132will reply and ask what should be done.
133 133
134Q: I made changes to only a few patches in a patch series should I resend only those changed?
135--------------------------------------------------------------------------------------------
136A: No, please resend the entire patch series and make sure you do number your
137patches such that it is clear this is the latest and greatest set of patches
138that can be applied.
139
140Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
141-------------------------------------------------------------------------------------------------------------------------------------------
142A: There is no revert possible, once it is pushed out, it stays like that.
143Please send incremental versions on top of what has been merged in order to fix
144the patches the way they would look like if your latest patch series was to be
145merged.
146
134Q: How can I tell what patches are queued up for backporting to the various stable releases? 147Q: How can I tell what patches are queued up for backporting to the various stable releases?
135-------------------------------------------------------------------------------------------- 148--------------------------------------------------------------------------------------------
136A: Normally Greg Kroah-Hartman collects stable commits himself, but for 149A: Normally Greg Kroah-Hartman collects stable commits himself, but for
diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
index 54128c50d508..ca2136c76042 100644
--- a/Documentation/networking/nf_flowtable.txt
+++ b/Documentation/networking/nf_flowtable.txt
@@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
44 / \ / \ |Routing | / \ 44 / \ / \ |Routing | / \
45 --> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit 45 --> ingress ---> prerouting ---> |decision| | postrouting |--> neigh_xmit
46 \_________/ \__________/ ---------- \____________/ ^ 46 \_________/ \__________/ ---------- \____________/ ^
47 | ^ | | ^ | 47 | ^ | ^ |
48 flowtable | | ____\/___ | | 48 flowtable | ____\/___ | |
49 | | | / \ | | 49 | | / \ | |
50 __\/___ | --------->| forward |------------ | 50 __\/___ | | forward |------------ |
51 |-----| | \_________/ | 51 |-----| | \_________/ |
52 |-----| | 'flow offload' rule | 52 |-----| | 'flow offload' rule |
53 |-----| | adds entry to | 53 |-----| | adds entry to |
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index 52b026be028f..38a4edc4522b 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -413,7 +413,7 @@ algorithm.
413.. _F-RTO: https://tools.ietf.org/html/rfc5682 413.. _F-RTO: https://tools.ietf.org/html/rfc5682
414 414
415TCP Fast Path 415TCP Fast Path
416============ 416=============
417When kernel receives a TCP packet, it has two paths to handle the 417When kernel receives a TCP packet, it has two paths to handle the
418packet, one is fast path, another is slow path. The comment in kernel 418packet, one is fast path, another is slow path. The comment in kernel
419code provides a good explanation of them, I pasted them below:: 419code provides a good explanation of them, I pasted them below::
@@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
681DSACK to the sender. 681DSACK to the sender.
682 682
683* TcpExtTCPDSACKRecv 683* TcpExtTCPDSACKRecv
684
684The TCP stack receives a DSACK, which indicates an acknowledged 685The TCP stack receives a DSACK, which indicates an acknowledged
685duplicate packet is received. 686duplicate packet is received.
686 687
@@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicate an out of order
690duplicate packet is received. 691duplicate packet is received.
691 692
692invalid SACK and DSACK 693invalid SACK and DSACK
693==================== 694======================
694When a SACK (or DSACK) block is invalid, a corresponding counter would 695When a SACK (or DSACK) block is invalid, a corresponding counter would
695be updated. The validation method is based on the start/end sequence 696be updated. The validation method is based on the start/end sequence
696number of the SACK block. For more details, please refer to the comment 697number of the SACK block. For more details, please refer to the comment
@@ -704,11 +705,13 @@ explaination:
704.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32 705.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
705 706
706* TcpExtTCPSACKDiscard 707* TcpExtTCPSACKDiscard
708
707This counter indicates how many SACK blocks are invalid. If the invalid 709This counter indicates how many SACK blocks are invalid. If the invalid
708SACK block is caused by ACK recording, the TCP stack will only ignore 710SACK block is caused by ACK recording, the TCP stack will only ignore
709it and won't update this counter. 711it and won't update this counter.
710 712
711* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo 713* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
714
712When a DSACK block is invalid, one of these two counters would be 715When a DSACK block is invalid, one of these two counters would be
713updated. Which counter will be updated depends on the undo_marker flag 716updated. Which counter will be updated depends on the undo_marker flag
714of the TCP socket. If the undo_marker is not set, the TCP stack isn't 717of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
719will be updated. As implied in its name, it might be an old packet. 722will be updated. As implied in its name, it might be an old packet.
720 723
721SACK shift 724SACK shift
722========= 725==========
723The linux networking stack stores data in sk_buff struct (skb for 726The linux networking stack stores data in sk_buff struct (skb for
724short). If a SACK block acrosses multiple skb, the TCP stack will try 727short). If a SACK block acrosses multiple skb, the TCP stack will try
725to re-arrange data in these skb. E.g. if a SACK block acknowledges seq 728to re-arrange data in these skb. E.g. if a SACK block acknowledges seq
@@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
730discard, this operation is 'merge'. 733discard, this operation is 'merge'.
731 734
732* TcpExtTCPSackShifted 735* TcpExtTCPSackShifted
736
733A skb is shifted 737A skb is shifted
734 738
735* TcpExtTCPSackMerged 739* TcpExtTCPSackMerged
740
736A skb is merged 741A skb is merged
737 742
738* TcpExtTCPSackShiftFallback 743* TcpExtTCPSackShiftFallback
744
739A skb should be shifted or merged, but the TCP stack doesn't do it for 745A skb should be shifted or merged, but the TCP stack doesn't do it for
740some reasons. 746some reasons.
741 747
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 7de9eee73fcd..67068c47c591 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -5,25 +5,32 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
5---------------------- 5----------------------
6 6
7The kvm API is a set of ioctls that are issued to control various aspects 7The kvm API is a set of ioctls that are issued to control various aspects
8of a virtual machine. The ioctls belong to three classes 8of a virtual machine. The ioctls belong to three classes:
9 9
10 - System ioctls: These query and set global attributes which affect the 10 - System ioctls: These query and set global attributes which affect the
11 whole kvm subsystem. In addition a system ioctl is used to create 11 whole kvm subsystem. In addition a system ioctl is used to create
12 virtual machines 12 virtual machines.
13 13
14 - VM ioctls: These query and set attributes that affect an entire virtual 14 - VM ioctls: These query and set attributes that affect an entire virtual
15 machine, for example memory layout. In addition a VM ioctl is used to 15 machine, for example memory layout. In addition a VM ioctl is used to
16 create virtual cpus (vcpus). 16 create virtual cpus (vcpus) and devices.
17 17
18 Only run VM ioctls from the same process (address space) that was used 18 VM ioctls must be issued from the same process (address space) that was
19 to create the VM. 19 used to create the VM.
20 20
21 - vcpu ioctls: These query and set attributes that control the operation 21 - vcpu ioctls: These query and set attributes that control the operation
22 of a single virtual cpu. 22 of a single virtual cpu.
23 23
24 Only run vcpu ioctls from the same thread that was used to create the 24 vcpu ioctls should be issued from the same thread that was used to create
25 vcpu. 25 the vcpu, except for asynchronous vcpu ioctl that are marked as such in
26 the documentation. Otherwise, the first ioctl after switching threads
27 could see a performance impact.
26 28
29 - device ioctls: These query and set attributes that control the operation
30 of a single device.
31
32 device ioctls must be issued from the same process (address space) that
33 was used to create the VM.
27 34
282. File descriptors 352. File descriptors
29------------------- 36-------------------
@@ -32,17 +39,34 @@ The kvm API is centered around file descriptors. An initial
32open("/dev/kvm") obtains a handle to the kvm subsystem; this handle 39open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
33can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this 40can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
34handle will create a VM file descriptor which can be used to issue VM 41handle will create a VM file descriptor which can be used to issue VM
35ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu 42ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
36and return a file descriptor pointing to it. Finally, ioctls on a vcpu 43create a virtual cpu or device and return a file descriptor pointing to
37fd can be used to control the vcpu, including the important task of 44the new resource. Finally, ioctls on a vcpu or device fd can be used
38actually running guest code. 45to control the vcpu or device. For vcpus, this includes the important
46task of actually running guest code.
39 47
40In general file descriptors can be migrated among processes by means 48In general file descriptors can be migrated among processes by means
41of fork() and the SCM_RIGHTS facility of unix domain socket. These 49of fork() and the SCM_RIGHTS facility of unix domain socket. These
42kinds of tricks are explicitly not supported by kvm. While they will 50kinds of tricks are explicitly not supported by kvm. While they will
43not cause harm to the host, their actual behavior is not guaranteed by 51not cause harm to the host, their actual behavior is not guaranteed by
44the API. The only supported use is one virtual machine per process, 52the API. See "General description" for details on the ioctl usage
45and one vcpu per thread. 53model that is supported by KVM.
54
55It is important to note that although VM ioctls may only be issued from
56the process that created the VM, a VM's lifecycle is associated with its
57file descriptor, not its creator (process). In other words, the VM and
58its resources, *including the associated address space*, are not freed
59until the last reference to the VM's file descriptor has been released.
60For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will
61not be freed until both the parent (original) process and its child have
62put their references to the VM's file descriptor.
63
64Because a VM's resources are not freed until the last reference to its
65file descriptor is released, creating additional references to a VM
66via fork(), dup(), etc... without careful consideration is strongly
67discouraged and may have unwanted side effects, e.g. memory allocated
68by and on behalf of the VM's process may not be freed/unaccounted when
69the VM is shut down.
46 70
47 71
48It is important to note that although VM ioctls may only be issued from 72It is important to note that although VM ioctls may only be issued from
@@ -515,11 +539,15 @@ c) KVM_INTERRUPT_SET_LEVEL
515Note that any value for 'irq' other than the ones stated above is invalid 539Note that any value for 'irq' other than the ones stated above is invalid
516and incurs unexpected behavior. 540and incurs unexpected behavior.
517 541
542This is an asynchronous vcpu ioctl and can be invoked from any thread.
543
518MIPS: 544MIPS:
519 545
520Queues an external interrupt to be injected into the virtual CPU. A negative 546Queues an external interrupt to be injected into the virtual CPU. A negative
521interrupt number dequeues the interrupt. 547interrupt number dequeues the interrupt.
522 548
549This is an asynchronous vcpu ioctl and can be invoked from any thread.
550
523 551
5244.17 KVM_DEBUG_GUEST 5524.17 KVM_DEBUG_GUEST
525 553
@@ -1086,14 +1114,12 @@ struct kvm_userspace_memory_region {
1086#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) 1114#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
1087#define KVM_MEM_READONLY (1UL << 1) 1115#define KVM_MEM_READONLY (1UL << 1)
1088 1116
1089This ioctl allows the user to create or modify a guest physical memory 1117This ioctl allows the user to create, modify or delete a guest physical
1090slot. When changing an existing slot, it may be moved in the guest 1118memory slot. Bits 0-15 of "slot" specify the slot id and this value
1091physical memory space, or its flags may be modified. It may not be 1119should be less than the maximum number of user memory slots supported per
1092resized. Slots may not overlap in guest physical address space. 1120VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
1093Bits 0-15 of "slot" specifies the slot id and this value should be 1121if this capability is supported by the architecture. Slots may not
1094less than the maximum number of user memory slots supported per VM. 1122overlap in guest physical address space.
1095The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS,
1096if this capability is supported by the architecture.
1097 1123
1098If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" 1124If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
1099specifies the address space which is being modified. They must be 1125specifies the address space which is being modified. They must be
@@ -1102,6 +1128,10 @@ KVM_CAP_MULTI_ADDRESS_SPACE capability. Slots in separate address spaces
1102are unrelated; the restriction on overlapping slots only applies within 1128are unrelated; the restriction on overlapping slots only applies within
1103each address space. 1129each address space.
1104 1130
1131Deleting a slot is done by passing zero for memory_size. When changing
1132an existing slot, it may be moved in the guest physical memory space,
1133or its flags may be modified, but it may not be resized.
1134
1105Memory for the region is taken starting at the address denoted by the 1135Memory for the region is taken starting at the address denoted by the
1106field userspace_addr, which must point at user addressable memory for 1136field userspace_addr, which must point at user addressable memory for
1107the entire memory slot size. Any object may back this memory, including 1137the entire memory slot size. Any object may back this memory, including
@@ -2493,7 +2523,7 @@ KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
2493 machine checks needing further payload are not 2523 machine checks needing further payload are not
2494 supported by this ioctl) 2524 supported by this ioctl)
2495 2525
2496Note that the vcpu ioctl is asynchronous to vcpu execution. 2526This is an asynchronous vcpu ioctl and can be invoked from any thread.
2497 2527
24984.78 KVM_PPC_GET_HTAB_FD 25284.78 KVM_PPC_GET_HTAB_FD
2499 2529
@@ -3042,8 +3072,7 @@ KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
3042KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall 3072KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
3043KVM_S390_MCHK - machine check interrupt; parameters in .mchk 3073KVM_S390_MCHK - machine check interrupt; parameters in .mchk
3044 3074
3045 3075This is an asynchronous vcpu ioctl and can be invoked from any thread.
3046Note that the vcpu ioctl is asynchronous to vcpu execution.
3047 3076
30484.94 KVM_S390_GET_IRQ_STATE 30774.94 KVM_S390_GET_IRQ_STATE
3049 3078
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index f365102c80f5..2efe0efc516e 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -142,7 +142,7 @@ Shadow pages contain the following information:
142 If clear, this page corresponds to a guest page table denoted by the gfn 142 If clear, this page corresponds to a guest page table denoted by the gfn
143 field. 143 field.
144 role.quadrant: 144 role.quadrant:
145 When role.cr4_pae=0, the guest uses 32-bit gptes while the host uses 64-bit 145 When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
146 sptes. That means a guest page table contains more ptes than the host, 146 sptes. That means a guest page table contains more ptes than the host,
147 so multiple shadow pages are needed to shadow one guest page. 147 so multiple shadow pages are needed to shadow one guest page.
148 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the 148 For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@@ -158,9 +158,9 @@ Shadow pages contain the following information:
158 The page is invalid and should not be used. It is a root page that is 158 The page is invalid and should not be used. It is a root page that is
159 currently pinned (by a cpu hardware register pointing to it); once it is 159 currently pinned (by a cpu hardware register pointing to it); once it is
160 unpinned it will be destroyed. 160 unpinned it will be destroyed.
161 role.cr4_pae: 161 role.gpte_is_8_bytes:
162 Contains the value of cr4.pae for which the page is valid (e.g. whether 162 Reflects the size of the guest PTE for which the page is valid, i.e. '1'
163 32-bit or 64-bit gptes are in use). 163 if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
164 role.nxe: 164 role.nxe:
165 Contains the value of efer.nxe for which the page is valid. 165 Contains the value of efer.nxe for which the page is valid.
166 role.cr0_wp: 166 role.cr0_wp:
@@ -173,6 +173,9 @@ Shadow pages contain the following information:
173 Contains the value of cr4.smap && !cr0.wp for which the page is valid 173 Contains the value of cr4.smap && !cr0.wp for which the page is valid
174 (pages for which this is true are different from other pages; see the 174 (pages for which this is true are different from other pages; see the
175 treatment of cr0.wp=0 below). 175 treatment of cr0.wp=0 below).
176 role.ept_sp:
177 This is a virtual flag to denote a shadowed nested EPT page. ept_sp
178 is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
176 role.smm: 179 role.smm:
177 Is 1 if the page is valid in system management mode. This field 180 Is 1 if the page is valid in system management mode. This field
178 determines which of the kvm_memslots array was used to build this 181 determines which of the kvm_memslots array was used to build this
diff --git a/MAINTAINERS b/MAINTAINERS
index 3606922c64d7..baf7623b01b6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2364,7 +2364,7 @@ F: arch/arm/mm/cache-uniphier.c
2364F: arch/arm64/boot/dts/socionext/uniphier* 2364F: arch/arm64/boot/dts/socionext/uniphier*
2365F: drivers/bus/uniphier-system-bus.c 2365F: drivers/bus/uniphier-system-bus.c
2366F: drivers/clk/uniphier/ 2366F: drivers/clk/uniphier/
2367F: drivers/dmaengine/uniphier-mdmac.c 2367F: drivers/dma/uniphier-mdmac.c
2368F: drivers/gpio/gpio-uniphier.c 2368F: drivers/gpio/gpio-uniphier.c
2369F: drivers/i2c/busses/i2c-uniphier* 2369F: drivers/i2c/busses/i2c-uniphier*
2370F: drivers/irqchip/irq-uniphier-aidet.c 2370F: drivers/irqchip/irq-uniphier-aidet.c
@@ -6416,7 +6416,6 @@ L: linux-kernel@vger.kernel.org
6416T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core 6416T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
6417S: Maintained 6417S: Maintained
6418F: kernel/futex.c 6418F: kernel/futex.c
6419F: kernel/futex_compat.c
6420F: include/asm-generic/futex.h 6419F: include/asm-generic/futex.h
6421F: include/linux/futex.h 6420F: include/linux/futex.h
6422F: include/uapi/linux/futex.h 6421F: include/uapi/linux/futex.h
@@ -8104,6 +8103,16 @@ F: include/linux/iommu.h
8104F: include/linux/of_iommu.h 8103F: include/linux/of_iommu.h
8105F: include/linux/iova.h 8104F: include/linux/iova.h
8106 8105
8106IO_URING
8107M: Jens Axboe <axboe@kernel.dk>
8108L: linux-block@vger.kernel.org
8109L: linux-fsdevel@vger.kernel.org
8110T: git git://git.kernel.dk/linux-block
8111T: git git://git.kernel.dk/liburing
8112S: Maintained
8113F: fs/io_uring.c
8114F: include/uapi/linux/io_uring.h
8115
8107IP MASQUERADING 8116IP MASQUERADING
8108M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar> 8117M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
8109S: Maintained 8118S: Maintained
diff --git a/Makefile b/Makefile
index 99c0530489ef..026fbc450906 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 1 3PATCHLEVEL = 1
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc3
6NAME = Shy Crocodile 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -31,26 +31,12 @@ _all:
31# descending is started. They are now explicitly listed as the 31# descending is started. They are now explicitly listed as the
32# prepare rule. 32# prepare rule.
33 33
34# Ugly workaround for Debian make-kpkg: 34ifneq ($(sub_make_done),1)
35# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
36# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
37# displays warning to discourage such abusage.
38ifneq ($(word 2, $(MAKEFILE_LIST)),)
39$(warning Do not include top Makefile of Linux Kernel)
40sub-make-done := 1
41MAKEFLAGS += -rR
42endif
43
44ifneq ($(sub-make-done),1)
45 35
46# Do not use make's built-in rules and variables 36# Do not use make's built-in rules and variables
47# (this increases performance and avoids hard-to-debug behaviour) 37# (this increases performance and avoids hard-to-debug behaviour)
48MAKEFLAGS += -rR 38MAKEFLAGS += -rR
49 39
50# 'MAKEFLAGS += -rR' does not become immediately effective for old
51# GNU Make versions. Cancel implicit rules for this Makefile.
52$(lastword $(MAKEFILE_LIST)): ;
53
54# Avoid funny character set dependencies 40# Avoid funny character set dependencies
55unexport LC_ALL 41unexport LC_ALL
56LC_COLLATE=C 42LC_COLLATE=C
@@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
153# 'sub-make' below. 139# 'sub-make' below.
154MAKEFLAGS += --include-dir=$(CURDIR) 140MAKEFLAGS += --include-dir=$(CURDIR)
155 141
142need-sub-make := 1
156else 143else
157 144
158# Do not print "Entering directory ..." at all for in-tree build. 145# Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
160 147
161endif # ifneq ($(KBUILD_OUTPUT),) 148endif # ifneq ($(KBUILD_OUTPUT),)
162 149
150ifneq ($(filter 3.%,$(MAKE_VERSION)),)
151# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
152# We need to invoke sub-make to avoid implicit rules in the top Makefile.
153need-sub-make := 1
154# Cancel implicit rules for this Makefile.
155$(lastword $(MAKEFILE_LIST)): ;
156endif
157
158export sub_make_done := 1
159
160ifeq ($(need-sub-make),1)
161
163PHONY += $(MAKECMDGOALS) sub-make 162PHONY += $(MAKECMDGOALS) sub-make
164 163
165$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make 164$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
167 166
168# Invoke a second make in the output directory, passing relevant variables 167# Invoke a second make in the output directory, passing relevant variables
169sub-make: 168sub-make:
170 $(Q)$(MAKE) sub-make-done=1 \ 169 $(Q)$(MAKE) \
171 $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ 170 $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
172 -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) 171 -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
173 172
174else # sub-make-done 173endif # need-sub-make
174endif # sub_make_done
175
175# We process the rest of the Makefile if this is the final invocation of make 176# We process the rest of the Makefile if this is the final invocation of make
177ifeq ($(need-sub-make),)
176 178
177# Do not print "Entering directory ...", 179# Do not print "Entering directory ...",
178# but we want to display it when entering to the output directory 180# but we want to display it when entering to the output directory
@@ -497,7 +499,8 @@ outputmakefile:
497ifneq ($(KBUILD_SRC),) 499ifneq ($(KBUILD_SRC),)
498 $(Q)ln -fsn $(srctree) source 500 $(Q)ln -fsn $(srctree) source
499 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) 501 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
500 $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore 502 $(Q)test -e .gitignore || \
503 { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
501endif 504endif
502 505
503ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) 506ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -677,7 +680,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
677KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) 680KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
678 681
679ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE 682ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
680KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) 683KBUILD_CFLAGS += -Os
681else 684else
682KBUILD_CFLAGS += -O2 685KBUILD_CFLAGS += -O2
683endif 686endif
@@ -950,9 +953,11 @@ mod_sign_cmd = true
950endif 953endif
951export mod_sign_cmd 954export mod_sign_cmd
952 955
956HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
957
953ifdef CONFIG_STACK_VALIDATION 958ifdef CONFIG_STACK_VALIDATION
954 has_libelf := $(call try-run,\ 959 has_libelf := $(call try-run,\
955 echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0) 960 echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
956 ifeq ($(has_libelf),1) 961 ifeq ($(has_libelf),1)
957 objtool_target := tools/objtool FORCE 962 objtool_target := tools/objtool FORCE
958 else 963 else
@@ -1757,7 +1762,7 @@ existing-targets := $(wildcard $(sort $(targets)))
1757 1762
1758endif # ifeq ($(config-targets),1) 1763endif # ifeq ($(config-targets),1)
1759endif # ifeq ($(mixed-targets),1) 1764endif # ifeq ($(mixed-targets),1)
1760endif # sub-make-done 1765endif # need-sub-make
1761 1766
1762PHONY += FORCE 1767PHONY += FORCE
1763FORCE: 1768FORCE:
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index dc0ab28baca1..70b783333965 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -6,6 +6,7 @@ generic-y += exec.h
6generic-y += export.h 6generic-y += export.h
7generic-y += fb.h 7generic-y += fb.h
8generic-y += irq_work.h 8generic-y += irq_work.h
9generic-y += kvm_para.h
9generic-y += mcs_spinlock.h 10generic-y += mcs_spinlock.h
10generic-y += mm-arch-hooks.h 11generic-y += mm-arch-hooks.h
11generic-y += preempt.h 12generic-y += preempt.h
diff --git a/arch/alpha/include/uapi/asm/kvm_para.h b/arch/alpha/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/alpha/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index df55672c59e6..c781e45d1d99 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -144,11 +144,11 @@ config ARC_CPU_770
144 Support for ARC770 core introduced with Rel 4.10 (Summer 2011) 144 Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
145 This core has a bunch of cool new features: 145 This core has a bunch of cool new features:
146 -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4) 146 -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
147 Shared Address Spaces (for sharing TLB entries in MMU) 147 Shared Address Spaces (for sharing TLB entries in MMU)
148 -Caches: New Prog Model, Region Flush 148 -Caches: New Prog Model, Region Flush
149 -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr 149 -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
150 150
151endif #ISA_ARCOMPACT 151endif #ISA_ARCOMPACT
152 152
153config ARC_CPU_HS 153config ARC_CPU_HS
154 bool "ARC-HS" 154 bool "ARC-HS"
@@ -198,7 +198,7 @@ config ARC_SMP_HALT_ON_RESET
198 at designated entry point. For other case, all jump to common 198 at designated entry point. For other case, all jump to common
199 entry point and spin wait for Master's signal. 199 entry point and spin wait for Master's signal.
200 200
201endif #SMP 201endif #SMP
202 202
203config ARC_MCIP 203config ARC_MCIP
204 bool "ARConnect Multicore IP (MCIP) Support " 204 bool "ARConnect Multicore IP (MCIP) Support "
@@ -249,7 +249,7 @@ config ARC_CACHE_VIPT_ALIASING
249 bool "Support VIPT Aliasing D$" 249 bool "Support VIPT Aliasing D$"
250 depends on ARC_HAS_DCACHE && ISA_ARCOMPACT 250 depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
251 251
252endif #ARC_CACHE 252endif #ARC_CACHE
253 253
254config ARC_HAS_ICCM 254config ARC_HAS_ICCM
255 bool "Use ICCM" 255 bool "Use ICCM"
@@ -370,7 +370,7 @@ config ARC_FPU_SAVE_RESTORE
370 based on actual usage of FPU by a task. Thus our implemn does 370 based on actual usage of FPU by a task. Thus our implemn does
371 this for all tasks in system. 371 this for all tasks in system.
372 372
373endif #ISA_ARCOMPACT 373endif #ISA_ARCOMPACT
374 374
375config ARC_CANT_LLSC 375config ARC_CANT_LLSC
376 def_bool n 376 def_bool n
@@ -386,6 +386,15 @@ config ARC_HAS_SWAPE
386 386
387if ISA_ARCV2 387if ISA_ARCV2
388 388
389config ARC_USE_UNALIGNED_MEM_ACCESS
390 bool "Enable unaligned access in HW"
391 default y
392 select HAVE_EFFICIENT_UNALIGNED_ACCESS
393 help
394 The ARC HS architecture supports unaligned memory access
395 which is disabled by default. Enable unaligned access in
396 hardware and use software to use it
397
389config ARC_HAS_LL64 398config ARC_HAS_LL64
390 bool "Insn: 64bit LDD/STD" 399 bool "Insn: 64bit LDD/STD"
391 help 400 help
@@ -414,7 +423,7 @@ config ARC_IRQ_NO_AUTOSAVE
414 This is programmable and can be optionally disabled in which case 423 This is programmable and can be optionally disabled in which case
415 software INTERRUPT_PROLOGUE/EPILGUE do the needed work 424 software INTERRUPT_PROLOGUE/EPILGUE do the needed work
416 425
417endif # ISA_ARCV2 426endif # ISA_ARCV2
418 427
419endmenu # "ARC CPU Configuration" 428endmenu # "ARC CPU Configuration"
420 429
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index df00578c279d..e2b991f75bc5 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -28,6 +28,12 @@ cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
28 28
29ifdef CONFIG_ISA_ARCV2 29ifdef CONFIG_ISA_ARCV2
30 30
31ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
32cflags-y += -munaligned-access
33else
34cflags-y += -mno-unaligned-access
35endif
36
31ifndef CONFIG_ARC_HAS_LL64 37ifndef CONFIG_ARC_HAS_LL64
32cflags-y += -mno-ll64 38cflags-y += -mno-ll64
33endif 39endif
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi
index 02410b211433..c0bcd97522bb 100644
--- a/arch/arc/boot/dts/abilis_tb100.dtsi
+++ b/arch/arc/boot/dts/abilis_tb100.dtsi
@@ -38,7 +38,7 @@
38 clock-div = <6>; 38 clock-div = <6>;
39 }; 39 };
40 40
41 iomux: iomux@FF10601c { 41 iomux: iomux@ff10601c {
42 /* Port 1 */ 42 /* Port 1 */
43 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ 43 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
44 abilis,function = "mis0"; 44 abilis,function = "mis0";
@@ -162,182 +162,182 @@
162 }; 162 };
163 }; 163 };
164 164
165 gpioa: gpio@FF140000 { 165 gpioa: gpio@ff140000 {
166 compatible = "abilis,tb10x-gpio"; 166 compatible = "abilis,tb10x-gpio";
167 interrupt-controller; 167 interrupt-controller;
168 #interrupt-cells = <1>; 168 #interrupt-cells = <1>;
169 interrupt-parent = <&tb10x_ictl>; 169 interrupt-parent = <&tb10x_ictl>;
170 interrupts = <27 2>; 170 interrupts = <27 2>;
171 reg = <0xFF140000 0x1000>; 171 reg = <0xff140000 0x1000>;
172 gpio-controller; 172 gpio-controller;
173 #gpio-cells = <2>; 173 #gpio-cells = <2>;
174 abilis,ngpio = <3>; 174 abilis,ngpio = <3>;
175 gpio-ranges = <&iomux 0 0 0>; 175 gpio-ranges = <&iomux 0 0 0>;
176 gpio-ranges-group-names = "gpioa"; 176 gpio-ranges-group-names = "gpioa";
177 }; 177 };
178 gpiob: gpio@FF141000 { 178 gpiob: gpio@ff141000 {
179 compatible = "abilis,tb10x-gpio"; 179 compatible = "abilis,tb10x-gpio";
180 interrupt-controller; 180 interrupt-controller;
181 #interrupt-cells = <1>; 181 #interrupt-cells = <1>;
182 interrupt-parent = <&tb10x_ictl>; 182 interrupt-parent = <&tb10x_ictl>;
183 interrupts = <27 2>; 183 interrupts = <27 2>;
184 reg = <0xFF141000 0x1000>; 184 reg = <0xff141000 0x1000>;
185 gpio-controller; 185 gpio-controller;
186 #gpio-cells = <2>; 186 #gpio-cells = <2>;
187 abilis,ngpio = <2>; 187 abilis,ngpio = <2>;
188 gpio-ranges = <&iomux 0 0 0>; 188 gpio-ranges = <&iomux 0 0 0>;
189 gpio-ranges-group-names = "gpiob"; 189 gpio-ranges-group-names = "gpiob";
190 }; 190 };
191 gpioc: gpio@FF142000 { 191 gpioc: gpio@ff142000 {
192 compatible = "abilis,tb10x-gpio"; 192 compatible = "abilis,tb10x-gpio";
193 interrupt-controller; 193 interrupt-controller;
194 #interrupt-cells = <1>; 194 #interrupt-cells = <1>;
195 interrupt-parent = <&tb10x_ictl>; 195 interrupt-parent = <&tb10x_ictl>;
196 interrupts = <27 2>; 196 interrupts = <27 2>;
197 reg = <0xFF142000 0x1000>; 197 reg = <0xff142000 0x1000>;
198 gpio-controller; 198 gpio-controller;
199 #gpio-cells = <2>; 199 #gpio-cells = <2>;
200 abilis,ngpio = <3>; 200 abilis,ngpio = <3>;
201 gpio-ranges = <&iomux 0 0 0>; 201 gpio-ranges = <&iomux 0 0 0>;
202 gpio-ranges-group-names = "gpioc"; 202 gpio-ranges-group-names = "gpioc";
203 }; 203 };
204 gpiod: gpio@FF143000 { 204 gpiod: gpio@ff143000 {
205 compatible = "abilis,tb10x-gpio"; 205 compatible = "abilis,tb10x-gpio";
206 interrupt-controller; 206 interrupt-controller;
207 #interrupt-cells = <1>; 207 #interrupt-cells = <1>;
208 interrupt-parent = <&tb10x_ictl>; 208 interrupt-parent = <&tb10x_ictl>;
209 interrupts = <27 2>; 209 interrupts = <27 2>;
210 reg = <0xFF143000 0x1000>; 210 reg = <0xff143000 0x1000>;
211 gpio-controller; 211 gpio-controller;
212 #gpio-cells = <2>; 212 #gpio-cells = <2>;
213 abilis,ngpio = <2>; 213 abilis,ngpio = <2>;
214 gpio-ranges = <&iomux 0 0 0>; 214 gpio-ranges = <&iomux 0 0 0>;
215 gpio-ranges-group-names = "gpiod"; 215 gpio-ranges-group-names = "gpiod";
216 }; 216 };
217 gpioe: gpio@FF144000 { 217 gpioe: gpio@ff144000 {
218 compatible = "abilis,tb10x-gpio"; 218 compatible = "abilis,tb10x-gpio";
219 interrupt-controller; 219 interrupt-controller;
220 #interrupt-cells = <1>; 220 #interrupt-cells = <1>;
221 interrupt-parent = <&tb10x_ictl>; 221 interrupt-parent = <&tb10x_ictl>;
222 interrupts = <27 2>; 222 interrupts = <27 2>;
223 reg = <0xFF144000 0x1000>; 223 reg = <0xff144000 0x1000>;
224 gpio-controller; 224 gpio-controller;
225 #gpio-cells = <2>; 225 #gpio-cells = <2>;
226 abilis,ngpio = <3>; 226 abilis,ngpio = <3>;
227 gpio-ranges = <&iomux 0 0 0>; 227 gpio-ranges = <&iomux 0 0 0>;
228 gpio-ranges-group-names = "gpioe"; 228 gpio-ranges-group-names = "gpioe";
229 }; 229 };
230 gpiof: gpio@FF145000 { 230 gpiof: gpio@ff145000 {
231 compatible = "abilis,tb10x-gpio"; 231 compatible = "abilis,tb10x-gpio";
232 interrupt-controller; 232 interrupt-controller;
233 #interrupt-cells = <1>; 233 #interrupt-cells = <1>;
234 interrupt-parent = <&tb10x_ictl>; 234 interrupt-parent = <&tb10x_ictl>;
235 interrupts = <27 2>; 235 interrupts = <27 2>;
236 reg = <0xFF145000 0x1000>; 236 reg = <0xff145000 0x1000>;
237 gpio-controller; 237 gpio-controller;
238 #gpio-cells = <2>; 238 #gpio-cells = <2>;
239 abilis,ngpio = <2>; 239 abilis,ngpio = <2>;
240 gpio-ranges = <&iomux 0 0 0>; 240 gpio-ranges = <&iomux 0 0 0>;
241 gpio-ranges-group-names = "gpiof"; 241 gpio-ranges-group-names = "gpiof";
242 }; 242 };
243 gpiog: gpio@FF146000 { 243 gpiog: gpio@ff146000 {
244 compatible = "abilis,tb10x-gpio"; 244 compatible = "abilis,tb10x-gpio";
245 interrupt-controller; 245 interrupt-controller;
246 #interrupt-cells = <1>; 246 #interrupt-cells = <1>;
247 interrupt-parent = <&tb10x_ictl>; 247 interrupt-parent = <&tb10x_ictl>;
248 interrupts = <27 2>; 248 interrupts = <27 2>;
249 reg = <0xFF146000 0x1000>; 249 reg = <0xff146000 0x1000>;
250 gpio-controller; 250 gpio-controller;
251 #gpio-cells = <2>; 251 #gpio-cells = <2>;
252 abilis,ngpio = <3>; 252 abilis,ngpio = <3>;
253 gpio-ranges = <&iomux 0 0 0>; 253 gpio-ranges = <&iomux 0 0 0>;
254 gpio-ranges-group-names = "gpiog"; 254 gpio-ranges-group-names = "gpiog";
255 }; 255 };
256 gpioh: gpio@FF147000 { 256 gpioh: gpio@ff147000 {
257 compatible = "abilis,tb10x-gpio"; 257 compatible = "abilis,tb10x-gpio";
258 interrupt-controller; 258 interrupt-controller;
259 #interrupt-cells = <1>; 259 #interrupt-cells = <1>;
260 interrupt-parent = <&tb10x_ictl>; 260 interrupt-parent = <&tb10x_ictl>;
261 interrupts = <27 2>; 261 interrupts = <27 2>;
262 reg = <0xFF147000 0x1000>; 262 reg = <0xff147000 0x1000>;
263 gpio-controller; 263 gpio-controller;
264 #gpio-cells = <2>; 264 #gpio-cells = <2>;
265 abilis,ngpio = <2>; 265 abilis,ngpio = <2>;
266 gpio-ranges = <&iomux 0 0 0>; 266 gpio-ranges = <&iomux 0 0 0>;
267 gpio-ranges-group-names = "gpioh"; 267 gpio-ranges-group-names = "gpioh";
268 }; 268 };
269 gpioi: gpio@FF148000 { 269 gpioi: gpio@ff148000 {
270 compatible = "abilis,tb10x-gpio"; 270 compatible = "abilis,tb10x-gpio";
271 interrupt-controller; 271 interrupt-controller;
272 #interrupt-cells = <1>; 272 #interrupt-cells = <1>;
273 interrupt-parent = <&tb10x_ictl>; 273 interrupt-parent = <&tb10x_ictl>;
274 interrupts = <27 2>; 274 interrupts = <27 2>;
275 reg = <0xFF148000 0x1000>; 275 reg = <0xff148000 0x1000>;
276 gpio-controller; 276 gpio-controller;
277 #gpio-cells = <2>; 277 #gpio-cells = <2>;
278 abilis,ngpio = <12>; 278 abilis,ngpio = <12>;
279 gpio-ranges = <&iomux 0 0 0>; 279 gpio-ranges = <&iomux 0 0 0>;
280 gpio-ranges-group-names = "gpioi"; 280 gpio-ranges-group-names = "gpioi";
281 }; 281 };
282 gpioj: gpio@FF149000 { 282 gpioj: gpio@ff149000 {
283 compatible = "abilis,tb10x-gpio"; 283 compatible = "abilis,tb10x-gpio";
284 interrupt-controller; 284 interrupt-controller;
285 #interrupt-cells = <1>; 285 #interrupt-cells = <1>;
286 interrupt-parent = <&tb10x_ictl>; 286 interrupt-parent = <&tb10x_ictl>;
287 interrupts = <27 2>; 287 interrupts = <27 2>;
288 reg = <0xFF149000 0x1000>; 288 reg = <0xff149000 0x1000>;
289 gpio-controller; 289 gpio-controller;
290 #gpio-cells = <2>; 290 #gpio-cells = <2>;
291 abilis,ngpio = <32>; 291 abilis,ngpio = <32>;
292 gpio-ranges = <&iomux 0 0 0>; 292 gpio-ranges = <&iomux 0 0 0>;
293 gpio-ranges-group-names = "gpioj"; 293 gpio-ranges-group-names = "gpioj";
294 }; 294 };
295 gpiok: gpio@FF14a000 { 295 gpiok: gpio@ff14a000 {
296 compatible = "abilis,tb10x-gpio"; 296 compatible = "abilis,tb10x-gpio";
297 interrupt-controller; 297 interrupt-controller;
298 #interrupt-cells = <1>; 298 #interrupt-cells = <1>;
299 interrupt-parent = <&tb10x_ictl>; 299 interrupt-parent = <&tb10x_ictl>;
300 interrupts = <27 2>; 300 interrupts = <27 2>;
301 reg = <0xFF14A000 0x1000>; 301 reg = <0xff14a000 0x1000>;
302 gpio-controller; 302 gpio-controller;
303 #gpio-cells = <2>; 303 #gpio-cells = <2>;
304 abilis,ngpio = <22>; 304 abilis,ngpio = <22>;
305 gpio-ranges = <&iomux 0 0 0>; 305 gpio-ranges = <&iomux 0 0 0>;
306 gpio-ranges-group-names = "gpiok"; 306 gpio-ranges-group-names = "gpiok";
307 }; 307 };
308 gpiol: gpio@FF14b000 { 308 gpiol: gpio@ff14b000 {
309 compatible = "abilis,tb10x-gpio"; 309 compatible = "abilis,tb10x-gpio";
310 interrupt-controller; 310 interrupt-controller;
311 #interrupt-cells = <1>; 311 #interrupt-cells = <1>;
312 interrupt-parent = <&tb10x_ictl>; 312 interrupt-parent = <&tb10x_ictl>;
313 interrupts = <27 2>; 313 interrupts = <27 2>;
314 reg = <0xFF14B000 0x1000>; 314 reg = <0xff14b000 0x1000>;
315 gpio-controller; 315 gpio-controller;
316 #gpio-cells = <2>; 316 #gpio-cells = <2>;
317 abilis,ngpio = <4>; 317 abilis,ngpio = <4>;
318 gpio-ranges = <&iomux 0 0 0>; 318 gpio-ranges = <&iomux 0 0 0>;
319 gpio-ranges-group-names = "gpiol"; 319 gpio-ranges-group-names = "gpiol";
320 }; 320 };
321 gpiom: gpio@FF14c000 { 321 gpiom: gpio@ff14c000 {
322 compatible = "abilis,tb10x-gpio"; 322 compatible = "abilis,tb10x-gpio";
323 interrupt-controller; 323 interrupt-controller;
324 #interrupt-cells = <1>; 324 #interrupt-cells = <1>;
325 interrupt-parent = <&tb10x_ictl>; 325 interrupt-parent = <&tb10x_ictl>;
326 interrupts = <27 2>; 326 interrupts = <27 2>;
327 reg = <0xFF14C000 0x1000>; 327 reg = <0xff14c000 0x1000>;
328 gpio-controller; 328 gpio-controller;
329 #gpio-cells = <2>; 329 #gpio-cells = <2>;
330 abilis,ngpio = <4>; 330 abilis,ngpio = <4>;
331 gpio-ranges = <&iomux 0 0 0>; 331 gpio-ranges = <&iomux 0 0 0>;
332 gpio-ranges-group-names = "gpiom"; 332 gpio-ranges-group-names = "gpiom";
333 }; 333 };
334 gpion: gpio@FF14d000 { 334 gpion: gpio@ff14d000 {
335 compatible = "abilis,tb10x-gpio"; 335 compatible = "abilis,tb10x-gpio";
336 interrupt-controller; 336 interrupt-controller;
337 #interrupt-cells = <1>; 337 #interrupt-cells = <1>;
338 interrupt-parent = <&tb10x_ictl>; 338 interrupt-parent = <&tb10x_ictl>;
339 interrupts = <27 2>; 339 interrupts = <27 2>;
340 reg = <0xFF14D000 0x1000>; 340 reg = <0xff14d000 0x1000>;
341 gpio-controller; 341 gpio-controller;
342 #gpio-cells = <2>; 342 #gpio-cells = <2>;
343 abilis,ngpio = <5>; 343 abilis,ngpio = <5>;
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
index 3acf04db8030..c968e677db46 100644
--- a/arch/arc/boot/dts/abilis_tb100_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -37,27 +37,27 @@
37 }; 37 };
38 38
39 soc100 { 39 soc100 {
40 uart@FF100000 { 40 uart@ff100000 {
41 pinctrl-names = "default"; 41 pinctrl-names = "default";
42 pinctrl-0 = <&pctl_uart0>; 42 pinctrl-0 = <&pctl_uart0>;
43 }; 43 };
44 ethernet@FE100000 { 44 ethernet@fe100000 {
45 phy-mode = "rgmii"; 45 phy-mode = "rgmii";
46 }; 46 };
47 47
48 i2c0: i2c@FF120000 { 48 i2c0: i2c@ff120000 {
49 i2c-sda-hold-time-ns = <432>; 49 i2c-sda-hold-time-ns = <432>;
50 }; 50 };
51 i2c1: i2c@FF121000 { 51 i2c1: i2c@ff121000 {
52 i2c-sda-hold-time-ns = <432>; 52 i2c-sda-hold-time-ns = <432>;
53 }; 53 };
54 i2c2: i2c@FF122000 { 54 i2c2: i2c@ff122000 {
55 i2c-sda-hold-time-ns = <432>; 55 i2c-sda-hold-time-ns = <432>;
56 }; 56 };
57 i2c3: i2c@FF123000 { 57 i2c3: i2c@ff123000 {
58 i2c-sda-hold-time-ns = <432>; 58 i2c-sda-hold-time-ns = <432>;
59 }; 59 };
60 i2c4: i2c@FF124000 { 60 i2c4: i2c@ff124000 {
61 i2c-sda-hold-time-ns = <432>; 61 i2c-sda-hold-time-ns = <432>;
62 }; 62 };
63 63
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi
index f9e7686044eb..6a1615f58f05 100644
--- a/arch/arc/boot/dts/abilis_tb101.dtsi
+++ b/arch/arc/boot/dts/abilis_tb101.dtsi
@@ -38,7 +38,7 @@
38 clock-div = <6>; 38 clock-div = <6>;
39 }; 39 };
40 40
41 iomux: iomux@FF10601c { 41 iomux: iomux@ff10601c {
42 /* Port 1 */ 42 /* Port 1 */
43 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ 43 pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */
44 abilis,function = "mis0"; 44 abilis,function = "mis0";
@@ -171,182 +171,182 @@
171 }; 171 };
172 }; 172 };
173 173
174 gpioa: gpio@FF140000 { 174 gpioa: gpio@ff140000 {
175 compatible = "abilis,tb10x-gpio"; 175 compatible = "abilis,tb10x-gpio";
176 interrupt-controller; 176 interrupt-controller;
177 #interrupt-cells = <1>; 177 #interrupt-cells = <1>;
178 interrupt-parent = <&tb10x_ictl>; 178 interrupt-parent = <&tb10x_ictl>;
179 interrupts = <27 2>; 179 interrupts = <27 2>;
180 reg = <0xFF140000 0x1000>; 180 reg = <0xff140000 0x1000>;
181 gpio-controller; 181 gpio-controller;
182 #gpio-cells = <2>; 182 #gpio-cells = <2>;
183 abilis,ngpio = <3>; 183 abilis,ngpio = <3>;
184 gpio-ranges = <&iomux 0 0 0>; 184 gpio-ranges = <&iomux 0 0 0>;
185 gpio-ranges-group-names = "gpioa"; 185 gpio-ranges-group-names = "gpioa";
186 }; 186 };
187 gpiob: gpio@FF141000 { 187 gpiob: gpio@ff141000 {
188 compatible = "abilis,tb10x-gpio"; 188 compatible = "abilis,tb10x-gpio";
189 interrupt-controller; 189 interrupt-controller;
190 #interrupt-cells = <1>; 190 #interrupt-cells = <1>;
191 interrupt-parent = <&tb10x_ictl>; 191 interrupt-parent = <&tb10x_ictl>;
192 interrupts = <27 2>; 192 interrupts = <27 2>;
193 reg = <0xFF141000 0x1000>; 193 reg = <0xff141000 0x1000>;
194 gpio-controller; 194 gpio-controller;
195 #gpio-cells = <2>; 195 #gpio-cells = <2>;
196 abilis,ngpio = <2>; 196 abilis,ngpio = <2>;
197 gpio-ranges = <&iomux 0 0 0>; 197 gpio-ranges = <&iomux 0 0 0>;
198 gpio-ranges-group-names = "gpiob"; 198 gpio-ranges-group-names = "gpiob";
199 }; 199 };
200 gpioc: gpio@FF142000 { 200 gpioc: gpio@ff142000 {
201 compatible = "abilis,tb10x-gpio"; 201 compatible = "abilis,tb10x-gpio";
202 interrupt-controller; 202 interrupt-controller;
203 #interrupt-cells = <1>; 203 #interrupt-cells = <1>;
204 interrupt-parent = <&tb10x_ictl>; 204 interrupt-parent = <&tb10x_ictl>;
205 interrupts = <27 2>; 205 interrupts = <27 2>;
206 reg = <0xFF142000 0x1000>; 206 reg = <0xff142000 0x1000>;
207 gpio-controller; 207 gpio-controller;
208 #gpio-cells = <2>; 208 #gpio-cells = <2>;
209 abilis,ngpio = <3>; 209 abilis,ngpio = <3>;
210 gpio-ranges = <&iomux 0 0 0>; 210 gpio-ranges = <&iomux 0 0 0>;
211 gpio-ranges-group-names = "gpioc"; 211 gpio-ranges-group-names = "gpioc";
212 }; 212 };
213 gpiod: gpio@FF143000 { 213 gpiod: gpio@ff143000 {
214 compatible = "abilis,tb10x-gpio"; 214 compatible = "abilis,tb10x-gpio";
215 interrupt-controller; 215 interrupt-controller;
216 #interrupt-cells = <1>; 216 #interrupt-cells = <1>;
217 interrupt-parent = <&tb10x_ictl>; 217 interrupt-parent = <&tb10x_ictl>;
218 interrupts = <27 2>; 218 interrupts = <27 2>;
219 reg = <0xFF143000 0x1000>; 219 reg = <0xff143000 0x1000>;
220 gpio-controller; 220 gpio-controller;
221 #gpio-cells = <2>; 221 #gpio-cells = <2>;
222 abilis,ngpio = <2>; 222 abilis,ngpio = <2>;
223 gpio-ranges = <&iomux 0 0 0>; 223 gpio-ranges = <&iomux 0 0 0>;
224 gpio-ranges-group-names = "gpiod"; 224 gpio-ranges-group-names = "gpiod";
225 }; 225 };
226 gpioe: gpio@FF144000 { 226 gpioe: gpio@ff144000 {
227 compatible = "abilis,tb10x-gpio"; 227 compatible = "abilis,tb10x-gpio";
228 interrupt-controller; 228 interrupt-controller;
229 #interrupt-cells = <1>; 229 #interrupt-cells = <1>;
230 interrupt-parent = <&tb10x_ictl>; 230 interrupt-parent = <&tb10x_ictl>;
231 interrupts = <27 2>; 231 interrupts = <27 2>;
232 reg = <0xFF144000 0x1000>; 232 reg = <0xff144000 0x1000>;
233 gpio-controller; 233 gpio-controller;
234 #gpio-cells = <2>; 234 #gpio-cells = <2>;
235 abilis,ngpio = <3>; 235 abilis,ngpio = <3>;
236 gpio-ranges = <&iomux 0 0 0>; 236 gpio-ranges = <&iomux 0 0 0>;
237 gpio-ranges-group-names = "gpioe"; 237 gpio-ranges-group-names = "gpioe";
238 }; 238 };
239 gpiof: gpio@FF145000 { 239 gpiof: gpio@ff145000 {
240 compatible = "abilis,tb10x-gpio"; 240 compatible = "abilis,tb10x-gpio";
241 interrupt-controller; 241 interrupt-controller;
242 #interrupt-cells = <1>; 242 #interrupt-cells = <1>;
243 interrupt-parent = <&tb10x_ictl>; 243 interrupt-parent = <&tb10x_ictl>;
244 interrupts = <27 2>; 244 interrupts = <27 2>;
245 reg = <0xFF145000 0x1000>; 245 reg = <0xff145000 0x1000>;
246 gpio-controller; 246 gpio-controller;
247 #gpio-cells = <2>; 247 #gpio-cells = <2>;
248 abilis,ngpio = <2>; 248 abilis,ngpio = <2>;
249 gpio-ranges = <&iomux 0 0 0>; 249 gpio-ranges = <&iomux 0 0 0>;
250 gpio-ranges-group-names = "gpiof"; 250 gpio-ranges-group-names = "gpiof";
251 }; 251 };
252 gpiog: gpio@FF146000 { 252 gpiog: gpio@ff146000 {
253 compatible = "abilis,tb10x-gpio"; 253 compatible = "abilis,tb10x-gpio";
254 interrupt-controller; 254 interrupt-controller;
255 #interrupt-cells = <1>; 255 #interrupt-cells = <1>;
256 interrupt-parent = <&tb10x_ictl>; 256 interrupt-parent = <&tb10x_ictl>;
257 interrupts = <27 2>; 257 interrupts = <27 2>;
258 reg = <0xFF146000 0x1000>; 258 reg = <0xff146000 0x1000>;
259 gpio-controller; 259 gpio-controller;
260 #gpio-cells = <2>; 260 #gpio-cells = <2>;
261 abilis,ngpio = <3>; 261 abilis,ngpio = <3>;
262 gpio-ranges = <&iomux 0 0 0>; 262 gpio-ranges = <&iomux 0 0 0>;
263 gpio-ranges-group-names = "gpiog"; 263 gpio-ranges-group-names = "gpiog";
264 }; 264 };
265 gpioh: gpio@FF147000 { 265 gpioh: gpio@ff147000 {
266 compatible = "abilis,tb10x-gpio"; 266 compatible = "abilis,tb10x-gpio";
267 interrupt-controller; 267 interrupt-controller;
268 #interrupt-cells = <1>; 268 #interrupt-cells = <1>;
269 interrupt-parent = <&tb10x_ictl>; 269 interrupt-parent = <&tb10x_ictl>;
270 interrupts = <27 2>; 270 interrupts = <27 2>;
271 reg = <0xFF147000 0x1000>; 271 reg = <0xff147000 0x1000>;
272 gpio-controller; 272 gpio-controller;
273 #gpio-cells = <2>; 273 #gpio-cells = <2>;
274 abilis,ngpio = <2>; 274 abilis,ngpio = <2>;
275 gpio-ranges = <&iomux 0 0 0>; 275 gpio-ranges = <&iomux 0 0 0>;
276 gpio-ranges-group-names = "gpioh"; 276 gpio-ranges-group-names = "gpioh";
277 }; 277 };
278 gpioi: gpio@FF148000 { 278 gpioi: gpio@ff148000 {
279 compatible = "abilis,tb10x-gpio"; 279 compatible = "abilis,tb10x-gpio";
280 interrupt-controller; 280 interrupt-controller;
281 #interrupt-cells = <1>; 281 #interrupt-cells = <1>;
282 interrupt-parent = <&tb10x_ictl>; 282 interrupt-parent = <&tb10x_ictl>;
283 interrupts = <27 2>; 283 interrupts = <27 2>;
284 reg = <0xFF148000 0x1000>; 284 reg = <0xff148000 0x1000>;
285 gpio-controller; 285 gpio-controller;
286 #gpio-cells = <2>; 286 #gpio-cells = <2>;
287 abilis,ngpio = <12>; 287 abilis,ngpio = <12>;
288 gpio-ranges = <&iomux 0 0 0>; 288 gpio-ranges = <&iomux 0 0 0>;
289 gpio-ranges-group-names = "gpioi"; 289 gpio-ranges-group-names = "gpioi";
290 }; 290 };
291 gpioj: gpio@FF149000 { 291 gpioj: gpio@ff149000 {
292 compatible = "abilis,tb10x-gpio"; 292 compatible = "abilis,tb10x-gpio";
293 interrupt-controller; 293 interrupt-controller;
294 #interrupt-cells = <1>; 294 #interrupt-cells = <1>;
295 interrupt-parent = <&tb10x_ictl>; 295 interrupt-parent = <&tb10x_ictl>;
296 interrupts = <27 2>; 296 interrupts = <27 2>;
297 reg = <0xFF149000 0x1000>; 297 reg = <0xff149000 0x1000>;
298 gpio-controller; 298 gpio-controller;
299 #gpio-cells = <2>; 299 #gpio-cells = <2>;
300 abilis,ngpio = <32>; 300 abilis,ngpio = <32>;
301 gpio-ranges = <&iomux 0 0 0>; 301 gpio-ranges = <&iomux 0 0 0>;
302 gpio-ranges-group-names = "gpioj"; 302 gpio-ranges-group-names = "gpioj";
303 }; 303 };
304 gpiok: gpio@FF14a000 { 304 gpiok: gpio@ff14a000 {
305 compatible = "abilis,tb10x-gpio"; 305 compatible = "abilis,tb10x-gpio";
306 interrupt-controller; 306 interrupt-controller;
307 #interrupt-cells = <1>; 307 #interrupt-cells = <1>;
308 interrupt-parent = <&tb10x_ictl>; 308 interrupt-parent = <&tb10x_ictl>;
309 interrupts = <27 2>; 309 interrupts = <27 2>;
310 reg = <0xFF14A000 0x1000>; 310 reg = <0xff14a000 0x1000>;
311 gpio-controller; 311 gpio-controller;
312 #gpio-cells = <2>; 312 #gpio-cells = <2>;
313 abilis,ngpio = <22>; 313 abilis,ngpio = <22>;
314 gpio-ranges = <&iomux 0 0 0>; 314 gpio-ranges = <&iomux 0 0 0>;
315 gpio-ranges-group-names = "gpiok"; 315 gpio-ranges-group-names = "gpiok";
316 }; 316 };
317 gpiol: gpio@FF14b000 { 317 gpiol: gpio@ff14b000 {
318 compatible = "abilis,tb10x-gpio"; 318 compatible = "abilis,tb10x-gpio";
319 interrupt-controller; 319 interrupt-controller;
320 #interrupt-cells = <1>; 320 #interrupt-cells = <1>;
321 interrupt-parent = <&tb10x_ictl>; 321 interrupt-parent = <&tb10x_ictl>;
322 interrupts = <27 2>; 322 interrupts = <27 2>;
323 reg = <0xFF14B000 0x1000>; 323 reg = <0xff14b000 0x1000>;
324 gpio-controller; 324 gpio-controller;
325 #gpio-cells = <2>; 325 #gpio-cells = <2>;
326 abilis,ngpio = <4>; 326 abilis,ngpio = <4>;
327 gpio-ranges = <&iomux 0 0 0>; 327 gpio-ranges = <&iomux 0 0 0>;
328 gpio-ranges-group-names = "gpiol"; 328 gpio-ranges-group-names = "gpiol";
329 }; 329 };
330 gpiom: gpio@FF14c000 { 330 gpiom: gpio@ff14c000 {
331 compatible = "abilis,tb10x-gpio"; 331 compatible = "abilis,tb10x-gpio";
332 interrupt-controller; 332 interrupt-controller;
333 #interrupt-cells = <1>; 333 #interrupt-cells = <1>;
334 interrupt-parent = <&tb10x_ictl>; 334 interrupt-parent = <&tb10x_ictl>;
335 interrupts = <27 2>; 335 interrupts = <27 2>;
336 reg = <0xFF14C000 0x1000>; 336 reg = <0xff14c000 0x1000>;
337 gpio-controller; 337 gpio-controller;
338 #gpio-cells = <2>; 338 #gpio-cells = <2>;
339 abilis,ngpio = <4>; 339 abilis,ngpio = <4>;
340 gpio-ranges = <&iomux 0 0 0>; 340 gpio-ranges = <&iomux 0 0 0>;
341 gpio-ranges-group-names = "gpiom"; 341 gpio-ranges-group-names = "gpiom";
342 }; 342 };
343 gpion: gpio@FF14d000 { 343 gpion: gpio@ff14d000 {
344 compatible = "abilis,tb10x-gpio"; 344 compatible = "abilis,tb10x-gpio";
345 interrupt-controller; 345 interrupt-controller;
346 #interrupt-cells = <1>; 346 #interrupt-cells = <1>;
347 interrupt-parent = <&tb10x_ictl>; 347 interrupt-parent = <&tb10x_ictl>;
348 interrupts = <27 2>; 348 interrupts = <27 2>;
349 reg = <0xFF14D000 0x1000>; 349 reg = <0xff14d000 0x1000>;
350 gpio-controller; 350 gpio-controller;
351 #gpio-cells = <2>; 351 #gpio-cells = <2>;
352 abilis,ngpio = <5>; 352 abilis,ngpio = <5>;
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
index 37d88c5dd181..05143ce9c120 100644
--- a/arch/arc/boot/dts/abilis_tb101_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -37,27 +37,27 @@
37 }; 37 };
38 38
39 soc100 { 39 soc100 {
40 uart@FF100000 { 40 uart@ff100000 {
41 pinctrl-names = "default"; 41 pinctrl-names = "default";
42 pinctrl-0 = <&pctl_uart0>; 42 pinctrl-0 = <&pctl_uart0>;
43 }; 43 };
44 ethernet@FE100000 { 44 ethernet@fe100000 {
45 phy-mode = "rgmii"; 45 phy-mode = "rgmii";
46 }; 46 };
47 47
48 i2c0: i2c@FF120000 { 48 i2c0: i2c@ff120000 {
49 i2c-sda-hold-time-ns = <432>; 49 i2c-sda-hold-time-ns = <432>;
50 }; 50 };
51 i2c1: i2c@FF121000 { 51 i2c1: i2c@ff121000 {
52 i2c-sda-hold-time-ns = <432>; 52 i2c-sda-hold-time-ns = <432>;
53 }; 53 };
54 i2c2: i2c@FF122000 { 54 i2c2: i2c@ff122000 {
55 i2c-sda-hold-time-ns = <432>; 55 i2c-sda-hold-time-ns = <432>;
56 }; 56 };
57 i2c3: i2c@FF123000 { 57 i2c3: i2c@ff123000 {
58 i2c-sda-hold-time-ns = <432>; 58 i2c-sda-hold-time-ns = <432>;
59 }; 59 };
60 i2c4: i2c@FF124000 { 60 i2c4: i2c@ff124000 {
61 i2c-sda-hold-time-ns = <432>; 61 i2c-sda-hold-time-ns = <432>;
62 }; 62 };
63 63
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index 3121536b25a3..2fbf1bdfe6de 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -54,7 +54,7 @@
54 #size-cells = <1>; 54 #size-cells = <1>;
55 device_type = "soc"; 55 device_type = "soc";
56 ranges = <0xfe000000 0xfe000000 0x02000000 56 ranges = <0xfe000000 0xfe000000 0x02000000
57 0x000F0000 0x000F0000 0x00010000>; 57 0x000f0000 0x000f0000 0x00010000>;
58 compatible = "abilis,tb10x", "simple-bus"; 58 compatible = "abilis,tb10x", "simple-bus";
59 59
60 pll0: oscillator { 60 pll0: oscillator {
@@ -75,10 +75,10 @@
75 clock-output-names = "ahb_clk"; 75 clock-output-names = "ahb_clk";
76 }; 76 };
77 77
78 iomux: iomux@FF10601c { 78 iomux: iomux@ff10601c {
79 compatible = "abilis,tb10x-iomux"; 79 compatible = "abilis,tb10x-iomux";
80 #gpio-range-cells = <3>; 80 #gpio-range-cells = <3>;
81 reg = <0xFF10601c 0x4>; 81 reg = <0xff10601c 0x4>;
82 }; 82 };
83 83
84 intc: interrupt-controller { 84 intc: interrupt-controller {
@@ -88,7 +88,7 @@
88 }; 88 };
89 tb10x_ictl: pic@fe002000 { 89 tb10x_ictl: pic@fe002000 {
90 compatible = "abilis,tb10x-ictl"; 90 compatible = "abilis,tb10x-ictl";
91 reg = <0xFE002000 0x20>; 91 reg = <0xfe002000 0x20>;
92 interrupt-controller; 92 interrupt-controller;
93 #interrupt-cells = <2>; 93 #interrupt-cells = <2>;
94 interrupt-parent = <&intc>; 94 interrupt-parent = <&intc>;
@@ -96,27 +96,27 @@
96 20 21 22 23 24 25 26 27 28 29 30 31>; 96 20 21 22 23 24 25 26 27 28 29 30 31>;
97 }; 97 };
98 98
99 uart@FF100000 { 99 uart@ff100000 {
100 compatible = "snps,dw-apb-uart"; 100 compatible = "snps,dw-apb-uart";
101 reg = <0xFF100000 0x100>; 101 reg = <0xff100000 0x100>;
102 clock-frequency = <166666666>; 102 clock-frequency = <166666666>;
103 interrupts = <25 8>; 103 interrupts = <25 8>;
104 reg-shift = <2>; 104 reg-shift = <2>;
105 reg-io-width = <4>; 105 reg-io-width = <4>;
106 interrupt-parent = <&tb10x_ictl>; 106 interrupt-parent = <&tb10x_ictl>;
107 }; 107 };
108 ethernet@FE100000 { 108 ethernet@fe100000 {
109 compatible = "snps,dwmac-3.70a","snps,dwmac"; 109 compatible = "snps,dwmac-3.70a","snps,dwmac";
110 reg = <0xFE100000 0x1058>; 110 reg = <0xfe100000 0x1058>;
111 interrupt-parent = <&tb10x_ictl>; 111 interrupt-parent = <&tb10x_ictl>;
112 interrupts = <6 8>; 112 interrupts = <6 8>;
113 interrupt-names = "macirq"; 113 interrupt-names = "macirq";
114 clocks = <&ahb_clk>; 114 clocks = <&ahb_clk>;
115 clock-names = "stmmaceth"; 115 clock-names = "stmmaceth";
116 }; 116 };
117 dma@FE000000 { 117 dma@fe000000 {
118 compatible = "snps,dma-spear1340"; 118 compatible = "snps,dma-spear1340";
119 reg = <0xFE000000 0x400>; 119 reg = <0xfe000000 0x400>;
120 interrupt-parent = <&tb10x_ictl>; 120 interrupt-parent = <&tb10x_ictl>;
121 interrupts = <14 8>; 121 interrupts = <14 8>;
122 dma-channels = <6>; 122 dma-channels = <6>;
@@ -132,70 +132,70 @@
132 multi-block = <1 1 1 1 1 1>; 132 multi-block = <1 1 1 1 1 1>;
133 }; 133 };
134 134
135 i2c0: i2c@FF120000 { 135 i2c0: i2c@ff120000 {
136 #address-cells = <1>; 136 #address-cells = <1>;
137 #size-cells = <0>; 137 #size-cells = <0>;
138 compatible = "snps,designware-i2c"; 138 compatible = "snps,designware-i2c";
139 reg = <0xFF120000 0x1000>; 139 reg = <0xff120000 0x1000>;
140 interrupt-parent = <&tb10x_ictl>; 140 interrupt-parent = <&tb10x_ictl>;
141 interrupts = <12 8>; 141 interrupts = <12 8>;
142 clocks = <&ahb_clk>; 142 clocks = <&ahb_clk>;
143 }; 143 };
144 i2c1: i2c@FF121000 { 144 i2c1: i2c@ff121000 {
145 #address-cells = <1>; 145 #address-cells = <1>;
146 #size-cells = <0>; 146 #size-cells = <0>;
147 compatible = "snps,designware-i2c"; 147 compatible = "snps,designware-i2c";
148 reg = <0xFF121000 0x1000>; 148 reg = <0xff121000 0x1000>;
149 interrupt-parent = <&tb10x_ictl>; 149 interrupt-parent = <&tb10x_ictl>;
150 interrupts = <12 8>; 150 interrupts = <12 8>;
151 clocks = <&ahb_clk>; 151 clocks = <&ahb_clk>;
152 }; 152 };
153 i2c2: i2c@FF122000 { 153 i2c2: i2c@ff122000 {
154 #address-cells = <1>; 154 #address-cells = <1>;
155 #size-cells = <0>; 155 #size-cells = <0>;
156 compatible = "snps,designware-i2c"; 156 compatible = "snps,designware-i2c";
157 reg = <0xFF122000 0x1000>; 157 reg = <0xff122000 0x1000>;
158 interrupt-parent = <&tb10x_ictl>; 158 interrupt-parent = <&tb10x_ictl>;
159 interrupts = <12 8>; 159 interrupts = <12 8>;
160 clocks = <&ahb_clk>; 160 clocks = <&ahb_clk>;
161 }; 161 };
162 i2c3: i2c@FF123000 { 162 i2c3: i2c@ff123000 {
163 #address-cells = <1>; 163 #address-cells = <1>;
164 #size-cells = <0>; 164 #size-cells = <0>;
165 compatible = "snps,designware-i2c"; 165 compatible = "snps,designware-i2c";
166 reg = <0xFF123000 0x1000>; 166 reg = <0xff123000 0x1000>;
167 interrupt-parent = <&tb10x_ictl>; 167 interrupt-parent = <&tb10x_ictl>;
168 interrupts = <12 8>; 168 interrupts = <12 8>;
169 clocks = <&ahb_clk>; 169 clocks = <&ahb_clk>;
170 }; 170 };
171 i2c4: i2c@FF124000 { 171 i2c4: i2c@ff124000 {
172 #address-cells = <1>; 172 #address-cells = <1>;
173 #size-cells = <0>; 173 #size-cells = <0>;
174 compatible = "snps,designware-i2c"; 174 compatible = "snps,designware-i2c";
175 reg = <0xFF124000 0x1000>; 175 reg = <0xff124000 0x1000>;
176 interrupt-parent = <&tb10x_ictl>; 176 interrupt-parent = <&tb10x_ictl>;
177 interrupts = <12 8>; 177 interrupts = <12 8>;
178 clocks = <&ahb_clk>; 178 clocks = <&ahb_clk>;
179 }; 179 };
180 180
181 spi0: spi@0xFE010000 { 181 spi0: spi@fe010000 {
182 #address-cells = <1>; 182 #address-cells = <1>;
183 #size-cells = <0>; 183 #size-cells = <0>;
184 cell-index = <0>; 184 cell-index = <0>;
185 compatible = "abilis,tb100-spi"; 185 compatible = "abilis,tb100-spi";
186 num-cs = <1>; 186 num-cs = <1>;
187 reg = <0xFE010000 0x20>; 187 reg = <0xfe010000 0x20>;
188 interrupt-parent = <&tb10x_ictl>; 188 interrupt-parent = <&tb10x_ictl>;
189 interrupts = <26 8>; 189 interrupts = <26 8>;
190 clocks = <&ahb_clk>; 190 clocks = <&ahb_clk>;
191 }; 191 };
192 spi1: spi@0xFE011000 { 192 spi1: spi@fe011000 {
193 #address-cells = <1>; 193 #address-cells = <1>;
194 #size-cells = <0>; 194 #size-cells = <0>;
195 cell-index = <1>; 195 cell-index = <1>;
196 compatible = "abilis,tb100-spi"; 196 compatible = "abilis,tb100-spi";
197 num-cs = <2>; 197 num-cs = <2>;
198 reg = <0xFE011000 0x20>; 198 reg = <0xfe011000 0x20>;
199 interrupt-parent = <&tb10x_ictl>; 199 interrupt-parent = <&tb10x_ictl>;
200 interrupts = <10 8>; 200 interrupts = <10 8>;
201 clocks = <&ahb_clk>; 201 clocks = <&ahb_clk>;
@@ -226,23 +226,23 @@
226 interrupts = <20 2>, <19 2>; 226 interrupts = <20 2>, <19 2>;
227 interrupt-names = "cmd_irq", "event_irq"; 227 interrupt-names = "cmd_irq", "event_irq";
228 }; 228 };
229 tb10x_mdsc0: tb10x-mdscr@FF300000 { 229 tb10x_mdsc0: tb10x-mdscr@ff300000 {
230 compatible = "abilis,tb100-mdscr"; 230 compatible = "abilis,tb100-mdscr";
231 reg = <0xFF300000 0x7000>; 231 reg = <0xff300000 0x7000>;
232 tb100-mdscr-manage-tsin; 232 tb100-mdscr-manage-tsin;
233 }; 233 };
234 tb10x_mscr0: tb10x-mdscr@FF307000 { 234 tb10x_mscr0: tb10x-mdscr@ff307000 {
235 compatible = "abilis,tb100-mdscr"; 235 compatible = "abilis,tb100-mdscr";
236 reg = <0xFF307000 0x7000>; 236 reg = <0xff307000 0x7000>;
237 }; 237 };
238 tb10x_scr0: tb10x-mdscr@ff30e000 { 238 tb10x_scr0: tb10x-mdscr@ff30e000 {
239 compatible = "abilis,tb100-mdscr"; 239 compatible = "abilis,tb100-mdscr";
240 reg = <0xFF30e000 0x4000>; 240 reg = <0xff30e000 0x4000>;
241 tb100-mdscr-manage-tsin; 241 tb100-mdscr-manage-tsin;
242 }; 242 };
243 tb10x_scr1: tb10x-mdscr@ff312000 { 243 tb10x_scr1: tb10x-mdscr@ff312000 {
244 compatible = "abilis,tb100-mdscr"; 244 compatible = "abilis,tb100-mdscr";
245 reg = <0xFF312000 0x4000>; 245 reg = <0xff312000 0x4000>;
246 tb100-mdscr-manage-tsin; 246 tb100-mdscr-manage-tsin;
247 }; 247 };
248 tb10x_wfb: tb10x-wfb@ff319000 { 248 tb10x_wfb: tb10x-wfb@ff319000 {
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi
index fdc266504ada..37be3bf03ad6 100644
--- a/arch/arc/boot/dts/axc001.dtsi
+++ b/arch/arc/boot/dts/axc001.dtsi
@@ -41,7 +41,7 @@
41 * this GPIO block ORs all interrupts on CPU card (creg,..) 41 * this GPIO block ORs all interrupts on CPU card (creg,..)
42 * to uplink only 1 IRQ to ARC core intc 42 * to uplink only 1 IRQ to ARC core intc
43 */ 43 */
44 dw-apb-gpio@0x2000 { 44 dw-apb-gpio@2000 {
45 compatible = "snps,dw-apb-gpio"; 45 compatible = "snps,dw-apb-gpio";
46 reg = < 0x2000 0x80 >; 46 reg = < 0x2000 0x80 >;
47 #address-cells = <1>; 47 #address-cells = <1>;
@@ -60,7 +60,7 @@
60 }; 60 };
61 }; 61 };
62 62
63 debug_uart: dw-apb-uart@0x5000 { 63 debug_uart: dw-apb-uart@5000 {
64 compatible = "snps,dw-apb-uart"; 64 compatible = "snps,dw-apb-uart";
65 reg = <0x5000 0x100>; 65 reg = <0x5000 0x100>;
66 clock-frequency = <33333000>; 66 clock-frequency = <33333000>;
@@ -88,7 +88,7 @@
88 * avoid duplicating the MB dtsi file given that IRQ from 88 * avoid duplicating the MB dtsi file given that IRQ from
89 * this intc to cpu intc are different for axs101 and axs103 89 * this intc to cpu intc are different for axs101 and axs103
90 */ 90 */
91 mb_intc: dw-apb-ictl@0xe0012000 { 91 mb_intc: dw-apb-ictl@e0012000 {
92 #interrupt-cells = <1>; 92 #interrupt-cells = <1>;
93 compatible = "snps,dw-apb-ictl"; 93 compatible = "snps,dw-apb-ictl";
94 reg = < 0x0 0xe0012000 0x0 0x200 >; 94 reg = < 0x0 0xe0012000 0x0 0x200 >;
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index d75d65ddf8e3..effa37536d7a 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -55,7 +55,7 @@
55 * this GPIO block ORs all interrupts on CPU card (creg,..) 55 * this GPIO block ORs all interrupts on CPU card (creg,..)
56 * to uplink only 1 IRQ to ARC core intc 56 * to uplink only 1 IRQ to ARC core intc
57 */ 57 */
58 dw-apb-gpio@0x2000 { 58 dw-apb-gpio@2000 {
59 compatible = "snps,dw-apb-gpio"; 59 compatible = "snps,dw-apb-gpio";
60 reg = < 0x2000 0x80 >; 60 reg = < 0x2000 0x80 >;
61 #address-cells = <1>; 61 #address-cells = <1>;
@@ -74,7 +74,7 @@
74 }; 74 };
75 }; 75 };
76 76
77 debug_uart: dw-apb-uart@0x5000 { 77 debug_uart: dw-apb-uart@5000 {
78 compatible = "snps,dw-apb-uart"; 78 compatible = "snps,dw-apb-uart";
79 reg = <0x5000 0x100>; 79 reg = <0x5000 0x100>;
80 clock-frequency = <33333000>; 80 clock-frequency = <33333000>;
@@ -102,19 +102,19 @@
102 * external DMA buffer located outside of IOC aperture. 102 * external DMA buffer located outside of IOC aperture.
103 */ 103 */
104 axs10x_mb { 104 axs10x_mb {
105 ethernet@0x18000 { 105 ethernet@18000 {
106 dma-coherent; 106 dma-coherent;
107 }; 107 };
108 108
109 ehci@0x40000 { 109 ehci@40000 {
110 dma-coherent; 110 dma-coherent;
111 }; 111 };
112 112
113 ohci@0x60000 { 113 ohci@60000 {
114 dma-coherent; 114 dma-coherent;
115 }; 115 };
116 116
117 mmc@0x15000 { 117 mmc@15000 {
118 dma-coherent; 118 dma-coherent;
119 }; 119 };
120 }; 120 };
@@ -132,7 +132,7 @@
132 * avoid duplicating the MB dtsi file given that IRQ from 132 * avoid duplicating the MB dtsi file given that IRQ from
133 * this intc to cpu intc are different for axs101 and axs103 133 * this intc to cpu intc are different for axs101 and axs103
134 */ 134 */
135 mb_intc: dw-apb-ictl@0xe0012000 { 135 mb_intc: dw-apb-ictl@e0012000 {
136 #interrupt-cells = <1>; 136 #interrupt-cells = <1>;
137 compatible = "snps,dw-apb-ictl"; 137 compatible = "snps,dw-apb-ictl";
138 reg = < 0x0 0xe0012000 0x0 0x200 >; 138 reg = < 0x0 0xe0012000 0x0 0x200 >;
@@ -153,7 +153,7 @@
153 #size-cells = <2>; 153 #size-cells = <2>;
154 ranges; 154 ranges;
155 /* 155 /*
156 * Move frame buffer out of IOC aperture (0x8z-0xAz). 156 * Move frame buffer out of IOC aperture (0x8z-0xaz).
157 */ 157 */
158 frame_buffer: frame_buffer@be000000 { 158 frame_buffer: frame_buffer@be000000 {
159 compatible = "shared-dma-pool"; 159 compatible = "shared-dma-pool";
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi
index a05bb737ea63..e401e59f6180 100644
--- a/arch/arc/boot/dts/axc003_idu.dtsi
+++ b/arch/arc/boot/dts/axc003_idu.dtsi
@@ -62,7 +62,7 @@
62 * this GPIO block ORs all interrupts on CPU card (creg,..) 62 * this GPIO block ORs all interrupts on CPU card (creg,..)
63 * to uplink only 1 IRQ to ARC core intc 63 * to uplink only 1 IRQ to ARC core intc
64 */ 64 */
65 dw-apb-gpio@0x2000 { 65 dw-apb-gpio@2000 {
66 compatible = "snps,dw-apb-gpio"; 66 compatible = "snps,dw-apb-gpio";
67 reg = < 0x2000 0x80 >; 67 reg = < 0x2000 0x80 >;
68 #address-cells = <1>; 68 #address-cells = <1>;
@@ -81,7 +81,7 @@
81 }; 81 };
82 }; 82 };
83 83
84 debug_uart: dw-apb-uart@0x5000 { 84 debug_uart: dw-apb-uart@5000 {
85 compatible = "snps,dw-apb-uart"; 85 compatible = "snps,dw-apb-uart";
86 reg = <0x5000 0x100>; 86 reg = <0x5000 0x100>;
87 clock-frequency = <33333000>; 87 clock-frequency = <33333000>;
@@ -109,19 +109,19 @@
109 * external DMA buffer located outside of IOC aperture. 109 * external DMA buffer located outside of IOC aperture.
110 */ 110 */
111 axs10x_mb { 111 axs10x_mb {
112 ethernet@0x18000 { 112 ethernet@18000 {
113 dma-coherent; 113 dma-coherent;
114 }; 114 };
115 115
116 ehci@0x40000 { 116 ehci@40000 {
117 dma-coherent; 117 dma-coherent;
118 }; 118 };
119 119
120 ohci@0x60000 { 120 ohci@60000 {
121 dma-coherent; 121 dma-coherent;
122 }; 122 };
123 123
124 mmc@0x15000 { 124 mmc@15000 {
125 dma-coherent; 125 dma-coherent;
126 }; 126 };
127 }; 127 };
@@ -138,7 +138,7 @@
138 * avoid duplicating the MB dtsi file given that IRQ from 138 * avoid duplicating the MB dtsi file given that IRQ from
139 * this intc to cpu intc are different for axs101 and axs103 139 * this intc to cpu intc are different for axs101 and axs103
140 */ 140 */
141 mb_intc: dw-apb-ictl@0xe0012000 { 141 mb_intc: dw-apb-ictl@e0012000 {
142 #interrupt-cells = <1>; 142 #interrupt-cells = <1>;
143 compatible = "snps,dw-apb-ictl"; 143 compatible = "snps,dw-apb-ictl";
144 reg = < 0x0 0xe0012000 0x0 0x200 >; 144 reg = < 0x0 0xe0012000 0x0 0x200 >;
@@ -159,7 +159,7 @@
159 #size-cells = <2>; 159 #size-cells = <2>;
160 ranges; 160 ranges;
161 /* 161 /*
162 * Move frame buffer out of IOC aperture (0x8z-0xAz). 162 * Move frame buffer out of IOC aperture (0x8z-0xaz).
163 */ 163 */
164 frame_buffer: frame_buffer@be000000 { 164 frame_buffer: frame_buffer@be000000 {
165 compatible = "shared-dma-pool"; 165 compatible = "shared-dma-pool";
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 37bafd44e36d..4ead6dc9af2f 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -72,7 +72,7 @@
72 }; 72 };
73 }; 73 };
74 74
75 gmac: ethernet@0x18000 { 75 gmac: ethernet@18000 {
76 #interrupt-cells = <1>; 76 #interrupt-cells = <1>;
77 compatible = "snps,dwmac"; 77 compatible = "snps,dwmac";
78 reg = < 0x18000 0x2000 >; 78 reg = < 0x18000 0x2000 >;
@@ -88,13 +88,13 @@
88 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */ 88 mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
89 }; 89 };
90 90
91 ehci@0x40000 { 91 ehci@40000 {
92 compatible = "generic-ehci"; 92 compatible = "generic-ehci";
93 reg = < 0x40000 0x100 >; 93 reg = < 0x40000 0x100 >;
94 interrupts = < 8 >; 94 interrupts = < 8 >;
95 }; 95 };
96 96
97 ohci@0x60000 { 97 ohci@60000 {
98 compatible = "generic-ohci"; 98 compatible = "generic-ohci";
99 reg = < 0x60000 0x100 >; 99 reg = < 0x60000 0x100 >;
100 interrupts = < 8 >; 100 interrupts = < 8 >;
@@ -118,7 +118,7 @@
118 * dw_mci_pltfm_prepare_command() is used in generic platform 118 * dw_mci_pltfm_prepare_command() is used in generic platform
119 * code. 119 * code.
120 */ 120 */
121 mmc@0x15000 { 121 mmc@15000 {
122 compatible = "altr,socfpga-dw-mshc"; 122 compatible = "altr,socfpga-dw-mshc";
123 reg = < 0x15000 0x400 >; 123 reg = < 0x15000 0x400 >;
124 fifo-depth = < 16 >; 124 fifo-depth = < 16 >;
@@ -129,7 +129,7 @@
129 bus-width = < 4 >; 129 bus-width = < 4 >;
130 }; 130 };
131 131
132 uart@0x20000 { 132 uart@20000 {
133 compatible = "snps,dw-apb-uart"; 133 compatible = "snps,dw-apb-uart";
134 reg = <0x20000 0x100>; 134 reg = <0x20000 0x100>;
135 clock-frequency = <33333333>; 135 clock-frequency = <33333333>;
@@ -139,7 +139,7 @@
139 reg-io-width = <4>; 139 reg-io-width = <4>;
140 }; 140 };
141 141
142 uart@0x21000 { 142 uart@21000 {
143 compatible = "snps,dw-apb-uart"; 143 compatible = "snps,dw-apb-uart";
144 reg = <0x21000 0x100>; 144 reg = <0x21000 0x100>;
145 clock-frequency = <33333333>; 145 clock-frequency = <33333333>;
@@ -150,7 +150,7 @@
150 }; 150 };
151 151
152 /* UART muxed with USB data port (ttyS3) */ 152 /* UART muxed with USB data port (ttyS3) */
153 uart@0x22000 { 153 uart@22000 {
154 compatible = "snps,dw-apb-uart"; 154 compatible = "snps,dw-apb-uart";
155 reg = <0x22000 0x100>; 155 reg = <0x22000 0x100>;
156 clock-frequency = <33333333>; 156 clock-frequency = <33333333>;
@@ -160,7 +160,7 @@
160 reg-io-width = <4>; 160 reg-io-width = <4>;
161 }; 161 };
162 162
163 i2c@0x1d000 { 163 i2c@1d000 {
164 compatible = "snps,designware-i2c"; 164 compatible = "snps,designware-i2c";
165 reg = <0x1d000 0x100>; 165 reg = <0x1d000 0x100>;
166 clock-frequency = <400000>; 166 clock-frequency = <400000>;
@@ -177,7 +177,7 @@
177 #sound-dai-cells = <0>; 177 #sound-dai-cells = <0>;
178 }; 178 };
179 179
180 i2c@0x1f000 { 180 i2c@1f000 {
181 compatible = "snps,designware-i2c"; 181 compatible = "snps,designware-i2c";
182 #address-cells = <1>; 182 #address-cells = <1>;
183 #size-cells = <0>; 183 #size-cells = <0>;
@@ -218,13 +218,13 @@
218 }; 218 };
219 }; 219 };
220 220
221 eeprom@0x54{ 221 eeprom@54{
222 compatible = "atmel,24c01"; 222 compatible = "atmel,24c01";
223 reg = <0x54>; 223 reg = <0x54>;
224 pagesize = <0x8>; 224 pagesize = <0x8>;
225 }; 225 };
226 226
227 eeprom@0x57{ 227 eeprom@57{
228 compatible = "atmel,24c04"; 228 compatible = "atmel,24c04";
229 reg = <0x57>; 229 reg = <0x57>;
230 pagesize = <0x8>; 230 pagesize = <0x8>;
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 43f17b51ee89..69bc1c9e8e50 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -110,12 +110,12 @@
110 cgu_rst: reset-controller@8a0 { 110 cgu_rst: reset-controller@8a0 {
111 compatible = "snps,hsdk-reset"; 111 compatible = "snps,hsdk-reset";
112 #reset-cells = <1>; 112 #reset-cells = <1>;
113 reg = <0x8A0 0x4>, <0xFF0 0x4>; 113 reg = <0x8a0 0x4>, <0xff0 0x4>;
114 }; 114 };
115 115
116 core_clk: core-clk@0 { 116 core_clk: core-clk@0 {
117 compatible = "snps,hsdk-core-pll-clock"; 117 compatible = "snps,hsdk-core-pll-clock";
118 reg = <0x00 0x10>, <0x14B8 0x4>; 118 reg = <0x00 0x10>, <0x14b8 0x4>;
119 #clock-cells = <0>; 119 #clock-cells = <0>;
120 clocks = <&input_clk>; 120 clocks = <&input_clk>;
121 121
@@ -167,6 +167,18 @@
167 #clock-cells = <0>; 167 #clock-cells = <0>;
168 }; 168 };
169 169
170 dmac_core_clk: dmac-core-clk {
171 compatible = "fixed-clock";
172 clock-frequency = <400000000>;
173 #clock-cells = <0>;
174 };
175
176 dmac_cfg_clk: dmac-gpu-cfg-clk {
177 compatible = "fixed-clock";
178 clock-frequency = <200000000>;
179 #clock-cells = <0>;
180 };
181
170 gmac: ethernet@8000 { 182 gmac: ethernet@8000 {
171 #interrupt-cells = <1>; 183 #interrupt-cells = <1>;
172 compatible = "snps,dwmac"; 184 compatible = "snps,dwmac";
@@ -200,6 +212,7 @@
200 compatible = "snps,hsdk-v1.0-ohci", "generic-ohci"; 212 compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
201 reg = <0x60000 0x100>; 213 reg = <0x60000 0x100>;
202 interrupts = <15>; 214 interrupts = <15>;
215 resets = <&cgu_rst HSDK_USB_RESET>;
203 dma-coherent; 216 dma-coherent;
204 }; 217 };
205 218
@@ -207,6 +220,7 @@
207 compatible = "snps,hsdk-v1.0-ehci", "generic-ehci"; 220 compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
208 reg = <0x40000 0x100>; 221 reg = <0x40000 0x100>;
209 interrupts = <15>; 222 interrupts = <15>;
223 resets = <&cgu_rst HSDK_USB_RESET>;
210 dma-coherent; 224 dma-coherent;
211 }; 225 };
212 226
@@ -237,6 +251,21 @@
237 reg = <0>; 251 reg = <0>;
238 }; 252 };
239 }; 253 };
254
255 dmac: dmac@80000 {
256 compatible = "snps,axi-dma-1.01a";
257 reg = <0x80000 0x400>;
258 interrupts = <27>;
259 clocks = <&dmac_core_clk>, <&dmac_cfg_clk>;
260 clock-names = "core-clk", "cfgr-clk";
261
262 dma-channels = <4>;
263 snps,dma-masters = <2>;
264 snps,data-width = <3>;
265 snps,block-size = <4096 4096 4096 4096>;
266 snps,priority = <0 1 2 3>;
267 snps,axi-max-burst-len = <16>;
268 };
240 }; 269 };
241 270
242 memory@80000000 { 271 memory@80000000 {
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi
index 0fd6ba985b16..84e8766c8ca2 100644
--- a/arch/arc/boot/dts/vdk_axc003.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003.dtsi
@@ -36,7 +36,7 @@
36 #interrupt-cells = <1>; 36 #interrupt-cells = <1>;
37 }; 37 };
38 38
39 debug_uart: dw-apb-uart@0x5000 { 39 debug_uart: dw-apb-uart@5000 {
40 compatible = "snps,dw-apb-uart"; 40 compatible = "snps,dw-apb-uart";
41 reg = <0x5000 0x100>; 41 reg = <0x5000 0x100>;
42 clock-frequency = <2403200>; 42 clock-frequency = <2403200>;
@@ -49,7 +49,7 @@
49 49
50 }; 50 };
51 51
52 mb_intc: dw-apb-ictl@0xe0012000 { 52 mb_intc: dw-apb-ictl@e0012000 {
53 #interrupt-cells = <1>; 53 #interrupt-cells = <1>;
54 compatible = "snps,dw-apb-ictl"; 54 compatible = "snps,dw-apb-ictl";
55 reg = < 0xe0012000 0x200 >; 55 reg = < 0xe0012000 0x200 >;
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
index 28956f9a9f3d..eb7e705e8a27 100644
--- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi
+++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi
@@ -44,7 +44,7 @@
44 #interrupt-cells = <1>; 44 #interrupt-cells = <1>;
45 }; 45 };
46 46
47 debug_uart: dw-apb-uart@0x5000 { 47 debug_uart: dw-apb-uart@5000 {
48 compatible = "snps,dw-apb-uart"; 48 compatible = "snps,dw-apb-uart";
49 reg = <0x5000 0x100>; 49 reg = <0x5000 0x100>;
50 clock-frequency = <2403200>; 50 clock-frequency = <2403200>;
@@ -57,7 +57,7 @@
57 57
58 }; 58 };
59 59
60 mb_intc: dw-apb-ictl@0xe0012000 { 60 mb_intc: dw-apb-ictl@e0012000 {
61 #interrupt-cells = <1>; 61 #interrupt-cells = <1>;
62 compatible = "snps,dw-apb-ictl"; 62 compatible = "snps,dw-apb-ictl";
63 reg = < 0xe0012000 0x200 >; 63 reg = < 0xe0012000 0x200 >;
diff --git a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
index 48bb4b4cd234..925d5cc95dbb 100644
--- a/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/vdk_axs10x_mb.dtsi
@@ -36,7 +36,7 @@
36 }; 36 };
37 }; 37 };
38 38
39 ethernet@0x18000 { 39 ethernet@18000 {
40 #interrupt-cells = <1>; 40 #interrupt-cells = <1>;
41 compatible = "snps,dwmac"; 41 compatible = "snps,dwmac";
42 reg = < 0x18000 0x2000 >; 42 reg = < 0x18000 0x2000 >;
@@ -49,13 +49,13 @@
49 clock-names = "stmmaceth"; 49 clock-names = "stmmaceth";
50 }; 50 };
51 51
52 ehci@0x40000 { 52 ehci@40000 {
53 compatible = "generic-ehci"; 53 compatible = "generic-ehci";
54 reg = < 0x40000 0x100 >; 54 reg = < 0x40000 0x100 >;
55 interrupts = < 8 >; 55 interrupts = < 8 >;
56 }; 56 };
57 57
58 uart@0x20000 { 58 uart@20000 {
59 compatible = "snps,dw-apb-uart"; 59 compatible = "snps,dw-apb-uart";
60 reg = <0x20000 0x100>; 60 reg = <0x20000 0x100>;
61 clock-frequency = <2403200>; 61 clock-frequency = <2403200>;
@@ -65,7 +65,7 @@
65 reg-io-width = <4>; 65 reg-io-width = <4>;
66 }; 66 };
67 67
68 uart@0x21000 { 68 uart@21000 {
69 compatible = "snps,dw-apb-uart"; 69 compatible = "snps,dw-apb-uart";
70 reg = <0x21000 0x100>; 70 reg = <0x21000 0x100>;
71 clock-frequency = <2403200>; 71 clock-frequency = <2403200>;
@@ -75,7 +75,7 @@
75 reg-io-width = <4>; 75 reg-io-width = <4>;
76 }; 76 };
77 77
78 uart@0x22000 { 78 uart@22000 {
79 compatible = "snps,dw-apb-uart"; 79 compatible = "snps,dw-apb-uart";
80 reg = <0x22000 0x100>; 80 reg = <0x22000 0x100>;
81 clock-frequency = <2403200>; 81 clock-frequency = <2403200>;
@@ -101,7 +101,7 @@
101 interrupt-names = "arc_ps2_irq"; 101 interrupt-names = "arc_ps2_irq";
102 }; 102 };
103 103
104 mmc@0x15000 { 104 mmc@15000 {
105 compatible = "snps,dw-mshc"; 105 compatible = "snps,dw-mshc";
106 reg = <0x15000 0x400>; 106 reg = <0x15000 0x400>;
107 fifo-depth = <1024>; 107 fifo-depth = <1024>;
@@ -117,11 +117,11 @@
117 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK 117 * Embedded Vision subsystem UIO mappings; only relevant for EV VDK
118 * 118 *
119 * This node is intentionally put outside of MB above becase 119 * This node is intentionally put outside of MB above becase
120 * it maps areas outside of MB's 0xEz-0xFz. 120 * it maps areas outside of MB's 0xez-0xfz.
121 */ 121 */
122 uio_ev: uio@0xD0000000 { 122 uio_ev: uio@d0000000 {
123 compatible = "generic-uio"; 123 compatible = "generic-uio";
124 reg = <0xD0000000 0x2000 0xD1000000 0x2000 0x90000000 0x10000000 0xC0000000 0x10000000>; 124 reg = <0xd0000000 0x2000 0xd1000000 0x2000 0x90000000 0x10000000 0xc0000000 0x10000000>;
125 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem"; 125 reg-names = "ev_gsa", "ev_ctrl", "ev_shared_mem", "ev_code_mem";
126 interrupt-parent = <&mb_intc>; 126 interrupt-parent = <&mb_intc>;
127 interrupts = <23>; 127 interrupts = <23>;
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 6fd3d29546af..0e5fd29ed238 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -8,6 +8,7 @@ CONFIG_NAMESPACES=y
8# CONFIG_UTS_NS is not set 8# CONFIG_UTS_NS is not set
9# CONFIG_PID_NS is not set 9# CONFIG_PID_NS is not set
10CONFIG_BLK_DEV_INITRD=y 10CONFIG_BLK_DEV_INITRD=y
11CONFIG_BLK_DEV_RAM=y
11CONFIG_EMBEDDED=y 12CONFIG_EMBEDDED=y
12CONFIG_PERF_EVENTS=y 13CONFIG_PERF_EVENTS=y
13# CONFIG_VM_EVENT_COUNTERS is not set 14# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index b41f8881ecc8..decc306a3b52 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += hardirq.h
11generic-y += hw_irq.h 11generic-y += hw_irq.h
12generic-y += irq_regs.h 12generic-y += irq_regs.h
13generic-y += irq_work.h 13generic-y += irq_work.h
14generic-y += kvm_para.h
14generic-y += local.h 15generic-y += local.h
15generic-y += local64.h 16generic-y += local64.h
16generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index a27eafdc8260..a7d4be87b2f0 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -82,6 +82,7 @@
82#define ECR_V_DTLB_MISS 0x05 82#define ECR_V_DTLB_MISS 0x05
83#define ECR_V_PROTV 0x06 83#define ECR_V_PROTV 0x06
84#define ECR_V_TRAP 0x09 84#define ECR_V_TRAP 0x09
85#define ECR_V_MISALIGN 0x0d
85#endif 86#endif
86 87
87/* DTLB Miss and Protection Violation Cause Codes */ 88/* DTLB Miss and Protection Violation Cause Codes */
@@ -167,14 +168,6 @@ struct bcr_mpy {
167#endif 168#endif
168}; 169};
169 170
170struct bcr_extn_xymem {
171#ifdef CONFIG_CPU_BIG_ENDIAN
172 unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
173#else
174 unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
175#endif
176};
177
178struct bcr_iccm_arcompact { 171struct bcr_iccm_arcompact {
179#ifdef CONFIG_CPU_BIG_ENDIAN 172#ifdef CONFIG_CPU_BIG_ENDIAN
180 unsigned int base:16, pad:5, sz:3, ver:8; 173 unsigned int base:16, pad:5, sz:3, ver:8;
@@ -312,7 +305,7 @@ struct cpuinfo_arc {
312 struct cpuinfo_arc_bpu bpu; 305 struct cpuinfo_arc_bpu bpu;
313 struct bcr_identity core; 306 struct bcr_identity core;
314 struct bcr_isa_arcv2 isa; 307 struct bcr_isa_arcv2 isa;
315 const char *details, *name; 308 const char *release, *name;
316 unsigned int vec_base; 309 unsigned int vec_base;
317 struct cpuinfo_arc_ccm iccm, dccm; 310 struct cpuinfo_arc_ccm iccm, dccm;
318 struct { 311 struct {
@@ -322,7 +315,6 @@ struct cpuinfo_arc {
322 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; 315 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
323 } extn; 316 } extn;
324 struct bcr_mpy extn_mpy; 317 struct bcr_mpy extn_mpy;
325 struct bcr_extn_xymem extn_xymem;
326}; 318};
327 319
328extern struct cpuinfo_arc cpuinfo_arc700[]; 320extern struct cpuinfo_arc cpuinfo_arc700[];
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 8a4f77ea3238..e66d0339e1d8 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -44,7 +44,13 @@
44#define ARCV2_IRQ_DEF_PRIO 1 44#define ARCV2_IRQ_DEF_PRIO 1
45 45
46/* seed value for status register */ 46/* seed value for status register */
47#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ 47#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
48#define __AD_ENB STATUS_AD_MASK
49#else
50#define __AD_ENB 0
51#endif
52
53#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | __AD_ENB | \
48 (ARCV2_IRQ_DEF_PRIO << 1)) 54 (ARCV2_IRQ_DEF_PRIO << 1))
49 55
50#ifndef __ASSEMBLY__ 56#ifndef __ASSEMBLY__
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 6958545390f0..9cd7ee4fad39 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -105,10 +105,10 @@ static const char * const arc_pmu_ev_hw_map[] = {
105 [PERF_COUNT_HW_INSTRUCTIONS] = "iall", 105 [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
106 /* All jump instructions that are taken */ 106 /* All jump instructions that are taken */
107 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", 107 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
108 [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
109#ifdef CONFIG_ISA_ARCV2 108#ifdef CONFIG_ISA_ARCV2
110 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", 109 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
111#else 110#else
111 [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
112 [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */ 112 [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
113#endif 113#endif
114 [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */ 114 [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index 2ba04a7db621..daa914da7968 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -21,8 +21,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
21{ 21{
22 unsigned int val; 22 unsigned int val;
23 23
24 smp_mb();
25
26 __asm__ __volatile__( 24 __asm__ __volatile__(
27 "1: llock %[val], [%[slock]] \n" 25 "1: llock %[val], [%[slock]] \n"
28 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */ 26 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
@@ -34,6 +32,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
34 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) 32 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
35 : "memory", "cc"); 33 : "memory", "cc");
36 34
35 /*
36 * ACQUIRE barrier to ensure load/store after taking the lock
37 * don't "bleed-up" out of the critical section (leak-in is allowed)
38 * http://www.spinics.net/lists/kernel/msg2010409.html
39 *
40 * ARCv2 only has load-load, store-store and all-all barrier
41 * thus need the full all-all barrier
42 */
37 smp_mb(); 43 smp_mb();
38} 44}
39 45
@@ -42,8 +48,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
42{ 48{
43 unsigned int val, got_it = 0; 49 unsigned int val, got_it = 0;
44 50
45 smp_mb();
46
47 __asm__ __volatile__( 51 __asm__ __volatile__(
48 "1: llock %[val], [%[slock]] \n" 52 "1: llock %[val], [%[slock]] \n"
49 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ 53 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
@@ -67,9 +71,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
67{ 71{
68 smp_mb(); 72 smp_mb();
69 73
70 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; 74 WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
71
72 smp_mb();
73} 75}
74 76
75/* 77/*
@@ -81,8 +83,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
81{ 83{
82 unsigned int val; 84 unsigned int val;
83 85
84 smp_mb();
85
86 /* 86 /*
87 * zero means writer holds the lock exclusively, deny Reader. 87 * zero means writer holds the lock exclusively, deny Reader.
88 * Otherwise grant lock to first/subseq reader 88 * Otherwise grant lock to first/subseq reader
@@ -113,8 +113,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
113{ 113{
114 unsigned int val, got_it = 0; 114 unsigned int val, got_it = 0;
115 115
116 smp_mb();
117
118 __asm__ __volatile__( 116 __asm__ __volatile__(
119 "1: llock %[val], [%[rwlock]] \n" 117 "1: llock %[val], [%[rwlock]] \n"
120 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ 118 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
@@ -140,8 +138,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
140{ 138{
141 unsigned int val; 139 unsigned int val;
142 140
143 smp_mb();
144
145 /* 141 /*
146 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), 142 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
147 * deny writer. Otherwise if unlocked grant to writer 143 * deny writer. Otherwise if unlocked grant to writer
@@ -175,8 +171,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
175{ 171{
176 unsigned int val, got_it = 0; 172 unsigned int val, got_it = 0;
177 173
178 smp_mb();
179
180 __asm__ __volatile__( 174 __asm__ __volatile__(
181 "1: llock %[val], [%[rwlock]] \n" 175 "1: llock %[val], [%[rwlock]] \n"
182 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ 176 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
@@ -217,17 +211,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
217 : [val] "=&r" (val) 211 : [val] "=&r" (val)
218 : [rwlock] "r" (&(rw->counter)) 212 : [rwlock] "r" (&(rw->counter))
219 : "memory", "cc"); 213 : "memory", "cc");
220
221 smp_mb();
222} 214}
223 215
224static inline void arch_write_unlock(arch_rwlock_t *rw) 216static inline void arch_write_unlock(arch_rwlock_t *rw)
225{ 217{
226 smp_mb(); 218 smp_mb();
227 219
228 rw->counter = __ARCH_RW_LOCK_UNLOCKED__; 220 WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
229
230 smp_mb();
231} 221}
232 222
233#else /* !CONFIG_ARC_HAS_LLSC */ 223#else /* !CONFIG_ARC_HAS_LLSC */
@@ -237,10 +227,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
237 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__; 227 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
238 228
239 /* 229 /*
240 * This smp_mb() is technically superfluous, we only need the one 230 * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
241 * after the lock for providing the ACQUIRE semantics. 231 * for ACQ and REL semantics respectively. However EX based spinlocks
242 * However doing the "right" thing was regressing hackbench 232 * need the extra smp_mb to workaround a hardware quirk.
243 * so keeping this, pending further investigation
244 */ 233 */
245 smp_mb(); 234 smp_mb();
246 235
@@ -257,14 +246,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
257#endif 246#endif
258 : "memory"); 247 : "memory");
259 248
260 /*
261 * ACQUIRE barrier to ensure load/store after taking the lock
262 * don't "bleed-up" out of the critical section (leak-in is allowed)
263 * http://www.spinics.net/lists/kernel/msg2010409.html
264 *
265 * ARCv2 only has load-load, store-store and all-all barrier
266 * thus need the full all-all barrier
267 */
268 smp_mb(); 249 smp_mb();
269} 250}
270 251
@@ -309,8 +290,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
309 : "memory"); 290 : "memory");
310 291
311 /* 292 /*
312 * superfluous, but keeping for now - see pairing version in 293 * see pairing version/comment in arch_spin_lock above
313 * arch_spin_lock above
314 */ 294 */
315 smp_mb(); 295 smp_mb();
316} 296}
@@ -344,7 +324,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
344 arch_spin_unlock(&(rw->lock_mutex)); 324 arch_spin_unlock(&(rw->lock_mutex));
345 local_irq_restore(flags); 325 local_irq_restore(flags);
346 326
347 smp_mb();
348 return ret; 327 return ret;
349} 328}
350 329
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/arc/include/uapi/asm/Kbuild
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 30e090625916..8f6e0447dd17 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -54,7 +54,12 @@
54 ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access 54 ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
55 ; by default 55 ; by default
56 lr r5, [status32] 56 lr r5, [status32]
57#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
57 bset r5, r5, STATUS_AD_BIT 58 bset r5, r5, STATUS_AD_BIT
59#else
60 ; Although disabled at reset, bootloader might have enabled it
61 bclr r5, r5, STATUS_AD_BIT
62#endif
58 kflag r5 63 kflag r5
59#endif 64#endif
60.endm 65.endm
@@ -106,6 +111,7 @@ ENTRY(stext)
106 ; r2 = pointer to uboot provided cmdline or external DTB in mem 111 ; r2 = pointer to uboot provided cmdline or external DTB in mem
107 ; These are handled later in handle_uboot_args() 112 ; These are handled later in handle_uboot_args()
108 st r0, [@uboot_tag] 113 st r0, [@uboot_tag]
114 st r1, [@uboot_magic]
109 st r2, [@uboot_arg] 115 st r2, [@uboot_arg]
110 116
111 ; setup "current" tsk and optionally cache it in dedicated r25 117 ; setup "current" tsk and optionally cache it in dedicated r25
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index cf18b3e5a934..c0d0124de089 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
95 95
96 /* setup status32, don't enable intr yet as kernel doesn't want */ 96 /* setup status32, don't enable intr yet as kernel doesn't want */
97 tmp = read_aux_reg(ARC_REG_STATUS32); 97 tmp = read_aux_reg(ARC_REG_STATUS32);
98 tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1); 98 tmp |= ARCV2_IRQ_DEF_PRIO << 1;
99 tmp &= ~STATUS_IE_MASK; 99 tmp &= ~STATUS_IE_MASK;
100 asm volatile("kflag %0 \n"::"r"(tmp)); 100 asm volatile("kflag %0 \n"::"r"(tmp));
101} 101}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7b2340996cf8..a9c88b7e9182 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -36,6 +36,7 @@ unsigned int intr_to_DE_cnt;
36 36
37/* Part of U-boot ABI: see head.S */ 37/* Part of U-boot ABI: see head.S */
38int __initdata uboot_tag; 38int __initdata uboot_tag;
39int __initdata uboot_magic;
39char __initdata *uboot_arg; 40char __initdata *uboot_arg;
40 41
41const struct machine_desc *machine_desc; 42const struct machine_desc *machine_desc;
@@ -44,29 +45,24 @@ struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
44 45
45struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; 46struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
46 47
47static const struct id_to_str arc_cpu_rel[] = { 48static const struct id_to_str arc_legacy_rel[] = {
49 /* ID.ARCVER, Release */
48#ifdef CONFIG_ISA_ARCOMPACT 50#ifdef CONFIG_ISA_ARCOMPACT
49 { 0x34, "R4.10"}, 51 { 0x34, "R4.10"},
50 { 0x35, "R4.11"}, 52 { 0x35, "R4.11"},
51#else 53#else
52 { 0x51, "R2.0" }, 54 { 0x51, "R2.0" },
53 { 0x52, "R2.1" }, 55 { 0x52, "R2.1" },
54 { 0x53, "R3.0" }, 56 { 0x53, "R3.0" },
55 { 0x54, "R3.10a" },
56#endif 57#endif
57 { 0x00, NULL } 58 { 0x00, NULL }
58}; 59};
59 60
60static const struct id_to_str arc_cpu_nm[] = { 61static const struct id_to_str arc_cpu_rel[] = {
61#ifdef CONFIG_ISA_ARCOMPACT 62 /* UARCH.MAJOR, Release */
62 { 0x20, "ARC 600" }, 63 { 0, "R3.10a"},
63 { 0x30, "ARC 770" }, /* 750 identified seperately */ 64 { 1, "R3.50a"},
64#else 65 { 0xFF, NULL }
65 { 0x40, "ARC EM" },
66 { 0x50, "ARC HS38" },
67 { 0x54, "ARC HS48" },
68#endif
69 { 0x00, "Unknown" }
70}; 66};
71 67
72static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu) 68static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
@@ -116,31 +112,72 @@ static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
116 } 112 }
117} 113}
118 114
115static void decode_arc_core(struct cpuinfo_arc *cpu)
116{
117 struct bcr_uarch_build_arcv2 uarch;
118 const struct id_to_str *tbl;
119
120 /*
121 * Up until (including) the first core4 release (0x54) things were
122 * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
123 * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
124 */
125
126 if (cpu->core.family < 0x54) { /* includes arc700 */
127
128 for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
129 if (cpu->core.family == tbl->id) {
130 cpu->release = tbl->str;
131 break;
132 }
133 }
134
135 if (is_isa_arcompact())
136 cpu->name = "ARC700";
137 else if (tbl->str)
138 cpu->name = "HS38";
139 else
140 cpu->name = cpu->release = "Unknown";
141
142 return;
143 }
144
145 /*
146 * However the subsequent HS release (same 0x54) allow HS38 or HS48
147 * configurations and encode this info in a different BCR.
148 * The BCR was introduced in 0x54 so can't be read unconditionally.
149 */
150
151 READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
152
153 if (uarch.prod == 4) {
154 cpu->name = "HS48";
155 cpu->extn.dual = 1;
156
157 } else {
158 cpu->name = "HS38";
159 }
160
161 for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
162 if (uarch.maj == tbl->id) {
163 cpu->release = tbl->str;
164 break;
165 }
166 }
167}
168
119static void read_arc_build_cfg_regs(void) 169static void read_arc_build_cfg_regs(void)
120{ 170{
121 struct bcr_timer timer; 171 struct bcr_timer timer;
122 struct bcr_generic bcr; 172 struct bcr_generic bcr;
123 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 173 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
124 const struct id_to_str *tbl;
125 struct bcr_isa_arcv2 isa; 174 struct bcr_isa_arcv2 isa;
126 struct bcr_actionpoint ap; 175 struct bcr_actionpoint ap;
127 176
128 FIX_PTR(cpu); 177 FIX_PTR(cpu);
129 178
130 READ_BCR(AUX_IDENTITY, cpu->core); 179 READ_BCR(AUX_IDENTITY, cpu->core);
131 180 decode_arc_core(cpu);
132 for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
133 if (cpu->core.family == tbl->id) {
134 cpu->details = tbl->str;
135 break;
136 }
137 }
138
139 for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
140 if ((cpu->core.family & 0xF4) == tbl->id)
141 break;
142 }
143 cpu->name = tbl->str;
144 181
145 READ_BCR(ARC_REG_TIMERS_BCR, timer); 182 READ_BCR(ARC_REG_TIMERS_BCR, timer);
146 cpu->extn.timer0 = timer.t0; 183 cpu->extn.timer0 = timer.t0;
@@ -151,16 +188,6 @@ static void read_arc_build_cfg_regs(void)
151 188
152 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); 189 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
153 190
154 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
155 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
156 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
157 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
158 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
159 cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
160 IS_ENABLED(CONFIG_ARC_HAS_SWAPE);
161
162 READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
163
164 /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */ 191 /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
165 read_decode_ccm_bcr(cpu); 192 read_decode_ccm_bcr(cpu);
166 193
@@ -198,30 +225,12 @@ static void read_arc_build_cfg_regs(void)
198 cpu->bpu.num_pred = 2048 << bpu.pte; 225 cpu->bpu.num_pred = 2048 << bpu.pte;
199 cpu->bpu.ret_stk = 4 << bpu.rse; 226 cpu->bpu.ret_stk = 4 << bpu.rse;
200 227
201 if (cpu->core.family >= 0x54) { 228 /* if dual issue hardware, is it enabled ? */
202 229 if (cpu->extn.dual) {
203 struct bcr_uarch_build_arcv2 uarch; 230 unsigned int exec_ctrl;
204
205 /*
206 * The first 0x54 core (uarch maj:min 0:1 or 0:2) was
207 * dual issue only (HS4x). But next uarch rev (1:0)
208 * allows it be configured for single issue (HS3x)
209 * Ensure we fiddle with dual issue only on HS4x
210 */
211 READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
212
213 if (uarch.prod == 4) {
214 unsigned int exec_ctrl;
215
216 /* dual issue hardware always present */
217 cpu->extn.dual = 1;
218
219 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
220 231
221 /* dual issue hardware enabled ? */ 232 READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
222 cpu->extn.dual_enb = !(exec_ctrl & 1); 233 cpu->extn.dual_enb = !(exec_ctrl & 1);
223
224 }
225 } 234 }
226 } 235 }
227 236
@@ -263,7 +272,8 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
263{ 272{
264 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; 273 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
265 struct bcr_identity *core = &cpu->core; 274 struct bcr_identity *core = &cpu->core;
266 int i, n = 0, ua = 0; 275 char mpy_opt[16];
276 int n = 0;
267 277
268 FIX_PTR(cpu); 278 FIX_PTR(cpu);
269 279
@@ -272,7 +282,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
272 core->family, core->cpu_id, core->chip_id); 282 core->family, core->cpu_id, core->chip_id);
273 283
274 n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n", 284 n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
275 cpu_id, cpu->name, cpu->details, 285 cpu_id, cpu->name, cpu->release,
276 is_isa_arcompact() ? "ARCompact" : "ARCv2", 286 is_isa_arcompact() ? "ARCompact" : "ARCv2",
277 IS_AVAIL1(cpu->isa.be, "[Big-Endian]"), 287 IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
278 IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue ")); 288 IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
@@ -283,61 +293,50 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
283 IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT), 293 IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
284 IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT)); 294 IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
285 295
286#ifdef __ARC_UNALIGNED__
287 ua = 1;
288#endif
289 n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
290 IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
291 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
292 IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
293
294 if (i)
295 n += scnprintf(buf + n, len - n, "\n\t\t: ");
296
297 if (cpu->extn_mpy.ver) { 296 if (cpu->extn_mpy.ver) {
298 if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */ 297 if (is_isa_arcompact()) {
299 n += scnprintf(buf + n, len - n, "mpy "); 298 scnprintf(mpy_opt, 16, "mpy");
300 } else { 299 } else {
300
301 int opt = 2; /* stock MPY/MPYH */ 301 int opt = 2; /* stock MPY/MPYH */
302 302
303 if (cpu->extn_mpy.dsp) /* OPT 7-9 */ 303 if (cpu->extn_mpy.dsp) /* OPT 7-9 */
304 opt = cpu->extn_mpy.dsp + 6; 304 opt = cpu->extn_mpy.dsp + 6;
305 305
306 n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt); 306 scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
307 } 307 }
308 } 308 }
309 309
310 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n", 310 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
311 IS_AVAIL1(cpu->isa.div_rem, "div_rem "), 311 IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
312 IS_AVAIL1(cpu->extn.norm, "norm "), 312 IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
313 IS_AVAIL1(cpu->extn.barrel, "barrel-shift "), 313 IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
314 IS_AVAIL1(cpu->extn.swap, "swap "), 314 IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
315 IS_AVAIL1(cpu->extn.minmax, "minmax "), 315 IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
316 IS_AVAIL1(cpu->extn.crc, "crc "), 316
317 IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE)); 317 if (cpu->bpu.ver) {
318
319 if (cpu->bpu.ver)
320 n += scnprintf(buf + n, len - n, 318 n += scnprintf(buf + n, len - n,
321 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d", 319 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
322 IS_AVAIL1(cpu->bpu.full, "full"), 320 IS_AVAIL1(cpu->bpu.full, "full"),
323 IS_AVAIL1(!cpu->bpu.full, "partial"), 321 IS_AVAIL1(!cpu->bpu.full, "partial"),
324 cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk); 322 cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
325 323
326 if (is_isa_arcv2()) { 324 if (is_isa_arcv2()) {
327 struct bcr_lpb lpb; 325 struct bcr_lpb lpb;
328 326
329 READ_BCR(ARC_REG_LPB_BUILD, lpb); 327 READ_BCR(ARC_REG_LPB_BUILD, lpb);
330 if (lpb.ver) { 328 if (lpb.ver) {
331 unsigned int ctl; 329 unsigned int ctl;
332 ctl = read_aux_reg(ARC_REG_LPB_CTRL); 330 ctl = read_aux_reg(ARC_REG_LPB_CTRL);
333 331
334 n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s", 332 n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
335 lpb.entries, 333 lpb.entries,
336 IS_DISABLED_RUN(!ctl)); 334 IS_DISABLED_RUN(!ctl));
335 }
337 } 336 }
337 n += scnprintf(buf + n, len - n, "\n");
338 } 338 }
339 339
340 n += scnprintf(buf + n, len - n, "\n");
341 return buf; 340 return buf;
342} 341}
343 342
@@ -390,11 +389,6 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
390 } 389 }
391 } 390 }
392 391
393 n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
394 EF_ARC_OSABI_CURRENT >> 8,
395 EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
396 "no-legacy-syscalls" : "64-bit data any register aligned");
397
398 return buf; 392 return buf;
399} 393}
400 394
@@ -497,6 +491,8 @@ static inline bool uboot_arg_invalid(unsigned long addr)
497#define UBOOT_TAG_NONE 0 491#define UBOOT_TAG_NONE 0
498#define UBOOT_TAG_CMDLINE 1 492#define UBOOT_TAG_CMDLINE 1
499#define UBOOT_TAG_DTB 2 493#define UBOOT_TAG_DTB 2
494/* We always pass 0 as magic from U-boot */
495#define UBOOT_MAGIC_VALUE 0
500 496
501void __init handle_uboot_args(void) 497void __init handle_uboot_args(void)
502{ 498{
@@ -511,6 +507,11 @@ void __init handle_uboot_args(void)
511 goto ignore_uboot_args; 507 goto ignore_uboot_args;
512 } 508 }
513 509
510 if (uboot_magic != UBOOT_MAGIC_VALUE) {
511 pr_warn(IGNORE_ARGS "non zero uboot magic\n");
512 goto ignore_uboot_args;
513 }
514
514 if (uboot_tag != UBOOT_TAG_NONE && 515 if (uboot_tag != UBOOT_TAG_NONE &&
515 uboot_arg_invalid((unsigned long)uboot_arg)) { 516 uboot_arg_invalid((unsigned long)uboot_arg)) {
516 pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); 517 pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 215f515442e0..b0aa8c028331 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
145 } else if (vec == ECR_V_PROTV) { 145 } else if (vec == ECR_V_PROTV) {
146 if (cause_code == ECR_C_PROTV_INST_FETCH) 146 if (cause_code == ECR_C_PROTV_INST_FETCH)
147 pr_cont("Execute from Non-exec Page\n"); 147 pr_cont("Execute from Non-exec Page\n");
148 else if (cause_code == ECR_C_PROTV_MISALIG_DATA) 148 else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
149 IS_ENABLED(CONFIG_ISA_ARCOMPACT))
149 pr_cont("Misaligned r/w from 0x%08lx\n", address); 150 pr_cont("Misaligned r/w from 0x%08lx\n", address);
150 else 151 else
151 pr_cont("%s access not allowed on page\n", 152 pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
161 pr_cont("Bus Error from Data Mem\n"); 162 pr_cont("Bus Error from Data Mem\n");
162 else 163 else
163 pr_cont("Bus Error, check PRM\n"); 164 pr_cont("Bus Error, check PRM\n");
165 } else if (vec == ECR_V_MISALIGN) {
166 pr_cont("Misaligned r/w from 0x%08lx\n", address);
164#endif 167#endif
165 } else if (vec == ECR_V_TRAP) { 168 } else if (vec == ECR_V_TRAP) {
166 if (regs->ecr_param == 5) 169 if (regs->ecr_param == 5)
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
index b1656d156097..f7537b466b23 100644
--- a/arch/arc/lib/Makefile
+++ b/arch/arc/lib/Makefile
@@ -8,4 +8,10 @@
8lib-y := strchr-700.o strcpy-700.o strlen.o memcmp.o 8lib-y := strchr-700.o strcpy-700.o strlen.o memcmp.o
9 9
10lib-$(CONFIG_ISA_ARCOMPACT) += memcpy-700.o memset.o strcmp.o 10lib-$(CONFIG_ISA_ARCOMPACT) += memcpy-700.o memset.o strcmp.o
11lib-$(CONFIG_ISA_ARCV2) += memcpy-archs.o memset-archs.o strcmp-archs.o 11lib-$(CONFIG_ISA_ARCV2) += memset-archs.o strcmp-archs.o
12
13ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
14lib-$(CONFIG_ISA_ARCV2) +=memcpy-archs-unaligned.o
15else
16lib-$(CONFIG_ISA_ARCV2) +=memcpy-archs.o
17endif
diff --git a/arch/arc/lib/memcpy-archs-unaligned.S b/arch/arc/lib/memcpy-archs-unaligned.S
new file mode 100644
index 000000000000..28993a73fdde
--- /dev/null
+++ b/arch/arc/lib/memcpy-archs-unaligned.S
@@ -0,0 +1,47 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * ARCv2 memcpy implementation optimized for unaligned memory access using.
4 *
5 * Copyright (C) 2019 Synopsys
6 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
7 */
8
9#include <linux/linkage.h>
10
11#ifdef CONFIG_ARC_HAS_LL64
12# define LOADX(DST,RX) ldd.ab DST, [RX, 8]
13# define STOREX(SRC,RX) std.ab SRC, [RX, 8]
14# define ZOLSHFT 5
15# define ZOLAND 0x1F
16#else
17# define LOADX(DST,RX) ld.ab DST, [RX, 4]
18# define STOREX(SRC,RX) st.ab SRC, [RX, 4]
19# define ZOLSHFT 4
20# define ZOLAND 0xF
21#endif
22
23ENTRY_CFI(memcpy)
24 mov r3, r0 ; don;t clobber ret val
25
26 lsr.f lp_count, r2, ZOLSHFT
27 lpnz @.Lcopy32_64bytes
28 ;; LOOP START
29 LOADX (r6, r1)
30 LOADX (r8, r1)
31 LOADX (r10, r1)
32 LOADX (r4, r1)
33 STOREX (r6, r3)
34 STOREX (r8, r3)
35 STOREX (r10, r3)
36 STOREX (r4, r3)
37.Lcopy32_64bytes:
38
39 and.f lp_count, r2, ZOLAND ;Last remaining 31 bytes
40 lpnz @.Lcopyremainingbytes
41 ;; LOOP START
42 ldb.ab r5, [r1, 1]
43 stb.ab r5, [r3, 1]
44.Lcopyremainingbytes:
45
46 j [blink]
47END_CFI(memcpy)
diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
index 8eff057efcae..2eaecfb063a7 100644
--- a/arch/arc/plat-eznps/Kconfig
+++ b/arch/arc/plat-eznps/Kconfig
@@ -26,8 +26,8 @@ config EZNPS_MTM_EXT
26 help 26 help
27 Here we add new hierarchy for CPUs topology. 27 Here we add new hierarchy for CPUs topology.
28 We got: 28 We got:
29 Core 29 Core
30 Thread 30 Thread
31 At the new thread level each CPU represent one HW thread. 31 At the new thread level each CPU represent one HW thread.
32 At highest hierarchy each core contain 16 threads, 32 At highest hierarchy each core contain 16 threads,
33 any of them seem like CPU from Linux point of view. 33 any of them seem like CPU from Linux point of view.
@@ -35,10 +35,10 @@ config EZNPS_MTM_EXT
35 core and HW scheduler round robin between them. 35 core and HW scheduler round robin between them.
36 36
37config EZNPS_MEM_ERROR_ALIGN 37config EZNPS_MEM_ERROR_ALIGN
38 bool "ARC-EZchip Memory error as an exception" 38 bool "ARC-EZchip Memory error as an exception"
39 depends on EZNPS_MTM_EXT 39 depends on EZNPS_MTM_EXT
40 default n 40 default n
41 help 41 help
42 On the real chip of the NPS, user memory errors are handled 42 On the real chip of the NPS, user memory errors are handled
43 as a machine check exception, which is fatal, whereas on 43 as a machine check exception, which is fatal, whereas on
44 simulator platform for NPS, is handled as a Level 2 interrupt 44 simulator platform for NPS, is handled as a Level 2 interrupt
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 054ead960f98..850b4805e2d1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -596,6 +596,7 @@ config ARCH_DAVINCI
596 select HAVE_IDE 596 select HAVE_IDE
597 select PM_GENERIC_DOMAINS if PM 597 select PM_GENERIC_DOMAINS if PM
598 select PM_GENERIC_DOMAINS_OF if PM && OF 598 select PM_GENERIC_DOMAINS_OF if PM && OF
599 select REGMAP_MMIO
599 select RESET_CONTROLLER 600 select RESET_CONTROLLER
600 select SPARSE_IRQ 601 select SPARSE_IRQ
601 select USE_OF 602 select USE_OF
diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
index 5641d162dfdb..28e7513ce617 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
@@ -93,7 +93,7 @@
93}; 93};
94 94
95&hdmi { 95&hdmi {
96 hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>; 96 hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
97}; 97};
98 98
99&pwm { 99&pwm {
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index b715ab0fa1ff..e8d800fec637 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -114,9 +114,9 @@
114 reg = <2>; 114 reg = <2>;
115 }; 115 };
116 116
117 switch@0 { 117 switch@10 {
118 compatible = "qca,qca8334"; 118 compatible = "qca,qca8334";
119 reg = <0>; 119 reg = <10>;
120 120
121 switch_ports: ports { 121 switch_ports: ports {
122 #address-cells = <1>; 122 #address-cells = <1>;
@@ -125,7 +125,7 @@
125 ethphy0: port@0 { 125 ethphy0: port@0 {
126 reg = <0>; 126 reg = <0>;
127 label = "cpu"; 127 label = "cpu";
128 phy-mode = "rgmii"; 128 phy-mode = "rgmii-id";
129 ethernet = <&fec>; 129 ethernet = <&fec>;
130 130
131 fixed-link { 131 fixed-link {
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
index 1d1b4bd0670f..a4217f564a53 100644
--- a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
@@ -264,7 +264,7 @@
264 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 264 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
265 vmcc-supply = <&reg_sd3_vmmc>; 265 vmcc-supply = <&reg_sd3_vmmc>;
266 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; 266 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
267 bus-witdh = <4>; 267 bus-width = <4>;
268 no-1-8-v; 268 no-1-8-v;
269 status = "okay"; 269 status = "okay";
270}; 270};
@@ -275,7 +275,7 @@
275 pinctrl-1 = <&pinctrl_usdhc4_100mhz>; 275 pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
276 pinctrl-2 = <&pinctrl_usdhc4_200mhz>; 276 pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
277 vmcc-supply = <&reg_sd4_vmmc>; 277 vmcc-supply = <&reg_sd4_vmmc>;
278 bus-witdh = <8>; 278 bus-width = <8>;
279 no-1-8-v; 279 no-1-8-v;
280 non-removable; 280 non-removable;
281 status = "okay"; 281 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 433bf09a1954..027df06c5dc7 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -91,6 +91,7 @@
91 pinctrl-0 = <&pinctrl_enet>; 91 pinctrl-0 = <&pinctrl_enet>;
92 phy-handle = <&ethphy>; 92 phy-handle = <&ethphy>;
93 phy-mode = "rgmii"; 93 phy-mode = "rgmii";
94 phy-reset-duration = <10>; /* in msecs */
94 phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; 95 phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
95 phy-supply = <&vdd_eth_io_reg>; 96 phy-supply = <&vdd_eth_io_reg>;
96 status = "disabled"; 97 status = "disabled";
diff --git a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
index f6fb6783c193..54cfe72295aa 100644
--- a/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
+++ b/arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Copyright (C) 2016 Freescale Semiconductor, Inc. 3 * Copyright (C) 2016 Freescale Semiconductor, Inc.
4 * Copyright (C) 2017 NXP 4 * Copyright (C) 2017 NXP
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 04066f9cb8a3..f2f6558a00f1 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -213,12 +213,13 @@
213 gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>; 213 gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
214 gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>; 214 gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
215 /* 215 /*
216 * It's not actually active high, but the frameworks assume 216 * This chipselect is active high. Just setting the flags
217 * the polarity of the passed-in GPIO is "normal" (active 217 * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
218 * high) then actively drives the line low to select the 218 * it will be ignored, only the special "spi-cs-high" flag
219 * chip. 219 * really counts.
220 */ 220 */
221 cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; 221 cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
222 spi-cs-high;
222 num-chipselects = <1>; 223 num-chipselects = <1>;
223 224
224 /* 225 /*
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 8661dd9b064a..b37f8e675e40 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
170# CONFIG_IOMMU_SUPPORT is not set 170# CONFIG_IOMMU_SUPPORT is not set
171CONFIG_IIO=y 171CONFIG_IIO=y
172CONFIG_FSL_MX25_ADC=y 172CONFIG_FSL_MX25_ADC=y
173CONFIG_PWM=y
174CONFIG_PWM_IMX1=y
175CONFIG_PWM_IMX27=y
173CONFIG_EXT4_FS=y 176CONFIG_EXT4_FS=y
174# CONFIG_DNOTIFY is not set 177# CONFIG_DNOTIFY is not set
175CONFIG_VFAT_FS=y 178CONFIG_VFAT_FS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 5586a5074a96..50fb01d70b10 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
398CONFIG_MPL3115=y 398CONFIG_MPL3115=y
399CONFIG_PWM=y 399CONFIG_PWM=y
400CONFIG_PWM_FSL_FTM=y 400CONFIG_PWM_FSL_FTM=y
401CONFIG_PWM_IMX=y 401CONFIG_PWM_IMX27=y
402CONFIG_NVMEM_IMX_OCOTP=y 402CONFIG_NVMEM_IMX_OCOTP=y
403CONFIG_NVMEM_VF610_OCOTP=y 403CONFIG_NVMEM_VF610_OCOTP=y
404CONFIG_TEE=y 404CONFIG_TEE=y
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 2de96a180166..31de4ab93005 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -381,6 +381,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
381 return ret; 381 return ret;
382} 382}
383 383
384static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
385 const void *data, unsigned long len)
386{
387 int srcu_idx = srcu_read_lock(&kvm->srcu);
388 int ret = kvm_write_guest(kvm, gpa, data, len);
389
390 srcu_read_unlock(&kvm->srcu, srcu_idx);
391
392 return ret;
393}
394
384static inline void *kvm_get_hyp_vector(void) 395static inline void *kvm_get_hyp_vector(void)
385{ 396{
386 switch(read_cpuid_part()) { 397 switch(read_cpuid_part()) {
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index de2089501b8b..9e11dce55e06 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -75,6 +75,8 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm)
75 75
76#define S2_PMD_MASK PMD_MASK 76#define S2_PMD_MASK PMD_MASK
77#define S2_PMD_SIZE PMD_SIZE 77#define S2_PMD_SIZE PMD_SIZE
78#define S2_PUD_MASK PUD_MASK
79#define S2_PUD_SIZE PUD_SIZE
78 80
79static inline bool kvm_stage2_has_pmd(struct kvm *kvm) 81static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
80{ 82{
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 23b4464c0995..ce8573157774 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -3,3 +3,4 @@
3generated-y += unistd-common.h 3generated-y += unistd-common.h
4generated-y += unistd-oabi.h 4generated-y += unistd-oabi.h
5generated-y += unistd-eabi.h 5generated-y += unistd-eabi.h
6generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/kvm_para.h b/arch/arm/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/arm/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 7d5a44a06648..f676592d8402 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -90,7 +90,7 @@ void __init cns3xxx_map_io(void)
90/* used by entry-macro.S */ 90/* used by entry-macro.S */
91void __init cns3xxx_init_irq(void) 91void __init cns3xxx_init_irq(void)
92{ 92{
93 gic_init(0, 29, IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), 93 gic_init(IOMEM(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
94 IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT)); 94 IOMEM(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
95} 95}
96 96
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index bfeb25aaf9a2..326e870d7123 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,30 +16,23 @@
16#include "cpuidle.h" 16#include "cpuidle.h"
17#include "hardware.h" 17#include "hardware.h"
18 18
19static atomic_t master = ATOMIC_INIT(0); 19static int num_idle_cpus = 0;
20static DEFINE_SPINLOCK(master_lock); 20static DEFINE_SPINLOCK(cpuidle_lock);
21 21
22static int imx6q_enter_wait(struct cpuidle_device *dev, 22static int imx6q_enter_wait(struct cpuidle_device *dev,
23 struct cpuidle_driver *drv, int index) 23 struct cpuidle_driver *drv, int index)
24{ 24{
25 if (atomic_inc_return(&master) == num_online_cpus()) { 25 spin_lock(&cpuidle_lock);
26 /* 26 if (++num_idle_cpus == num_online_cpus())
27 * With this lock, we prevent other cpu to exit and enter
28 * this function again and become the master.
29 */
30 if (!spin_trylock(&master_lock))
31 goto idle;
32 imx6_set_lpm(WAIT_UNCLOCKED); 27 imx6_set_lpm(WAIT_UNCLOCKED);
33 cpu_do_idle(); 28 spin_unlock(&cpuidle_lock);
34 imx6_set_lpm(WAIT_CLOCKED);
35 spin_unlock(&master_lock);
36 goto done;
37 }
38 29
39idle:
40 cpu_do_idle(); 30 cpu_do_idle();
41done: 31
42 atomic_dec(&master); 32 spin_lock(&cpuidle_lock);
33 if (num_idle_cpus-- == num_online_cpus())
34 imx6_set_lpm(WAIT_CLOCKED);
35 spin_unlock(&cpuidle_lock);
43 36
44 return index; 37 return index;
45} 38}
diff --git a/arch/arm/mach-imx/mach-imx51.c b/arch/arm/mach-imx/mach-imx51.c
index c7169c2f94c4..08c7892866c2 100644
--- a/arch/arm/mach-imx/mach-imx51.c
+++ b/arch/arm/mach-imx/mach-imx51.c
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
59 return; 59 return;
60 60
61 m4if_base = of_iomap(np, 0); 61 m4if_base = of_iomap(np, 0);
62 of_node_put(np);
62 if (!m4if_base) { 63 if (!m4if_base) {
63 pr_err("Unable to map M4IF registers\n"); 64 pr_err("Unable to map M4IF registers\n");
64 return; 65 return;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 117b2541ef3d..7e34b9eba5de 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -159,7 +159,6 @@ config ARM64
159 select IRQ_DOMAIN 159 select IRQ_DOMAIN
160 select IRQ_FORCED_THREADING 160 select IRQ_FORCED_THREADING
161 select MODULES_USE_ELF_RELA 161 select MODULES_USE_ELF_RELA
162 select MULTI_IRQ_HANDLER
163 select NEED_DMA_MAP_STATE 162 select NEED_DMA_MAP_STATE
164 select NEED_SG_DMA_LENGTH 163 select NEED_SG_DMA_LENGTH
165 select OF 164 select OF
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 70498a033cf5..b5ca9c50876d 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -27,6 +27,7 @@ config ARCH_BCM2835
27 bool "Broadcom BCM2835 family" 27 bool "Broadcom BCM2835 family"
28 select TIMER_OF 28 select TIMER_OF
29 select GPIOLIB 29 select GPIOLIB
30 select MFD_CORE
30 select PINCTRL 31 select PINCTRL
31 select PINCTRL_BCM2835 32 select PINCTRL_BCM2835
32 select ARM_AMBA 33 select ARM_AMBA
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index bb2045be8814..97aeb946ed5e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -321,7 +321,6 @@
321 nvidia,default-trim = <0x9>; 321 nvidia,default-trim = <0x9>;
322 nvidia,dqs-trim = <63>; 322 nvidia,dqs-trim = <63>;
323 mmc-hs400-1_8v; 323 mmc-hs400-1_8v;
324 supports-cqe;
325 status = "disabled"; 324 status = "disabled";
326 }; 325 };
327 326
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 61a0afb74e63..1ea684af99c4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -2,7 +2,7 @@
2/* 2/*
3 * Device Tree Source for the RZ/G2E (R8A774C0) SoC 3 * Device Tree Source for the RZ/G2E (R8A774C0) SoC
4 * 4 *
5 * Copyright (C) 2018 Renesas Electronics Corp. 5 * Copyright (C) 2018-2019 Renesas Electronics Corp.
6 */ 6 */
7 7
8#include <dt-bindings/clock/r8a774c0-cpg-mssr.h> 8#include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
@@ -1150,9 +1150,8 @@
1150 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, 1150 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
1151 <&scif_clk>; 1151 <&scif_clk>;
1152 clock-names = "fck", "brg_int", "scif_clk"; 1152 clock-names = "fck", "brg_int", "scif_clk";
1153 dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, 1153 dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
1154 <&dmac2 0x5b>, <&dmac2 0x5a>; 1154 dma-names = "tx", "rx";
1155 dma-names = "tx", "rx", "tx", "rx";
1156 power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; 1155 power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
1157 resets = <&cpg 202>; 1156 resets = <&cpg 202>;
1158 status = "disabled"; 1157 status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index a69faa60ea4d..d2ad665fe2d9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -2,7 +2,7 @@
2/* 2/*
3 * Device Tree Source for the R-Car E3 (R8A77990) SoC 3 * Device Tree Source for the R-Car E3 (R8A77990) SoC
4 * 4 *
5 * Copyright (C) 2018 Renesas Electronics Corp. 5 * Copyright (C) 2018-2019 Renesas Electronics Corp.
6 */ 6 */
7 7
8#include <dt-bindings/clock/r8a77990-cpg-mssr.h> 8#include <dt-bindings/clock/r8a77990-cpg-mssr.h>
@@ -1067,9 +1067,8 @@
1067 <&cpg CPG_CORE R8A77990_CLK_S3D1C>, 1067 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
1068 <&scif_clk>; 1068 <&scif_clk>;
1069 clock-names = "fck", "brg_int", "scif_clk"; 1069 clock-names = "fck", "brg_int", "scif_clk";
1070 dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, 1070 dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
1071 <&dmac2 0x5b>, <&dmac2 0x5a>; 1071 dma-names = "tx", "rx";
1072 dma-names = "tx", "rx", "tx", "rx";
1073 power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; 1072 power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
1074 resets = <&cpg 202>; 1073 resets = <&cpg 202>;
1075 status = "disabled"; 1074 status = "disabled";
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 2afb1338b48a..5f1437099b99 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -77,6 +77,7 @@
77#define ARM_CPU_IMP_QCOM 0x51 77#define ARM_CPU_IMP_QCOM 0x51
78#define ARM_CPU_IMP_NVIDIA 0x4E 78#define ARM_CPU_IMP_NVIDIA 0x4E
79#define ARM_CPU_IMP_FUJITSU 0x46 79#define ARM_CPU_IMP_FUJITSU 0x46
80#define ARM_CPU_IMP_HISI 0x48
80 81
81#define ARM_CPU_PART_AEM_V8 0xD0F 82#define ARM_CPU_PART_AEM_V8 0xD0F
82#define ARM_CPU_PART_FOUNDATION 0xD00 83#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -107,6 +108,8 @@
107 108
108#define FUJITSU_CPU_PART_A64FX 0x001 109#define FUJITSU_CPU_PART_A64FX 0x001
109 110
111#define HISI_CPU_PART_TSV110 0xD01
112
110#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) 113#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
111#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) 114#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
112#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) 115#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -126,10 +129,11 @@
126#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) 129#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
127#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) 130#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
128#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) 131#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
132#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
129 133
130/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ 134/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
131#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX 135#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
132#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_VARIANT(1)) 136#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0))
133#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) 137#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0)
134 138
135#ifndef __ASSEMBLY__ 139#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index b0742a16c6c9..ebeefcf835e8 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -445,6 +445,17 @@ static inline int kvm_read_guest_lock(struct kvm *kvm,
445 return ret; 445 return ret;
446} 446}
447 447
448static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
449 const void *data, unsigned long len)
450{
451 int srcu_idx = srcu_read_lock(&kvm->srcu);
452 int ret = kvm_write_guest(kvm, gpa, data, len);
453
454 srcu_read_unlock(&kvm->srcu, srcu_idx);
455
456 return ret;
457}
458
448#ifdef CONFIG_KVM_INDIRECT_VECTORS 459#ifdef CONFIG_KVM_INDIRECT_VECTORS
449/* 460/*
450 * EL2 vectors can be mapped and rerouted in a number of ways, 461 * EL2 vectors can be mapped and rerouted in a number of ways,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e24e94d28767..4061de10cea6 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -963,6 +963,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
963 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), 963 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
964 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), 964 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
965 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), 965 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
966 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
966 { /* sentinel */ } 967 { /* sentinel */ }
967 }; 968 };
968 char const *str = "command line option"; 969 char const *str = "command line option";
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 7fb6f3aa5ceb..7a679caf4585 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -91,8 +91,6 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
91int __kprobes arch_prepare_kprobe(struct kprobe *p) 91int __kprobes arch_prepare_kprobe(struct kprobe *p)
92{ 92{
93 unsigned long probe_addr = (unsigned long)p->addr; 93 unsigned long probe_addr = (unsigned long)p->addr;
94 extern char __start_rodata[];
95 extern char __end_rodata[];
96 94
97 if (probe_addr & 0x3) 95 if (probe_addr & 0x3)
98 return -EINVAL; 96 return -EINVAL;
@@ -100,10 +98,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
100 /* copy instruction */ 98 /* copy instruction */
101 p->opcode = le32_to_cpu(*p->addr); 99 p->opcode = le32_to_cpu(*p->addr);
102 100
103 if (in_exception_text(probe_addr)) 101 if (search_exception_tables(probe_addr))
104 return -EINVAL;
105 if (probe_addr >= (unsigned long) __start_rodata &&
106 probe_addr <= (unsigned long) __end_rodata)
107 return -EINVAL; 102 return -EINVAL;
108 103
109 /* decode instruction */ 104 /* decode instruction */
@@ -476,26 +471,37 @@ kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
476 return DBG_HOOK_HANDLED; 471 return DBG_HOOK_HANDLED;
477} 472}
478 473
479bool arch_within_kprobe_blacklist(unsigned long addr) 474/*
475 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
476 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
477 */
478int __init arch_populate_kprobe_blacklist(void)
480{ 479{
481 if ((addr >= (unsigned long)__kprobes_text_start && 480 int ret;
482 addr < (unsigned long)__kprobes_text_end) || 481
483 (addr >= (unsigned long)__entry_text_start && 482 ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
484 addr < (unsigned long)__entry_text_end) || 483 (unsigned long)__entry_text_end);
485 (addr >= (unsigned long)__idmap_text_start && 484 if (ret)
486 addr < (unsigned long)__idmap_text_end) || 485 return ret;
487 (addr >= (unsigned long)__hyp_text_start && 486 ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
488 addr < (unsigned long)__hyp_text_end) || 487 (unsigned long)__irqentry_text_end);
489 !!search_exception_tables(addr)) 488 if (ret)
490 return true; 489 return ret;
491 490 ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
492 if (!is_kernel_in_hyp_mode()) { 491 (unsigned long)__exception_text_end);
493 if ((addr >= (unsigned long)__hyp_idmap_text_start && 492 if (ret)
494 addr < (unsigned long)__hyp_idmap_text_end)) 493 return ret;
495 return true; 494 ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
496 } 495 (unsigned long)__idmap_text_end);
497 496 if (ret)
498 return false; 497 return ret;
498 ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
499 (unsigned long)__hyp_text_end);
500 if (ret || is_kernel_in_hyp_mode())
501 return ret;
502 ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
503 (unsigned long)__hyp_idmap_text_end);
504 return ret;
499} 505}
500 506
501void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) 507void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f8482fe5a190..413d566405d1 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
217 217
218 num_standard_resources = memblock.memory.cnt; 218 num_standard_resources = memblock.memory.cnt;
219 res_size = num_standard_resources * sizeof(*standard_resources); 219 res_size = num_standard_resources * sizeof(*standard_resources);
220 standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); 220 standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
221 if (!standard_resources) 221 if (!standard_resources)
222 panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); 222 panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
223 223
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 1a29f2695ff2..d908b5e9e949 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -143,6 +143,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
143 if (trace->nr_entries < trace->max_entries) 143 if (trace->nr_entries < trace->max_entries)
144 trace->entries[trace->nr_entries++] = ULONG_MAX; 144 trace->entries[trace->nr_entries++] = ULONG_MAX;
145} 145}
146EXPORT_SYMBOL_GPL(save_stack_trace_regs);
146 147
147static noinline void __save_stack_trace(struct task_struct *tsk, 148static noinline void __save_stack_trace(struct task_struct *tsk,
148 struct stack_trace *trace, unsigned int nosched) 149 struct stack_trace *trace, unsigned int nosched)
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f16a5f8ff2b4..e2a0500cd7a2 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -123,6 +123,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
123 int ret = -EINVAL; 123 int ret = -EINVAL;
124 bool loaded; 124 bool loaded;
125 125
126 /* Reset PMU outside of the non-preemptible section */
127 kvm_pmu_vcpu_reset(vcpu);
128
126 preempt_disable(); 129 preempt_disable();
127 loaded = (vcpu->cpu != -1); 130 loaded = (vcpu->cpu != -1);
128 if (loaded) 131 if (loaded)
@@ -170,9 +173,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
170 vcpu->arch.reset_state.reset = false; 173 vcpu->arch.reset_state.reset = false;
171 } 174 }
172 175
173 /* Reset PMU */
174 kvm_pmu_vcpu_reset(vcpu);
175
176 /* Default workaround setup is enabled (if supported) */ 176 /* Default workaround setup is enabled (if supported) */
177 if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL) 177 if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; 178 vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 63b4a1705182..249c9f6f26dc 100644
--- a/arch/c6x/include/asm/Kbuild
+++ b/arch/c6x/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_work.h
19generic-y += kdebug.h 19generic-y += kdebug.h
20generic-y += kmap_types.h 20generic-y += kmap_types.h
21generic-y += kprobes.h 21generic-y += kprobes.h
22generic-y += kvm_para.h
22generic-y += local.h 23generic-y += local.h
23generic-y += mcs_spinlock.h 24generic-y += mcs_spinlock.h
24generic-y += mm-arch-hooks.h 25generic-y += mm-arch-hooks.h
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 3e7c8ecf151e..e3dead402e5f 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += irq_work.h
23generic-y += kdebug.h 23generic-y += kdebug.h
24generic-y += kmap_types.h 24generic-y += kmap_types.h
25generic-y += kprobes.h 25generic-y += kprobes.h
26generic-y += kvm_para.h
26generic-y += linkage.h 27generic-y += linkage.h
27generic-y += local.h 28generic-y += local.h
28generic-y += local64.h 29generic-y += local64.h
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index b25fd42aa0f4..d046e8ccdf78 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -19,6 +19,7 @@ generic-y += irq_work.h
19generic-y += kdebug.h 19generic-y += kdebug.h
20generic-y += kmap_types.h 20generic-y += kmap_types.h
21generic-y += kprobes.h 21generic-y += kprobes.h
22generic-y += kvm_para.h
22generic-y += local.h 23generic-y += local.h
23generic-y += local64.h 24generic-y += local64.h
24generic-y += mcs_spinlock.h 25generic-y += mcs_spinlock.h
diff --git a/arch/hexagon/include/uapi/asm/kvm_para.h b/arch/hexagon/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/hexagon/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 43e21fe3499c..11f191689c9e 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -2,6 +2,7 @@ generated-y += syscall_table.h
2generic-y += compat.h 2generic-y += compat.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += irq_work.h 4generic-y += irq_work.h
5generic-y += kvm_para.h
5generic-y += mcs_spinlock.h 6generic-y += mcs_spinlock.h
6generic-y += mm-arch-hooks.h 7generic-y += mm-arch-hooks.h
7generic-y += preempt.h 8generic-y += preempt.h
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 20018cb883a9..62a9522af51e 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generated-y += unistd_64.h generated-y += unistd_64.h
2generic-y += kvm_para.h
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 95f8f631c4df..2c359d9e80f6 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += irq_work.h
13generic-y += kdebug.h 13generic-y += kdebug.h
14generic-y += kmap_types.h 14generic-y += kmap_types.h
15generic-y += kprobes.h 15generic-y += kprobes.h
16generic-y += kvm_para.h
16generic-y += local.h 17generic-y += local.h
17generic-y += local64.h 18generic-y += local64.h
18generic-y += mcs_spinlock.h 19generic-y += mcs_spinlock.h
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 8a7ad40be463..7417847dc438 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generated-y += unistd_32.h generated-y += unistd_32.h
2generic-y += kvm_para.h
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 791cc8d54d0a..1a8285c3f693 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -17,6 +17,7 @@ generic-y += irq_work.h
17generic-y += kdebug.h 17generic-y += kdebug.h
18generic-y += kmap_types.h 18generic-y += kmap_types.h
19generic-y += kprobes.h 19generic-y += kprobes.h
20generic-y += kvm_para.h
20generic-y += linkage.h 21generic-y += linkage.h
21generic-y += local.h 22generic-y += local.h
22generic-y += local64.h 23generic-y += local64.h
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 3ce84fbb2678..13f59631c576 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -1,3 +1,2 @@
1generated-y += unistd_32.h 1generated-y += unistd_32.h
2generic-y += kvm_para.h
3generic-y += ucontext.h 2generic-y += ucontext.h
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
index 46eddbec8d9f..0ab95dd431b3 100644
--- a/arch/mips/bcm47xx/workarounds.c
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -24,6 +24,7 @@ void __init bcm47xx_workarounds(void)
24 case BCM47XX_BOARD_NETGEAR_WNR3500L: 24 case BCM47XX_BOARD_NETGEAR_WNR3500L:
25 bcm47xx_workarounds_enable_usb_power(12); 25 bcm47xx_workarounds_enable_usb_power(12);
26 break; 26 break;
27 case BCM47XX_BOARD_NETGEAR_WNDR3400V2:
27 case BCM47XX_BOARD_NETGEAR_WNDR3400_V3: 28 case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
28 bcm47xx_workarounds_enable_usb_power(21); 29 bcm47xx_workarounds_enable_usb_power(21);
29 break; 30 break;
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e77672539e8e..e4456e450f94 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -21,15 +21,15 @@
21#endif 21#endif
22 22
23#ifdef CONFIG_CPU_MICROMIPS 23#ifdef CONFIG_CPU_MICROMIPS
24#define NOP_INSN "nop32" 24#define B_INSN "b32"
25#else 25#else
26#define NOP_INSN "nop" 26#define B_INSN "b"
27#endif 27#endif
28 28
29static __always_inline bool arch_static_branch(struct static_key *key, bool branch) 29static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
30{ 30{
31 asm_volatile_goto("1:\t" NOP_INSN "\n\t" 31 asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
32 "nop\n\t" 32 "2:\tnop\n\t"
33 ".pushsection __jump_table, \"aw\"\n\t" 33 ".pushsection __jump_table, \"aw\"\n\t"
34 WORD_INSN " 1b, %l[l_yes], %0\n\t" 34 WORD_INSN " 1b, %l[l_yes], %0\n\t"
35 ".popsection\n\t" 35 ".popsection\n\t"
diff --git a/arch/mips/include/uapi/asm/posix_types.h b/arch/mips/include/uapi/asm/posix_types.h
index 6aa49c10f88f..f0ccb5b90ce9 100644
--- a/arch/mips/include/uapi/asm/posix_types.h
+++ b/arch/mips/include/uapi/asm/posix_types.h
@@ -21,13 +21,6 @@
21typedef long __kernel_daddr_t; 21typedef long __kernel_daddr_t;
22#define __kernel_daddr_t __kernel_daddr_t 22#define __kernel_daddr_t __kernel_daddr_t
23 23
24#if (_MIPS_SZLONG == 32)
25typedef struct {
26 long val[2];
27} __kernel_fsid_t;
28#define __kernel_fsid_t __kernel_fsid_t
29#endif
30
31#include <asm-generic/posix_types.h> 24#include <asm-generic/posix_types.h>
32 25
33#endif /* _ASM_POSIX_TYPES_H */ 26#endif /* _ASM_POSIX_TYPES_H */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index cb7e9ed7a453..33ee0d18fb0a 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -140,6 +140,13 @@ SECTIONS
140 PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) 140 PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
141#endif 141#endif
142 142
143#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
144 .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
145 *(.appended_dtb)
146 KEEP(*(.appended_dtb))
147 }
148#endif
149
143#ifdef CONFIG_RELOCATABLE 150#ifdef CONFIG_RELOCATABLE
144 . = ALIGN(4); 151 . = ALIGN(4);
145 152
@@ -164,11 +171,6 @@ SECTIONS
164 __appended_dtb = .; 171 __appended_dtb = .;
165 /* leave space for appended DTB */ 172 /* leave space for appended DTB */
166 . += 0x100000; 173 . += 0x100000;
167#elif defined(CONFIG_MIPS_ELF_APPENDED_DTB)
168 .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
169 *(.appended_dtb)
170 KEEP(*(.appended_dtb))
171 }
172#endif 174#endif
173 /* 175 /*
174 * Align to 64K in attempt to eliminate holes before the 176 * Align to 64K in attempt to eliminate holes before the
diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
index 9e33e45aa17c..b213cecb8e3a 100644
--- a/arch/mips/loongson64/lemote-2f/irq.c
+++ b/arch/mips/loongson64/lemote-2f/irq.c
@@ -103,7 +103,7 @@ static struct irqaction ip6_irqaction = {
103static struct irqaction cascade_irqaction = { 103static struct irqaction cascade_irqaction = {
104 .handler = no_action, 104 .handler = no_action,
105 .name = "cascade", 105 .name = "cascade",
106 .flags = IRQF_NO_THREAD, 106 .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
107}; 107};
108 108
109void __init mach_init_irq(void) 109void __init mach_init_irq(void)
diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild
index 8fde4fa2c34f..88a667d12aaa 100644
--- a/arch/nios2/include/asm/Kbuild
+++ b/arch/nios2/include/asm/Kbuild
@@ -23,6 +23,7 @@ generic-y += irq_work.h
23generic-y += kdebug.h 23generic-y += kdebug.h
24generic-y += kmap_types.h 24generic-y += kmap_types.h
25generic-y += kprobes.h 25generic-y += kprobes.h
26generic-y += kvm_para.h
26generic-y += local.h 27generic-y += local.h
27generic-y += mcs_spinlock.h 28generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h 29generic-y += mm-arch-hooks.h
diff --git a/arch/nios2/include/uapi/asm/Kbuild b/arch/nios2/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/nios2/include/uapi/asm/Kbuild
+++ b/arch/nios2/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index 5a73e2956ac4..22aa97136c01 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -20,6 +20,7 @@ generic-y += irq_work.h
20generic-y += kdebug.h 20generic-y += kdebug.h
21generic-y += kmap_types.h 21generic-y += kmap_types.h
22generic-y += kprobes.h 22generic-y += kprobes.h
23generic-y += kvm_para.h
23generic-y += local.h 24generic-y += local.h
24generic-y += mcs_spinlock.h 25generic-y += mcs_spinlock.h
25generic-y += mm-arch-hooks.h 26generic-y += mm-arch-hooks.h
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 6f49e77d82a2..9bcd0c903dbb 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -11,6 +11,7 @@ generic-y += irq_regs.h
11generic-y += irq_work.h 11generic-y += irq_work.h
12generic-y += kdebug.h 12generic-y += kdebug.h
13generic-y += kprobes.h 13generic-y += kprobes.h
14generic-y += kvm_para.h
14generic-y += local.h 15generic-y += local.h
15generic-y += local64.h 16generic-y += local64.h
16generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index 22fdbd08cdc8..2bd5b392277c 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -1,3 +1,2 @@
1generated-y += unistd_32.h 1generated-y += unistd_32.h
2generated-y += unistd_64.h 2generated-y += unistd_64.h
3generic-y += kvm_para.h
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index d34ad1657d7b..598cdcdd1355 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -352,7 +352,7 @@ static inline bool strict_kernel_rwx_enabled(void)
352#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \ 352#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
353 defined (CONFIG_PPC_64K_PAGES) 353 defined (CONFIG_PPC_64K_PAGES)
354#define MAX_PHYSMEM_BITS 51 354#define MAX_PHYSMEM_BITS 51
355#else 355#elif defined(CONFIG_SPARSEMEM)
356#define MAX_PHYSMEM_BITS 46 356#define MAX_PHYSMEM_BITS 46
357#endif 357#endif
358 358
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c5698a523bb1..23f7ed796f38 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -302,6 +302,7 @@
302/* Misc instructions for BPF compiler */ 302/* Misc instructions for BPF compiler */
303#define PPC_INST_LBZ 0x88000000 303#define PPC_INST_LBZ 0x88000000
304#define PPC_INST_LD 0xe8000000 304#define PPC_INST_LD 0xe8000000
305#define PPC_INST_LDX 0x7c00002a
305#define PPC_INST_LHZ 0xa0000000 306#define PPC_INST_LHZ 0xa0000000
306#define PPC_INST_LWZ 0x80000000 307#define PPC_INST_LWZ 0x80000000
307#define PPC_INST_LHBRX 0x7c00062c 308#define PPC_INST_LHBRX 0x7c00062c
@@ -309,6 +310,7 @@
309#define PPC_INST_STB 0x98000000 310#define PPC_INST_STB 0x98000000
310#define PPC_INST_STH 0xb0000000 311#define PPC_INST_STH 0xb0000000
311#define PPC_INST_STD 0xf8000000 312#define PPC_INST_STD 0xf8000000
313#define PPC_INST_STDX 0x7c00012a
312#define PPC_INST_STDU 0xf8000001 314#define PPC_INST_STDU 0xf8000001
313#define PPC_INST_STW 0x90000000 315#define PPC_INST_STW 0x90000000
314#define PPC_INST_STWU 0x94000000 316#define PPC_INST_STWU 0x94000000
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 1afe90ade595..bbc06bd72b1f 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -82,10 +82,10 @@ struct vdso_data {
82 __u32 icache_block_size; /* L1 i-cache block size */ 82 __u32 icache_block_size; /* L1 i-cache block size */
83 __u32 dcache_log_block_size; /* L1 d-cache log block size */ 83 __u32 dcache_log_block_size; /* L1 d-cache log block size */
84 __u32 icache_log_block_size; /* L1 i-cache log block size */ 84 __u32 icache_log_block_size; /* L1 i-cache log block size */
85 __s32 wtom_clock_sec; /* Wall to monotonic clock */ 85 __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
86 __s32 wtom_clock_nsec; 86 __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
87 struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */ 87 __s64 wtom_clock_sec; /* Wall to monotonic clock sec */
88 __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */ 88 struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
89 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */ 89 __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
90 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */ 90 __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
91}; 91};
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 6f1c11e0691f..7534ecff5e92 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -24,9 +24,6 @@ BEGIN_MMU_FTR_SECTION
24 li r10,0 24 li r10,0
25 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ 25 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
26END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) 26END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
27 lis r10, (swapper_pg_dir - PAGE_OFFSET)@h
28 ori r10, r10, (swapper_pg_dir - PAGE_OFFSET)@l
29 mtspr SPRN_SPRG_PGDIR, r10
30 27
31BEGIN_FTR_SECTION 28BEGIN_FTR_SECTION
32 bl __init_fpu_registers 29 bl __init_fpu_registers
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index ce6a972f2584..48051c8977c5 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -855,6 +855,9 @@ __secondary_start:
855 li r3,0 855 li r3,0
856 stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ 856 stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
857#endif 857#endif
858 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
859 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
860 mtspr SPRN_SPRG_PGDIR, r4
858 861
859 /* enable MMU and jump to start_secondary */ 862 /* enable MMU and jump to start_secondary */
860 li r4,MSR_KERNEL 863 li r4,MSR_KERNEL
@@ -942,6 +945,9 @@ start_here:
942 li r3,0 945 li r3,0
943 stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ 946 stw r3, RTAS_SP(r4) /* 0 => not in RTAS */
944#endif 947#endif
948 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
949 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
950 mtspr SPRN_SPRG_PGDIR, r4
945 951
946 /* stack */ 952 /* stack */
947 lis r1,init_thread_union@ha 953 lis r1,init_thread_union@ha
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 9b8631533e02..b33bafb8fcea 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
190 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); 190 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
191 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); 191 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
192 192
193 if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) { 193 if (bcs || ccd) {
194 bool comma = false;
195 seq_buf_printf(&s, "Mitigation: "); 194 seq_buf_printf(&s, "Mitigation: ");
196 195
197 if (bcs) { 196 if (bcs)
198 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); 197 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
199 comma = true;
200 }
201 198
202 if (ccd) { 199 if (bcs && ccd)
203 if (comma)
204 seq_buf_printf(&s, ", ");
205 seq_buf_printf(&s, "Indirect branch cache disabled");
206 comma = true;
207 }
208
209 if (comma)
210 seq_buf_printf(&s, ", "); 200 seq_buf_printf(&s, ", ");
211 201
212 seq_buf_printf(&s, "Software count cache flush"); 202 if (ccd)
203 seq_buf_printf(&s, "Indirect branch cache disabled");
204 } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
205 seq_buf_printf(&s, "Mitigation: Software count cache flush");
213 206
214 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW) 207 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
215 seq_buf_printf(&s, "(hardware accelerated)"); 208 seq_buf_printf(&s, " (hardware accelerated)");
216 } else if (btb_flush_enabled) { 209 } else if (btb_flush_enabled) {
217 seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); 210 seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
218 } else { 211 } else {
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index a4ed9edfd5f0..1f324c28705b 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -92,7 +92,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
92 * At this point, r4,r5 contain our sec/nsec values. 92 * At this point, r4,r5 contain our sec/nsec values.
93 */ 93 */
94 94
95 lwa r6,WTOM_CLOCK_SEC(r3) 95 ld r6,WTOM_CLOCK_SEC(r3)
96 lwa r9,WTOM_CLOCK_NSEC(r3) 96 lwa r9,WTOM_CLOCK_NSEC(r3)
97 97
98 /* We now have our result in r6,r9. We create a fake dependency 98 /* We now have our result in r6,r9. We create a fake dependency
@@ -125,7 +125,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
125 bne cr6,75f 125 bne cr6,75f
126 126
127 /* CLOCK_MONOTONIC_COARSE */ 127 /* CLOCK_MONOTONIC_COARSE */
128 lwa r6,WTOM_CLOCK_SEC(r3) 128 ld r6,WTOM_CLOCK_SEC(r3)
129 lwa r9,WTOM_CLOCK_NSEC(r3) 129 lwa r9,WTOM_CLOCK_NSEC(r3)
130 130
131 /* check if counter has updated */ 131 /* check if counter has updated */
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 844d8e774492..b7f6f6e0b6e8 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
215 beq .Lzero 215 beq .Lzero
216 216
217.Lcmp_rest_lt8bytes: 217.Lcmp_rest_lt8bytes:
218 /* Here we have only less than 8 bytes to compare with. at least s1 218 /*
219 * Address is aligned with 8 bytes. 219 * Here we have less than 8 bytes to compare. At least s1 is aligned to
220 * The next double words are load and shift right with appropriate 220 * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
221 * bits. 221 * page boundary, otherwise we might read past the end of the buffer and
222 * trigger a page fault. We use 4K as the conservative minimum page
223 * size. If we detect that case we go to the byte-by-byte loop.
224 *
225 * Otherwise the next double word is loaded from s1 and s2, and shifted
226 * right to compare the appropriate bits.
222 */ 227 */
228 clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
229 cmpdi r6,0xff8
230 bgt .Lshort
231
223 subfic r6,r5,8 232 subfic r6,r5,8
224 slwi r6,r6,3 233 slwi r6,r6,3
225 LD rA,0,r3 234 LD rA,0,r3
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 1f13494efb2b..a6c491f18a04 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -70,12 +70,12 @@ _GLOBAL(hash_page)
70 lis r0,KERNELBASE@h /* check if kernel address */ 70 lis r0,KERNELBASE@h /* check if kernel address */
71 cmplw 0,r4,r0 71 cmplw 0,r4,r0
72 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */ 72 ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
73 mfspr r5, SPRN_SPRG_PGDIR /* virt page-table root */ 73 mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
74 blt+ 112f /* assume user more likely */ 74 blt+ 112f /* assume user more likely */
75 lis r5,swapper_pg_dir@ha /* if kernel address, use */ 75 lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
76 addi r5,r5,swapper_pg_dir@l /* kernel page table */ 76 addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
77 rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */ 77 rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
78112: tophys(r5, r5) 78112:
79#ifndef CONFIG_PTE_64BIT 79#ifndef CONFIG_PTE_64BIT
80 rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */ 80 rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
81 lwz r8,0(r5) /* get pmd entry */ 81 lwz r8,0(r5) /* get pmd entry */
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 549e9490ff2a..dcac37745b05 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -51,6 +51,8 @@
51#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) 51#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
52#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ 52#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
53 ___PPC_RA(base) | ((i) & 0xfffc)) 53 ___PPC_RA(base) | ((i) & 0xfffc))
54#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
55 ___PPC_RA(base) | ___PPC_RB(b))
54#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ 56#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
55 ___PPC_RA(base) | ((i) & 0xfffc)) 57 ___PPC_RA(base) | ((i) & 0xfffc))
56#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ 58#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
@@ -65,7 +67,9 @@
65#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ 67#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
66 ___PPC_RA(base) | IMM_L(i)) 68 ___PPC_RA(base) | IMM_L(i))
67#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \ 69#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
68 ___PPC_RA(base) | IMM_L(i)) 70 ___PPC_RA(base) | ((i) & 0xfffc))
71#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
72 ___PPC_RA(base) | ___PPC_RB(b))
69#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \ 73#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
70 ___PPC_RA(base) | IMM_L(i)) 74 ___PPC_RA(base) | IMM_L(i))
71#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \ 75#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
@@ -85,17 +89,6 @@
85 ___PPC_RA(a) | ___PPC_RB(b)) 89 ___PPC_RA(a) | ___PPC_RB(b))
86#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \ 90#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
87 ___PPC_RA(a) | ___PPC_RB(b)) 91 ___PPC_RA(a) | ___PPC_RB(b))
88
89#ifdef CONFIG_PPC64
90#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
91#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
92#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
93#else
94#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
95#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
96#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
97#endif
98
99#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) 92#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
100#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) 93#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
101#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \ 94#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
index dc50a8d4b3b9..21744d8aa053 100644
--- a/arch/powerpc/net/bpf_jit32.h
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
122#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i) 122#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
123#endif 123#endif
124 124
125#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
126#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
127#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
128
125#define SEEN_DATAREF 0x10000 /* might call external helpers */ 129#define SEEN_DATAREF 0x10000 /* might call external helpers */
126#define SEEN_XREG 0x20000 /* X reg is used */ 130#define SEEN_XREG 0x20000 /* X reg is used */
127#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary 131#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 3609be4692b3..47f441f351a6 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -68,6 +68,26 @@ static const int b2p[] = {
68/* PPC NVR range -- update this if we ever use NVRs below r27 */ 68/* PPC NVR range -- update this if we ever use NVRs below r27 */
69#define BPF_PPC_NVR_MIN 27 69#define BPF_PPC_NVR_MIN 27
70 70
71/*
72 * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
73 * so ensure that it isn't in use already.
74 */
75#define PPC_BPF_LL(r, base, i) do { \
76 if ((i) % 4) { \
77 PPC_LI(b2p[TMP_REG_2], (i)); \
78 PPC_LDX(r, base, b2p[TMP_REG_2]); \
79 } else \
80 PPC_LD(r, base, i); \
81 } while(0)
82#define PPC_BPF_STL(r, base, i) do { \
83 if ((i) % 4) { \
84 PPC_LI(b2p[TMP_REG_2], (i)); \
85 PPC_STDX(r, base, b2p[TMP_REG_2]); \
86 } else \
87 PPC_STD(r, base, i); \
88 } while(0)
89#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
90
71#define SEEN_FUNC 0x1000 /* might call external helpers */ 91#define SEEN_FUNC 0x1000 /* might call external helpers */
72#define SEEN_STACK 0x2000 /* uses BPF stack */ 92#define SEEN_STACK 0x2000 /* uses BPF stack */
73#define SEEN_TAILCALL 0x4000 /* uses tail calls */ 93#define SEEN_TAILCALL 0x4000 /* uses tail calls */
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 4194d3cfb60c..21a1dcd4b156 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
252 * if (tail_call_cnt > MAX_TAIL_CALL_CNT) 252 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
253 * goto out; 253 * goto out;
254 */ 254 */
255 PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)); 255 PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
256 PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT); 256 PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
257 PPC_BCC(COND_GT, out); 257 PPC_BCC(COND_GT, out);
258 258
@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
265 /* prog = array->ptrs[index]; */ 265 /* prog = array->ptrs[index]; */
266 PPC_MULI(b2p[TMP_REG_1], b2p_index, 8); 266 PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
267 PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array); 267 PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
268 PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)); 268 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
269 269
270 /* 270 /*
271 * if (prog == NULL) 271 * if (prog == NULL)
@@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
275 PPC_BCC(COND_EQ, out); 275 PPC_BCC(COND_EQ, out);
276 276
277 /* goto *(prog->bpf_func + prologue_size); */ 277 /* goto *(prog->bpf_func + prologue_size); */
278 PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)); 278 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
279#ifdef PPC64_ELF_ABI_v1 279#ifdef PPC64_ELF_ABI_v1
280 /* skip past the function descriptor */ 280 /* skip past the function descriptor */
281 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 281 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -606,7 +606,7 @@ bpf_alu32_trunc:
606 * the instructions generated will remain the 606 * the instructions generated will remain the
607 * same across all passes 607 * same across all passes
608 */ 608 */
609 PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx)); 609 PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
610 PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)); 610 PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
611 PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]); 611 PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
612 break; 612 break;
@@ -662,7 +662,7 @@ emit_clear:
662 PPC_LI32(b2p[TMP_REG_1], imm); 662 PPC_LI32(b2p[TMP_REG_1], imm);
663 src_reg = b2p[TMP_REG_1]; 663 src_reg = b2p[TMP_REG_1];
664 } 664 }
665 PPC_STD(src_reg, dst_reg, off); 665 PPC_BPF_STL(src_reg, dst_reg, off);
666 break; 666 break;
667 667
668 /* 668 /*
@@ -709,7 +709,7 @@ emit_clear:
709 break; 709 break;
710 /* dst = *(u64 *)(ul) (src + off) */ 710 /* dst = *(u64 *)(ul) (src + off) */
711 case BPF_LDX | BPF_MEM | BPF_DW: 711 case BPF_LDX | BPF_MEM | BPF_DW:
712 PPC_LD(dst_reg, src_reg, off); 712 PPC_BPF_LL(dst_reg, src_reg, off);
713 break; 713 break;
714 714
715 /* 715 /*
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index 6ed22127391b..921f12182f3e 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
77 77
78 ret = drc.drc_index_start + (thread_index * drc.sequential_inc); 78 ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
79 } else { 79 } else {
80 const __be32 *indexes; 80 u32 nr_drc_indexes, thread_drc_index;
81
82 indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
83 if (indexes == NULL)
84 goto err_of_node_put;
85 81
86 /* 82 /*
87 * The first element indexes[0] is the number of drc_indexes 83 * The first element of ibm,drc-indexes array is the
88 * returned in the list. Hence thread_index+1 will get the 84 * number of drc_indexes returned in the list. Hence
89 * drc_index corresponding to core number thread_index. 85 * thread_index+1 will get the drc_index corresponding
86 * to core number thread_index.
90 */ 87 */
91 ret = indexes[thread_index + 1]; 88 rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
89 0, &nr_drc_indexes);
90 if (rc)
91 goto err_of_node_put;
92
93 WARN_ON_ONCE(thread_index > nr_drc_indexes);
94 rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
95 thread_index + 1,
96 &thread_drc_index);
97 if (rc)
98 goto err_of_node_put;
99
100 ret = thread_drc_index;
92 } 101 }
93 102
94 rc = 0; 103 rc = 0;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index d97d52772789..452dcfd7e5dd 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
550 "UE", 550 "UE",
551 "SLB", 551 "SLB",
552 "ERAT", 552 "ERAT",
553 "Unknown",
553 "TLB", 554 "TLB",
554 "D-Cache", 555 "D-Cache",
555 "Unknown", 556 "Unknown",
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 1a6a7092d942..e94a0a28b5eb 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
360 return reg1; 360 return reg1;
361} 361}
362 362
363/*
364 * Interface to tell the AP bus code that a configuration
365 * change has happened. The bus code should at least do
366 * an ap bus resource rescan.
367 */
368#if IS_ENABLED(CONFIG_ZCRYPT)
369void ap_bus_cfg_chg(void);
370#else
371static inline void ap_bus_cfg_chg(void){};
372#endif
373
363#endif /* _ASM_S390_AP_H_ */ 374#endif /* _ASM_S390_AP_H_ */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 7d22a474a040..f74639a05f0f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -252,11 +252,14 @@ do { \
252 252
253/* 253/*
254 * Cache aliasing on the latest machines calls for a mapping granularity 254 * Cache aliasing on the latest machines calls for a mapping granularity
255 * of 512KB. For 64-bit processes use a 512KB alignment and a randomization 255 * of 512KB for the anonymous mapping base. For 64-bit processes use a
256 * of up to 1GB. For 31-bit processes the virtual address space is limited, 256 * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
257 * use no alignment and limit the randomization to 8MB. 257 * the virtual address space is limited, use no alignment and limit the
258 * randomization to 8MB.
259 * For the additional randomization of the program break use 32MB for
260 * 64-bit and 8MB for 31-bit.
258 */ 261 */
259#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ffffUL) 262#define BRK_RND_MASK (is_compat_task() ? 0x7ffUL : 0x1fffUL)
260#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL) 263#define MMAP_RND_MASK (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
261#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL) 264#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
262#define STACK_RND_MASK MMAP_RND_MASK 265#define STACK_RND_MASK MMAP_RND_MASK
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index cc0947e08b6f..5b9f10b1e55d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -91,52 +91,53 @@ struct lowcore {
91 __u64 hardirq_timer; /* 0x02e8 */ 91 __u64 hardirq_timer; /* 0x02e8 */
92 __u64 softirq_timer; /* 0x02f0 */ 92 __u64 softirq_timer; /* 0x02f0 */
93 __u64 steal_timer; /* 0x02f8 */ 93 __u64 steal_timer; /* 0x02f8 */
94 __u64 last_update_timer; /* 0x0300 */ 94 __u64 avg_steal_timer; /* 0x0300 */
95 __u64 last_update_clock; /* 0x0308 */ 95 __u64 last_update_timer; /* 0x0308 */
96 __u64 int_clock; /* 0x0310 */ 96 __u64 last_update_clock; /* 0x0310 */
97 __u64 mcck_clock; /* 0x0318 */ 97 __u64 int_clock; /* 0x0318*/
98 __u64 clock_comparator; /* 0x0320 */ 98 __u64 mcck_clock; /* 0x0320 */
99 __u64 boot_clock[2]; /* 0x0328 */ 99 __u64 clock_comparator; /* 0x0328 */
100 __u64 boot_clock[2]; /* 0x0330 */
100 101
101 /* Current process. */ 102 /* Current process. */
102 __u64 current_task; /* 0x0338 */ 103 __u64 current_task; /* 0x0340 */
103 __u64 kernel_stack; /* 0x0340 */ 104 __u64 kernel_stack; /* 0x0348 */
104 105
105 /* Interrupt, DAT-off and restartstack. */ 106 /* Interrupt, DAT-off and restartstack. */
106 __u64 async_stack; /* 0x0348 */ 107 __u64 async_stack; /* 0x0350 */
107 __u64 nodat_stack; /* 0x0350 */ 108 __u64 nodat_stack; /* 0x0358 */
108 __u64 restart_stack; /* 0x0358 */ 109 __u64 restart_stack; /* 0x0360 */
109 110
110 /* Restart function and parameter. */ 111 /* Restart function and parameter. */
111 __u64 restart_fn; /* 0x0360 */ 112 __u64 restart_fn; /* 0x0368 */
112 __u64 restart_data; /* 0x0368 */ 113 __u64 restart_data; /* 0x0370 */
113 __u64 restart_source; /* 0x0370 */ 114 __u64 restart_source; /* 0x0378 */
114 115
115 /* Address space pointer. */ 116 /* Address space pointer. */
116 __u64 kernel_asce; /* 0x0378 */ 117 __u64 kernel_asce; /* 0x0380 */
117 __u64 user_asce; /* 0x0380 */ 118 __u64 user_asce; /* 0x0388 */
118 __u64 vdso_asce; /* 0x0388 */ 119 __u64 vdso_asce; /* 0x0390 */
119 120
120 /* 121 /*
121 * The lpp and current_pid fields form a 122 * The lpp and current_pid fields form a
122 * 64-bit value that is set as program 123 * 64-bit value that is set as program
123 * parameter with the LPP instruction. 124 * parameter with the LPP instruction.
124 */ 125 */
125 __u32 lpp; /* 0x0390 */ 126 __u32 lpp; /* 0x0398 */
126 __u32 current_pid; /* 0x0394 */ 127 __u32 current_pid; /* 0x039c */
127 128
128 /* SMP info area */ 129 /* SMP info area */
129 __u32 cpu_nr; /* 0x0398 */ 130 __u32 cpu_nr; /* 0x03a0 */
130 __u32 softirq_pending; /* 0x039c */ 131 __u32 softirq_pending; /* 0x03a4 */
131 __u32 preempt_count; /* 0x03a0 */ 132 __u32 preempt_count; /* 0x03a8 */
132 __u32 spinlock_lockval; /* 0x03a4 */ 133 __u32 spinlock_lockval; /* 0x03ac */
133 __u32 spinlock_index; /* 0x03a8 */ 134 __u32 spinlock_index; /* 0x03b0 */
134 __u32 fpu_flags; /* 0x03ac */ 135 __u32 fpu_flags; /* 0x03b4 */
135 __u64 percpu_offset; /* 0x03b0 */ 136 __u64 percpu_offset; /* 0x03b8 */
136 __u64 vdso_per_cpu_data; /* 0x03b8 */ 137 __u64 vdso_per_cpu_data; /* 0x03c0 */
137 __u64 machine_flags; /* 0x03c0 */ 138 __u64 machine_flags; /* 0x03c8 */
138 __u64 gmap; /* 0x03c8 */ 139 __u64 gmap; /* 0x03d0 */
139 __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */ 140 __u8 pad_0x03d8[0x0400-0x03d8]; /* 0x03d8 */
140 141
141 /* br %r1 trampoline */ 142 /* br %r1 trampoline */
142 __u16 br_r1_trampoline; /* 0x0400 */ 143 __u16 br_r1_trampoline; /* 0x0400 */
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index c6fad208c2fa..b6854812d2ed 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
196 */ 196 */
197static int __hw_perf_event_init(struct perf_event *event) 197static int __hw_perf_event_init(struct perf_event *event)
198{ 198{
199 struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
200 struct perf_event_attr *attr = &event->attr; 199 struct perf_event_attr *attr = &event->attr;
200 struct cpu_cf_events *cpuhw;
201 enum cpumf_ctr_set i; 201 enum cpumf_ctr_set i;
202 int err = 0; 202 int err = 0;
203 203
204 debug_sprintf_event(cf_diag_dbg, 5, 204 debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
205 "%s event %p cpu %d authorized %#x\n", __func__, 205 event, event->cpu);
206 event, event->cpu, cpuhw->info.auth_ctl);
207 206
208 event->hw.config = attr->config; 207 event->hw.config = attr->config;
209 event->hw.config_base = 0; 208 event->hw.config_base = 0;
210 local64_set(&event->count, 0);
211 209
212 /* Add all authorized counter sets to config_base */ 210 /* Add all authorized counter sets to config_base. The
211 * the hardware init function is either called per-cpu or just once
212 * for all CPUS (event->cpu == -1). This depends on the whether
213 * counting is started for all CPUs or on a per workload base where
214 * the perf event moves from one CPU to another CPU.
215 * Checking the authorization on any CPU is fine as the hardware
216 * applies the same authorization settings to all CPUs.
217 */
218 cpuhw = &get_cpu_var(cpu_cf_events);
213 for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) 219 for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
214 if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) 220 if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
215 event->hw.config_base |= cpumf_ctr_ctl[i]; 221 event->hw.config_base |= cpumf_ctr_ctl[i];
222 put_cpu_var(cpu_cf_events);
216 223
217 /* No authorized counter sets, nothing to count/sample */ 224 /* No authorized counter sets, nothing to count/sample */
218 if (!event->hw.config_base) { 225 if (!event->hw.config_base) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3fe1c77c361b..bd197baf1dc3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
266 lc->percpu_offset = __per_cpu_offset[cpu]; 266 lc->percpu_offset = __per_cpu_offset[cpu];
267 lc->kernel_asce = S390_lowcore.kernel_asce; 267 lc->kernel_asce = S390_lowcore.kernel_asce;
268 lc->machine_flags = S390_lowcore.machine_flags; 268 lc->machine_flags = S390_lowcore.machine_flags;
269 lc->user_timer = lc->system_timer = lc->steal_timer = 0; 269 lc->user_timer = lc->system_timer =
270 lc->steal_timer = lc->avg_steal_timer = 0;
270 __ctl_store(lc->cregs_save_area, 0, 15); 271 __ctl_store(lc->cregs_save_area, 0, 15);
271 save_access_regs((unsigned int *) lc->access_regs_save_area); 272 save_access_regs((unsigned int *) lc->access_regs_save_area);
272 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 273 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 98f850e00008..a69a0911ed0e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
124 */ 124 */
125static int do_account_vtime(struct task_struct *tsk) 125static int do_account_vtime(struct task_struct *tsk)
126{ 126{
127 u64 timer, clock, user, guest, system, hardirq, softirq, steal; 127 u64 timer, clock, user, guest, system, hardirq, softirq;
128 128
129 timer = S390_lowcore.last_update_timer; 129 timer = S390_lowcore.last_update_timer;
130 clock = S390_lowcore.last_update_clock; 130 clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
182 if (softirq) 182 if (softirq)
183 account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ); 183 account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
184 184
185 steal = S390_lowcore.steal_timer;
186 if ((s64) steal > 0) {
187 S390_lowcore.steal_timer = 0;
188 account_steal_time(cputime_to_nsecs(steal));
189 }
190
191 return virt_timer_forward(user + guest + system + hardirq + softirq); 185 return virt_timer_forward(user + guest + system + hardirq + softirq);
192} 186}
193 187
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
213 */ 207 */
214void vtime_flush(struct task_struct *tsk) 208void vtime_flush(struct task_struct *tsk)
215{ 209{
210 u64 steal, avg_steal;
211
216 if (do_account_vtime(tsk)) 212 if (do_account_vtime(tsk))
217 virt_timer_expire(); 213 virt_timer_expire();
214
215 steal = S390_lowcore.steal_timer;
216 avg_steal = S390_lowcore.avg_steal_timer / 2;
217 if ((s64) steal > 0) {
218 S390_lowcore.steal_timer = 0;
219 account_steal_time(steal);
220 avg_steal += steal;
221 }
222 S390_lowcore.avg_steal_timer = avg_steal;
218} 223}
219 224
220/* 225/*
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index a6ef3fee5f85..7bf2cb680d32 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += emergency-restart.h
9generic-y += exec.h 9generic-y += exec.h
10generic-y += irq_regs.h 10generic-y += irq_regs.h
11generic-y += irq_work.h 11generic-y += irq_work.h
12generic-y += kvm_para.h
12generic-y += local.h 13generic-y += local.h
13generic-y += local64.h 14generic-y += local64.h
14generic-y += mcs_spinlock.h 15generic-y += mcs_spinlock.h
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index ecfbd40924dd..b8812c74c1de 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,5 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h
5generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index b82f64e28f55..a22cfd5c0ee8 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -9,6 +9,7 @@ generic-y += exec.h
9generic-y += export.h 9generic-y += export.h
10generic-y += irq_regs.h 10generic-y += irq_regs.h
11generic-y += irq_work.h 11generic-y += irq_work.h
12generic-y += kvm_para.h
12generic-y += linkage.h 13generic-y += linkage.h
13generic-y += local.h 14generic-y += local.h
14generic-y += local64.h 15generic-y += local64.h
diff --git a/arch/sparc/include/uapi/asm/kvm_para.h b/arch/sparc/include/uapi/asm/kvm_para.h
deleted file mode 100644
index baacc4996d18..000000000000
--- a/arch/sparc/include/uapi/asm/kvm_para.h
+++ /dev/null
@@ -1,2 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#include <asm-generic/kvm_para.h>
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index 1d1544b6ca74..d77d953c04c1 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -18,6 +18,7 @@ generic-y += irq_work.h
18generic-y += kdebug.h 18generic-y += kdebug.h
19generic-y += kmap_types.h 19generic-y += kmap_types.h
20generic-y += kprobes.h 20generic-y += kprobes.h
21generic-y += kvm_para.h
21generic-y += local.h 22generic-y += local.h
22generic-y += mcs_spinlock.h 23generic-y += mcs_spinlock.h
23generic-y += mm-arch-hooks.h 24generic-y += mm-arch-hooks.h
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 755bb11323d8..1c72f04ff75d 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generic-y += kvm_para.h
2generic-y += ucontext.h generic-y += ucontext.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c1f9b3cf437c..5ad92419be19 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2217,14 +2217,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
2217 If unsure, leave at the default value. 2217 If unsure, leave at the default value.
2218 2218
2219config HOTPLUG_CPU 2219config HOTPLUG_CPU
2220 bool "Support for hot-pluggable CPUs" 2220 def_bool y
2221 depends on SMP 2221 depends on SMP
2222 ---help---
2223 Say Y here to allow turning CPUs off and on. CPUs can be
2224 controlled through /sys/devices/system/cpu.
2225 ( Note: power management support will enable this option
2226 automatically on SMP systems. )
2227 Say N if you want to disable CPU hotplug.
2228 2222
2229config BOOTPARAM_HOTPLUG_CPU0 2223config BOOTPARAM_HOTPLUG_CPU0
2230 bool "Set default setting of cpu0_hotpluggable" 2224 bool "Set default setting of cpu0_hotpluggable"
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d8b9d8ca4f8..a587805c6687 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -219,8 +219,12 @@ ifdef CONFIG_RETPOLINE
219 # Additionally, avoid generating expensive indirect jumps which 219 # Additionally, avoid generating expensive indirect jumps which
220 # are subject to retpolines for small number of switch cases. 220 # are subject to retpolines for small number of switch cases.
221 # clang turns off jump table generation by default when under 221 # clang turns off jump table generation by default when under
222 # retpoline builds, however, gcc does not for x86. 222 # retpoline builds, however, gcc does not for x86. This has
223 KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20) 223 # only been fixed starting from gcc stable version 8.4.0 and
224 # onwards, but not for older ones. See gcc bug #86952.
225 ifndef CONFIG_CC_IS_CLANG
226 KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
227 endif
224endif 228endif
225 229
226archscripts: scripts_basic 230archscripts: scripts_basic
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index fd13655e0f9b..d2f184165934 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -120,8 +120,6 @@ static inline void console_init(void)
120 120
121void set_sev_encryption_mask(void); 121void set_sev_encryption_mask(void);
122 122
123#endif
124
125/* acpi.c */ 123/* acpi.c */
126#ifdef CONFIG_ACPI 124#ifdef CONFIG_ACPI
127acpi_physical_address get_rsdp_addr(void); 125acpi_physical_address get_rsdp_addr(void);
@@ -135,3 +133,5 @@ int count_immovable_mem_regions(void);
135#else 133#else
136static inline int count_immovable_mem_regions(void) { return 0; } 134static inline int count_immovable_mem_regions(void) { return 0; }
137#endif 135#endif
136
137#endif /* BOOT_COMPRESSED_MISC_H */
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 315a67b8896b..90154df8f125 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -13,8 +13,9 @@
13 */ 13 */
14 14
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/kernel.h> 16#include <linux/compiler.h>
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/limits.h>
18#include <asm/asm.h> 19#include <asm/asm.h>
19#include "ctype.h" 20#include "ctype.h"
20#include "string.h" 21#include "string.h"
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 6461a16b4559..e4ba467a9fc6 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -103,9 +103,13 @@ static int hv_cpu_init(unsigned int cpu)
103 u64 msr_vp_index; 103 u64 msr_vp_index;
104 struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()]; 104 struct hv_vp_assist_page **hvp = &hv_vp_assist_page[smp_processor_id()];
105 void **input_arg; 105 void **input_arg;
106 struct page *pg;
106 107
107 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); 108 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
108 *input_arg = page_address(alloc_page(GFP_KERNEL)); 109 pg = alloc_page(GFP_KERNEL);
110 if (unlikely(!pg))
111 return -ENOMEM;
112 *input_arg = page_address(pg);
109 113
110 hv_get_vp_index(msr_vp_index); 114 hv_get_vp_index(msr_vp_index);
111 115
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index 3417110574c1..31c379c1da41 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -1,6 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _CPU_DEVICE_ID 2#ifndef _ASM_X86_CPU_DEVICE_ID
3#define _CPU_DEVICE_ID 1 3#define _ASM_X86_CPU_DEVICE_ID
4 4
5/* 5/*
6 * Declare drivers belonging to specific x86 CPUs 6 * Declare drivers belonging to specific x86 CPUs
@@ -9,8 +9,6 @@
9 9
10#include <linux/mod_devicetable.h> 10#include <linux/mod_devicetable.h>
11 11
12extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
13
14/* 12/*
15 * Match specific microcode revisions. 13 * Match specific microcode revisions.
16 * 14 *
@@ -22,21 +20,22 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
22 */ 20 */
23 21
24struct x86_cpu_desc { 22struct x86_cpu_desc {
25 __u8 x86_family; 23 u8 x86_family;
26 __u8 x86_vendor; 24 u8 x86_vendor;
27 __u8 x86_model; 25 u8 x86_model;
28 __u8 x86_stepping; 26 u8 x86_stepping;
29 __u32 x86_microcode_rev; 27 u32 x86_microcode_rev;
30}; 28};
31 29
32#define INTEL_CPU_DESC(mod, step, rev) { \ 30#define INTEL_CPU_DESC(model, stepping, revision) { \
33 .x86_family = 6, \ 31 .x86_family = 6, \
34 .x86_vendor = X86_VENDOR_INTEL, \ 32 .x86_vendor = X86_VENDOR_INTEL, \
35 .x86_model = mod, \ 33 .x86_model = (model), \
36 .x86_stepping = step, \ 34 .x86_stepping = (stepping), \
37 .x86_microcode_rev = rev, \ 35 .x86_microcode_rev = (revision), \
38} 36}
39 37
38extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
40extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); 39extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
41 40
42#endif 41#endif /* _ASM_X86_CPU_DEVICE_ID */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index ce95b8cbd229..0e56ff7e4848 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -112,8 +112,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
112 test_cpu_cap(c, bit)) 112 test_cpu_cap(c, bit))
113 113
114#define this_cpu_has(bit) \ 114#define this_cpu_has(bit) \
115 (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ 115 (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
116 x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability)) 116 x86_this_cpu_test_bit(bit, \
117 (unsigned long __percpu *)&cpu_info.x86_capability))
117 118
118/* 119/*
119 * This macro is for detection of features which need kernel 120 * This macro is for detection of features which need kernel
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a5db4475e72d..159b5988292f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -253,14 +253,14 @@ struct kvm_mmu_memory_cache {
253 * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used 253 * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
254 * by indirect shadow page can not be more than 15 bits. 254 * by indirect shadow page can not be more than 15 bits.
255 * 255 *
256 * Currently, we used 14 bits that are @level, @cr4_pae, @quadrant, @access, 256 * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
257 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp. 257 * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
258 */ 258 */
259union kvm_mmu_page_role { 259union kvm_mmu_page_role {
260 u32 word; 260 u32 word;
261 struct { 261 struct {
262 unsigned level:4; 262 unsigned level:4;
263 unsigned cr4_pae:1; 263 unsigned gpte_is_8_bytes:1;
264 unsigned quadrant:2; 264 unsigned quadrant:2;
265 unsigned direct:1; 265 unsigned direct:1;
266 unsigned access:3; 266 unsigned access:3;
@@ -350,6 +350,7 @@ struct kvm_mmu_page {
350}; 350};
351 351
352struct kvm_pio_request { 352struct kvm_pio_request {
353 unsigned long linear_rip;
353 unsigned long count; 354 unsigned long count;
354 int in; 355 int in;
355 int port; 356 int port;
@@ -568,6 +569,7 @@ struct kvm_vcpu_arch {
568 bool tpr_access_reporting; 569 bool tpr_access_reporting;
569 u64 ia32_xss; 570 u64 ia32_xss;
570 u64 microcode_version; 571 u64 microcode_version;
572 u64 arch_capabilities;
571 573
572 /* 574 /*
573 * Paging state of the vcpu 575 * Paging state of the vcpu
@@ -1192,6 +1194,8 @@ struct kvm_x86_ops {
1192 int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu, 1194 int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
1193 uint16_t *vmcs_version); 1195 uint16_t *vmcs_version);
1194 uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu); 1196 uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
1197
1198 bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
1195}; 1199};
1196 1200
1197struct kvm_arch_async_pf { 1201struct kvm_arch_async_pf {
@@ -1252,7 +1256,7 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1252 gfn_t gfn_offset, unsigned long mask); 1256 gfn_t gfn_offset, unsigned long mask);
1253void kvm_mmu_zap_all(struct kvm *kvm); 1257void kvm_mmu_zap_all(struct kvm *kvm);
1254void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen); 1258void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1255unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 1259unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
1256void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 1260void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
1257 1261
1258int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3); 1262int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
diff --git a/arch/x86/include/asm/processor-cyrix.h b/arch/x86/include/asm/processor-cyrix.h
index aaedd73ea2c6..df700a6cc869 100644
--- a/arch/x86/include/asm/processor-cyrix.h
+++ b/arch/x86/include/asm/processor-cyrix.h
@@ -3,19 +3,6 @@
3 * NSC/Cyrix CPU indexed register access. Must be inlined instead of 3 * NSC/Cyrix CPU indexed register access. Must be inlined instead of
4 * macros to ensure correct access ordering 4 * macros to ensure correct access ordering
5 * Access order is always 0x22 (=offset), 0x23 (=value) 5 * Access order is always 0x22 (=offset), 0x23 (=value)
6 *
7 * When using the old macros a line like
8 * setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
9 * gets expanded to:
10 * do {
11 * outb((CX86_CCR2), 0x22);
12 * outb((({
13 * outb((CX86_CCR2), 0x22);
14 * inb(0x23);
15 * }) | 0x88), 0x23);
16 * } while (0);
17 *
18 * which in fact violates the access order (= 0x22, 0x22, 0x23, 0x23).
19 */ 6 */
20 7
21static inline u8 getCx86(u8 reg) 8static inline u8 getCx86(u8 reg)
@@ -29,11 +16,3 @@ static inline void setCx86(u8 reg, u8 data)
29 outb(reg, 0x22); 16 outb(reg, 0x22);
30 outb(data, 0x23); 17 outb(data, 0x23);
31} 18}
32
33#define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); })
34
35#define setCx86_old(reg, data) do { \
36 outb((reg), 0x22); \
37 outb((data), 0x23); \
38} while (0)
39
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 63b3393bd98e..c53682303c9c 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -77,7 +77,11 @@ static inline size_t real_mode_size_needed(void)
77 return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE); 77 return ALIGN(real_mode_blob_end - real_mode_blob, PAGE_SIZE);
78} 78}
79 79
80void set_real_mode_mem(phys_addr_t mem, size_t size); 80static inline void set_real_mode_mem(phys_addr_t mem)
81{
82 real_mode_header = (struct real_mode_header *) __va(mem);
83}
84
81void reserve_real_mode(void); 85void reserve_real_mode(void);
82 86
83#endif /* __ASSEMBLY__ */ 87#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 58176b56354e..294ed4392a0e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -14,6 +14,7 @@
14#define pr_fmt(fmt) "AGP: " fmt 14#define pr_fmt(fmt) "AGP: " fmt
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/kcore.h>
17#include <linux/types.h> 18#include <linux/types.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/memblock.h> 20#include <linux/memblock.h>
@@ -57,7 +58,7 @@ int fallback_aper_force __initdata;
57 58
58int fix_aperture __initdata = 1; 59int fix_aperture __initdata = 1;
59 60
60#ifdef CONFIG_PROC_VMCORE 61#if defined(CONFIG_PROC_VMCORE) || defined(CONFIG_PROC_KCORE)
61/* 62/*
62 * If the first kernel maps the aperture over e820 RAM, the kdump kernel will 63 * If the first kernel maps the aperture over e820 RAM, the kdump kernel will
63 * use the same range because it will remain configured in the northbridge. 64 * use the same range because it will remain configured in the northbridge.
@@ -66,20 +67,25 @@ int fix_aperture __initdata = 1;
66 */ 67 */
67static unsigned long aperture_pfn_start, aperture_page_count; 68static unsigned long aperture_pfn_start, aperture_page_count;
68 69
69static int gart_oldmem_pfn_is_ram(unsigned long pfn) 70static int gart_mem_pfn_is_ram(unsigned long pfn)
70{ 71{
71 return likely((pfn < aperture_pfn_start) || 72 return likely((pfn < aperture_pfn_start) ||
72 (pfn >= aperture_pfn_start + aperture_page_count)); 73 (pfn >= aperture_pfn_start + aperture_page_count));
73} 74}
74 75
75static void exclude_from_vmcore(u64 aper_base, u32 aper_order) 76static void __init exclude_from_core(u64 aper_base, u32 aper_order)
76{ 77{
77 aperture_pfn_start = aper_base >> PAGE_SHIFT; 78 aperture_pfn_start = aper_base >> PAGE_SHIFT;
78 aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT; 79 aperture_page_count = (32 * 1024 * 1024) << aper_order >> PAGE_SHIFT;
79 WARN_ON(register_oldmem_pfn_is_ram(&gart_oldmem_pfn_is_ram)); 80#ifdef CONFIG_PROC_VMCORE
81 WARN_ON(register_oldmem_pfn_is_ram(&gart_mem_pfn_is_ram));
82#endif
83#ifdef CONFIG_PROC_KCORE
84 WARN_ON(register_mem_pfn_is_ram(&gart_mem_pfn_is_ram));
85#endif
80} 86}
81#else 87#else
82static void exclude_from_vmcore(u64 aper_base, u32 aper_order) 88static void exclude_from_core(u64 aper_base, u32 aper_order)
83{ 89{
84} 90}
85#endif 91#endif
@@ -474,7 +480,7 @@ out:
474 * may have allocated the range over its e820 RAM 480 * may have allocated the range over its e820 RAM
475 * and fixed up the northbridge 481 * and fixed up the northbridge
476 */ 482 */
477 exclude_from_vmcore(last_aper_base, last_aper_order); 483 exclude_from_core(last_aper_base, last_aper_order);
478 484
479 return 1; 485 return 1;
480 } 486 }
@@ -520,7 +526,7 @@ out:
520 * overlap with the first kernel's memory. We can't access the 526 * overlap with the first kernel's memory. We can't access the
521 * range through vmcore even though it should be part of the dump. 527 * range through vmcore even though it should be part of the dump.
522 */ 528 */
523 exclude_from_vmcore(aper_alloc, aper_order); 529 exclude_from_core(aper_alloc, aper_order);
524 530
525 /* Fix up the north bridges */ 531 /* Fix up the north bridges */
526 for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) { 532 for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d12226f60168..1d9b8aaea06c 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -124,7 +124,7 @@ static void set_cx86_reorder(void)
124 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ 124 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
125 125
126 /* Load/Store Serialize to mem access disable (=reorder it) */ 126 /* Load/Store Serialize to mem access disable (=reorder it) */
127 setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80); 127 setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
128 /* set load/store serialize from 1GB to 4GB */ 128 /* set load/store serialize from 1GB to 4GB */
129 ccr3 |= 0xe0; 129 ccr3 |= 0xe0;
130 setCx86(CX86_CCR3, ccr3); 130 setCx86(CX86_CCR3, ccr3);
@@ -135,11 +135,11 @@ static void set_cx86_memwb(void)
135 pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); 135 pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
136 136
137 /* CCR2 bit 2: unlock NW bit */ 137 /* CCR2 bit 2: unlock NW bit */
138 setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04); 138 setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
139 /* set 'Not Write-through' */ 139 /* set 'Not Write-through' */
140 write_cr0(read_cr0() | X86_CR0_NW); 140 write_cr0(read_cr0() | X86_CR0_NW);
141 /* CCR2 bit 2: lock NW bit and set WT1 */ 141 /* CCR2 bit 2: lock NW bit and set WT1 */
142 setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14); 142 setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
143} 143}
144 144
145/* 145/*
@@ -153,14 +153,14 @@ static void geode_configure(void)
153 local_irq_save(flags); 153 local_irq_save(flags);
154 154
155 /* Suspend on halt power saving and enable #SUSP pin */ 155 /* Suspend on halt power saving and enable #SUSP pin */
156 setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88); 156 setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);
157 157
158 ccr3 = getCx86(CX86_CCR3); 158 ccr3 = getCx86(CX86_CCR3);
159 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ 159 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
160 160
161 161
162 /* FPU fast, DTE cache, Mem bypass */ 162 /* FPU fast, DTE cache, Mem bypass */
163 setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38); 163 setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
164 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ 164 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
165 165
166 set_cx86_memwb(); 166 set_cx86_memwb();
@@ -296,7 +296,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
296 /* GXm supports extended cpuid levels 'ala' AMD */ 296 /* GXm supports extended cpuid levels 'ala' AMD */
297 if (c->cpuid_level == 2) { 297 if (c->cpuid_level == 2) {
298 /* Enable cxMMX extensions (GX1 Datasheet 54) */ 298 /* Enable cxMMX extensions (GX1 Datasheet 54) */
299 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1); 299 setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
300 300
301 /* 301 /*
302 * GXm : 0x30 ... 0x5f GXm datasheet 51 302 * GXm : 0x30 ... 0x5f GXm datasheet 51
@@ -319,7 +319,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
319 if (dir1 > 7) { 319 if (dir1 > 7) {
320 dir0_msn++; /* M II */ 320 dir0_msn++; /* M II */
321 /* Enable MMX extensions (App note 108) */ 321 /* Enable MMX extensions (App note 108) */
322 setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1); 322 setCx86(CX86_CCR7, getCx86(CX86_CCR7)|1);
323 } else { 323 } else {
324 /* A 6x86MX - it has the bug. */ 324 /* A 6x86MX - it has the bug. */
325 set_cpu_bug(c, X86_BUG_COMA); 325 set_cpu_bug(c, X86_BUG_COMA);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 97f9ada9ceda..5260185cbf7b 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -608,6 +608,8 @@ static int microcode_reload_late(void)
608 if (ret > 0) 608 if (ret > 0)
609 microcode_check(); 609 microcode_check();
610 610
611 pr_info("Reload completed, microcode revision: 0x%x\n", boot_cpu_data.microcode);
612
611 return ret; 613 return ret;
612} 614}
613 615
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f33f11f69078..1573a0a6b525 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -501,11 +501,8 @@ out_unlock:
501void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) 501void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
502{ 502{
503 unsigned long delay = msecs_to_jiffies(delay_ms); 503 unsigned long delay = msecs_to_jiffies(delay_ms);
504 struct rdt_resource *r;
505 int cpu; 504 int cpu;
506 505
507 r = &rdt_resources_all[RDT_RESOURCE_L3];
508
509 cpu = cpumask_any(&dom->cpu_mask); 506 cpu = cpumask_any(&dom->cpu_mask);
510 dom->cqm_work_cpu = cpu; 507 dom->cqm_work_cpu = cpu;
511 508
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index dfd3aca82c61..fb32925a2e62 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -905,6 +905,8 @@ int __init hpet_enable(void)
905 return 0; 905 return 0;
906 906
907 hpet_set_mapping(); 907 hpet_set_mapping();
908 if (!hpet_virt_address)
909 return 0;
908 910
909 /* 911 /*
910 * Read the period and check for a sane value: 912 * Read the period and check for a sane value:
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff9bfd40429e..d73083021002 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -354,6 +354,7 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
354#endif 354#endif
355 default: 355 default:
356 WARN_ON_ONCE(1); 356 WARN_ON_ONCE(1);
357 return -EINVAL;
357 } 358 }
358 359
359 /* 360 /*
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 3482460d984d..1bfe5c6e6cfe 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -598,8 +598,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
598 mpf_base = base; 598 mpf_base = base;
599 mpf_found = true; 599 mpf_found = true;
600 600
601 pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n", 601 pr_info("found SMP MP-table at [mem %#010lx-%#010lx]\n",
602 base, base + sizeof(*mpf) - 1, mpf); 602 base, base + sizeof(*mpf) - 1);
603 603
604 memblock_reserve(base, sizeof(*mpf)); 604 memblock_reserve(base, sizeof(*mpf));
605 if (mpf->physptr) 605 if (mpf->physptr)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 27c43525a05f..421899f6ad7b 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -526,7 +526,9 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
526 new_config.enable = 0; 526 new_config.enable = 0;
527 stimer->config.as_uint64 = new_config.as_uint64; 527 stimer->config.as_uint64 = new_config.as_uint64;
528 528
529 stimer_mark_pending(stimer, false); 529 if (stimer->config.enable)
530 stimer_mark_pending(stimer, false);
531
530 return 0; 532 return 0;
531} 533}
532 534
@@ -542,7 +544,10 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
542 stimer->config.enable = 0; 544 stimer->config.enable = 0;
543 else if (stimer->config.auto_enable) 545 else if (stimer->config.auto_enable)
544 stimer->config.enable = 1; 546 stimer->config.enable = 1;
545 stimer_mark_pending(stimer, false); 547
548 if (stimer->config.enable)
549 stimer_mark_pending(stimer, false);
550
546 return 0; 551 return 0;
547} 552}
548 553
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7837ab001d80..eee455a8a612 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -182,7 +182,7 @@ struct kvm_shadow_walk_iterator {
182 182
183static const union kvm_mmu_page_role mmu_base_role_mask = { 183static const union kvm_mmu_page_role mmu_base_role_mask = {
184 .cr0_wp = 1, 184 .cr0_wp = 1,
185 .cr4_pae = 1, 185 .gpte_is_8_bytes = 1,
186 .nxe = 1, 186 .nxe = 1,
187 .smep_andnot_wp = 1, 187 .smep_andnot_wp = 1,
188 .smap_andnot_wp = 1, 188 .smap_andnot_wp = 1,
@@ -2205,6 +2205,7 @@ static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2205static void kvm_mmu_commit_zap_page(struct kvm *kvm, 2205static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2206 struct list_head *invalid_list); 2206 struct list_head *invalid_list);
2207 2207
2208
2208#define for_each_valid_sp(_kvm, _sp, _gfn) \ 2209#define for_each_valid_sp(_kvm, _sp, _gfn) \
2209 hlist_for_each_entry(_sp, \ 2210 hlist_for_each_entry(_sp, \
2210 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \ 2211 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
@@ -2215,12 +2216,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2215 for_each_valid_sp(_kvm, _sp, _gfn) \ 2216 for_each_valid_sp(_kvm, _sp, _gfn) \
2216 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else 2217 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
2217 2218
2219static inline bool is_ept_sp(struct kvm_mmu_page *sp)
2220{
2221 return sp->role.cr0_wp && sp->role.smap_andnot_wp;
2222}
2223
2218/* @sp->gfn should be write-protected at the call site */ 2224/* @sp->gfn should be write-protected at the call site */
2219static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 2225static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2220 struct list_head *invalid_list) 2226 struct list_head *invalid_list)
2221{ 2227{
2222 if (sp->role.cr4_pae != !!is_pae(vcpu) 2228 if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
2223 || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) { 2229 vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
2224 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); 2230 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2225 return false; 2231 return false;
2226 } 2232 }
@@ -2423,7 +2429,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2423 role.level = level; 2429 role.level = level;
2424 role.direct = direct; 2430 role.direct = direct;
2425 if (role.direct) 2431 if (role.direct)
2426 role.cr4_pae = 0; 2432 role.gpte_is_8_bytes = true;
2427 role.access = access; 2433 role.access = access;
2428 if (!vcpu->arch.mmu->direct_map 2434 if (!vcpu->arch.mmu->direct_map
2429 && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) { 2435 && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
@@ -4794,7 +4800,6 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
4794 4800
4795 role.base.access = ACC_ALL; 4801 role.base.access = ACC_ALL;
4796 role.base.nxe = !!is_nx(vcpu); 4802 role.base.nxe = !!is_nx(vcpu);
4797 role.base.cr4_pae = !!is_pae(vcpu);
4798 role.base.cr0_wp = is_write_protection(vcpu); 4803 role.base.cr0_wp = is_write_protection(vcpu);
4799 role.base.smm = is_smm(vcpu); 4804 role.base.smm = is_smm(vcpu);
4800 role.base.guest_mode = is_guest_mode(vcpu); 4805 role.base.guest_mode = is_guest_mode(vcpu);
@@ -4815,6 +4820,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4815 role.base.ad_disabled = (shadow_accessed_mask == 0); 4820 role.base.ad_disabled = (shadow_accessed_mask == 0);
4816 role.base.level = kvm_x86_ops->get_tdp_level(vcpu); 4821 role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
4817 role.base.direct = true; 4822 role.base.direct = true;
4823 role.base.gpte_is_8_bytes = true;
4818 4824
4819 return role; 4825 return role;
4820} 4826}
@@ -4879,6 +4885,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
4879 role.base.smap_andnot_wp = role.ext.cr4_smap && 4885 role.base.smap_andnot_wp = role.ext.cr4_smap &&
4880 !is_write_protection(vcpu); 4886 !is_write_protection(vcpu);
4881 role.base.direct = !is_paging(vcpu); 4887 role.base.direct = !is_paging(vcpu);
4888 role.base.gpte_is_8_bytes = !!is_pae(vcpu);
4882 4889
4883 if (!is_long_mode(vcpu)) 4890 if (!is_long_mode(vcpu))
4884 role.base.level = PT32E_ROOT_LEVEL; 4891 role.base.level = PT32E_ROOT_LEVEL;
@@ -4918,18 +4925,26 @@ static union kvm_mmu_role
4918kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty, 4925kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
4919 bool execonly) 4926 bool execonly)
4920{ 4927{
4921 union kvm_mmu_role role; 4928 union kvm_mmu_role role = {0};
4922 4929
4923 /* Base role is inherited from root_mmu */ 4930 /* SMM flag is inherited from root_mmu */
4924 role.base.word = vcpu->arch.root_mmu.mmu_role.base.word; 4931 role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4925 role.ext = kvm_calc_mmu_role_ext(vcpu);
4926 4932
4927 role.base.level = PT64_ROOT_4LEVEL; 4933 role.base.level = PT64_ROOT_4LEVEL;
4934 role.base.gpte_is_8_bytes = true;
4928 role.base.direct = false; 4935 role.base.direct = false;
4929 role.base.ad_disabled = !accessed_dirty; 4936 role.base.ad_disabled = !accessed_dirty;
4930 role.base.guest_mode = true; 4937 role.base.guest_mode = true;
4931 role.base.access = ACC_ALL; 4938 role.base.access = ACC_ALL;
4932 4939
4940 /*
4941 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
4942 * SMAP variation to denote shadow EPT entries.
4943 */
4944 role.base.cr0_wp = true;
4945 role.base.smap_andnot_wp = true;
4946
4947 role.ext = kvm_calc_mmu_role_ext(vcpu);
4933 role.ext.execonly = execonly; 4948 role.ext.execonly = execonly;
4934 4949
4935 return role; 4950 return role;
@@ -5179,7 +5194,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
5179 gpa, bytes, sp->role.word); 5194 gpa, bytes, sp->role.word);
5180 5195
5181 offset = offset_in_page(gpa); 5196 offset = offset_in_page(gpa);
5182 pte_size = sp->role.cr4_pae ? 8 : 4; 5197 pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
5183 5198
5184 /* 5199 /*
5185 * Sometimes, the OS only writes the last one bytes to update status 5200 * Sometimes, the OS only writes the last one bytes to update status
@@ -5203,7 +5218,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
5203 page_offset = offset_in_page(gpa); 5218 page_offset = offset_in_page(gpa);
5204 level = sp->role.level; 5219 level = sp->role.level;
5205 *nspte = 1; 5220 *nspte = 1;
5206 if (!sp->role.cr4_pae) { 5221 if (!sp->role.gpte_is_8_bytes) {
5207 page_offset <<= 1; /* 32->64 */ 5222 page_offset <<= 1; /* 32->64 */
5208 /* 5223 /*
5209 * A 32-bit pde maps 4MB while the shadow pdes map 5224 * A 32-bit pde maps 4MB while the shadow pdes map
@@ -5393,10 +5408,12 @@ emulate:
5393 * This can happen if a guest gets a page-fault on data access but the HW 5408 * This can happen if a guest gets a page-fault on data access but the HW
5394 * table walker is not able to read the instruction page (e.g instruction 5409 * table walker is not able to read the instruction page (e.g instruction
5395 * page is not present in memory). In those cases we simply restart the 5410 * page is not present in memory). In those cases we simply restart the
5396 * guest. 5411 * guest, with the exception of AMD Erratum 1096 which is unrecoverable.
5397 */ 5412 */
5398 if (unlikely(insn && !insn_len)) 5413 if (unlikely(insn && !insn_len)) {
5399 return 1; 5414 if (!kvm_x86_ops->need_emulation_on_page_fault(vcpu))
5415 return 1;
5416 }
5400 5417
5401 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); 5418 er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
5402 5419
@@ -5509,7 +5526,9 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5509 5526
5510 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) { 5527 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5511 if (flush && lock_flush_tlb) { 5528 if (flush && lock_flush_tlb) {
5512 kvm_flush_remote_tlbs(kvm); 5529 kvm_flush_remote_tlbs_with_address(kvm,
5530 start_gfn,
5531 iterator.gfn - start_gfn + 1);
5513 flush = false; 5532 flush = false;
5514 } 5533 }
5515 cond_resched_lock(&kvm->mmu_lock); 5534 cond_resched_lock(&kvm->mmu_lock);
@@ -5517,7 +5536,8 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5517 } 5536 }
5518 5537
5519 if (flush && lock_flush_tlb) { 5538 if (flush && lock_flush_tlb) {
5520 kvm_flush_remote_tlbs(kvm); 5539 kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
5540 end_gfn - start_gfn + 1);
5521 flush = false; 5541 flush = false;
5522 } 5542 }
5523 5543
@@ -6011,7 +6031,7 @@ out:
6011/* 6031/*
6012 * Calculate mmu pages needed for kvm. 6032 * Calculate mmu pages needed for kvm.
6013 */ 6033 */
6014unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) 6034unsigned int kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
6015{ 6035{
6016 unsigned int nr_mmu_pages; 6036 unsigned int nr_mmu_pages;
6017 unsigned int nr_pages = 0; 6037 unsigned int nr_pages = 0;
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 9f6c855a0043..dd30dccd2ad5 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -29,10 +29,10 @@
29 \ 29 \
30 role.word = __entry->role; \ 30 role.word = __entry->role; \
31 \ 31 \
32 trace_seq_printf(p, "sp gfn %llx l%u%s q%u%s %s%s" \ 32 trace_seq_printf(p, "sp gfn %llx l%u %u-byte q%u%s %s%s" \
33 " %snxe %sad root %u %s%c", \ 33 " %snxe %sad root %u %s%c", \
34 __entry->gfn, role.level, \ 34 __entry->gfn, role.level, \
35 role.cr4_pae ? " pae" : "", \ 35 role.gpte_is_8_bytes ? 8 : 4, \
36 role.quadrant, \ 36 role.quadrant, \
37 role.direct ? " direct" : "", \ 37 role.direct ? " direct" : "", \
38 access_str[role.access], \ 38 access_str[role.access], \
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b5b128a0a051..426039285fd1 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -7098,6 +7098,36 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
7098 return -ENODEV; 7098 return -ENODEV;
7099} 7099}
7100 7100
7101static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
7102{
7103 bool is_user, smap;
7104
7105 is_user = svm_get_cpl(vcpu) == 3;
7106 smap = !kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
7107
7108 /*
7109 * Detect and workaround Errata 1096 Fam_17h_00_0Fh
7110 *
7111 * In non SEV guest, hypervisor will be able to read the guest
7112 * memory to decode the instruction pointer when insn_len is zero
7113 * so we return true to indicate that decoding is possible.
7114 *
7115 * But in the SEV guest, the guest memory is encrypted with the
7116 * guest specific key and hypervisor will not be able to decode the
7117 * instruction pointer so we will not able to workaround it. Lets
7118 * print the error and request to kill the guest.
7119 */
7120 if (is_user && smap) {
7121 if (!sev_guest(vcpu->kvm))
7122 return true;
7123
7124 pr_err_ratelimited("KVM: Guest triggered AMD Erratum 1096\n");
7125 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7126 }
7127
7128 return false;
7129}
7130
7101static struct kvm_x86_ops svm_x86_ops __ro_after_init = { 7131static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7102 .cpu_has_kvm_support = has_svm, 7132 .cpu_has_kvm_support = has_svm,
7103 .disabled_by_bios = is_disabled, 7133 .disabled_by_bios = is_disabled,
@@ -7231,6 +7261,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7231 7261
7232 .nested_enable_evmcs = nested_enable_evmcs, 7262 .nested_enable_evmcs = nested_enable_evmcs,
7233 .nested_get_evmcs_version = nested_get_evmcs_version, 7263 .nested_get_evmcs_version = nested_get_evmcs_version,
7264
7265 .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
7234}; 7266};
7235 7267
7236static int __init svm_init(void) 7268static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index f24a2c225070..153e539c29c9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2585,6 +2585,11 @@ static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
2585 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) || 2585 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2586 !nested_cr3_valid(vcpu, vmcs12->host_cr3)) 2586 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
2587 return -EINVAL; 2587 return -EINVAL;
2588
2589 if (is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu) ||
2590 is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))
2591 return -EINVAL;
2592
2588 /* 2593 /*
2589 * If the load IA32_EFER VM-exit control is 1, bits reserved in the 2594 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2590 * IA32_EFER MSR must be 0 in the field for that register. In addition, 2595 * IA32_EFER MSR must be 0 in the field for that register. In addition,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c73375e01ab8..ab432a930ae8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1683,12 +1683,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1683 1683
1684 msr_info->data = to_vmx(vcpu)->spec_ctrl; 1684 msr_info->data = to_vmx(vcpu)->spec_ctrl;
1685 break; 1685 break;
1686 case MSR_IA32_ARCH_CAPABILITIES:
1687 if (!msr_info->host_initiated &&
1688 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
1689 return 1;
1690 msr_info->data = to_vmx(vcpu)->arch_capabilities;
1691 break;
1692 case MSR_IA32_SYSENTER_CS: 1686 case MSR_IA32_SYSENTER_CS:
1693 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); 1687 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
1694 break; 1688 break;
@@ -1895,11 +1889,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1895 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, 1889 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
1896 MSR_TYPE_W); 1890 MSR_TYPE_W);
1897 break; 1891 break;
1898 case MSR_IA32_ARCH_CAPABILITIES:
1899 if (!msr_info->host_initiated)
1900 return 1;
1901 vmx->arch_capabilities = data;
1902 break;
1903 case MSR_IA32_CR_PAT: 1892 case MSR_IA32_CR_PAT:
1904 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 1893 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
1905 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) 1894 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -4088,8 +4077,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
4088 ++vmx->nmsrs; 4077 ++vmx->nmsrs;
4089 } 4078 }
4090 4079
4091 vmx->arch_capabilities = kvm_get_arch_capabilities();
4092
4093 vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); 4080 vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
4094 4081
4095 /* 22.2.1, 20.8.1 */ 4082 /* 22.2.1, 20.8.1 */
@@ -7409,6 +7396,11 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
7409 return 0; 7396 return 0;
7410} 7397}
7411 7398
7399static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
7400{
7401 return 0;
7402}
7403
7412static __init int hardware_setup(void) 7404static __init int hardware_setup(void)
7413{ 7405{
7414 unsigned long host_bndcfgs; 7406 unsigned long host_bndcfgs;
@@ -7711,6 +7703,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
7711 .set_nested_state = NULL, 7703 .set_nested_state = NULL,
7712 .get_vmcs12_pages = NULL, 7704 .get_vmcs12_pages = NULL,
7713 .nested_enable_evmcs = NULL, 7705 .nested_enable_evmcs = NULL,
7706 .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
7714}; 7707};
7715 7708
7716static void vmx_cleanup_l1d_flush(void) 7709static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 1554cb45b393..a1e00d0a2482 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -190,7 +190,6 @@ struct vcpu_vmx {
190 u64 msr_guest_kernel_gs_base; 190 u64 msr_guest_kernel_gs_base;
191#endif 191#endif
192 192
193 u64 arch_capabilities;
194 u64 spec_ctrl; 193 u64 spec_ctrl;
195 194
196 u32 vm_entry_controls_shadow; 195 u32 vm_entry_controls_shadow;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 65e4559eef2f..099b851dabaf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1125,7 +1125,7 @@ static u32 msrs_to_save[] = {
1125#endif 1125#endif
1126 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, 1126 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1127 MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX, 1127 MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1128 MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES, 1128 MSR_IA32_SPEC_CTRL,
1129 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH, 1129 MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
1130 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK, 1130 MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
1131 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B, 1131 MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -1158,6 +1158,7 @@ static u32 emulated_msrs[] = {
1158 1158
1159 MSR_IA32_TSC_ADJUST, 1159 MSR_IA32_TSC_ADJUST,
1160 MSR_IA32_TSCDEADLINE, 1160 MSR_IA32_TSCDEADLINE,
1161 MSR_IA32_ARCH_CAPABILITIES,
1161 MSR_IA32_MISC_ENABLE, 1162 MSR_IA32_MISC_ENABLE,
1162 MSR_IA32_MCG_STATUS, 1163 MSR_IA32_MCG_STATUS,
1163 MSR_IA32_MCG_CTL, 1164 MSR_IA32_MCG_CTL,
@@ -2443,6 +2444,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2443 if (msr_info->host_initiated) 2444 if (msr_info->host_initiated)
2444 vcpu->arch.microcode_version = data; 2445 vcpu->arch.microcode_version = data;
2445 break; 2446 break;
2447 case MSR_IA32_ARCH_CAPABILITIES:
2448 if (!msr_info->host_initiated)
2449 return 1;
2450 vcpu->arch.arch_capabilities = data;
2451 break;
2446 case MSR_EFER: 2452 case MSR_EFER:
2447 return set_efer(vcpu, data); 2453 return set_efer(vcpu, data);
2448 case MSR_K7_HWCR: 2454 case MSR_K7_HWCR:
@@ -2747,6 +2753,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2747 case MSR_IA32_UCODE_REV: 2753 case MSR_IA32_UCODE_REV:
2748 msr_info->data = vcpu->arch.microcode_version; 2754 msr_info->data = vcpu->arch.microcode_version;
2749 break; 2755 break;
2756 case MSR_IA32_ARCH_CAPABILITIES:
2757 if (!msr_info->host_initiated &&
2758 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
2759 return 1;
2760 msr_info->data = vcpu->arch.arch_capabilities;
2761 break;
2750 case MSR_IA32_TSC: 2762 case MSR_IA32_TSC:
2751 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; 2763 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
2752 break; 2764 break;
@@ -6523,14 +6535,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
6523} 6535}
6524EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer); 6536EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
6525 6537
6538static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
6539{
6540 vcpu->arch.pio.count = 0;
6541
6542 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
6543 return 1;
6544
6545 return kvm_skip_emulated_instruction(vcpu);
6546}
6547
6526static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, 6548static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
6527 unsigned short port) 6549 unsigned short port)
6528{ 6550{
6529 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); 6551 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
6530 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, 6552 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
6531 size, port, &val, 1); 6553 size, port, &val, 1);
6532 /* do not return to emulator after return from userspace */ 6554
6533 vcpu->arch.pio.count = 0; 6555 if (!ret) {
6556 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
6557 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
6558 }
6534 return ret; 6559 return ret;
6535} 6560}
6536 6561
@@ -6541,6 +6566,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
6541 /* We should only ever be called with arch.pio.count equal to 1 */ 6566 /* We should only ever be called with arch.pio.count equal to 1 */
6542 BUG_ON(vcpu->arch.pio.count != 1); 6567 BUG_ON(vcpu->arch.pio.count != 1);
6543 6568
6569 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
6570 vcpu->arch.pio.count = 0;
6571 return 1;
6572 }
6573
6544 /* For size less than 4 we merge, else we zero extend */ 6574 /* For size less than 4 we merge, else we zero extend */
6545 val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) 6575 val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
6546 : 0; 6576 : 0;
@@ -6553,7 +6583,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
6553 vcpu->arch.pio.port, &val, 1); 6583 vcpu->arch.pio.port, &val, 1);
6554 kvm_register_write(vcpu, VCPU_REGS_RAX, val); 6584 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
6555 6585
6556 return 1; 6586 return kvm_skip_emulated_instruction(vcpu);
6557} 6587}
6558 6588
6559static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, 6589static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
@@ -6572,6 +6602,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
6572 return ret; 6602 return ret;
6573 } 6603 }
6574 6604
6605 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
6575 vcpu->arch.complete_userspace_io = complete_fast_pio_in; 6606 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
6576 6607
6577 return 0; 6608 return 0;
@@ -6579,16 +6610,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
6579 6610
6580int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) 6611int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
6581{ 6612{
6582 int ret = kvm_skip_emulated_instruction(vcpu); 6613 int ret;
6583 6614
6584 /*
6585 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
6586 * KVM_EXIT_DEBUG here.
6587 */
6588 if (in) 6615 if (in)
6589 return kvm_fast_pio_in(vcpu, size, port) && ret; 6616 ret = kvm_fast_pio_in(vcpu, size, port);
6590 else 6617 else
6591 return kvm_fast_pio_out(vcpu, size, port) && ret; 6618 ret = kvm_fast_pio_out(vcpu, size, port);
6619 return ret && kvm_skip_emulated_instruction(vcpu);
6592} 6620}
6593EXPORT_SYMBOL_GPL(kvm_fast_pio); 6621EXPORT_SYMBOL_GPL(kvm_fast_pio);
6594 6622
@@ -8733,6 +8761,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
8733 8761
8734int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 8762int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
8735{ 8763{
8764 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
8736 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; 8765 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
8737 kvm_vcpu_mtrr_init(vcpu); 8766 kvm_vcpu_mtrr_init(vcpu);
8738 vcpu_load(vcpu); 8767 vcpu_load(vcpu);
@@ -9429,13 +9458,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
9429 const struct kvm_memory_slot *new, 9458 const struct kvm_memory_slot *new,
9430 enum kvm_mr_change change) 9459 enum kvm_mr_change change)
9431{ 9460{
9432 int nr_mmu_pages = 0;
9433
9434 if (!kvm->arch.n_requested_mmu_pages) 9461 if (!kvm->arch.n_requested_mmu_pages)
9435 nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); 9462 kvm_mmu_change_mmu_pages(kvm,
9436 9463 kvm_mmu_calculate_default_mmu_pages(kvm));
9437 if (nr_mmu_pages)
9438 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
9439 9464
9440 /* 9465 /*
9441 * Dirty logging tracks sptes in 4k granularity, meaning that large 9466 * Dirty logging tracks sptes in 4k granularity, meaning that large
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index 9baca3e054be..e7925d668b68 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -94,7 +94,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
94 : "m" (*(unsigned long *)buff), 94 : "m" (*(unsigned long *)buff),
95 "r" (zero), "0" (result)); 95 "r" (zero), "0" (result));
96 --count; 96 --count;
97 buff += 8; 97 buff += 8;
98 } 98 }
99 result = add32_with_carry(result>>32, 99 result = add32_with_carry(result>>32,
100 result&0xffffffff); 100 result&0xffffffff);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index db3165714521..dc726e07d8ba 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -230,7 +230,7 @@ bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
230/* Can we access it for direct reading/writing? Must be RAM: */ 230/* Can we access it for direct reading/writing? Must be RAM: */
231int valid_phys_addr_range(phys_addr_t addr, size_t count) 231int valid_phys_addr_range(phys_addr_t addr, size_t count)
232{ 232{
233 return addr + count <= __pa(high_memory); 233 return addr + count - 1 <= __pa(high_memory - 1);
234} 234}
235 235
236/* Can we access it through mmap? Must be a valid physical address: */ 236/* Can we access it through mmap? Must be a valid physical address: */
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 4fee5c3003ed..139b28a01ce4 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -77,7 +77,7 @@ static void __init pti_print_if_secure(const char *reason)
77 pr_info("%s\n", reason); 77 pr_info("%s\n", reason);
78} 78}
79 79
80enum pti_mode { 80static enum pti_mode {
81 PTI_AUTO = 0, 81 PTI_AUTO = 0,
82 PTI_FORCE_OFF, 82 PTI_FORCE_OFF,
83 PTI_FORCE_ON 83 PTI_FORCE_ON
@@ -602,7 +602,7 @@ static void pti_clone_kernel_text(void)
602 set_memory_global(start, (end_global - start) >> PAGE_SHIFT); 602 set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
603} 603}
604 604
605void pti_set_kernel_image_nonglobal(void) 605static void pti_set_kernel_image_nonglobal(void)
606{ 606{
607 /* 607 /*
608 * The identity map is created with PMDs, regardless of the 608 * The identity map is created with PMDs, regardless of the
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 458a0e2bcc57..a25a9fd987a9 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -449,7 +449,7 @@ void __init efi_free_boot_services(void)
449 */ 449 */
450 rm_size = real_mode_size_needed(); 450 rm_size = real_mode_size_needed();
451 if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) { 451 if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
452 set_real_mode_mem(start, rm_size); 452 set_real_mode_mem(start);
453 start += rm_size; 453 start += rm_size;
454 size -= rm_size; 454 size -= rm_size;
455 } 455 }
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index d10105825d57..7dce39c8c034 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -15,15 +15,6 @@ u32 *trampoline_cr4_features;
15/* Hold the pgd entry used on booting additional CPUs */ 15/* Hold the pgd entry used on booting additional CPUs */
16pgd_t trampoline_pgd_entry; 16pgd_t trampoline_pgd_entry;
17 17
18void __init set_real_mode_mem(phys_addr_t mem, size_t size)
19{
20 void *base = __va(mem);
21
22 real_mode_header = (struct real_mode_header *) base;
23 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
24 base, (unsigned long long)mem, size);
25}
26
27void __init reserve_real_mode(void) 18void __init reserve_real_mode(void)
28{ 19{
29 phys_addr_t mem; 20 phys_addr_t mem;
@@ -42,7 +33,7 @@ void __init reserve_real_mode(void)
42 } 33 }
43 34
44 memblock_reserve(mem, size); 35 memblock_reserve(mem, size);
45 set_real_mode_mem(mem, size); 36 set_real_mode_mem(mem);
46} 37}
47 38
48static void __init setup_real_mode(void) 39static void __init setup_real_mode(void)
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 42b6cb3d16f7..3843198e03d4 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += irq_work.h
15generic-y += kdebug.h 15generic-y += kdebug.h
16generic-y += kmap_types.h 16generic-y += kmap_types.h
17generic-y += kprobes.h 17generic-y += kprobes.h
18generic-y += kvm_para.h
18generic-y += local.h 19generic-y += local.h
19generic-y += local64.h 20generic-y += local64.h
20generic-y += mcs_spinlock.h 21generic-y += mcs_spinlock.h
diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild
index 8a7ad40be463..7417847dc438 100644
--- a/arch/xtensa/include/uapi/asm/Kbuild
+++ b/arch/xtensa/include/uapi/asm/Kbuild
@@ -1,2 +1 @@
1generated-y += unistd_32.h generated-y += unistd_32.h
2generic-y += kvm_para.h
diff --git a/block/bio.c b/block/bio.c
index 71a78d9fb8b7..b64cedc7f87c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,20 +849,14 @@ static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
849 size = bio_add_page(bio, bv->bv_page, len, 849 size = bio_add_page(bio, bv->bv_page, len,
850 bv->bv_offset + iter->iov_offset); 850 bv->bv_offset + iter->iov_offset);
851 if (size == len) { 851 if (size == len) {
852 struct page *page; 852 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
853 int i; 853 struct page *page;
854 int i;
855
856 mp_bvec_for_each_page(page, bv, i)
857 get_page(page);
858 }
854 859
855 /*
856 * For the normal O_DIRECT case, we could skip grabbing this
857 * reference and then not have to put them again when IO
858 * completes. But this breaks some in-kernel users, like
859 * splicing to/from a loop device, where we release the pipe
860 * pages unconditionally. If we can fix that case, we can
861 * get rid of the get here and the need to call
862 * bio_release_pages() at IO completion time.
863 */
864 mp_bvec_for_each_page(page, bv, i)
865 get_page(page);
866 iov_iter_advance(iter, size); 860 iov_iter_advance(iter, size);
867 return 0; 861 return 0;
868 } 862 }
@@ -925,10 +919,12 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
925 * This takes either an iterator pointing to user memory, or one pointing to 919 * This takes either an iterator pointing to user memory, or one pointing to
926 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and 920 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
927 * map them into the kernel. On IO completion, the caller should put those 921 * map them into the kernel. On IO completion, the caller should put those
928 * pages. For now, when adding kernel pages, we still grab a reference to the 922 * pages. If we're adding kernel pages, and the caller told us it's safe to
929 * page. This isn't strictly needed for the common case, but some call paths 923 * do so, we just have to add the pages to the bio directly. We don't grab an
930 * end up releasing pages from eg a pipe and we can't easily control these. 924 * extra reference to those pages (the user should already have that), and we
931 * See comment in __bio_iov_bvec_add_pages(). 925 * don't put the page on IO completion. The caller needs to check if the bio is
926 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
927 * released.
932 * 928 *
933 * The function tries, but does not guarantee, to pin as many pages as 929 * The function tries, but does not guarantee, to pin as many pages as
934 * fit into the bio, or are requested in *iter, whatever is smaller. If 930 * fit into the bio, or are requested in *iter, whatever is smaller. If
@@ -940,6 +936,13 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
940 const bool is_bvec = iov_iter_is_bvec(iter); 936 const bool is_bvec = iov_iter_is_bvec(iter);
941 unsigned short orig_vcnt = bio->bi_vcnt; 937 unsigned short orig_vcnt = bio->bi_vcnt;
942 938
939 /*
940 * If this is a BVEC iter, then the pages are kernel pages. Don't
941 * release them on IO completion, if the caller asked us to.
942 */
943 if (is_bvec && iov_iter_bvec_no_ref(iter))
944 bio_set_flag(bio, BIO_NO_PAGE_REF);
945
943 do { 946 do {
944 int ret; 947 int ret;
945 948
@@ -1696,7 +1699,8 @@ static void bio_dirty_fn(struct work_struct *work)
1696 next = bio->bi_private; 1699 next = bio->bi_private;
1697 1700
1698 bio_set_pages_dirty(bio); 1701 bio_set_pages_dirty(bio);
1699 bio_release_pages(bio); 1702 if (!bio_flagged(bio, BIO_NO_PAGE_REF))
1703 bio_release_pages(bio);
1700 bio_put(bio); 1704 bio_put(bio);
1701 } 1705 }
1702} 1706}
@@ -1713,7 +1717,8 @@ void bio_check_pages_dirty(struct bio *bio)
1713 goto defer; 1717 goto defer;
1714 } 1718 }
1715 1719
1716 bio_release_pages(bio); 1720 if (!bio_flagged(bio, BIO_NO_PAGE_REF))
1721 bio_release_pages(bio);
1717 bio_put(bio); 1722 bio_put(bio);
1718 return; 1723 return;
1719defer: 1724defer:
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 77f37ef8ef06..617a2b3f7582 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1736,8 +1736,8 @@ out:
1736 1736
1737/** 1737/**
1738 * blkcg_schedule_throttle - this task needs to check for throttling 1738 * blkcg_schedule_throttle - this task needs to check for throttling
1739 * @q - the request queue IO was submitted on 1739 * @q: the request queue IO was submitted on
1740 * @use_memdelay - do we charge this to memory delay for PSI 1740 * @use_memdelay: do we charge this to memory delay for PSI
1741 * 1741 *
1742 * This is called by the IO controller when we know there's delay accumulated 1742 * This is called by the IO controller when we know there's delay accumulated
1743 * for the blkg for this task. We do not pass the blkg because there are places 1743 * for the blkg for this task. We do not pass the blkg because there are places
@@ -1769,8 +1769,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1769 1769
1770/** 1770/**
1771 * blkcg_add_delay - add delay to this blkg 1771 * blkcg_add_delay - add delay to this blkg
1772 * @now - the current time in nanoseconds 1772 * @blkg: blkg of interest
1773 * @delta - how many nanoseconds of delay to add 1773 * @now: the current time in nanoseconds
1774 * @delta: how many nanoseconds of delay to add
1774 * 1775 *
1775 * Charge @delta to the blkg's current delay accumulation. This is used to 1776 * Charge @delta to the blkg's current delay accumulation. This is used to
1776 * throttle tasks if an IO controller thinks we need more throttling. 1777 * throttle tasks if an IO controller thinks we need more throttling.
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6e0f2d97fc6d..d95f94892015 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
220 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); 220 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
221 flush_rq->tag = -1; 221 flush_rq->tag = -1;
222 } else { 222 } else {
223 blk_mq_put_driver_tag_hctx(hctx, flush_rq); 223 blk_mq_put_driver_tag(flush_rq);
224 flush_rq->internal_tag = -1; 224 flush_rq->internal_tag = -1;
225 } 225 }
226 226
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
324 324
325 if (q->elevator) { 325 if (q->elevator) {
326 WARN_ON(rq->tag < 0); 326 WARN_ON(rq->tag < 0);
327 blk_mq_put_driver_tag_hctx(hctx, rq); 327 blk_mq_put_driver_tag(rq);
328 } 328 }
329 329
330 /* 330 /*
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 2620baa1f699..507212d75ee2 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -75,6 +75,7 @@
75#include <linux/blk-mq.h> 75#include <linux/blk-mq.h>
76#include "blk-rq-qos.h" 76#include "blk-rq-qos.h"
77#include "blk-stat.h" 77#include "blk-stat.h"
78#include "blk.h"
78 79
79#define DEFAULT_SCALE_COOKIE 1000000U 80#define DEFAULT_SCALE_COOKIE 1000000U
80 81
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a9c181603cbd..3ff3d7b49969 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
59} 59}
60 60
61/* 61/*
62 * Check if any of the ctx's have pending work in this hardware queue 62 * Check if any of the ctx, dispatch list or elevator
63 * have pending work in this hardware queue.
63 */ 64 */
64static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx) 65static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
65{ 66{
@@ -782,7 +783,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
782 if (kick_requeue_list) 783 if (kick_requeue_list)
783 blk_mq_kick_requeue_list(q); 784 blk_mq_kick_requeue_list(q);
784} 785}
785EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
786 786
787void blk_mq_kick_requeue_list(struct request_queue *q) 787void blk_mq_kick_requeue_list(struct request_queue *q)
788{ 788{
@@ -1072,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1072 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); 1072 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
1073 1073
1074 spin_lock(&hctx->dispatch_wait_lock); 1074 spin_lock(&hctx->dispatch_wait_lock);
1075 list_del_init(&wait->entry); 1075 if (!list_empty(&wait->entry)) {
1076 struct sbitmap_queue *sbq;
1077
1078 list_del_init(&wait->entry);
1079 sbq = &hctx->tags->bitmap_tags;
1080 atomic_dec(&sbq->ws_active);
1081 }
1076 spin_unlock(&hctx->dispatch_wait_lock); 1082 spin_unlock(&hctx->dispatch_wait_lock);
1077 1083
1078 blk_mq_run_hw_queue(hctx, true); 1084 blk_mq_run_hw_queue(hctx, true);
@@ -1088,13 +1094,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
1088static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, 1094static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1089 struct request *rq) 1095 struct request *rq)
1090{ 1096{
1097 struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
1091 struct wait_queue_head *wq; 1098 struct wait_queue_head *wq;
1092 wait_queue_entry_t *wait; 1099 wait_queue_entry_t *wait;
1093 bool ret; 1100 bool ret;
1094 1101
1095 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) { 1102 if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
1096 if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 1103 blk_mq_sched_mark_restart_hctx(hctx);
1097 set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
1098 1104
1099 /* 1105 /*
1100 * It's possible that a tag was freed in the window between the 1106 * It's possible that a tag was freed in the window between the
@@ -1111,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1111 if (!list_empty_careful(&wait->entry)) 1117 if (!list_empty_careful(&wait->entry))
1112 return false; 1118 return false;
1113 1119
1114 wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait; 1120 wq = &bt_wait_ptr(sbq, hctx)->wait;
1115 1121
1116 spin_lock_irq(&wq->lock); 1122 spin_lock_irq(&wq->lock);
1117 spin_lock(&hctx->dispatch_wait_lock); 1123 spin_lock(&hctx->dispatch_wait_lock);
@@ -1121,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1121 return false; 1127 return false;
1122 } 1128 }
1123 1129
1130 atomic_inc(&sbq->ws_active);
1124 wait->flags &= ~WQ_FLAG_EXCLUSIVE; 1131 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
1125 __add_wait_queue(wq, wait); 1132 __add_wait_queue(wq, wait);
1126 1133
@@ -1141,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
1141 * someone else gets the wakeup. 1148 * someone else gets the wakeup.
1142 */ 1149 */
1143 list_del_init(&wait->entry); 1150 list_del_init(&wait->entry);
1151 atomic_dec(&sbq->ws_active);
1144 spin_unlock(&hctx->dispatch_wait_lock); 1152 spin_unlock(&hctx->dispatch_wait_lock);
1145 spin_unlock_irq(&wq->lock); 1153 spin_unlock_irq(&wq->lock);
1146 1154
@@ -2857,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2857 /* 2865 /*
2858 * Default to classic polling 2866 * Default to classic polling
2859 */ 2867 */
2860 q->poll_nsec = -1; 2868 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
2861 2869
2862 blk_mq_init_cpu_queues(q, set->nr_hw_queues); 2870 blk_mq_init_cpu_queues(q, set->nr_hw_queues);
2863 blk_mq_add_queue_tag_set(set, q); 2871 blk_mq_add_queue_tag_set(set, q);
@@ -3392,7 +3400,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
3392{ 3400{
3393 struct request *rq; 3401 struct request *rq;
3394 3402
3395 if (q->poll_nsec == -1) 3403 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
3396 return false; 3404 return false;
3397 3405
3398 if (!blk_qc_t_is_internal(cookie)) 3406 if (!blk_qc_t_is_internal(cookie))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index c11353a3749d..d704fc7766f4 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -41,6 +41,8 @@ void blk_mq_free_queue(struct request_queue *q);
41int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); 41int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
42void blk_mq_wake_waiters(struct request_queue *q); 42void blk_mq_wake_waiters(struct request_queue *q);
43bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool); 43bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
44void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
45 bool kick_requeue_list);
44void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list); 46void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
45bool blk_mq_get_driver_tag(struct request *rq); 47bool blk_mq_get_driver_tag(struct request *rq);
46struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, 48struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
@@ -222,15 +224,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
222 } 224 }
223} 225}
224 226
225static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
226 struct request *rq)
227{
228 if (rq->tag == -1 || rq->internal_tag == -1)
229 return;
230
231 __blk_mq_put_driver_tag(hctx, rq);
232}
233
234static inline void blk_mq_put_driver_tag(struct request *rq) 227static inline void blk_mq_put_driver_tag(struct request *rq)
235{ 228{
236 if (rq->tag == -1 || rq->internal_tag == -1) 229 if (rq->tag == -1 || rq->internal_tag == -1)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 59685918167e..422327089e0f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
360{ 360{
361 int val; 361 int val;
362 362
363 if (q->poll_nsec == -1) 363 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
364 val = -1; 364 val = BLK_MQ_POLL_CLASSIC;
365 else 365 else
366 val = q->poll_nsec / 1000; 366 val = q->poll_nsec / 1000;
367 367
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
380 if (err < 0) 380 if (err < 0)
381 return err; 381 return err;
382 382
383 if (val == -1) 383 if (val == BLK_MQ_POLL_CLASSIC)
384 q->poll_nsec = -1; 384 q->poll_nsec = BLK_MQ_POLL_CLASSIC;
385 else 385 else if (val >= 0)
386 q->poll_nsec = val * 1000; 386 q->poll_nsec = val * 1000;
387 else
388 return -EINVAL;
387 389
388 return count; 390 return count;
389} 391}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbabf1233..eec263c9019e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
1043 1043
1044 acpi_permanent_mmap = true; 1044 acpi_permanent_mmap = true;
1045 1045
1046 /* Initialize debug output. Linux does not use ACPICA defaults */
1047 acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
1048
1049#ifdef CONFIG_X86 1046#ifdef CONFIG_X86
1050 /* 1047 /*
1051 * If the machine falls into the DMI check table, 1048 * If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fca1420..d4244e7d0e38 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1150 cpc_read(cpunum, nominal_reg, &nom); 1150 cpc_read(cpunum, nominal_reg, &nom);
1151 perf_caps->nominal_perf = nom; 1151 perf_caps->nominal_perf = nom;
1152 1152
1153 cpc_read(cpunum, guaranteed_reg, &guaranteed); 1153 if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
1154 perf_caps->guaranteed_perf = guaranteed; 1154 IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1155 perf_caps->guaranteed_perf = 0;
1156 } else {
1157 cpc_read(cpunum, guaranteed_reg, &guaranteed);
1158 perf_caps->guaranteed_perf = guaranteed;
1159 }
1155 1160
1156 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); 1161 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1157 perf_caps->lowest_nonlinear_perf = min_nonlinear; 1162 perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 78db97687f26..c4b06cc075f9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
800 match.hrv = hrv; 800 match.hrv = hrv;
801 801
802 dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); 802 dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
803 put_device(dev);
803 return !!dev; 804 return !!dev;
804} 805}
805EXPORT_SYMBOL(acpi_dev_present); 806EXPORT_SYMBOL(acpi_dev_present);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
2057 size_t object_size = 0; 2057 size_t object_size = 0;
2058 2058
2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); 2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2060 if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) 2060 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2061 !IS_ALIGNED(offset, sizeof(u32)))
2061 return 0; 2062 return 0;
2062 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, 2063 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2063 offset, read_size); 2064 offset, read_size);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
927 927
928 index = page - alloc->pages; 928 index = page - alloc->pages;
929 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 929 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
930
931 mm = alloc->vma_vm_mm;
932 if (!mmget_not_zero(mm))
933 goto err_mmget;
934 if (!down_write_trylock(&mm->mmap_sem))
935 goto err_down_write_mmap_sem_failed;
930 vma = binder_alloc_get_vma(alloc); 936 vma = binder_alloc_get_vma(alloc);
931 if (vma) {
932 if (!mmget_not_zero(alloc->vma_vm_mm))
933 goto err_mmget;
934 mm = alloc->vma_vm_mm;
935 if (!down_read_trylock(&mm->mmap_sem))
936 goto err_down_write_mmap_sem_failed;
937 }
938 937
939 list_lru_isolate(lru, item); 938 list_lru_isolate(lru, item);
940 spin_unlock(lock); 939 spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
945 zap_page_range(vma, page_addr, PAGE_SIZE); 944 zap_page_range(vma, page_addr, PAGE_SIZE);
946 945
947 trace_binder_unmap_user_end(alloc, index); 946 trace_binder_unmap_user_end(alloc, index);
948
949 up_read(&mm->mmap_sem);
950 mmput(mm);
951 } 947 }
948 up_write(&mm->mmap_sem);
949 mmput(mm);
952 950
953 trace_binder_unmap_kernel_start(alloc, index); 951 trace_binder_unmap_kernel_start(alloc, index);
954 952
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9953a8..173e6f2dd9af 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
52/* Per the spec, only slot type and drawer type ODD can be supported */ 52/* Per the spec, only slot type and drawer type ODD can be supported */
53static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) 53static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
54{ 54{
55 char buf[16]; 55 char *buf;
56 unsigned int ret; 56 unsigned int ret;
57 struct rm_feature_desc *desc = (void *)(buf + 8); 57 struct rm_feature_desc *desc;
58 struct ata_taskfile tf; 58 struct ata_taskfile tf;
59 static const char cdb[] = { GPCMD_GET_CONFIGURATION, 59 static const char cdb[] = { GPCMD_GET_CONFIGURATION,
60 2, /* only 1 feature descriptor requested */ 60 2, /* only 1 feature descriptor requested */
61 0, 3, /* 3, removable medium feature */ 61 0, 3, /* 3, removable medium feature */
62 0, 0, 0,/* reserved */ 62 0, 0, 0,/* reserved */
63 0, sizeof(buf), 63 0, 16,
64 0, 0, 0, 64 0, 0, 0,
65 }; 65 };
66 66
67 buf = kzalloc(16, GFP_KERNEL);
68 if (!buf)
69 return ODD_MECH_TYPE_UNSUPPORTED;
70 desc = (void *)(buf + 8);
71
67 ata_tf_init(dev, &tf); 72 ata_tf_init(dev, &tf);
68 tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 73 tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
69 tf.command = ATA_CMD_PACKET; 74 tf.command = ATA_CMD_PACKET;
70 tf.protocol = ATAPI_PROT_PIO; 75 tf.protocol = ATAPI_PROT_PIO;
71 tf.lbam = sizeof(buf); 76 tf.lbam = 16;
72 77
73 ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 78 ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
74 buf, sizeof(buf), 0); 79 buf, 16, 0);
75 if (ret) 80 if (ret) {
81 kfree(buf);
76 return ODD_MECH_TYPE_UNSUPPORTED; 82 return ODD_MECH_TYPE_UNSUPPORTED;
83 }
77 84
78 if (be16_to_cpu(desc->feature_code) != 3) 85 if (be16_to_cpu(desc->feature_code) != 3) {
86 kfree(buf);
79 return ODD_MECH_TYPE_UNSUPPORTED; 87 return ODD_MECH_TYPE_UNSUPPORTED;
88 }
80 89
81 if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) 90 if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
91 kfree(buf);
82 return ODD_MECH_TYPE_SLOT; 92 return ODD_MECH_TYPE_SLOT;
83 else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) 93 } else if (desc->mech_type == 1 && desc->load == 0 &&
94 desc->eject == 1) {
95 kfree(buf);
84 return ODD_MECH_TYPE_DRAWER; 96 return ODD_MECH_TYPE_DRAWER;
85 else 97 } else {
98 kfree(buf);
86 return ODD_MECH_TYPE_UNSUPPORTED; 99 return ODD_MECH_TYPE_UNSUPPORTED;
100 }
87} 101}
88 102
89/* Test if ODD is zero power ready by sense code */ 103/* Test if ODD is zero power ready by sense code */
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 57410f9c5d44..c52c738e554a 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -164,9 +164,7 @@ config ARM_CHARLCD
164 line and the Linux version on the second line, but that's 164 line and the Linux version on the second line, but that's
165 still useful. 165 still useful.
166 166
167endif # AUXDISPLAY 167menuconfig PARPORT_PANEL
168
169menuconfig PANEL
170 tristate "Parallel port LCD/Keypad Panel support" 168 tristate "Parallel port LCD/Keypad Panel support"
171 depends on PARPORT 169 depends on PARPORT
172 select CHARLCD 170 select CHARLCD
@@ -178,7 +176,7 @@ menuconfig PANEL
178 compiled as a module, or linked into the kernel and started at boot. 176 compiled as a module, or linked into the kernel and started at boot.
179 If you don't understand what all this is about, say N. 177 If you don't understand what all this is about, say N.
180 178
181if PANEL 179if PARPORT_PANEL
182 180
183config PANEL_PARPORT 181config PANEL_PARPORT
184 int "Default parallel port number (0=LPT1)" 182 int "Default parallel port number (0=LPT1)"
@@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL
419 417
420 Default for the 'BL' pin in custom profile is '0' (uncontrolled). 418 Default for the 'BL' pin in custom profile is '0' (uncontrolled).
421 419
420endif # PARPORT_PANEL
421
422config PANEL_CHANGE_MESSAGE 422config PANEL_CHANGE_MESSAGE
423 bool "Change LCD initialization message ?" 423 bool "Change LCD initialization message ?"
424 depends on CHARLCD
424 default "n" 425 default "n"
425 ---help--- 426 ---help---
426 This allows you to replace the boot message indicating the kernel version 427 This allows you to replace the boot message indicating the kernel version
@@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE
444 An empty message will only clear the display at driver init time. Any other 445 An empty message will only clear the display at driver init time. Any other
445 printf()-formatted message is valid with newline and escape codes. 446 printf()-formatted message is valid with newline and escape codes.
446 447
447endif # PANEL 448choice
449 prompt "Backlight initial state"
450 default CHARLCD_BL_FLASH
451
452 config CHARLCD_BL_OFF
453 bool "Off"
454 help
455 Backlight is initially turned off
456
457 config CHARLCD_BL_ON
458 bool "On"
459 help
460 Backlight is initially turned on
461
462 config CHARLCD_BL_FLASH
463 bool "Flash"
464 help
465 Backlight is flashed briefly on init
466
467endchoice
468
469endif # AUXDISPLAY
470
471config PANEL
472 tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
473 depends on PARPORT
474 select AUXDISPLAY
475 select PARPORT_PANEL
448 476
449config CHARLCD 477config CHARLCD
450 tristate "Character LCD core support" if COMPILE_TEST 478 tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 7ac6776ca3f6..cf54b5efb07e 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
10obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o 10obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
11obj-$(CONFIG_HD44780) += hd44780.o 11obj-$(CONFIG_HD44780) += hd44780.o
12obj-$(CONFIG_HT16K33) += ht16k33.o 12obj-$(CONFIG_HT16K33) += ht16k33.o
13obj-$(CONFIG_PANEL) += panel.o 13obj-$(CONFIG_PARPORT_PANEL) += panel.o
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 60e0b772673f..92745efefb54 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -91,7 +91,7 @@ struct charlcd_priv {
91 unsigned long long drvdata[0]; 91 unsigned long long drvdata[0];
92}; 92};
93 93
94#define to_priv(p) container_of(p, struct charlcd_priv, lcd) 94#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd)
95 95
96/* Device single-open policy control */ 96/* Device single-open policy control */
97static atomic_t charlcd_available = ATOMIC_INIT(1); 97static atomic_t charlcd_available = ATOMIC_INIT(1);
@@ -105,7 +105,7 @@ static void long_sleep(int ms)
105/* turn the backlight on or off */ 105/* turn the backlight on or off */
106static void charlcd_backlight(struct charlcd *lcd, int on) 106static void charlcd_backlight(struct charlcd *lcd, int on)
107{ 107{
108 struct charlcd_priv *priv = to_priv(lcd); 108 struct charlcd_priv *priv = charlcd_to_priv(lcd);
109 109
110 if (!lcd->ops->backlight) 110 if (!lcd->ops->backlight)
111 return; 111 return;
@@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work)
134/* turn the backlight on for a little while */ 134/* turn the backlight on for a little while */
135void charlcd_poke(struct charlcd *lcd) 135void charlcd_poke(struct charlcd *lcd)
136{ 136{
137 struct charlcd_priv *priv = to_priv(lcd); 137 struct charlcd_priv *priv = charlcd_to_priv(lcd);
138 138
139 if (!lcd->ops->backlight) 139 if (!lcd->ops->backlight)
140 return; 140 return;
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke);
152 152
153static void charlcd_gotoxy(struct charlcd *lcd) 153static void charlcd_gotoxy(struct charlcd *lcd)
154{ 154{
155 struct charlcd_priv *priv = to_priv(lcd); 155 struct charlcd_priv *priv = charlcd_to_priv(lcd);
156 unsigned int addr; 156 unsigned int addr;
157 157
158 /* 158 /*
@@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd)
170 170
171static void charlcd_home(struct charlcd *lcd) 171static void charlcd_home(struct charlcd *lcd)
172{ 172{
173 struct charlcd_priv *priv = to_priv(lcd); 173 struct charlcd_priv *priv = charlcd_to_priv(lcd);
174 174
175 priv->addr.x = 0; 175 priv->addr.x = 0;
176 priv->addr.y = 0; 176 priv->addr.y = 0;
@@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd)
179 179
180static void charlcd_print(struct charlcd *lcd, char c) 180static void charlcd_print(struct charlcd *lcd, char c)
181{ 181{
182 struct charlcd_priv *priv = to_priv(lcd); 182 struct charlcd_priv *priv = charlcd_to_priv(lcd);
183 183
184 if (priv->addr.x < lcd->bwidth) { 184 if (priv->addr.x < lcd->bwidth) {
185 if (lcd->char_conv) 185 if (lcd->char_conv)
@@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd)
211/* clears the display and resets X/Y */ 211/* clears the display and resets X/Y */
212static void charlcd_clear_display(struct charlcd *lcd) 212static void charlcd_clear_display(struct charlcd *lcd)
213{ 213{
214 struct charlcd_priv *priv = to_priv(lcd); 214 struct charlcd_priv *priv = charlcd_to_priv(lcd);
215 215
216 lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR); 216 lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
217 priv->addr.x = 0; 217 priv->addr.x = 0;
@@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd)
223static int charlcd_init_display(struct charlcd *lcd) 223static int charlcd_init_display(struct charlcd *lcd)
224{ 224{
225 void (*write_cmd_raw)(struct charlcd *lcd, int cmd); 225 void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
226 struct charlcd_priv *priv = to_priv(lcd); 226 struct charlcd_priv *priv = charlcd_to_priv(lcd);
227 u8 init; 227 u8 init;
228 228
229 if (lcd->ifwidth != 4 && lcd->ifwidth != 8) 229 if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
@@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
369 369
370static inline int handle_lcd_special_code(struct charlcd *lcd) 370static inline int handle_lcd_special_code(struct charlcd *lcd)
371{ 371{
372 struct charlcd_priv *priv = to_priv(lcd); 372 struct charlcd_priv *priv = charlcd_to_priv(lcd);
373 373
374 /* LCD special codes */ 374 /* LCD special codes */
375 375
@@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
580 580
581static void charlcd_write_char(struct charlcd *lcd, char c) 581static void charlcd_write_char(struct charlcd *lcd, char c)
582{ 582{
583 struct charlcd_priv *priv = to_priv(lcd); 583 struct charlcd_priv *priv = charlcd_to_priv(lcd);
584 584
585 /* first, we'll test if we're in escape mode */ 585 /* first, we'll test if we're in escape mode */
586 if ((c != '\n') && priv->esc_seq.len >= 0) { 586 if ((c != '\n') && priv->esc_seq.len >= 0) {
@@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
705 705
706static int charlcd_open(struct inode *inode, struct file *file) 706static int charlcd_open(struct inode *inode, struct file *file)
707{ 707{
708 struct charlcd_priv *priv = to_priv(the_charlcd); 708 struct charlcd_priv *priv = charlcd_to_priv(the_charlcd);
709 int ret; 709 int ret;
710 710
711 ret = -EBUSY; 711 ret = -EBUSY;
@@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
763 } 763 }
764} 764}
765 765
766#ifdef CONFIG_PANEL_BOOT_MESSAGE
767#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
768#else
769#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
770#endif
771
772#ifdef CONFIG_CHARLCD_BL_ON
773#define LCD_INIT_BL "\x1b[L+"
774#elif defined(CONFIG_CHARLCD_BL_FLASH)
775#define LCD_INIT_BL "\x1b[L*"
776#else
777#define LCD_INIT_BL "\x1b[L-"
778#endif
779
766/* initialize the LCD driver */ 780/* initialize the LCD driver */
767static int charlcd_init(struct charlcd *lcd) 781static int charlcd_init(struct charlcd *lcd)
768{ 782{
769 struct charlcd_priv *priv = to_priv(lcd); 783 struct charlcd_priv *priv = charlcd_to_priv(lcd);
770 int ret; 784 int ret;
771 785
772 if (lcd->ops->backlight) { 786 if (lcd->ops->backlight) {
@@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd)
784 return ret; 798 return ret;
785 799
786 /* display a short message */ 800 /* display a short message */
787#ifdef CONFIG_PANEL_CHANGE_MESSAGE 801 charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);
788#ifdef CONFIG_PANEL_BOOT_MESSAGE 802
789 charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
790#endif
791#else
792 charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
793#endif
794 /* clear the display on the next device opening */ 803 /* clear the display on the next device opening */
795 priv->must_clear = true; 804 priv->must_clear = true;
796 charlcd_home(lcd); 805 charlcd_home(lcd);
@@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size)
818} 827}
819EXPORT_SYMBOL_GPL(charlcd_alloc); 828EXPORT_SYMBOL_GPL(charlcd_alloc);
820 829
830void charlcd_free(struct charlcd *lcd)
831{
832 kfree(charlcd_to_priv(lcd));
833}
834EXPORT_SYMBOL_GPL(charlcd_free);
835
821static int panel_notify_sys(struct notifier_block *this, unsigned long code, 836static int panel_notify_sys(struct notifier_block *this, unsigned long code,
822 void *unused) 837 void *unused)
823{ 838{
@@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register);
866 881
867int charlcd_unregister(struct charlcd *lcd) 882int charlcd_unregister(struct charlcd *lcd)
868{ 883{
869 struct charlcd_priv *priv = to_priv(lcd); 884 struct charlcd_priv *priv = charlcd_to_priv(lcd);
870 885
871 unregister_reboot_notifier(&panel_notifier); 886 unregister_reboot_notifier(&panel_notifier);
872 charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-"); 887 charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 9ad93ea42fdc..ab15b64707ad 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev)
271 return 0; 271 return 0;
272 272
273fail: 273fail:
274 kfree(lcd); 274 charlcd_free(lcd);
275 return ret; 275 return ret;
276} 276}
277 277
@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
280 struct charlcd *lcd = platform_get_drvdata(pdev); 280 struct charlcd *lcd = platform_get_drvdata(pdev);
281 281
282 charlcd_unregister(lcd); 282 charlcd_unregister(lcd);
283
284 charlcd_free(lcd);
283 return 0; 285 return 0;
284} 286}
285 287
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 21b9b2f2470a..e06de63497cf 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1620,7 +1620,7 @@ err_lcd_unreg:
1620 if (lcd.enabled) 1620 if (lcd.enabled)
1621 charlcd_unregister(lcd.charlcd); 1621 charlcd_unregister(lcd.charlcd);
1622err_unreg_device: 1622err_unreg_device:
1623 kfree(lcd.charlcd); 1623 charlcd_free(lcd.charlcd);
1624 lcd.charlcd = NULL; 1624 lcd.charlcd = NULL;
1625 parport_unregister_device(pprt); 1625 parport_unregister_device(pprt);
1626 pprt = NULL; 1626 pprt = NULL;
@@ -1647,7 +1647,7 @@ static void panel_detach(struct parport *port)
1647 if (lcd.enabled) { 1647 if (lcd.enabled) {
1648 charlcd_unregister(lcd.charlcd); 1648 charlcd_unregister(lcd.charlcd);
1649 lcd.initialized = false; 1649 lcd.initialized = false;
1650 kfree(lcd.charlcd); 1650 charlcd_free(lcd.charlcd);
1651 lcd.charlcd = NULL; 1651 lcd.charlcd = NULL;
1652 } 1652 }
1653 1653
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 76c9969b7124..96a6dc9d305c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1469 if (IS_ERR(gpd_data)) 1469 if (IS_ERR(gpd_data))
1470 return PTR_ERR(gpd_data); 1470 return PTR_ERR(gpd_data);
1471 1471
1472 genpd_lock(genpd);
1473
1474 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1472 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1475 if (ret) 1473 if (ret)
1476 goto out; 1474 goto out;
1477 1475
1476 genpd_lock(genpd);
1477
1478 dev_pm_domain_set(dev, &genpd->domain); 1478 dev_pm_domain_set(dev, &genpd->domain);
1479 1479
1480 genpd->device_count++; 1480 genpd->device_count++;
@@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1482 1482
1483 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1483 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1484 1484
1485 out:
1486 genpd_unlock(genpd); 1485 genpd_unlock(genpd);
1487 1486 out:
1488 if (ret) 1487 if (ret)
1489 genpd_free_dev_data(dev, gpd_data); 1488 genpd_free_dev_data(dev, gpd_data);
1490 else 1489 else
@@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
1533 genpd->device_count--; 1532 genpd->device_count--;
1534 genpd->max_off_time_changed = true; 1533 genpd->max_off_time_changed = true;
1535 1534
1536 if (genpd->detach_dev)
1537 genpd->detach_dev(genpd, dev);
1538
1539 dev_pm_domain_set(dev, NULL); 1535 dev_pm_domain_set(dev, NULL);
1540 1536
1541 list_del_init(&pdd->list_node); 1537 list_del_init(&pdd->list_node);
1542 1538
1543 genpd_unlock(genpd); 1539 genpd_unlock(genpd);
1544 1540
1541 if (genpd->detach_dev)
1542 genpd->detach_dev(genpd, dev);
1543
1545 genpd_free_dev_data(dev, gpd_data); 1544 genpd_free_dev_data(dev, gpd_data);
1546 1545
1547 return 0; 1546 return 0;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 1fad9291f6aa..7fc5a18e02ad 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
472 val, nval); 472 val, nval);
473} 473}
474 474
475struct fwnode_handle * 475static struct fwnode_handle *
476software_node_get_parent(const struct fwnode_handle *fwnode) 476software_node_get_parent(const struct fwnode_handle *fwnode)
477{ 477{
478 struct software_node *swnode = to_software_node(fwnode); 478 struct software_node *swnode = to_software_node(fwnode);
@@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
481 NULL; 481 NULL;
482} 482}
483 483
484struct fwnode_handle * 484static struct fwnode_handle *
485software_node_get_next_child(const struct fwnode_handle *fwnode, 485software_node_get_next_child(const struct fwnode_handle *fwnode,
486 struct fwnode_handle *child) 486 struct fwnode_handle *child)
487{ 487{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e6edd568214..bf1c61cab8eb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
656 return -EBADF; 656 return -EBADF;
657 657
658 l = f->f_mapping->host->i_bdev->bd_disk->private_data; 658 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
659 if (l->lo_state == Lo_unbound) { 659 if (l->lo_state != Lo_bound) {
660 return -EINVAL; 660 return -EINVAL;
661 } 661 }
662 f = l->lo_backing_file; 662 f = l->lo_backing_file;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 96670eefaeb2..377a694dc228 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -749,8 +749,12 @@ static int pcd_detect(void)
749 return 0; 749 return 0;
750 750
751 printk("%s: No CD-ROM drive found\n", name); 751 printk("%s: No CD-ROM drive found\n", name);
752 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) 752 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
753 blk_cleanup_queue(cd->disk->queue);
754 cd->disk->queue = NULL;
755 blk_mq_free_tag_set(&cd->tag_set);
753 put_disk(cd->disk); 756 put_disk(cd->disk);
757 }
754 pi_unregister_driver(par_drv); 758 pi_unregister_driver(par_drv);
755 return -1; 759 return -1;
756} 760}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e92e7a8eeeb2..103b617cdc31 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -761,8 +761,12 @@ static int pf_detect(void)
761 return 0; 761 return 0;
762 762
763 printk("%s: No ATAPI disk detected\n", name); 763 printk("%s: No ATAPI disk detected\n", name);
764 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) 764 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
765 blk_cleanup_queue(pf->disk->queue);
766 pf->disk->queue = NULL;
767 blk_mq_free_tag_set(&pf->tag_set);
765 put_disk(pf->disk); 768 put_disk(pf->disk);
769 }
766 pi_unregister_driver(par_drv); 770 pi_unregister_driver(par_drv);
767 return -1; 771 return -1;
768} 772}
@@ -1047,13 +1051,15 @@ static void __exit pf_exit(void)
1047 int unit; 1051 int unit;
1048 unregister_blkdev(major, name); 1052 unregister_blkdev(major, name);
1049 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { 1053 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
1050 if (!pf->present) 1054 if (pf->present)
1051 continue; 1055 del_gendisk(pf->disk);
1052 del_gendisk(pf->disk); 1056
1053 blk_cleanup_queue(pf->disk->queue); 1057 blk_cleanup_queue(pf->disk->queue);
1054 blk_mq_free_tag_set(&pf->tag_set); 1058 blk_mq_free_tag_set(&pf->tag_set);
1055 put_disk(pf->disk); 1059 put_disk(pf->disk);
1056 pi_release(pf->pi); 1060
1061 if (pf->present)
1062 pi_release(pf->pi);
1057 } 1063 }
1058} 1064}
1059 1065
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4ba967d65cf9..2210c1b9491b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private)
833 pctx->opts->queue_depth = intval; 833 pctx->opts->queue_depth = intval;
834 break; 834 break;
835 case Opt_alloc_size: 835 case Opt_alloc_size:
836 if (intval < 1) { 836 if (intval < SECTOR_SIZE) {
837 pr_err("alloc_size out of range\n"); 837 pr_err("alloc_size out of range\n");
838 return -EINVAL; 838 return -EINVAL;
839 } 839 }
@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
924 kref_put(&rbdc->kref, rbd_client_release); 924 kref_put(&rbdc->kref, rbd_client_release);
925} 925}
926 926
927static int wait_for_latest_osdmap(struct ceph_client *client)
928{
929 u64 newest_epoch;
930 int ret;
931
932 ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
933 if (ret)
934 return ret;
935
936 if (client->osdc.osdmap->epoch >= newest_epoch)
937 return 0;
938
939 ceph_osdc_maybe_request_map(&client->osdc);
940 return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
941 client->options->mount_timeout);
942}
943
944/* 927/*
945 * Get a ceph client with specific addr and configuration, if one does 928 * Get a ceph client with specific addr and configuration, if one does
946 * not exist create it. Either way, ceph_opts is consumed by this 929 * not exist create it. Either way, ceph_opts is consumed by this
@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
960 * Using an existing client. Make sure ->pg_pools is up to 943 * Using an existing client. Make sure ->pg_pools is up to
961 * date before we look up the pool id in do_rbd_add(). 944 * date before we look up the pool id in do_rbd_add().
962 */ 945 */
963 ret = wait_for_latest_osdmap(rbdc->client); 946 ret = ceph_wait_for_latest_osdmap(rbdc->client,
947 rbdc->client->options->mount_timeout);
964 if (ret) { 948 if (ret) {
965 rbd_warn(NULL, "failed to get latest osdmap: %d", ret); 949 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
966 rbd_put_client(rbdc); 950 rbd_put_client(rbdc);
@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
4203 q->limits.max_sectors = queue_max_hw_sectors(q); 4187 q->limits.max_sectors = queue_max_hw_sectors(q);
4204 blk_queue_max_segments(q, USHRT_MAX); 4188 blk_queue_max_segments(q, USHRT_MAX);
4205 blk_queue_max_segment_size(q, UINT_MAX); 4189 blk_queue_max_segment_size(q, UINT_MAX);
4206 blk_queue_io_min(q, objset_bytes); 4190 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
4207 blk_queue_io_opt(q, objset_bytes); 4191 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
4208 4192
4209 if (rbd_dev->opts->trim) { 4193 if (rbd_dev->opts->trim) {
4210 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 4194 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4211 q->limits.discard_granularity = objset_bytes; 4195 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
4212 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); 4196 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4213 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); 4197 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4214 } 4198 }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e7a5f1d1c314..399cad7daae7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
290 struct zram *zram = dev_to_zram(dev); 290 struct zram *zram = dev_to_zram(dev);
291 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; 291 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
292 int index; 292 int index;
293 char mode_buf[8];
294 ssize_t sz;
295 293
296 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 294 if (!sysfs_streq(buf, "all"))
297 if (sz <= 0)
298 return -EINVAL;
299
300 /* ignore trailing new line */
301 if (mode_buf[sz - 1] == '\n')
302 mode_buf[sz - 1] = 0x00;
303
304 if (strcmp(mode_buf, "all"))
305 return -EINVAL; 295 return -EINVAL;
306 296
307 down_read(&zram->init_lock); 297 down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
635 struct bio bio; 625 struct bio bio;
636 struct bio_vec bio_vec; 626 struct bio_vec bio_vec;
637 struct page *page; 627 struct page *page;
638 ssize_t ret, sz; 628 ssize_t ret;
639 char mode_buf[8]; 629 int mode;
640 int mode = -1;
641 unsigned long blk_idx = 0; 630 unsigned long blk_idx = 0;
642 631
643 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 632 if (sysfs_streq(buf, "idle"))
644 if (sz <= 0)
645 return -EINVAL;
646
647 /* ignore trailing newline */
648 if (mode_buf[sz - 1] == '\n')
649 mode_buf[sz - 1] = 0x00;
650
651 if (!strcmp(mode_buf, "idle"))
652 mode = IDLE_WRITEBACK; 633 mode = IDLE_WRITEBACK;
653 else if (!strcmp(mode_buf, "huge")) 634 else if (sysfs_streq(buf, "huge"))
654 mode = HUGE_WRITEBACK; 635 mode = HUGE_WRITEBACK;
655 636 else
656 if (mode == -1)
657 return -EINVAL; 637 return -EINVAL;
658 638
659 down_read(&zram->init_lock); 639 down_read(&zram->init_lock);
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index a8dd80576c95..857f8c086274 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void)
31 return ~readw(tcd); 31 return ~readw(tcd);
32} 32}
33 33
34static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) 34static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
35{ 35{
36 unsigned long rate; 36 unsigned long rate = clk_get_rate(clock);
37
38 if (!base)
39 return -ENOMEM;
40 if (IS_ERR(clock))
41 return PTR_ERR(clock);
42
43 rate = clk_get_rate(clock);
44 37
45 tcd = base; 38 tcd = base;
46 39
@@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
48 clocksource_mmio_readw_down); 41 clocksource_mmio_readw_down);
49 42
50 sched_clock_register(clps711x_sched_clock_read, 16, rate); 43 sched_clock_register(clps711x_sched_clock_read, 16, rate);
51
52 return 0;
53} 44}
54 45
55static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id) 46static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
@@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
67 struct clock_event_device *clkevt; 58 struct clock_event_device *clkevt;
68 unsigned long rate; 59 unsigned long rate;
69 60
70 if (!irq)
71 return -EINVAL;
72 if (!base)
73 return -ENOMEM;
74 if (IS_ERR(clock))
75 return PTR_ERR(clock);
76
77 clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); 61 clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
78 if (!clkevt) 62 if (!clkevt)
79 return -ENOMEM; 63 return -ENOMEM;
@@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
93 "clps711x-timer", clkevt); 77 "clps711x-timer", clkevt);
94} 78}
95 79
96void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
97 unsigned int irq)
98{
99 struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
100 struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
101
102 BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
103 BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
104}
105
106#ifdef CONFIG_TIMER_OF
107static int __init clps711x_timer_init(struct device_node *np) 80static int __init clps711x_timer_init(struct device_node *np)
108{ 81{
109 unsigned int irq = irq_of_parse_and_map(np, 0); 82 unsigned int irq = irq_of_parse_and_map(np, 0);
110 struct clk *clock = of_clk_get(np, 0); 83 struct clk *clock = of_clk_get(np, 0);
111 void __iomem *base = of_iomap(np, 0); 84 void __iomem *base = of_iomap(np, 0);
112 85
86 if (!base)
87 return -ENOMEM;
88 if (!irq)
89 return -EINVAL;
90 if (IS_ERR(clock))
91 return PTR_ERR(clock);
92
113 switch (of_alias_get_id(np, "timer")) { 93 switch (of_alias_get_id(np, "timer")) {
114 case CLPS711X_CLKSRC_CLOCKSOURCE: 94 case CLPS711X_CLKSRC_CLOCKSOURCE:
115 return _clps711x_clksrc_init(clock, base); 95 clps711x_clksrc_init(clock, base);
96 break;
116 case CLPS711X_CLKSRC_CLOCKEVENT: 97 case CLPS711X_CLKSRC_CLOCKEVENT:
117 return _clps711x_clkevt_init(clock, base, irq); 98 return _clps711x_clkevt_init(clock, base, irq);
118 default: 99 default:
119 return -EINVAL; 100 return -EINVAL;
120 } 101 }
102
103 return 0;
121} 104}
122TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); 105TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
123#endif
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 54f8a331b53a..37671a5d4ed9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
67 return IRQ_HANDLED; 67 return IRQ_HANDLED;
68} 68}
69 69
70struct irqaction gic_compare_irqaction = { 70static struct irqaction gic_compare_irqaction = {
71 .handler = gic_compare_interrupt, 71 .handler = gic_compare_interrupt,
72 .percpu_dev_id = &gic_clockevent_device, 72 .percpu_dev_id = &gic_clockevent_device,
73 .flags = IRQF_PERCPU | IRQF_TIMER, 73 .flags = IRQF_PERCPU | IRQF_TIMER,
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d6fa..f987027ca566 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs)
71 return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV)); 71 return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
72} 72}
73 73
74void tc_clksrc_suspend(struct clocksource *cs) 74static void tc_clksrc_suspend(struct clocksource *cs)
75{ 75{
76 int i; 76 int i;
77 77
@@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs)
86 bmr_cache = readl(tcaddr + ATMEL_TC_BMR); 86 bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
87} 87}
88 88
89void tc_clksrc_resume(struct clocksource *cs) 89static void tc_clksrc_resume(struct clocksource *cs)
90{ 90{
91 int i; 91 int i;
92 92
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e8163693e936..5e6038fbf115 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
58static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = { 58static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
59 .name = "riscv_clocksource", 59 .name = "riscv_clocksource",
60 .rating = 300, 60 .rating = 300,
61 .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), 61 .mask = CLOCKSOURCE_MASK(64),
62 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 62 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
63 .read = riscv_clocksource_rdtime, 63 .read = riscv_clocksource_rdtime,
64}; 64};
@@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
120 return error; 120 return error;
121 } 121 }
122 122
123 sched_clock_register(riscv_sched_clock, 123 sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
124 BITS_PER_LONG, riscv_timebase);
125 124
126 error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING, 125 error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
127 "clockevents/riscv/timer:starting", 126 "clockevents/riscv/timer:starting",
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index c364027638e1..3352da6ed61f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -586,8 +586,8 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
586} 586}
587 587
588/* Optimized set_load which removes costly spin wait in timer_start */ 588/* Optimized set_load which removes costly spin wait in timer_start */
589int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, 589static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
590 unsigned int load) 590 int autoreload, unsigned int load)
591{ 591{
592 u32 l; 592 u32 l;
593 593
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e22f0dbaebb1..b599c7318aab 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
385 if (ret) 385 if (ret)
386 return ret; 386 return ret;
387 387
388 return cppc_perf.guaranteed_perf; 388 if (cppc_perf.guaranteed_perf)
389 return cppc_perf.guaranteed_perf;
390
391 return cppc_perf.nominal_perf;
389} 392}
390 393
391#else /* CONFIG_ACPI_CPPC_LIB */ 394#else /* CONFIG_ACPI_CPPC_LIB */
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427766b8..2b51e0718c9f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
189 189
190 clk_put(priv->clk); 190 clk_put(priv->clk);
191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
192 kfree(priv);
193 dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 192 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
193 kfree(priv);
194 194
195 return 0; 195 return 0;
196} 196}
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 4e0eede599a8..ac0301b69593 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
1578 1578
1579 dmadev->nr_channels = nr_channels; 1579 dmadev->nr_channels = nr_channels;
1580 dmadev->nr_requests = nr_requests; 1580 dmadev->nr_requests = nr_requests;
1581 ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", 1581 device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
1582 dmadev->ahb_addr_masks, 1582 dmadev->ahb_addr_masks,
1583 count); 1583 count);
1584 if (ret)
1585 return ret;
1586 dmadev->nr_ahb_addr_masks = count; 1584 dmadev->nr_ahb_addr_masks = count;
1587 1585
1588 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1586 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0cea73..12acdac85820 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
132 if (err < 0) 132 if (err < 0)
133 goto out; 133 goto out;
134 134
135 if (err & BIT(pos)) 135 if (value & BIT(pos)) {
136 err = -EACCES; 136 err = -EPERM;
137 goto out;
138 }
137 139
138 err = 0; 140 err = 0;
139 141
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4fb9e7..217507002dbc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
1224 1224
1225 gpio->offset_timer = 1225 gpio->offset_timer =
1226 devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); 1226 devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
1227 if (!gpio->offset_timer)
1228 return -ENOMEM;
1227 1229
1228 return aspeed_gpio_setup_irqs(gpio, pdev); 1230 return aspeed_gpio_setup_irqs(gpio, pdev);
1229} 1231}
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd2369c2ca..a09d2f9ebacc 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
148 mutex_init(&exar_gpio->lock); 148 mutex_init(&exar_gpio->lock);
149 149
150 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); 150 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
151 if (index < 0)
152 goto err_destroy;
151 153
152 sprintf(exar_gpio->name, "exar_gpio%d", index); 154 sprintf(exar_gpio->name, "exar_gpio%d", index);
153 exar_gpio->gpio_chip.label = exar_gpio->name; 155 exar_gpio->gpio_chip.label = exar_gpio->name;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959e8993..b6a4efce7c92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
204 struct gpio_mockup_chip *chip; 204 struct gpio_mockup_chip *chip;
205 struct seq_file *sfile; 205 struct seq_file *sfile;
206 struct gpio_chip *gc; 206 struct gpio_chip *gc;
207 int val, cnt;
207 char buf[3]; 208 char buf[3];
208 int val, rv;
209 209
210 if (*ppos != 0) 210 if (*ppos != 0)
211 return 0; 211 return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
216 gc = &chip->gc; 216 gc = &chip->gc;
217 217
218 val = gpio_mockup_get(gc, priv->offset); 218 val = gpio_mockup_get(gc, priv->offset);
219 snprintf(buf, sizeof(buf), "%d\n", val); 219 cnt = snprintf(buf, sizeof(buf), "%d\n", val);
220 220
221 rv = copy_to_user(usr_buf, buf, sizeof(buf)); 221 return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
222 if (rv)
223 return rv;
224
225 return sizeof(buf) - 1;
226} 222}
227 223
228static ssize_t gpio_mockup_debugfs_write(struct file *file, 224static ssize_t gpio_mockup_debugfs_write(struct file *file,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab70f6e..6a3ec575a404 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
120 * to determine if the flags should have inverted semantics. 120 * to determine if the flags should have inverted semantics.
121 */ 121 */
122 if (IS_ENABLED(CONFIG_SPI_MASTER) && 122 if (IS_ENABLED(CONFIG_SPI_MASTER) &&
123 of_property_read_bool(np, "cs-gpios")) { 123 of_property_read_bool(np, "cs-gpios") &&
124 !strcmp(propname, "cs-gpios")) {
124 struct device_node *child; 125 struct device_node *child;
125 u32 cs; 126 u32 cs;
126 int ret; 127 int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
142 * conflict and the "spi-cs-high" flag will 143 * conflict and the "spi-cs-high" flag will
143 * take precedence. 144 * take precedence.
144 */ 145 */
145 if (of_property_read_bool(np, "spi-cs-high")) { 146 if (of_property_read_bool(child, "spi-cs-high")) {
146 if (*flags & OF_GPIO_ACTIVE_LOW) { 147 if (*flags & OF_GPIO_ACTIVE_LOW) {
147 pr_warn("%s GPIO handle specifies active low - ignored\n", 148 pr_warn("%s GPIO handle specifies active low - ignored\n",
148 of_node_full_name(np)); 149 of_node_full_name(child));
149 *flags &= ~OF_GPIO_ACTIVE_LOW; 150 *flags &= ~OF_GPIO_ACTIVE_LOW;
150 } 151 }
151 } else { 152 } else {
152 if (!(*flags & OF_GPIO_ACTIVE_LOW)) 153 if (!(*flags & OF_GPIO_ACTIVE_LOW))
153 pr_info("%s enforce active low on chipselect handle\n", 154 pr_info("%s enforce active low on chipselect handle\n",
154 of_node_full_name(np)); 155 of_node_full_name(child));
155 *flags |= OF_GPIO_ACTIVE_LOW; 156 *flags |= OF_GPIO_ACTIVE_LOW;
156 } 157 }
157 break; 158 break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
717 718
718 of_node_get(chip->of_node); 719 of_node_get(chip->of_node);
719 720
720 return of_gpiochip_scan_gpios(chip); 721 status = of_gpiochip_scan_gpios(chip);
722 if (status) {
723 of_node_put(chip->of_node);
724 gpiochip_remove_pin_ranges(chip);
725 }
726
727 return status;
721} 728}
722 729
723void of_gpiochip_remove(struct gpio_chip *chip) 730void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af0733581..0495bf1d480a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
2776 } 2776 }
2777 2777
2778 config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); 2778 config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
2779 return gpio_set_config(chip, gpio_chip_hwgpio(desc), config); 2779 return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
2780} 2780}
2781EXPORT_SYMBOL_GPL(gpiod_set_debounce); 2781EXPORT_SYMBOL_GPL(gpiod_set_debounce);
2782 2782
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
2813 packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, 2813 packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
2814 !transitory); 2814 !transitory);
2815 gpio = gpio_chip_hwgpio(desc); 2815 gpio = gpio_chip_hwgpio(desc);
2816 rc = gpio_set_config(chip, gpio, packed); 2816 rc = chip->set_config(chip, gpio, packed);
2817 if (rc == -ENOTSUPP) { 2817 if (rc == -ENOTSUPP) {
2818 dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", 2818 dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
2819 gpio); 2819 gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bfa9062ce6b9..16fcb56c232b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
700 struct amdgpu_vm_bo_base *bo_base, *tmp; 700 struct amdgpu_vm_bo_base *bo_base, *tmp;
701 int r = 0; 701 int r = 0;
702 702
703 vm->bulk_moveable &= list_empty(&vm->evicted);
704
703 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { 705 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
704 struct amdgpu_bo *bo = bo_base->bo; 706 struct amdgpu_bo *bo = bo_base->bo;
705 707
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 600259b4e291..2fe8397241ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
742 } 742 }
743 743
744 ring->vm_inv_eng = inv_eng - 1; 744 ring->vm_inv_eng = inv_eng - 1;
745 change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); 745 vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
746 746
747 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", 747 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
748 ring->name, ring->vm_inv_eng, ring->funcs->vmhub); 748 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fb27783d7a54..81127f7d6ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
5429 struct amdgpu_dm_connector *aconnector = 5429 struct amdgpu_dm_connector *aconnector =
5430 to_amdgpu_dm_connector(new_con_state->base.connector); 5430 to_amdgpu_dm_connector(new_con_state->base.connector);
5431 struct drm_display_mode *mode = &new_crtc_state->base.mode; 5431 struct drm_display_mode *mode = &new_crtc_state->base.mode;
5432 int vrefresh = drm_mode_vrefresh(mode);
5432 5433
5433 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 5434 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
5434 aconnector->min_vfreq <= drm_mode_vrefresh(mode); 5435 vrefresh >= aconnector->min_vfreq &&
5436 vrefresh <= aconnector->max_vfreq;
5435 5437
5436 if (new_crtc_state->vrr_supported) { 5438 if (new_crtc_state->vrr_supported) {
5437 new_crtc_state->stream->ignore_msa_timing_param = true; 5439 new_crtc_state->stream->ignore_msa_timing_param = true;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..05bbc2b622fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
376 synchronize_srcu(&drm_unplug_srcu); 376 synchronize_srcu(&drm_unplug_srcu);
377 377
378 drm_dev_unregister(dev); 378 drm_dev_unregister(dev);
379 379 drm_dev_put(dev);
380 mutex_lock(&drm_global_mutex);
381 if (dev->open_count == 0)
382 drm_dev_put(dev);
383 mutex_unlock(&drm_global_mutex);
384} 380}
385EXPORT_SYMBOL(drm_dev_unplug); 381EXPORT_SYMBOL(drm_dev_unplug);
386 382
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..af2ab640cadb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1963 best_depth = fmt->depth; 1963 best_depth = fmt->depth;
1964 } 1964 }
1965 } 1965 }
1966 if (sizes.surface_depth != best_depth) { 1966 if (sizes.surface_depth != best_depth && best_depth) {
1967 DRM_INFO("requested bpp %d, scaled depth down to %d", 1967 DRM_INFO("requested bpp %d, scaled depth down to %d",
1968 sizes.surface_bpp, best_depth); 1968 sizes.surface_bpp, best_depth);
1969 sizes.surface_depth = best_depth; 1969 sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..7caa3c7ed978 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
489 489
490 drm_close_helper(filp); 490 drm_close_helper(filp);
491 491
492 if (!--dev->open_count) { 492 if (!--dev->open_count)
493 drm_lastclose(dev); 493 drm_lastclose(dev);
494 if (drm_dev_is_unplugged(dev)) 494
495 drm_put_dev(dev);
496 }
497 mutex_unlock(&drm_global_mutex); 495 mutex_unlock(&drm_global_mutex);
498 496
499 drm_minor_release(minor); 497 drm_minor_release(minor);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 0573eab0e190..f35e4ab55b27 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -20,6 +20,7 @@
20#include "regs-vp.h" 20#include "regs-vp.h"
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/ktime.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/wait.h> 25#include <linux/wait.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
352 mixer_reg_write(ctx, MXR_VIDEO_CFG, val); 353 mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
353} 354}
354 355
355static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) 356static bool mixer_is_synced(struct mixer_context *ctx)
356{ 357{
357 /* block update on vsync */ 358 u32 base, shadow;
358 mixer_reg_writemask(ctx, MXR_STATUS, enable ?
359 MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
360 359
360 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
361 ctx->mxr_ver == MXR_VER_128_0_0_184)
362 return !(mixer_reg_read(ctx, MXR_CFG) &
363 MXR_CFG_LAYER_UPDATE_COUNT_MASK);
364
365 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
366 vp_reg_read(ctx, VP_SHADOW_UPDATE))
367 return false;
368
369 base = mixer_reg_read(ctx, MXR_CFG);
370 shadow = mixer_reg_read(ctx, MXR_CFG_S);
371 if (base != shadow)
372 return false;
373
374 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
375 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
376 if (base != shadow)
377 return false;
378
379 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
380 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
381 if (base != shadow)
382 return false;
383
384 return true;
385}
386
387static int mixer_wait_for_sync(struct mixer_context *ctx)
388{
389 ktime_t timeout = ktime_add_us(ktime_get(), 100000);
390
391 while (!mixer_is_synced(ctx)) {
392 usleep_range(1000, 2000);
393 if (ktime_compare(ktime_get(), timeout) > 0)
394 return -ETIMEDOUT;
395 }
396 return 0;
397}
398
399static void mixer_disable_sync(struct mixer_context *ctx)
400{
401 mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
402}
403
404static void mixer_enable_sync(struct mixer_context *ctx)
405{
406 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
407 ctx->mxr_ver == MXR_VER_128_0_0_184)
408 mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
409 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
361 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) 410 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
362 vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? 411 vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
363 VP_SHADOW_UPDATE_ENABLE : 0);
364} 412}
365 413
366static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) 414static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
498 546
499 spin_lock_irqsave(&ctx->reg_slock, flags); 547 spin_lock_irqsave(&ctx->reg_slock, flags);
500 548
501 vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
502 /* interlace or progressive scan mode */ 549 /* interlace or progressive scan mode */
503 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); 550 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
504 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); 551 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
553 vp_regs_dump(ctx); 600 vp_regs_dump(ctx);
554} 601}
555 602
556static void mixer_layer_update(struct mixer_context *ctx)
557{
558 mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
559}
560
561static void mixer_graph_buffer(struct mixer_context *ctx, 603static void mixer_graph_buffer(struct mixer_context *ctx,
562 struct exynos_drm_plane *plane) 604 struct exynos_drm_plane *plane)
563{ 605{
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
640 mixer_cfg_layer(ctx, win, priority, true); 682 mixer_cfg_layer(ctx, win, priority, true);
641 mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); 683 mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
642 684
643 /* layer update mandatory for mixer 16.0.33.0 */
644 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
645 ctx->mxr_ver == MXR_VER_128_0_0_184)
646 mixer_layer_update(ctx);
647
648 spin_unlock_irqrestore(&ctx->reg_slock, flags); 685 spin_unlock_irqrestore(&ctx->reg_slock, flags);
649 686
650 mixer_regs_dump(ctx); 687 mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
709static irqreturn_t mixer_irq_handler(int irq, void *arg) 746static irqreturn_t mixer_irq_handler(int irq, void *arg)
710{ 747{
711 struct mixer_context *ctx = arg; 748 struct mixer_context *ctx = arg;
712 u32 val, base, shadow; 749 u32 val;
713 750
714 spin_lock(&ctx->reg_slock); 751 spin_lock(&ctx->reg_slock);
715 752
@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
723 val &= ~MXR_INT_STATUS_VSYNC; 760 val &= ~MXR_INT_STATUS_VSYNC;
724 761
725 /* interlace scan need to check shadow register */ 762 /* interlace scan need to check shadow register */
726 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 763 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
727 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && 764 && !mixer_is_synced(ctx))
728 vp_reg_read(ctx, VP_SHADOW_UPDATE)) 765 goto out;
729 goto out;
730
731 base = mixer_reg_read(ctx, MXR_CFG);
732 shadow = mixer_reg_read(ctx, MXR_CFG_S);
733 if (base != shadow)
734 goto out;
735
736 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
737 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
738 if (base != shadow)
739 goto out;
740
741 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
742 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
743 if (base != shadow)
744 goto out;
745 }
746 766
747 drm_crtc_handle_vblank(&ctx->crtc->base); 767 drm_crtc_handle_vblank(&ctx->crtc->base);
748 } 768 }
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
917 937
918static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) 938static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
919{ 939{
920 struct mixer_context *mixer_ctx = crtc->ctx; 940 struct mixer_context *ctx = crtc->ctx;
921 941
922 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) 942 if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
923 return; 943 return;
924 944
925 mixer_vsync_set_update(mixer_ctx, false); 945 if (mixer_wait_for_sync(ctx))
946 dev_err(ctx->dev, "timeout waiting for VSYNC\n");
947 mixer_disable_sync(ctx);
926} 948}
927 949
928static void mixer_update_plane(struct exynos_drm_crtc *crtc, 950static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
964 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) 986 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
965 return; 987 return;
966 988
967 mixer_vsync_set_update(mixer_ctx, true); 989 mixer_enable_sync(mixer_ctx);
968 exynos_crtc_handle_event(crtc); 990 exynos_crtc_handle_event(crtc);
969} 991}
970 992
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
979 1001
980 exynos_drm_pipe_clk_enable(crtc, true); 1002 exynos_drm_pipe_clk_enable(crtc, true);
981 1003
982 mixer_vsync_set_update(ctx, false); 1004 mixer_disable_sync(ctx);
983 1005
984 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1006 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
985 1007
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
992 1014
993 mixer_commit(ctx); 1015 mixer_commit(ctx);
994 1016
995 mixer_vsync_set_update(ctx, true); 1017 mixer_enable_sync(ctx);
996 1018
997 set_bit(MXR_BIT_POWERED, &ctx->flags); 1019 set_bit(MXR_BIT_POWERED, &ctx->flags);
998} 1020}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618..3592d04c33b2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1441 } 1441 }
1442 1442
1443 if (index_mode) { 1443 if (index_mode) {
1444 if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) { 1444 if (guest_gma >= I915_GTT_PAGE_SIZE) {
1445 ret = -EFAULT; 1445 ret = -EFAULT;
1446 goto err; 1446 goto err;
1447 } 1447 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd2d8d5..d7052ab7908c 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1882 } 1882 }
1883 1883
1884 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); 1884 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1885
1886 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1885 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); 1887 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1888 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1889
1886 return mm; 1890 return mm;
1887} 1891}
1888 1892
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1967 if (ret) 1971 if (ret)
1968 return ret; 1972 return ret;
1969 1973
1974 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1970 list_move_tail(&mm->ppgtt_mm.lru_list, 1975 list_move_tail(&mm->ppgtt_mm.lru_list,
1971 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); 1976 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
1972 1977 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1973 } 1978 }
1974 1979
1975 return 0; 1980 return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
1980 struct intel_vgpu_mm *mm; 1985 struct intel_vgpu_mm *mm;
1981 struct list_head *pos, *n; 1986 struct list_head *pos, *n;
1982 1987
1988 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1989
1983 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { 1990 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
1984 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); 1991 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
1985 1992
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
1987 continue; 1994 continue;
1988 1995
1989 list_del_init(&mm->ppgtt_mm.lru_list); 1996 list_del_init(&mm->ppgtt_mm.lru_list);
1997 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1990 invalidate_ppgtt_mm(mm); 1998 invalidate_ppgtt_mm(mm);
1991 return 1; 1999 return 1;
1992 } 2000 }
2001 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1993 return 0; 2002 return 0;
1994} 2003}
1995 2004
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2659 } 2668 }
2660 } 2669 }
2661 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); 2670 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2671 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2662 return 0; 2672 return 0;
2663} 2673}
2664 2674
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2699 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2709 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2700 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2710 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2701 if (mm->type == INTEL_GVT_MM_PPGTT) { 2711 if (mm->type == INTEL_GVT_MM_PPGTT) {
2712 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2702 list_del_init(&mm->ppgtt_mm.lru_list); 2713 list_del_init(&mm->ppgtt_mm.lru_list);
2714 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2703 if (mm->ppgtt_mm.shadowed) 2715 if (mm->ppgtt_mm.shadowed)
2704 invalidate_ppgtt_mm(mm); 2716 invalidate_ppgtt_mm(mm);
2705 } 2717 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04cc946d..edb610dc5d86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
88 void (*mm_free_page_table)(struct intel_vgpu_mm *mm); 88 void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
89 struct list_head oos_page_use_list_head; 89 struct list_head oos_page_use_list_head;
90 struct list_head oos_page_free_list_head; 90 struct list_head oos_page_free_list_head;
91 struct mutex ppgtt_mm_lock;
91 struct list_head ppgtt_mm_lru_list_head; 92 struct list_head ppgtt_mm_lru_list_head;
92 93
93 struct page *scratch_page; 94 struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051a..7902fb162d09 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
132 132
133 {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ 133 {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
134 {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ 134 {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
135 {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
135 136
136 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ 137 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
137 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ 138 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa..159192c097cc 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
346 int i = 0; 346 int i = 0;
347 347
348 if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) 348 if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
349 return -1; 349 return -EINVAL;
350 350
351 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 351 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
352 px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; 352 px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
410 if (workload->shadow) 410 if (workload->shadow)
411 return 0; 411 return 0;
412 412
413 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
414 if (ret < 0) {
415 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
416 return ret;
417 }
418
419 /* pin shadow context by gvt even the shadow context will be pinned 413 /* pin shadow context by gvt even the shadow context will be pinned
420 * when i915 alloc request. That is because gvt will update the guest 414 * when i915 alloc request. That is because gvt will update the guest
421 * context from shadow context when workload is completed, and at that 415 * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
678{ 672{
679 struct intel_vgpu *vgpu = workload->vgpu; 673 struct intel_vgpu *vgpu = workload->vgpu;
680 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 674 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
675 struct intel_vgpu_submission *s = &vgpu->submission;
676 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
677 struct i915_request *rq;
681 int ring_id = workload->ring_id; 678 int ring_id = workload->ring_id;
682 int ret; 679 int ret;
683 680
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
687 mutex_lock(&vgpu->vgpu_lock); 684 mutex_lock(&vgpu->vgpu_lock);
688 mutex_lock(&dev_priv->drm.struct_mutex); 685 mutex_lock(&dev_priv->drm.struct_mutex);
689 686
687 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
688 if (ret < 0) {
689 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
690 goto err_req;
691 }
692
690 ret = intel_gvt_workload_req_alloc(workload); 693 ret = intel_gvt_workload_req_alloc(workload);
691 if (ret) 694 if (ret)
692 goto err_req; 695 goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
703 706
704 ret = prepare_workload(workload); 707 ret = prepare_workload(workload);
705out: 708out:
709 if (ret) {
710 /* We might still need to add request with
711 * clean ctx to retire it properly..
712 */
713 rq = fetch_and_zero(&workload->req);
714 i915_request_put(rq);
715 }
716
706 if (!IS_ERR_OR_NULL(workload->req)) { 717 if (!IS_ERR_OR_NULL(workload->req)) {
707 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 718 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
708 ring_id, workload->req); 719 ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
739 goto out; 750 goto out;
740 } 751 }
741 752
742 if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) 753 if (!scheduler->current_vgpu->active ||
754 list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
743 goto out; 755 goto out;
744 756
745 /* 757 /*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69c..a67a63b5aa84 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
2346 INTEL_DEVID(dev_priv) == 0x5915 || \ 2346 INTEL_DEVID(dev_priv) == 0x5915 || \
2347 INTEL_DEVID(dev_priv) == 0x591E) 2347 INTEL_DEVID(dev_priv) == 0x591E)
2348#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ 2348#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
2349 INTEL_DEVID(dev_priv) == 0x87C0) 2349 INTEL_DEVID(dev_priv) == 0x87C0 || \
2350 INTEL_DEVID(dev_priv) == 0x87CA)
2350#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2351#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
2351 INTEL_INFO(dev_priv)->gt == 2) 2352 INTEL_INFO(dev_priv)->gt == 2)
2352#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2353#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 30d516e975c6..8558e81fdc2a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1734,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1734 * pages from. 1734 * pages from.
1735 */ 1735 */
1736 if (!obj->base.filp) { 1736 if (!obj->base.filp) {
1737 i915_gem_object_put(obj); 1737 addr = -ENXIO;
1738 return -ENXIO; 1738 goto err;
1739 }
1740
1741 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
1742 addr = -EINVAL;
1743 goto err;
1739 } 1744 }
1740 1745
1741 addr = vm_mmap(obj->base.filp, 0, args->size, 1746 addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1749,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1749 struct vm_area_struct *vma; 1754 struct vm_area_struct *vma;
1750 1755
1751 if (down_write_killable(&mm->mmap_sem)) { 1756 if (down_write_killable(&mm->mmap_sem)) {
1752 i915_gem_object_put(obj); 1757 addr = -EINTR;
1753 return -EINTR; 1758 goto err;
1754 } 1759 }
1755 vma = find_vma(mm, addr); 1760 vma = find_vma(mm, addr);
1756 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) 1761 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1768,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1768 i915_gem_object_put(obj); 1773 i915_gem_object_put(obj);
1769 1774
1770 args->addr_ptr = (u64)addr; 1775 args->addr_ptr = (u64)addr;
1771
1772 return 0; 1776 return 0;
1773 1777
1774err: 1778err:
1775 i915_gem_object_put(obj); 1779 i915_gem_object_put(obj);
1776
1777 return addr; 1780 return addr;
1778} 1781}
1779 1782
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9a65341fec09..aa6791255252 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
1721 i915_error_generate_code(error, engines)); 1721 i915_error_generate_code(error, engines));
1722 if (engines) { 1722 if (engines) {
1723 /* Just show the first executing process, more is confusing */ 1723 /* Just show the first executing process, more is confusing */
1724 i = ffs(engines); 1724 i = __ffs(engines);
1725 len += scnprintf(error->error_msg + len, 1725 len += scnprintf(error->error_msg + len,
1726 sizeof(error->error_msg) - len, 1726 sizeof(error->error_msg) - len,
1727 ", in %s [%d]", 1727 ", in %s [%d]",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..047855dd8c6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
2863#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) 2863#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
2864#define GEN11_GT_VDBOX_DISABLE_MASK 0xff 2864#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
2865#define GEN11_GT_VEBOX_DISABLE_SHIFT 16 2865#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
2866#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) 2866#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
2867 2867
2868#define GEN11_EU_DISABLE _MMIO(0x9134) 2868#define GEN11_EU_DISABLE _MMIO(0x9134)
2869#define GEN11_EU_DIS_MASK 0xFF 2869#define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
9243#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ 9243#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
9244 _TRANS_DDI_FUNC_CTL2_A) 9244 _TRANS_DDI_FUNC_CTL2_A)
9245#define PORT_SYNC_MODE_ENABLE (1 << 4) 9245#define PORT_SYNC_MODE_ENABLE (1 << 4)
9246#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0) 9246#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
9247#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) 9247#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
9248#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 9248#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
9249 9249
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b508d8a735e0..4364f42cac6b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
1673 info->supports_dvi = (port != PORT_A && port != PORT_E); 1673 info->supports_dvi = (port != PORT_A && port != PORT_E);
1674 info->supports_hdmi = info->supports_dvi; 1674 info->supports_hdmi = info->supports_dvi;
1675 info->supports_dp = (port != PORT_E); 1675 info->supports_dp = (port != PORT_E);
1676 info->supports_edp = (port == PORT_A);
1676 } 1677 }
1677} 1678}
1678 1679
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..b9b0ea4e2404 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
455 struct i915_gem_context *ctx; 455 struct i915_gem_context *ctx;
456 456
457 ctx = live_context(i915, file); 457 ctx = live_context(i915, file);
458 if (!ctx) 458 if (IS_ERR(ctx))
459 break; 459 break;
460 460
461 /* We will need some GGTT space for the rq's context */ 461 /* We will need some GGTT space for the rq's context */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..8a4ebcb6405c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
337 337
338 ret = drm_dev_register(drm, 0); 338 ret = drm_dev_register(drm, 0);
339 if (ret) 339 if (ret)
340 goto free_drm; 340 goto uninstall_irq;
341 341
342 drm_fbdev_generic_setup(drm, 32); 342 drm_fbdev_generic_setup(drm, 32);
343 343
344 return 0; 344 return 0;
345 345
346uninstall_irq:
347 drm_irq_uninstall(drm);
346free_drm: 348free_drm:
347 drm_dev_put(drm); 349 drm_dev_put(drm);
348 350
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
356 358
357static void meson_drv_unbind(struct device *dev) 359static void meson_drv_unbind(struct device *dev)
358{ 360{
359 struct drm_device *drm = dev_get_drvdata(dev); 361 struct meson_drm *priv = dev_get_drvdata(dev);
360 struct meson_drm *priv = drm->dev_private; 362 struct drm_device *drm = priv->drm;
361 363
362 if (priv->canvas) { 364 if (priv->canvas) {
363 meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 365 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
367 } 369 }
368 370
369 drm_dev_unregister(drm); 371 drm_dev_unregister(drm);
372 drm_irq_uninstall(drm);
370 drm_kms_helper_poll_fini(drm); 373 drm_kms_helper_poll_fini(drm);
371 drm_mode_config_cleanup(drm); 374 drm_mode_config_cleanup(drm);
372 drm_dev_put(drm); 375 drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f4ea6c..563953ec6ad0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
569 DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 569 DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
570 570
571 /* If sink max TMDS clock, we reject the mode */ 571 /* If sink max TMDS clock, we reject the mode */
572 if (mode->clock > connector->display_info.max_tmds_clock) 572 if (connector->display_info.max_tmds_clock &&
573 mode->clock > connector->display_info.max_tmds_clock)
573 return MODE_BAD; 574 return MODE_BAD;
574 575
575 /* Check against non-VIC supported modes */ 576 /* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 88a52f6b39fe..7dfbbbc1beea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
181 } 181 }
182 182
183 ret = pm_runtime_get_sync(drm->dev); 183 ret = pm_runtime_get_sync(drm->dev);
184 if (IS_ERR_VALUE(ret) && ret != -EACCES) 184 if (ret < 0 && ret != -EACCES)
185 return ret; 185 return ret;
186 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); 186 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
187 pm_runtime_put_autosuspend(drm->dev); 187 pm_runtime_put_autosuspend(drm->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index aa9fec80492d..40c47d6a7d78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -100,12 +100,10 @@ static void
100nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page) 100nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
101{ 101{
102 struct nouveau_dmem_chunk *chunk; 102 struct nouveau_dmem_chunk *chunk;
103 struct nouveau_drm *drm;
104 unsigned long idx; 103 unsigned long idx;
105 104
106 chunk = (void *)hmm_devmem_page_get_drvdata(page); 105 chunk = (void *)hmm_devmem_page_get_drvdata(page);
107 idx = page_to_pfn(page) - chunk->pfn_first; 106 idx = page_to_pfn(page) - chunk->pfn_first;
108 drm = chunk->drm;
109 107
110 /* 108 /*
111 * FIXME: 109 * FIXME:
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
456 /* FIXME handle pin failure */ 454 /* FIXME handle pin failure */
457 WARN_ON(ret); 455 WARN_ON(ret);
458 } 456 }
459 list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
460 ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
461 /* FIXME handle pin failure */
462 WARN_ON(ret);
463 }
464 mutex_unlock(&drm->dmem->mutex); 457 mutex_unlock(&drm->dmem->mutex);
465} 458}
466 459
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
479 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) { 472 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
480 nouveau_bo_unpin(chunk->bo); 473 nouveau_bo_unpin(chunk->bo);
481 } 474 }
482 list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
483 nouveau_bo_unpin(chunk->bo);
484 }
485 mutex_unlock(&drm->dmem->mutex); 475 mutex_unlock(&drm->dmem->mutex);
486} 476}
487 477
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
623 */ 613 */
624 drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, 614 drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
625 device, size); 615 device, size);
626 if (drm->dmem->devmem == NULL) { 616 if (IS_ERR(drm->dmem->devmem)) {
627 kfree(drm->dmem); 617 kfree(drm->dmem);
628 drm->dmem = NULL; 618 drm->dmem = NULL;
629 return; 619 return;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c6073ea5..0d4ade9d4722 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
541 clk_disable(vop->hclk); 541 clk_disable(vop->hclk);
542} 542}
543 543
544static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
545{
546 if (win->phy->scl && win->phy->scl->ext) {
547 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
548 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
549 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
550 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
551 }
552
553 VOP_WIN_SET(vop, win, enable, 0);
554}
555
544static int vop_enable(struct drm_crtc *crtc) 556static int vop_enable(struct drm_crtc *crtc)
545{ 557{
546 struct vop *vop = to_vop(crtc); 558 struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
586 struct vop_win *vop_win = &vop->win[i]; 598 struct vop_win *vop_win = &vop->win[i];
587 const struct vop_win_data *win = vop_win->data; 599 const struct vop_win_data *win = vop_win->data;
588 600
589 VOP_WIN_SET(vop, win, enable, 0); 601 vop_win_disable(vop, win);
590 } 602 }
591 spin_unlock(&vop->reg_lock); 603 spin_unlock(&vop->reg_lock);
592 604
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
735 747
736 spin_lock(&vop->reg_lock); 748 spin_lock(&vop->reg_lock);
737 749
738 VOP_WIN_SET(vop, win, enable, 0); 750 vop_win_disable(vop, win);
739 751
740 spin_unlock(&vop->reg_lock); 752 spin_unlock(&vop->reg_lock);
741} 753}
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
1622 int channel = i * 2 + 1; 1634 int channel = i * 2 + 1;
1623 1635
1624 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); 1636 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
1625 VOP_WIN_SET(vop, win, enable, 0); 1637 vop_win_disable(vop, win);
1626 VOP_WIN_SET(vop, win, gate, 1); 1638 VOP_WIN_SET(vop, win, gate, 1);
1627 } 1639 }
1628 1640
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cfb8c3d..b3436c2aed68 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
378static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, 378static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
379 struct drm_plane_state *old_state) 379 struct drm_plane_state *old_state)
380{ 380{
381 struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
382 struct tegra_plane *p = to_tegra_plane(plane); 381 struct tegra_plane *p = to_tegra_plane(plane);
382 struct tegra_dc *dc;
383 u32 value; 383 u32 value;
384 384
385 /* rien ne va plus */ 385 /* rien ne va plus */
386 if (!old_state || !old_state->crtc) 386 if (!old_state || !old_state->crtc)
387 return; 387 return;
388 388
389 dc = to_tegra_dc(old_state->crtc);
390
389 /* 391 /*
390 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even 392 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
391 * on planes that are already disabled. Make sure we fallback to the 393 * on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9623de..982ce37ecde1 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
106 if (vic->booted) 106 if (vic->booted)
107 return 0; 107 return 0;
108 108
109#ifdef CONFIG_IOMMU_API
109 if (vic->config->supports_sid) { 110 if (vic->config->supports_sid) {
110 struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); 111 struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
111 u32 value; 112 u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
121 vic_writel(vic, value, VIC_THI_STREAMID1); 122 vic_writel(vic, value, VIC_THI_STREAMID1);
122 } 123 }
123 } 124 }
125#endif
124 126
125 /* setup clockgating registers */ 127 /* setup clockgating registers */
126 vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) | 128 vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c24590f..c1bd5e3d9e4a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
18#include "udl_connector.h" 18#include "udl_connector.h"
19#include "udl_drv.h" 19#include "udl_drv.h"
20 20
21static bool udl_get_edid_block(struct udl_device *udl, int block_idx, 21static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
22 u8 *buff) 22 size_t len)
23{ 23{
24 int ret, i; 24 int ret, i;
25 u8 *read_buff; 25 u8 *read_buff;
26 struct udl_device *udl = data;
26 27
27 read_buff = kmalloc(2, GFP_KERNEL); 28 read_buff = kmalloc(2, GFP_KERNEL);
28 if (!read_buff) 29 if (!read_buff)
29 return false; 30 return -1;
30 31
31 for (i = 0; i < EDID_LENGTH; i++) { 32 for (i = 0; i < len; i++) {
32 int bval = (i + block_idx * EDID_LENGTH) << 8; 33 int bval = (i + block * EDID_LENGTH) << 8;
33 ret = usb_control_msg(udl->udev, 34 ret = usb_control_msg(udl->udev,
34 usb_rcvctrlpipe(udl->udev, 0), 35 usb_rcvctrlpipe(udl->udev, 0),
35 (0x02), (0x80 | (0x02 << 5)), bval, 36 (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
37 if (ret < 1) { 38 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 39 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 kfree(read_buff); 40 kfree(read_buff);
40 return false; 41 return -1;
41 } 42 }
42 buff[i] = read_buff[1]; 43 buf[i] = read_buff[1];
43 } 44 }
44 45
45 kfree(read_buff); 46 kfree(read_buff);
46 return true; 47 return 0;
47}
48
49static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
50 int *result_buff_size)
51{
52 int i, extensions;
53 u8 *block_buff = NULL, *buff_ptr;
54
55 block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
56 if (block_buff == NULL)
57 return false;
58
59 if (udl_get_edid_block(udl, 0, block_buff) &&
60 memchr_inv(block_buff, 0, EDID_LENGTH)) {
61 extensions = ((struct edid *)block_buff)->extensions;
62 if (extensions > 0) {
63 /* we have to read all extensions one by one */
64 *result_buff_size = EDID_LENGTH * (extensions + 1);
65 *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
66 buff_ptr = *result_buff;
67 if (buff_ptr == NULL) {
68 kfree(block_buff);
69 return false;
70 }
71 memcpy(buff_ptr, block_buff, EDID_LENGTH);
72 kfree(block_buff);
73 buff_ptr += EDID_LENGTH;
74 for (i = 1; i < extensions; ++i) {
75 if (udl_get_edid_block(udl, i, buff_ptr)) {
76 buff_ptr += EDID_LENGTH;
77 } else {
78 kfree(*result_buff);
79 *result_buff = NULL;
80 return false;
81 }
82 }
83 return true;
84 }
85 /* we have only base edid block */
86 *result_buff = block_buff;
87 *result_buff_size = EDID_LENGTH;
88 return true;
89 }
90
91 kfree(block_buff);
92
93 return false;
94} 48}
95 49
96static int udl_get_modes(struct drm_connector *connector) 50static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
122static enum drm_connector_status 76static enum drm_connector_status
123udl_detect(struct drm_connector *connector, bool force) 77udl_detect(struct drm_connector *connector, bool force)
124{ 78{
125 u8 *edid_buff = NULL;
126 int edid_buff_size = 0;
127 struct udl_device *udl = connector->dev->dev_private; 79 struct udl_device *udl = connector->dev->dev_private;
128 struct udl_drm_connector *udl_connector = 80 struct udl_drm_connector *udl_connector =
129 container_of(connector, 81 container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
136 udl_connector->edid = NULL; 88 udl_connector->edid = NULL;
137 } 89 }
138 90
139 91 udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
140 if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) 92 if (!udl_connector->edid)
141 return connector_status_disconnected; 93 return connector_status_disconnected;
142 94
143 udl_connector->edid = (struct edid *)edid_buff;
144
145 return connector_status_connected; 95 return connector_status_connected;
146} 96}
147 97
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d5a23295dd80..bb7b58407039 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
224 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); 224 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
225 225
226out: 226out:
227 drm_gem_object_put(&gobj->base); 227 drm_gem_object_put_unlocked(&gobj->base);
228unlock: 228unlock:
229 mutex_unlock(&udl->gem_lock); 229 mutex_unlock(&udl->gem_lock);
230 return ret; 230 return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930facd6d2d..11a8f99ba18c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
191 ret = drm_gem_handle_create(file, &obj->base, handle); 191 ret = drm_gem_handle_create(file, &obj->base, handle);
192 drm_gem_object_put_unlocked(&obj->base); 192 drm_gem_object_put_unlocked(&obj->base);
193 if (ret) 193 if (ret)
194 goto err; 194 return ERR_PTR(ret);
195 195
196 return &obj->base; 196 return &obj->base;
197
198err:
199 __vgem_gem_destroy(obj);
200 return ERR_PTR(ret);
201} 197}
202 198
203static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 199static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb325cf..69048e73377d 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
111 111
112 ret = drm_gem_handle_create(file, &obj->gem, handle); 112 ret = drm_gem_handle_create(file, &obj->gem, handle);
113 drm_gem_object_put_unlocked(&obj->gem); 113 drm_gem_object_put_unlocked(&obj->gem);
114 if (ret) { 114 if (ret)
115 drm_gem_object_release(&obj->gem);
116 kfree(obj);
117 return ERR_PTR(ret); 115 return ERR_PTR(ret);
118 }
119 116
120 return &obj->gem; 117 return &obj->gem;
121} 118}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b913a56f3426..2a9112515f46 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
564 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 564 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
565 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) 565 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
566 }; 566 };
567 struct drm_display_mode *old_mode;
568 struct drm_display_mode *mode; 567 struct drm_display_mode *mode;
569 int ret; 568 int ret;
570 569
571 old_mode = par->set_mode;
572 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); 570 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
573 if (!mode) { 571 if (!mode) {
574 DRM_ERROR("Could not create new fb mode.\n"); 572 DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
579 mode->vdisplay = var->yres; 577 mode->vdisplay = var->yres;
580 vmw_guess_mode_timing(mode); 578 vmw_guess_mode_timing(mode);
581 579
582 if (old_mode && drm_mode_equal(old_mode, mode)) { 580 if (!vmw_kms_validate_mode_vram(vmw_priv,
583 drm_mode_destroy(vmw_priv->dev, mode);
584 mode = old_mode;
585 old_mode = NULL;
586 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
587 mode->hdisplay * 581 mode->hdisplay *
588 DIV_ROUND_UP(var->bits_per_pixel, 8), 582 DIV_ROUND_UP(var->bits_per_pixel, 8),
589 mode->vdisplay)) { 583 mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
620 schedule_delayed_work(&par->local_work, 0); 614 schedule_delayed_work(&par->local_work, 0);
621 615
622out_unlock: 616out_unlock:
623 if (old_mode) 617 if (par->set_mode)
624 drm_mode_destroy(vmw_priv->dev, old_mode); 618 drm_mode_destroy(vmw_priv->dev, par->set_mode);
625 par->set_mode = mode; 619 par->set_mode = mode;
626 620
627 mutex_unlock(&par->bo_mutex); 621 mutex_unlock(&par->bo_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b93c558dd86e..7da752ca1c34 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
57 57
58 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); 58 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
59 if (id < 0) 59 if (id < 0)
60 return id; 60 return (id != -ENOMEM ? 0 : id);
61 61
62 spin_lock(&gman->lock); 62 spin_lock(&gman->lock);
63 63
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 690f0d3a5543..977161bb7062 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -141,6 +141,7 @@ config I2C_I801
141 Cannon Lake (PCH) 141 Cannon Lake (PCH)
142 Cedar Fork (PCH) 142 Cedar Fork (PCH)
143 Ice Lake (PCH) 143 Ice Lake (PCH)
144 Comet Lake (PCH)
144 145
145 This driver can also be built as a module. If so, the module 146 This driver can also be built as a module. If so, the module
146 will be called i2c-i801. 147 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145ef5a5..679c6c41f64b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
71 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes 71 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
72 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes 72 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
73 * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes 73 * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
74 * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
74 * 75 *
75 * Features supported by this driver: 76 * Features supported by this driver:
76 * Software PEC no 77 * Software PEC no
@@ -240,6 +241,7 @@
240#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 241#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
241#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 242#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
242#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 243#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
244#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
243 245
244struct i801_mux_config { 246struct i801_mux_config {
245 char *gpio_chip; 247 char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
1038 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, 1040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
1039 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, 1041 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
1040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, 1042 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
1043 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
1041 { 0, } 1044 { 0, }
1042}; 1045};
1043 1046
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1534 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1537 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1535 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: 1538 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
1536 case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: 1539 case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
1540 case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
1537 priv->features |= FEATURE_I2C_BLOCK_READ; 1541 priv->features |= FEATURE_I2C_BLOCK_READ;
1538 priv->features |= FEATURE_IRQ; 1542 priv->features |= FEATURE_IRQ;
1539 priv->features |= FEATURE_SMBUS_PEC; 1543 priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index d50454c565ee..4eecffc26527 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -221,8 +221,8 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
221 return -EINVAL; 221 return -EINVAL;
222 } 222 }
223 223
224 priv = devm_kzalloc(&pdev->dev, sizeof(*priv) 224 priv = devm_kzalloc(&pdev->dev, struct_size(priv, chan, num_chan),
225 + num_chan * sizeof(struct i2c_demux_pinctrl_chan), GFP_KERNEL); 225 GFP_KERNEL);
226 226
227 props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL); 227 props = devm_kcalloc(&pdev->dev, num_chan, sizeof(*props), GFP_KERNEL);
228 228
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 9e75d6b9140b..50e1fb4aedf5 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -22,7 +22,6 @@
22#include <linux/i2c-mux.h> 22#include <linux/i2c-mux.h>
23#include <linux/jiffies.h> 23#include <linux/jiffies.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_data/pca954x.h>
26#include <linux/slab.h> 25#include <linux/slab.h>
27 26
28/* 27/*
@@ -287,10 +286,8 @@ static int pca9541_probe(struct i2c_client *client,
287 const struct i2c_device_id *id) 286 const struct i2c_device_id *id)
288{ 287{
289 struct i2c_adapter *adap = client->adapter; 288 struct i2c_adapter *adap = client->adapter;
290 struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
291 struct i2c_mux_core *muxc; 289 struct i2c_mux_core *muxc;
292 struct pca9541 *data; 290 struct pca9541 *data;
293 int force;
294 int ret; 291 int ret;
295 292
296 if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA)) 293 if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -306,9 +303,6 @@ static int pca9541_probe(struct i2c_client *client,
306 303
307 /* Create mux adapter */ 304 /* Create mux adapter */
308 305
309 force = 0;
310 if (pdata)
311 force = pdata->modes[0].adap_id;
312 muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data), 306 muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data),
313 I2C_MUX_ARBITRATOR, 307 I2C_MUX_ARBITRATOR,
314 pca9541_select_chan, pca9541_release_chan); 308 pca9541_select_chan, pca9541_release_chan);
@@ -320,7 +314,7 @@ static int pca9541_probe(struct i2c_client *client,
320 314
321 i2c_set_clientdata(client, muxc); 315 i2c_set_clientdata(client, muxc);
322 316
323 ret = i2c_mux_add_adapter(muxc, force, 0, 0); 317 ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
324 if (ret) 318 if (ret)
325 return ret; 319 return ret;
326 320
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index bfabf985e830..923aa3a5a3dc 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -46,10 +46,10 @@
46#include <linux/of.h> 46#include <linux/of.h>
47#include <linux/of_device.h> 47#include <linux/of_device.h>
48#include <linux/of_irq.h> 48#include <linux/of_irq.h>
49#include <linux/platform_data/pca954x.h>
50#include <linux/pm.h> 49#include <linux/pm.h>
51#include <linux/slab.h> 50#include <linux/slab.h>
52#include <linux/spinlock.h> 51#include <linux/spinlock.h>
52#include <dt-bindings/mux/mux.h>
53 53
54#define PCA954X_MAX_NCHANS 8 54#define PCA954X_MAX_NCHANS 8
55 55
@@ -85,7 +85,9 @@ struct pca954x {
85 const struct chip_desc *chip; 85 const struct chip_desc *chip;
86 86
87 u8 last_chan; /* last register value */ 87 u8 last_chan; /* last register value */
88 u8 deselect; 88 /* MUX_IDLE_AS_IS, MUX_IDLE_DISCONNECT or >= 0 for channel */
89 s8 idle_state;
90
89 struct i2c_client *client; 91 struct i2c_client *client;
90 92
91 struct irq_domain *irq; 93 struct irq_domain *irq;
@@ -254,15 +256,71 @@ static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
254{ 256{
255 struct pca954x *data = i2c_mux_priv(muxc); 257 struct pca954x *data = i2c_mux_priv(muxc);
256 struct i2c_client *client = data->client; 258 struct i2c_client *client = data->client;
259 s8 idle_state;
260
261 idle_state = READ_ONCE(data->idle_state);
262 if (idle_state >= 0)
263 /* Set the mux back to a predetermined channel */
264 return pca954x_select_chan(muxc, idle_state);
265
266 if (idle_state == MUX_IDLE_DISCONNECT) {
267 /* Deselect active channel */
268 data->last_chan = 0;
269 return pca954x_reg_write(muxc->parent, client,
270 data->last_chan);
271 }
257 272
258 if (!(data->deselect & (1 << chan))) 273 /* otherwise leave as-is */
259 return 0;
260 274
261 /* Deselect active channel */ 275 return 0;
262 data->last_chan = 0; 276}
263 return pca954x_reg_write(muxc->parent, client, data->last_chan); 277
278static ssize_t idle_state_show(struct device *dev,
279 struct device_attribute *attr,
280 char *buf)
281{
282 struct i2c_client *client = to_i2c_client(dev);
283 struct i2c_mux_core *muxc = i2c_get_clientdata(client);
284 struct pca954x *data = i2c_mux_priv(muxc);
285
286 return sprintf(buf, "%d\n", READ_ONCE(data->idle_state));
264} 287}
265 288
289static ssize_t idle_state_store(struct device *dev,
290 struct device_attribute *attr,
291 const char *buf, size_t count)
292{
293 struct i2c_client *client = to_i2c_client(dev);
294 struct i2c_mux_core *muxc = i2c_get_clientdata(client);
295 struct pca954x *data = i2c_mux_priv(muxc);
296 int val;
297 int ret;
298
299 ret = kstrtoint(buf, 0, &val);
300 if (ret < 0)
301 return ret;
302
303 if (val != MUX_IDLE_AS_IS && val != MUX_IDLE_DISCONNECT &&
304 (val < 0 || val >= data->chip->nchans))
305 return -EINVAL;
306
307 i2c_lock_bus(muxc->parent, I2C_LOCK_SEGMENT);
308
309 WRITE_ONCE(data->idle_state, val);
310 /*
311 * Set the mux into a state consistent with the new
312 * idle_state.
313 */
314 if (data->last_chan || val != MUX_IDLE_DISCONNECT)
315 ret = pca954x_deselect_mux(muxc, 0);
316
317 i2c_unlock_bus(muxc->parent, I2C_LOCK_SEGMENT);
318
319 return ret < 0 ? ret : count;
320}
321
322static DEVICE_ATTR_RW(idle_state);
323
266static irqreturn_t pca954x_irq_handler(int irq, void *dev_id) 324static irqreturn_t pca954x_irq_handler(int irq, void *dev_id)
267{ 325{
268 struct pca954x *data = dev_id; 326 struct pca954x *data = dev_id;
@@ -329,8 +387,11 @@ static int pca954x_irq_setup(struct i2c_mux_core *muxc)
329static void pca954x_cleanup(struct i2c_mux_core *muxc) 387static void pca954x_cleanup(struct i2c_mux_core *muxc)
330{ 388{
331 struct pca954x *data = i2c_mux_priv(muxc); 389 struct pca954x *data = i2c_mux_priv(muxc);
390 struct i2c_client *client = data->client;
332 int c, irq; 391 int c, irq;
333 392
393 device_remove_file(&client->dev, &dev_attr_idle_state);
394
334 if (data->irq) { 395 if (data->irq) {
335 for (c = 0; c < data->chip->nchans; c++) { 396 for (c = 0; c < data->chip->nchans; c++) {
336 irq = irq_find_mapping(data->irq, c); 397 irq = irq_find_mapping(data->irq, c);
@@ -348,14 +409,13 @@ static int pca954x_probe(struct i2c_client *client,
348 const struct i2c_device_id *id) 409 const struct i2c_device_id *id)
349{ 410{
350 struct i2c_adapter *adap = client->adapter; 411 struct i2c_adapter *adap = client->adapter;
351 struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
352 struct device *dev = &client->dev; 412 struct device *dev = &client->dev;
353 struct device_node *np = dev->of_node; 413 struct device_node *np = dev->of_node;
354 bool idle_disconnect_dt; 414 bool idle_disconnect_dt;
355 struct gpio_desc *gpio; 415 struct gpio_desc *gpio;
356 int num, force, class;
357 struct i2c_mux_core *muxc; 416 struct i2c_mux_core *muxc;
358 struct pca954x *data; 417 struct pca954x *data;
418 int num;
359 int ret; 419 int ret;
360 420
361 if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE)) 421 if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
@@ -412,9 +472,12 @@ static int pca954x_probe(struct i2c_client *client,
412 } 472 }
413 473
414 data->last_chan = 0; /* force the first selection */ 474 data->last_chan = 0; /* force the first selection */
475 data->idle_state = MUX_IDLE_AS_IS;
415 476
416 idle_disconnect_dt = np && 477 idle_disconnect_dt = np &&
417 of_property_read_bool(np, "i2c-mux-idle-disconnect"); 478 of_property_read_bool(np, "i2c-mux-idle-disconnect");
479 if (idle_disconnect_dt)
480 data->idle_state = MUX_IDLE_DISCONNECT;
418 481
419 ret = pca954x_irq_setup(muxc); 482 ret = pca954x_irq_setup(muxc);
420 if (ret) 483 if (ret)
@@ -422,24 +485,7 @@ static int pca954x_probe(struct i2c_client *client,
422 485
423 /* Now create an adapter for each channel */ 486 /* Now create an adapter for each channel */
424 for (num = 0; num < data->chip->nchans; num++) { 487 for (num = 0; num < data->chip->nchans; num++) {
425 bool idle_disconnect_pd = false; 488 ret = i2c_mux_add_adapter(muxc, 0, num, 0);
426
427 force = 0; /* dynamic adap number */
428 class = 0; /* no class by default */
429 if (pdata) {
430 if (num < pdata->num_modes) {
431 /* force static number */
432 force = pdata->modes[num].adap_id;
433 class = pdata->modes[num].class;
434 } else
435 /* discard unconfigured channels */
436 break;
437 idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
438 }
439 data->deselect |= (idle_disconnect_pd ||
440 idle_disconnect_dt) << num;
441
442 ret = i2c_mux_add_adapter(muxc, force, num, class);
443 if (ret) 489 if (ret)
444 goto fail_cleanup; 490 goto fail_cleanup;
445 } 491 }
@@ -453,6 +499,12 @@ static int pca954x_probe(struct i2c_client *client,
453 goto fail_cleanup; 499 goto fail_cleanup;
454 } 500 }
455 501
502 /*
503 * The attr probably isn't going to be needed in most cases,
504 * so don't fail completely on error.
505 */
506 device_create_file(dev, &dev_attr_idle_state);
507
456 dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n", 508 dev_info(dev, "registered %d multiplexed busses for I2C %s %s\n",
457 num, data->chip->muxtype == pca954x_ismux 509 num, data->chip->muxtype == pca954x_ismux
458 ? "mux" : "switch", client->name); 510 ? "mux" : "switch", client->name);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index c5a881172524..337410f40860 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
173 173
174 rcu_read_lock(); 174 rcu_read_lock();
175 in = __in_dev_get_rcu(upper_dev); 175 in = __in_dev_get_rcu(upper_dev);
176 local_ipaddr = ntohl(in->ifa_list->ifa_address); 176
177 if (!in->ifa_list)
178 local_ipaddr = 0;
179 else
180 local_ipaddr = ntohl(in->ifa_list->ifa_address);
181
177 rcu_read_unlock(); 182 rcu_read_unlock();
178 } else { 183 } else {
179 local_ipaddr = ntohl(ifa->ifa_address); 184 local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
185 case NETDEV_UP: 190 case NETDEV_UP:
186 /* Fall through */ 191 /* Fall through */
187 case NETDEV_CHANGEADDR: 192 case NETDEV_CHANGEADDR:
193
194 /* Just skip if no need to handle ARP cache */
195 if (!local_ipaddr)
196 break;
197
188 i40iw_manage_arp_cache(iwdev, 198 i40iw_manage_arp_cache(iwdev,
189 netdev->dev_addr, 199 netdev->dev_addr,
190 &local_ipaddr, 200 &local_ipaddr,
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 782499abcd98..2a0b59a4b6eb 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
804 unsigned long flags; 804 unsigned long flags;
805 805
806 for (i = 0 ; i < dev->num_ports; i++) { 806 for (i = 0 ; i < dev->num_ports; i++) {
807 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
808 det = &sriov->alias_guid.ports_guid[i]; 807 det = &sriov->alias_guid.ports_guid[i];
808 cancel_delayed_work_sync(&det->alias_guid_work);
809 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); 809 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
810 while (!list_empty(&det->cb_list)) { 810 while (!list_empty(&det->cb_list)) {
811 cb_ctx = list_entry(det->cb_list.next, 811 cb_ctx = list_entry(det->cb_list.next,
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index eaa055007f28..9e08df7914aa 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -20,6 +20,7 @@
20 20
21enum devx_obj_flags { 21enum devx_obj_flags {
22 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0, 22 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
23 DEVX_OBJ_FLAGS_DCT = 1 << 1,
23}; 24};
24 25
25struct devx_async_data { 26struct devx_async_data {
@@ -39,7 +40,10 @@ struct devx_obj {
39 u32 dinlen; /* destroy inbox length */ 40 u32 dinlen; /* destroy inbox length */
40 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; 41 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
41 u32 flags; 42 u32 flags;
42 struct mlx5_ib_devx_mr devx_mr; 43 union {
44 struct mlx5_ib_devx_mr devx_mr;
45 struct mlx5_core_dct core_dct;
46 };
43}; 47};
44 48
45struct devx_umem { 49struct devx_umem {
@@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
347 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, 351 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
348 MLX5_GET(arm_rq_in, in, srq_number)); 352 MLX5_GET(arm_rq_in, in, srq_number));
349 break; 353 break;
350 case MLX5_CMD_OP_DRAIN_DCT:
351 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 354 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
352 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, 355 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
353 MLX5_GET(drain_dct_in, in, dctn)); 356 MLX5_GET(drain_dct_in, in, dctn));
@@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
618 case MLX5_CMD_OP_2RST_QP: 621 case MLX5_CMD_OP_2RST_QP:
619 case MLX5_CMD_OP_ARM_XRC_SRQ: 622 case MLX5_CMD_OP_ARM_XRC_SRQ:
620 case MLX5_CMD_OP_ARM_RQ: 623 case MLX5_CMD_OP_ARM_RQ:
621 case MLX5_CMD_OP_DRAIN_DCT:
622 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 624 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
623 case MLX5_CMD_OP_ARM_XRQ: 625 case MLX5_CMD_OP_ARM_XRQ:
624 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: 626 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
1124 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) 1126 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
1125 devx_cleanup_mkey(obj); 1127 devx_cleanup_mkey(obj);
1126 1128
1127 ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); 1129 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1130 ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
1131 else
1132 ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
1133 sizeof(out));
1128 if (ib_is_destroy_retryable(ret, why, uobject)) 1134 if (ib_is_destroy_retryable(ret, why, uobject))
1129 return ret; 1135 return ret;
1130 1136
@@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1185 devx_set_umem_valid(cmd_in); 1191 devx_set_umem_valid(cmd_in);
1186 } 1192 }
1187 1193
1188 err = mlx5_cmd_exec(dev->mdev, cmd_in, 1194 if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1189 cmd_in_len, 1195 obj->flags |= DEVX_OBJ_FLAGS_DCT;
1190 cmd_out, cmd_out_len); 1196 err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
1197 cmd_in, cmd_in_len,
1198 cmd_out, cmd_out_len);
1199 } else {
1200 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1201 cmd_in_len,
1202 cmd_out, cmd_out_len);
1203 }
1204
1191 if (err) 1205 if (err)
1192 goto obj_free; 1206 goto obj_free;
1193 1207
@@ -1214,7 +1228,11 @@ err_copy:
1214 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) 1228 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
1215 devx_cleanup_mkey(obj); 1229 devx_cleanup_mkey(obj);
1216obj_destroy: 1230obj_destroy:
1217 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); 1231 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1232 mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
1233 else
1234 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
1235 sizeof(out));
1218obj_free: 1236obj_free:
1219 kfree(obj); 1237 kfree(obj);
1220 return err; 1238 return err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 994c19d01211..531ff20b32ad 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
415 *active_speed = IB_SPEED_EDR; 415 *active_speed = IB_SPEED_EDR;
416 break; 416 break;
417 case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2): 417 case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
418 *active_width = IB_WIDTH_2X;
419 *active_speed = IB_SPEED_EDR;
420 break;
418 case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR): 421 case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
419 *active_width = IB_WIDTH_1X; 422 *active_width = IB_WIDTH_1X;
420 *active_speed = IB_SPEED_HDR; 423 *active_speed = IB_SPEED_HDR;
421 break; 424 break;
425 case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
426 *active_width = IB_WIDTH_4X;
427 *active_speed = IB_SPEED_EDR;
428 break;
422 case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2): 429 case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
423 *active_width = IB_WIDTH_2X; 430 *active_width = IB_WIDTH_2X;
424 *active_speed = IB_SPEED_HDR; 431 *active_speed = IB_SPEED_HDR;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6b1f0e76900b..7cd006da1dae 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3729,6 +3729,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3729 3729
3730 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 3730 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3731 struct mlx5_ib_modify_qp_resp resp = {}; 3731 struct mlx5_ib_modify_qp_resp resp = {};
3732 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
3732 u32 min_resp_len = offsetof(typeof(resp), dctn) + 3733 u32 min_resp_len = offsetof(typeof(resp), dctn) +
3733 sizeof(resp.dctn); 3734 sizeof(resp.dctn);
3734 3735
@@ -3747,7 +3748,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3747 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); 3748 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
3748 3749
3749 err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in, 3750 err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
3750 MLX5_ST_SZ_BYTES(create_dct_in)); 3751 MLX5_ST_SZ_BYTES(create_dct_in), out,
3752 sizeof(out));
3751 if (err) 3753 if (err)
3752 return err; 3754 return err;
3753 resp.dctn = qp->dct.mdct.mqp.qpn; 3755 resp.dctn = qp->dct.mdct.mqp.qpn;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b319e51c379b..f7cdd2ab7f11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
2608 2608
2609 /* Everything is mapped - write the right values into s->dma_address */ 2609 /* Everything is mapped - write the right values into s->dma_address */
2610 for_each_sg(sglist, s, nelems, i) { 2610 for_each_sg(sglist, s, nelems, i) {
2611 s->dma_address += address + s->offset; 2611 /*
2612 * Add in the remaining piece of the scatter-gather offset that
2613 * was masked out when we were determining the physical address
2614 * via (sg_phys(s) & PAGE_MASK) earlier.
2615 */
2616 s->dma_address += address + (s->offset & ~PAGE_MASK);
2612 s->dma_length = s->length; 2617 s->dma_length = s->length;
2613 } 2618 }
2614 2619
@@ -3164,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
3164 return; 3169 return;
3165 3170
3166 list_for_each_entry(entry, &amd_iommu_unity_map, list) { 3171 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3172 int type, prot = 0;
3167 size_t length; 3173 size_t length;
3168 int prot = 0;
3169 3174
3170 if (devid < entry->devid_start || devid > entry->devid_end) 3175 if (devid < entry->devid_start || devid > entry->devid_end)
3171 continue; 3176 continue;
3172 3177
3178 type = IOMMU_RESV_DIRECT;
3173 length = entry->address_end - entry->address_start; 3179 length = entry->address_end - entry->address_start;
3174 if (entry->prot & IOMMU_PROT_IR) 3180 if (entry->prot & IOMMU_PROT_IR)
3175 prot |= IOMMU_READ; 3181 prot |= IOMMU_READ;
3176 if (entry->prot & IOMMU_PROT_IW) 3182 if (entry->prot & IOMMU_PROT_IW)
3177 prot |= IOMMU_WRITE; 3183 prot |= IOMMU_WRITE;
3184 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
3185 /* Exclusion range */
3186 type = IOMMU_RESV_RESERVED;
3178 3187
3179 region = iommu_alloc_resv_region(entry->address_start, 3188 region = iommu_alloc_resv_region(entry->address_start,
3180 length, prot, 3189 length, prot, type);
3181 IOMMU_RESV_DIRECT);
3182 if (!region) { 3190 if (!region) {
3183 dev_err(dev, "Out of memory allocating dm-regions\n"); 3191 dev_err(dev, "Out of memory allocating dm-regions\n");
3184 return; 3192 return;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f773792d77fd..1b1378619fc9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
2013 if (e == NULL) 2013 if (e == NULL)
2014 return -ENOMEM; 2014 return -ENOMEM;
2015 2015
2016 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2017 init_exclusion_range(m);
2018
2016 switch (m->type) { 2019 switch (m->type) {
2017 default: 2020 default:
2018 kfree(e); 2021 kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
2059 2062
2060 while (p < end) { 2063 while (p < end) {
2061 m = (struct ivmd_header *)p; 2064 m = (struct ivmd_header *)p;
2062 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2065 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2063 init_exclusion_range(m);
2064 else if (m->flags & IVMD_FLAG_UNITY_MAP)
2065 init_unity_map_range(m); 2066 init_unity_map_range(m);
2066 2067
2067 p += m->length; 2068 p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..87965e4d9647 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
374#define IOMMU_PROT_IR 0x01 374#define IOMMU_PROT_IR 0x01
375#define IOMMU_PROT_IW 0x02 375#define IOMMU_PROT_IW 0x02
376 376
377#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
378
377/* IOMMU capabilities */ 379/* IOMMU capabilities */
378#define IOMMU_CAP_IOTLB 24 380#define IOMMU_CAP_IOTLB 24
379#define IOMMU_CAP_NPCACHE 26 381#define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87274b54febd..28cb713d728c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1538 u32 pmen; 1538 u32 pmen;
1539 unsigned long flags; 1539 unsigned long flags;
1540 1540
1541 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1542 return;
1543
1541 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1544 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1542 pmen = readl(iommu->reg + DMAR_PMEN_REG); 1545 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1543 pmen &= ~DMA_PMEN_EPM; 1546 pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
5332 5335
5333 ctx_lo = context[0].lo; 5336 ctx_lo = context[0].lo;
5334 5337
5335 sdev->did = domain->iommu_did[iommu->seq_id]; 5338 sdev->did = FLPT_DEFAULT_DID;
5336 sdev->sid = PCI_DEVID(info->bus, info->devfn); 5339 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5337 5340
5338 if (!(ctx_lo & CONTEXT_PASIDE)) { 5341 if (!(ctx_lo & CONTEXT_PASIDE)) {
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f101afc315ab..9a8a8870e267 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -160,6 +160,14 @@
160 160
161#define ARM_V7S_TCR_PD1 BIT(5) 161#define ARM_V7S_TCR_PD1 BIT(5)
162 162
163#ifdef CONFIG_ZONE_DMA32
164#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
165#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
166#else
167#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
168#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
169#endif
170
163typedef u32 arm_v7s_iopte; 171typedef u32 arm_v7s_iopte;
164 172
165static bool selftest_running; 173static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
197 void *table = NULL; 205 void *table = NULL;
198 206
199 if (lvl == 1) 207 if (lvl == 1)
200 table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); 208 table = (void *)__get_free_pages(
209 __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
201 else if (lvl == 2) 210 else if (lvl == 2)
202 table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); 211 table = kmem_cache_zalloc(data->l2_tables, gfp);
203 phys = virt_to_phys(table); 212 phys = virt_to_phys(table);
204 if (phys != (arm_v7s_iopte)phys) 213 if (phys != (arm_v7s_iopte)phys) {
205 /* Doesn't fit in PTE */ 214 /* Doesn't fit in PTE */
215 dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
206 goto out_free; 216 goto out_free;
217 }
207 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { 218 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
208 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); 219 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
209 if (dma_mapping_error(dev, dma)) 220 if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
733 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", 744 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
734 ARM_V7S_TABLE_SIZE(2), 745 ARM_V7S_TABLE_SIZE(2),
735 ARM_V7S_TABLE_SIZE(2), 746 ARM_V7S_TABLE_SIZE(2),
736 SLAB_CACHE_DMA, NULL); 747 ARM_V7S_TABLE_SLAB_FLAGS, NULL);
737 if (!data->l2_tables) 748 if (!data->l2_tables)
738 goto out_free_data; 749 goto out_free_data;
739 750
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33a982e33716..109de67d5d72 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1105 1105
1106 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); 1106 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1107 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { 1107 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1108 dev_warn(dev,
1109 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1110 iommu_def_domain_type);
1111 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); 1108 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1109 if (dom) {
1110 dev_warn(dev,
1111 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1112 iommu_def_domain_type);
1113 }
1112 } 1114 }
1113 1115
1114 group->default_domain = dom; 1116 group->default_domain = dom;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
207 curr_iova = rb_entry(curr, struct iova, node); 207 curr_iova = rb_entry(curr, struct iova, node);
208 } while (curr && new_pfn <= curr_iova->pfn_hi); 208 } while (curr && new_pfn <= curr_iova->pfn_hi);
209 209
210 if (limit_pfn < size || new_pfn < iovad->start_pfn) 210 if (limit_pfn < size || new_pfn < iovad->start_pfn) {
211 iovad->max32_alloc_size = size;
211 goto iova32_full; 212 goto iova32_full;
213 }
212 214
213 /* pfn_lo will point to size aligned address if size_aligned is set */ 215 /* pfn_lo will point to size aligned address if size_aligned is set */
214 new->pfn_lo = new_pfn; 216 new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
222 return 0; 224 return 0;
223 225
224iova32_full: 226iova32_full:
225 iovad->max32_alloc_size = size;
226 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 227 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
227 return -ENOMEM; 228 return -ENOMEM;
228} 229}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 83364fedbf0a..5e4ca139e4ea 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,14 +275,14 @@ out_free:
275 return ret; 275 return ret;
276} 276}
277 277
278int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, 278static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
279 struct device_node *parent) 279 struct device_node *parent)
280{ 280{
281 return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); 281 return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
282} 282}
283IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); 283IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
284 284
285int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, 285static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
286 struct device_node *parent) 286 struct device_node *parent)
287{ 287{
288 return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); 288 return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2dd1ff0cf558..7577755bdcf4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
1482 ra = container_of(a, struct lpi_range, entry); 1482 ra = container_of(a, struct lpi_range, entry);
1483 rb = container_of(b, struct lpi_range, entry); 1483 rb = container_of(b, struct lpi_range, entry);
1484 1484
1485 return rb->base_id - ra->base_id; 1485 return ra->base_id - rb->base_id;
1486} 1486}
1487 1487
1488static void merge_lpi_ranges(void) 1488static void merge_lpi_ranges(void)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ba2a37a27a54..fd3110c171ba 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
1089#endif 1089#endif
1090} 1090}
1091 1091
1092static int gic_init_bases(struct gic_chip_data *gic, int irq_start, 1092static int gic_init_bases(struct gic_chip_data *gic,
1093 struct fwnode_handle *handle) 1093 struct fwnode_handle *handle)
1094{ 1094{
1095 irq_hw_number_t hwirq_base; 1095 int gic_irqs, ret;
1096 int gic_irqs, irq_base, ret;
1097 1096
1098 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { 1097 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
1099 /* Frankein-GIC without banked registers... */ 1098 /* Frankein-GIC without banked registers... */
@@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
1145 } else { /* Legacy support */ 1144 } else { /* Legacy support */
1146 /* 1145 /*
1147 * For primary GICs, skip over SGIs. 1146 * For primary GICs, skip over SGIs.
1148 * For secondary GICs, skip over PPIs, too. 1147 * No secondary GIC support whatsoever.
1149 */ 1148 */
1150 if (gic == &gic_data[0] && (irq_start & 31) > 0) { 1149 int irq_base;
1151 hwirq_base = 16;
1152 if (irq_start != -1)
1153 irq_start = (irq_start & ~31) + 16;
1154 } else {
1155 hwirq_base = 32;
1156 }
1157 1150
1158 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ 1151 gic_irqs -= 16; /* calculate # of irqs to allocate */
1159 1152
1160 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, 1153 irq_base = irq_alloc_descs(16, 16, gic_irqs,
1161 numa_node_id()); 1154 numa_node_id());
1162 if (irq_base < 0) { 1155 if (irq_base < 0) {
1163 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 1156 WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
1164 irq_start); 1157 irq_base = 16;
1165 irq_base = irq_start;
1166 } 1158 }
1167 1159
1168 gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, 1160 gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
1169 hwirq_base, &gic_irq_domain_ops, gic); 1161 16, &gic_irq_domain_ops, gic);
1170 } 1162 }
1171 1163
1172 if (WARN_ON(!gic->domain)) { 1164 if (WARN_ON(!gic->domain)) {
@@ -1195,7 +1187,6 @@ error:
1195} 1187}
1196 1188
1197static int __init __gic_init_bases(struct gic_chip_data *gic, 1189static int __init __gic_init_bases(struct gic_chip_data *gic,
1198 int irq_start,
1199 struct fwnode_handle *handle) 1190 struct fwnode_handle *handle)
1200{ 1191{
1201 char *name; 1192 char *name;
@@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
1231 gic_init_chip(gic, NULL, name, false); 1222 gic_init_chip(gic, NULL, name, false);
1232 } 1223 }
1233 1224
1234 ret = gic_init_bases(gic, irq_start, handle); 1225 ret = gic_init_bases(gic, handle);
1235 if (ret) 1226 if (ret)
1236 kfree(name); 1227 kfree(name);
1237 1228
1238 return ret; 1229 return ret;
1239} 1230}
1240 1231
1241void __init gic_init(unsigned int gic_nr, int irq_start, 1232void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
1242 void __iomem *dist_base, void __iomem *cpu_base)
1243{ 1233{
1244 struct gic_chip_data *gic; 1234 struct gic_chip_data *gic;
1245 1235
1246 if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
1247 return;
1248
1249 /* 1236 /*
1250 * Non-DT/ACPI systems won't run a hypervisor, so let's not 1237 * Non-DT/ACPI systems won't run a hypervisor, so let's not
1251 * bother with these... 1238 * bother with these...
1252 */ 1239 */
1253 static_branch_disable(&supports_deactivate_key); 1240 static_branch_disable(&supports_deactivate_key);
1254 1241
1255 gic = &gic_data[gic_nr]; 1242 gic = &gic_data[0];
1256 gic->raw_dist_base = dist_base; 1243 gic->raw_dist_base = dist_base;
1257 gic->raw_cpu_base = cpu_base; 1244 gic->raw_cpu_base = cpu_base;
1258 1245
1259 __gic_init_bases(gic, irq_start, NULL); 1246 __gic_init_bases(gic, NULL);
1260} 1247}
1261 1248
1262static void gic_teardown(struct gic_chip_data *gic) 1249static void gic_teardown(struct gic_chip_data *gic)
@@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
1399 if (ret) 1386 if (ret)
1400 return ret; 1387 return ret;
1401 1388
1402 ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); 1389 ret = gic_init_bases(*gic, &dev->of_node->fwnode);
1403 if (ret) { 1390 if (ret) {
1404 gic_teardown(*gic); 1391 gic_teardown(*gic);
1405 return ret; 1392 return ret;
@@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1459 if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base)) 1446 if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
1460 static_branch_disable(&supports_deactivate_key); 1447 static_branch_disable(&supports_deactivate_key);
1461 1448
1462 ret = __gic_init_bases(gic, -1, &node->fwnode); 1449 ret = __gic_init_bases(gic, &node->fwnode);
1463 if (ret) { 1450 if (ret) {
1464 gic_teardown(gic); 1451 gic_teardown(gic);
1465 return ret; 1452 return ret;
@@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
1650 return -ENOMEM; 1637 return -ENOMEM;
1651 } 1638 }
1652 1639
1653 ret = __gic_init_bases(gic, -1, domain_handle); 1640 ret = __gic_init_bases(gic, domain_handle);
1654 if (ret) { 1641 if (ret) {
1655 pr_err("Failed to initialise GIC\n"); 1642 pr_err("Failed to initialise GIC\n");
1656 irq_domain_free_fwnode(domain_handle); 1643 irq_domain_free_fwnode(domain_handle);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index d1098f4da6a4..88df3d00052c 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
169 169
170 raw_spin_lock_init(&data->lock); 170 raw_spin_lock_init(&data->lock);
171 171
172 of_property_read_u32(np, "fsl,num-irqs", &irqs_num); 172 ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
173 of_property_read_u32(np, "fsl,channel", &data->channel); 173 if (ret)
174 return ret;
175 ret = of_property_read_u32(np, "fsl,channel", &data->channel);
176 if (ret)
177 return ret;
174 178
175 /* 179 /*
176 * There is one output irq for each group of 64 inputs. 180 * There is one output irq for each group of 64 inputs.
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 567b29c47608..98b6e1d4b1a6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
161 void __iomem *base = d->chip_data; 161 void __iomem *base = d->chip_data;
162 u32 val; 162 u32 val;
163 163
164 if (!msg->address_lo && !msg->address_hi)
165 return;
166
164 base += get_mbigen_vec_reg(d->hwirq); 167 base += get_mbigen_vec_reg(d->hwirq);
165 val = readl_relaxed(base); 168 val = readl_relaxed(base);
166 169
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 3496b61a312a..8eed478f3b7e 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
179 return 0; 179 return 0;
180} 180}
181 181
182const struct irq_domain_ops mmp_irq_domain_ops = { 182static const struct irq_domain_ops mmp_irq_domain_ops = {
183 .map = mmp_irq_domain_map, 183 .map = mmp_irq_domain_map,
184 .xlate = mmp_irq_domain_xlate, 184 .xlate = mmp_irq_domain_xlate,
185}; 185};
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index add4c9c934c8..18832ccc8ff8 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -478,7 +478,7 @@ dispose_irq:
478 return ret; 478 return ret;
479} 479}
480 480
481struct mvebu_sei_caps mvebu_sei_ap806_caps = { 481static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
482 .ap_range = { 482 .ap_range = {
483 .first = 0, 483 .first = 0,
484 .size = 21, 484 .size = 21,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a93296b9b45d..7bd1d4cb2e19 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
716 const struct stm32_exti_bank *stm32_bank; 716 const struct stm32_exti_bank *stm32_bank;
717 struct stm32_exti_chip_data *chip_data; 717 struct stm32_exti_chip_data *chip_data;
718 void __iomem *base = h_data->base; 718 void __iomem *base = h_data->base;
719 u32 irqs_mask;
720 719
721 stm32_bank = h_data->drv_data->exti_banks[bank_idx]; 720 stm32_bank = h_data->drv_data->exti_banks[bank_idx];
722 chip_data = &h_data->chips_data[bank_idx]; 721 chip_data = &h_data->chips_data[bank_idx];
@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
725 724
726 raw_spin_lock_init(&chip_data->rlock); 725 raw_spin_lock_init(&chip_data->rlock);
727 726
728 /* Determine number of irqs supported */
729 writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
730 irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
731
732 /* 727 /*
733 * This IP has no reset, so after hot reboot we should 728 * This IP has no reset, so after hot reboot we should
734 * clear registers to avoid residue 729 * clear registers to avoid residue
735 */ 730 */
736 writel_relaxed(0, base + stm32_bank->imr_ofst); 731 writel_relaxed(0, base + stm32_bank->imr_ofst);
737 writel_relaxed(0, base + stm32_bank->emr_ofst); 732 writel_relaxed(0, base + stm32_bank->emr_ofst);
738 writel_relaxed(0, base + stm32_bank->rtsr_ofst);
739 writel_relaxed(0, base + stm32_bank->ftsr_ofst);
740 writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
741 if (stm32_bank->fpr_ofst != UNDEF_REG)
742 writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
743 733
744 pr_info("%pOF: bank%d\n", h_data->node, bank_idx); 734 pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
745 735
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 4d85645c87f7..0928fd1f0e0c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
4365 if (m->clock2) 4365 if (m->clock2)
4366 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); 4366 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
4367 4367
4368 if (ent->device == 0xB410) { 4368 if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
4369 ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
4369 test_and_set_bit(HFC_CHIP_B410P, &hc->chip); 4370 test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
4370 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); 4371 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
4371 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); 4372 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b0c15d..7cb4d685a1f1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
513 const struct i2c_device_id *id) 513 const struct i2c_device_id *id)
514{ 514{
515 int devid; 515 int devid;
516 const struct of_device_id *of_id;
516 struct pca9532_data *data = i2c_get_clientdata(client); 517 struct pca9532_data *data = i2c_get_clientdata(client);
517 struct pca9532_platform_data *pca9532_pdata = 518 struct pca9532_platform_data *pca9532_pdata =
518 dev_get_platdata(&client->dev); 519 dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
528 dev_err(&client->dev, "no platform data\n"); 529 dev_err(&client->dev, "no platform data\n");
529 return -EINVAL; 530 return -EINVAL;
530 } 531 }
531 devid = (int)(uintptr_t)of_match_device( 532 of_id = of_match_device(of_pca9532_leds_match,
532 of_pca9532_leds_match, &client->dev)->data; 533 &client->dev);
534 if (unlikely(!of_id))
535 return -EINVAL;
536 devid = (int)(uintptr_t) of_id->data;
533 } else { 537 } else {
534 devid = id->driver_data; 538 devid = id->driver_data;
535 } 539 }
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed46d473..136f86a1627d 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
122 trigger_data->net_dev = NULL; 122 trigger_data->net_dev = NULL;
123 } 123 }
124 124
125 strncpy(trigger_data->device_name, buf, size); 125 memcpy(trigger_data->device_name, buf, size);
126 trigger_data->device_name[size] = 0;
126 if (size > 0 && trigger_data->device_name[size - 1] == '\n') 127 if (size > 0 && trigger_data->device_name[size - 1] == '\n')
127 trigger_data->device_name[size - 1] = 0; 128 trigger_data->device_name[size - 1] = 0;
128 129
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
301 container_of(nb, struct led_netdev_data, notifier); 302 container_of(nb, struct led_netdev_data, notifier);
302 303
303 if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE 304 if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
304 && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER 305 && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
305 && evt != NETDEV_CHANGENAME)
306 return NOTIFY_DONE; 306 return NOTIFY_DONE;
307 307
308 if (strcmp(dev->name, trigger_data->device_name)) 308 if (!(dev == trigger_data->net_dev ||
309 (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
309 return NOTIFY_DONE; 310 return NOTIFY_DONE;
310 311
311 cancel_delayed_work_sync(&trigger_data->work); 312 cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
320 dev_hold(dev); 321 dev_hold(dev);
321 trigger_data->net_dev = dev; 322 trigger_data->net_dev = dev;
322 break; 323 break;
323 case NETDEV_CHANGENAME:
324 case NETDEV_UNREGISTER: 324 case NETDEV_UNREGISTER:
325 if (trigger_data->net_dev) { 325 dev_put(trigger_data->net_dev);
326 dev_put(trigger_data->net_dev); 326 trigger_data->net_dev = NULL;
327 trigger_data->net_dev = NULL;
328 }
329 break; 327 break;
330 case NETDEV_UP: 328 case NETDEV_UP:
331 case NETDEV_CHANGE: 329 case NETDEV_CHANGE:
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
179 179
180 /* We also need to update CI for internal queues */ 180 /* We also need to update CI for internal queues */
181 if (cs->submitted) { 181 if (cs->submitted) {
182 int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
183
184 WARN_ONCE((cs_cnt < 0),
185 "hl%d: error in CS active cnt %d\n",
186 hdev->id, cs_cnt);
187
182 hl_int_hw_queue_update_ci(cs); 188 hl_int_hw_queue_update_ci(cs);
183 189
184 spin_lock(&hdev->hw_queues_mirror_lock); 190 spin_lock(&hdev->hw_queues_mirror_lock);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; 232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
233 enum vm_type_t *vm_type; 233 enum vm_type_t *vm_type;
234 bool once = true; 234 bool once = true;
235 u64 j;
235 int i; 236 int i;
236 237
237 if (!dev_entry->hdev->mmu_enable) 238 if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
260 } else { 261 } else {
261 phys_pg_pack = hnode->ptr; 262 phys_pg_pack = hnode->ptr;
262 seq_printf(s, 263 seq_printf(s,
263 " 0x%-14llx %-10u %-4u\n", 264 " 0x%-14llx %-10llu %-4u\n",
264 hnode->vaddr, phys_pg_pack->total_size, 265 hnode->vaddr, phys_pg_pack->total_size,
265 phys_pg_pack->handle); 266 phys_pg_pack->handle);
266 } 267 }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
282 phys_pg_pack->page_size); 283 phys_pg_pack->page_size);
283 seq_puts(s, " physical address\n"); 284 seq_puts(s, " physical address\n");
284 seq_puts(s, "---------------------\n"); 285 seq_puts(s, "---------------------\n");
285 for (i = 0 ; i < phys_pg_pack->npages ; i++) { 286 for (j = 0 ; j < phys_pg_pack->npages ; j++) {
286 seq_printf(s, " 0x%-14llx\n", 287 seq_printf(s, " 0x%-14llx\n",
287 phys_pg_pack->pages[i]); 288 phys_pg_pack->pages[j]);
288 } 289 }
289 } 290 }
290 spin_unlock(&vm->idr_lock); 291 spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
11#include <linux/sched/signal.h> 11#include <linux/sched/signal.h>
12#include <linux/hwmon.h> 12#include <linux/hwmon.h>
13 13
14#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
15
14bool hl_device_disabled_or_in_reset(struct hl_device *hdev) 16bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
15{ 17{
16 if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) 18 if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
216 spin_lock_init(&hdev->hw_queues_mirror_lock); 218 spin_lock_init(&hdev->hw_queues_mirror_lock);
217 atomic_set(&hdev->in_reset, 0); 219 atomic_set(&hdev->in_reset, 0);
218 atomic_set(&hdev->fd_open_cnt, 0); 220 atomic_set(&hdev->fd_open_cnt, 0);
221 atomic_set(&hdev->cs_active_cnt, 0);
219 222
220 return 0; 223 return 0;
221 224
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
413 416
414 pci_save_state(hdev->pdev); 417 pci_save_state(hdev->pdev);
415 418
419 /* Block future CS/VM/JOB completion operations */
420 rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
421 if (rc) {
422 dev_err(hdev->dev, "Can't suspend while in reset\n");
423 return -EIO;
424 }
425
426 /* This blocks all other stuff that is not blocked by in_reset */
427 hdev->disabled = true;
428
429 /*
430 * Flush anyone that is inside the critical section of enqueue
431 * jobs to the H/W
432 */
433 hdev->asic_funcs->hw_queues_lock(hdev);
434 hdev->asic_funcs->hw_queues_unlock(hdev);
435
436 /* Flush processes that are sending message to CPU */
437 mutex_lock(&hdev->send_cpu_message_lock);
438 mutex_unlock(&hdev->send_cpu_message_lock);
439
416 rc = hdev->asic_funcs->suspend(hdev); 440 rc = hdev->asic_funcs->suspend(hdev);
417 if (rc) 441 if (rc)
418 dev_err(hdev->dev, 442 dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
440 464
441 pci_set_power_state(hdev->pdev, PCI_D0); 465 pci_set_power_state(hdev->pdev, PCI_D0);
442 pci_restore_state(hdev->pdev); 466 pci_restore_state(hdev->pdev);
443 rc = pci_enable_device(hdev->pdev); 467 rc = pci_enable_device_mem(hdev->pdev);
444 if (rc) { 468 if (rc) {
445 dev_err(hdev->dev, 469 dev_err(hdev->dev,
446 "Failed to enable PCI device in resume\n"); 470 "Failed to enable PCI device in resume\n");
447 return rc; 471 return rc;
448 } 472 }
449 473
474 pci_set_master(hdev->pdev);
475
450 rc = hdev->asic_funcs->resume(hdev); 476 rc = hdev->asic_funcs->resume(hdev);
451 if (rc) { 477 if (rc) {
452 dev_err(hdev->dev, 478 dev_err(hdev->dev, "Failed to resume device after suspend\n");
453 "Failed to enable PCI access from device CPU\n"); 479 goto disable_device;
454 return rc; 480 }
481
482
483 hdev->disabled = false;
484 atomic_set(&hdev->in_reset, 0);
485
486 rc = hl_device_reset(hdev, true, false);
487 if (rc) {
488 dev_err(hdev->dev, "Failed to reset device during resume\n");
489 goto disable_device;
455 } 490 }
456 491
457 return 0; 492 return 0;
493
494disable_device:
495 pci_clear_master(hdev->pdev);
496 pci_disable_device(hdev->pdev);
497
498 return rc;
458} 499}
459 500
460static void hl_device_hard_reset_pending(struct work_struct *work) 501static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
462 struct hl_device_reset_work *device_reset_work = 503 struct hl_device_reset_work *device_reset_work =
463 container_of(work, struct hl_device_reset_work, reset_work); 504 container_of(work, struct hl_device_reset_work, reset_work);
464 struct hl_device *hdev = device_reset_work->hdev; 505 struct hl_device *hdev = device_reset_work->hdev;
465 u16 pending_cnt = HL_PENDING_RESET_PER_SEC; 506 u16 pending_total, pending_cnt;
466 struct task_struct *task = NULL; 507 struct task_struct *task = NULL;
467 508
509 if (hdev->pldm)
510 pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
511 else
512 pending_total = HL_PENDING_RESET_PER_SEC;
513
514 pending_cnt = pending_total;
515
468 /* Flush all processes that are inside hl_open */ 516 /* Flush all processes that are inside hl_open */
469 mutex_lock(&hdev->fd_open_cnt_lock); 517 mutex_lock(&hdev->fd_open_cnt_lock);
470 518
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
489 } 537 }
490 } 538 }
491 539
540 pending_cnt = pending_total;
541
542 while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
543
544 pending_cnt--;
545
546 ssleep(1);
547 }
548
549 if (atomic_read(&hdev->fd_open_cnt))
550 dev_crit(hdev->dev,
551 "Going to hard reset with open user contexts\n");
552
492 mutex_unlock(&hdev->fd_open_cnt_lock); 553 mutex_unlock(&hdev->fd_open_cnt_lock);
493 554
494 hl_device_reset(hdev, true, true); 555 hl_device_reset(hdev, true, true);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..ea979ebd62fb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
1201 return retval; 1201 return retval;
1202} 1202}
1203 1203
1204static void goya_resume_external_queues(struct hl_device *hdev)
1205{
1206 WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
1207 WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
1208 WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
1209 WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
1210 WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
1211}
1212
1213/* 1204/*
1214 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU 1205 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1215 * 1206 *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
2178 return retval; 2169 return retval;
2179} 2170}
2180 2171
2181static void goya_resume_internal_queues(struct hl_device *hdev)
2182{
2183 WREG32(mmMME_QM_GLBL_CFG1, 0);
2184 WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
2185
2186 WREG32(mmTPC0_QM_GLBL_CFG1, 0);
2187 WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
2188
2189 WREG32(mmTPC1_QM_GLBL_CFG1, 0);
2190 WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
2191
2192 WREG32(mmTPC2_QM_GLBL_CFG1, 0);
2193 WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
2194
2195 WREG32(mmTPC3_QM_GLBL_CFG1, 0);
2196 WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
2197
2198 WREG32(mmTPC4_QM_GLBL_CFG1, 0);
2199 WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
2200
2201 WREG32(mmTPC5_QM_GLBL_CFG1, 0);
2202 WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
2203
2204 WREG32(mmTPC6_QM_GLBL_CFG1, 0);
2205 WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
2206
2207 WREG32(mmTPC7_QM_GLBL_CFG1, 0);
2208 WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
2209}
2210
2211static void goya_dma_stall(struct hl_device *hdev) 2172static void goya_dma_stall(struct hl_device *hdev)
2212{ 2173{
2213 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); 2174 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
2905{ 2866{
2906 int rc; 2867 int rc;
2907 2868
2908 rc = goya_stop_internal_queues(hdev);
2909
2910 if (rc) {
2911 dev_err(hdev->dev, "failed to stop internal queues\n");
2912 return rc;
2913 }
2914
2915 rc = goya_stop_external_queues(hdev);
2916
2917 if (rc) {
2918 dev_err(hdev->dev, "failed to stop external queues\n");
2919 return rc;
2920 }
2921
2922 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); 2869 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2923 if (rc) 2870 if (rc)
2924 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); 2871 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
2928 2875
2929int goya_resume(struct hl_device *hdev) 2876int goya_resume(struct hl_device *hdev)
2930{ 2877{
2931 int rc; 2878 return goya_init_iatu(hdev);
2932
2933 goya_resume_external_queues(hdev);
2934 goya_resume_internal_queues(hdev);
2935
2936 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
2937 if (rc)
2938 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
2939 return rc;
2940} 2879}
2941 2880
2942static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, 2881static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3070 3009
3071 *dma_handle = hdev->asic_prop.sram_base_address; 3010 *dma_handle = hdev->asic_prop.sram_base_address;
3072 3011
3073 base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; 3012 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
3074 3013
3075 switch (queue_id) { 3014 switch (queue_id) {
3076 case GOYA_QUEUE_ID_MME: 3015 case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
793 * struct hl_vm_phys_pg_pack - physical page pack. 793 * struct hl_vm_phys_pg_pack - physical page pack.
794 * @vm_type: describes the type of the virtual area descriptor. 794 * @vm_type: describes the type of the virtual area descriptor.
795 * @pages: the physical page array. 795 * @pages: the physical page array.
796 * @npages: num physical pages in the pack.
797 * @total_size: total size of all the pages in this list.
796 * @mapping_cnt: number of shared mappings. 798 * @mapping_cnt: number of shared mappings.
797 * @asid: the context related to this list. 799 * @asid: the context related to this list.
798 * @npages: num physical pages in the pack.
799 * @page_size: size of each page in the pack. 800 * @page_size: size of each page in the pack.
800 * @total_size: total size of all the pages in this list.
801 * @flags: HL_MEM_* flags related to this list. 801 * @flags: HL_MEM_* flags related to this list.
802 * @handle: the provided handle related to this list. 802 * @handle: the provided handle related to this list.
803 * @offset: offset from the first page. 803 * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
807struct hl_vm_phys_pg_pack { 807struct hl_vm_phys_pg_pack {
808 enum vm_type_t vm_type; /* must be first */ 808 enum vm_type_t vm_type; /* must be first */
809 u64 *pages; 809 u64 *pages;
810 u64 npages;
811 u64 total_size;
810 atomic_t mapping_cnt; 812 atomic_t mapping_cnt;
811 u32 asid; 813 u32 asid;
812 u32 npages;
813 u32 page_size; 814 u32 page_size;
814 u32 total_size;
815 u32 flags; 815 u32 flags;
816 u32 handle; 816 u32 handle;
817 u32 offset; 817 u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
1056 * @cb_pool_lock: protects the CB pool. 1056 * @cb_pool_lock: protects the CB pool.
1057 * @user_ctx: current user context executing. 1057 * @user_ctx: current user context executing.
1058 * @dram_used_mem: current DRAM memory consumption. 1058 * @dram_used_mem: current DRAM memory consumption.
1059 * @in_reset: is device in reset flow.
1060 * @curr_pll_profile: current PLL profile.
1061 * @fd_open_cnt: number of open user processes.
1062 * @timeout_jiffies: device CS timeout value. 1059 * @timeout_jiffies: device CS timeout value.
1063 * @max_power: the max power of the device, as configured by the sysadmin. This 1060 * @max_power: the max power of the device, as configured by the sysadmin. This
1064 * value is saved so in case of hard-reset, KMD will restore this 1061 * value is saved so in case of hard-reset, KMD will restore this
1065 * value and update the F/W after the re-initialization 1062 * value and update the F/W after the re-initialization
1063 * @in_reset: is device in reset flow.
1064 * @curr_pll_profile: current PLL profile.
1065 * @fd_open_cnt: number of open user processes.
1066 * @cs_active_cnt: number of active command submissions on this device (active
1067 * means already in H/W queues)
1066 * @major: habanalabs KMD major. 1068 * @major: habanalabs KMD major.
1067 * @high_pll: high PLL profile frequency. 1069 * @high_pll: high PLL profile frequency.
1068 * @soft_reset_cnt: number of soft reset since KMD loading. 1070 * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
1128 struct hl_ctx *user_ctx; 1130 struct hl_ctx *user_ctx;
1129 1131
1130 atomic64_t dram_used_mem; 1132 atomic64_t dram_used_mem;
1133 u64 timeout_jiffies;
1134 u64 max_power;
1131 atomic_t in_reset; 1135 atomic_t in_reset;
1132 atomic_t curr_pll_profile; 1136 atomic_t curr_pll_profile;
1133 atomic_t fd_open_cnt; 1137 atomic_t fd_open_cnt;
1134 u64 timeout_jiffies; 1138 atomic_t cs_active_cnt;
1135 u64 max_power;
1136 u32 major; 1139 u32 major;
1137 u32 high_pll; 1140 u32 high_pll;
1138 u32 soft_reset_cnt; 1141 u32 soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
370 spin_unlock(&hdev->hw_queues_mirror_lock); 370 spin_unlock(&hdev->hw_queues_mirror_lock);
371 } 371 }
372 372
373 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { 373 atomic_inc(&hdev->cs_active_cnt);
374
375 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
374 if (job->ext_queue) 376 if (job->ext_queue)
375 ext_hw_queue_schedule_job(job); 377 ext_hw_queue_schedule_job(job);
376 else 378 else
377 int_hw_queue_schedule_job(job); 379 int_hw_queue_schedule_job(job);
378 }
379 380
380 cs->submitted = true; 381 cs->submitted = true;
381 382
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
56 struct hl_device *hdev = ctx->hdev; 56 struct hl_device *hdev = ctx->hdev;
57 struct hl_vm *vm = &hdev->vm; 57 struct hl_vm *vm = &hdev->vm;
58 struct hl_vm_phys_pg_pack *phys_pg_pack; 58 struct hl_vm_phys_pg_pack *phys_pg_pack;
59 u64 paddr = 0; 59 u64 paddr = 0, total_size, num_pgs, i;
60 u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; 60 u32 num_curr_pgs, page_size, page_shift;
61 int handle, rc, i; 61 int handle, rc;
62 bool contiguous; 62 bool contiguous;
63 63
64 num_curr_pgs = 0; 64 num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
73 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); 73 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
74 if (!paddr) { 74 if (!paddr) {
75 dev_err(hdev->dev, 75 dev_err(hdev->dev,
76 "failed to allocate %u huge contiguous pages\n", 76 "failed to allocate %llu huge contiguous pages\n",
77 num_pgs); 77 num_pgs);
78 return -ENOMEM; 78 return -ENOMEM;
79 } 79 }
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
93 phys_pg_pack->flags = args->flags; 93 phys_pg_pack->flags = args->flags;
94 phys_pg_pack->contiguous = contiguous; 94 phys_pg_pack->contiguous = contiguous;
95 95
96 phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); 96 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
97 if (!phys_pg_pack->pages) { 97 if (!phys_pg_pack->pages) {
98 rc = -ENOMEM; 98 rc = -ENOMEM;
99 goto pages_arr_err; 99 goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
148 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], 148 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
149 page_size); 149 page_size);
150 150
151 kfree(phys_pg_pack->pages); 151 kvfree(phys_pg_pack->pages);
152pages_arr_err: 152pages_arr_err:
153 kfree(phys_pg_pack); 153 kfree(phys_pg_pack);
154pages_pack_err: 154pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
267 struct hl_vm_phys_pg_pack *phys_pg_pack) 267 struct hl_vm_phys_pg_pack *phys_pg_pack)
268{ 268{
269 struct hl_vm *vm = &hdev->vm; 269 struct hl_vm *vm = &hdev->vm;
270 int i; 270 u64 i;
271 271
272 if (!phys_pg_pack->created_from_userptr) { 272 if (!phys_pg_pack->created_from_userptr) {
273 if (phys_pg_pack->contiguous) { 273 if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
288 } 288 }
289 } 289 }
290 290
291 kfree(phys_pg_pack->pages); 291 kvfree(phys_pg_pack->pages);
292 kfree(phys_pg_pack); 292 kfree(phys_pg_pack);
293} 293}
294 294
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
519 * - Return the start address of the virtual block 519 * - Return the start address of the virtual block
520 */ 520 */
521static u64 get_va_block(struct hl_device *hdev, 521static u64 get_va_block(struct hl_device *hdev,
522 struct hl_va_range *va_range, u32 size, u64 hint_addr, 522 struct hl_va_range *va_range, u64 size, u64 hint_addr,
523 bool is_userptr) 523 bool is_userptr)
524{ 524{
525 struct hl_vm_va_block *va_block, *new_va_block = NULL; 525 struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
577 } 577 }
578 578
579 if (!new_va_block) { 579 if (!new_va_block) {
580 dev_err(hdev->dev, "no available va block for size %u\n", size); 580 dev_err(hdev->dev, "no available va block for size %llu\n",
581 size);
581 goto out; 582 goto out;
582 } 583 }
583 584
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
648 struct hl_vm_phys_pg_pack *phys_pg_pack; 649 struct hl_vm_phys_pg_pack *phys_pg_pack;
649 struct scatterlist *sg; 650 struct scatterlist *sg;
650 dma_addr_t dma_addr; 651 dma_addr_t dma_addr;
651 u64 page_mask; 652 u64 page_mask, total_npages;
652 u32 npages, total_npages, page_size = PAGE_SIZE; 653 u32 npages, page_size = PAGE_SIZE;
653 bool first = true, is_huge_page_opt = true; 654 bool first = true, is_huge_page_opt = true;
654 int rc, i, j; 655 int rc, i, j;
655 656
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
691 692
692 page_mask = ~(((u64) page_size) - 1); 693 page_mask = ~(((u64) page_size) - 1);
693 694
694 phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); 695 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
696 GFP_KERNEL);
695 if (!phys_pg_pack->pages) { 697 if (!phys_pg_pack->pages) {
696 rc = -ENOMEM; 698 rc = -ENOMEM;
697 goto page_pack_arr_mem_err; 699 goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
750 struct hl_vm_phys_pg_pack *phys_pg_pack) 752 struct hl_vm_phys_pg_pack *phys_pg_pack)
751{ 753{
752 struct hl_device *hdev = ctx->hdev; 754 struct hl_device *hdev = ctx->hdev;
753 u64 next_vaddr = vaddr, paddr; 755 u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
754 u32 page_size = phys_pg_pack->page_size; 756 u32 page_size = phys_pg_pack->page_size;
755 int i, rc = 0, mapped_pg_cnt = 0; 757 int rc = 0;
756 758
757 for (i = 0 ; i < phys_pg_pack->npages ; i++) { 759 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
758 paddr = phys_pg_pack->pages[i]; 760 paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
764 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); 766 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
765 if (rc) { 767 if (rc) {
766 dev_err(hdev->dev, 768 dev_err(hdev->dev,
767 "map failed for handle %u, npages: %d, mapped: %d", 769 "map failed for handle %u, npages: %llu, mapped: %llu",
768 phys_pg_pack->handle, phys_pg_pack->npages, 770 phys_pg_pack->handle, phys_pg_pack->npages,
769 mapped_pg_cnt); 771 mapped_pg_cnt);
770 goto err; 772 goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
985 struct hl_vm_hash_node *hnode = NULL; 987 struct hl_vm_hash_node *hnode = NULL;
986 struct hl_userptr *userptr = NULL; 988 struct hl_userptr *userptr = NULL;
987 enum vm_type_t *vm_type; 989 enum vm_type_t *vm_type;
988 u64 next_vaddr; 990 u64 next_vaddr, i;
989 u32 page_size; 991 u32 page_size;
990 bool is_userptr; 992 bool is_userptr;
991 int i, rc; 993 int rc;
992 994
993 /* protect from double entrance */ 995 /* protect from double entrance */
994 mutex_lock(&ctx->mem_hash_lock); 996 mutex_lock(&ctx->mem_hash_lock);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
832int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) 832int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
833{ 833{
834 struct hl_device *hdev = ctx->hdev; 834 struct hl_device *hdev = ctx->hdev;
835 u64 real_virt_addr; 835 u64 real_virt_addr, real_phys_addr;
836 u32 real_page_size, npages; 836 u32 real_page_size, npages;
837 int i, rc, mapped_cnt = 0; 837 int i, rc, mapped_cnt = 0;
838 838
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
857 857
858 npages = page_size / real_page_size; 858 npages = page_size / real_page_size;
859 real_virt_addr = virt_addr; 859 real_virt_addr = virt_addr;
860 real_phys_addr = phys_addr;
860 861
861 for (i = 0 ; i < npages ; i++) { 862 for (i = 0 ; i < npages ; i++) {
862 rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr, 863 rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
863 real_page_size); 864 real_page_size);
864 if (rc) 865 if (rc)
865 goto err; 866 goto err;
866 867
867 real_virt_addr += real_page_size; 868 real_virt_addr += real_page_size;
869 real_phys_addr += real_page_size;
868 mapped_cnt++; 870 mapped_cnt++;
869 } 871 }
870 872
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index c712b7deb3a9..82a97866e0cf 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1044,14 +1044,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
1044 mmc->caps2 = MMC_CAP2_NO_SDIO; 1044 mmc->caps2 = MMC_CAP2_NO_SDIO;
1045 mmc->ops = &alcor_sdc_ops; 1045 mmc->ops = &alcor_sdc_ops;
1046 1046
1047 /* Hardware cannot do scatter lists */ 1047 /* The hardware does DMA data transfer of 4096 bytes to/from a single
1048 * buffer address. Scatterlists are not supported, but upon DMA
1049 * completion (signalled via IRQ), the original vendor driver does
1050 * then immediately set up another DMA transfer of the next 4096
1051 * bytes.
1052 *
1053 * This means that we need to handle the I/O in 4096 byte chunks.
1054 * Lacking a way to limit the sglist entries to 4096 bytes, we instead
1055 * impose that only one segment is provided, with maximum size 4096,
1056 * which also happens to be the minimum size. This means that the
1057 * single-entry sglist handled by this driver can be handed directly
1058 * to the hardware, nice and simple.
1059 *
1060 * Unfortunately though, that means we only do 4096 bytes I/O per
1061 * MMC command. A future improvement would be to make the driver
1062 * accept sg lists and entries of any size, and simply iterate
1063 * through them 4096 bytes at a time.
1064 */
1048 mmc->max_segs = AU6601_MAX_DMA_SEGMENTS; 1065 mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
1049 mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE; 1066 mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
1050 1067 mmc->max_req_size = mmc->max_seg_size;
1051 mmc->max_blk_size = mmc->max_seg_size;
1052 mmc->max_blk_count = mmc->max_segs;
1053
1054 mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
1055} 1068}
1056 1069
1057static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) 1070static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 49e0daf2ef5e..f37003df1e01 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1117{ 1117{
1118} 1118}
1119#endif 1119#endif
1120static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1120static void init_mmcsd_host(struct mmc_davinci_host *host)
1121{ 1121{
1122 1122
1123 mmc_davinci_reset_ctrl(host, 1); 1123 mmc_davinci_reset_ctrl(host, 1);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index d54612257b06..45f7b9b53d48 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
290 struct scatterlist *sg; 290 struct scatterlist *sg;
291 int i; 291 int i;
292 292
293 for_each_sg(data->sg, sg, data->sg_len, i) { 293 for_each_sg(data->sg, sg, data->sg_len, i)
294 void *buf = kmap_atomic(sg_page(sg) + sg->offset); 294 buffer_swap32(sg_virt(sg), sg->length);
295 buffer_swap32(buf, sg->length);
296 kunmap_atomic(buf);
297 }
298} 295}
299#else 296#else
300static inline void mxcmci_swap_buffers(struct mmc_data *data) {} 297static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
611{ 608{
612 struct mmc_data *data = host->req->data; 609 struct mmc_data *data = host->req->data;
613 struct scatterlist *sg; 610 struct scatterlist *sg;
614 void *buf;
615 int stat, i; 611 int stat, i;
616 612
617 host->data = data; 613 host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
619 615
620 if (data->flags & MMC_DATA_READ) { 616 if (data->flags & MMC_DATA_READ) {
621 for_each_sg(data->sg, sg, data->sg_len, i) { 617 for_each_sg(data->sg, sg, data->sg_len, i) {
622 buf = kmap_atomic(sg_page(sg) + sg->offset); 618 stat = mxcmci_pull(host, sg_virt(sg), sg->length);
623 stat = mxcmci_pull(host, buf, sg->length);
624 kunmap(buf);
625 if (stat) 619 if (stat)
626 return stat; 620 return stat;
627 host->datasize += sg->length; 621 host->datasize += sg->length;
628 } 622 }
629 } else { 623 } else {
630 for_each_sg(data->sg, sg, data->sg_len, i) { 624 for_each_sg(data->sg, sg, data->sg_len, i) {
631 buf = kmap_atomic(sg_page(sg) + sg->offset); 625 stat = mxcmci_push(host, sg_virt(sg), sg->length);
632 stat = mxcmci_push(host, buf, sg->length);
633 kunmap(buf);
634 if (stat) 626 if (stat)
635 return stat; 627 return stat;
636 host->datasize += sg->length; 628 host->datasize += sg->length;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c907bf502a12..c1d3f0e38921 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
162static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) 162static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
163{ 163{
164 struct dma_async_tx_descriptor *tx; 164 struct dma_async_tx_descriptor *tx;
165 enum dma_data_direction direction; 165 enum dma_transfer_direction direction;
166 struct dma_slave_config config; 166 struct dma_slave_config config;
167 struct dma_chan *chan; 167 struct dma_chan *chan;
168 unsigned int nob = data->blocks; 168 unsigned int nob = data->blocks;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 71e13844df6c..8742e27e4e8b 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
641 struct renesas_sdhi *priv; 641 struct renesas_sdhi *priv;
642 struct resource *res; 642 struct resource *res;
643 int irq, ret, i; 643 int irq, ret, i;
644 u16 ver;
644 645
645 of_data = of_device_get_match_data(&pdev->dev); 646 of_data = of_device_get_match_data(&pdev->dev);
646 647
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
773 if (ret) 774 if (ret)
774 goto efree; 775 goto efree;
775 776
777 ver = sd_ctrl_read16(host, CTL_VERSION);
778 /* GEN2_SDR104 is first known SDHI to use 32bit block count */
779 if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
780 mmc_data->max_blk_count = U16_MAX;
781
776 ret = tmio_mmc_host_probe(host); 782 ret = tmio_mmc_host_probe(host);
777 if (ret < 0) 783 if (ret < 0)
778 goto edisclk; 784 goto edisclk;
779 785
780 /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ 786 /* One Gen2 SDHI incarnation does NOT have a CBSY bit */
781 if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) 787 if (ver == SDHI_VER_GEN2_SDR50)
782 mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; 788 mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
783 789
784 /* Enable tuning iff we have an SCC and a supported mode */ 790 /* Enable tuning iff we have an SCC and a supported mode */
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index b1a66ca3821a..5bbed477c9b1 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1056,6 +1056,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
1056 mmc->f_max = 48000000; 1056 mmc->f_max = 48000000;
1057 } 1057 }
1058 1058
1059 if (!mmc_can_gpio_ro(mmc))
1060 mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1061
1059 pltfm_host->clk = devm_clk_get(dev, "fck"); 1062 pltfm_host->clk = devm_clk_get(dev, "fck");
1060 if (IS_ERR(pltfm_host->clk)) { 1063 if (IS_ERR(pltfm_host->clk)) {
1061 ret = PTR_ERR(pltfm_host->clk); 1064 ret = PTR_ERR(pltfm_host->clk);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5e4ca082cfcd..7a96d168efc4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -216,8 +216,8 @@ config GENEVE
216 216
217config GTP 217config GTP
218 tristate "GPRS Tunneling Protocol datapath (GTP-U)" 218 tristate "GPRS Tunneling Protocol datapath (GTP-U)"
219 depends on INET && NET_UDP_TUNNEL 219 depends on INET
220 select NET_IP_TUNNEL 220 select NET_UDP_TUNNEL
221 ---help--- 221 ---help---
222 This allows one to create gtp virtual interfaces that provide 222 This allows one to create gtp virtual interfaces that provide
223 the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol 223 the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 576b37d12a63..c4fa400efdcc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
481 qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); 481 qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
482} 482}
483 483
484static u32
485qca8k_port_to_phy(int port)
486{
487 /* From Andrew Lunn:
488 * Port 0 has no internal phy.
489 * Port 1 has an internal PHY at MDIO address 0.
490 * Port 2 has an internal PHY at MDIO address 1.
491 * ...
492 * Port 5 has an internal PHY at MDIO address 4.
493 * Port 6 has no internal PHY.
494 */
495
496 return port - 1;
497}
498
499static int
500qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
501{
502 u32 phy, val;
503
504 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
505 return -EINVAL;
506
507 /* callee is responsible for not passing bad ports,
508 * but we still would like to make spills impossible.
509 */
510 phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
511 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
512 QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
513 QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
514 QCA8K_MDIO_MASTER_DATA(data);
515
516 qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
517
518 return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
519 QCA8K_MDIO_MASTER_BUSY);
520}
521
522static int
523qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
524{
525 u32 phy, val;
526
527 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
528 return -EINVAL;
529
530 /* callee is responsible for not passing bad ports,
531 * but we still would like to make spills impossible.
532 */
533 phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
534 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
535 QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
536 QCA8K_MDIO_MASTER_REG_ADDR(regnum);
537
538 qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
539
540 if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
541 QCA8K_MDIO_MASTER_BUSY))
542 return -ETIMEDOUT;
543
544 val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
545 QCA8K_MDIO_MASTER_DATA_MASK);
546
547 return val;
548}
549
550static int
551qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
552{
553 struct qca8k_priv *priv = ds->priv;
554
555 return qca8k_mdio_write(priv, port, regnum, data);
556}
557
558static int
559qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
560{
561 struct qca8k_priv *priv = ds->priv;
562 int ret;
563
564 ret = qca8k_mdio_read(priv, port, regnum);
565
566 if (ret < 0)
567 return 0xffff;
568
569 return ret;
570}
571
572static int
573qca8k_setup_mdio_bus(struct qca8k_priv *priv)
574{
575 u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
576 struct device_node *ports, *port;
577 int err;
578
579 ports = of_get_child_by_name(priv->dev->of_node, "ports");
580 if (!ports)
581 return -EINVAL;
582
583 for_each_available_child_of_node(ports, port) {
584 err = of_property_read_u32(port, "reg", &reg);
585 if (err)
586 return err;
587
588 if (!dsa_is_user_port(priv->ds, reg))
589 continue;
590
591 if (of_property_read_bool(port, "phy-handle"))
592 external_mdio_mask |= BIT(reg);
593 else
594 internal_mdio_mask |= BIT(reg);
595 }
596
597 if (!external_mdio_mask && !internal_mdio_mask) {
598 dev_err(priv->dev, "no PHYs are defined.\n");
599 return -EINVAL;
600 }
601
602 /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
603 * the MDIO_MASTER register also _disconnects_ the external MDC
604 * passthrough to the internal PHYs. It's not possible to use both
605 * configurations at the same time!
606 *
607 * Because this came up during the review process:
608 * If the external mdio-bus driver is capable magically disabling
609 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
610 * accessors for the time being, it would be possible to pull this
611 * off.
612 */
613 if (!!external_mdio_mask && !!internal_mdio_mask) {
614 dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
615 return -EINVAL;
616 }
617
618 if (external_mdio_mask) {
619 /* Make sure to disable the internal mdio bus in cases
620 * a dt-overlay and driver reload changed the configuration
621 */
622
623 qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
624 QCA8K_MDIO_MASTER_EN);
625 return 0;
626 }
627
628 priv->ops.phy_read = qca8k_phy_read;
629 priv->ops.phy_write = qca8k_phy_write;
630 return 0;
631}
632
484static int 633static int
485qca8k_setup(struct dsa_switch *ds) 634qca8k_setup(struct dsa_switch *ds)
486{ 635{
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
502 if (IS_ERR(priv->regmap)) 651 if (IS_ERR(priv->regmap))
503 pr_warn("regmap initialization failed"); 652 pr_warn("regmap initialization failed");
504 653
654 ret = qca8k_setup_mdio_bus(priv);
655 if (ret)
656 return ret;
657
505 /* Initialize CPU port pad mode (xMII type, delays...) */ 658 /* Initialize CPU port pad mode (xMII type, delays...) */
506 phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); 659 phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
507 if (phy_mode < 0) { 660 if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
624 qca8k_port_set_status(priv, port, 1); 777 qca8k_port_set_status(priv, port, 1);
625} 778}
626 779
627static int
628qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
629{
630 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
631
632 return mdiobus_read(priv->bus, phy, regnum);
633}
634
635static int
636qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
637{
638 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
639
640 return mdiobus_write(priv->bus, phy, regnum, val);
641}
642
643static void 780static void
644qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) 781qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
645{ 782{
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
879 .setup = qca8k_setup, 1016 .setup = qca8k_setup,
880 .adjust_link = qca8k_adjust_link, 1017 .adjust_link = qca8k_adjust_link,
881 .get_strings = qca8k_get_strings, 1018 .get_strings = qca8k_get_strings,
882 .phy_read = qca8k_phy_read,
883 .phy_write = qca8k_phy_write,
884 .get_ethtool_stats = qca8k_get_ethtool_stats, 1019 .get_ethtool_stats = qca8k_get_ethtool_stats,
885 .get_sset_count = qca8k_get_sset_count, 1020 .get_sset_count = qca8k_get_sset_count,
886 .get_mac_eee = qca8k_get_mac_eee, 1021 .get_mac_eee = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
923 return -ENOMEM; 1058 return -ENOMEM;
924 1059
925 priv->ds->priv = priv; 1060 priv->ds->priv = priv;
926 priv->ds->ops = &qca8k_switch_ops; 1061 priv->ops = qca8k_switch_ops;
1062 priv->ds->ops = &priv->ops;
927 mutex_init(&priv->reg_mutex); 1063 mutex_init(&priv->reg_mutex);
928 dev_set_drvdata(&mdiodev->dev, priv); 1064 dev_set_drvdata(&mdiodev->dev, priv);
929 1065
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index d146e54c8a6c..249fd62268e5 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -49,6 +49,18 @@
49#define QCA8K_MIB_FLUSH BIT(24) 49#define QCA8K_MIB_FLUSH BIT(24)
50#define QCA8K_MIB_CPU_KEEP BIT(20) 50#define QCA8K_MIB_CPU_KEEP BIT(20)
51#define QCA8K_MIB_BUSY BIT(17) 51#define QCA8K_MIB_BUSY BIT(17)
52#define QCA8K_MDIO_MASTER_CTRL 0x3c
53#define QCA8K_MDIO_MASTER_BUSY BIT(31)
54#define QCA8K_MDIO_MASTER_EN BIT(30)
55#define QCA8K_MDIO_MASTER_READ BIT(27)
56#define QCA8K_MDIO_MASTER_WRITE 0
57#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
58#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
59#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
60#define QCA8K_MDIO_MASTER_DATA(x) (x)
61#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
62#define QCA8K_MDIO_MASTER_MAX_PORTS 5
63#define QCA8K_MDIO_MASTER_MAX_REG 32
52#define QCA8K_GOL_MAC_ADDR0 0x60 64#define QCA8K_GOL_MAC_ADDR0 0x60
53#define QCA8K_GOL_MAC_ADDR1 0x64 65#define QCA8K_GOL_MAC_ADDR1 0x64
54#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) 66#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
169 struct dsa_switch *ds; 181 struct dsa_switch *ds;
170 struct mutex reg_mutex; 182 struct mutex reg_mutex;
171 struct device *dev; 183 struct device *dev;
184 struct dsa_switch_ops ops;
172}; 185};
173 186
174struct qca8k_mib_desc { 187struct qca8k_mib_desc {
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 808abb6b3671..b15752267c8d 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
1521static void set_rx_mode(struct net_device *dev) 1521static void set_rx_mode(struct net_device *dev)
1522{ 1522{
1523 int ioaddr = dev->base_addr; 1523 int ioaddr = dev->base_addr;
1524 short new_mode; 1524 unsigned short new_mode;
1525 1525
1526 if (dev->flags & IFF_PROMISC) { 1526 if (dev->flags & IFF_PROMISC) {
1527 if (corkscrew_debug > 3) 1527 if (corkscrew_debug > 3)
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 342ae08ec3c2..d60a86aa8aa8 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
153static void dayna_block_output(struct net_device *dev, int count, 153static void dayna_block_output(struct net_device *dev, int count,
154 const unsigned char *buf, int start_page); 154 const unsigned char *buf, int start_page);
155 155
156#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
157
158/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 156/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
159static void slow_sane_get_8390_hdr(struct net_device *dev, 157static void slow_sane_get_8390_hdr(struct net_device *dev,
160 struct e8390_pkt_hdr *hdr, int ring_page); 158 struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
233 231
234static enum mac8390_access mac8390_testio(unsigned long membase) 232static enum mac8390_access mac8390_testio(unsigned long membase)
235{ 233{
236 unsigned long outdata = 0xA5A0B5B0; 234 u32 outdata = 0xA5A0B5B0;
237 unsigned long indata = 0x00000000; 235 u32 indata = 0;
236
238 /* Try writing 32 bits */ 237 /* Try writing 32 bits */
239 memcpy_toio((void __iomem *)membase, &outdata, 4); 238 nubus_writel(outdata, membase);
240 /* Now compare them */ 239 /* Now read it back */
241 if (memcmp_withio(&outdata, membase, 4) == 0) 240 indata = nubus_readl(membase);
241 if (outdata == indata)
242 return ACCESS_32; 242 return ACCESS_32;
243
244 outdata = 0xC5C0D5D0;
245 indata = 0;
246
243 /* Write 16 bit output */ 247 /* Write 16 bit output */
244 word_memcpy_tocard(membase, &outdata, 4); 248 word_memcpy_tocard(membase, &outdata, 4);
245 /* Now read it back */ 249 /* Now read it back */
246 word_memcpy_fromcard(&indata, membase, 4); 250 word_memcpy_fromcard(&indata, membase, 4);
247 if (outdata == indata) 251 if (outdata == indata)
248 return ACCESS_16; 252 return ACCESS_16;
253
249 return ACCESS_UNKNOWN; 254 return ACCESS_UNKNOWN;
250} 255}
251 256
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 74550ccc7a20..e2ffb159cbe2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
186 } 186 }
187 if (buff->is_ip_cso) { 187 if (buff->is_ip_cso) {
188 __skb_incr_checksum_unnecessary(skb); 188 __skb_incr_checksum_unnecessary(skb);
189 if (buff->is_udp_cso || buff->is_tcp_cso)
190 __skb_incr_checksum_unnecessary(skb);
191 } else { 189 } else {
192 skb->ip_summed = CHECKSUM_NONE; 190 skb->ip_summed = CHECKSUM_NONE;
193 } 191 }
192
193 if (buff->is_udp_cso || buff->is_tcp_cso)
194 __skb_incr_checksum_unnecessary(skb);
194} 195}
195 196
196#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 197#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ad099fd01b45..1522aee81884 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3370,14 +3370,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3370 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3370 *hclk = devm_clk_get(&pdev->dev, "hclk");
3371 } 3371 }
3372 3372
3373 if (IS_ERR(*pclk)) { 3373 if (IS_ERR_OR_NULL(*pclk)) {
3374 err = PTR_ERR(*pclk); 3374 err = PTR_ERR(*pclk);
3375 if (!err)
3376 err = -ENODEV;
3377
3375 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 3378 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
3376 return err; 3379 return err;
3377 } 3380 }
3378 3381
3379 if (IS_ERR(*hclk)) { 3382 if (IS_ERR_OR_NULL(*hclk)) {
3380 err = PTR_ERR(*hclk); 3383 err = PTR_ERR(*hclk);
3384 if (!err)
3385 err = -ENODEV;
3386
3381 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 3387 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
3382 return err; 3388 return err;
3383 } 3389 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 3130b43bba52..02959035ed3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
2620 } 2620 }
2621 2621
2622 /* should never happen! */ 2622 /* should never happen! */
2623 BUG_ON(1); 2623 BUG();
2624 return NULL; 2624 return NULL;
2625} 2625}
2626 2626
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 88773ca58e6b..b3da81e90132 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
476 break; 476 break;
477 477
478 default: 478 default:
479 BUG_ON(1); 479 BUG();
480 } 480 }
481 481
482 return buf_size; 482 return buf_size;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2ba49e959c3f..dc339dc1adb2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
815 */ 815 */
816 queue_mapping = skb_get_queue_mapping(skb); 816 queue_mapping = skb_get_queue_mapping(skb);
817 fq = &priv->fq[queue_mapping]; 817 fq = &priv->fq[queue_mapping];
818
819 fd_len = dpaa2_fd_get_len(&fd);
820 nq = netdev_get_tx_queue(net_dev, queue_mapping);
821 netdev_tx_sent_queue(nq, fd_len);
822
823 /* Everything that happens after this enqueues might race with
824 * the Tx confirmation callback for this frame
825 */
818 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 826 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
819 err = priv->enqueue(priv, fq, &fd, 0); 827 err = priv->enqueue(priv, fq, &fd, 0);
820 if (err != -EBUSY) 828 if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
825 percpu_stats->tx_errors++; 833 percpu_stats->tx_errors++;
826 /* Clean up everything, including freeing the skb */ 834 /* Clean up everything, including freeing the skb */
827 free_tx_fd(priv, fq, &fd, false); 835 free_tx_fd(priv, fq, &fd, false);
836 netdev_tx_completed_queue(nq, 1, fd_len);
828 } else { 837 } else {
829 fd_len = dpaa2_fd_get_len(&fd);
830 percpu_stats->tx_packets++; 838 percpu_stats->tx_packets++;
831 percpu_stats->tx_bytes += fd_len; 839 percpu_stats->tx_bytes += fd_len;
832
833 nq = netdev_get_tx_queue(net_dev, queue_mapping);
834 netdev_tx_sent_queue(nq, fd_len);
835 } 840 }
836 841
837 return NETDEV_TX_OK; 842 return NETDEV_TX_OK;
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
1817 dpaa2_fd_set_format(&fd, dpaa2_fd_single); 1822 dpaa2_fd_set_format(&fd, dpaa2_fd_single);
1818 dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); 1823 dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
1819 1824
1820 fq = &priv->fq[smp_processor_id()]; 1825 fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
1821 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 1826 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1822 err = priv->enqueue(priv, fq, &fd, 0); 1827 err = priv->enqueue(priv, fq, &fd, 0);
1823 if (err != -EBUSY) 1828 if (err != -EBUSY)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 1c1f17ec6be2..162cb9afa0e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -22,6 +22,7 @@
22#include "hns3_enet.h" 22#include "hns3_enet.h"
23 23
24#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) 24#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
25#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
25 26
26static void hns3_clear_all_ring(struct hnae3_handle *h); 27static void hns3_clear_all_ring(struct hnae3_handle *h);
27static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); 28static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1079 1080
1080 desc_cb->length = size; 1081 desc_cb->length = size;
1081 1082
1082 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; 1083 frag_buf_num = hns3_tx_bd_count(size);
1083 sizeoflast = size & HNS3_TX_LAST_SIZE_M; 1084 sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1084 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1085 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1085 1086
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1124 int i; 1125 int i;
1125 1126
1126 size = skb_headlen(skb); 1127 size = skb_headlen(skb);
1127 buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; 1128 buf_num = hns3_tx_bd_count(size);
1128 1129
1129 frag_num = skb_shinfo(skb)->nr_frags; 1130 frag_num = skb_shinfo(skb)->nr_frags;
1130 for (i = 0; i < frag_num; i++) { 1131 for (i = 0; i < frag_num; i++) {
1131 frag = &skb_shinfo(skb)->frags[i]; 1132 frag = &skb_shinfo(skb)->frags[i];
1132 size = skb_frag_size(frag); 1133 size = skb_frag_size(frag);
1133 bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> 1134 bdnum_for_frag = hns3_tx_bd_count(size);
1134 HNS3_MAX_BD_SIZE_OFFSET;
1135 if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) 1135 if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
1136 return -ENOMEM; 1136 return -ENOMEM;
1137 1137
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1139 } 1139 }
1140 1140
1141 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { 1141 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1142 buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> 1142 buf_num = hns3_tx_bd_count(skb->len);
1143 HNS3_MAX_BD_SIZE_OFFSET;
1144 if (ring_space(ring) < buf_num) 1143 if (ring_space(ring) < buf_num)
1145 return -EBUSY; 1144 return -EBUSY;
1146 /* manual split the send packet */ 1145 /* manual split the send packet */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1169 buf_num = skb_shinfo(skb)->nr_frags + 1; 1168 buf_num = skb_shinfo(skb)->nr_frags + 1;
1170 1169
1171 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { 1170 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1172 buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 1171 buf_num = hns3_tx_bd_count(skb->len);
1173 if (ring_space(ring) < buf_num) 1172 if (ring_space(ring) < buf_num)
1174 return -EBUSY; 1173 return -EBUSY;
1175 /* manual split the send packet */ 1174 /* manual split the send packet */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1db0bd41d209..75669cd0c311 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -193,7 +193,6 @@ enum hns3_nic_state {
193#define HNS3_VECTOR_INITED 1 193#define HNS3_VECTOR_INITED 1
194 194
195#define HNS3_MAX_BD_SIZE 65535 195#define HNS3_MAX_BD_SIZE 65535
196#define HNS3_MAX_BD_SIZE_OFFSET 16
197#define HNS3_MAX_BD_PER_FRAG 8 196#define HNS3_MAX_BD_PER_FRAG 8
198#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS 197#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
199 198
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3baabdc89726..90b62c1412c8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
3160 3160
3161 if (ehea_add_adapter_mr(adapter)) { 3161 if (ehea_add_adapter_mr(adapter)) {
3162 pr_err("creating MR failed\n"); 3162 pr_err("creating MR failed\n");
3163 of_node_put(eth_dn);
3163 return -EIO; 3164 return -EIO;
3164 } 3165 }
3165 3166
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 370ca94b6775..b8ba74de9555 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -40,6 +40,9 @@
40#include "mlx5_core.h" 40#include "mlx5_core.h"
41#include "lib/eq.h" 41#include "lib/eq.h"
42 42
43static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
44 struct mlx5_core_dct *dct);
45
43static struct mlx5_core_rsc_common * 46static struct mlx5_core_rsc_common *
44mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) 47mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
45{ 48{
@@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
227 wait_for_completion(&qp->common.free); 230 wait_for_completion(&qp->common.free);
228} 231}
229 232
233static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
234 struct mlx5_core_dct *dct, bool need_cleanup)
235{
236 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
237 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
238 struct mlx5_core_qp *qp = &dct->mqp;
239 int err;
240
241 err = mlx5_core_drain_dct(dev, dct);
242 if (err) {
243 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
244 goto destroy;
245 } else {
246 mlx5_core_warn(
247 dev, "failed drain DCT 0x%x with error 0x%x\n",
248 qp->qpn, err);
249 return err;
250 }
251 }
252 wait_for_completion(&dct->drained);
253destroy:
254 if (need_cleanup)
255 destroy_resource_common(dev, &dct->mqp);
256 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
257 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
258 MLX5_SET(destroy_dct_in, in, uid, qp->uid);
259 err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
260 (void *)&out, sizeof(out));
261 return err;
262}
263
230int mlx5_core_create_dct(struct mlx5_core_dev *dev, 264int mlx5_core_create_dct(struct mlx5_core_dev *dev,
231 struct mlx5_core_dct *dct, 265 struct mlx5_core_dct *dct,
232 u32 *in, int inlen) 266 u32 *in, int inlen,
267 u32 *out, int outlen)
233{ 268{
234 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
235 u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
236 u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
237 struct mlx5_core_qp *qp = &dct->mqp; 269 struct mlx5_core_qp *qp = &dct->mqp;
238 int err; 270 int err;
239 271
240 init_completion(&dct->drained); 272 init_completion(&dct->drained);
241 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); 273 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
242 274
243 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 275 err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
244 if (err) { 276 if (err) {
245 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err); 277 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
246 return err; 278 return err;
@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
254 286
255 return 0; 287 return 0;
256err_cmd: 288err_cmd:
257 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT); 289 _mlx5_core_destroy_dct(dev, dct, false);
258 MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
259 MLX5_SET(destroy_dct_in, din, uid, qp->uid);
260 mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
261 (void *)&out, sizeof(dout));
262 return err; 290 return err;
263} 291}
264EXPORT_SYMBOL_GPL(mlx5_core_create_dct); 292EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
323int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, 351int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
324 struct mlx5_core_dct *dct) 352 struct mlx5_core_dct *dct)
325{ 353{
326 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; 354 return _mlx5_core_destroy_dct(dev, dct, true);
327 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
328 struct mlx5_core_qp *qp = &dct->mqp;
329 int err;
330
331 err = mlx5_core_drain_dct(dev, dct);
332 if (err) {
333 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
334 goto destroy;
335 } else {
336 mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
337 return err;
338 }
339 }
340 wait_for_completion(&dct->drained);
341destroy:
342 destroy_resource_common(dev, &dct->mqp);
343 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
344 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
345 MLX5_SET(destroy_dct_in, in, uid, qp->uid);
346 err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
347 (void *)&out, sizeof(out));
348 return err;
349} 355}
350EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct); 356EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
351 357
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 7a15e932ed2f..c1c1965d7acc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
113 return 0; 113 return 0;
114 default: 114 default:
115 /* Do not consider thresholds for zero temperature. */ 115 /* Do not consider thresholds for zero temperature. */
116 if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { 116 if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
117 *temp = 0; 117 *temp = 0;
118 return 0; 118 return 0;
119 } 119 }
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index bd6e9014bc74..7849119d407a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -142,6 +142,12 @@ struct ks8851_net {
142 142
143static int msg_enable; 143static int msg_enable;
144 144
145/* SPI frame opcodes */
146#define KS_SPIOP_RD (0x00)
147#define KS_SPIOP_WR (0x40)
148#define KS_SPIOP_RXFIFO (0x80)
149#define KS_SPIOP_TXFIFO (0xC0)
150
145/* shift for byte-enable data */ 151/* shift for byte-enable data */
146#define BYTE_EN(_x) ((_x) << 2) 152#define BYTE_EN(_x) ((_x) << 2)
147 153
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
535 /* set dma read address */ 541 /* set dma read address */
536 ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); 542 ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
537 543
538 /* start the packet dma process, and set auto-dequeue rx */ 544 /* start DMA access */
539 ks8851_wrreg16(ks, KS_RXQCR, 545 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
540 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
541 546
542 if (rxlen > 4) { 547 if (rxlen > 4) {
543 unsigned int rxalign; 548 unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
568 } 573 }
569 } 574 }
570 575
571 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 576 /* end DMA access and dequeue packet */
577 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
572 } 578 }
573} 579}
574 580
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
785static int ks8851_net_open(struct net_device *dev) 791static int ks8851_net_open(struct net_device *dev)
786{ 792{
787 struct ks8851_net *ks = netdev_priv(dev); 793 struct ks8851_net *ks = netdev_priv(dev);
794 int ret;
795
796 ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
797 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
798 dev->name, ks);
799 if (ret < 0) {
800 netdev_err(dev, "failed to get irq\n");
801 return ret;
802 }
788 803
789 /* lock the card, even if we may not actually be doing anything 804 /* lock the card, even if we may not actually be doing anything
790 * else at the moment */ 805 * else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
849 netif_dbg(ks, ifup, ks->netdev, "network device up\n"); 864 netif_dbg(ks, ifup, ks->netdev, "network device up\n");
850 865
851 mutex_unlock(&ks->lock); 866 mutex_unlock(&ks->lock);
867 mii_check_link(&ks->mii);
852 return 0; 868 return 0;
853} 869}
854 870
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
899 dev_kfree_skb(txb); 915 dev_kfree_skb(txb);
900 } 916 }
901 917
918 free_irq(dev->irq, ks);
919
902 return 0; 920 return 0;
903} 921}
904 922
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
1508 1526
1509 spi_set_drvdata(spi, ks); 1527 spi_set_drvdata(spi, ks);
1510 1528
1529 netif_carrier_off(ks->netdev);
1511 ndev->if_port = IF_PORT_100BASET; 1530 ndev->if_port = IF_PORT_100BASET;
1512 ndev->netdev_ops = &ks8851_netdev_ops; 1531 ndev->netdev_ops = &ks8851_netdev_ops;
1513 ndev->irq = spi->irq; 1532 ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
1529 ks8851_read_selftest(ks); 1548 ks8851_read_selftest(ks);
1530 ks8851_init_mac(ks); 1549 ks8851_init_mac(ks);
1531 1550
1532 ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
1533 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1534 ndev->name, ks);
1535 if (ret < 0) {
1536 dev_err(&spi->dev, "failed to get irq\n");
1537 goto err_irq;
1538 }
1539
1540 ret = register_netdev(ndev); 1551 ret = register_netdev(ndev);
1541 if (ret) { 1552 if (ret) {
1542 dev_err(&spi->dev, "failed to register network device\n"); 1553 dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
1549 1560
1550 return 0; 1561 return 0;
1551 1562
1552
1553err_netdev: 1563err_netdev:
1554 free_irq(ndev->irq, ks); 1564err_id:
1555
1556err_irq:
1557 if (gpio_is_valid(gpio)) 1565 if (gpio_is_valid(gpio))
1558 gpio_set_value(gpio, 0); 1566 gpio_set_value(gpio, 0);
1559err_id:
1560 regulator_disable(ks->vdd_reg); 1567 regulator_disable(ks->vdd_reg);
1561err_reg: 1568err_reg:
1562 regulator_disable(ks->vdd_io); 1569 regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
1574 dev_info(&spi->dev, "remove\n"); 1581 dev_info(&spi->dev, "remove\n");
1575 1582
1576 unregister_netdev(priv->netdev); 1583 unregister_netdev(priv->netdev);
1577 free_irq(spi->irq, priv);
1578 if (gpio_is_valid(priv->gpio)) 1584 if (gpio_is_valid(priv->gpio))
1579 gpio_set_value(priv->gpio, 0); 1585 gpio_set_value(priv->gpio, 0);
1580 regulator_disable(priv->vdd_reg); 1586 regulator_disable(priv->vdd_reg);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 852256ef1f22..23da1e3ee429 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -11,9 +11,15 @@
11*/ 11*/
12 12
13#define KS_CCR 0x08 13#define KS_CCR 0x08
14#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
14#define CCR_EEPROM (1 << 9) 15#define CCR_EEPROM (1 << 9)
15#define CCR_SPI (1 << 8) 16#define CCR_SPI (1 << 8) /* KSZ8851SNL */
16#define CCR_32PIN (1 << 0) 17#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */
18#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */
19#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */
20#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */
21#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */
22#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
17 23
18/* MAC address registers */ 24/* MAC address registers */
19#define KS_MAR(_m) (0x15 - (_m)) 25#define KS_MAR(_m) (0x15 - (_m))
@@ -112,13 +118,13 @@
112#define RXCR1_RXE (1 << 0) 118#define RXCR1_RXE (1 << 0)
113 119
114#define KS_RXCR2 0x76 120#define KS_RXCR2 0x76
115#define RXCR2_SRDBL_MASK (0x7 << 5) 121#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */
116#define RXCR2_SRDBL_SHIFT (5) 122#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */
117#define RXCR2_SRDBL_4B (0x0 << 5) 123#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */
118#define RXCR2_SRDBL_8B (0x1 << 5) 124#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */
119#define RXCR2_SRDBL_16B (0x2 << 5) 125#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */
120#define RXCR2_SRDBL_32B (0x3 << 5) 126#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */
121#define RXCR2_SRDBL_FRAME (0x4 << 5) 127#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */
122#define RXCR2_IUFFP (1 << 4) 128#define RXCR2_IUFFP (1 << 4)
123#define RXCR2_RXIUFCEZ (1 << 3) 129#define RXCR2_RXIUFCEZ (1 << 3)
124#define RXCR2_UDPLFE (1 << 2) 130#define RXCR2_UDPLFE (1 << 2)
@@ -143,8 +149,10 @@
143#define RXFSHR_RXCE (1 << 0) 149#define RXFSHR_RXCE (1 << 0)
144 150
145#define KS_RXFHBCR 0x7E 151#define KS_RXFHBCR 0x7E
152#define RXFHBCR_CNT_MASK (0xfff << 0)
153
146#define KS_TXQCR 0x80 154#define KS_TXQCR 0x80
147#define TXQCR_AETFE (1 << 2) 155#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */
148#define TXQCR_TXQMAM (1 << 1) 156#define TXQCR_TXQMAM (1 << 1)
149#define TXQCR_METFE (1 << 0) 157#define TXQCR_METFE (1 << 0)
150 158
@@ -167,6 +175,10 @@
167 175
168#define KS_RXFDPR 0x86 176#define KS_RXFDPR 0x86
169#define RXFDPR_RXFPAI (1 << 14) 177#define RXFDPR_RXFPAI (1 << 14)
178#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */
179#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */
180#define RXFDPR_RXFP_MASK (0x7ff << 0)
181#define RXFDPR_RXFP_SHIFT (0)
170 182
171#define KS_RXDTTR 0x8C 183#define KS_RXDTTR 0x8C
172#define KS_RXDBCTR 0x8E 184#define KS_RXDBCTR 0x8E
@@ -184,7 +196,7 @@
184#define IRQ_RXMPDI (1 << 4) 196#define IRQ_RXMPDI (1 << 4)
185#define IRQ_LDI (1 << 3) 197#define IRQ_LDI (1 << 3)
186#define IRQ_EDI (1 << 2) 198#define IRQ_EDI (1 << 2)
187#define IRQ_SPIBEI (1 << 1) 199#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */
188#define IRQ_DEDI (1 << 0) 200#define IRQ_DEDI (1 << 0)
189 201
190#define KS_RXFCTR 0x9C 202#define KS_RXFCTR 0x9C
@@ -257,42 +269,37 @@
257#define KS_P1ANLPR 0xEE 269#define KS_P1ANLPR 0xEE
258 270
259#define KS_P1SCLMD 0xF4 271#define KS_P1SCLMD 0xF4
260#define P1SCLMD_LEDOFF (1 << 15)
261#define P1SCLMD_TXIDS (1 << 14)
262#define P1SCLMD_RESTARTAN (1 << 13)
263#define P1SCLMD_DISAUTOMDIX (1 << 10)
264#define P1SCLMD_FORCEMDIX (1 << 9)
265#define P1SCLMD_AUTONEGEN (1 << 7)
266#define P1SCLMD_FORCE100 (1 << 6)
267#define P1SCLMD_FORCEFDX (1 << 5)
268#define P1SCLMD_ADV_FLOW (1 << 4)
269#define P1SCLMD_ADV_100BT_FDX (1 << 3)
270#define P1SCLMD_ADV_100BT_HDX (1 << 2)
271#define P1SCLMD_ADV_10BT_FDX (1 << 1)
272#define P1SCLMD_ADV_10BT_HDX (1 << 0)
273 272
274#define KS_P1CR 0xF6 273#define KS_P1CR 0xF6
275#define P1CR_HP_MDIX (1 << 15) 274#define P1CR_LEDOFF (1 << 15)
276#define P1CR_REV_POL (1 << 13) 275#define P1CR_TXIDS (1 << 14)
277#define P1CR_OP_100M (1 << 10) 276#define P1CR_RESTARTAN (1 << 13)
278#define P1CR_OP_FDX (1 << 9) 277#define P1CR_DISAUTOMDIX (1 << 10)
279#define P1CR_OP_MDI (1 << 7) 278#define P1CR_FORCEMDIX (1 << 9)
280#define P1CR_AN_DONE (1 << 6) 279#define P1CR_AUTONEGEN (1 << 7)
281#define P1CR_LINK_GOOD (1 << 5) 280#define P1CR_FORCE100 (1 << 6)
282#define P1CR_PNTR_FLOW (1 << 4) 281#define P1CR_FORCEFDX (1 << 5)
283#define P1CR_PNTR_100BT_FDX (1 << 3) 282#define P1CR_ADV_FLOW (1 << 4)
284#define P1CR_PNTR_100BT_HDX (1 << 2) 283#define P1CR_ADV_100BT_FDX (1 << 3)
285#define P1CR_PNTR_10BT_FDX (1 << 1) 284#define P1CR_ADV_100BT_HDX (1 << 2)
286#define P1CR_PNTR_10BT_HDX (1 << 0) 285#define P1CR_ADV_10BT_FDX (1 << 1)
286#define P1CR_ADV_10BT_HDX (1 << 0)
287
288#define KS_P1SR 0xF8
289#define P1SR_HP_MDIX (1 << 15)
290#define P1SR_REV_POL (1 << 13)
291#define P1SR_OP_100M (1 << 10)
292#define P1SR_OP_FDX (1 << 9)
293#define P1SR_OP_MDI (1 << 7)
294#define P1SR_AN_DONE (1 << 6)
295#define P1SR_LINK_GOOD (1 << 5)
296#define P1SR_PNTR_FLOW (1 << 4)
297#define P1SR_PNTR_100BT_FDX (1 << 3)
298#define P1SR_PNTR_100BT_HDX (1 << 2)
299#define P1SR_PNTR_10BT_FDX (1 << 1)
300#define P1SR_PNTR_10BT_HDX (1 << 0)
287 301
288/* TX Frame control */ 302/* TX Frame control */
289
290#define TXFR_TXIC (1 << 15) 303#define TXFR_TXIC (1 << 15)
291#define TXFR_TXFID_MASK (0x3f << 0) 304#define TXFR_TXFID_MASK (0x3f << 0)
292#define TXFR_TXFID_SHIFT (0) 305#define TXFR_TXFID_SHIFT (0)
293
294/* SPI frame opcodes */
295#define KS_SPIOP_RD (0x00)
296#define KS_SPIOP_WR (0x40)
297#define KS_SPIOP_RXFIFO (0x80)
298#define KS_SPIOP_TXFIFO (0xC0)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 35f8c9ef204d..c946841c0a06 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,6 +40,8 @@
40#include <linux/of_device.h> 40#include <linux/of_device.h>
41#include <linux/of_net.h> 41#include <linux/of_net.h>
42 42
43#include "ks8851.h"
44
43#define DRV_NAME "ks8851_mll" 45#define DRV_NAME "ks8851_mll"
44 46
45static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 47static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
48#define TX_BUF_SIZE 2000 50#define TX_BUF_SIZE 2000
49#define RX_BUF_SIZE 2000 51#define RX_BUF_SIZE 2000
50 52
51#define KS_CCR 0x08
52#define CCR_EEPROM (1 << 9)
53#define CCR_SPI (1 << 8)
54#define CCR_8BIT (1 << 7)
55#define CCR_16BIT (1 << 6)
56#define CCR_32BIT (1 << 5)
57#define CCR_SHARED (1 << 4)
58#define CCR_32PIN (1 << 0)
59
60/* MAC address registers */
61#define KS_MARL 0x10
62#define KS_MARM 0x12
63#define KS_MARH 0x14
64
65#define KS_OBCR 0x20
66#define OBCR_ODS_16MA (1 << 6)
67
68#define KS_EEPCR 0x22
69#define EEPCR_EESA (1 << 4)
70#define EEPCR_EESB (1 << 3)
71#define EEPCR_EEDO (1 << 2)
72#define EEPCR_EESCK (1 << 1)
73#define EEPCR_EECS (1 << 0)
74
75#define KS_MBIR 0x24
76#define MBIR_TXMBF (1 << 12)
77#define MBIR_TXMBFA (1 << 11)
78#define MBIR_RXMBF (1 << 4)
79#define MBIR_RXMBFA (1 << 3)
80
81#define KS_GRR 0x26
82#define GRR_QMU (1 << 1)
83#define GRR_GSR (1 << 0)
84
85#define KS_WFCR 0x2A
86#define WFCR_MPRXE (1 << 7)
87#define WFCR_WF3E (1 << 3)
88#define WFCR_WF2E (1 << 2)
89#define WFCR_WF1E (1 << 1)
90#define WFCR_WF0E (1 << 0)
91
92#define KS_WF0CRC0 0x30
93#define KS_WF0CRC1 0x32
94#define KS_WF0BM0 0x34
95#define KS_WF0BM1 0x36
96#define KS_WF0BM2 0x38
97#define KS_WF0BM3 0x3A
98
99#define KS_WF1CRC0 0x40
100#define KS_WF1CRC1 0x42
101#define KS_WF1BM0 0x44
102#define KS_WF1BM1 0x46
103#define KS_WF1BM2 0x48
104#define KS_WF1BM3 0x4A
105
106#define KS_WF2CRC0 0x50
107#define KS_WF2CRC1 0x52
108#define KS_WF2BM0 0x54
109#define KS_WF2BM1 0x56
110#define KS_WF2BM2 0x58
111#define KS_WF2BM3 0x5A
112
113#define KS_WF3CRC0 0x60
114#define KS_WF3CRC1 0x62
115#define KS_WF3BM0 0x64
116#define KS_WF3BM1 0x66
117#define KS_WF3BM2 0x68
118#define KS_WF3BM3 0x6A
119
120#define KS_TXCR 0x70
121#define TXCR_TCGICMP (1 << 8)
122#define TXCR_TCGUDP (1 << 7)
123#define TXCR_TCGTCP (1 << 6)
124#define TXCR_TCGIP (1 << 5)
125#define TXCR_FTXQ (1 << 4)
126#define TXCR_TXFCE (1 << 3)
127#define TXCR_TXPE (1 << 2)
128#define TXCR_TXCRC (1 << 1)
129#define TXCR_TXE (1 << 0)
130
131#define KS_TXSR 0x72
132#define TXSR_TXLC (1 << 13)
133#define TXSR_TXMC (1 << 12)
134#define TXSR_TXFID_MASK (0x3f << 0)
135#define TXSR_TXFID_SHIFT (0)
136#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
137
138
139#define KS_RXCR1 0x74
140#define RXCR1_FRXQ (1 << 15)
141#define RXCR1_RXUDPFCC (1 << 14)
142#define RXCR1_RXTCPFCC (1 << 13)
143#define RXCR1_RXIPFCC (1 << 12)
144#define RXCR1_RXPAFMA (1 << 11)
145#define RXCR1_RXFCE (1 << 10)
146#define RXCR1_RXEFE (1 << 9)
147#define RXCR1_RXMAFMA (1 << 8)
148#define RXCR1_RXBE (1 << 7)
149#define RXCR1_RXME (1 << 6)
150#define RXCR1_RXUE (1 << 5)
151#define RXCR1_RXAE (1 << 4)
152#define RXCR1_RXINVF (1 << 1)
153#define RXCR1_RXE (1 << 0)
154#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ 53#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
155 RXCR1_RXMAFMA | RXCR1_RXPAFMA) 54 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
156
157#define KS_RXCR2 0x76
158#define RXCR2_SRDBL_MASK (0x7 << 5)
159#define RXCR2_SRDBL_SHIFT (5)
160#define RXCR2_SRDBL_4B (0x0 << 5)
161#define RXCR2_SRDBL_8B (0x1 << 5)
162#define RXCR2_SRDBL_16B (0x2 << 5)
163#define RXCR2_SRDBL_32B (0x3 << 5)
164/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
165#define RXCR2_IUFFP (1 << 4)
166#define RXCR2_RXIUFCEZ (1 << 3)
167#define RXCR2_UDPLFE (1 << 2)
168#define RXCR2_RXICMPFCC (1 << 1)
169#define RXCR2_RXSAF (1 << 0)
170
171#define KS_TXMIR 0x78
172
173#define KS_RXFHSR 0x7C
174#define RXFSHR_RXFV (1 << 15)
175#define RXFSHR_RXICMPFCS (1 << 13)
176#define RXFSHR_RXIPFCS (1 << 12)
177#define RXFSHR_RXTCPFCS (1 << 11)
178#define RXFSHR_RXUDPFCS (1 << 10)
179#define RXFSHR_RXBF (1 << 7)
180#define RXFSHR_RXMF (1 << 6)
181#define RXFSHR_RXUF (1 << 5)
182#define RXFSHR_RXMR (1 << 4)
183#define RXFSHR_RXFT (1 << 3)
184#define RXFSHR_RXFTL (1 << 2)
185#define RXFSHR_RXRF (1 << 1)
186#define RXFSHR_RXCE (1 << 0)
187#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
188 RXFSHR_RXFTL | RXFSHR_RXMR |\
189 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
190 RXFSHR_RXTCPFCS)
191#define KS_RXFHBCR 0x7E
192#define RXFHBCR_CNT_MASK 0x0FFF
193
194#define KS_TXQCR 0x80
195#define TXQCR_AETFE (1 << 2)
196#define TXQCR_TXQMAM (1 << 1)
197#define TXQCR_METFE (1 << 0)
198
199#define KS_RXQCR 0x82
200#define RXQCR_RXDTTS (1 << 12)
201#define RXQCR_RXDBCTS (1 << 11)
202#define RXQCR_RXFCTS (1 << 10)
203#define RXQCR_RXIPHTOE (1 << 9)
204#define RXQCR_RXDTTE (1 << 7)
205#define RXQCR_RXDBCTE (1 << 6)
206#define RXQCR_RXFCTE (1 << 5)
207#define RXQCR_ADRFE (1 << 4)
208#define RXQCR_SDA (1 << 3)
209#define RXQCR_RRXEF (1 << 0)
210#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) 55#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
211 56
212#define KS_TXFDPR 0x84
213#define TXFDPR_TXFPAI (1 << 14)
214#define TXFDPR_TXFP_MASK (0x7ff << 0)
215#define TXFDPR_TXFP_SHIFT (0)
216
217#define KS_RXFDPR 0x86
218#define RXFDPR_RXFPAI (1 << 14)
219
220#define KS_RXDTTR 0x8C
221#define KS_RXDBCTR 0x8E
222
223#define KS_IER 0x90
224#define KS_ISR 0x92
225#define IRQ_LCI (1 << 15)
226#define IRQ_TXI (1 << 14)
227#define IRQ_RXI (1 << 13)
228#define IRQ_RXOI (1 << 11)
229#define IRQ_TXPSI (1 << 9)
230#define IRQ_RXPSI (1 << 8)
231#define IRQ_TXSAI (1 << 6)
232#define IRQ_RXWFDI (1 << 5)
233#define IRQ_RXMPDI (1 << 4)
234#define IRQ_LDI (1 << 3)
235#define IRQ_EDI (1 << 2)
236#define IRQ_SPIBEI (1 << 1)
237#define IRQ_DEDI (1 << 0)
238
239#define KS_RXFCTR 0x9C
240#define RXFCTR_THRESHOLD_MASK 0x00FF
241
242#define KS_RXFC 0x9D
243#define RXFCTR_RXFC_MASK (0xff << 8)
244#define RXFCTR_RXFC_SHIFT (8)
245#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
246#define RXFCTR_RXFCT_MASK (0xff << 0)
247#define RXFCTR_RXFCT_SHIFT (0)
248
249#define KS_TXNTFSR 0x9E
250
251#define KS_MAHTR0 0xA0
252#define KS_MAHTR1 0xA2
253#define KS_MAHTR2 0xA4
254#define KS_MAHTR3 0xA6
255
256#define KS_FCLWR 0xB0
257#define KS_FCHWR 0xB2
258#define KS_FCOWR 0xB4
259
260#define KS_CIDER 0xC0
261#define CIDER_ID 0x8870
262#define CIDER_REV_MASK (0x7 << 1)
263#define CIDER_REV_SHIFT (1)
264#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
265
266#define KS_CGCR 0xC6
267#define KS_IACR 0xC8
268#define IACR_RDEN (1 << 12)
269#define IACR_TSEL_MASK (0x3 << 10)
270#define IACR_TSEL_SHIFT (10)
271#define IACR_TSEL_MIB (0x3 << 10)
272#define IACR_ADDR_MASK (0x1f << 0)
273#define IACR_ADDR_SHIFT (0)
274
275#define KS_IADLR 0xD0
276#define KS_IAHDR 0xD2
277
278#define KS_PMECR 0xD4
279#define PMECR_PME_DELAY (1 << 14)
280#define PMECR_PME_POL (1 << 12)
281#define PMECR_WOL_WAKEUP (1 << 11)
282#define PMECR_WOL_MAGICPKT (1 << 10)
283#define PMECR_WOL_LINKUP (1 << 9)
284#define PMECR_WOL_ENERGY (1 << 8)
285#define PMECR_AUTO_WAKE_EN (1 << 7)
286#define PMECR_WAKEUP_NORMAL (1 << 6)
287#define PMECR_WKEVT_MASK (0xf << 2)
288#define PMECR_WKEVT_SHIFT (2)
289#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
290#define PMECR_WKEVT_ENERGY (0x1 << 2)
291#define PMECR_WKEVT_LINK (0x2 << 2)
292#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
293#define PMECR_WKEVT_FRAME (0x8 << 2)
294#define PMECR_PM_MASK (0x3 << 0)
295#define PMECR_PM_SHIFT (0)
296#define PMECR_PM_NORMAL (0x0 << 0)
297#define PMECR_PM_ENERGY (0x1 << 0)
298#define PMECR_PM_SOFTDOWN (0x2 << 0)
299#define PMECR_PM_POWERSAVE (0x3 << 0)
300
301/* Standard MII PHY data */
302#define KS_P1MBCR 0xE4
303#define P1MBCR_FORCE_FDX (1 << 8)
304
305#define KS_P1MBSR 0xE6
306#define P1MBSR_AN_COMPLETE (1 << 5)
307#define P1MBSR_AN_CAPABLE (1 << 3)
308#define P1MBSR_LINK_UP (1 << 2)
309
310#define KS_PHY1ILR 0xE8
311#define KS_PHY1IHR 0xEA
312#define KS_P1ANAR 0xEC
313#define KS_P1ANLPR 0xEE
314
315#define KS_P1SCLMD 0xF4
316#define P1SCLMD_LEDOFF (1 << 15)
317#define P1SCLMD_TXIDS (1 << 14)
318#define P1SCLMD_RESTARTAN (1 << 13)
319#define P1SCLMD_DISAUTOMDIX (1 << 10)
320#define P1SCLMD_FORCEMDIX (1 << 9)
321#define P1SCLMD_AUTONEGEN (1 << 7)
322#define P1SCLMD_FORCE100 (1 << 6)
323#define P1SCLMD_FORCEFDX (1 << 5)
324#define P1SCLMD_ADV_FLOW (1 << 4)
325#define P1SCLMD_ADV_100BT_FDX (1 << 3)
326#define P1SCLMD_ADV_100BT_HDX (1 << 2)
327#define P1SCLMD_ADV_10BT_FDX (1 << 1)
328#define P1SCLMD_ADV_10BT_HDX (1 << 0)
329
330#define KS_P1CR 0xF6
331#define P1CR_HP_MDIX (1 << 15)
332#define P1CR_REV_POL (1 << 13)
333#define P1CR_OP_100M (1 << 10)
334#define P1CR_OP_FDX (1 << 9)
335#define P1CR_OP_MDI (1 << 7)
336#define P1CR_AN_DONE (1 << 6)
337#define P1CR_LINK_GOOD (1 << 5)
338#define P1CR_PNTR_FLOW (1 << 4)
339#define P1CR_PNTR_100BT_FDX (1 << 3)
340#define P1CR_PNTR_100BT_HDX (1 << 2)
341#define P1CR_PNTR_10BT_FDX (1 << 1)
342#define P1CR_PNTR_10BT_HDX (1 << 0)
343
344/* TX Frame control */
345
346#define TXFR_TXIC (1 << 15)
347#define TXFR_TXFID_MASK (0x3f << 0)
348#define TXFR_TXFID_SHIFT (0)
349
350#define KS_P1SR 0xF8
351#define P1SR_HP_MDIX (1 << 15)
352#define P1SR_REV_POL (1 << 13)
353#define P1SR_OP_100M (1 << 10)
354#define P1SR_OP_FDX (1 << 9)
355#define P1SR_OP_MDI (1 << 7)
356#define P1SR_AN_DONE (1 << 6)
357#define P1SR_LINK_GOOD (1 << 5)
358#define P1SR_PNTR_FLOW (1 << 4)
359#define P1SR_PNTR_100BT_FDX (1 << 3)
360#define P1SR_PNTR_100BT_HDX (1 << 2)
361#define P1SR_PNTR_10BT_FDX (1 << 1)
362#define P1SR_PNTR_10BT_HDX (1 << 0)
363
364#define ENUM_BUS_NONE 0 57#define ENUM_BUS_NONE 0
365#define ENUM_BUS_8BIT 1 58#define ENUM_BUS_8BIT 1
366#define ENUM_BUS_16BIT 2 59#define ENUM_BUS_16BIT 2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
1475 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); 1168 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1476 1169
1477 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ 1170 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1478 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); 1171 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
1479 1172
1480 /* Setup RxQ Command Control (RXQCR) */ 1173 /* Setup RxQ Command Control (RXQCR) */
1481 ks->rc_rxqcr = RXQCR_CMD_CNTL; 1174 ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
1488 */ 1181 */
1489 1182
1490 w = ks_rdreg16(ks, KS_P1MBCR); 1183 w = ks_rdreg16(ks, KS_P1MBCR);
1491 w &= ~P1MBCR_FORCE_FDX; 1184 w &= ~BMCR_FULLDPLX;
1492 ks_wrreg16(ks, KS_P1MBCR, w); 1185 ks_wrreg16(ks, KS_P1MBCR, w);
1493 1186
1494 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; 1187 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
1629 ks_setup_int(ks); 1322 ks_setup_int(ks);
1630 1323
1631 data = ks_rdreg16(ks, KS_OBCR); 1324 data = ks_rdreg16(ks, KS_OBCR);
1632 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); 1325 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
1633 1326
1634 /* overwriting the default MAC address */ 1327 /* overwriting the default MAC address */
1635 if (pdev->dev.of_node) { 1328 if (pdev->dev.of_node) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 3b0adda7cc9c..a4cd6f2cfb86 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
1048 1048
1049 for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { 1049 for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
1050 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); 1050 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
1051 if (!skb)
1052 break;
1051 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); 1053 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
1052 skb_put(skb, QLCNIC_ILB_PKT_SIZE); 1054 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
1053 adapter->ahw->diag_cnt = 0; 1055 adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index cfb67b746595..58e0ca9093d3 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
482 write_reg_high(ioaddr, IMR, ISRh_RxErr); 482 write_reg_high(ioaddr, IMR, ISRh_RxErr);
483 483
484 lp->tx_unit_busy = 0; 484 lp->tx_unit_busy = 0;
485 lp->pac_cnt_in_tx_buf = 0; 485 lp->pac_cnt_in_tx_buf = 0;
486 lp->saved_tx_size = 0; 486 lp->saved_tx_size = 0;
487} 487}
488 488
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c29dde064078..7562ccbbb39a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -678,6 +678,7 @@ struct rtl8169_private {
678 struct work_struct work; 678 struct work_struct work;
679 } wk; 679 } wk;
680 680
681 unsigned irq_enabled:1;
681 unsigned supports_gmii:1; 682 unsigned supports_gmii:1;
682 dma_addr_t counters_phys_addr; 683 dma_addr_t counters_phys_addr;
683 struct rtl8169_counters *counters; 684 struct rtl8169_counters *counters;
@@ -1293,6 +1294,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1293static void rtl_irq_disable(struct rtl8169_private *tp) 1294static void rtl_irq_disable(struct rtl8169_private *tp)
1294{ 1295{
1295 RTL_W16(tp, IntrMask, 0); 1296 RTL_W16(tp, IntrMask, 0);
1297 tp->irq_enabled = 0;
1296} 1298}
1297 1299
1298#define RTL_EVENT_NAPI_RX (RxOK | RxErr) 1300#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -1301,6 +1303,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
1301 1303
1302static void rtl_irq_enable(struct rtl8169_private *tp) 1304static void rtl_irq_enable(struct rtl8169_private *tp)
1303{ 1305{
1306 tp->irq_enabled = 1;
1304 RTL_W16(tp, IntrMask, tp->irq_mask); 1307 RTL_W16(tp, IntrMask, tp->irq_mask);
1305} 1308}
1306 1309
@@ -6520,9 +6523,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6520{ 6523{
6521 struct rtl8169_private *tp = dev_instance; 6524 struct rtl8169_private *tp = dev_instance;
6522 u16 status = RTL_R16(tp, IntrStatus); 6525 u16 status = RTL_R16(tp, IntrStatus);
6523 u16 irq_mask = RTL_R16(tp, IntrMask);
6524 6526
6525 if (status == 0xffff || !(status & irq_mask)) 6527 if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
6526 return IRQ_NONE; 6528 return IRQ_NONE;
6527 6529
6528 if (unlikely(status & SYSErr)) { 6530 if (unlikely(status & SYSErr)) {
@@ -6540,7 +6542,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6540 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); 6542 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6541 } 6543 }
6542 6544
6543 if (status & RTL_EVENT_NAPI) { 6545 if (status & (RTL_EVENT_NAPI | LinkChg)) {
6544 rtl_irq_disable(tp); 6546 rtl_irq_disable(tp);
6545 napi_schedule_irqoff(&tp->napi); 6547 napi_schedule_irqoff(&tp->napi);
6546 } 6548 }
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6073387511f8..67f9bb6e941b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
730 status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); 730 status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
731 731
732 /* Link ON & Not select default PHY & not ghost PHY */ 732 /* Link ON & Not select default PHY & not ghost PHY */
733 if ((status & MII_STAT_LINK) && !default_phy && 733 if ((status & MII_STAT_LINK) && !default_phy &&
734 (phy->phy_types != UNKNOWN)) 734 (phy->phy_types != UNKNOWN)) {
735 default_phy = phy; 735 default_phy = phy;
736 else { 736 } else {
737 status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); 737 status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
738 mdio_write(net_dev, phy->phy_addr, MII_CONTROL, 738 mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
739 status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); 739 status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
741 phy_home = phy; 741 phy_home = phy;
742 else if(phy->phy_types == LAN) 742 else if(phy->phy_types == LAN)
743 phy_lan = phy; 743 phy_lan = phy;
744 } 744 }
745 } 745 }
746 746
747 if (!default_phy && phy_home) 747 if (!default_phy && phy_home)
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d8c5bc412219..4d9bcb4d0378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
59 59
60 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 60 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
61 stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, 61 stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
62 STMMAC_RING_MODE, 1, false, skb->len); 62 STMMAC_RING_MODE, 0, false, skb->len);
63 tx_q->tx_skbuff[entry] = NULL; 63 tx_q->tx_skbuff[entry] = NULL;
64 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 64 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
65 65
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
79 79
80 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 80 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
81 stmmac_prepare_tx_desc(priv, desc, 0, len, csum, 81 stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
82 STMMAC_RING_MODE, 1, true, skb->len); 82 STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
83 skb->len);
83 } else { 84 } else {
84 des2 = dma_map_single(priv->device, skb->data, 85 des2 = dma_map_single(priv->device, skb->data,
85 nopaged_len, DMA_TO_DEVICE); 86 nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
91 tx_q->tx_skbuff_dma[entry].is_jumbo = true; 92 tx_q->tx_skbuff_dma[entry].is_jumbo = true;
92 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 93 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
93 stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, 94 stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
94 STMMAC_RING_MODE, 1, true, skb->len); 95 STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
96 skb->len);
95 } 97 }
96 98
97 tx_q->cur_tx = entry; 99 tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
111 113
112static void refill_desc3(void *priv_ptr, struct dma_desc *p) 114static void refill_desc3(void *priv_ptr, struct dma_desc *p)
113{ 115{
114 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 116 struct stmmac_rx_queue *rx_q = priv_ptr;
117 struct stmmac_priv *priv = rx_q->priv_data;
115 118
116 /* Fill DES3 in case of RING mode */ 119 /* Fill DES3 in case of RING mode */
117 if (priv->dma_buf_sz >= BUF_SIZE_8KiB) 120 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
118 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); 121 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
119} 122}
120 123
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 97c5e1aad88f..6a2e1031a62a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3216 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3216 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217 csum_insertion, priv->mode, 1, last_segment, 3217 csum_insertion, priv->mode, 1, last_segment,
3218 skb->len); 3218 skb->len);
3219 3219 } else {
3220 /* The own bit must be the latest setting done when prepare the 3220 stmmac_set_tx_owner(priv, first);
3221 * descriptor and then barrier is needed to make sure that
3222 * all is coherent before granting the DMA engine.
3223 */
3224 wmb();
3225 } 3221 }
3226 3222
3223 /* The own bit must be the latest setting done when prepare the
3224 * descriptor and then barrier is needed to make sure that
3225 * all is coherent before granting the DMA engine.
3226 */
3227 wmb();
3228
3227 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3229 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3228 3230
3229 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3231 stmmac_enable_dma_transmission(priv, priv->ioaddr);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 5174d318901e..0a920c5936b2 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3657 3657
3658 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 3658 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3659 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 3659 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3660 if (ret) 3660 if (ret) {
3661 of_node_put(interfaces);
3661 return ret; 3662 return ret;
3663 }
3662 3664
3663 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 3665 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3664 if (ret) 3666 if (ret) {
3667 of_node_put(interfaces);
3665 return ret; 3668 return ret;
3669 }
3666 3670
3667 /* Create network interfaces */ 3671 /* Create network interfaces */
3668 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 3672 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ec7e7ec24ff9..4041c75997ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
1575 ret = of_address_to_resource(np, 0, &dmares); 1575 ret = of_address_to_resource(np, 0, &dmares);
1576 if (ret) { 1576 if (ret) {
1577 dev_err(&pdev->dev, "unable to get DMA resource\n"); 1577 dev_err(&pdev->dev, "unable to get DMA resource\n");
1578 of_node_put(np);
1578 goto free_netdev; 1579 goto free_netdev;
1579 } 1580 }
1580 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); 1581 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
1581 if (IS_ERR(lp->dma_regs)) { 1582 if (IS_ERR(lp->dma_regs)) {
1582 dev_err(&pdev->dev, "could not map DMA regs\n"); 1583 dev_err(&pdev->dev, "could not map DMA regs\n");
1583 ret = PTR_ERR(lp->dma_regs); 1584 ret = PTR_ERR(lp->dma_regs);
1585 of_node_put(np);
1584 goto free_netdev; 1586 goto free_netdev;
1585 } 1587 }
1586 lp->rx_irq = irq_of_parse_and_map(np, 1); 1588 lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index cd1d8faccca5..cd6b95e673a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
1268 INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); 1268 INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
1269 lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), 1269 lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
1270 WQ_MEM_RECLAIM); 1270 WQ_MEM_RECLAIM);
1271 if (unlikely(!lp->wqueue)) {
1272 ret = -ENOMEM;
1273 goto err_hw_init;
1274 }
1271 1275
1272 ret = adf7242_hw_init(lp); 1276 ret = adf7242_hw_init(lp);
1273 if (ret) 1277 if (ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b6743f03dce0..3b88846de31b 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
324 goto out_err; 324 goto out_err;
325 } 325 }
326 326
327 genlmsg_reply(skb, info); 327 res = genlmsg_reply(skb, info);
328 break; 328 break;
329 } 329 }
330 330
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 071869db44cf..520657945b82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
7 help 7 help
8 MDIO devices and driver infrastructure code. 8 MDIO devices and driver infrastructure code.
9 9
10if MDIO_DEVICE
11
10config MDIO_BUS 12config MDIO_BUS
11 tristate 13 tristate
12 default m if PHYLIB=m 14 default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
179 APM X-Gene SoC's. 181 APM X-Gene SoC's.
180 182
181endif 183endif
184endif
182 185
183config PHYLINK 186config PHYLINK
184 tristate 187 tristate
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9605d4fe540b..cb86a3e90c7d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
323 323
324 bcm54xx_phydsp_config(phydev); 324 bcm54xx_phydsp_config(phydev);
325 325
326 /* Encode link speed into LED1 and LED3 pair (green/amber).
327 * Also flash these two LEDs on activity. This means configuring
328 * them for MULTICOLOR and encoding link/activity into them.
329 */
330 val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
331 BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
332 bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
333
334 val = BCM_LED_MULTICOLOR_IN_PHASE |
335 BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
336 BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
337 bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
338
326 return 0; 339 return 0;
327} 340}
328 341
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index bbd8c22067f3..97d45bd5b38e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -15,6 +15,8 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16 16
17#define DP83822_PHY_ID 0x2000a240 17#define DP83822_PHY_ID 0x2000a240
18#define DP83825I_PHY_ID 0x2000a150
19
18#define DP83822_DEVADDR 0x1f 20#define DP83822_DEVADDR 0x1f
19 21
20#define MII_DP83822_PHYSCR 0x11 22#define MII_DP83822_PHYSCR 0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
304 return 0; 306 return 0;
305} 307}
306 308
309#define DP83822_PHY_DRIVER(_id, _name) \
310 { \
311 PHY_ID_MATCH_MODEL(_id), \
312 .name = (_name), \
313 .features = PHY_BASIC_FEATURES, \
314 .soft_reset = dp83822_phy_reset, \
315 .config_init = dp83822_config_init, \
316 .get_wol = dp83822_get_wol, \
317 .set_wol = dp83822_set_wol, \
318 .ack_interrupt = dp83822_ack_interrupt, \
319 .config_intr = dp83822_config_intr, \
320 .suspend = dp83822_suspend, \
321 .resume = dp83822_resume, \
322 }
323
307static struct phy_driver dp83822_driver[] = { 324static struct phy_driver dp83822_driver[] = {
308 { 325 DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
309 .phy_id = DP83822_PHY_ID, 326 DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
310 .phy_id_mask = 0xfffffff0,
311 .name = "TI DP83822",
312 .features = PHY_BASIC_FEATURES,
313 .config_init = dp83822_config_init,
314 .soft_reset = dp83822_phy_reset,
315 .get_wol = dp83822_get_wol,
316 .set_wol = dp83822_set_wol,
317 .ack_interrupt = dp83822_ack_interrupt,
318 .config_intr = dp83822_config_intr,
319 .suspend = dp83822_suspend,
320 .resume = dp83822_resume,
321 },
322}; 327};
323module_phy_driver(dp83822_driver); 328module_phy_driver(dp83822_driver);
324 329
325static struct mdio_device_id __maybe_unused dp83822_tbl[] = { 330static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
326 { DP83822_PHY_ID, 0xfffffff0 }, 331 { DP83822_PHY_ID, 0xfffffff0 },
332 { DP83825I_PHY_ID, 0xfffffff0 },
327 { }, 333 { },
328}; 334};
329MODULE_DEVICE_TABLE(mdio, dp83822_tbl); 335MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index a238388eb1a5..0eec2913c289 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
201static int meson_gxl_config_intr(struct phy_device *phydev) 201static int meson_gxl_config_intr(struct phy_device *phydev)
202{ 202{
203 u16 val; 203 u16 val;
204 int ret;
204 205
205 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { 206 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
206 val = INTSRC_ANEG_PR 207 val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
213 val = 0; 214 val = 0;
214 } 215 }
215 216
217 /* Ack any pending IRQ */
218 ret = meson_gxl_ack_interrupt(phydev);
219 if (ret)
220 return ret;
221
216 return phy_write(phydev, INTSRC_MASK, val); 222 return phy_write(phydev, INTSRC_MASK, val);
217} 223}
218 224
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 49fdd1ee798e..77068c545de0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
1831{ 1831{
1832 int ret; 1832 int ret;
1833 1833
1834 ret = phy_write(phydev, MII_BMCR, BMCR_RESET); 1834 ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
1835 if (ret < 0) 1835 if (ret < 0)
1836 return ret; 1836 return ret;
1837 1837
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1d68921723dc..e9ca1c088d0b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1763 int skb_xdp = 1; 1763 int skb_xdp = 1;
1764 bool frags = tun_napi_frags_enabled(tfile); 1764 bool frags = tun_napi_frags_enabled(tfile);
1765 1765
1766 if (!(tun->dev->flags & IFF_UP))
1767 return -EIO;
1768
1769 if (!(tun->flags & IFF_NO_PI)) { 1766 if (!(tun->flags & IFF_NO_PI)) {
1770 if (len < sizeof(pi)) 1767 if (len < sizeof(pi))
1771 return -EINVAL; 1768 return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1867 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1864 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1868 1865
1869 if (err) { 1866 if (err) {
1867 err = -EFAULT;
1868drop:
1870 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1869 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1871 kfree_skb(skb); 1870 kfree_skb(skb);
1872 if (frags) { 1871 if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1874 mutex_unlock(&tfile->napi_mutex); 1873 mutex_unlock(&tfile->napi_mutex);
1875 } 1874 }
1876 1875
1877 return -EFAULT; 1876 return err;
1878 } 1877 }
1879 } 1878 }
1880 1879
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1958 !tfile->detached) 1957 !tfile->detached)
1959 rxhash = __skb_get_hash_symmetric(skb); 1958 rxhash = __skb_get_hash_symmetric(skb);
1960 1959
1960 rcu_read_lock();
1961 if (unlikely(!(tun->dev->flags & IFF_UP))) {
1962 err = -EIO;
1963 rcu_read_unlock();
1964 goto drop;
1965 }
1966
1961 if (frags) { 1967 if (frags) {
1962 /* Exercise flow dissector code path. */ 1968 /* Exercise flow dissector code path. */
1963 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 1969 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1965 if (unlikely(headlen > skb_headlen(skb))) { 1971 if (unlikely(headlen > skb_headlen(skb))) {
1966 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1972 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1967 napi_free_frags(&tfile->napi); 1973 napi_free_frags(&tfile->napi);
1974 rcu_read_unlock();
1968 mutex_unlock(&tfile->napi_mutex); 1975 mutex_unlock(&tfile->napi_mutex);
1969 WARN_ON(1); 1976 WARN_ON(1);
1970 return -ENOMEM; 1977 return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1992 } else { 1999 } else {
1993 netif_rx_ni(skb); 2000 netif_rx_ni(skb);
1994 } 2001 }
2002 rcu_read_unlock();
1995 2003
1996 stats = get_cpu_ptr(tun->pcpu_stats); 2004 stats = get_cpu_ptr(tun->pcpu_stats);
1997 u64_stats_update_begin(&stats->syncp); 2005 u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 820a2fe7d027..aff995be2a31 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
1301 .tx_fixup = aqc111_tx_fixup, 1301 .tx_fixup = aqc111_tx_fixup,
1302}; 1302};
1303 1303
1304static const struct driver_info qnap_info = {
1305 .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
1306 .bind = aqc111_bind,
1307 .unbind = aqc111_unbind,
1308 .status = aqc111_status,
1309 .link_reset = aqc111_link_reset,
1310 .reset = aqc111_reset,
1311 .stop = aqc111_stop,
1312 .flags = FLAG_ETHER | FLAG_FRAMING_AX |
1313 FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
1314 .rx_fixup = aqc111_rx_fixup,
1315 .tx_fixup = aqc111_tx_fixup,
1316};
1317
1304static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) 1318static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
1305{ 1319{
1306 struct usbnet *dev = usb_get_intfdata(intf); 1320 struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
1455 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, 1469 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
1456 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, 1470 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
1457 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, 1471 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
1472 {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
1458 { },/* END */ 1473 { },/* END */
1459}; 1474};
1460MODULE_DEVICE_TABLE(usb, products); 1475MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5512a1038721..3e9b2c319e45 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
851 .driver_info = 0, 851 .driver_info = 0,
852}, 852},
853 853
854/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
855{
856 USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
857 USB_CDC_SUBCLASS_ETHERNET,
858 USB_CDC_PROTO_NONE),
859 .driver_info = 0,
860},
861
854/* WHITELIST!!! 862/* WHITELIST!!!
855 * 863 *
856 * CDC Ether uses two interfaces, not necessarily consecutive. 864 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 077f1b9f2761..d76dfed8d9bb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
4335 /* If vxlan->dev is in the same netns, it has already been added 4335 /* If vxlan->dev is in the same netns, it has already been added
4336 * to the list by the previous loop. 4336 * to the list by the previous loop.
4337 */ 4337 */
4338 if (!net_eq(dev_net(vxlan->dev), net)) { 4338 if (!net_eq(dev_net(vxlan->dev), net))
4339 gro_cells_destroy(&vxlan->gro_cells);
4340 unregister_netdevice_queue(vxlan->dev, head); 4339 unregister_netdevice_queue(vxlan->dev, head);
4341 }
4342 } 4340 }
4343 4341
4344 for (h = 0; h < PORT_HASH_SIZE; ++h) 4342 for (h = 0; h < PORT_HASH_SIZE; ++h)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index e9822a3ec373..94132cfd1f56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
460static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, 460static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
461 struct cfg80211_pmsr_result *res) 461 struct cfg80211_pmsr_result *res)
462{ 462{
463 s64 rtt_avg = res->ftm.rtt_avg * 100; 463 s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
464
465 do_div(rtt_avg, 6666);
466 464
467 IWL_DEBUG_INFO(mvm, "entry %d\n", index); 465 IWL_DEBUG_INFO(mvm, "entry %d\n", index);
468 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); 466 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6eedc0ec7661..76629b98c78d 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
130static void 130static void
131mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) 131mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
132{ 132{
133 iowrite32(q->desc_dma, &q->regs->desc_base);
134 iowrite32(q->ndesc, &q->regs->ring_size);
133 q->head = ioread32(&q->regs->dma_idx); 135 q->head = ioread32(&q->regs->dma_idx);
134 q->tail = q->head; 136 q->tail = q->head;
135 iowrite32(q->head, &q->regs->cpu_idx); 137 iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
180 else 182 else
181 mt76_dma_sync_idx(dev, q); 183 mt76_dma_sync_idx(dev, q);
182 184
183 wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; 185 wake = wake && q->stopped &&
186 qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
187 if (wake)
188 q->stopped = false;
184 189
185 if (!q->queued) 190 if (!q->queued)
186 wake_up(&dev->tx_wait); 191 wake_up(&dev->tx_wait);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index a033745adb2f..316167404729 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -679,19 +679,15 @@ out:
679 return ret; 679 return ret;
680} 680}
681 681
682static void 682void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
683mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, 683 struct ieee80211_sta *sta)
684 struct ieee80211_sta *sta)
685{ 684{
686 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; 685 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
687 int idx = wcid->idx; 686 int i, idx = wcid->idx;
688 int i;
689 687
690 rcu_assign_pointer(dev->wcid[idx], NULL); 688 rcu_assign_pointer(dev->wcid[idx], NULL);
691 synchronize_rcu(); 689 synchronize_rcu();
692 690
693 mutex_lock(&dev->mutex);
694
695 if (dev->drv->sta_remove) 691 if (dev->drv->sta_remove)
696 dev->drv->sta_remove(dev, vif, sta); 692 dev->drv->sta_remove(dev, vif, sta);
697 693
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
699 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 695 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
700 mt76_txq_remove(dev, sta->txq[i]); 696 mt76_txq_remove(dev, sta->txq[i]);
701 mt76_wcid_free(dev->wcid_mask, idx); 697 mt76_wcid_free(dev->wcid_mask, idx);
698}
699EXPORT_SYMBOL_GPL(__mt76_sta_remove);
702 700
701static void
702mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
703 struct ieee80211_sta *sta)
704{
705 mutex_lock(&dev->mutex);
706 __mt76_sta_remove(dev, vif, sta);
703 mutex_unlock(&dev->mutex); 707 mutex_unlock(&dev->mutex);
704} 708}
705 709
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5dfb0601f101..bcbfd3c4a44b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -126,6 +126,7 @@ struct mt76_queue {
126 int ndesc; 126 int ndesc;
127 int queued; 127 int queued;
128 int buf_size; 128 int buf_size;
129 bool stopped;
129 130
130 u8 buf_offset; 131 u8 buf_offset;
131 u8 hw_idx; 132 u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
143 const struct mt76_reg_pair *rp, int len); 144 const struct mt76_reg_pair *rp, int len);
144 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, 145 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
145 struct mt76_reg_pair *rp, int len); 146 struct mt76_reg_pair *rp, int len);
147 int (*mcu_restart)(struct mt76_dev *dev);
146}; 148};
147 149
148struct mt76_queue_ops { 150struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
693 struct ieee80211_sta *sta, 695 struct ieee80211_sta *sta,
694 enum ieee80211_sta_state old_state, 696 enum ieee80211_sta_state old_state,
695 enum ieee80211_sta_state new_state); 697 enum ieee80211_sta_state new_state);
698void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
699 struct ieee80211_sta *sta);
696 700
697struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); 701struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
698 702
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index afcd86f735b4..4dcb465095d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
135 135
136out: 136out:
137 mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); 137 mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
138 if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > 138 if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
139 __sw_hweight8(dev->beacon_mask))
140 dev->beacon_check++; 139 dev->beacon_check++;
141} 140}
142 141
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index d69e82c66ab2..b3ae0aaea62a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -27,12 +27,16 @@ static void
27mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) 27mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
28{ 28{
29 __le32 *txd = (__le32 *)skb->data; 29 __le32 *txd = (__le32 *)skb->data;
30 struct ieee80211_hdr *hdr;
31 struct ieee80211_sta *sta;
30 struct mt7603_sta *msta; 32 struct mt7603_sta *msta;
31 struct mt76_wcid *wcid; 33 struct mt76_wcid *wcid;
34 void *priv;
32 int idx; 35 int idx;
33 u32 val; 36 u32 val;
37 u8 tid;
34 38
35 if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) 39 if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
36 goto free; 40 goto free;
37 41
38 val = le32_to_cpu(txd[1]); 42 val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
46 if (!wcid) 50 if (!wcid)
47 goto free; 51 goto free;
48 52
49 msta = container_of(wcid, struct mt7603_sta, wcid); 53 priv = msta = container_of(wcid, struct mt7603_sta, wcid);
50 val = le32_to_cpu(txd[0]); 54 val = le32_to_cpu(txd[0]);
51 skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); 55 skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
52 56
57 val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
58 val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
59 txd[0] = cpu_to_le32(val);
60
61 sta = container_of(priv, struct ieee80211_sta, drv_priv);
62 hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
63 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
64 ieee80211_sta_set_buffered(sta, tid, true);
65
53 spin_lock_bh(&dev->ps_lock); 66 spin_lock_bh(&dev->ps_lock);
54 __skb_queue_tail(&msta->psq, skb); 67 __skb_queue_tail(&msta->psq, skb);
55 if (skb_queue_len(&msta->psq) >= 64) { 68 if (skb_queue_len(&msta->psq) >= 64) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 15cc8f33b34d..d54dda67d036 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -112,7 +112,7 @@ static void
112mt7603_phy_init(struct mt7603_dev *dev) 112mt7603_phy_init(struct mt7603_dev *dev)
113{ 113{
114 int rx_chains = dev->mt76.antenna_mask; 114 int rx_chains = dev->mt76.antenna_mask;
115 int tx_chains = __sw_hweight8(rx_chains) - 1; 115 int tx_chains = hweight8(rx_chains) - 1;
116 116
117 mt76_rmw(dev, MT_WF_RMAC_RMCR, 117 mt76_rmw(dev, MT_WF_RMAC_RMCR,
118 (MT_WF_RMAC_RMCR_SMPS_MODE | 118 (MT_WF_RMAC_RMCR_SMPS_MODE |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 0a0115861b51..5e31d7da96fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1072,7 +1072,7 @@ out:
1072 case MT_PHY_TYPE_HT: 1072 case MT_PHY_TYPE_HT:
1073 final_rate_flags |= IEEE80211_TX_RC_MCS; 1073 final_rate_flags |= IEEE80211_TX_RC_MCS;
1074 final_rate &= GENMASK(5, 0); 1074 final_rate &= GENMASK(5, 0);
1075 if (i > 15) 1075 if (final_rate > 15)
1076 return false; 1076 return false;
1077 break; 1077 break;
1078 default: 1078 default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index b10775ed92e6..cc0fe0933b2d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -5,6 +5,7 @@
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include "mt7603.h" 7#include "mt7603.h"
8#include "mac.h"
8#include "eeprom.h" 9#include "eeprom.h"
9 10
10static int 11static int
@@ -386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
386} 387}
387 388
388static void 389static void
390mt7603_ps_set_more_data(struct sk_buff *skb)
391{
392 struct ieee80211_hdr *hdr;
393
394 hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
395 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
396}
397
398static void
389mt7603_release_buffered_frames(struct ieee80211_hw *hw, 399mt7603_release_buffered_frames(struct ieee80211_hw *hw,
390 struct ieee80211_sta *sta, 400 struct ieee80211_sta *sta,
391 u16 tids, int nframes, 401 u16 tids, int nframes,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
399 409
400 __skb_queue_head_init(&list); 410 __skb_queue_head_init(&list);
401 411
412 mt7603_wtbl_set_ps(dev, msta, false);
413
402 spin_lock_bh(&dev->ps_lock); 414 spin_lock_bh(&dev->ps_lock);
403 skb_queue_walk_safe(&msta->psq, skb, tmp) { 415 skb_queue_walk_safe(&msta->psq, skb, tmp) {
404 if (!nframes) 416 if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
409 421
410 skb_set_queue_mapping(skb, MT_TXQ_PSD); 422 skb_set_queue_mapping(skb, MT_TXQ_PSD);
411 __skb_unlink(skb, &msta->psq); 423 __skb_unlink(skb, &msta->psq);
424 mt7603_ps_set_more_data(skb);
412 __skb_queue_tail(&list, skb); 425 __skb_queue_tail(&list, skb);
413 nframes--; 426 nframes--;
414 } 427 }
415 spin_unlock_bh(&dev->ps_lock); 428 spin_unlock_bh(&dev->ps_lock);
416 429
430 if (!skb_queue_empty(&list))
431 ieee80211_sta_eosp(sta);
432
417 mt7603_ps_tx_list(dev, &list); 433 mt7603_ps_tx_list(dev, &list);
418 434
419 if (nframes) 435 if (nframes)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 4b0713f1fd5e..d06905ea8cc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
433{ 433{
434 struct cfg80211_chan_def *chandef = &dev->mt76.chandef; 434 struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
435 struct ieee80211_hw *hw = mt76_hw(dev); 435 struct ieee80211_hw *hw = mt76_hw(dev);
436 int n_chains = __sw_hweight8(dev->mt76.antenna_mask); 436 int n_chains = hweight8(dev->mt76.antenna_mask);
437 struct { 437 struct {
438 u8 control_chan; 438 u8 control_chan;
439 u8 center_chan; 439 u8 center_chan;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index e13fea80d970..b920be1f5718 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
23 } 23 }
24 24
25 mem_base = devm_ioremap_resource(&pdev->dev, res); 25 mem_base = devm_ioremap_resource(&pdev->dev, res);
26 if (!mem_base) { 26 if (IS_ERR(mem_base)) {
27 dev_err(&pdev->dev, "Failed to get memory resource\n"); 27 dev_err(&pdev->dev, "Failed to get memory resource\n");
28 return -EINVAL; 28 return PTR_ERR(mem_base);
29 } 29 }
30 30
31 mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, 31 mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 0290ba5869a5..736f81752b5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
46 { MT_MM20_PROT_CFG, 0x01742004 }, 46 { MT_MM20_PROT_CFG, 0x01742004 },
47 { MT_MM40_PROT_CFG, 0x03f42084 }, 47 { MT_MM40_PROT_CFG, 0x03f42084 },
48 { MT_TXOP_CTRL_CFG, 0x0000583f }, 48 { MT_TXOP_CTRL_CFG, 0x0000583f },
49 { MT_TX_RTS_CFG, 0x00092b20 }, 49 { MT_TX_RTS_CFG, 0x00ffff20 },
50 { MT_EXP_ACK_TIME, 0x002400ca }, 50 { MT_EXP_ACK_TIME, 0x002400ca },
51 { MT_TXOP_HLDR_ET, 0x00000002 }, 51 { MT_TXOP_HLDR_ET, 0x00000002 },
52 { MT_XIFS_TIME_CFG, 0x33a41010 }, 52 { MT_XIFS_TIME_CFG, 0x33a41010 },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 91718647da02..e5a06f74a6f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
229 struct usb_device *usb_dev = interface_to_usbdev(usb_intf); 229 struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
230 struct mt76x02_dev *dev; 230 struct mt76x02_dev *dev;
231 struct mt76_dev *mdev; 231 struct mt76_dev *mdev;
232 u32 asic_rev, mac_rev; 232 u32 mac_rev;
233 int ret; 233 int ret;
234 234
235 mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, 235 mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
262 goto err; 262 goto err;
263 } 263 }
264 264
265 asic_rev = mt76_rr(dev, MT_ASIC_VERSION); 265 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
266 mac_rev = mt76_rr(dev, MT_MAC_CSR0); 266 mac_rev = mt76_rr(dev, MT_MAC_CSR0);
267 dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", 267 dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
268 asic_rev, mac_rev); 268 mdev->rev, mac_rev);
269 if (!is_mt76x0(dev)) {
270 ret = -ENODEV;
271 goto err;
272 }
269 273
270 /* Note: vendor driver skips this check for MT76X0U */ 274 /* Note: vendor driver skips this check for MT76X0U */
271 if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) 275 if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 6915cce5def9..07061eb4d1e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
51 u16 false_cca; 51 u16 false_cca;
52 s8 avg_rssi_all; 52 s8 avg_rssi_all;
53 s8 agc_gain_adjust; 53 s8 agc_gain_adjust;
54 s8 agc_lowest_gain;
54 s8 low_gain; 55 s8 low_gain;
55 56
56 s8 temp_vco; 57 s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
114 struct mt76x02_dfs_pattern_detector dfs_pd; 115 struct mt76x02_dfs_pattern_detector dfs_pd;
115 116
116 /* edcca monitor */ 117 /* edcca monitor */
118 unsigned long ed_trigger_timeout;
117 bool ed_tx_blocked; 119 bool ed_tx_blocked;
118 bool ed_monitor; 120 bool ed_monitor;
121 u8 ed_monitor_enabled;
122 u8 ed_monitor_learning;
119 u8 ed_trigger; 123 u8 ed_trigger;
120 u8 ed_silent; 124 u8 ed_silent;
121 ktime_t ed_time; 125 ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
188 192
189void mt76x02_init_debugfs(struct mt76x02_dev *dev); 193void mt76x02_init_debugfs(struct mt76x02_dev *dev);
190 194
195static inline bool is_mt76x0(struct mt76x02_dev *dev)
196{
197 return mt76_chip(&dev->mt76) == 0x7610 ||
198 mt76_chip(&dev->mt76) == 0x7630 ||
199 mt76_chip(&dev->mt76) == 0x7650;
200}
201
191static inline bool is_mt76x2(struct mt76x02_dev *dev) 202static inline bool is_mt76x2(struct mt76x02_dev *dev)
192{ 203{
193 return mt76_chip(&dev->mt76) == 0x7612 || 204 return mt76_chip(&dev->mt76) == 0x7612 ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 7580c5c986ff..b1d6fd4861e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
116 return 0; 116 return 0;
117} 117}
118 118
119static int
120mt76_edcca_set(void *data, u64 val)
121{
122 struct mt76x02_dev *dev = data;
123 enum nl80211_dfs_regions region = dev->dfs_pd.region;
124
125 dev->ed_monitor_enabled = !!val;
126 dev->ed_monitor = dev->ed_monitor_enabled &&
127 region == NL80211_DFS_ETSI;
128 mt76x02_edcca_init(dev, true);
129
130 return 0;
131}
132
133static int
134mt76_edcca_get(void *data, u64 *val)
135{
136 struct mt76x02_dev *dev = data;
137
138 *val = dev->ed_monitor_enabled;
139 return 0;
140}
141
142DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
143 "%lld\n");
144
119void mt76x02_init_debugfs(struct mt76x02_dev *dev) 145void mt76x02_init_debugfs(struct mt76x02_dev *dev)
120{ 146{
121 struct dentry *dir; 147 struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
127 debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); 153 debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
128 debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); 154 debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
129 155
156 debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
130 debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); 157 debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
131 debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); 158 debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
132 debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, 159 debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index e4649103efd4..17d12d212d1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
885 if (dfs_pd->region != region) { 885 if (dfs_pd->region != region) {
886 tasklet_disable(&dfs_pd->dfs_tasklet); 886 tasklet_disable(&dfs_pd->dfs_tasklet);
887 887
888 dev->ed_monitor = region == NL80211_DFS_ETSI; 888 dev->ed_monitor = dev->ed_monitor_enabled &&
889 region == NL80211_DFS_ETSI;
889 mt76x02_edcca_init(dev, true); 890 mt76x02_edcca_init(dev, true);
890 891
891 dfs_pd->region = region; 892 dfs_pd->region = region;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 91ff6598eccf..9ed231abe916 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
67} 67}
68EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); 68EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
69 69
70void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
71 struct ieee80211_key_conf *key)
72{
73 enum mt76x02_cipher_type cipher;
74 u8 key_data[32];
75 u32 iv, eiv;
76 u64 pn;
77
78 cipher = mt76x02_mac_get_key_info(key, key_data);
79 iv = mt76_rr(dev, MT_WCID_IV(idx));
80 eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
81
82 pn = (u64)eiv << 16;
83 if (cipher == MT_CIPHER_TKIP) {
84 pn |= (iv >> 16) & 0xff;
85 pn |= (iv & 0xff) << 8;
86 } else if (cipher >= MT_CIPHER_AES_CCMP) {
87 pn |= iv & 0xffff;
88 } else {
89 return;
90 }
91
92 atomic64_set(&key->tx_pn, pn);
93}
94
95
70int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, 96int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
71 struct ieee80211_key_conf *key) 97 struct ieee80211_key_conf *key)
72{ 98{
73 enum mt76x02_cipher_type cipher; 99 enum mt76x02_cipher_type cipher;
74 u8 key_data[32]; 100 u8 key_data[32];
75 u8 iv_data[8]; 101 u8 iv_data[8];
102 u64 pn;
76 103
77 cipher = mt76x02_mac_get_key_info(key, key_data); 104 cipher = mt76x02_mac_get_key_info(key, key_data);
78 if (cipher == MT_CIPHER_NONE && key) 105 if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
85 if (key) { 112 if (key) {
86 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, 113 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
87 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); 114 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
115
116 pn = atomic64_read(&key->tx_pn);
117
88 iv_data[3] = key->keyidx << 6; 118 iv_data[3] = key->keyidx << 6;
89 if (cipher >= MT_CIPHER_TKIP) 119 if (cipher >= MT_CIPHER_TKIP) {
90 iv_data[3] |= 0x20; 120 iv_data[3] |= 0x20;
121 put_unaligned_le32(pn >> 16, &iv_data[4]);
122 }
123
124 if (cipher == MT_CIPHER_TKIP) {
125 iv_data[0] = (pn >> 8) & 0xff;
126 iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
127 iv_data[2] = pn & 0xff;
128 } else if (cipher >= MT_CIPHER_AES_CCMP) {
129 put_unaligned_le16((pn & 0xffff), &iv_data[0]);
130 }
91 } 131 }
92 132
93 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); 133 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
920 } 960 }
921 } 961 }
922 mt76x02_edcca_tx_enable(dev, true); 962 mt76x02_edcca_tx_enable(dev, true);
963 dev->ed_monitor_learning = true;
923 964
924 /* clear previous CCA timer value */ 965 /* clear previous CCA timer value */
925 mt76_rr(dev, MT_ED_CCA_TIMER); 966 mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
929 970
930#define MT_EDCCA_TH 92 971#define MT_EDCCA_TH 92
931#define MT_EDCCA_BLOCK_TH 2 972#define MT_EDCCA_BLOCK_TH 2
973#define MT_EDCCA_LEARN_TH 50
974#define MT_EDCCA_LEARN_CCA 180
975#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
976
932static void mt76x02_edcca_check(struct mt76x02_dev *dev) 977static void mt76x02_edcca_check(struct mt76x02_dev *dev)
933{ 978{
934 ktime_t cur_time; 979 ktime_t cur_time;
@@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
951 dev->ed_trigger = 0; 996 dev->ed_trigger = 0;
952 } 997 }
953 998
954 if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && 999 if (dev->cal.agc_lowest_gain &&
955 !dev->ed_tx_blocked) 1000 dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
1001 dev->ed_trigger > MT_EDCCA_LEARN_TH) {
1002 dev->ed_monitor_learning = false;
1003 dev->ed_trigger_timeout = jiffies + 20 * HZ;
1004 } else if (!dev->ed_monitor_learning &&
1005 time_is_after_jiffies(dev->ed_trigger_timeout)) {
1006 dev->ed_monitor_learning = true;
1007 mt76x02_edcca_tx_enable(dev, true);
1008 }
1009
1010 if (dev->ed_monitor_learning)
1011 return;
1012
1013 if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
956 mt76x02_edcca_tx_enable(dev, false); 1014 mt76x02_edcca_tx_enable(dev, false);
957 else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && 1015 else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
958 dev->ed_tx_blocked)
959 mt76x02_edcca_tx_enable(dev, true); 1016 mt76x02_edcca_tx_enable(dev, true);
960} 1017}
961 1018
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 6b1f25d2f64c..caeeef96c42f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
177 u8 key_idx, struct ieee80211_key_conf *key); 177 u8 key_idx, struct ieee80211_key_conf *key);
178int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, 178int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
179 struct ieee80211_key_conf *key); 179 struct ieee80211_key_conf *key);
180void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
181 struct ieee80211_key_conf *key);
180void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, 182void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
181 u8 *mac); 183 u8 *mac);
182void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); 184void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1229f19f2b02..daaed1220147 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -19,6 +19,7 @@
19#include <linux/irq.h> 19#include <linux/irq.h>
20 20
21#include "mt76x02.h" 21#include "mt76x02.h"
22#include "mt76x02_mcu.h"
22#include "mt76x02_trace.h" 23#include "mt76x02_trace.h"
23 24
24struct beacon_bc_data { 25struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
418 return i < 4; 419 return i < 4;
419} 420}
420 421
422static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
423 struct ieee80211_sta *sta,
424 struct ieee80211_key_conf *key, void *data)
425{
426 struct mt76x02_dev *dev = hw->priv;
427 struct mt76_wcid *wcid;
428
429 if (!sta)
430 return;
431
432 wcid = (struct mt76_wcid *) sta->drv_priv;
433
434 if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
435 return;
436
437 mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
438}
439
440static void mt76x02_reset_state(struct mt76x02_dev *dev)
441{
442 int i;
443
444 lockdep_assert_held(&dev->mt76.mutex);
445
446 clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
447
448 rcu_read_lock();
449 ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
450 rcu_read_unlock();
451
452 for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
453 struct ieee80211_sta *sta;
454 struct ieee80211_vif *vif;
455 struct mt76x02_sta *msta;
456 struct mt76_wcid *wcid;
457 void *priv;
458
459 wcid = rcu_dereference_protected(dev->mt76.wcid[i],
460 lockdep_is_held(&dev->mt76.mutex));
461 if (!wcid)
462 continue;
463
464 priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
465 sta = container_of(priv, struct ieee80211_sta, drv_priv);
466
467 priv = msta->vif;
468 vif = container_of(priv, struct ieee80211_vif, drv_priv);
469
470 __mt76_sta_remove(&dev->mt76, vif, sta);
471 memset(msta, 0, sizeof(*msta));
472 }
473
474 dev->vif_mask = 0;
475 dev->beacon_mask = 0;
476}
477
421static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) 478static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
422{ 479{
423 u32 mask = dev->mt76.mmio.irqmask; 480 u32 mask = dev->mt76.mmio.irqmask;
481 bool restart = dev->mt76.mcu_ops->mcu_restart;
424 int i; 482 int i;
425 483
426 ieee80211_stop_queues(dev->mt76.hw); 484 ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
434 492
435 mutex_lock(&dev->mt76.mutex); 493 mutex_lock(&dev->mt76.mutex);
436 494
495 if (restart)
496 mt76x02_reset_state(dev);
497
437 if (dev->beacon_mask) 498 if (dev->beacon_mask)
438 mt76_clear(dev, MT_BEACON_TIME_CFG, 499 mt76_clear(dev, MT_BEACON_TIME_CFG,
439 MT_BEACON_TIME_CFG_BEACON_TX | 500 MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
452 /* let fw reset DMA */ 513 /* let fw reset DMA */
453 mt76_set(dev, 0x734, 0x3); 514 mt76_set(dev, 0x734, 0x3);
454 515
516 if (restart)
517 dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
518
455 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) 519 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
456 mt76_queue_tx_cleanup(dev, i, true); 520 mt76_queue_tx_cleanup(dev, i, true);
457 521
458 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) 522 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
459 mt76_queue_rx_reset(dev, i); 523 mt76_queue_rx_reset(dev, i);
460 524
461 mt76_wr(dev, MT_MAC_SYS_CTRL, 525 mt76x02_mac_start(dev);
462 MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); 526
463 mt76_set(dev, MT_WPDMA_GLO_CFG,
464 MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
465 if (dev->ed_monitor) 527 if (dev->ed_monitor)
466 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 528 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
467 529
468 if (dev->beacon_mask) 530 if (dev->beacon_mask && !restart)
469 mt76_set(dev, MT_BEACON_TIME_CFG, 531 mt76_set(dev, MT_BEACON_TIME_CFG,
470 MT_BEACON_TIME_CFG_BEACON_TX | 532 MT_BEACON_TIME_CFG_BEACON_TX |
471 MT_BEACON_TIME_CFG_TBTT_EN); 533 MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
486 napi_schedule(&dev->mt76.napi[i]); 548 napi_schedule(&dev->mt76.napi[i]);
487 } 549 }
488 550
489 ieee80211_wake_queues(dev->mt76.hw); 551 if (restart) {
490 552 mt76x02_mcu_function_select(dev, Q_SELECT, 1);
491 mt76_txq_schedule_all(&dev->mt76); 553 ieee80211_restart_hw(dev->mt76.hw);
554 } else {
555 ieee80211_wake_queues(dev->mt76.hw);
556 mt76_txq_schedule_all(&dev->mt76);
557 }
492} 558}
493 559
494static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) 560static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index a020c757ba5c..a54b63a96eae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
194 ret = true; 194 ret = true;
195 } 195 }
196 196
197 dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
198
197 return ret; 199 return ret;
198} 200}
199EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); 201EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 43f07461c8d3..6fb52b596d42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
85 85
86 mt76x02_insert_hdr_pad(skb); 86 mt76x02_insert_hdr_pad(skb);
87 87
88 txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); 88 txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
89 mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); 89 mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
90 skb_push(skb, sizeof(struct mt76x02_txwi));
90 91
91 pid = mt76_tx_status_skb_add(mdev, wcid, skb); 92 pid = mt76_tx_status_skb_add(mdev, wcid, skb);
92 txwi->pktid = pid; 93 txwi->pktid = pid;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index a48c261b0c63..cd072ac614f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
237 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 237 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
238 int idx = 0; 238 int idx = 0;
239 239
240 memset(msta, 0, sizeof(*msta));
241
240 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); 242 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
241 if (idx < 0) 243 if (idx < 0)
242 return -ENOSPC; 244 return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
274 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 276 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
275 struct mt76_txq *mtxq; 277 struct mt76_txq *mtxq;
276 278
279 memset(mvif, 0, sizeof(*mvif));
280
277 mvif->idx = idx; 281 mvif->idx = idx;
278 mvif->group_wcid.idx = MT_VIF_WCID(idx); 282 mvif->group_wcid.idx = MT_VIF_WCID(idx);
279 mvif->group_wcid.hw_key_idx = -1; 283 mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
289 struct mt76x02_dev *dev = hw->priv; 293 struct mt76x02_dev *dev = hw->priv;
290 unsigned int idx = 0; 294 unsigned int idx = 0;
291 295
296 /* Allow to change address in HW if we create first interface. */
297 if (!dev->vif_mask &&
298 (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
299 memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
300 mt76x02_mac_setaddr(dev, vif->addr);
301
292 if (vif->addr[0] & BIT(1)) 302 if (vif->addr[0] & BIT(1))
293 idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); 303 idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
294 304
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
311 if (dev->vif_mask & BIT(idx)) 321 if (dev->vif_mask & BIT(idx))
312 return -EBUSY; 322 return -EBUSY;
313 323
314 /* Allow to change address in HW if we create first interface. */
315 if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
316 mt76x02_mac_setaddr(dev, vif->addr);
317
318 dev->vif_mask |= BIT(idx); 324 dev->vif_mask |= BIT(idx);
319 325
320 mt76x02_vif_init(dev, vif, idx); 326 mt76x02_vif_init(dev, vif, idx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index f8534362e2c8..a30ef2c5a9db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
106 { MT_TX_SW_CFG1, 0x00010000 }, 106 { MT_TX_SW_CFG1, 0x00010000 },
107 { MT_TX_SW_CFG2, 0x00000000 }, 107 { MT_TX_SW_CFG2, 0x00000000 },
108 { MT_TXOP_CTRL_CFG, 0x0400583f }, 108 { MT_TXOP_CTRL_CFG, 0x0400583f },
109 { MT_TX_RTS_CFG, 0x00100020 }, 109 { MT_TX_RTS_CFG, 0x00ffff20 },
110 { MT_TX_TIMEOUT_CFG, 0x000a2290 }, 110 { MT_TX_TIMEOUT_CFG, 0x000a2290 },
111 { MT_TX_RETRY_CFG, 0x47f01f0f }, 111 { MT_TX_RETRY_CFG, 0x47f01f0f },
112 { MT_EXP_ACK_TIME, 0x002c00dc }, 112 { MT_EXP_ACK_TIME, 0x002c00dc },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 6c619f1c65c9..d7abe3d73bad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
71 71
72void mt76x2_cleanup(struct mt76x02_dev *dev); 72void mt76x2_cleanup(struct mt76x02_dev *dev);
73 73
74int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
74void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); 75void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
75void mt76x2_init_txpower(struct mt76x02_dev *dev, 76void mt76x2_init_txpower(struct mt76x02_dev *dev,
76 struct ieee80211_supported_band *sband); 77 struct ieee80211_supported_band *sband);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 984d9c4c2e1a..d3927a13e92e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
77 } 77 }
78} 78}
79 79
80static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) 80int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
81{ 81{
82 const u8 *macaddr = dev->mt76.macaddr; 82 const u8 *macaddr = dev->mt76.macaddr;
83 u32 val; 83 u32 val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 03e24ae7f66c..605dc66ae83b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -165,9 +165,30 @@ error:
165 return -ENOENT; 165 return -ENOENT;
166} 166}
167 167
168static int
169mt76pci_mcu_restart(struct mt76_dev *mdev)
170{
171 struct mt76x02_dev *dev;
172 int ret;
173
174 dev = container_of(mdev, struct mt76x02_dev, mt76);
175
176 mt76x02_mcu_cleanup(dev);
177 mt76x2_mac_reset(dev, true);
178
179 ret = mt76pci_load_firmware(dev);
180 if (ret)
181 return ret;
182
183 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
184
185 return 0;
186}
187
168int mt76x2_mcu_init(struct mt76x02_dev *dev) 188int mt76x2_mcu_init(struct mt76x02_dev *dev)
169{ 189{
170 static const struct mt76_mcu_ops mt76x2_mcu_ops = { 190 static const struct mt76_mcu_ops mt76x2_mcu_ops = {
191 .mcu_restart = mt76pci_mcu_restart,
171 .mcu_send_msg = mt76x02_mcu_msg_send, 192 .mcu_send_msg = mt76x02_mcu_msg_send,
172 }; 193 };
173 int ret; 194 int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 1848e8ab2e21..769a9b972044 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
260 gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; 260 gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
261 gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; 261 gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
262 262
263 if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) 263 val = 0x1836 << 16;
264 if (!mt76x2_has_ext_lna(dev) &&
265 dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
264 val = 0x1e42 << 16; 266 val = 0x1e42 << 16;
265 else 267
266 val = 0x1836 << 16; 268 if (mt76x2_has_ext_lna(dev) &&
269 dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
270 dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
271 val = 0x0f36 << 16;
267 272
268 val |= 0xf8; 273 val |= 0xf8;
269 274
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
280{ 285{
281 u8 *gain = dev->cal.agc_gain_init; 286 u8 *gain = dev->cal.agc_gain_init;
282 u8 low_gain_delta, gain_delta; 287 u8 low_gain_delta, gain_delta;
288 u32 agc_35, agc_37;
283 bool gain_change; 289 bool gain_change;
284 int low_gain; 290 int low_gain;
285 u32 val; 291 u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
318 else 324 else
319 low_gain_delta = 14; 325 low_gain_delta = 14;
320 326
327 agc_37 = 0x2121262c;
328 if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
329 agc_35 = 0x11111516;
330 else if (low_gain == 2)
331 agc_35 = agc_37 = 0x08080808;
332 else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
333 agc_35 = 0x10101014;
334 else
335 agc_35 = 0x11111116;
336
321 if (low_gain == 2) { 337 if (low_gain == 2) {
322 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); 338 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
323 mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); 339 mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
326 dev->cal.agc_gain_adjust = 0; 342 dev->cal.agc_gain_adjust = 0;
327 } else { 343 } else {
328 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); 344 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
329 if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
330 mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
331 else
332 mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
333 mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
334 gain_delta = 0; 345 gain_delta = 0;
335 dev->cal.agc_gain_adjust = low_gain_delta; 346 dev->cal.agc_gain_adjust = low_gain_delta;
336 } 347 }
337 348
349 mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
350 mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
351
338 dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; 352 dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
339 dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; 353 dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
340 mt76x2_phy_set_gain_val(dev); 354 mt76x2_phy_set_gain_val(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ddb6b2c48e01..ac0f13d46299 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -21,11 +21,10 @@
21#include "mt76x2u.h" 21#include "mt76x2u.h"
22 22
23static const struct usb_device_id mt76x2u_device_table[] = { 23static const struct usb_device_id mt76x2u_device_table[] = {
24 { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
25 { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ 24 { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
26 { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ 25 { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
27 { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ 26 { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
28 { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */ 27 { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
29 { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ 28 { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
30 { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ 29 { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
31 { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ 30 { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
66 65
67 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); 66 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
68 dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); 67 dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
68 if (!is_mt76x2(dev)) {
69 err = -ENODEV;
70 goto err;
71 }
69 72
70 err = mt76x2u_register_device(dev); 73 err = mt76x2u_register_device(dev);
71 if (err < 0) 74 if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 5e84b4535cb1..3b82345756ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
93 mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); 93 mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
94 mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); 94 mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
95 mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); 95 mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
96 mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
97 96
98 mt76_wr(dev, MT_WMM_AIFSN, 0x2273); 97 mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
99 mt76_wr(dev, MT_WMM_CWMIN, 0x2344); 98 mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5a349fe3e576..2585df512335 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
289 dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); 289 dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
290 dev->queue_ops->kick(dev, q); 290 dev->queue_ops->kick(dev, q);
291 291
292 if (q->queued > q->ndesc - 8) 292 if (q->queued > q->ndesc - 8 && !q->stopped) {
293 ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); 293 ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
294 q->stopped = true;
295 }
296
294 spin_unlock_bh(&q->lock); 297 spin_unlock_bh(&q->lock);
295} 298}
296EXPORT_SYMBOL_GPL(mt76_tx); 299EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
374 if (last_skb) { 377 if (last_skb) {
375 mt76_queue_ps_skb(dev, sta, last_skb, true); 378 mt76_queue_ps_skb(dev, sta, last_skb, true);
376 dev->queue_ops->kick(dev, hwq); 379 dev->queue_ops->kick(dev, hwq);
380 } else {
381 ieee80211_sta_eosp(sta);
377 } 382 }
383
378 spin_unlock_bh(&hwq->lock); 384 spin_unlock_bh(&hwq->lock);
379} 385}
380EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); 386EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
577 struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; 583 struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
578 struct mt76_queue *hwq = mtxq->hwq; 584 struct mt76_queue *hwq = mtxq->hwq;
579 585
586 if (!test_bit(MT76_STATE_RUNNING, &dev->state))
587 return;
588
580 spin_lock_bh(&hwq->lock); 589 spin_lock_bh(&hwq->lock);
581 if (list_empty(&mtxq->list)) 590 if (list_empty(&mtxq->list))
582 list_add_tail(&mtxq->list, &hwq->swq); 591 list_add_tail(&mtxq->list, &hwq->swq);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index ae6ada370597..4c1abd492405 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
655 spin_lock_bh(&q->lock); 655 spin_lock_bh(&q->lock);
656 } 656 }
657 mt76_txq_schedule(dev, q); 657 mt76_txq_schedule(dev, q);
658 wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; 658
659 wake = q->stopped && q->queued < q->ndesc - 8;
660 if (wake)
661 q->stopped = false;
662
659 if (!q->queued) 663 if (!q->queued)
660 wake_up(&dev->tx_wait); 664 wake_up(&dev->tx_wait);
661 665
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index d8b7863f7926..6ae7f14dc9bf 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
303 mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); 303 mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
304 dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", 304 dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
305 asic_rev, mac_rev); 305 asic_rev, mac_rev);
306 if ((asic_rev >> 16) != 0x7601) {
307 ret = -ENODEV;
308 goto err;
309 }
306 310
307 /* Note: vendor driver skips this check for MT7601U */ 311 /* Note: vendor driver skips this check for MT7601U */
308 if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) 312 if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb70badf..f0716f6ce41f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
404static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, 404static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
405 struct nvme_ns *ns) 405 struct nvme_ns *ns)
406{ 406{
407 enum nvme_ana_state old;
408
409 mutex_lock(&ns->head->lock); 407 mutex_lock(&ns->head->lock);
410 old = ns->ana_state;
411 ns->ana_grpid = le32_to_cpu(desc->grpid); 408 ns->ana_grpid = le32_to_cpu(desc->grpid);
412 ns->ana_state = desc->state; 409 ns->ana_state = desc->state;
413 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); 410 clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
414 411
415 if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) 412 if (nvme_state_is_live(ns->ana_state))
416 nvme_mpath_set_live(ns); 413 nvme_mpath_set_live(ns);
417 mutex_unlock(&ns->head->lock); 414 mutex_unlock(&ns->head->lock);
418} 415}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e7e08889865e..68c49dd67210 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
627 return ret; 627 return ret;
628} 628}
629 629
630static inline void nvme_tcp_end_request(struct request *rq, __le16 status) 630static inline void nvme_tcp_end_request(struct request *rq, u16 status)
631{ 631{
632 union nvme_result res = {}; 632 union nvme_result res = {};
633 633
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 2d73b66e3686..b3e765a95af8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
509 509
510 ret = nvmet_p2pmem_ns_enable(ns); 510 ret = nvmet_p2pmem_ns_enable(ns);
511 if (ret) 511 if (ret)
512 goto out_unlock; 512 goto out_dev_disable;
513 513
514 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 514 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
515 nvmet_p2pmem_ns_add_p2p(ctrl, ns); 515 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
550out_dev_put: 550out_dev_put:
551 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 551 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
552 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); 552 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
553 553out_dev_disable:
554 nvmet_ns_dev_disable(ns); 554 nvmet_ns_dev_disable(ns);
555 goto out_unlock; 555 goto out_unlock;
556} 556}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 3e43212d3c1c..bc6ebb51b0bf 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ err:
75 return ret; 75 return ret;
76} 76}
77 77
78static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter) 78static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
79{ 79{
80 bv->bv_page = sg_page_iter_page(iter); 80 bv->bv_page = sg_page(sg);
81 bv->bv_offset = iter->sg->offset; 81 bv->bv_offset = sg->offset;
82 bv->bv_len = PAGE_SIZE - iter->sg->offset; 82 bv->bv_len = sg->length;
83} 83}
84 84
85static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, 85static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
128 128
129static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) 129static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
130{ 130{
131 ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); 131 ssize_t nr_bvec = req->sg_cnt;
132 struct sg_page_iter sg_pg_iter;
133 unsigned long bv_cnt = 0; 132 unsigned long bv_cnt = 0;
134 bool is_sync = false; 133 bool is_sync = false;
135 size_t len = 0, total_len = 0; 134 size_t len = 0, total_len = 0;
136 ssize_t ret = 0; 135 ssize_t ret = 0;
137 loff_t pos; 136 loff_t pos;
138 137 int i;
138 struct scatterlist *sg;
139 139
140 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) 140 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
141 is_sync = true; 141 is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
147 } 147 }
148 148
149 memset(&req->f.iocb, 0, sizeof(struct kiocb)); 149 memset(&req->f.iocb, 0, sizeof(struct kiocb));
150 for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { 150 for_each_sg(req->sg, sg, req->sg_cnt, i) {
151 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); 151 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
152 len += req->f.bvec[bv_cnt].bv_len; 152 len += req->f.bvec[bv_cnt].bv_len;
153 total_len += req->f.bvec[bv_cnt].bv_len; 153 total_len += req->f.bvec[bv_cnt].bv_len;
154 bv_cnt++; 154 bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
225 225
226static void nvmet_file_execute_rw(struct nvmet_req *req) 226static void nvmet_file_execute_rw(struct nvmet_req *req)
227{ 227{
228 ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); 228 ssize_t nr_bvec = req->sg_cnt;
229 229
230 if (!req->sg_cnt || !nr_bvec) { 230 if (!req->sg_cnt || !nr_bvec) {
231 nvmet_req_complete(req, 0); 231 nvmet_req_complete(req, 0);
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 56dd83a45e55..5484a46dafda 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
213struct pardevice *parport_open(int devnum, const char *name) 213struct pardevice *parport_open(int devnum, const char *name)
214{ 214{
215 struct daisydev *p = topology; 215 struct daisydev *p = topology;
216 struct pardev_cb par_cb;
217 struct parport *port; 216 struct parport *port;
218 struct pardevice *dev; 217 struct pardevice *dev;
219 int daisy; 218 int daisy;
220 219
221 memset(&par_cb, 0, sizeof(par_cb));
222 spin_lock(&topology_lock); 220 spin_lock(&topology_lock);
223 while (p && p->devnum != devnum) 221 while (p && p->devnum != devnum)
224 p = p->next; 222 p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
232 port = parport_get_port(p->port); 230 port = parport_get_port(p->port);
233 spin_unlock(&topology_lock); 231 spin_unlock(&topology_lock);
234 232
235 dev = parport_register_dev_model(port, name, &par_cb, devnum); 233 dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
236 parport_put_port(port); 234 parport_put_port(port);
237 if (!dev) 235 if (!dev)
238 return NULL; 236 return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
482 kfree(deviceid); 480 kfree(deviceid);
483 return detected; 481 return detected;
484} 482}
485
486static int daisy_drv_probe(struct pardevice *par_dev)
487{
488 struct device_driver *drv = par_dev->dev.driver;
489
490 if (strcmp(drv->name, "daisy_drv"))
491 return -ENODEV;
492 if (strcmp(par_dev->name, daisy_dev_name))
493 return -ENODEV;
494
495 return 0;
496}
497
498static struct parport_driver daisy_driver = {
499 .name = "daisy_drv",
500 .probe = daisy_drv_probe,
501 .devmodel = true,
502};
503
504int daisy_drv_init(void)
505{
506 return parport_register_driver(&daisy_driver);
507}
508
509void daisy_drv_exit(void)
510{
511 parport_unregister_driver(&daisy_driver);
512}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..e035174ba205 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
257ssize_t parport_device_id (int devnum, char *buffer, size_t count) 257ssize_t parport_device_id (int devnum, char *buffer, size_t count)
258{ 258{
259 ssize_t retval = -ENXIO; 259 ssize_t retval = -ENXIO;
260 struct pardevice *dev = parport_open(devnum, daisy_dev_name); 260 struct pardevice *dev = parport_open (devnum, "Device ID probe");
261 if (!dev) 261 if (!dev)
262 return -ENXIO; 262 return -ENXIO;
263 263
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0171b8dbcdcd..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
137 137
138int parport_bus_init(void) 138int parport_bus_init(void)
139{ 139{
140 int retval; 140 return bus_register(&parport_bus_type);
141
142 retval = bus_register(&parport_bus_type);
143 if (retval)
144 return retval;
145 daisy_drv_init();
146
147 return 0;
148} 141}
149 142
150void parport_bus_exit(void) 143void parport_bus_exit(void)
151{ 144{
152 daisy_drv_exit();
153 bus_unregister(&parport_bus_type); 145 bus_unregister(&parport_bus_type);
154} 146}
155 147
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d88634115..d994839a3e24 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
273u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, 273u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
274 enum pcie_link_width *width); 274 enum pcie_link_width *width);
275void __pcie_print_link_status(struct pci_dev *dev, bool verbose); 275void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
276void pcie_report_downtraining(struct pci_dev *dev);
276 277
277/* Single Root I/O Virtualization */ 278/* Single Root I/O Virtualization */
278struct pci_sriov { 279struct pci_sriov {
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index d2eae3b7cc0f..4fa9e3523ee1 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
30{ 30{
31 u16 lnk_ctl; 31 u16 lnk_ctl;
32 32
33 pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
34
33 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); 35 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
34 lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; 36 lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
35 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); 37 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
44 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); 46 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
45} 47}
46 48
47static irqreturn_t pcie_bw_notification_handler(int irq, void *context) 49static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
48{ 50{
49 struct pcie_device *srv = context; 51 struct pcie_device *srv = context;
50 struct pci_dev *port = srv->port; 52 struct pci_dev *port = srv->port;
51 struct pci_dev *dev;
52 u16 link_status, events; 53 u16 link_status, events;
53 int ret; 54 int ret;
54 55
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
58 if (ret != PCIBIOS_SUCCESSFUL || !events) 59 if (ret != PCIBIOS_SUCCESSFUL || !events)
59 return IRQ_NONE; 60 return IRQ_NONE;
60 61
62 pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
63 pcie_update_link_speed(port->subordinate, link_status);
64 return IRQ_WAKE_THREAD;
65}
66
67static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
68{
69 struct pcie_device *srv = context;
70 struct pci_dev *port = srv->port;
71 struct pci_dev *dev;
72
61 /* 73 /*
62 * Print status from downstream devices, not this root port or 74 * Print status from downstream devices, not this root port or
63 * downstream switch port. 75 * downstream switch port.
64 */ 76 */
65 down_read(&pci_bus_sem); 77 down_read(&pci_bus_sem);
66 list_for_each_entry(dev, &port->subordinate->devices, bus_list) 78 list_for_each_entry(dev, &port->subordinate->devices, bus_list)
67 __pcie_print_link_status(dev, false); 79 pcie_report_downtraining(dev);
68 up_read(&pci_bus_sem); 80 up_read(&pci_bus_sem);
69 81
70 pcie_update_link_speed(port->subordinate, link_status);
71 pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
72 return IRQ_HANDLED; 82 return IRQ_HANDLED;
73} 83}
74 84
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
80 if (!pcie_link_bandwidth_notification_supported(srv->port)) 90 if (!pcie_link_bandwidth_notification_supported(srv->port))
81 return -ENODEV; 91 return -ENODEV;
82 92
83 ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, 93 ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
94 pcie_bw_notification_handler,
84 IRQF_SHARED, "PCIe BW notif", srv); 95 IRQF_SHARED, "PCIe BW notif", srv);
85 if (ret) 96 if (ret)
86 return ret; 97 return ret;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2ec0df04e0dc..7e12d0163863 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2388 return dev; 2388 return dev;
2389} 2389}
2390 2390
2391static void pcie_report_downtraining(struct pci_dev *dev) 2391void pcie_report_downtraining(struct pci_dev *dev)
2392{ 2392{
2393 if (!pci_is_pcie(dev)) 2393 if (!pci_is_pcie(dev))
2394 return; 2394 return;
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097b43df..4bbd9ede38c8 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
485 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); 485 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
486 int new_mode; 486 int new_mode;
487 487
488 if (phy->index != 0) 488 if (phy->index != 0) {
489 if (mode == PHY_MODE_USB_HOST)
490 return 0;
489 return -EINVAL; 491 return -EINVAL;
492 }
490 493
491 switch (mode) { 494 switch (mode) {
492 case PHY_MODE_USB_HOST: 495 case PHY_MODE_USB_HOST:
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 900c7073c46f..71308766e891 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -440,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
440 440
441 ret = cros_ec_create_pdinfo(debug_info); 441 ret = cros_ec_create_pdinfo(debug_info);
442 if (ret) 442 if (ret)
443 goto remove_debugfs; 443 goto remove_log;
444 444
445 ec->debug_info = debug_info; 445 ec->debug_info = debug_info;
446 446
@@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
448 448
449 return 0; 449 return 0;
450 450
451remove_log:
452 cros_ec_cleanup_console_log(debug_info);
451remove_debugfs: 453remove_debugfs:
452 debugfs_remove_recursive(debug_info->dir); 454 debugfs_remove_recursive(debug_info->dir);
453 return ret; 455 return ret;
@@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
467{ 469{
468 struct cros_ec_dev *ec = dev_get_drvdata(dev); 470 struct cros_ec_dev *ec = dev_get_drvdata(dev);
469 471
470 cancel_delayed_work_sync(&ec->debug_info->log_poll_work); 472 if (ec->debug_info->log_buffer.buf)
473 cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
471 474
472 return 0; 475 return 0;
473} 476}
@@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
476{ 479{
477 struct cros_ec_dev *ec = dev_get_drvdata(dev); 480 struct cros_ec_dev *ec = dev_get_drvdata(dev);
478 481
479 schedule_delayed_work(&ec->debug_info->log_poll_work, 0); 482 if (ec->debug_info->log_buffer.buf)
483 schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
480 484
481 return 0; 485 return 0;
482} 486}
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index f6ff29a11f1a..14355668ddfa 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
223 msg->command, msg->type, msg->flags, msg->response_size, 223 msg->command, msg->type, msg->flags, msg->response_size,
224 msg->request_size); 224 msg->request_size);
225 225
226 mutex_lock(&ec->mailbox_lock);
226 /* Prepare request packet */ 227 /* Prepare request packet */
227 rq = ec->data_buffer; 228 rq = ec->data_buffer;
228 wilco_ec_prepare(msg, rq); 229 wilco_ec_prepare(msg, rq);
229 230
230 mutex_lock(&ec->mailbox_lock);
231 ret = wilco_ec_transfer(ec, msg, rq); 231 ret = wilco_ec_transfer(ec, msg, rq);
232 mutex_unlock(&ec->mailbox_lock); 232 mutex_unlock(&ec->mailbox_lock);
233 233
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4159c63a5fd2..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
24#include <asm/crw.h> 24#include <asm/crw.h>
25#include <asm/isc.h> 25#include <asm/isc.h>
26#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
27#include <asm/ap.h>
27 28
28#include "css.h" 29#include "css.h"
29#include "cio.h" 30#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
586 " failed (rc=%d).\n", ret); 587 " failed (rc=%d).\n", ret);
587} 588}
588 589
590static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
591{
592 CIO_CRW_EVENT(3, "chsc: ap config changed\n");
593 if (sei_area->rs != 5)
594 return;
595
596 ap_bus_cfg_chg();
597}
598
589static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 599static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
590{ 600{
591 switch (sei_area->cc) { 601 switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
612 case 2: /* i/o resource accessibility */ 622 case 2: /* i/o resource accessibility */
613 chsc_process_sei_res_acc(sei_area); 623 chsc_process_sei_res_acc(sei_area);
614 break; 624 break;
625 case 3: /* ap config changed */
626 chsc_process_sei_ap_cfg_chg(sei_area);
627 break;
615 case 7: /* channel-path-availability information */ 628 case 7: /* channel-path-availability information */
616 chsc_process_sei_chp_avail(sei_area); 629 chsc_process_sei_chp_avail(sei_area);
617 break; 630 break;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
72{ 72{
73 struct vfio_ccw_private *private; 73 struct vfio_ccw_private *private;
74 struct irb *irb; 74 struct irb *irb;
75 bool is_final;
75 76
76 private = container_of(work, struct vfio_ccw_private, io_work); 77 private = container_of(work, struct vfio_ccw_private, io_work);
77 irb = &private->irb; 78 irb = &private->irb;
78 79
80 is_final = !(scsw_actl(&irb->scsw) &
81 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
79 if (scsw_is_solicited(&irb->scsw)) { 82 if (scsw_is_solicited(&irb->scsw)) {
80 cp_update_scsw(&private->cp, &irb->scsw); 83 cp_update_scsw(&private->cp, &irb->scsw);
81 cp_free(&private->cp); 84 if (is_final)
85 cp_free(&private->cp);
82 } 86 }
83 memcpy(private->io_region->irb_area, irb, sizeof(*irb)); 87 memcpy(private->io_region->irb_area, irb, sizeof(*irb));
84 88
85 if (private->io_trigger) 89 if (private->io_trigger)
86 eventfd_signal(private->io_trigger, 1); 90 eventfd_signal(private->io_trigger, 1);
87 91
88 if (private->mdev) 92 if (private->mdev && is_final)
89 private->state = VFIO_CCW_STATE_IDLE; 93 private->state = VFIO_CCW_STATE_IDLE;
90} 94}
91 95
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
810 struct ap_device *ap_dev = to_ap_dev(dev); 810 struct ap_device *ap_dev = to_ap_dev(dev);
811 struct ap_driver *ap_drv = ap_dev->drv; 811 struct ap_driver *ap_drv = ap_dev->drv;
812 812
813 /* prepare ap queue device removal */
813 if (is_queue_dev(dev)) 814 if (is_queue_dev(dev))
814 ap_queue_remove(to_ap_queue(dev)); 815 ap_queue_prepare_remove(to_ap_queue(dev));
816
817 /* driver's chance to clean up gracefully */
815 if (ap_drv->remove) 818 if (ap_drv->remove)
816 ap_drv->remove(ap_dev); 819 ap_drv->remove(ap_dev);
817 820
821 /* now do the ap queue device remove */
822 if (is_queue_dev(dev))
823 ap_queue_remove(to_ap_queue(dev));
824
818 /* Remove queue/card from list of active queues/cards */ 825 /* Remove queue/card from list of active queues/cards */
819 spin_lock_bh(&ap_list_lock); 826 spin_lock_bh(&ap_list_lock);
820 if (is_card_dev(dev)) 827 if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
861EXPORT_SYMBOL(ap_bus_force_rescan); 868EXPORT_SYMBOL(ap_bus_force_rescan);
862 869
863/* 870/*
871* A config change has happened, force an ap bus rescan.
872*/
873void ap_bus_cfg_chg(void)
874{
875 AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
876
877 ap_bus_force_rescan();
878}
879
880/*
864 * hex2bitmap() - parse hex mask string and set bitmap. 881 * hex2bitmap() - parse hex mask string and set bitmap.
865 * Valid strings are "0x012345678" with at least one valid hex number. 882 * Valid strings are "0x012345678" with at least one valid hex number.
866 * Rest of the bitmap to the right is padded with 0. No spaces allowed 883 * Rest of the bitmap to the right is padded with 0. No spaces allowed
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
91 AP_STATE_WORKING, 91 AP_STATE_WORKING,
92 AP_STATE_QUEUE_FULL, 92 AP_STATE_QUEUE_FULL,
93 AP_STATE_SUSPEND_WAIT, 93 AP_STATE_SUSPEND_WAIT,
94 AP_STATE_REMOVE, /* about to be removed from driver */
94 AP_STATE_UNBOUND, /* momentary not bound to a driver */ 95 AP_STATE_UNBOUND, /* momentary not bound to a driver */
95 AP_STATE_BORKED, /* broken */ 96 AP_STATE_BORKED, /* broken */
96 NR_AP_STATES 97 NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
252 253
253void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 254void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
254struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 255struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
256void ap_queue_prepare_remove(struct ap_queue *aq);
255void ap_queue_remove(struct ap_queue *aq); 257void ap_queue_remove(struct ap_queue *aq);
256void ap_queue_suspend(struct ap_device *ap_dev); 258void ap_queue_suspend(struct ap_device *ap_dev);
257void ap_queue_resume(struct ap_device *ap_dev); 259void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..6a340f2c3556 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
420 [AP_EVENT_POLL] = ap_sm_suspend_read, 420 [AP_EVENT_POLL] = ap_sm_suspend_read,
421 [AP_EVENT_TIMEOUT] = ap_sm_nop, 421 [AP_EVENT_TIMEOUT] = ap_sm_nop,
422 }, 422 },
423 [AP_STATE_REMOVE] = {
424 [AP_EVENT_POLL] = ap_sm_nop,
425 [AP_EVENT_TIMEOUT] = ap_sm_nop,
426 },
423 [AP_STATE_UNBOUND] = { 427 [AP_STATE_UNBOUND] = {
424 [AP_EVENT_POLL] = ap_sm_nop, 428 [AP_EVENT_POLL] = ap_sm_nop,
425 [AP_EVENT_TIMEOUT] = ap_sm_nop, 429 [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
740} 744}
741EXPORT_SYMBOL(ap_flush_queue); 745EXPORT_SYMBOL(ap_flush_queue);
742 746
743void ap_queue_remove(struct ap_queue *aq) 747void ap_queue_prepare_remove(struct ap_queue *aq)
744{ 748{
745 ap_flush_queue(aq); 749 spin_lock_bh(&aq->lock);
750 /* flush queue */
751 __ap_flush_queue(aq);
752 /* set REMOVE state to prevent new messages are queued in */
753 aq->state = AP_STATE_REMOVE;
746 del_timer_sync(&aq->timeout); 754 del_timer_sync(&aq->timeout);
755 spin_unlock_bh(&aq->lock);
756}
747 757
748 /* reset with zero, also clears irq registration */ 758void ap_queue_remove(struct ap_queue *aq)
759{
760 /*
761 * all messages have been flushed and the state is
762 * AP_STATE_REMOVE. Now reset with zero which also
763 * clears the irq registration and move the state
764 * to AP_STATE_UNBOUND to signal that this queue
765 * is not used by any driver currently.
766 */
749 spin_lock_bh(&aq->lock); 767 spin_lock_bh(&aq->lock);
750 ap_zapq(aq->qid); 768 ap_zapq(aq->qid);
751 aq->state = AP_STATE_UNBOUND; 769 aq->state = AP_STATE_UNBOUND;
752 spin_unlock_bh(&aq->lock); 770 spin_unlock_bh(&aq->lock);
753} 771}
754EXPORT_SYMBOL(ap_queue_remove);
755 772
756void ap_queue_reinit_state(struct ap_queue *aq) 773void ap_queue_reinit_state(struct ap_queue *aq)
757{ 774{
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
760 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 777 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
761 spin_unlock_bh(&aq->lock); 778 spin_unlock_bh(&aq->lock);
762} 779}
763EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
586 586
587static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 587static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
588 struct zcrypt_queue *zq, 588 struct zcrypt_queue *zq,
589 struct module **pmod,
589 unsigned int weight) 590 unsigned int weight)
590{ 591{
591 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 592 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
595 atomic_add(weight, &zc->load); 596 atomic_add(weight, &zc->load);
596 atomic_add(weight, &zq->load); 597 atomic_add(weight, &zq->load);
597 zq->request_count++; 598 zq->request_count++;
599 *pmod = zq->queue->ap_dev.drv->driver.owner;
598 return zq; 600 return zq;
599} 601}
600 602
601static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 603static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
602 struct zcrypt_queue *zq, 604 struct zcrypt_queue *zq,
605 struct module *mod,
603 unsigned int weight) 606 unsigned int weight)
604{ 607{
605 struct module *mod = zq->queue->ap_dev.drv->driver.owner;
606
607 zq->request_count--; 608 zq->request_count--;
608 atomic_sub(weight, &zc->load); 609 atomic_sub(weight, &zc->load);
609 atomic_sub(weight, &zq->load); 610 atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
653 unsigned int weight, pref_weight; 654 unsigned int weight, pref_weight;
654 unsigned int func_code; 655 unsigned int func_code;
655 int qid = 0, rc = -ENODEV; 656 int qid = 0, rc = -ENODEV;
657 struct module *mod;
656 658
657 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 659 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
658 660
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
706 pref_weight = weight; 708 pref_weight = weight;
707 } 709 }
708 } 710 }
709 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 711 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
710 spin_unlock(&zcrypt_list_lock); 712 spin_unlock(&zcrypt_list_lock);
711 713
712 if (!pref_zq) { 714 if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
718 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 720 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
719 721
720 spin_lock(&zcrypt_list_lock); 722 spin_lock(&zcrypt_list_lock);
721 zcrypt_drop_queue(pref_zc, pref_zq, weight); 723 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
722 spin_unlock(&zcrypt_list_lock); 724 spin_unlock(&zcrypt_list_lock);
723 725
724out: 726out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
735 unsigned int weight, pref_weight; 737 unsigned int weight, pref_weight;
736 unsigned int func_code; 738 unsigned int func_code;
737 int qid = 0, rc = -ENODEV; 739 int qid = 0, rc = -ENODEV;
740 struct module *mod;
738 741
739 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 742 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
740 743
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
788 pref_weight = weight; 791 pref_weight = weight;
789 } 792 }
790 } 793 }
791 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 794 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
792 spin_unlock(&zcrypt_list_lock); 795 spin_unlock(&zcrypt_list_lock);
793 796
794 if (!pref_zq) { 797 if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
800 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 803 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
801 804
802 spin_lock(&zcrypt_list_lock); 805 spin_lock(&zcrypt_list_lock);
803 zcrypt_drop_queue(pref_zc, pref_zq, weight); 806 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
804 spin_unlock(&zcrypt_list_lock); 807 spin_unlock(&zcrypt_list_lock);
805 808
806out: 809out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
819 unsigned int func_code; 822 unsigned int func_code;
820 unsigned short *domain; 823 unsigned short *domain;
821 int qid = 0, rc = -ENODEV; 824 int qid = 0, rc = -ENODEV;
825 struct module *mod;
822 826
823 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 827 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
824 828
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
865 pref_weight = weight; 869 pref_weight = weight;
866 } 870 }
867 } 871 }
868 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 872 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
869 spin_unlock(&zcrypt_list_lock); 873 spin_unlock(&zcrypt_list_lock);
870 874
871 if (!pref_zq) { 875 if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
881 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 885 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
882 886
883 spin_lock(&zcrypt_list_lock); 887 spin_lock(&zcrypt_list_lock);
884 zcrypt_drop_queue(pref_zc, pref_zq, weight); 888 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
885 spin_unlock(&zcrypt_list_lock); 889 spin_unlock(&zcrypt_list_lock);
886 890
887out: 891out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
932 unsigned int func_code; 936 unsigned int func_code;
933 struct ap_message ap_msg; 937 struct ap_message ap_msg;
934 int qid = 0, rc = -ENODEV; 938 int qid = 0, rc = -ENODEV;
939 struct module *mod;
935 940
936 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 941 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
937 942
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
1000 pref_weight = weight; 1005 pref_weight = weight;
1001 } 1006 }
1002 } 1007 }
1003 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1008 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
1004 spin_unlock(&zcrypt_list_lock); 1009 spin_unlock(&zcrypt_list_lock);
1005 1010
1006 if (!pref_zq) { 1011 if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
1012 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 1017 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
1013 1018
1014 spin_lock(&zcrypt_list_lock); 1019 spin_lock(&zcrypt_list_lock);
1015 zcrypt_drop_queue(pref_zc, pref_zq, weight); 1020 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
1016 spin_unlock(&zcrypt_list_lock); 1021 spin_unlock(&zcrypt_list_lock);
1017 1022
1018out_free: 1023out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
1033 struct ap_message ap_msg; 1038 struct ap_message ap_msg;
1034 unsigned int domain; 1039 unsigned int domain;
1035 int qid = 0, rc = -ENODEV; 1040 int qid = 0, rc = -ENODEV;
1041 struct module *mod;
1036 1042
1037 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1043 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1038 1044
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
1064 pref_weight = weight; 1070 pref_weight = weight;
1065 } 1071 }
1066 } 1072 }
1067 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1073 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
1068 spin_unlock(&zcrypt_list_lock); 1074 spin_unlock(&zcrypt_list_lock);
1069 1075
1070 if (!pref_zq) { 1076 if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
1076 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 1082 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1077 1083
1078 spin_lock(&zcrypt_list_lock); 1084 spin_lock(&zcrypt_list_lock);
1079 zcrypt_drop_queue(pref_zc, pref_zq, weight); 1085 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
1080 spin_unlock(&zcrypt_list_lock); 1086 spin_unlock(&zcrypt_list_lock);
1081 1087
1082out: 1088out:
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 197b0f5b63e7..44bd6f04c145 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1150 1150
1151static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) 1151static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1152{ 1152{
1153 struct sk_buff *skb;
1154
1153 /* release may never happen from within CQ tasklet scope */ 1155 /* release may never happen from within CQ tasklet scope */
1154 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); 1156 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1155 1157
1156 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) 1158 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1157 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); 1159 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
1158 1160
1159 __skb_queue_purge(&buf->skb_list); 1161 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
1162 consume_skb(skb);
1160} 1163}
1161 1164
1162static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 1165static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8efb2e8ff8f4..c3067fd3bd9e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
629 } /* else fall through */ 629 } /* else fall through */
630 630
631 QETH_TXQ_STAT_INC(queue, tx_dropped); 631 QETH_TXQ_STAT_INC(queue, tx_dropped);
632 QETH_TXQ_STAT_INC(queue, tx_errors); 632 kfree_skb(skb);
633 dev_kfree_skb_any(skb);
634 netif_wake_queue(dev); 633 netif_wake_queue(dev);
635 return NETDEV_TX_OK; 634 return NETDEV_TX_OK;
636} 635}
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
645 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 644 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
646 int rc; 645 int rc;
647 646
647 qeth_l2_vnicc_set_defaults(card);
648
648 if (gdev->dev.type == &qeth_generic_devtype) { 649 if (gdev->dev.type == &qeth_generic_devtype) {
649 rc = qeth_l2_create_device_attributes(&gdev->dev); 650 rc = qeth_l2_create_device_attributes(&gdev->dev);
650 if (rc) 651 if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
652 } 653 }
653 654
654 hash_init(card->mac_htable); 655 hash_init(card->mac_htable);
655 card->info.hwtrap = 0;
656 qeth_l2_vnicc_set_defaults(card);
657 return 0; 656 return 0;
658} 657}
659 658
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7e68d9d16859..53712cf26406 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
2096 2096
2097tx_drop: 2097tx_drop:
2098 QETH_TXQ_STAT_INC(queue, tx_dropped); 2098 QETH_TXQ_STAT_INC(queue, tx_dropped);
2099 QETH_TXQ_STAT_INC(queue, tx_errors); 2099 kfree_skb(skb);
2100 dev_kfree_skb_any(skb);
2101 netif_wake_queue(dev); 2100 netif_wake_queue(dev);
2102 return NETDEV_TX_OK; 2101 return NETDEV_TX_OK;
2103} 2102}
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
2253 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2252 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2254 int rc; 2253 int rc;
2255 2254
2255 hash_init(card->ip_htable);
2256
2256 if (gdev->dev.type == &qeth_generic_devtype) { 2257 if (gdev->dev.type == &qeth_generic_devtype) {
2257 rc = qeth_l3_create_device_attributes(&gdev->dev); 2258 rc = qeth_l3_create_device_attributes(&gdev->dev);
2258 if (rc) 2259 if (rc)
2259 return rc; 2260 return rc;
2260 } 2261 }
2261 hash_init(card->ip_htable); 2262
2262 hash_init(card->ip_mc_htable); 2263 hash_init(card->ip_mc_htable);
2263 card->info.hwtrap = 0;
2264 return 0; 2264 return 0;
2265} 2265}
2266 2266
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a64680d5b..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
624 add_timer(&erp_action->timer); 624 add_timer(&erp_action->timer);
625} 625}
626 626
627void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
628 int clear, char *dbftag)
629{
630 unsigned long flags;
631 struct zfcp_port *port;
632
633 write_lock_irqsave(&adapter->erp_lock, flags);
634 read_lock(&adapter->port_list_lock);
635 list_for_each_entry(port, &adapter->port_list, list)
636 _zfcp_erp_port_forced_reopen(port, clear, dbftag);
637 read_unlock(&adapter->port_list_lock);
638 write_unlock_irqrestore(&adapter->erp_lock, flags);
639}
640
627static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 641static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
628 int clear, char *dbftag) 642 int clear, char *dbftag)
629{ 643{
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1341 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); 1355 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
1342 int lun_status; 1356 int lun_status;
1343 1357
1358 if (sdev->sdev_state == SDEV_DEL ||
1359 sdev->sdev_state == SDEV_CANCEL)
1360 continue;
1344 if (zsdev->port != port) 1361 if (zsdev->port != port)
1345 continue; 1362 continue;
1346 /* LUN under port of interest */ 1363 /* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b0b21b..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
70 char *dbftag); 70 char *dbftag);
71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); 71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); 72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
73extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
74 int clear, char *dbftag);
73extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); 75extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
74extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); 76extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
75extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); 77extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e3abbe..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
239 list_for_each_entry(port, &adapter->port_list, list) { 239 list_for_each_entry(port, &adapter->port_list, list) {
240 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) 240 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
241 zfcp_fc_test_link(port); 241 zfcp_fc_test_link(port);
242 if (!port->d_id)
243 zfcp_erp_port_reopen(port,
244 ZFCP_STATUS_COMMON_ERP_FAILED,
245 "fcrscn1");
246 } 242 }
247 read_unlock_irqrestore(&adapter->port_list_lock, flags); 243 read_unlock_irqrestore(&adapter->port_list_lock, flags);
248} 244}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
250static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) 246static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
251{ 247{
252 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; 248 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
249 struct zfcp_adapter *adapter = fsf_req->adapter;
253 struct fc_els_rscn *head; 250 struct fc_els_rscn *head;
254 struct fc_els_rscn_page *page; 251 struct fc_els_rscn_page *page;
255 u16 i; 252 u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
263 no_entries = be16_to_cpu(head->rscn_plen) / 260 no_entries = be16_to_cpu(head->rscn_plen) /
264 sizeof(struct fc_els_rscn_page); 261 sizeof(struct fc_els_rscn_page);
265 262
263 if (no_entries > 1) {
264 /* handle failed ports */
265 unsigned long flags;
266 struct zfcp_port *port;
267
268 read_lock_irqsave(&adapter->port_list_lock, flags);
269 list_for_each_entry(port, &adapter->port_list, list) {
270 if (port->d_id)
271 continue;
272 zfcp_erp_port_reopen(port,
273 ZFCP_STATUS_COMMON_ERP_FAILED,
274 "fcrscn1");
275 }
276 read_unlock_irqrestore(&adapter->port_list_lock, flags);
277 }
278
266 for (i = 1; i < no_entries; i++) { 279 for (i = 1; i < no_entries; i++) {
267 /* skip head and start with 1st element */ 280 /* skip head and start with 1st element */
268 page++; 281 page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07c5222..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
368 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 368 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
369 int ret = SUCCESS, fc_ret; 369 int ret = SUCCESS, fc_ret;
370 370
371 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
372 zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
373 zfcp_erp_wait(adapter);
374 }
371 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); 375 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
372 zfcp_erp_wait(adapter); 376 zfcp_erp_wait(adapter);
373 fc_ret = fc_block_scsi_eh(scpnt); 377 fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 1df5171594b8..11fb68d7e60d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2640 return capacity; 2640 return capacity;
2641} 2641}
2642 2642
2643static inline int aac_pci_offline(struct aac_dev *dev)
2644{
2645 return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
2646}
2647
2643static inline int aac_adapter_check_health(struct aac_dev *dev) 2648static inline int aac_adapter_check_health(struct aac_dev *dev)
2644{ 2649{
2645 if (unlikely(pci_channel_offline(dev->pdev))) 2650 if (unlikely(aac_pci_offline(dev)))
2646 return -1; 2651 return -1;
2647 2652
2648 return (dev)->a_ops.adapter_check_health(dev); 2653 return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e67e032936ef..78430a7b294c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
672 return -ETIMEDOUT; 672 return -ETIMEDOUT;
673 } 673 }
674 674
675 if (unlikely(pci_channel_offline(dev->pdev))) 675 if (unlikely(aac_pci_offline(dev)))
676 return -EFAULT; 676 return -EFAULT;
677 677
678 if ((blink = aac_adapter_check_health(dev)) > 0) { 678 if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
772 772
773 spin_unlock_irqrestore(&fibptr->event_lock, flags); 773 spin_unlock_irqrestore(&fibptr->event_lock, flags);
774 774
775 if (unlikely(pci_channel_offline(dev->pdev))) 775 if (unlikely(aac_pci_offline(dev)))
776 return -EFAULT; 776 return -EFAULT;
777 777
778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 3c3cf89f713f..14bac4966c87 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1801,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1801 } 1801 }
1802 hisi_sas_dereg_device(hisi_hba, device); 1802 hisi_sas_dereg_device(hisi_hba, device);
1803 1803
1804 if (dev_is_sata(device)) {
1805 rc = hisi_sas_softreset_ata_disk(device);
1806 if (rc)
1807 return TMF_RESP_FUNC_FAILED;
1808 }
1809
1804 rc = hisi_sas_debug_I_T_nexus_reset(device); 1810 rc = hisi_sas_debug_I_T_nexus_reset(device);
1805 1811
1806 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1812 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f131433..3ad997ac3510 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
139 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, 139 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
140 140
141 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, 141 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
142 { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
142}; 143};
143 144
144static void ibmvfc_npiv_login(struct ibmvfc_host *); 145static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1494 if (rsp->flags & FCP_RSP_LEN_VALID) 1495 if (rsp->flags & FCP_RSP_LEN_VALID)
1495 rsp_code = rsp->data.info.rsp_code; 1496 rsp_code = rsp->data.info.rsp_code;
1496 1497
1497 scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " 1498 scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1498 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", 1499 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1499 cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, 1500 cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1500 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); 1501 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1501} 1502}
1502 1503
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2022 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " 2023 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2023 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, 2024 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2024 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), 2025 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2025 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, 2026 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2026 fc_rsp->scsi_status); 2027 fc_rsp->scsi_status);
2027 rsp_rc = -EIO; 2028 rsp_rc = -EIO;
2028 } else 2029 } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2381 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " 2382 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2382 "flags: %x fcp_rsp: %x, scsi_status: %x\n", 2383 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2383 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), 2384 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2384 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, 2385 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2385 fc_rsp->scsi_status); 2386 fc_rsp->scsi_status);
2386 rsp_rc = -EIO; 2387 rsp_rc = -EIO;
2387 } else 2388 } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2756 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2756 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2757 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2757 /* We need to re-setup the interpartition connection */ 2758 /* We need to re-setup the interpartition connection */
2758 dev_info(vhost->dev, "Re-enabling adapter\n"); 2759 dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
2759 vhost->client_migrated = 1; 2760 vhost->client_migrated = 1;
2760 ibmvfc_purge_requests(vhost, DID_REQUEUE); 2761 ibmvfc_purge_requests(vhost, DID_REQUEUE);
2761 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); 2762 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2762 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); 2763 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2763 } else { 2764 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
2764 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); 2765 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
2765 ibmvfc_purge_requests(vhost, DID_ERROR); 2766 ibmvfc_purge_requests(vhost, DID_ERROR);
2766 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); 2767 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2767 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); 2768 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2769 } else {
2770 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
2768 } 2771 }
2769 return; 2772 return;
2770 case IBMVFC_CRQ_CMD_RSP: 2773 case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3348 3351
3349 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", 3352 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3350 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3353 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3351 rsp->status, rsp->error, status); 3354 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3352 break; 3355 break;
3353 } 3356 }
3354 3357
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3446 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3449 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3447 3450
3448 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3451 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3449 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error, 3452 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3450 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type, 3453 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3451 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status); 3454 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3455 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
3452 break; 3456 break;
3453 } 3457 }
3454 3458
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3619 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; 3623 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
3620 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3624 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3621 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), 3625 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
3622 mad->iu.status, mad->iu.error, 3626 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
3623 ibmvfc_get_fc_type(fc_reason), fc_reason, 3627 ibmvfc_get_fc_type(fc_reason), fc_reason,
3624 ibmvfc_get_ls_explain(fc_explain), fc_explain, status); 3628 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
3625 break; 3629 break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3831 3835
3832 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3836 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3833 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3837 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3834 rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), 3838 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3835 rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), 3839 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3836 rsp->fc_explain, status); 3840 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
3841 status);
3837 break; 3842 break;
3838 } 3843 }
3839 3844
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3959 level += ibmvfc_retry_host_init(vhost); 3964 level += ibmvfc_retry_host_init(vhost);
3960 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", 3965 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3961 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3966 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3962 rsp->status, rsp->error); 3967 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
3963 break; 3968 break;
3964 case IBMVFC_MAD_DRIVER_FAILED: 3969 case IBMVFC_MAD_DRIVER_FAILED:
3965 break; 3970 break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4024 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 4029 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4025 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", 4030 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4026 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 4031 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4027 rsp->status, rsp->error); 4032 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4028 ibmvfc_free_event(evt); 4033 ibmvfc_free_event(evt);
4029 return; 4034 return;
4030 case IBMVFC_MAD_CRQ_ERROR: 4035 case IBMVFC_MAD_CRQ_ERROR:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c4a9a8..459cc288ba1d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
78 IBMVFC_CRQ_XPORT_EVENT = 0xFF, 78 IBMVFC_CRQ_XPORT_EVENT = 0xFF,
79}; 79};
80 80
81enum ibmvfc_crq_format { 81enum ibmvfc_crq_init_msg {
82 IBMVFC_CRQ_INIT = 0x01, 82 IBMVFC_CRQ_INIT = 0x01,
83 IBMVFC_CRQ_INIT_COMPLETE = 0x02, 83 IBMVFC_CRQ_INIT_COMPLETE = 0x02,
84};
85
86enum ibmvfc_crq_xport_evts {
87 IBMVFC_PARTNER_FAILED = 0x01,
88 IBMVFC_PARTNER_DEREGISTER = 0x02,
84 IBMVFC_PARTITION_MIGRATED = 0x06, 89 IBMVFC_PARTITION_MIGRATED = 0x06,
85}; 90};
86 91
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 1135e74646e2..8cec5230fe31 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -96,6 +96,7 @@ static int client_reserve = 1;
96static char partition_name[96] = "UNKNOWN"; 96static char partition_name[96] = "UNKNOWN";
97static unsigned int partition_number = -1; 97static unsigned int partition_number = -1;
98static LIST_HEAD(ibmvscsi_head); 98static LIST_HEAD(ibmvscsi_head);
99static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
99 100
100static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
101 102
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2270 } 2271 }
2271 2272
2272 dev_set_drvdata(&vdev->dev, hostdata); 2273 dev_set_drvdata(&vdev->dev, hostdata);
2274 spin_lock(&ibmvscsi_driver_lock);
2273 list_add_tail(&hostdata->host_list, &ibmvscsi_head); 2275 list_add_tail(&hostdata->host_list, &ibmvscsi_head);
2276 spin_unlock(&ibmvscsi_driver_lock);
2274 return 0; 2277 return 0;
2275 2278
2276 add_srp_port_failed: 2279 add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2292static int ibmvscsi_remove(struct vio_dev *vdev) 2295static int ibmvscsi_remove(struct vio_dev *vdev)
2293{ 2296{
2294 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); 2297 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2295 list_del(&hostdata->host_list); 2298 unsigned long flags;
2296 unmap_persist_bufs(hostdata); 2299
2300 srp_remove_host(hostdata->host);
2301 scsi_remove_host(hostdata->host);
2302
2303 purge_requests(hostdata, DID_ERROR);
2304
2305 spin_lock_irqsave(hostdata->host->host_lock, flags);
2297 release_event_pool(&hostdata->pool, hostdata); 2306 release_event_pool(&hostdata->pool, hostdata);
2307 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2308
2298 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, 2309 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
2299 max_events); 2310 max_events);
2300 2311
2301 kthread_stop(hostdata->work_thread); 2312 kthread_stop(hostdata->work_thread);
2302 srp_remove_host(hostdata->host); 2313 unmap_persist_bufs(hostdata);
2303 scsi_remove_host(hostdata->host); 2314
2315 spin_lock(&ibmvscsi_driver_lock);
2316 list_del(&hostdata->host_list);
2317 spin_unlock(&ibmvscsi_driver_lock);
2318
2304 scsi_host_put(hostdata->host); 2319 scsi_host_put(hostdata->host);
2305 2320
2306 return 0; 2321 return 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e57774472e75..1d8c584ec1e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3281 3281
3282 if (smid < ioc->hi_priority_smid) { 3282 if (smid < ioc->hi_priority_smid) {
3283 struct scsiio_tracker *st; 3283 struct scsiio_tracker *st;
3284 void *request;
3284 3285
3285 st = _get_st_from_smid(ioc, smid); 3286 st = _get_st_from_smid(ioc, smid);
3286 if (!st) { 3287 if (!st) {
3287 _base_recovery_check(ioc); 3288 _base_recovery_check(ioc);
3288 return; 3289 return;
3289 } 3290 }
3291
3292 /* Clear MPI request frame */
3293 request = mpt3sas_base_get_msg_frame(ioc, smid);
3294 memset(request, 0, ioc->request_sz);
3295
3290 mpt3sas_base_clear_st(ioc, st); 3296 mpt3sas_base_clear_st(ioc, st);
3291 _base_recovery_check(ioc); 3297 _base_recovery_check(ioc);
3292 return; 3298 return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8bb5b8f9f4d2..1ccfbc7eebe0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1462{ 1462{
1463 struct scsi_cmnd *scmd = NULL; 1463 struct scsi_cmnd *scmd = NULL;
1464 struct scsiio_tracker *st; 1464 struct scsiio_tracker *st;
1465 Mpi25SCSIIORequest_t *mpi_request;
1465 1466
1466 if (smid > 0 && 1467 if (smid > 0 &&
1467 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { 1468 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1468 u32 unique_tag = smid - 1; 1469 u32 unique_tag = smid - 1;
1469 1470
1471 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1472
1473 /*
1474 * If SCSI IO request is outstanding at driver level then
1475 * DevHandle filed must be non-zero. If DevHandle is zero
1476 * then it means that this smid is free at driver level,
1477 * so return NULL.
1478 */
1479 if (!mpi_request->DevHandle)
1480 return scmd;
1481
1470 scmd = scsi_host_find_tag(ioc->shost, unique_tag); 1482 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1471 if (scmd) { 1483 if (scmd) {
1472 st = scsi_cmd_priv(scmd); 1484 st = scsi_cmd_priv(scmd);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 420045155ba0..0c700b140ce7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4991 if ((domain & 0xf0) == 0xf0) 4991 if ((domain & 0xf0) == 0xf0)
4992 continue; 4992 continue;
4993 4993
4994 /* Bypass if not same domain and area of adapter. */
4995 if (area && domain && ((area != vha->d_id.b.area) ||
4996 (domain != vha->d_id.b.domain)) &&
4997 (ha->current_topology == ISP_CFG_NL))
4998 continue;
4999
5000
4994 /* Bypass invalid local loop ID. */ 5001 /* Bypass invalid local loop ID. */
4995 if (loop_id > LAST_LOCAL_LOOP_ID) 5002 if (loop_id > LAST_LOCAL_LOOP_ID)
4996 continue; 5003 continue;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 677f82fdf56f..91f576d743fe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1517 goto eh_reset_failed; 1517 goto eh_reset_failed;
1518 } 1518 }
1519 err = 2; 1519 err = 2;
1520 if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1) 1520 if (do_reset(fcport, cmd->device->lun, 1)
1521 != QLA_SUCCESS) { 1521 != QLA_SUCCESS) {
1522 ql_log(ql_log_warn, vha, 0x800c, 1522 ql_log(ql_log_warn, vha, 0x800c,
1523 "do_reset failed for cmd=%p.\n", cmd); 1523 "do_reset failed for cmd=%p.\n", cmd);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 16a18d5d856f..6e4f4931ae17 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3204 return -EINVAL; 3204 return -EINVAL;
3205 ep = iscsi_lookup_endpoint(transport_fd); 3205 ep = iscsi_lookup_endpoint(transport_fd);
3206 if (!ep)
3207 return -EINVAL;
3206 conn = cls_conn->dd_data; 3208 conn = cls_conn->dd_data;
3207 qla_conn = conn->dd_data; 3209 qla_conn = conn->dd_data;
3208 qla_conn->qla_ep = ep->dd_data; 3210 qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 20189675677a..601b9f1de267 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -585,10 +585,17 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
585 if (!blk_rq_is_scsi(req)) { 585 if (!blk_rq_is_scsi(req)) {
586 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); 586 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
587 cmd->flags &= ~SCMD_INITIALIZED; 587 cmd->flags &= ~SCMD_INITIALIZED;
588 destroy_rcu_head(&cmd->rcu);
589 } 588 }
590 589
591 /* 590 /*
591 * Calling rcu_barrier() is not necessary here because the
592 * SCSI error handler guarantees that the function called by
593 * call_rcu() has been called before scsi_end_request() is
594 * called.
595 */
596 destroy_rcu_head(&cmd->rcu);
597
598 /*
592 * In the MQ case the command gets freed by __blk_mq_end_request, 599 * In the MQ case the command gets freed by __blk_mq_end_request,
593 * so we have to do all cleanup that depends on it earlier. 600 * so we have to do all cleanup that depends on it earlier.
594 * 601 *
@@ -2541,8 +2548,10 @@ void scsi_device_resume(struct scsi_device *sdev)
2541 * device deleted during suspend) 2548 * device deleted during suspend)
2542 */ 2549 */
2543 mutex_lock(&sdev->state_mutex); 2550 mutex_lock(&sdev->state_mutex);
2544 sdev->quiesced_by = NULL; 2551 if (sdev->quiesced_by) {
2545 blk_clear_pm_only(sdev->request_queue); 2552 sdev->quiesced_by = NULL;
2553 blk_clear_pm_only(sdev->request_queue);
2554 }
2546 if (sdev->sdev_state == SDEV_QUIESCE) 2555 if (sdev->sdev_state == SDEV_QUIESCE)
2547 scsi_device_set_state(sdev, SDEV_RUNNING); 2556 scsi_device_set_state(sdev, SDEV_RUNNING);
2548 mutex_unlock(&sdev->state_mutex); 2557 mutex_unlock(&sdev->state_mutex);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040faed00..3b119ca0cc0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
771 771
772 mutex_lock(&sdev->state_mutex); 772 mutex_lock(&sdev->state_mutex);
773 ret = scsi_device_set_state(sdev, state); 773 ret = scsi_device_set_state(sdev, state);
774 /*
775 * If the device state changes to SDEV_RUNNING, we need to run
776 * the queue to avoid I/O hang.
777 */
778 if (ret == 0 && state == SDEV_RUNNING)
779 blk_mq_run_hw_queues(sdev->request_queue, true);
774 mutex_unlock(&sdev->state_mutex); 780 mutex_unlock(&sdev->state_mutex);
775 781
776 return ret == 0 ? count : -EINVAL; 782 return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0508831d6fb9..0a82e93566dc 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
2200 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); 2200 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
2201 /* flush running scans then delete devices */ 2201 /* flush running scans then delete devices */
2202 flush_work(&session->scan_work); 2202 flush_work(&session->scan_work);
2203 /* flush running unbind operations */
2204 flush_work(&session->unbind_work);
2203 __iscsi_unbind_session(&session->unbind_work); 2205 __iscsi_unbind_session(&session->unbind_work);
2204 2206
2205 /* hw iscsi may not have removed all connections from session */ 2207 /* hw iscsi may not have removed all connections from session */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 251db30d0882..2b2bc4b49d78 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1415 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 1415 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1416 } 1416 }
1417 1417
1418 /*
1419 * XXX and what if there are packets in flight and this close()
1420 * XXX is followed by a "rmmod sd_mod"?
1421 */
1422
1423 scsi_disk_put(sdkp); 1418 scsi_disk_put(sdkp);
1424} 1419}
1425 1420
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3076 unsigned int opt_xfer_bytes = 3071 unsigned int opt_xfer_bytes =
3077 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3072 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3078 3073
3074 if (sdkp->opt_xfer_blocks == 0)
3075 return false;
3076
3079 if (sdkp->opt_xfer_blocks > dev_max) { 3077 if (sdkp->opt_xfer_blocks > dev_max) {
3080 sd_first_printk(KERN_WARNING, sdkp, 3078 sd_first_printk(KERN_WARNING, sdkp,
3081 "Optimal transfer size %u logical blocks " \ 3079 "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
3505{ 3503{
3506 struct scsi_disk *sdkp = to_scsi_disk(dev); 3504 struct scsi_disk *sdkp = to_scsi_disk(dev);
3507 struct gendisk *disk = sdkp->disk; 3505 struct gendisk *disk = sdkp->disk;
3508 3506 struct request_queue *q = disk->queue;
3507
3509 ida_free(&sd_index_ida, sdkp->index); 3508 ida_free(&sd_index_ida, sdkp->index);
3510 3509
3510 /*
3511 * Wait until all requests that are in progress have completed.
3512 * This is necessary to avoid that e.g. scsi_end_request() crashes
3513 * due to clearing the disk->private_data pointer. Wait from inside
3514 * scsi_disk_release() instead of from sd_release() to avoid that
3515 * freezing and unfreezing the request queue affects user space I/O
3516 * in case multiple processes open a /dev/sd... node concurrently.
3517 */
3518 blk_mq_freeze_queue(q);
3519 blk_mq_unfreeze_queue(q);
3520
3511 disk->private_data = NULL; 3521 disk->private_data = NULL;
3512 put_disk(disk); 3522 put_disk(disk);
3513 put_device(&sdkp->device->sdev_gendev); 3523 put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 9351349cf0a9..1e0041ec8132 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -150,7 +150,12 @@ struct bcm2835_power {
150 150
151static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) 151static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
152{ 152{
153 u64 start = ktime_get_ns(); 153 u64 start;
154
155 if (!reg)
156 return 0;
157
158 start = ktime_get_ns();
154 159
155 /* Enable the module's async AXI bridges. */ 160 /* Enable the module's async AXI bridges. */
156 ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); 161 ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
165 170
166static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) 171static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
167{ 172{
168 u64 start = ktime_get_ns(); 173 u64 start;
174
175 if (!reg)
176 return 0;
177
178 start = ktime_get_ns();
169 179
170 /* Enable the module's async AXI bridges. */ 180 /* Enable the module's async AXI bridges. */
171 ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); 181 ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
475 } 485 }
476} 486}
477 487
478static void 488static int
479bcm2835_init_power_domain(struct bcm2835_power *power, 489bcm2835_init_power_domain(struct bcm2835_power *power,
480 int pd_xlate_index, const char *name) 490 int pd_xlate_index, const char *name)
481{ 491{
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
483 struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; 493 struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
484 494
485 dom->clk = devm_clk_get(dev->parent, name); 495 dom->clk = devm_clk_get(dev->parent, name);
496 if (IS_ERR(dom->clk)) {
497 int ret = PTR_ERR(dom->clk);
498
499 if (ret == -EPROBE_DEFER)
500 return ret;
501
502 /* Some domains don't have a clk, so make sure that we
503 * don't deref an error pointer later.
504 */
505 dom->clk = NULL;
506 }
486 507
487 dom->base.name = name; 508 dom->base.name = name;
488 dom->base.power_on = bcm2835_power_pd_power_on; 509 dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
495 pm_genpd_init(&dom->base, NULL, true); 516 pm_genpd_init(&dom->base, NULL, true);
496 517
497 power->pd_xlate.domains[pd_xlate_index] = &dom->base; 518 power->pd_xlate.domains[pd_xlate_index] = &dom->base;
519
520 return 0;
498} 521}
499 522
500/** bcm2835_reset_reset - Resets a block that has a reset line in the 523/** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
592 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, 615 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
593 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, 616 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
594 }; 617 };
595 int ret, i; 618 int ret = 0, i;
596 u32 id; 619 u32 id;
597 620
598 power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); 621 power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
619 642
620 power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); 643 power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
621 644
622 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) 645 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
623 bcm2835_init_power_domain(power, i, power_domain_names[i]); 646 ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
647 if (ret)
648 goto fail;
649 }
624 650
625 for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { 651 for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
626 pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, 652 pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
634 660
635 ret = devm_reset_controller_register(dev, &power->reset); 661 ret = devm_reset_controller_register(dev, &power->reset);
636 if (ret) 662 if (ret)
637 return ret; 663 goto fail;
638 664
639 of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); 665 of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
640 666
641 dev_info(dev, "Broadcom BCM2835 power domains driver"); 667 dev_info(dev, "Broadcom BCM2835 power domains driver");
642 return 0; 668 return 0;
669
670fail:
671 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
672 struct generic_pm_domain *dom = &power->domains[i].base;
673
674 if (dom->name)
675 pm_genpd_remove(dom);
676 }
677 return ret;
643} 678}
644 679
645static int bcm2835_power_remove(struct platform_device *pdev) 680static int bcm2835_power_remove(struct platform_device *pdev)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b96cfe4..62951e836cbc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
114 114
115source "drivers/staging/mt7621-mmc/Kconfig" 115source "drivers/staging/mt7621-mmc/Kconfig"
116 116
117source "drivers/staging/mt7621-eth/Kconfig"
118
119source "drivers/staging/mt7621-dts/Kconfig" 117source "drivers/staging/mt7621-dts/Kconfig"
120 118
121source "drivers/staging/gasket/Kconfig" 119source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce13ff4..d1b17ddcd354 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
47obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ 47obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
48obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ 48obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
49obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ 49obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
50obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
51obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ 50obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
52obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ 51obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
53obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ 52obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 687537203d9c..d9725888af6f 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4config XIL_AXIS_FIFO 4config XIL_AXIS_FIFO
5 tristate "Xilinx AXI-Stream FIFO IP core driver" 5 tristate "Xilinx AXI-Stream FIFO IP core driver"
6 depends on OF
6 default n 7 default n
7 help 8 help
8 This adds support for the Xilinx AXI-Stream 9 This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569cfca5d..0dff1ac057cd 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
1001 unsigned int mask); 1001 unsigned int mask);
1002unsigned int comedi_dio_update_state(struct comedi_subdevice *s, 1002unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
1003 unsigned int *data); 1003 unsigned int *data);
1004unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
1005 struct comedi_cmd *cmd);
1004unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); 1006unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
1005unsigned int comedi_nscans_left(struct comedi_subdevice *s, 1007unsigned int comedi_nscans_left(struct comedi_subdevice *s,
1006 unsigned int nscans); 1008 unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f42c0f..5a32b8fc000e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
394EXPORT_SYMBOL_GPL(comedi_dio_update_state); 394EXPORT_SYMBOL_GPL(comedi_dio_update_state);
395 395
396/** 396/**
397 * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes 397 * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
398 * bytes
398 * @s: COMEDI subdevice. 399 * @s: COMEDI subdevice.
400 * @cmd: COMEDI command.
399 * 401 *
400 * Determines the overall scan length according to the subdevice type and the 402 * Determines the overall scan length according to the subdevice type and the
401 * number of channels in the scan. 403 * number of channels in the scan for the specified command.
402 * 404 *
403 * For digital input, output or input/output subdevices, samples for 405 * For digital input, output or input/output subdevices, samples for
404 * multiple channels are assumed to be packed into one or more unsigned 406 * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
408 * 410 *
409 * Returns the overall scan length in bytes. 411 * Returns the overall scan length in bytes.
410 */ 412 */
411unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) 413unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
414 struct comedi_cmd *cmd)
412{ 415{
413 struct comedi_cmd *cmd = &s->async->cmd;
414 unsigned int num_samples; 416 unsigned int num_samples;
415 unsigned int bits_per_sample; 417 unsigned int bits_per_sample;
416 418
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
427 } 429 }
428 return comedi_samples_to_bytes(s, num_samples); 430 return comedi_samples_to_bytes(s, num_samples);
429} 431}
432EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
433
434/**
435 * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
436 * @s: COMEDI subdevice.
437 *
438 * Determines the overall scan length according to the subdevice type and the
439 * number of channels in the scan for the current command.
440 *
441 * For digital input, output or input/output subdevices, samples for
442 * multiple channels are assumed to be packed into one or more unsigned
443 * short or unsigned int values according to the subdevice's %SDF_LSAMPL
444 * flag. For other types of subdevice, samples are assumed to occupy a
445 * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
446 *
447 * Returns the overall scan length in bytes.
448 */
449unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
450{
451 struct comedi_cmd *cmd = &s->async->cmd;
452
453 return comedi_bytes_per_scan_cmd(s, cmd);
454}
430EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); 455EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
431 456
432static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, 457static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59ac6706..b04dad8c7092 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
3545 struct comedi_subdevice *s, struct comedi_cmd *cmd) 3545 struct comedi_subdevice *s, struct comedi_cmd *cmd)
3546{ 3546{
3547 struct ni_private *devpriv = dev->private; 3547 struct ni_private *devpriv = dev->private;
3548 unsigned int bytes_per_scan;
3548 int err = 0; 3549 int err = 0;
3549 3550
3550 /* Step 1 : check if triggers are trivially valid */ 3551 /* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
3579 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); 3580 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
3580 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, 3581 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
3581 cmd->chanlist_len); 3582 cmd->chanlist_len);
3582 err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 3583 bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
3583 s->async->prealloc_bufsz / 3584 if (bytes_per_scan) {
3584 comedi_bytes_per_scan(s)); 3585 err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
3586 s->async->prealloc_bufsz /
3587 bytes_per_scan);
3588 }
3585 3589
3586 if (err) 3590 if (err)
3587 return 3; 3591 return 3;
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12e0dc..9bbc68729c11 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
23 [EROFS_FT_SYMLINK] = DT_LNK, 23 [EROFS_FT_SYMLINK] = DT_LNK,
24}; 24};
25 25
26static void debug_one_dentry(unsigned char d_type, const char *de_name,
27 unsigned int de_namelen)
28{
29#ifdef CONFIG_EROFS_FS_DEBUG
30 /* since the on-disk name could not have the trailing '\0' */
31 unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
32
33 memcpy(dbg_namebuf, de_name, de_namelen);
34 dbg_namebuf[de_namelen] = '\0';
35
36 debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
37 de_namelen, d_type);
38#endif
39}
40
26static int erofs_fill_dentries(struct dir_context *ctx, 41static int erofs_fill_dentries(struct dir_context *ctx,
27 void *dentry_blk, unsigned int *ofs, 42 void *dentry_blk, unsigned int *ofs,
28 unsigned int nameoff, unsigned int maxsize) 43 unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
33 de = dentry_blk + *ofs; 48 de = dentry_blk + *ofs;
34 while (de < end) { 49 while (de < end) {
35 const char *de_name; 50 const char *de_name;
36 int de_namelen; 51 unsigned int de_namelen;
37 unsigned char d_type; 52 unsigned char d_type;
38#ifdef CONFIG_EROFS_FS_DEBUG
39 unsigned int dbg_namelen;
40 unsigned char dbg_namebuf[EROFS_NAME_LEN];
41#endif
42 53
43 if (unlikely(de->file_type < EROFS_FT_MAX)) 54 if (de->file_type < EROFS_FT_MAX)
44 d_type = erofs_filetype_table[de->file_type]; 55 d_type = erofs_filetype_table[de->file_type];
45 else 56 else
46 d_type = DT_UNKNOWN; 57 d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
48 nameoff = le16_to_cpu(de->nameoff); 59 nameoff = le16_to_cpu(de->nameoff);
49 de_name = (char *)dentry_blk + nameoff; 60 de_name = (char *)dentry_blk + nameoff;
50 61
51 de_namelen = unlikely(de + 1 >= end) ? 62 /* the last dirent in the block? */
52 /* last directory entry */ 63 if (de + 1 >= end)
53 strnlen(de_name, maxsize - nameoff) : 64 de_namelen = strnlen(de_name, maxsize - nameoff);
54 le16_to_cpu(de[1].nameoff) - nameoff; 65 else
66 de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
55 67
56 /* a corrupted entry is found */ 68 /* a corrupted entry is found */
57 if (unlikely(de_namelen < 0)) { 69 if (unlikely(nameoff + de_namelen > maxsize ||
70 de_namelen > EROFS_NAME_LEN)) {
58 DBG_BUGON(1); 71 DBG_BUGON(1);
59 return -EIO; 72 return -EIO;
60 } 73 }
61 74
62#ifdef CONFIG_EROFS_FS_DEBUG 75 debug_one_dentry(d_type, de_name, de_namelen);
63 dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
64 memcpy(dbg_namebuf, de_name, dbg_namelen);
65 dbg_namebuf[dbg_namelen] = '\0';
66
67 debugln("%s, found de_name %s de_len %d d_type %d", __func__,
68 dbg_namebuf, de_namelen, d_type);
69#endif
70
71 if (!dir_emit(ctx, de_name, de_namelen, 76 if (!dir_emit(ctx, de_name, de_namelen,
72 le64_to_cpu(de->nid), d_type)) 77 le64_to_cpu(de->nid), d_type))
73 /* stopped by some reason */ 78 /* stopped by some reason */
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc50e09c..31eef8395774 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ repeat:
972 overlapped = false; 972 overlapped = false;
973 compressed_pages = grp->compressed_pages; 973 compressed_pages = grp->compressed_pages;
974 974
975 err = 0;
975 for (i = 0; i < clusterpages; ++i) { 976 for (i = 0; i < clusterpages; ++i) {
976 unsigned int pagenr; 977 unsigned int pagenr;
977 978
@@ -981,26 +982,39 @@ repeat:
981 DBG_BUGON(!page); 982 DBG_BUGON(!page);
982 DBG_BUGON(!page->mapping); 983 DBG_BUGON(!page->mapping);
983 984
984 if (z_erofs_is_stagingpage(page)) 985 if (!z_erofs_is_stagingpage(page)) {
985 continue;
986#ifdef EROFS_FS_HAS_MANAGED_CACHE 986#ifdef EROFS_FS_HAS_MANAGED_CACHE
987 if (page->mapping == MNGD_MAPPING(sbi)) { 987 if (page->mapping == MNGD_MAPPING(sbi)) {
988 DBG_BUGON(!PageUptodate(page)); 988 if (unlikely(!PageUptodate(page)))
989 continue; 989 err = -EIO;
990 } 990 continue;
991 }
991#endif 992#endif
992 993
993 /* only non-head page could be reused as a compressed page */ 994 /*
994 pagenr = z_erofs_onlinepage_index(page); 995 * only if non-head page can be selected
996 * for inplace decompression
997 */
998 pagenr = z_erofs_onlinepage_index(page);
995 999
996 DBG_BUGON(pagenr >= nr_pages); 1000 DBG_BUGON(pagenr >= nr_pages);
997 DBG_BUGON(pages[pagenr]); 1001 DBG_BUGON(pages[pagenr]);
998 ++sparsemem_pages; 1002 ++sparsemem_pages;
999 pages[pagenr] = page; 1003 pages[pagenr] = page;
1000 1004
1001 overlapped = true; 1005 overlapped = true;
1006 }
1007
1008 /* PG_error needs checking for inplaced and staging pages */
1009 if (unlikely(PageError(page))) {
1010 DBG_BUGON(PageUptodate(page));
1011 err = -EIO;
1012 }
1002 } 1013 }
1003 1014
1015 if (unlikely(err))
1016 goto out;
1017
1004 llen = (nr_pages << PAGE_SHIFT) - work->pageofs; 1018 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1005 1019
1006 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { 1020 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
1029 1043
1030skip_allocpage: 1044skip_allocpage:
1031 vout = erofs_vmap(pages, nr_pages); 1045 vout = erofs_vmap(pages, nr_pages);
1046 if (!vout) {
1047 err = -ENOMEM;
1048 goto out;
1049 }
1032 1050
1033 err = z_erofs_vle_unzip_vmap(compressed_pages, 1051 err = z_erofs_vle_unzip_vmap(compressed_pages,
1034 clusterpages, vout, llen, work->pageofs, overlapped); 1052 clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
1194 if (page->mapping == mc) { 1212 if (page->mapping == mc) {
1195 WRITE_ONCE(grp->compressed_pages[nr], page); 1213 WRITE_ONCE(grp->compressed_pages[nr], page);
1196 1214
1215 ClearPageError(page);
1197 if (!PagePrivate(page)) { 1216 if (!PagePrivate(page)) {
1198 /* 1217 /*
1199 * impossible to be !PagePrivate(page) for 1218 * impossible to be !PagePrivate(page) for
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a2731a..0daac9b984a8 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
136 136
137 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); 137 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
138 138
139 if (clusterpages == 1) 139 if (clusterpages == 1) {
140 vin = kmap_atomic(compressed_pages[0]); 140 vin = kmap_atomic(compressed_pages[0]);
141 else 141 } else {
142 vin = erofs_vmap(compressed_pages, clusterpages); 142 vin = erofs_vmap(compressed_pages, clusterpages);
143 if (!vin)
144 return -ENOMEM;
145 }
143 146
144 preempt_disable(); 147 preempt_disable();
145 vout = erofs_pcpubuf[smp_processor_id()].data; 148 vout = erofs_pcpubuf[smp_processor_id()].data;
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b73385540216..250c15ace2a7 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
117 status = "okay"; 117 status = "okay";
118}; 118};
119 119
120&ethernet {
121 //mtd-mac-address = <&factory 0xe000>;
122 gmac1: mac@0 {
123 compatible = "mediatek,eth-mac";
124 reg = <0>;
125 phy-handle = <&phy1>;
126 };
127
128 mdio-bus {
129 phy1: ethernet-phy@1 {
130 reg = <1>;
131 phy-mode = "rgmii";
132 };
133 };
134};
135
136&pinctrl { 120&pinctrl {
137 state_default: pinctrl0 { 121 state_default: pinctrl0 {
138 gpio { 122 gpio {
@@ -141,3 +125,16 @@
141 }; 125 };
142 }; 126 };
143}; 127};
128
129&switch0 {
130 ports {
131 port@0 {
132 label = "ethblack";
133 status = "ok";
134 };
135 port@4 {
136 label = "ethblue";
137 status = "ok";
138 };
139 };
140};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff3680ce4b..17020e24abd2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
372 372
373 mediatek,ethsys = <&ethsys>; 373 mediatek,ethsys = <&ethsys>;
374 374
375 mediatek,switch = <&gsw>;
376 375
376 gmac0: mac@0 {
377 compatible = "mediatek,eth-mac";
378 reg = <0>;
379 phy-mode = "rgmii";
380 fixed-link {
381 speed = <1000>;
382 full-duplex;
383 pause;
384 };
385 };
386 gmac1: mac@1 {
387 compatible = "mediatek,eth-mac";
388 reg = <1>;
389 status = "off";
390 phy-mode = "rgmii";
391 phy-handle = <&phy5>;
392 };
377 mdio-bus { 393 mdio-bus {
378 #address-cells = <1>; 394 #address-cells = <1>;
379 #size-cells = <0>; 395 #size-cells = <0>;
380 396
381 phy1f: ethernet-phy@1f { 397 phy5: ethernet-phy@5 {
382 reg = <0x1f>; 398 reg = <5>;
383 phy-mode = "rgmii"; 399 phy-mode = "rgmii";
384 }; 400 };
401
402 switch0: switch0@0 {
403 compatible = "mediatek,mt7621";
404 #address-cells = <1>;
405 #size-cells = <0>;
406 reg = <0>;
407 mediatek,mcm;
408 resets = <&rstctrl 2>;
409 reset-names = "mcm";
410
411 ports {
412 #address-cells = <1>;
413 #size-cells = <0>;
414 reg = <0>;
415 port@0 {
416 status = "off";
417 reg = <0>;
418 label = "lan0";
419 };
420 port@1 {
421 status = "off";
422 reg = <1>;
423 label = "lan1";
424 };
425 port@2 {
426 status = "off";
427 reg = <2>;
428 label = "lan2";
429 };
430 port@3 {
431 status = "off";
432 reg = <3>;
433 label = "lan3";
434 };
435 port@4 {
436 status = "off";
437 reg = <4>;
438 label = "lan4";
439 };
440 port@6 {
441 reg = <6>;
442 label = "cpu";
443 ethernet = <&gmac0>;
444 phy-mode = "trgmii";
445 fixed-link {
446 speed = <1000>;
447 full-duplex;
448 };
449 };
450 };
451 };
385 }; 452 };
386 }; 453 };
387 454
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b38552697..000000000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
1Mediatek Gigabit Switch
2=======================
3
4The mediatek gigabit switch can be found on Mediatek SoCs.
5
6Required properties:
7- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
8 "mediatek,mt7623-gsw"
9- reg: Address and length of the register set for the device
10- interrupts: Should contain the gigabit switches interrupt
11
12
13Additional required properties for ARM based SoCs:
14- mediatek,reset-pin: phandle describing the reset GPIO
15- clocks: the clocks used by the switch
16- clock-names: the names of the clocks listed in the clocks property
17 these should be "trgpll", "esw", "gp2", "gp1"
18- mt7530-supply: the phandle of the regulator used to power the switch
19- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
20 setup the drive current
21
22
23Optional properties:
24- interrupt-parent: Should be the phandle for the interrupt controller
25 that services interrupts for this device
26
27Example:
28
29gsw: switch@1b100000 {
30 compatible = "mediatek,mt7623-gsw";
31 reg = <0 0x1b110000 0 0x300000>;
32
33 interrupt-parent = <&pio>;
34 interrupts = <168 IRQ_TYPE_EDGE_RISING>;
35
36 clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
37 <&ethsys CLK_ETHSYS_ESW>,
38 <&ethsys CLK_ETHSYS_GP2>,
39 <&ethsys CLK_ETHSYS_GP1>;
40 clock-names = "trgpll", "esw", "gp2", "gp1";
41
42 mt7530-supply = <&mt6323_vpa_reg>;
43
44 mediatek,pctl-regmap = <&syscfg_pctl_a>;
45 mediatek,reset-pin = <&pio 15 0>;
46
47 status = "okay";
48};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c7a96c..000000000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
1config NET_VENDOR_MEDIATEK_STAGING
2 bool "MediaTek ethernet driver - staging version"
3 depends on RALINK
4 ---help---
5 If you have an MT7621 Mediatek SoC with ethernet, say Y.
6
7if NET_VENDOR_MEDIATEK_STAGING
8choice
9 prompt "MAC type"
10
11config NET_MEDIATEK_MT7621
12 bool "MT7621"
13 depends on MIPS && SOC_MT7621
14
15endchoice
16
17config NET_MEDIATEK_SOC_STAGING
18 tristate "MediaTek SoC Gigabit Ethernet support"
19 depends on NET_VENDOR_MEDIATEK_STAGING
20 select PHYLIB
21 ---help---
22 This driver supports the gigabit ethernet MACs in the
23 MediaTek SoC family.
24
25config NET_MEDIATEK_MDIO
26 def_bool NET_MEDIATEK_SOC_STAGING
27 depends on NET_MEDIATEK_MT7621
28 select PHYLIB
29
30config NET_MEDIATEK_MDIO_MT7620
31 def_bool NET_MEDIATEK_SOC_STAGING
32 depends on NET_MEDIATEK_MT7621
33 select NET_MEDIATEK_MDIO
34
35config NET_MEDIATEK_GSW_MT7621
36 def_tristate NET_MEDIATEK_SOC_STAGING
37 depends on NET_MEDIATEK_MT7621
38
39endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3596b3..000000000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Makefile for the Ralink SoCs built-in ethernet macs
3#
4
5mtk-eth-soc-y += mtk_eth_soc.o ethtool.o
6
7mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o
8mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
9
10mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o
11
12obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o
13
14obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4b4cd4..000000000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
1
2- verify devicetree documentation is consistent with code
3- fix ethtool - currently doesn't return valid data.
4- general code review and clean up
5- add support for second MAC on mt7621
6- convert gsw code to use switchdev interfaces
7- md7620_mmi_write etc should probably be wrapped
8 in a regmap abstraction.
9- Get soc_mt7621 to work with QDMA TX if possible.
10- Ensure phys are correctly configured when a cable
11 is plugged in.
12
13Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e2c987..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/* This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
12 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
13 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
14 */
15
16#include "mtk_eth_soc.h"
17#include "ethtool.h"
18
19struct mtk_stat {
20 char name[ETH_GSTRING_LEN];
21 unsigned int idx;
22};
23
24#define MTK_HW_STAT(stat) { \
25 .name = #stat, \
26 .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
27}
28
29static const struct mtk_stat mtk_ethtool_hw_stats[] = {
30 MTK_HW_STAT(tx_bytes),
31 MTK_HW_STAT(tx_packets),
32 MTK_HW_STAT(tx_skip),
33 MTK_HW_STAT(tx_collisions),
34 MTK_HW_STAT(rx_bytes),
35 MTK_HW_STAT(rx_packets),
36 MTK_HW_STAT(rx_overflow),
37 MTK_HW_STAT(rx_fcs_errors),
38 MTK_HW_STAT(rx_short_errors),
39 MTK_HW_STAT(rx_long_errors),
40 MTK_HW_STAT(rx_checksum_errors),
41 MTK_HW_STAT(rx_flow_control_packets),
42};
43
44#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
45
46static int mtk_get_link_ksettings(struct net_device *dev,
47 struct ethtool_link_ksettings *cmd)
48{
49 struct mtk_mac *mac = netdev_priv(dev);
50 int err;
51
52 if (!mac->phy_dev)
53 return -ENODEV;
54
55 if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
56 err = phy_read_status(mac->phy_dev);
57 if (err)
58 return -ENODEV;
59 }
60
61 phy_ethtool_ksettings_get(mac->phy_dev, cmd);
62 return 0;
63}
64
65static int mtk_set_link_ksettings(struct net_device *dev,
66 const struct ethtool_link_ksettings *cmd)
67{
68 struct mtk_mac *mac = netdev_priv(dev);
69
70 if (!mac->phy_dev)
71 return -ENODEV;
72
73 if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
74 if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
75 mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
76 mac->phy_flags = MTK_PHY_FLAG_PORT;
77 } else if (mac->hw->mii_bus) {
78 mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
79 cmd->base.phy_address);
80 if (!mac->phy_dev)
81 return -ENODEV;
82 mac->phy_flags = MTK_PHY_FLAG_ATTACH;
83 } else {
84 return -ENODEV;
85 }
86 }
87
88 return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
89}
90
91static void mtk_get_drvinfo(struct net_device *dev,
92 struct ethtool_drvinfo *info)
93{
94 struct mtk_mac *mac = netdev_priv(dev);
95 struct mtk_soc_data *soc = mac->hw->soc;
96
97 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
98 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
99
100 if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
101 info->n_stats = MTK_HW_STATS_LEN;
102}
103
104static u32 mtk_get_msglevel(struct net_device *dev)
105{
106 struct mtk_mac *mac = netdev_priv(dev);
107
108 return mac->hw->msg_enable;
109}
110
111static void mtk_set_msglevel(struct net_device *dev, u32 value)
112{
113 struct mtk_mac *mac = netdev_priv(dev);
114
115 mac->hw->msg_enable = value;
116}
117
118static int mtk_nway_reset(struct net_device *dev)
119{
120 struct mtk_mac *mac = netdev_priv(dev);
121
122 if (!mac->phy_dev)
123 return -EOPNOTSUPP;
124
125 return genphy_restart_aneg(mac->phy_dev);
126}
127
128static u32 mtk_get_link(struct net_device *dev)
129{
130 struct mtk_mac *mac = netdev_priv(dev);
131 int err;
132
133 if (!mac->phy_dev)
134 goto out_get_link;
135
136 if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
137 err = genphy_update_link(mac->phy_dev);
138 if (err)
139 goto out_get_link;
140 }
141
142 return mac->phy_dev->link;
143
144out_get_link:
145 return ethtool_op_get_link(dev);
146}
147
148static int mtk_set_ringparam(struct net_device *dev,
149 struct ethtool_ringparam *ring)
150{
151 struct mtk_mac *mac = netdev_priv(dev);
152
153 if ((ring->tx_pending < 2) ||
154 (ring->rx_pending < 2) ||
155 (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
156 (ring->tx_pending > mac->hw->soc->dma_ring_size))
157 return -EINVAL;
158
159 dev->netdev_ops->ndo_stop(dev);
160
161 mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
162 mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
163
164 return dev->netdev_ops->ndo_open(dev);
165}
166
167static void mtk_get_ringparam(struct net_device *dev,
168 struct ethtool_ringparam *ring)
169{
170 struct mtk_mac *mac = netdev_priv(dev);
171
172 ring->rx_max_pending = mac->hw->soc->dma_ring_size;
173 ring->tx_max_pending = mac->hw->soc->dma_ring_size;
174 ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
175 ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
176}
177
178static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
179{
180 int i;
181
182 switch (stringset) {
183 case ETH_SS_STATS:
184 for (i = 0; i < MTK_HW_STATS_LEN; i++) {
185 memcpy(data, mtk_ethtool_hw_stats[i].name,
186 ETH_GSTRING_LEN);
187 data += ETH_GSTRING_LEN;
188 }
189 break;
190 }
191}
192
193static int mtk_get_sset_count(struct net_device *dev, int sset)
194{
195 switch (sset) {
196 case ETH_SS_STATS:
197 return MTK_HW_STATS_LEN;
198 default:
199 return -EOPNOTSUPP;
200 }
201}
202
203static void mtk_get_ethtool_stats(struct net_device *dev,
204 struct ethtool_stats *stats, u64 *data)
205{
206 struct mtk_mac *mac = netdev_priv(dev);
207 struct mtk_hw_stats *hwstats = mac->hw_stats;
208 unsigned int start;
209 int i;
210
211 if (netif_running(dev) && netif_device_present(dev)) {
212 if (spin_trylock(&hwstats->stats_lock)) {
213 mtk_stats_update_mac(mac);
214 spin_unlock(&hwstats->stats_lock);
215 }
216 }
217
218 do {
219 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
220 for (i = 0; i < MTK_HW_STATS_LEN; i++)
221 data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
222
223 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
224}
225
226static struct ethtool_ops mtk_ethtool_ops = {
227 .get_link_ksettings = mtk_get_link_ksettings,
228 .set_link_ksettings = mtk_set_link_ksettings,
229 .get_drvinfo = mtk_get_drvinfo,
230 .get_msglevel = mtk_get_msglevel,
231 .set_msglevel = mtk_set_msglevel,
232 .nway_reset = mtk_nway_reset,
233 .get_link = mtk_get_link,
234 .set_ringparam = mtk_set_ringparam,
235 .get_ringparam = mtk_get_ringparam,
236};
237
238void mtk_set_ethtool_ops(struct net_device *netdev)
239{
240 struct mtk_mac *mac = netdev_priv(netdev);
241 struct mtk_soc_data *soc = mac->hw->soc;
242
243 if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
244 mtk_ethtool_ops.get_strings = mtk_get_strings;
245 mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
246 mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
247 }
248
249 netdev->ethtool_ops = &mtk_ethtool_ops;
250}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469aea6c..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
4 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
5 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
6 */
7
8#ifndef MTK_ETHTOOL_H
9#define MTK_ETHTOOL_H
10
11#include <linux/ethtool.h>
12
13void mtk_set_ethtool_ops(struct net_device *netdev);
14
15#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e5481952..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef _RALINK_GSW_MT7620_H__
16#define _RALINK_GSW_MT7620_H__
17
18#define GSW_REG_PHY_TIMEOUT (5 * HZ)
19
20#define MT7620_GSW_REG_PIAC 0x0004
21
22#define GSW_NUM_VLANS 16
23#define GSW_NUM_VIDS 4096
24#define GSW_NUM_PORTS 7
25#define GSW_PORT6 6
26
27#define GSW_MDIO_ACCESS BIT(31)
28#define GSW_MDIO_READ BIT(19)
29#define GSW_MDIO_WRITE BIT(18)
30#define GSW_MDIO_START BIT(16)
31#define GSW_MDIO_ADDR_SHIFT 20
32#define GSW_MDIO_REG_SHIFT 25
33
34#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100))
35#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
36#define GSW_REG_SMACCR0 0x3fE4
37#define GSW_REG_SMACCR1 0x3fE8
38#define GSW_REG_CKGCR 0x3ff0
39
40#define GSW_REG_IMR 0x7008
41#define GSW_REG_ISR 0x700c
42#define GSW_REG_GPC1 0x7014
43
44#define SYSC_REG_CHIP_REV_ID 0x0c
45#define SYSC_REG_CFG 0x10
46#define SYSC_REG_CFG1 0x14
47#define RST_CTRL_MCM BIT(2)
48#define SYSC_PAD_RGMII2_MDIO 0x58
49#define SYSC_GPIO_MODE 0x60
50
51#define PORT_IRQ_ST_CHG 0x7f
52
53#define MT7621_ESW_PHY_POLLING 0x0000
54#define MT7620_ESW_PHY_POLLING 0x7000
55
56#define PMCR_IPG BIT(18)
57#define PMCR_MAC_MODE BIT(16)
58#define PMCR_FORCE BIT(15)
59#define PMCR_TX_EN BIT(14)
60#define PMCR_RX_EN BIT(13)
61#define PMCR_BACKOFF BIT(9)
62#define PMCR_BACKPRES BIT(8)
63#define PMCR_RX_FC BIT(5)
64#define PMCR_TX_FC BIT(4)
65#define PMCR_SPEED(_x) (_x << 2)
66#define PMCR_DUPLEX BIT(1)
67#define PMCR_LINK BIT(0)
68
69#define PHY_AN_EN BIT(31)
70#define PHY_PRE_EN BIT(30)
71#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24)
72
73/* ethernet subsystem config register */
74#define ETHSYS_SYSCFG0 0x14
75/* ethernet subsystem clock register */
76#define ETHSYS_CLKCFG0 0x2c
77#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
78
79/* p5 RGMII wrapper TX clock control register */
80#define MT7530_P5RGMIITXCR 0x7b04
81/* p5 RGMII wrapper RX clock control register */
82#define MT7530_P5RGMIIRXCR 0x7b00
83/* TRGMII TDX ODT registers */
84#define MT7530_TRGMII_TD0_ODT 0x7a54
85#define MT7530_TRGMII_TD1_ODT 0x7a5c
86#define MT7530_TRGMII_TD2_ODT 0x7a64
87#define MT7530_TRGMII_TD3_ODT 0x7a6c
88#define MT7530_TRGMII_TD4_ODT 0x7a74
89#define MT7530_TRGMII_TD5_ODT 0x7a7c
90/* TRGMII TCK ctrl register */
91#define MT7530_TRGMII_TCK_CTRL 0x7a78
92/* TRGMII Tx ctrl register */
93#define MT7530_TRGMII_TXCTRL 0x7a40
94/* port 6 extended control register */
95#define MT7530_P6ECR 0x7830
96/* IO driver control register */
97#define MT7530_IO_DRV_CR 0x7810
98/* top signal control register */
99#define MT7530_TOP_SIG_CTRL 0x7808
100/* modified hwtrap register */
101#define MT7530_MHWTRAP 0x7804
102/* hwtrap status register */
103#define MT7530_HWTRAP 0x7800
104/* status interrupt register */
105#define MT7530_SYS_INT_STS 0x700c
106/* system nterrupt register */
107#define MT7530_SYS_INT_EN 0x7008
108/* system control register */
109#define MT7530_SYS_CTRL 0x7000
110/* port MAC status register */
111#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100))
112/* port MAC control register */
113#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100))
114
115#define MT7621_XTAL_SHIFT 6
116#define MT7621_XTAL_MASK 0x7
117#define MT7621_XTAL_25 6
118#define MT7621_XTAL_40 3
119#define MT7621_MDIO_DRV_MASK (3 << 4)
120#define MT7621_GE1_MODE_MASK (3 << 12)
121
122#define TRGMII_TXCTRL_TXC_INV BIT(30)
123#define P6ECR_INTF_MODE_RGMII BIT(1)
124#define P5RGMIIRXCR_C_ALIGN BIT(8)
125#define P5RGMIIRXCR_DELAY_2 BIT(1)
126#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2))
127
128/* TOP_SIG_CTRL bits */
129#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
130
131/* MHWTRAP bits */
132#define MHWTRAP_MANUAL BIT(16)
133#define MHWTRAP_P5_MAC_SEL BIT(13)
134#define MHWTRAP_P6_DIS BIT(8)
135#define MHWTRAP_P5_RGMII_MODE BIT(7)
136#define MHWTRAP_P5_DIS BIT(6)
137#define MHWTRAP_PHY_ACCESS BIT(5)
138
139/* HWTRAP bits */
140#define HWTRAP_XTAL_SHIFT 9
141#define HWTRAP_XTAL_MASK 0x3
142
143/* SYS_CTRL bits */
144#define SYS_CTRL_SW_RST BIT(1)
145#define SYS_CTRL_REG_RST BIT(0)
146
147/* PMCR bits */
148#define PMCR_IFG_XMIT_96 BIT(18)
149#define PMCR_MAC_MODE BIT(16)
150#define PMCR_FORCE_MODE BIT(15)
151#define PMCR_TX_EN BIT(14)
152#define PMCR_RX_EN BIT(13)
153#define PMCR_BACK_PRES_EN BIT(9)
154#define PMCR_BACKOFF_EN BIT(8)
155#define PMCR_TX_FC_EN BIT(5)
156#define PMCR_RX_FC_EN BIT(4)
157#define PMCR_FORCE_SPEED_1000 BIT(3)
158#define PMCR_FORCE_FDX BIT(1)
159#define PMCR_FORCE_LNK BIT(0)
160#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
161 PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
162 PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
163 PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
164 PMCR_FORCE_LNK)
165
166#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
167 PMCR_TX_FC_EN | PMCR_RX_FC_EN)
168
169/* TRGMII control registers */
170#define GSW_INTF_MODE 0x390
171#define GSW_TRGMII_TD0_ODT 0x354
172#define GSW_TRGMII_TD1_ODT 0x35c
173#define GSW_TRGMII_TD2_ODT 0x364
174#define GSW_TRGMII_TD3_ODT 0x36c
175#define GSW_TRGMII_TXCTL_ODT 0x374
176#define GSW_TRGMII_TCK_ODT 0x37c
177#define GSW_TRGMII_RCK_CTRL 0x300
178
179#define INTF_MODE_TRGMII BIT(1)
180#define TRGMII_RCK_CTRL_RX_RST BIT(31)
181
182/* Mac control registers */
183#define MTK_MAC_P2_MCR 0x200
184#define MTK_MAC_P1_MCR 0x100
185
186#define MAC_MCR_MAX_RX_2K BIT(29)
187#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
188#define MAC_MCR_FORCE_MODE BIT(15)
189#define MAC_MCR_TX_EN BIT(14)
190#define MAC_MCR_RX_EN BIT(13)
191#define MAC_MCR_BACKOFF_EN BIT(9)
192#define MAC_MCR_BACKPR_EN BIT(8)
193#define MAC_MCR_FORCE_RX_FC BIT(5)
194#define MAC_MCR_FORCE_TX_FC BIT(4)
195#define MAC_MCR_SPEED_1000 BIT(3)
196#define MAC_MCR_FORCE_DPX BIT(1)
197#define MAC_MCR_FORCE_LINK BIT(0)
198#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
199 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
200 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
201 MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
202 MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
203 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
204#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
205 MAC_MCR_FIXED_LINK)
206
207/* possible XTAL speed */
208#define MT7623_XTAL_40 0
209#define MT7623_XTAL_20 1
210#define MT7623_XTAL_25 3
211
212/* GPIO port control registers */
213#define GPIO_OD33_CTRL8 0x4c0
214#define GPIO_BIAS_CTRL 0xed0
215#define GPIO_DRV_SEL10 0xf00
216
217/* on MT7620 the functio of port 4 can be software configured */
218enum {
219 PORT4_EPHY = 0,
220 PORT4_EXT,
221};
222
223/* struct mt7620_gsw - the structure that holds the SoC specific data
224 * @dev: The Device struct
225 * @base: The base address
226 * @piac_offset: The PIAC base may change depending on SoC
227 * @irq: The IRQ we are using
228 * @port4: The port4 mode on MT7620
229 * @autopoll: Is MDIO autopolling enabled
230 * @ethsys: The ethsys register map
231 * @pctl: The pin control register map
232 * @clk_gsw: The switch clock
233 * @clk_gp1: The gmac1 clock
234 * @clk_gp2: The gmac2 clock
235 * @clk_trgpll: The trgmii pll clock
236 */
237struct mt7620_gsw {
238 struct device *dev;
239 void __iomem *base;
240 u32 piac_offset;
241 int irq;
242 int port4;
243 unsigned long int autopoll;
244
245 struct regmap *ethsys;
246 struct regmap *pctl;
247
248 struct clk *clk_gsw;
249 struct clk *clk_gp1;
250 struct clk *clk_gp2;
251 struct clk *clk_trgpll;
252};
253
254/* switch register I/O wrappers */
255void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
256u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
257
258/* the callback used by the driver core to bringup the switch */
259int mtk_gsw_init(struct mtk_eth *eth);
260
261/* MDIO access wrappers */
262int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
263int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
264void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
265int mt7620_has_carrier(struct mtk_eth *eth);
266void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
267 int speed, int duplex);
268void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
269u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
270void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
271
272u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
273 u32 phy_register, u32 write_data);
274u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
275void mt7620_handle_carrier(struct mtk_eth *eth);
276
277#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b17bad9..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/platform_device.h>
19#include <linux/of_device.h>
20#include <linux/of_irq.h>
21
22#include <ralink_regs.h>
23
24#include "mtk_eth_soc.h"
25#include "gsw_mt7620.h"
26
27void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
28{
29 iowrite32(val, gsw->base + reg);
30}
31EXPORT_SYMBOL_GPL(mtk_switch_w32);
32
33u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
34{
35 return ioread32(gsw->base + reg);
36}
37EXPORT_SYMBOL_GPL(mtk_switch_r32);
38
39static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
40{
41 struct mtk_eth *eth = (struct mtk_eth *)_eth;
42 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
43 u32 reg, i;
44
45 reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
46
47 for (i = 0; i < 5; i++) {
48 unsigned int link;
49
50 if ((reg & BIT(i)) == 0)
51 continue;
52
53 link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
54
55 if (link == eth->link[i])
56 continue;
57
58 eth->link[i] = link;
59 if (link)
60 netdev_info(*eth->netdev,
61 "port %d link up\n", i);
62 else
63 netdev_info(*eth->netdev,
64 "port %d link down\n", i);
65 }
66
67 mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
68
69 return IRQ_HANDLED;
70}
71
72static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
73 struct device_node *np)
74{
75 u32 i;
76 u32 val;
77
78 /* hardware reset the switch */
79 mtk_reset(eth, RST_CTRL_MCM);
80 mdelay(10);
81
82 /* reduce RGMII2 PAD driving strength */
83 rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
84
85 /* gpio mux - RGMII1=Normal mode */
86 rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
87
88 /* set GMAC1 RGMII mode */
89 rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
90
91 /* enable MDIO to control MT7530 */
92 rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
93
94 /* turn off all PHYs */
95 for (i = 0; i <= 4; i++) {
96 val = _mt7620_mii_read(gsw, i, 0x0);
97 val |= BIT(11);
98 _mt7620_mii_write(gsw, i, 0x0, val);
99 }
100
101 /* reset the switch */
102 mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
103 SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
104 usleep_range(10, 20);
105
106 if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
107 /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
108 mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
109 mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
110 } else {
111 /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
112 mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
113 mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
114 }
115
116 /* GE2, Link down */
117 mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
118
119 /* Enable Port 6, P5 as GMAC5, P5 disable */
120 val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
121 /* Enable Port 6 */
122 val &= ~MHWTRAP_P6_DIS;
123 /* Disable Port 5 */
124 val |= MHWTRAP_P5_DIS;
125 /* manual override of HW-Trap */
126 val |= MHWTRAP_MANUAL;
127 mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
128
129 val = rt_sysc_r32(SYSC_REG_CFG);
130 val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
131 if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
132 /* 40Mhz */
133
134 /* disable MT7530 core clock */
135 _mt7620_mii_write(gsw, 0, 13, 0x1f);
136 _mt7620_mii_write(gsw, 0, 14, 0x410);
137 _mt7620_mii_write(gsw, 0, 13, 0x401f);
138 _mt7620_mii_write(gsw, 0, 14, 0x0);
139
140 /* disable MT7530 PLL */
141 _mt7620_mii_write(gsw, 0, 13, 0x1f);
142 _mt7620_mii_write(gsw, 0, 14, 0x40d);
143 _mt7620_mii_write(gsw, 0, 13, 0x401f);
144 _mt7620_mii_write(gsw, 0, 14, 0x2020);
145
146 /* for MT7530 core clock = 500Mhz */
147 _mt7620_mii_write(gsw, 0, 13, 0x1f);
148 _mt7620_mii_write(gsw, 0, 14, 0x40e);
149 _mt7620_mii_write(gsw, 0, 13, 0x401f);
150 _mt7620_mii_write(gsw, 0, 14, 0x119);
151
152 /* enable MT7530 PLL */
153 _mt7620_mii_write(gsw, 0, 13, 0x1f);
154 _mt7620_mii_write(gsw, 0, 14, 0x40d);
155 _mt7620_mii_write(gsw, 0, 13, 0x401f);
156 _mt7620_mii_write(gsw, 0, 14, 0x2820);
157
158 usleep_range(20, 40);
159
160 /* enable MT7530 core clock */
161 _mt7620_mii_write(gsw, 0, 13, 0x1f);
162 _mt7620_mii_write(gsw, 0, 14, 0x410);
163 _mt7620_mii_write(gsw, 0, 13, 0x401f);
164 }
165
166 /* RGMII */
167 _mt7620_mii_write(gsw, 0, 14, 0x1);
168
169 /* set MT7530 central align */
170 mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
171 mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
172 MT7530_TRGMII_TXCTRL);
173 mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
174
175 /* delay setting for 10/1000M */
176 mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
177 P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
178 mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
179
180 /* lower Tx Driving*/
181 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
182 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
183 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
184 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
185 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
186 mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
187
188 /* turn on all PHYs */
189 for (i = 0; i <= 4; i++) {
190 val = _mt7620_mii_read(gsw, i, 0);
191 val &= ~BIT(11);
192 _mt7620_mii_write(gsw, i, 0, val);
193 }
194
195#define MT7530_NUM_PORTS 8
196#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8))
197#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8))
198#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8))
199#define MT7530_CPU_PORT 6
200
201 /* This is copied from mt7530_apply_config in libreCMC driver */
202 {
203 int i;
204
205 for (i = 0; i < MT7530_NUM_PORTS; i++)
206 mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
207
208 mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
209 0x00ff0000);
210
211 for (i = 0; i < MT7530_NUM_PORTS; i++)
212 mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
213 }
214
215 /* enable irq */
216 mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
217 mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
218}
219
220static const struct of_device_id mediatek_gsw_match[] = {
221 { .compatible = "mediatek,mt7621-gsw" },
222 {},
223};
224MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
225
226int mtk_gsw_init(struct mtk_eth *eth)
227{
228 struct device_node *np = eth->switch_np;
229 struct platform_device *pdev = of_find_device_by_node(np);
230 struct mt7620_gsw *gsw;
231
232 if (!pdev)
233 return -ENODEV;
234
235 if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
236 return -EINVAL;
237
238 gsw = platform_get_drvdata(pdev);
239 eth->sw_priv = gsw;
240
241 if (!gsw->irq)
242 return -EINVAL;
243
244 request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
245 "gsw", eth);
246 disable_irq(gsw->irq);
247
248 mt7621_hw_init(eth, gsw, np);
249
250 enable_irq(gsw->irq);
251
252 return 0;
253}
254EXPORT_SYMBOL_GPL(mtk_gsw_init);
255
256static int mt7621_gsw_probe(struct platform_device *pdev)
257{
258 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
259 struct mt7620_gsw *gsw;
260
261 gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
262 if (!gsw)
263 return -ENOMEM;
264
265 gsw->base = devm_ioremap_resource(&pdev->dev, res);
266 if (IS_ERR(gsw->base))
267 return PTR_ERR(gsw->base);
268
269 gsw->dev = &pdev->dev;
270 gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
271
272 platform_set_drvdata(pdev, gsw);
273
274 return 0;
275}
276
277static int mt7621_gsw_remove(struct platform_device *pdev)
278{
279 platform_set_drvdata(pdev, NULL);
280
281 return 0;
282}
283
284static struct platform_driver gsw_driver = {
285 .probe = mt7621_gsw_probe,
286 .remove = mt7621_gsw_remove,
287 .driver = {
288 .name = "mt7621-gsw",
289 .of_match_table = mediatek_gsw_match,
290 },
291};
292
293module_platform_driver(gsw_driver);
294
295MODULE_LICENSE("GPL");
296MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
297MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a447eed..000000000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
6 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
7 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/phy.h>
13#include <linux/of_net.h>
14#include <linux/of_mdio.h>
15
16#include "mtk_eth_soc.h"
17#include "mdio.h"
18
/* mii_bus reset hook; no hardware action is performed (stub). */
static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* TODO: hardware bus reset not implemented */
	return 0;
}
24
/* phylib adjust_link callback shared by all ports: folds each attached
 * PHY's link/speed/duplex into the driver's per-port cache and, on any
 * change, invokes the SoC's mdio_adjust_link hook.  Runs under
 * eth->phy->lock so it cannot race the start/stop paths.
 */
25static void mtk_phy_link_adjust(struct net_device *dev)
26{
27 struct mtk_eth *eth = netdev_priv(dev);
28 unsigned long flags;
29 int i;
30
31 spin_lock_irqsave(&eth->phy->lock, flags);
32 for (i = 0; i < 8; i++) {
33 if (eth->phy->phy_node[i]) {
34 struct phy_device *phydev = eth->phy->phy[i];
35 int status_change = 0;
36
 /* a speed/duplex change only counts while the link is up */
37 if (phydev->link)
38 if (eth->phy->duplex[i] != phydev->duplex ||
39 eth->phy->speed[i] != phydev->speed)
40 status_change = 1;
41
42 if (phydev->link != eth->link[i])
43 status_change = 1;
44
 /* only recognised speeds update the cache; others are ignored */
45 switch (phydev->speed) {
46 case SPEED_1000:
47 case SPEED_100:
48 case SPEED_10:
49 eth->link[i] = phydev->link;
50 eth->phy->duplex[i] = phydev->duplex;
51 eth->phy->speed[i] = phydev->speed;
52
53 if (status_change &&
54 eth->soc->mdio_adjust_link)
55 eth->soc->mdio_adjust_link(eth, i);
56 break;
57 }
58 }
59 }
60 spin_unlock_irqrestore(&eth->phy->lock, flags);
61}
62
/* Connect the PHY described by a DT phy-handle node to the MAC netdev.
 * The node's "reg" property is the switch port number (must be < 0x20).
 * On success the phy_device is cached in eth->phy->phy[port] and the
 * cached link state is cleared.  Returns 0, -EINVAL on bad port or
 * phy-mode, -ENODEV if the PHY cannot be connected.
 */
63int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
64 struct device_node *phy_node)
65{
66 const __be32 *_port = NULL;
67 struct phy_device *phydev;
68 int phy_mode, port;
69
70 _port = of_get_property(phy_node, "reg", NULL);
71
72 if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
73 pr_err("%pOFn: invalid port id\n", phy_node);
74 return -EINVAL;
75 }
76 port = be32_to_cpu(*_port);
77 phy_mode = of_get_phy_mode(phy_node);
78 if (phy_mode < 0) {
79 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
80 eth->phy->phy_node[port] = NULL;
81 return -EINVAL;
82 }
83
84 phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
85 mtk_phy_link_adjust, 0, phy_mode);
86 if (!phydev) {
87 dev_err(eth->dev, "could not connect to PHY\n");
88 eth->phy->phy_node[port] = NULL;
89 return -ENODEV;
90 }
91
 /* restrict advertising to 1000BASE-T feature set */
92 phydev->supported &= PHY_1000BT_FEATURES;
93 phydev->advertising = phydev->supported;
94
95 dev_info(eth->dev,
96 "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
97 port, phydev_name(phydev), phydev->phy_id,
98 phydev->drv->name);
99
100 eth->phy->phy[port] = phydev;
101 eth->link[port] = 0;
102
103 return 0;
104}
105
/* Attach a bus-discovered PHY to the MAC's netdev (MII mode) and kick
 * off autonegotiation, capped at 100 Mbit/s via phy_set_max_speed().
 */
106static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
107 struct phy_device *phy)
108{
109 phy_attach(eth->netdev[mac->id], phydev_name(phy),
110 PHY_INTERFACE_MODE_MII);
111
112 phy->autoneg = AUTONEG_ENABLE;
113 phy->speed = 0;
114 phy->duplex = 0;
115 phy_set_max_speed(phy, SPEED_100);
116 phy->advertising = phy->supported | ADVERTISED_Autoneg;
117
118 phy_start_aneg(phy);
119}
120
121static int mtk_phy_connect(struct mtk_mac *mac)
122{
123 struct mtk_eth *eth = mac->hw;
124 int i;
125
126 for (i = 0; i < 8; i++) {
127 if (eth->phy->phy_node[i]) {
128 if (!mac->phy_dev) {
129 mac->phy_dev = eth->phy->phy[i];
130 mac->phy_flags = MTK_PHY_FLAG_PORT;
131 }
132 } else if (eth->mii_bus) {
133 struct phy_device *phy;
134
135 phy = mdiobus_get_phy(eth->mii_bus, i);
136 if (phy) {
137 phy_init(eth, mac, phy);
138 if (!mac->phy_dev) {
139 mac->phy_dev = phy;
140 mac->phy_flags = MTK_PHY_FLAG_ATTACH;
141 }
142 }
143 }
144 }
145
146 return 0;
147}
148
149static void mtk_phy_disconnect(struct mtk_mac *mac)
150{
151 struct mtk_eth *eth = mac->hw;
152 unsigned long flags;
153 int i;
154
155 for (i = 0; i < 8; i++)
156 if (eth->phy->phy_fixed[i]) {
157 spin_lock_irqsave(&eth->phy->lock, flags);
158 eth->link[i] = 0;
159 if (eth->soc->mdio_adjust_link)
160 eth->soc->mdio_adjust_link(eth, i);
161 spin_unlock_irqrestore(&eth->phy->lock, flags);
162 } else if (eth->phy->phy[i]) {
163 phy_disconnect(eth->phy->phy[i]);
164 } else if (eth->mii_bus) {
165 struct phy_device *phy =
166 mdiobus_get_phy(eth->mii_bus, i);
167
168 if (phy)
169 phy_detach(phy);
170 }
171}
172
173static void mtk_phy_start(struct mtk_mac *mac)
174{
175 struct mtk_eth *eth = mac->hw;
176 unsigned long flags;
177 int i;
178
179 for (i = 0; i < 8; i++) {
180 if (eth->phy->phy_fixed[i]) {
181 spin_lock_irqsave(&eth->phy->lock, flags);
182 eth->link[i] = 1;
183 if (eth->soc->mdio_adjust_link)
184 eth->soc->mdio_adjust_link(eth, i);
185 spin_unlock_irqrestore(&eth->phy->lock, flags);
186 } else if (eth->phy->phy[i]) {
187 phy_start(eth->phy->phy[i]);
188 }
189 }
190}
191
192static void mtk_phy_stop(struct mtk_mac *mac)
193{
194 struct mtk_eth *eth = mac->hw;
195 unsigned long flags;
196 int i;
197
198 for (i = 0; i < 8; i++)
199 if (eth->phy->phy_fixed[i]) {
200 spin_lock_irqsave(&eth->phy->lock, flags);
201 eth->link[i] = 0;
202 if (eth->soc->mdio_adjust_link)
203 eth->soc->mdio_adjust_link(eth, i);
204 spin_unlock_irqrestore(&eth->phy->lock, flags);
205 } else if (eth->phy->phy[i]) {
206 phy_stop(eth->phy->phy[i]);
207 }
208}
209
/* PHY management ops used by the ralink/mtk SoCs; the embedded lock
 * (initialised in mtk_mdio_init) guards the per-port link caches.
 */
210static struct mtk_phy phy_ralink = {
211 .connect = mtk_phy_connect,
212 .disconnect = mtk_phy_disconnect,
213 .start = mtk_phy_start,
214 .stop = mtk_phy_stop,
215};
216
217int mtk_mdio_init(struct mtk_eth *eth)
218{
219 struct device_node *mii_np;
220 int err;
221
222 if (!eth->soc->mdio_read || !eth->soc->mdio_write)
223 return 0;
224
225 spin_lock_init(&phy_ralink.lock);
226 eth->phy = &phy_ralink;
227
228 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
229 if (!mii_np) {
230 dev_err(eth->dev, "no %s child node found", "mdio-bus");
231 return -ENODEV;
232 }
233
234 if (!of_device_is_available(mii_np)) {
235 err = 0;
236 goto err_put_node;
237 }
238
239 eth->mii_bus = mdiobus_alloc();
240 if (!eth->mii_bus) {
241 err = -ENOMEM;
242 goto err_put_node;
243 }
244
245 eth->mii_bus->name = "mdio";
246 eth->mii_bus->read = eth->soc->mdio_read;
247 eth->mii_bus->write = eth->soc->mdio_write;
248 eth->mii_bus->reset = mtk_mdio_reset;
249 eth->mii_bus->priv = eth;
250 eth->mii_bus->parent = eth->dev;
251
252 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
253 err = of_mdiobus_register(eth->mii_bus, mii_np);
254 if (err)
255 goto err_free_bus;
256
257 return 0;
258
259err_free_bus:
260 kfree(eth->mii_bus);
261err_put_node:
262 of_node_put(mii_np);
263 eth->mii_bus = NULL;
264 return err;
265}
266
267void mtk_mdio_cleanup(struct mtk_eth *eth)
268{
269 if (!eth->mii_bus)
270 return;
271
272 mdiobus_unregister(eth->mii_bus);
273 of_node_put(eth->mii_bus->dev.of_node);
274 kfree(eth->mii_bus);
275}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e23842a01..000000000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
/* MDIO support interface; compiled out when CONFIG_NET_MEDIATEK_MDIO
 * is not set.
 */
15#ifndef _RALINK_MDIO_H__
16#define _RALINK_MDIO_H__
17
18#ifdef CONFIG_NET_MEDIATEK_MDIO
19int mtk_mdio_init(struct mtk_eth *eth);
20void mtk_mdio_cleanup(struct mtk_eth *eth);
21int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
22 struct device_node *phy_node);
23#else
/* NOTE(review): no stub is provided for mtk_connect_phy_node() in the
 * !CONFIG_NET_MEDIATEK_MDIO case — verify no caller references it then.
 */
24static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
25static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
26#endif
27#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c2914e..000000000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18
19#include "mtk_eth_soc.h"
20#include "gsw_mt7620.h"
21#include "mdio.h"
22
23static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
24{
25 unsigned long t_start = jiffies;
26
27 while (1) {
28 if (!(mtk_switch_r32(gsw,
29 gsw->piac_offset + MT7620_GSW_REG_PIAC) &
30 GSW_MDIO_ACCESS))
31 return 0;
32 if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
33 break;
34 }
35
36 dev_err(gsw->dev, "mdio: MDIO timeout\n");
37 return -1;
38}
39
40u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
41 u32 phy_register, u32 write_data)
42{
43 if (mt7620_mii_busy_wait(gsw))
44 return -1;
45
46 write_data &= 0xffff;
47
48 mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
49 (phy_register << GSW_MDIO_REG_SHIFT) |
50 (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
51 MT7620_GSW_REG_PIAC);
52
53 if (mt7620_mii_busy_wait(gsw))
54 return -1;
55
56 return 0;
57}
58EXPORT_SYMBOL_GPL(_mt7620_mii_write);
59
60u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
61{
62 u32 d;
63
64 if (mt7620_mii_busy_wait(gsw))
65 return 0xffff;
66
67 mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
68 (phy_reg << GSW_MDIO_REG_SHIFT) |
69 (phy_addr << GSW_MDIO_ADDR_SHIFT),
70 MT7620_GSW_REG_PIAC);
71
72 if (mt7620_mii_busy_wait(gsw))
73 return 0xffff;
74
75 d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
76
77 return d;
78}
79EXPORT_SYMBOL_GPL(_mt7620_mii_read);
80
81int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
82{
83 struct mtk_eth *eth = bus->priv;
84 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
85
86 return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
87}
88
89int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
90{
91 struct mtk_eth *eth = bus->priv;
92 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
93
94 return _mt7620_mii_read(gsw, phy_addr, phy_reg);
95}
96
97void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
98{
99 _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
100 _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
101 _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
102}
103EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
104
105u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
106{
107 u16 high, low;
108
109 _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
110 low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
111 high = _mt7620_mii_read(gsw, 0x1f, 0x10);
112
113 return (high << 16) | (low & 0xffff);
114}
115EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
116
117void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
118{
119 u32 val = mt7530_mdio_r32(gsw, reg);
120
121 val &= ~mask;
122 val |= set;
123 mt7530_mdio_w32(gsw, reg, val);
124}
125EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
126
127static unsigned char *mtk_speed_str(int speed)
128{
129 switch (speed) {
130 case 2:
131 case SPEED_1000:
132 return "1000";
133 case 1:
134 case SPEED_100:
135 return "100";
136 case 0:
137 case SPEED_10:
138 return "10";
139 }
140
141 return "? ";
142}
143
144int mt7620_has_carrier(struct mtk_eth *eth)
145{
146 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
147 int i;
148
149 for (i = 0; i < GSW_PORT6; i++)
150 if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
151 return 1;
152 return 0;
153}
154
155void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
156 int speed, int duplex)
157{
158 struct mt7620_gsw *gsw = eth->sw_priv;
159
160 if (link)
161 dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
162 port, mtk_speed_str(speed),
163 (duplex) ? "Full" : "Half");
164 else
165 dev_info(gsw->dev, "port %d link down\n", port);
166}
167
168void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
169{
170 mt7620_print_link_state(eth, port, eth->link[port],
171 eth->phy->speed[port],
172 (eth->phy->duplex[port] == DUPLEX_FULL));
173}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 6027b19f7bc2..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2176 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/dma-mapping.h>
19#include <linux/init.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/ethtool.h>
23#include <linux/platform_device.h>
24#include <linux/of_device.h>
25#include <linux/mfd/syscon.h>
26#include <linux/clk.h>
27#include <linux/of_net.h>
28#include <linux/of_mdio.h>
29#include <linux/if_vlan.h>
30#include <linux/reset.h>
31#include <linux/tcp.h>
32#include <linux/io.h>
33#include <linux/bug.h>
34#include <linux/regmap.h>
35
36#include "mtk_eth_soc.h"
37#include "mdio.h"
38#include "ethtool.h"
39
40#define MAX_RX_LENGTH 1536
41#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
42#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
43#define DMA_DUMMY_DESC 0xffffffff
44#define MTK_DEFAULT_MSG_ENABLE \
45 (NETIF_MSG_DRV | \
46 NETIF_MSG_PROBE | \
47 NETIF_MSG_LINK | \
48 NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | \
50 NETIF_MSG_IFUP | \
51 NETIF_MSG_RX_ERR | \
52 NETIF_MSG_TX_ERR)
53
54#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
55#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
56#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
57
58#define SYSC_REG_RSTCTRL 0x34
59
60static int mtk_msg_level = -1;
61module_param_named(msg_level, mtk_msg_level, int, 0);
62MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
63
64static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
65 [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
66 [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
67 [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
68 [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
69 [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
70 [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
71 [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
72 [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
73 [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
74 [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
75 [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
76 [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
77 [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
78 [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
79 [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
80 [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
81};
82
83static const u16 *mtk_reg_table = mtk_reg_table_default;
84
/* Raw write to a frame-engine register (byte offset from eth->base). */
85void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
86{
87 __raw_writel(val, eth->base + reg);
88}
89
/* Raw read of a frame-engine register (byte offset from eth->base). */
90u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
91{
92 return __raw_readl(eth->base + reg);
93}
94
/* Write a register named by the per-SoC mtk_reg_table index. */
95static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
96{
97 mtk_w32(eth, val, mtk_reg_table[reg]);
98}
99
/* Read a register named by the per-SoC mtk_reg_table index. */
100static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
101{
102 return mtk_r32(eth, mtk_reg_table[reg]);
103}
104
105/* these bits are also exposed via the reset-controller API. however the switch
106 * and FE need to be brought out of reset in the exakt same moemtn and the
107 * reset-controller api does not provide this feature yet. Do the reset manually
108 * until we fixed the reset-controller api to be able to do this
109 */
110void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
111{
112 u32 val;
113
114 regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
115 val |= reset_bits;
116 regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
117 usleep_range(10, 20);
118 val &= ~reset_bits;
119 regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
120 usleep_range(10, 20);
121}
122EXPORT_SYMBOL(mtk_reset);
123
/* Acknowledge (clear) pending IRQ bits on each DMA engine the SoC has. */
124static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
125{
126 if (eth->soc->dma_type & MTK_PDMA)
127 mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
128 if (eth->soc->dma_type & MTK_QDMA)
129 mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
130}
131
132static inline u32 mtk_irq_pending(struct mtk_eth *eth)
133{
134 u32 status = 0;
135
136 if (eth->soc->dma_type & MTK_PDMA)
137 status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
138 if (eth->soc->dma_type & MTK_QDMA)
139 status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
140
141 return status;
142}
143
144static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
145{
146 u32 status_reg = MTK_REG_MTK_INT_STATUS;
147
148 if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
149 status_reg = MTK_REG_MTK_INT_STATUS2;
150
151 mtk_reg_w32(eth, mask, status_reg);
152}
153
154static u32 mtk_irq_pending_status(struct mtk_eth *eth)
155{
156 u32 status_reg = MTK_REG_MTK_INT_STATUS;
157
158 if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
159 status_reg = MTK_REG_MTK_INT_STATUS2;
160
161 return mtk_reg_r32(eth, status_reg);
162}
163
164static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
165{
166 u32 val;
167
168 if (eth->soc->dma_type & MTK_PDMA) {
169 val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
170 mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
171 /* flush write */
172 mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
173 }
174 if (eth->soc->dma_type & MTK_QDMA) {
175 val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
176 mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
177 /* flush write */
178 mtk_r32(eth, MTK_QMTK_INT_ENABLE);
179 }
180}
181
182static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
183{
184 u32 val;
185
186 if (eth->soc->dma_type & MTK_PDMA) {
187 val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
188 mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
189 /* flush write */
190 mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
191 }
192 if (eth->soc->dma_type & MTK_QDMA) {
193 val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
194 mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
195 /* flush write */
196 mtk_r32(eth, MTK_QMTK_INT_ENABLE);
197 }
198}
199
200static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
201{
202 u32 enabled = 0;
203
204 if (eth->soc->dma_type & MTK_PDMA)
205 enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
206 if (eth->soc->dma_type & MTK_QDMA)
207 enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
208
209 return enabled;
210}
211
/* Program a 6-byte MAC address into the GDMA1 address registers under
 * the page lock: bytes 0-1 in ADRH, bytes 2-5 in ADRL.
 */
212static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
213 unsigned char *macaddr)
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&mac->hw->page_lock, flags);
218 mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
219 mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
220 (macaddr[4] << 8) | macaddr[5],
221 MTK_GDMA1_MAC_ADRL);
222 spin_unlock_irqrestore(&mac->hw->page_lock, flags);
223}
224
225static int mtk_set_mac_address(struct net_device *dev, void *p)
226{
227 int ret = eth_mac_addr(dev, p);
228 struct mtk_mac *mac = netdev_priv(dev);
229 struct mtk_eth *eth = mac->hw;
230
231 if (ret)
232 return ret;
233
234 if (eth->soc->set_mac)
235 eth->soc->set_mac(mac, dev->dev_addr);
236 else
237 mtk_hw_set_macaddr(mac, p);
238
239 return 0;
240}
241
/* Page-fragment size needed for an RX buffer of the given MTU, never
 * smaller than what MAX_RX_LENGTH requires, including skb head/shared
 * info overhead.
 */
242static inline int mtk_max_frag_size(int mtu)
243{
244 /* make sure buf_size will be at least MAX_RX_LENGTH */
245 if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
246 mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
247
248 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
250}
251
/* Usable DMA payload size inside a fragment of `frag_size` after the
 * skb head padding and shared-info tail are subtracted.
 */
252static inline int mtk_max_buf_size(int frag_size)
253{
254 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
255 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
256
257 WARN_ON(buf_size < MAX_RX_LENGTH);
258
259 return buf_size;
260}
261
/* Snapshot a live RX descriptor word-by-word with READ_ONCE so the
 * compiler cannot tear or re-read fields the DMA engine may update.
 */
262static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
263 struct mtk_rx_dma *dma_rxd)
264{
265 rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
266 rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
267 rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
268 rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
269}
270
/* Publish a TX descriptor to the ring; txd2 (carrying the DONE flag) is
 * written last so the DMA engine never sees a half-written descriptor.
 */
271static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
272 struct mtk_tx_dma *dma_txd)
273{
274 WRITE_ONCE(dma_txd->txd1, txd->txd1);
275 WRITE_ONCE(dma_txd->txd3, txd->txd3);
276 WRITE_ONCE(dma_txd->txd4, txd->txd4);
277 /* clean dma done flag last */
278 WRITE_ONCE(dma_txd->txd2, txd->txd2);
279}
280
281static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
282{
283 int i;
284
285 if (ring->rx_data && ring->rx_dma) {
286 for (i = 0; i < ring->rx_ring_size; i++) {
287 if (!ring->rx_data[i])
288 continue;
289 if (!ring->rx_dma[i].rxd1)
290 continue;
291 dma_unmap_single(eth->dev,
292 ring->rx_dma[i].rxd1,
293 ring->rx_buf_size,
294 DMA_FROM_DEVICE);
295 skb_free_frag(ring->rx_data[i]);
296 }
297 kfree(ring->rx_data);
298 ring->rx_data = NULL;
299 }
300
301 if (ring->rx_dma) {
302 dma_free_coherent(eth->dev,
303 ring->rx_ring_size * sizeof(*ring->rx_dma),
304 ring->rx_dma,
305 ring->rx_phys);
306 ring->rx_dma = NULL;
307 }
308}
309
310static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
311{
312 int i, pad = 0;
313
314 ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
315 ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
316 ring->rx_ring_size = eth->soc->dma_ring_size;
317 ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
318 GFP_KERNEL);
319 if (!ring->rx_data)
320 goto no_rx_mem;
321
322 for (i = 0; i < ring->rx_ring_size; i++) {
323 ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
324 if (!ring->rx_data[i])
325 goto no_rx_mem;
326 }
327
328 ring->rx_dma =
329 dma_alloc_coherent(eth->dev,
330 ring->rx_ring_size * sizeof(*ring->rx_dma),
331 &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
332 if (!ring->rx_dma)
333 goto no_rx_mem;
334
335 if (!eth->soc->rx_2b_offset)
336 pad = NET_IP_ALIGN;
337
338 for (i = 0; i < ring->rx_ring_size; i++) {
339 dma_addr_t dma_addr = dma_map_single(eth->dev,
340 ring->rx_data[i] + NET_SKB_PAD + pad,
341 ring->rx_buf_size,
342 DMA_FROM_DEVICE);
343 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
344 goto no_rx_mem;
345 ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
346
347 if (eth->soc->rx_sg_dma)
348 ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
349 else
350 ring->rx_dma[i].rxd2 = RX_DMA_LSO;
351 }
352 ring->rx_calc_idx = ring->rx_ring_size - 1;
353 /* make sure that all changes to the dma ring are flushed before we
354 * continue
355 */
356 wmb();
357
358 return 0;
359
360no_rx_mem:
361 return -ENOMEM;
362}
363
364static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
365{
366 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
367 dma_unmap_single(dev,
368 dma_unmap_addr(tx_buf, dma_addr0),
369 dma_unmap_len(tx_buf, dma_len0),
370 DMA_TO_DEVICE);
371 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
372 dma_unmap_page(dev,
373 dma_unmap_addr(tx_buf, dma_addr0),
374 dma_unmap_len(tx_buf, dma_len0),
375 DMA_TO_DEVICE);
376 }
377 if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
378 dma_unmap_page(dev,
379 dma_unmap_addr(tx_buf, dma_addr1),
380 dma_unmap_len(tx_buf, dma_len1),
381 DMA_TO_DEVICE);
382
383 tx_buf->flags = 0;
384 if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
385 dev_kfree_skb_any(tx_buf->skb);
386 tx_buf->skb = NULL;
387}
388
389static void mtk_pdma_tx_clean(struct mtk_eth *eth)
390{
391 struct mtk_tx_ring *ring = &eth->tx_ring;
392 int i;
393
394 if (ring->tx_buf) {
395 for (i = 0; i < ring->tx_ring_size; i++)
396 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
397 kfree(ring->tx_buf);
398 ring->tx_buf = NULL;
399 }
400
401 if (ring->tx_dma) {
402 dma_free_coherent(eth->dev,
403 ring->tx_ring_size * sizeof(*ring->tx_dma),
404 ring->tx_dma,
405 ring->tx_phys);
406 ring->tx_dma = NULL;
407 }
408}
409
410static void mtk_qdma_tx_clean(struct mtk_eth *eth)
411{
412 struct mtk_tx_ring *ring = &eth->tx_ring;
413 int i;
414
415 if (ring->tx_buf) {
416 for (i = 0; i < ring->tx_ring_size; i++)
417 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
418 kfree(ring->tx_buf);
419 ring->tx_buf = NULL;
420 }
421
422 if (ring->tx_dma) {
423 dma_free_coherent(eth->dev,
424 ring->tx_ring_size * sizeof(*ring->tx_dma),
425 ring->tx_dma,
426 ring->tx_phys);
427 ring->tx_dma = NULL;
428 }
429}
430
/* Fold this MAC's hardware MIB counters into the 64-bit software stats
 * under the u64_stats seqlock.  Register layout depends on
 * soc->new_stats; offsets are relative to the per-MAC counter base.
 */
431void mtk_stats_update_mac(struct mtk_mac *mac)
432{
433 struct mtk_hw_stats *hw_stats = mac->hw_stats;
434 unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
435 u64 stats;
436
437 base += hw_stats->reg_offset;
438
439 u64_stats_update_begin(&hw_stats->syncp);
440
441 if (mac->hw->soc->new_stats) {
442 hw_stats->rx_bytes += mtk_r32(mac->hw, base);
 /* high word of the 64-bit byte counter */
443 stats = mtk_r32(mac->hw, base + 0x04);
444 if (stats)
445 hw_stats->rx_bytes += (stats << 32);
446 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
447 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
448 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
449 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
450 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
451 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
452 hw_stats->rx_flow_control_packets +=
453 mtk_r32(mac->hw, base + 0x24);
454 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
455 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
456 hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
457 stats = mtk_r32(mac->hw, base + 0x34);
458 if (stats)
459 hw_stats->tx_bytes += (stats << 32);
460 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
461 } else {
462 hw_stats->tx_bytes += mtk_r32(mac->hw, base);
463 hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
464 hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
465 hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
466 hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
467 hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
468 hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
469 hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
470 hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
471 hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
472 hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
473 hw_stats->rx_flow_control_packets +=
474 mtk_r32(mac->hw, base + 0x3c);
475 }
476
477 u64_stats_update_end(&hw_stats->syncp);
478}
479
/* ndo_get_stats64: refresh the HW counters when the device is running
 * (best-effort trylock, skipped under contention) and copy a consistent
 * snapshot via the u64_stats retry loop.  Falls back to the generic
 * netdev stats when the SoC has no counter block.
 */
480static void mtk_get_stats64(struct net_device *dev,
481 struct rtnl_link_stats64 *storage)
482{
483 struct mtk_mac *mac = netdev_priv(dev);
484 struct mtk_hw_stats *hw_stats = mac->hw_stats;
485 unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
486 unsigned int start;
487
488 if (!base) {
489 netdev_stats_to_stats64(storage, &dev->stats);
490 return;
491 }
492
493 if (netif_running(dev) && netif_device_present(dev)) {
494 if (spin_trylock(&hw_stats->stats_lock)) {
495 mtk_stats_update_mac(mac);
496 spin_unlock(&hw_stats->stats_lock);
497 }
498 }
499
500 do {
501 start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
502 storage->rx_packets = hw_stats->rx_packets;
503 storage->tx_packets = hw_stats->tx_packets;
504 storage->rx_bytes = hw_stats->rx_bytes;
505 storage->tx_bytes = hw_stats->tx_bytes;
506 storage->collisions = hw_stats->tx_collisions;
507 storage->rx_length_errors = hw_stats->rx_short_errors +
508 hw_stats->rx_long_errors;
509 storage->rx_over_errors = hw_stats->rx_overflow;
510 storage->rx_crc_errors = hw_stats->rx_fcs_errors;
511 storage->rx_errors = hw_stats->rx_checksum_errors;
512 storage->tx_aborted_errors = hw_stats->tx_skip;
513 } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
514
515 storage->tx_errors = dev->stats.tx_errors;
516 storage->rx_dropped = dev->stats.rx_dropped;
517 storage->tx_dropped = dev->stats.tx_dropped;
518}
519
/* Program a VID into the 16-entry TX VLAN table.  The table is indexed
 * by the low 4 bits of the VID, two 16-bit entries per 32-bit register;
 * on an index collision TX VLAN offload is disabled instead.
 */
520static int mtk_vlan_rx_add_vid(struct net_device *dev,
521 __be16 proto, u16 vid)
522{
523 struct mtk_mac *mac = netdev_priv(dev);
524 struct mtk_eth *eth = mac->hw;
525 u32 idx = (vid & 0xf);
526 u32 vlan_cfg;
527
528 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
529 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
530 return 0;
531
532 if (test_bit(idx, &eth->vlan_map)) {
533 netdev_warn(dev, "disable tx vlan offload\n");
534 dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
535 netdev_update_features(dev);
536 } else {
 /* two VIDs share each register: odd index in the high half */
537 vlan_cfg = mtk_r32(eth,
538 mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
539 ((idx >> 1) << 2));
540 if (idx & 0x1) {
541 vlan_cfg &= 0xffff;
542 vlan_cfg |= (vid << 16);
543 } else {
544 vlan_cfg &= 0xffff0000;
545 vlan_cfg |= vid;
546 }
547 mtk_w32(eth,
548 vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
549 ((idx >> 1) << 2));
550 set_bit(idx, &eth->vlan_map);
551 }
552
553 return 0;
554}
555
556static int mtk_vlan_rx_kill_vid(struct net_device *dev,
557 __be16 proto, u16 vid)
558{
559 struct mtk_mac *mac = netdev_priv(dev);
560 struct mtk_eth *eth = mac->hw;
561 u32 idx = (vid & 0xf);
562
563 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
564 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
565 return 0;
566
567 clear_bit(idx, &eth->vlan_map);
568
569 return 0;
570}
571
/* Number of free PDMA TX descriptors: ring size minus the in-flight
 * span between next and free index (power-of-two ring arithmetic).
 */
572static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
573{
 /* compiler barrier: force a fresh read of the ring indices */
574 barrier();
575 return (u32)(ring->tx_ring_size -
576 ((ring->tx_next_idx - ring->tx_free_idx) &
577 (ring->tx_ring_size - 1)));
578}
579
/* Pad short frames to the hardware's minimum TX length; which minimum
 * applies depends on VLAN tagging and the SoC's 64-byte padding quirk
 * (padding_64b / padding_bug).  Returns 0 on success or when no padding
 * is needed, otherwise the skb_pad() error.
 */
580static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
581{
582 unsigned int len;
583 int ret;
584
585 if (unlikely(skb->len >= VLAN_ETH_ZLEN))
586 return 0;
587
588 if (eth->soc->padding_64b && !eth->soc->padding_bug)
589 return 0;
590
591 if (skb_vlan_tag_present(skb))
592 len = ETH_ZLEN;
593 else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
594 len = VLAN_ETH_ZLEN;
595 else if (!eth->soc->padding_64b)
596 len = ETH_ZLEN;
597 else
598 return 0;
599
600 if (skb->len >= len)
601 return 0;
602
 /* NOTE(review): skb_pad() frees the skb on failure — confirm the
  * caller does not reuse it after a negative return.
  */
603 ret = skb_pad(skb, len - skb->len);
604 if (ret < 0)
605 return ret;
606 skb->len = len;
607 skb_set_tail_pointer(skb, len);
608
609 return ret;
610}
611
/* Map an skb onto the PDMA tx ring and kick the hardware.
 *
 * Each PDMA descriptor carries up to two buffers (txd1/txd2 for the
 * first, txd3 + the PLEN1 field for the second); @k counts buffers so
 * (k & 1) selects which half of the current descriptor to fill.  The
 * head buffer always occupies slot 0 of the first descriptor; fragments
 * larger than TX_DMA_BUF_LEN are split across multiple buffers.
 *
 * Returns 0 on success, -1 on padding or DMA-mapping failure (already
 * mapped fragments are unwound on the error path).  Caller guarantees
 * at least @tx_num free descriptors.
 */
static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
			   int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct skb_frag_struct *frag;
	struct mtk_tx_dma txd, *ptxd;
	struct mtk_tx_buf *tx_buf;
	int i, j, k, frag_size, frag_map_size, offset;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	u32 def_txd4;

	/* NB: on failure skb_pad() inside has already freed the skb */
	if (mtk_skb_padto(skb, eth)) {
		netif_warn(eth, tx_err, dev, "tx padding failed!\n");
		return -1;
	}

	tx_buf = &ring->tx_buf[ring->tx_next_idx];
	memset(tx_buf, 0, sizeof(*tx_buf));
	/* build the descriptor in a local copy; it is written to the
	 * ring only when complete
	 */
	memset(&txd, 0, sizeof(txd));
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* init tx descriptor */
	def_txd4 = eth->soc->txd4;
	txd.txd4 = def_txd4;

	/* forward port: hardware numbers macs from 1 */
	if (eth->soc->mac_count > 1)
		txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	if (gso)
		txd.txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb)) {
		u16 tag = skb_vlan_tag_get(skb);

		txd.txd4 |= TX_DMA_INS_VLAN |
			    ((tag >> VLAN_PRIO_SHIFT) << 4) |
			    (tag & 0xF);
	}

	/* linear part of the skb goes into buffer slot 0 */
	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -1;

	txd.txd1 = mapped_addr;
	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));

	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	j = ring->tx_next_idx;
	k = 0;
	for (i = 0; i < nr_frags; i++) {
		offset = 0;
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);

		/* split fragments bigger than one buffer can hold */
		while (frag_size > 0) {
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (k & 0x1) {
				/* odd buffer: start a new descriptor,
				 * fill its first slot
				 */
				j = NEXT_TX_DESP_IDX(j);
				txd.txd1 = mapped_addr;
				txd.txd2 = TX_DMA_PLEN0(frag_map_size);
				txd.txd4 = def_txd4;

				tx_buf = &ring->tx_buf[j];
				memset(tx_buf, 0, sizeof(*tx_buf));

				tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
				dma_unmap_addr_set(tx_buf, dma_addr0,
						   mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len0,
						  frag_map_size);
			} else {
				/* even buffer: second slot of the
				 * current descriptor
				 */
				txd.txd3 = mapped_addr;
				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);

				/* mark slot as part of a multi-buffer skb */
				tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
				tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
				dma_unmap_addr_set(tx_buf, dma_addr1,
						   mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len1,
						  frag_map_size);

				/* flush the full descriptor unless this
				 * is the very last buffer of the skb
				 */
				if (!((i == (nr_frags - 1)) &&
				      (frag_map_size == frag_size))) {
					mtk_set_txd_pdma(&txd,
							 &ring->tx_dma[j]);
					memset(&txd, 0, sizeof(txd));
				}
			}
			frag_size -= frag_map_size;
			offset += frag_map_size;
			k++;
		}
	}

	/* set last segment */
	if (k & 0x1)
		txd.txd2 |= TX_DMA_LS1;
	else
		txd.txd2 |= TX_DMA_LS0;
	mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);

	/* store skb to cleanup */
	tx_buf->skb = skb;

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

	/* kick the DMA only when xmit_more batching ends or the queue
	 * is being stopped
	 */
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	/* unwind every buffer mapped so far and reset the descriptors */
	j = ring->tx_next_idx;
	for (i = 0; i < tx_num; i++) {
		ptxd = &ring->tx_dma[j];
		tx_buf = &ring->tx_buf[j];

		/* unmap dma */
		mtk_txd_unmap(&dev->dev, tx_buf);

		ptxd->txd2 = TX_DMA_DESP2_DEF;
		j = NEXT_TX_DESP_IDX(j);
	}
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	return -1;
}
767
768/* the qdma core needs scratch memory to be setup */
769static int mtk_init_fq_dma(struct mtk_eth *eth)
770{
771 dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
772 int cnt = eth->soc->dma_ring_size;
773 int i;
774
775 eth->scratch_ring = dma_alloc_coherent(eth->dev,
776 cnt * sizeof(struct mtk_tx_dma),
777 &phy_ring_head,
778 GFP_ATOMIC | __GFP_ZERO);
779 if (unlikely(!eth->scratch_ring))
780 return -ENOMEM;
781
782 eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
783 GFP_KERNEL);
784 dma_addr = dma_map_single(eth->dev,
785 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
786 DMA_FROM_DEVICE);
787 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
788 return -ENOMEM;
789
790 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
791 phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
792
793 for (i = 0; i < cnt; i++) {
794 eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
795 if (i < cnt - 1)
796 eth->scratch_ring[i].txd2 = (phy_ring_head +
797 ((i + 1) * sizeof(struct mtk_tx_dma)));
798 eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
799 }
800
801 mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
802 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
803 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
804 mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
805
806 return 0;
807}
808
809static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
810{
811 void *ret = ring->tx_dma;
812
813 return ret + (desc - ring->tx_phys);
814}
815
816static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
817 struct mtk_tx_dma *txd)
818{
819 return mtk_qdma_phys_to_virt(ring, txd->txd2);
820}
821
822static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
823 struct mtk_tx_dma *txd)
824{
825 int idx = txd - ring->tx_dma;
826
827 return &ring->tx_buf[idx];
828}
829
/* Map an skb onto the QDMA tx ring.
 *
 * Unlike PDMA, QDMA descriptors are a linked list (txd2 = next phys):
 * the head descriptor (@itxd) takes the linear data and keeps ownership
 * flags clear until the very end, so the engine cannot start on a
 * half-built chain.  Fragments larger than TX_DMA_BUF_LEN are split.
 *
 * Returns 0 on success, -ENOMEM if the ring is full or mapping fails
 * (already-mapped descriptors are unwound on the error path).
 */
static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
			   int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = eth->soc->txd4;

	itxd = ring->tx_next_free;
	if (itxd == ring->tx_last_free)
		return -ENOMEM;

	/* forward port: hardware numbers macs from 1 */
	if (eth->soc->mac_count > 1)
		txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -ENOMEM;

	/* WRITE_ONCE: descriptors live in coherent memory the hw reads */
	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			/* advance along the descriptor chain; stop if we
			 * would collide with the reclaim pointer
			 */
			txd = mtk_tx_next_qdma(ring, txd);
			if (txd == ring->tx_last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0) |
					       mac->id);
			WRITE_ONCE(txd->txd4, 0);

			/* previous slot becomes a continuation marker;
			 * the real skb pointer lands on the last slot
			 */
			tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	/* publish the head descriptor last: writing txd3 hands the
	 * whole chain to the hardware
	 */
	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
	atomic_sub(n_desc, &ring->tx_free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	/* kick the DMA only when xmit_more batching ends */
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	/* walk the chain from the head and unmap everything we touched */
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, txd);

		/* unmap dma */
		mtk_txd_unmap(&dev->dev, tx_buf);

		itxd->txd3 = TX_DMA_DESP2_DEF;
		itxd = mtk_tx_next_qdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}
956
957static inline int mtk_cal_txd_req(struct sk_buff *skb)
958{
959 int i, nfrags;
960 struct skb_frag_struct *frag;
961
962 nfrags = 1;
963 if (skb_is_gso(skb)) {
964 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
965 frag = &skb_shinfo(skb)->frags[i];
966 nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
967 }
968 } else {
969 nfrags += skb_shinfo(skb)->nr_frags;
970 }
971
972 return DIV_ROUND_UP(nfrags, 2);
973}
974
/* ndo_start_xmit handler: reserve ring space, prepare TSO/checksum
 * offload metadata and hand the skb to the active (PDMA or QDMA)
 * mapper via ring->tx_map.  Stops the queue when the ring runs low and
 * re-checks after the barrier to avoid a lost wake-up against the
 * completion path.
 */
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	int tx_num;
	int len = skb->len;	/* saved: tx_map may consume the skb */
	bool gso = false;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		/* header must be writable to stash the MSS */
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	stats->tx_packets++;
	stats->tx_bytes += len;

	/* low-water check; the barrier pairs with the completion path
	 * so a concurrent reclaim cannot leave the queue stopped
	 */
	if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
		netif_stop_queue(dev);
		smp_mb();
		if (unlikely(atomic_read(&ring->tx_free_count) >
			     ring->tx_thresh))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

drop:
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1029
1030static int mtk_poll_rx(struct napi_struct *napi, int budget,
1031 struct mtk_eth *eth, u32 rx_intr)
1032{
1033 struct mtk_soc_data *soc = eth->soc;
1034 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1035 int idx = ring->rx_calc_idx;
1036 u32 checksum_bit;
1037 struct sk_buff *skb;
1038 u8 *data, *new_data;
1039 struct mtk_rx_dma *rxd, trxd;
1040 int done = 0, pad;
1041
1042 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1043 checksum_bit = soc->checksum_bit;
1044 else
1045 checksum_bit = 0;
1046
1047 if (eth->soc->rx_2b_offset)
1048 pad = 0;
1049 else
1050 pad = NET_IP_ALIGN;
1051
1052 while (done < budget) {
1053 struct net_device *netdev;
1054 unsigned int pktlen;
1055 dma_addr_t dma_addr;
1056 int mac = 0;
1057
1058 idx = NEXT_RX_DESP_IDX(idx);
1059 rxd = &ring->rx_dma[idx];
1060 data = ring->rx_data[idx];
1061
1062 mtk_get_rxd(&trxd, rxd);
1063 if (!(trxd.rxd2 & RX_DMA_DONE))
1064 break;
1065
1066 /* find out which mac the packet come from. values start at 1 */
1067 if (eth->soc->mac_count > 1) {
1068 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1069 RX_DMA_FPORT_MASK;
1070 mac--;
1071 if (mac < 0 || mac >= eth->soc->mac_count)
1072 goto release_desc;
1073 }
1074
1075 netdev = eth->netdev[mac];
1076
1077 /* alloc new buffer */
1078 new_data = napi_alloc_frag(ring->frag_size);
1079 if (unlikely(!new_data || !netdev)) {
1080 netdev->stats.rx_dropped++;
1081 goto release_desc;
1082 }
1083 dma_addr = dma_map_single(&netdev->dev,
1084 new_data + NET_SKB_PAD + pad,
1085 ring->rx_buf_size,
1086 DMA_FROM_DEVICE);
1087 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
1088 skb_free_frag(new_data);
1089 goto release_desc;
1090 }
1091
1092 /* receive data */
1093 skb = build_skb(data, ring->frag_size);
1094 if (unlikely(!skb)) {
1095 put_page(virt_to_head_page(new_data));
1096 goto release_desc;
1097 }
1098 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1099
1100 dma_unmap_single(&netdev->dev, trxd.rxd1,
1101 ring->rx_buf_size, DMA_FROM_DEVICE);
1102 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1103 skb->dev = netdev;
1104 skb_put(skb, pktlen);
1105 if (trxd.rxd4 & checksum_bit)
1106 skb->ip_summed = CHECKSUM_UNNECESSARY;
1107 else
1108 skb_checksum_none_assert(skb);
1109 skb->protocol = eth_type_trans(skb, netdev);
1110
1111 netdev->stats.rx_packets++;
1112 netdev->stats.rx_bytes += pktlen;
1113
1114 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1115 RX_DMA_VID(trxd.rxd3))
1116 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1117 RX_DMA_VID(trxd.rxd3));
1118 napi_gro_receive(napi, skb);
1119
1120 ring->rx_data[idx] = new_data;
1121 rxd->rxd1 = (unsigned int)dma_addr;
1122
1123release_desc:
1124 if (eth->soc->rx_sg_dma)
1125 rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
1126 else
1127 rxd->rxd2 = RX_DMA_LSO;
1128
1129 ring->rx_calc_idx = idx;
1130 /* make sure that all changes to the dma ring are flushed before
1131 * we continue
1132 */
1133 wmb();
1134 if (eth->soc->dma_type == MTK_QDMA)
1135 mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
1136 else
1137 mtk_reg_w32(eth, ring->rx_calc_idx,
1138 MTK_REG_RX_CALC_IDX0);
1139 done++;
1140 }
1141
1142 if (done < budget)
1143 mtk_irq_ack(eth, rx_intr);
1144
1145 return done;
1146}
1147
/* Reclaim completed PDMA tx descriptors up to @budget.
 *
 * Walks from the software free index to the hardware's DTX index,
 * unmapping every buffer; entries marked DMA_DUMMY_DESC are
 * continuation slots of a multi-buffer skb and are not counted as
 * completed packets.  Sets *tx_again when more work remains so the
 * NAPI handler keeps polling.  Returns the number of skbs reclaimed.
 */
static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int done = 0;
	u32 idx, hwidx;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int bytes = 0;

	idx = ring->tx_free_idx;
	hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);

	while ((idx != hwidx) && budget) {
		tx_buf = &ring->tx_buf[idx];
		skb = tx_buf->skb;

		/* no skb pointer yet: descriptor not fully published */
		if (!skb)
			break;

		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
			bytes += skb->len;
			done++;
			budget--;
		}
		mtk_txd_unmap(eth->dev, tx_buf);
		idx = NEXT_TX_DESP_IDX(idx);
	}
	ring->tx_free_idx = idx;
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

	/* read hw index again make sure no new tx packet */
	if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
		*tx_again = 1;

	if (done)
		netdev_completed_queue(*eth->netdev, done, bytes);

	return done;
}
1187
1188static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
1189{
1190 struct mtk_tx_ring *ring = &eth->tx_ring;
1191 struct mtk_tx_dma *desc;
1192 struct sk_buff *skb;
1193 struct mtk_tx_buf *tx_buf;
1194 int total = 0, done[MTK_MAX_DEVS];
1195 unsigned int bytes[MTK_MAX_DEVS];
1196 u32 cpu, dma;
1197 int i;
1198
1199 memset(done, 0, sizeof(done));
1200 memset(bytes, 0, sizeof(bytes));
1201
1202 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1203 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1204
1205 desc = mtk_qdma_phys_to_virt(ring, cpu);
1206
1207 while ((cpu != dma) && budget) {
1208 u32 next_cpu = desc->txd2;
1209 int mac;
1210
1211 desc = mtk_tx_next_qdma(ring, desc);
1212 if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
1213 break;
1214
1215 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
1216 TX_DMA_FPORT_MASK;
1217 mac--;
1218
1219 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1220 skb = tx_buf->skb;
1221 if (!skb)
1222 break;
1223
1224 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
1225 bytes[mac] += skb->len;
1226 done[mac]++;
1227 budget--;
1228 }
1229 mtk_txd_unmap(eth->dev, tx_buf);
1230
1231 ring->tx_last_free->txd2 = next_cpu;
1232 ring->tx_last_free = desc;
1233 atomic_inc(&ring->tx_free_count);
1234
1235 cpu = next_cpu;
1236 }
1237
1238 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1239
1240 /* read hw index again make sure no new tx packet */
1241 if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
1242 *tx_again = true;
1243
1244 for (i = 0; i < eth->soc->mac_count; i++) {
1245 if (!done[i])
1246 continue;
1247 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1248 total += done[i];
1249 }
1250
1251 return total;
1252}
1253
/* Common tx completion entry point: dispatch to the PDMA/QDMA specific
 * reclaimer, ack the tx interrupt when the ring drained completely,
 * and wake the queue if it was stopped and enough descriptors are free
 * again.  The barrier pairs with the stop-queue sequence in
 * mtk_start_xmit().  Returns the number of reclaimed skbs.
 */
static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
		       bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device *netdev = eth->netdev[0];
	int done;

	done = eth->tx_ring.tx_poll(eth, budget, tx_again);
	if (!*tx_again)
		mtk_irq_ack(eth, tx_intr);

	if (!done)
		return 0;

	smp_mb();
	if (unlikely(!netif_queue_stopped(netdev)))
		return done;

	if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
		netif_wake_queue(netdev);

	return done;
}
1277
1278static void mtk_stats_update(struct mtk_eth *eth)
1279{
1280 int i;
1281
1282 for (i = 0; i < eth->soc->mac_count; i++) {
1283 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1284 continue;
1285 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1286 mtk_stats_update_mac(eth->mac[i]);
1287 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1288 }
1289 }
1290}
1291
/* Main NAPI handler: services tx completion, rx and (rarely) the
 * counter-overflow status interrupt from one poll loop.  Re-arms the
 * interrupts only after a final pending-check so no event can be lost
 * between napi_complete() and mtk_irq_enable().
 */
static int mtk_poll(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
	int tx_done, rx_done;
	bool tx_again = false;

	status = mtk_irq_pending(eth);
	mtk_status = mtk_irq_pending_status(eth);
	tx_intr = eth->soc->tx_int;
	rx_intr = eth->soc->rx_int;
	status_intr = eth->soc->status_int;
	tx_done = 0;
	rx_done = 0;
	tx_again = 0;

	if (status & tx_intr)
		tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);

	if (status & rx_intr)
		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);

	/* hardware counter overflow: fold counters into software stats */
	if (unlikely(mtk_status & status_intr)) {
		mtk_stats_update(eth);
		mtk_irq_ack_status(eth, status_intr);
	}

	if (unlikely(netif_msg_intr(eth))) {
		mask = mtk_irq_enabled(eth);
		netdev_info(eth->netdev[0],
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	/* stay in polling mode while work remains */
	if (tx_again || rx_done == budget)
		return budget;

	/* a new event may have arrived during this poll; keep polling */
	status = mtk_irq_pending(eth);
	if (status & (tx_intr | rx_intr))
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, tx_intr | rx_intr);

	return rx_done;
}
1338
1339static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
1340{
1341 int i;
1342 struct mtk_tx_ring *ring = &eth->tx_ring;
1343
1344 ring->tx_ring_size = eth->soc->dma_ring_size;
1345 ring->tx_free_idx = 0;
1346 ring->tx_next_idx = 0;
1347 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
1348 MAX_SKB_FRAGS);
1349
1350 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
1351 GFP_KERNEL);
1352 if (!ring->tx_buf)
1353 goto no_tx_mem;
1354
1355 ring->tx_dma =
1356 dma_alloc_coherent(eth->dev,
1357 ring->tx_ring_size * sizeof(*ring->tx_dma),
1358 &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
1359 if (!ring->tx_dma)
1360 goto no_tx_mem;
1361
1362 for (i = 0; i < ring->tx_ring_size; i++) {
1363 ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
1364 ring->tx_dma[i].txd4 = eth->soc->txd4;
1365 }
1366
1367 atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
1368 ring->tx_map = mtk_pdma_tx_map;
1369 ring->tx_poll = mtk_pdma_tx_poll;
1370 ring->tx_clean = mtk_pdma_tx_clean;
1371
1372 /* make sure that all changes to the dma ring are flushed before we
1373 * continue
1374 */
1375 wmb();
1376
1377 mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
1378 mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
1379 mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
1380 mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
1381
1382 return 0;
1383
1384no_tx_mem:
1385 return -ENOMEM;
1386}
1387
1388static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
1389{
1390 struct mtk_tx_ring *ring = &eth->tx_ring;
1391 int i, sz = sizeof(*ring->tx_dma);
1392
1393 ring->tx_ring_size = eth->soc->dma_ring_size;
1394 ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
1395 GFP_KERNEL);
1396 if (!ring->tx_buf)
1397 goto no_tx_mem;
1398
1399 ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
1400 &ring->tx_phys,
1401 GFP_ATOMIC | __GFP_ZERO);
1402 if (!ring->tx_dma)
1403 goto no_tx_mem;
1404
1405 for (i = 0; i < ring->tx_ring_size; i++) {
1406 int next = (i + 1) % ring->tx_ring_size;
1407 u32 next_ptr = ring->tx_phys + next * sz;
1408
1409 ring->tx_dma[i].txd2 = next_ptr;
1410 ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
1411 }
1412
1413 atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
1414 ring->tx_next_free = &ring->tx_dma[0];
1415 ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
1416 ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
1417 MAX_SKB_FRAGS);
1418
1419 ring->tx_map = mtk_qdma_tx_map;
1420 ring->tx_poll = mtk_qdma_tx_poll;
1421 ring->tx_clean = mtk_qdma_tx_clean;
1422
1423 /* make sure that all changes to the dma ring are flushed before we
1424 * continue
1425 */
1426 wmb();
1427
1428 mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
1429 mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
1430 mtk_w32(eth,
1431 ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
1432 MTK_QTX_CRX_PTR);
1433 mtk_w32(eth,
1434 ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
1435 MTK_QTX_DRX_PTR);
1436
1437 return 0;
1438
1439no_tx_mem:
1440 return -ENOMEM;
1441}
1442
/* Bring up the QDMA engine: scratch free-queue memory, the tx ring and
 * rx ring @ring, then program the rx ring registers and flow-control
 * thresholds.  Returns 0 on success or a negative errno from any
 * allocation step (caller is responsible for cleanup via
 * mtk_dma_free()).
 */
static int mtk_qdma_init(struct mtk_eth *eth, int ring)
{
	int err;

	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_qdma_tx_alloc_tx(eth);
	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
	if (err)
		return err;

	mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
	mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
	mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}
1471
/* Hybrid setup for SoCs that use QDMA for tx but PDMA for the primary
 * rx path: QDMA gets rx ring 1, then PDMA rx ring 0 is allocated and
 * programmed here.  Returns 0 on success or a negative errno.
 */
static int mtk_pdma_qdma_init(struct mtk_eth *eth)
{
	int err = mtk_qdma_init(eth, 1);

	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
	if (err)
		return err;

	mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
	mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
	mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
	mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);

	return 0;
}
1490
/* Bring up the pure-PDMA configuration: allocate the tx ring and rx
 * ring 0, then program the rx ring registers.  Returns 0 on success or
 * a negative errno (caller cleans up via mtk_dma_free()).
 */
static int mtk_pdma_init(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring = &eth->rx_ring[0];
	int err;

	err = mtk_pdma_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, ring);
	if (err)
		return err;

	mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
	mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
	mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
	mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);

	return 0;
}
1511
1512static void mtk_dma_free(struct mtk_eth *eth)
1513{
1514 int i;
1515
1516 for (i = 0; i < eth->soc->mac_count; i++)
1517 if (eth->netdev[i])
1518 netdev_reset_queue(eth->netdev[i]);
1519 eth->tx_ring.tx_clean(eth);
1520 mtk_clean_rx(eth, &eth->rx_ring[0]);
1521 mtk_clean_rx(eth, &eth->rx_ring[1]);
1522 kfree(eth->scratch_head);
1523}
1524
/* ndo_tx_timeout handler: bump the error counter, dump the DMA ring
 * state relevant to the active engine(s) for diagnosis, and schedule
 * the per-mac pending_work to recover the interface.
 */
static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	/* dma_type is a bitmask: both branches may run in hybrid mode */
	if (eth->soc->dma_type & MTK_PDMA) {
		netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
			   mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
		netif_info(eth, drv, dev,
			   "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
			   0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
			   mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
			   mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
			   mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
			   ring->tx_free_idx,
			   ring->tx_next_idx);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
			   mtk_r32(eth, MTK_QDMA_GLO_CFG));
		netif_info(eth, drv, dev,
			   "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
			   0, mtk_r32(eth, MTK_QTX_CTX_PTR),
			   mtk_r32(eth, MTK_QTX_DTX_PTR),
			   mtk_r32(eth, MTK_QTX_CRX_PTR),
			   mtk_r32(eth, MTK_QTX_DRX_PTR),
			   atomic_read(&ring->tx_free_count));
	}
	netif_info(eth, drv, dev,
		   "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
		   0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
		   mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
		   mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
		   mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));

	schedule_work(&mac->pending_work);
}
1566
/* Top-half interrupt handler.  @_eth is the struct mtk_eth cookie
 * registered with devm_request_irq().  Rx/tx events are deferred to
 * NAPI (with those sources masked until mtk_poll() re-enables them);
 * any other pending source is simply acked.
 */
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 status, int_mask;

	status = mtk_irq_pending(eth);
	if (unlikely(!status))
		return IRQ_NONE;

	int_mask = (eth->soc->rx_int | eth->soc->tx_int);
	if (likely(status & int_mask)) {
		if (likely(napi_schedule_prep(&eth->rx_napi)))
			__napi_schedule(&eth->rx_napi);
	} else {
		mtk_irq_ack(eth, status);
	}
	/* mask rx/tx until the NAPI poll has drained them */
	mtk_irq_disable(eth, int_mask);

	return IRQ_HANDLED;
}
1587
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: run the interrupt handler synchronously with
 * rx/tx interrupts masked so netconsole et al. can make progress
 * without interrupt delivery.
 */
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;

	mtk_irq_disable(eth, int_mask);
	/* BUGFIX: mtk_handle_irq() casts its cookie to struct mtk_eth *;
	 * the old code passed the net_device instead, so the handler
	 * operated on garbage — pass eth, matching devm_request_irq()
	 */
	mtk_handle_irq(dev->irq, eth);
	mtk_irq_enable(eth, int_mask);
}
#endif
1600
1601int mtk_set_clock_cycle(struct mtk_eth *eth)
1602{
1603 unsigned long sysclk = eth->sysclk;
1604
1605 sysclk /= MTK_US_CYC_CNT_DIVISOR;
1606 sysclk <<= MTK_US_CYC_CNT_SHIFT;
1607
1608 mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
1609 ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
1610 sysclk,
1611 MTK_GLO_CFG);
1612 return 0;
1613}
1614
1615void mtk_fwd_config(struct mtk_eth *eth)
1616{
1617 u32 fwd_cfg;
1618
1619 fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
1620
1621 /* disable jumbo frame */
1622 if (eth->soc->jumbo_frame)
1623 fwd_cfg &= ~MTK_GDM1_JMB_EN;
1624
1625 /* set unicast/multicast/broadcast frame to cpu */
1626 fwd_cfg &= ~0xffff;
1627
1628 mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
1629}
1630
1631void mtk_csum_config(struct mtk_eth *eth)
1632{
1633 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1634 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
1635 (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1636 MTK_GDMA1_FWD_CFG);
1637 else
1638 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
1639 ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1640 MTK_GDMA1_FWD_CFG);
1641 if (eth->soc->hw_features & NETIF_F_IP_CSUM)
1642 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
1643 (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1644 MTK_CDMA_CSG_CFG);
1645 else
1646 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
1647 ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1648 MTK_CDMA_CSG_CFG);
1649}
1650
/* Allocate the rings for whichever DMA engine(s) the SoC uses, then
 * enable tx/rx DMA and write-back-done in the global config register(s)
 * under the page lock.  Returns 0 on success; on ring allocation
 * failure everything is torn down via mtk_dma_free() and the error is
 * propagated.
 */
static int mtk_start_dma(struct mtk_eth *eth)
{
	unsigned long flags;
	u32 val;
	int err;

	if (eth->soc->dma_type == MTK_PDMA)
		err = mtk_pdma_init(eth);
	else if (eth->soc->dma_type == MTK_QDMA)
		err = mtk_qdma_init(eth, 0);
	else
		/* hybrid: QDMA tx + PDMA rx */
		err = mtk_pdma_qdma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	spin_lock_irqsave(&eth->page_lock, flags);

	val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
	if (eth->soc->rx_2b_offset)
		val |= MTK_RX_2B_OFFSET;
	val |= eth->soc->pdma_glo_cfg;

	/* dma_type is a bitmask: hybrid SoCs program both engines */
	if (eth->soc->dma_type & MTK_PDMA)
		mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);

	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, val, MTK_QDMA_GLO_CFG);

	spin_unlock_irqrestore(&eth->page_lock, flags);

	return 0;
}
1685
/* ndo_open handler.  The DMA engine, NAPI context and interrupts are
 * shared by all macs, so they are only started when the first netdev
 * opens (dma_refcnt tracks the open count).  Per-netdev work — PHY
 * start, carrier state, queue start — runs on every open.
 */
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));

	/* first opener brings up the shared DMA/NAPI/interrupt state */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
	}
	atomic_inc(&eth->dma_refcnt);

	if (eth->phy)
		eth->phy->start(mac);

	if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
		netif_carrier_on(dev);

	netif_start_queue(dev);
	eth->soc->fwd_config(eth);

	return 0;
}
1715
1716static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1717{
1718 unsigned long flags;
1719 u32 val;
1720 int i;
1721
1722 /* stop the dma enfine */
1723 spin_lock_irqsave(&eth->page_lock, flags);
1724 val = mtk_r32(eth, glo_cfg);
1725 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1726 glo_cfg);
1727 spin_unlock_irqrestore(&eth->page_lock, flags);
1728
1729 /* wait for dma stop */
1730 for (i = 0; i < 10; i++) {
1731 val = mtk_r32(eth, glo_cfg);
1732 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1733 msleep(20);
1734 continue;
1735 }
1736 break;
1737 }
1738}
1739
/* ndo_stop handler: mirror of mtk_open().  The per-netdev parts (queue,
 * PHY) are always stopped; the shared DMA/NAPI/interrupt state is only
 * torn down when the last open netdev closes (dma_refcnt reaches 0).
 * Always returns 0.
 */
static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	if (eth->phy)
		eth->phy->stop(mac);

	/* other netdevs still open: keep the shared state running */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
	napi_disable(&eth->rx_napi);

	/* dma_type is a bitmask: hybrid SoCs stop both engines */
	if (eth->soc->dma_type & MTK_PDMA)
		mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);

	if (eth->soc->dma_type & MTK_QDMA)
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}
1765
/* One-time hardware bring-up: reset the frame engine, initialize the
 * switch core (if the SoC has one), request the interrupt line, set up
 * the MDIO bus, mask interrupts, pre-fill the hardware VLAN VID table
 * and apply the SoC forwarding configuration.  Returns 0 on success or
 * a negative errno.
 */
static int __init mtk_init_hw(struct mtk_eth *eth)
{
	int i, err;

	eth->soc->reset_fe(eth);

	if (eth->soc->switch_init)
		if (eth->soc->switch_init(eth)) {
			dev_err(eth->dev, "failed to initialize switch core\n");
			return -ENODEV;
		}

	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delay and normal interrupt */
	mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);

	/* frame engine will push VLAN tag regarding to VIDX field in Tx desc */
	if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			mtk_w32(eth, ((i + 1) << 16) + i,
				mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
				(i * 2));

	/* NOTE(review): the error text looks copied from a clock helper;
	 * fwd_config failure is not necessarily clock related — verify
	 */
	if (eth->soc->fwd_config(eth))
		dev_err(eth->dev, "unable to get clock\n");

	/* pulse the global reset bit where the SoC provides one */
	if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
		mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
		mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
	}

	return 0;
}
1810
1811static int __init mtk_init(struct net_device *dev)
1812{
1813 struct mtk_mac *mac = netdev_priv(dev);
1814 struct mtk_eth *eth = mac->hw;
1815 struct device_node *port;
1816 const char *mac_addr;
1817 int err;
1818
1819 mac_addr = of_get_mac_address(mac->of_node);
1820 if (mac_addr)
1821 ether_addr_copy(dev->dev_addr, mac_addr);
1822
1823 /* If the mac address is invalid, use random mac address */
1824 if (!is_valid_ether_addr(dev->dev_addr)) {
1825 eth_hw_addr_random(dev);
1826 dev_err(eth->dev, "generated random MAC address %pM\n",
1827 dev->dev_addr);
1828 }
1829 mac->hw->soc->set_mac(mac, dev->dev_addr);
1830
1831 if (eth->soc->port_init)
1832 for_each_child_of_node(mac->of_node, port)
1833 if (of_device_is_compatible(port,
1834 "mediatek,eth-port") &&
1835 of_device_is_available(port))
1836 eth->soc->port_init(eth, mac, port);
1837
1838 if (eth->phy) {
1839 err = eth->phy->connect(mac);
1840 if (err)
1841 return err;
1842 }
1843
1844 return 0;
1845}
1846
1847static void mtk_uninit(struct net_device *dev)
1848{
1849 struct mtk_mac *mac = netdev_priv(dev);
1850 struct mtk_eth *eth = mac->hw;
1851
1852 if (eth->phy)
1853 eth->phy->disconnect(mac);
1854 mtk_mdio_cleanup(eth);
1855
1856 mtk_irq_disable(eth, ~0);
1857 free_irq(dev->irq, dev);
1858}
1859
1860static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1861{
1862 struct mtk_mac *mac = netdev_priv(dev);
1863
1864 if (!mac->phy_dev)
1865 return -ENODEV;
1866
1867 switch (cmd) {
1868 case SIOCGMIIPHY:
1869 case SIOCGMIIREG:
1870 case SIOCSMIIREG:
1871 return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
1872 default:
1873 break;
1874 }
1875
1876 return -EOPNOTSUPP;
1877}
1878
/* ndo_change_mtu callback.
 *
 * SoCs without jumbo-frame support fall back to the generic helper.
 * Otherwise the new MTU is validated against what fits in a page, the
 * RX buffer geometry is recomputed, and - if the interface is running -
 * the device is bounced (stop, reprogram GDMA1_FWD_CFG jumbo bits,
 * open).  The stop/reprogram/open ordering matters because the open
 * path reallocates the DMA rings with the new buffer sizes.
 */
static int mtk_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int frag_size, old_mtu;
	u32 fwd_cfg;

	if (!eth->soc->jumbo_frame)
		return eth_change_mtu(dev, new_mtu);

	/* 68 is the historical minimum IPv4 MTU; larger frames must
	 * still fit into a single page fragment
	 */
	frag_size = mtk_max_frag_size(new_mtu);
	if (new_mtu < 68 || frag_size > PAGE_SIZE)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	/* crossing the standard/jumbo boundary: resize the RX buffers */
	if (new_mtu <= ETH_DATA_LEN)
		eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	else
		eth->rx_ring[0].frag_size = PAGE_SIZE;
	eth->rx_ring[0].rx_buf_size =
		mtk_max_buf_size(eth->rx_ring[0].frag_size);

	if (!netif_running(dev))
		return 0;

	mtk_stop(dev);
	fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
	if (new_mtu <= ETH_DATA_LEN) {
		fwd_cfg &= ~MTK_GDM1_JMB_EN;
	} else {
		/* encode the jumbo frame length in 1 KiB units */
		fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
			    MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
	}
	mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);

	return mtk_open(dev);
}
1925
1926static void mtk_pending_work(struct work_struct *work)
1927{
1928 struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
1929 struct mtk_eth *eth = mac->hw;
1930 struct net_device *dev = eth->netdev[mac->id];
1931 int err;
1932
1933 rtnl_lock();
1934 mtk_stop(dev);
1935
1936 err = mtk_open(dev);
1937 if (err) {
1938 netif_alert(eth, ifup, dev,
1939 "Driver up/down cycle failed, closing device.\n");
1940 dev_close(dev);
1941 }
1942 rtnl_unlock();
1943}
1944
1945static int mtk_cleanup(struct mtk_eth *eth)
1946{
1947 int i;
1948
1949 for (i = 0; i < eth->soc->mac_count; i++) {
1950 struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
1951
1952 if (!eth->netdev[i])
1953 continue;
1954
1955 unregister_netdev(eth->netdev[i]);
1956 free_netdev(eth->netdev[i]);
1957 cancel_work_sync(&mac->pending_work);
1958 }
1959
1960 return 0;
1961}
1962
/* net_device_ops shared by every MAC netdev created by this driver */
static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= mtk_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_vlan_rx_add_vid	= mtk_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mtk_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};
1981
1982static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1983{
1984 struct mtk_mac *mac;
1985 const __be32 *_id = of_get_property(np, "reg", NULL);
1986 int id, err;
1987
1988 if (!_id) {
1989 dev_err(eth->dev, "missing mac id\n");
1990 return -EINVAL;
1991 }
1992 id = be32_to_cpup(_id);
1993 if (id >= eth->soc->mac_count || eth->netdev[id]) {
1994 dev_err(eth->dev, "%d is not a valid mac id\n", id);
1995 return -EINVAL;
1996 }
1997
1998 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
1999 if (!eth->netdev[id]) {
2000 dev_err(eth->dev, "alloc_etherdev failed\n");
2001 return -ENOMEM;
2002 }
2003 mac = netdev_priv(eth->netdev[id]);
2004 eth->mac[id] = mac;
2005 mac->id = id;
2006 mac->hw = eth;
2007 mac->of_node = np;
2008 INIT_WORK(&mac->pending_work, mtk_pending_work);
2009
2010 if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
2011 mac->hw_stats = devm_kzalloc(eth->dev,
2012 sizeof(*mac->hw_stats),
2013 GFP_KERNEL);
2014 if (!mac->hw_stats) {
2015 err = -ENOMEM;
2016 goto free_netdev;
2017 }
2018 spin_lock_init(&mac->hw_stats->stats_lock);
2019 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2020 }
2021
2022 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2023 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2024 eth->netdev[id]->base_addr = (unsigned long)eth->base;
2025
2026 if (eth->soc->init_data)
2027 eth->soc->init_data(eth->soc, eth->netdev[id]);
2028
2029 eth->netdev[id]->vlan_features = eth->soc->hw_features &
2030 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2031 eth->netdev[id]->features |= eth->soc->hw_features;
2032
2033 if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
2034 eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2035
2036 mtk_set_ethtool_ops(eth->netdev[id]);
2037
2038 err = register_netdev(eth->netdev[id]);
2039 if (err) {
2040 dev_err(eth->dev, "error bringing up device\n");
2041 err = -ENOMEM;
2042 goto free_netdev;
2043 }
2044 eth->netdev[id]->irq = eth->irq;
2045 netif_info(eth, probe, eth->netdev[id],
2046 "mediatek frame engine at 0x%08lx, irq %d\n",
2047 eth->netdev[id]->base_addr, eth->netdev[id]->irq);
2048
2049 return 0;
2050
2051free_netdev:
2052 free_netdev(eth->netdev[id]);
2053 return err;
2054}
2055
2056static int mtk_probe(struct platform_device *pdev)
2057{
2058 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2059 const struct of_device_id *match;
2060 struct device_node *mac_np;
2061 struct mtk_soc_data *soc;
2062 struct mtk_eth *eth;
2063 struct clk *sysclk;
2064 int err;
2065
2066 device_reset(&pdev->dev);
2067
2068 match = of_match_device(of_mtk_match, &pdev->dev);
2069 soc = (struct mtk_soc_data *)match->data;
2070
2071 if (soc->reg_table)
2072 mtk_reg_table = soc->reg_table;
2073
2074 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2075 if (!eth)
2076 return -ENOMEM;
2077
2078 eth->base = devm_ioremap_resource(&pdev->dev, res);
2079 if (IS_ERR(eth->base))
2080 return PTR_ERR(eth->base);
2081
2082 spin_lock_init(&eth->page_lock);
2083
2084 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2085 "mediatek,ethsys");
2086 if (IS_ERR(eth->ethsys))
2087 return PTR_ERR(eth->ethsys);
2088
2089 eth->irq = platform_get_irq(pdev, 0);
2090 if (eth->irq < 0) {
2091 dev_err(&pdev->dev, "no IRQ resource found\n");
2092 return -ENXIO;
2093 }
2094
2095 sysclk = devm_clk_get(&pdev->dev, NULL);
2096 if (IS_ERR(sysclk)) {
2097 dev_err(&pdev->dev,
2098 "the clock is not defined in the devicetree\n");
2099 return -ENXIO;
2100 }
2101 eth->sysclk = clk_get_rate(sysclk);
2102
2103 eth->switch_np = of_parse_phandle(pdev->dev.of_node,
2104 "mediatek,switch", 0);
2105 if (soc->has_switch && !eth->switch_np) {
2106 dev_err(&pdev->dev, "failed to read switch phandle\n");
2107 return -ENODEV;
2108 }
2109
2110 eth->dev = &pdev->dev;
2111 eth->soc = soc;
2112 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2113
2114 err = mtk_init_hw(eth);
2115 if (err)
2116 return err;
2117
2118 if (eth->soc->mac_count > 1) {
2119 for_each_child_of_node(pdev->dev.of_node, mac_np) {
2120 if (!of_device_is_compatible(mac_np,
2121 "mediatek,eth-mac"))
2122 continue;
2123
2124 if (!of_device_is_available(mac_np))
2125 continue;
2126
2127 err = mtk_add_mac(eth, mac_np);
2128 if (err)
2129 goto err_free_dev;
2130 }
2131
2132 init_dummy_netdev(&eth->dummy_dev);
2133 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
2134 soc->napi_weight);
2135 } else {
2136 err = mtk_add_mac(eth, pdev->dev.of_node);
2137 if (err)
2138 goto err_free_dev;
2139 netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
2140 soc->napi_weight);
2141 }
2142
2143 platform_set_drvdata(pdev, eth);
2144
2145 return 0;
2146
2147err_free_dev:
2148 mtk_cleanup(eth);
2149 return err;
2150}
2151
/* Platform remove: delete the NAPI context, unregister and free the
 * netdevs (mtk_cleanup) and clear the drvdata pointer.  Everything else
 * (IRQ, clocks, register mapping) is devm-managed.
 */
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
2162
/* Platform driver glue; bound against the compatibles in of_mtk_match */
static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed80433f49..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef MTK_ETH_H
16#define MTK_ETH_H
17
18#include <linux/mii.h>
19#include <linux/interrupt.h>
20#include <linux/netdevice.h>
21#include <linux/dma-mapping.h>
22#include <linux/phy.h>
23#include <linux/ethtool.h>
24#include <linux/version.h>
25#include <linux/atomic.h>
26
/* these registers have different offsets depending on the SoC. we use a lookup
 * table for these: the enum values index into the per-SoC reg_table
 * (struct mtk_soc_data::reg_table, installed as mtk_reg_table at probe)
 */
enum mtk_reg {
	MTK_REG_PDMA_GLO_CFG = 0,
	MTK_REG_PDMA_RST_CFG,
	MTK_REG_DLY_INT_CFG,
	MTK_REG_TX_BASE_PTR0,
	MTK_REG_TX_MAX_CNT0,
	MTK_REG_TX_CTX_IDX0,
	MTK_REG_TX_DTX_IDX0,
	MTK_REG_RX_BASE_PTR0,
	MTK_REG_RX_MAX_CNT0,
	MTK_REG_RX_CALC_IDX0,
	MTK_REG_RX_DRX_IDX0,
	MTK_REG_MTK_INT_ENABLE,
	MTK_REG_MTK_INT_STATUS,
	MTK_REG_MTK_DMA_VID_BASE,
	MTK_REG_MTK_COUNTER_BASE,
	MTK_REG_MTK_RST_GL,
	MTK_REG_MTK_INT_STATUS2,
	MTK_REG_COUNT	/* number of indexed registers; sizes the table */
};
50
51/* delayed interrupt bits */
52#define MTK_DELAY_EN_INT 0x80
53#define MTK_DELAY_MAX_INT 0x04
54#define MTK_DELAY_MAX_TOUT 0x04
55#define MTK_DELAY_TIME 20
56#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
57 | MTK_DELAY_MAX_TOUT)
58#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
59#define MTK_PSE_FQFC_CFG_INIT 0x80504000
60#define MTK_PSE_FQFC_CFG_256Q 0xff908000
61
62/* interrupt bits */
63#define MTK_CNT_PPE_AF BIT(31)
64#define MTK_CNT_GDM_AF BIT(29)
65#define MTK_PSE_P2_FC BIT(26)
66#define MTK_PSE_BUF_DROP BIT(24)
67#define MTK_GDM_OTHER_DROP BIT(23)
68#define MTK_PSE_P1_FC BIT(22)
69#define MTK_PSE_P0_FC BIT(21)
70#define MTK_PSE_FQ_EMPTY BIT(20)
71#define MTK_GE1_STA_CHG BIT(18)
72#define MTK_TX_COHERENT BIT(17)
73#define MTK_RX_COHERENT BIT(16)
74#define MTK_TX_DONE_INT3 BIT(11)
75#define MTK_TX_DONE_INT2 BIT(10)
76#define MTK_TX_DONE_INT1 BIT(9)
77#define MTK_TX_DONE_INT0 BIT(8)
78#define MTK_RX_DONE_INT0 BIT(2)
79#define MTK_TX_DLY_INT BIT(1)
80#define MTK_RX_DLY_INT BIT(0)
81
82#define MTK_RX_DONE_INT MTK_RX_DONE_INT0
83#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
84 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
85
86#define RT5350_RX_DLY_INT BIT(30)
87#define RT5350_TX_DLY_INT BIT(28)
88#define RT5350_RX_DONE_INT1 BIT(17)
89#define RT5350_RX_DONE_INT0 BIT(16)
90#define RT5350_TX_DONE_INT3 BIT(3)
91#define RT5350_TX_DONE_INT2 BIT(2)
92#define RT5350_TX_DONE_INT1 BIT(1)
93#define RT5350_TX_DONE_INT0 BIT(0)
94
95#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
96#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
97 RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
98
99/* registers */
100#define MTK_GDMA_OFFSET 0x0020
101#define MTK_PSE_OFFSET 0x0040
102#define MTK_GDMA2_OFFSET 0x0060
103#define MTK_CDMA_OFFSET 0x0080
104#define MTK_DMA_VID0 0x00a8
105#define MTK_PDMA_OFFSET 0x0100
106#define MTK_PPE_OFFSET 0x0200
107#define MTK_CMTABLE_OFFSET 0x0400
108#define MTK_POLICYTABLE_OFFSET 0x1000
109
110#define MT7621_GDMA_OFFSET 0x0500
111#define MT7620_GDMA_OFFSET 0x0600
112
113#define RT5350_PDMA_OFFSET 0x0800
114#define RT5350_SDM_OFFSET 0x0c00
115
116#define MTK_MDIO_ACCESS 0x00
117#define MTK_MDIO_CFG 0x04
118#define MTK_GLO_CFG 0x08
119#define MTK_RST_GL 0x0C
120#define MTK_INT_STATUS 0x10
121#define MTK_INT_ENABLE 0x14
122#define MTK_MDIO_CFG2 0x18
123#define MTK_FOC_TS_T 0x1C
124
125#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00)
126#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04)
127#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08)
128#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C)
129#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10)
130
131#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00)
132#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04)
133#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08)
134#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C)
135#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10)
136
137#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00)
138#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04)
139#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08)
140#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C)
141
142#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00)
143#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04)
144
145#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000))
146
147/* FIXME this might be different for different SOCs */
148#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00)
149
150#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
151#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
152#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
153#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
154#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
155#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
156#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
157#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
158#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
159#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
160#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
161#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
162#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
163#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
164#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
165#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
166#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
167#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
168#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
169#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
170#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
171#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
172#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
173#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
174#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
175#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
176#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
177#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
178#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
179#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
180
181#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00)
182#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04)
183#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08)
184#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C)
185#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10)
186#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14)
187#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18)
188#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C)
189#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20)
190#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24)
191#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28)
192#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C)
193#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30)
194#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34)
195#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38)
196#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C)
197#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40)
198#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44)
199#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48)
200#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C)
201#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50)
202#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54)
203#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58)
204#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C)
205#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60)
206#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64)
207#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68)
208#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C)
209
210/* Switch DMA configuration */
211#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
212#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
213#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
214#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
215#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
216#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
217#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
218#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
219#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
220#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
221
222#define RT5350_SDM_ICS_EN BIT(16)
223#define RT5350_SDM_TCS_EN BIT(17)
224#define RT5350_SDM_UCS_EN BIT(18)
225
226/* QDMA registers */
227#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
228#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
229#define MTK_QRX_BASE_PTR0 0x1900
230#define MTK_QRX_MAX_CNT0 0x1904
231#define MTK_QRX_CRX_IDX0 0x1908
232#define MTK_QRX_DRX_IDX0 0x190C
233#define MTK_QDMA_GLO_CFG 0x1A04
234#define MTK_QDMA_RST_IDX 0x1A08
235#define MTK_QDMA_DELAY_INT 0x1A0C
236#define MTK_QDMA_FC_THRES 0x1A10
237#define MTK_QMTK_INT_STATUS 0x1A18
238#define MTK_QMTK_INT_ENABLE 0x1A1C
239#define MTK_QDMA_HRED2 0x1A44
240
241#define MTK_QTX_CTX_PTR 0x1B00
242#define MTK_QTX_DTX_PTR 0x1B04
243
244#define MTK_QTX_CRX_PTR 0x1B10
245#define MTK_QTX_DRX_PTR 0x1B14
246
247#define MTK_QDMA_FQ_HEAD 0x1B20
248#define MTK_QDMA_FQ_TAIL 0x1B24
249#define MTK_QDMA_FQ_CNT 0x1B28
250#define MTK_QDMA_FQ_BLEN 0x1B2C
251
252#define QDMA_PAGE_SIZE 2048
253#define QDMA_TX_OWNER_CPU BIT(31)
254#define QDMA_TX_SWC BIT(14)
255#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16)
256#define QDMA_RES_THRES 4
257
258/* MDIO_CFG register bits */
259#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29)
260#define MTK_MDIO_CFG_GP1_BP_EN BIT(16)
261#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15)
262#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13)
263#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13)
264#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
265#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12)
266#define MTK_MDIO_CFG_GP1_FC_TX BIT(11)
267#define MTK_MDIO_CFG_GP1_FC_RX BIT(10)
268#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9)
269#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8)
270#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
271#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
272#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
273#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
274#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5)
275#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4)
276#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
277#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
278#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
279#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
280#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0
281#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1
282#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2
283#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3
284
285/* uni-cast port */
286#define MTK_GDM1_JMB_LEN_MASK 0xf
287#define MTK_GDM1_JMB_LEN_SHIFT 28
288#define MTK_GDM1_ICS_EN BIT(22)
289#define MTK_GDM1_TCS_EN BIT(21)
290#define MTK_GDM1_UCS_EN BIT(20)
291#define MTK_GDM1_JMB_EN BIT(19)
292#define MTK_GDM1_STRPCRC BIT(16)
293#define MTK_GDM1_UFRC_P_CPU (0 << 12)
294#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12)
295#define MTK_GDM1_UFRC_P_PPE (6 << 12)
296
297/* checksums */
298#define MTK_ICS_GEN_EN BIT(2)
299#define MTK_UCS_GEN_EN BIT(1)
300#define MTK_TCS_GEN_EN BIT(0)
301
302/* dma mode */
303#define MTK_PDMA BIT(0)
304#define MTK_QDMA BIT(1)
305#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA)
306
307/* dma ring */
308#define MTK_PST_DRX_IDX0 BIT(16)
309#define MTK_PST_DTX_IDX3 BIT(3)
310#define MTK_PST_DTX_IDX2 BIT(2)
311#define MTK_PST_DTX_IDX1 BIT(1)
312#define MTK_PST_DTX_IDX0 BIT(0)
313
314#define MTK_RX_2B_OFFSET BIT(31)
315#define MTK_TX_WB_DDONE BIT(6)
316#define MTK_RX_DMA_BUSY BIT(3)
317#define MTK_TX_DMA_BUSY BIT(1)
318#define MTK_RX_DMA_EN BIT(2)
319#define MTK_TX_DMA_EN BIT(0)
320
321#define MTK_PDMA_SIZE_4DWORDS (0 << 4)
322#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
323#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
324
325#define MTK_US_CYC_CNT_MASK 0xff
326#define MTK_US_CYC_CNT_SHIFT 0x8
327#define MTK_US_CYC_CNT_DIVISOR 1000000
328
329/* PDMA descriptor rxd2 */
330#define RX_DMA_DONE BIT(31)
331#define RX_DMA_LSO BIT(30)
332#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
333#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
334#define RX_DMA_TAG BIT(15)
335
336/* PDMA descriptor rxd3 */
337#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
338#define RX_DMA_VID(_x) ((_x) & 0xfff)
339
340/* PDMA descriptor rxd4 */
341#define RX_DMA_L4VALID BIT(30)
342#define RX_DMA_FPORT_SHIFT 19
343#define RX_DMA_FPORT_MASK 0x7
344
/* PDMA hardware RX descriptor: four raw 32-bit words.  The bit layouts
 * of rxd2..rxd4 are given by the RX_DMA_* defines above.
 */
struct mtk_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;	/* RX_DMA_DONE / RX_DMA_LSO / RX_DMA_PLEN0 / RX_DMA_TAG */
	unsigned int rxd3;	/* RX_DMA_TPID / RX_DMA_VID */
	unsigned int rxd4;	/* RX_DMA_L4VALID / RX_DMA_FPORT_* */
} __packed __aligned(4);
351
352/* PDMA tx descriptor bits */
353#define TX_DMA_BUF_LEN 0x3fff
354#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
355#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
356#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
357#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
358#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
359#define TX_DMA_LS1 BIT(14)
360#define TX_DMA_LS0 BIT(30)
361#define TX_DMA_DONE BIT(31)
362#define TX_DMA_FPORT_SHIFT 25
363#define TX_DMA_FPORT_MASK 0x7
364#define TX_DMA_INS_VLAN_MT7621 BIT(16)
365#define TX_DMA_INS_VLAN BIT(7)
366#define TX_DMA_INS_PPPOE BIT(12)
367#define TX_DMA_TAG BIT(15)
368#define TX_DMA_TAG_MASK BIT(15)
369#define TX_DMA_QN(_x) ((_x) << 16)
370#define TX_DMA_PN(_x) ((_x) << 24)
371#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
372#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
373#define TX_DMA_UDF BIT(20)
374#define TX_DMA_CHKSUM (0x7 << 29)
375#define TX_DMA_TSO BIT(28)
376#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
377
378/* frame engine counters */
379#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00)
380#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300)
381#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40)
382
383/* phy device flags */
384#define MTK_PHY_FLAG_PORT BIT(0)
385#define MTK_PHY_FLAG_ATTACH BIT(1)
386
/* PDMA hardware TX descriptor: four raw 32-bit words; bit layouts are
 * given by the TX_DMA_* defines above (which word carries which field
 * is SoC-specific - see the users of these bits).
 */
struct mtk_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);
393
394struct mtk_eth;
395struct mtk_mac;
396
/* manage the attached phys: per-port (up to 8) state plus the
 * SoC-specific connect/disconnect/start/stop callbacks used by the
 * core driver (mtk_stop(), mtk_init(), mtk_uninit())
 */
struct mtk_phy {
	spinlock_t lock;	/* NOTE(review): presumably guards the per-port state below - confirm at call sites */

	struct phy_device *phy[8];
	struct device_node *phy_node[8];
	const __be32 *phy_fixed[8];
	int duplex[8];
	int speed[8];
	int tx_fc[8];
	int rx_fc[8];
	int (*connect)(struct mtk_mac *mac);
	void (*disconnect)(struct mtk_mac *mac);
	void (*start)(struct mtk_mac *mac);
	void (*stop)(struct mtk_mac *mac);
};
413
/* struct mtk_soc_data - the structure that holds the SoC specific data
 * @reg_table:		Some of the legacy registers changed their location
 *			over time. Their offsets are stored in this table
 *
 * @init_data:		Some features depend on the silicon revision. This
 *			callback allows runtime modification of the content of
 *			this struct
 * @reset_fe:		This callback is used to trigger the reset of the frame
 *			engine
 * @set_mac:		This callback is used to set the unicast mac address
 *			filter
 * @fwd_config:		This callback is used to setup the forward config
 *			register of the MAC
 * @switch_init:	This callback is used to bring up the switch core
 * @port_init:		Some SoCs have ports that can be routed to a switch port
 *			or an external PHY. This callback is used to setup these
 *			ports.
 * @has_carrier:	This callback allows the driver to check if there is a
 *			cable attached.
 * @mdio_init:		This callback is used to setup the MDIO bus if one is
 *			present
 * @mdio_cleanup:	This callback is used to cleanup the MDIO state.
 * @mdio_write:		This callback is used to write data to the MDIO bus.
 * @mdio_read:		This callback is used to read data from the MDIO bus.
 * @mdio_adjust_link:	This callback is used to apply the PHY settings.
 * @piac_offset:	the PIAC register has a different base offset
 * @hw_features:	feature set depends on the SoC type
 * @dma_ring_size:	allow GBit SoCs to set bigger rings than FE SoCs
 * @napi_weight:	allow GBit SoCs to set bigger napi weight than FE SoCs
 * @dma_type:		SoC is PDMA, QDMA or a mix of the 2
 * @pdma_glo_cfg:	the default DMA configuration
 * @rx_int:		the RX interrupt bits used by the SoC
 * @tx_int:		the TX interrupt bits used by the SoC
 * @status_int:		the Status interrupt bits used by the SoC
 * @checksum_bit:	the bits used to turn on HW checksumming
 * @txd4:		default value of the TXD4 descriptor
 * @mac_count:		the number of MACs that the SoC has
 * @new_stats:		there is an old and a new way to read hardware stats
 *			registers
 * @jumbo_frame:	does the SoC support jumbo frames ?
 * @rx_2b_offset:	tell the rx dma to offset the data by 2 bytes
 * @rx_sg_dma:		scatter gather support
 * @padding_64b:	enable 64 bit padding
 * @padding_bug:	rt2880 has a padding bug
 * @has_switch:		does the SoC have a built-in switch
 *
 * Although all of the supported SoCs share the same basic functionality, there
 * are several SoC specific functions and features that we need to support. This
 * struct holds the SoC specific data so that the common core can figure out
 * how to setup and use these differences.
 */
struct mtk_soc_data {
	const u16 *reg_table;

	void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
	void (*reset_fe)(struct mtk_eth *eth);
	void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
	int (*fwd_config)(struct mtk_eth *eth);
	int (*switch_init)(struct mtk_eth *eth);
	void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
			  struct device_node *port);
	int (*has_carrier)(struct mtk_eth *eth);
	int (*mdio_init)(struct mtk_eth *eth);
	void (*mdio_cleanup)(struct mtk_eth *eth);
	int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
			  u16 val);
	int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
	void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
	u32 piac_offset;
	netdev_features_t hw_features;
	u32 dma_ring_size;
	u32 napi_weight;
	u32 dma_type;
	u32 pdma_glo_cfg;
	u32 rx_int;
	u32 tx_int;
	u32 status_int;
	u32 checksum_bit;
	u32 txd4;
	u32 mac_count;

	u32 new_stats:1;
	u32 jumbo_frame:1;
	u32 rx_2b_offset:1;
	u32 rx_sg_dma:1;
	u32 padding_64b:1;
	u32 padding_bug:1;
	u32 has_switch:1;
};
503
504#define MTK_STAT_OFFSET 0x40
505
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
 * @stats_lock:	make sure that stats operations are atomic
 * @reg_offset:	the status register offset of the SoC
 * @syncp:	u64_stats sync point for tear-free 64-bit reads
 *		(the original "refcount" description was wrong)
 *
 * All of the supported SoCs have hardware counters for traffic statistics.
 * Whenever the status IRQ triggers we can read the latest stats from these
 * counters and store them in this struct.
 */
struct mtk_hw_stats {
	spinlock_t stats_lock;
	u32 reg_offset;
	struct u64_stats_sync syncp;

	u64 tx_bytes;
	u64 tx_packets;
	u64 tx_skip;
	u64 tx_collisions;
	u64 rx_bytes;
	u64 rx_packets;
	u64 rx_overflow;
	u64 rx_fcs_errors;
	u64 rx_short_errors;
	u64 rx_long_errors;
	u64 rx_checksum_errors;
	u64 rx_flow_control_packets;
};
533
/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
 * the memory was allocated so that it can be freed properly (see the
 * flags field in struct mtk_tx_buf below)
 */
enum mtk_tx_flags {
	MTK_TX_FLAGS_SINGLE0	= 0x01,
	MTK_TX_FLAGS_PAGE0	= 0x02,
	MTK_TX_FLAGS_PAGE1	= 0x04,
};
542
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 * by the TX descriptors
 * @skb:	The SKB pointer of the packet being sent
 * @flags:	MTK_TX_FLAGS_* bookkeeping for how the segments were mapped
 * @dma_addr0:	The base addr of the first segment
 * @dma_len0:	The length of the first segment
 * @dma_addr1:	The base addr of the second segment
 * @dma_len1:	The length of the second segment
 */
struct mtk_tx_buf {
	struct sk_buff *skb;
	u32 flags;
	DEFINE_DMA_UNMAP_ADDR(dma_addr0);
	DEFINE_DMA_UNMAP_LEN(dma_len0);
	DEFINE_DMA_UNMAP_ADDR(dma_addr1);
	DEFINE_DMA_UNMAP_LEN(dma_len1);
};
559
/* struct mtk_tx_ring - This struct holds info describing a TX ring
 * @tx_dma:		The descriptor ring
 * @tx_buf:		The memory pointed at by the ring
 * @tx_phys:		The physical addr of tx_buf
 * @tx_next_free:	Pointer to the next free descriptor
 * @tx_last_free:	Pointer to the last free descriptor
 * @tx_thresh:		The threshold of minimum amount of free descriptors
 * @tx_map:		Callback to map a new packet into the ring
 * @tx_poll:		Callback for the housekeeping function
 * @tx_clean:		Callback for the cleanup function
 * @tx_ring_size:	How many descriptors are in the ring
 * @tx_free_idx:	The index of the next free descriptor
 * @tx_next_idx:	QDMA uses a linked list. This element points to the next
 *			free descriptor in the list
 * @tx_free_count:	QDMA uses a linked list. Track how many free descriptors
 *			are present
 */
struct mtk_tx_ring {
	struct mtk_tx_dma *tx_dma;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t tx_phys;
	struct mtk_tx_dma *tx_next_free;
	struct mtk_tx_dma *tx_last_free;
	u16 tx_thresh;
	int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
		      struct mtk_tx_ring *ring, bool gso);
	int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
	void (*tx_clean)(struct mtk_eth *eth);

	/* PDMA only */
	u16 tx_ring_size;
	u16 tx_free_idx;

	/* QDMA only */
	u16 tx_next_idx;
	atomic_t tx_free_count;
};
597
598/* struct mtk_rx_ring - This struct holds info describing a RX ring
599 * @rx_dma: The descriptor ring
600 * @rx_data: The memory pointed at by the ring
601 * @trx_phys: The physical addr of rx_buf
602 * @rx_ring_size: How many descriptors are in the ring
603 * @rx_buf_size: The size of each packet buffer
604 * @rx_calc_idx: The current head of ring
605 */
606struct mtk_rx_ring {
607 struct mtk_rx_dma *rx_dma;
608 u8 **rx_data;
609 dma_addr_t rx_phys;
610 u16 rx_ring_size;
611 u16 frag_size;
612 u16 rx_buf_size;
613 u16 rx_calc_idx;
614};
615
616/* currently no SoC has more than 2 macs */
617#define MTK_MAX_DEVS 2
618
619/* struct mtk_eth - This is the main datasructure for holding the state
620 * of the driver
621 * @dev: The device pointer
622 * @base: The mapped register i/o base
623 * @page_lock: Make sure that register operations are atomic
624 * @soc: pointer to our SoC specific data
625 * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
626 * dummy for NAPI to work
627 * @netdev: The netdev instances
628 * @mac: Each netdev is linked to a physical MAC
629 * @switch_np: The phandle for the switch
630 * @irq: The IRQ that we are using
631 * @msg_enable: Ethtool msg level
632 * @ysclk: The sysclk rate - neeed for calibration
633 * @ethsys: The register map pointing at the range used to setup
634 * MII modes
635 * @dma_refcnt: track how many netdevs are using the DMA engine
636 * @tx_ring: Pointer to the memore holding info about the TX ring
637 * @rx_ring: Pointer to the memore holding info about the RX ring
638 * @rx_napi: The NAPI struct
639 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
640 * @scratch_head: The scratch memory that scratch_ring points to.
641 * @phy: Info about the attached PHYs
642 * @mii_bus: If there is a bus we need to create an instance for it
643 * @link: Track if the ports have a physical link
644 * @sw_priv: Pointer to the switches private data
645 * @vlan_map: RX VID tracking
646 */
647
648struct mtk_eth {
649 struct device *dev;
650 void __iomem *base;
651 spinlock_t page_lock;
652 struct mtk_soc_data *soc;
653 struct net_device dummy_dev;
654 struct net_device *netdev[MTK_MAX_DEVS];
655 struct mtk_mac *mac[MTK_MAX_DEVS];
656 struct device_node *switch_np;
657 int irq;
658 u32 msg_enable;
659 unsigned long sysclk;
660 struct regmap *ethsys;
661 atomic_t dma_refcnt;
662 struct mtk_tx_ring tx_ring;
663 struct mtk_rx_ring rx_ring[2];
664 struct napi_struct rx_napi;
665 struct mtk_tx_dma *scratch_ring;
666 void *scratch_head;
667 struct mtk_phy *phy;
668 struct mii_bus *mii_bus;
669 int link[8];
670 void *sw_priv;
671 unsigned long vlan_map;
672};
673
674/* struct mtk_mac - the structure that holds the info about the MACs of the
675 * SoC
676 * @id: The number of the MAC
677 * @of_node: Our devicetree node
678 * @hw: Backpointer to our main datastruture
679 * @hw_stats: Packet statistics counter
680 * @phy_dev: The attached PHY if available
681 * @phy_flags: The PHYs flags
682 * @pending_work: The workqueue used to reset the dma ring
683 */
684struct mtk_mac {
685 int id;
686 struct device_node *of_node;
687 struct mtk_eth *hw;
688 struct mtk_hw_stats *hw_stats;
689 struct phy_device *phy_dev;
690 u32 phy_flags;
691 struct work_struct pending_work;
692};
693
694/* the struct describing the SoC. these are declared in the soc_xyz.c files */
695extern const struct of_device_id of_mtk_match[];
696
697/* read the hardware status register */
698void mtk_stats_update_mac(struct mtk_mac *mac);
699
700/* default checksum setup handler */
701void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
702
703/* register i/o wrappers */
704void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
705u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
706
707/* default clock calibration handler */
708int mtk_set_clock_cycle(struct mtk_eth *eth);
709
710/* default checksum setup handler */
711void mtk_csum_config(struct mtk_eth *eth);
712
713/* default forward config handler */
714void mtk_fwd_config(struct mtk_eth *eth);
715
716#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d96f6b..000000000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/if_vlan.h>
18#include <linux/of_net.h>
19
20#include <asm/mach-ralink/ralink_regs.h>
21
22#include "mtk_eth_soc.h"
23#include "gsw_mt7620.h"
24#include "mdio.h"
25
26#define MT7620_CDMA_CSG_CFG 0x400
27#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00)
28#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04)
29#define MT7621_RESET_FE BIT(6)
30#define MT7621_L4_VALID BIT(24)
31
32#define MT7621_TX_DMA_UDF BIT(19)
33
34#define CDMA_ICS_EN BIT(2)
35#define CDMA_UCS_EN BIT(1)
36#define CDMA_TCS_EN BIT(0)
37
38#define GDMA_ICS_EN BIT(22)
39#define GDMA_TCS_EN BIT(21)
40#define GDMA_UCS_EN BIT(20)
41
42/* frame engine counters */
43#define MT7621_REG_MIB_OFFSET 0x2000
44#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00)
45#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400)
46#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40)
47
48#define GSW_REG_GDMA1_MAC_ADRL 0x508
49#define GSW_REG_GDMA1_MAC_ADRH 0x50C
50#define GSW_REG_GDMA2_MAC_ADRL 0x1508
51#define GSW_REG_GDMA2_MAC_ADRH 0x150C
52
53#define MT7621_MTK_RST_GL 0x04
54#define MT7620_MTK_INT_STATUS2 0x08
55
56/* MTK_INT_STATUS reg on mt7620 define CNT_GDM1_AF at BIT(29)
57 * but after test it should be BIT(13).
58 */
59#define MT7621_MTK_GDM1_AF BIT(28)
60#define MT7621_MTK_GDM2_AF BIT(29)
61
62static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
63 [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
64 [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
65 [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
66 [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
67 [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
68 [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
69 [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
70 [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
71 [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
72 [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
73 [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
74 [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
75 [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
76 [MTK_REG_MTK_DMA_VID_BASE] = 0,
77 [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
78 [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
79 [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
80};
81
82static void mt7621_mtk_reset(struct mtk_eth *eth)
83{
84 mtk_reset(eth, MT7621_RESET_FE);
85}
86
87static int mt7621_fwd_config(struct mtk_eth *eth)
88{
89 /* Setup GMAC1 only, there is no support for GMAC2 yet */
90 mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
91 MT7620_GDMA1_FWD_CFG);
92
93 /* Enable RX checksum */
94 mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
95 GDMA_TCS_EN | GDMA_UCS_EN),
96 MT7620_GDMA1_FWD_CFG);
97
98 /* Enable RX VLan Offloading */
99 mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
100
101 return 0;
102}
103
104static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
105{
106 unsigned long flags;
107
108 spin_lock_irqsave(&mac->hw->page_lock, flags);
109 if (mac->id == 0) {
110 mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
111 GSW_REG_GDMA1_MAC_ADRH);
112 mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
113 (hwaddr[4] << 8) | hwaddr[5],
114 GSW_REG_GDMA1_MAC_ADRL);
115 }
116 if (mac->id == 1) {
117 mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
118 GSW_REG_GDMA2_MAC_ADRH);
119 mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
120 (hwaddr[4] << 8) | hwaddr[5],
121 GSW_REG_GDMA2_MAC_ADRL);
122 }
123 spin_unlock_irqrestore(&mac->hw->page_lock, flags);
124}
125
126static struct mtk_soc_data mt7621_data = {
127 .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
128 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
129 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
130 NETIF_F_IPV6_CSUM,
131 .dma_type = MTK_PDMA,
132 .dma_ring_size = 256,
133 .napi_weight = 64,
134 .new_stats = 1,
135 .padding_64b = 1,
136 .rx_2b_offset = 1,
137 .rx_sg_dma = 1,
138 .has_switch = 1,
139 .mac_count = 2,
140 .reset_fe = mt7621_mtk_reset,
141 .set_mac = mt7621_set_mac,
142 .fwd_config = mt7621_fwd_config,
143 .switch_init = mtk_gsw_init,
144 .reg_table = mt7621_reg_table,
145 .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
146 .rx_int = RT5350_RX_DONE_INT,
147 .tx_int = RT5350_TX_DONE_INT,
148 .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
149 .checksum_bit = MT7621_L4_VALID,
150 .has_carrier = mt7620_has_carrier,
151 .mdio_read = mt7620_mdio_read,
152 .mdio_write = mt7620_mdio_write,
153 .mdio_adjust_link = mt7620_mdio_link_adjust,
154};
155
156const struct of_device_id of_mtk_match[] = {
157 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
158 {},
159};
160
161MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d33533872a16..c8fa17cfa807 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
1config PCI_MT7621 1config PCI_MT7621
2 tristate "MediaTek MT7621 PCI Controller" 2 tristate "MediaTek MT7621 PCI Controller"
3 depends on RALINK 3 depends on RALINK
4 depends on PCI
4 select PCI_DRIVERS_GENERIC 5 select PCI_DRIVERS_GENERIC
5 help 6 help
6 This selects a driver for the MediaTek MT7621 PCI Controller. 7 This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248eecf123..2aee64fdaec5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
163 goto no_phy; 163 goto no_phy;
164 164
165 phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, 165 phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
166 PHY_INTERFACE_MODE_GMII); 166 priv->phy_mode);
167 of_node_put(phy_node); 167 of_node_put(phy_node);
168 168
169 if (!phydev) 169 if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c5670ef6..986db76705cc 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
653 return np; 653 return np;
654} 654}
655 655
656static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port) 656static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
657 int port)
657{ 658{
659 struct device_node *np = priv->of_node;
658 u32 delay_value; 660 u32 delay_value;
661 bool rx_delay;
662 bool tx_delay;
659 663
660 if (!of_property_read_u32(np, "rx-delay", &delay_value)) 664 /* By default, both RX/TX delay is enabled in
665 * __cvmx_helper_rgmii_enable().
666 */
667 rx_delay = true;
668 tx_delay = true;
669
670 if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
661 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); 671 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
662 if (!of_property_read_u32(np, "tx-delay", &delay_value)) 672 rx_delay = delay_value > 0;
673 }
674 if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
663 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); 675 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
676 tx_delay = delay_value > 0;
677 }
678
679 if (!rx_delay && !tx_delay)
680 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
681 else if (!rx_delay)
682 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
683 else if (!tx_delay)
684 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
685 else
686 priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
664} 687}
665 688
666static int cvm_oct_probe(struct platform_device *pdev) 689static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
825 priv->port = port; 848 priv->port = port;
826 priv->queue = cvmx_pko_get_base_queue(priv->port); 849 priv->queue = cvmx_pko_get_base_queue(priv->port);
827 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; 850 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
851 priv->phy_mode = PHY_INTERFACE_MODE_NA;
828 for (qos = 0; qos < 16; qos++) 852 for (qos = 0; qos < 16; qos++)
829 skb_queue_head_init(&priv->tx_free_list[qos]); 853 skb_queue_head_init(&priv->tx_free_list[qos]);
830 for (qos = 0; qos < cvmx_pko_get_num_queues(port); 854 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
856 break; 880 break;
857 881
858 case CVMX_HELPER_INTERFACE_MODE_SGMII: 882 case CVMX_HELPER_INTERFACE_MODE_SGMII:
883 priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
859 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; 884 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
860 strcpy(dev->name, "eth%d"); 885 strcpy(dev->name, "eth%d");
861 break; 886 break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
865 strcpy(dev->name, "spi%d"); 890 strcpy(dev->name, "spi%d");
866 break; 891 break;
867 892
868 case CVMX_HELPER_INTERFACE_MODE_RGMII:
869 case CVMX_HELPER_INTERFACE_MODE_GMII: 893 case CVMX_HELPER_INTERFACE_MODE_GMII:
894 priv->phy_mode = PHY_INTERFACE_MODE_GMII;
895 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
896 strcpy(dev->name, "eth%d");
897 break;
898
899 case CVMX_HELPER_INTERFACE_MODE_RGMII:
870 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; 900 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
871 strcpy(dev->name, "eth%d"); 901 strcpy(dev->name, "eth%d");
872 cvm_set_rgmii_delay(priv->of_node, interface, 902 cvm_set_rgmii_delay(priv, interface,
873 port_index); 903 port_index);
874 break; 904 break;
875 } 905 }
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f43d12..be570d33685a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
12#define OCTEON_ETHERNET_H 12#define OCTEON_ETHERNET_H
13 13
14#include <linux/of.h> 14#include <linux/of.h>
15 15#include <linux/phy.h>
16#include <asm/octeon/cvmx-helper-board.h> 16#include <asm/octeon/cvmx-helper-board.h>
17 17
18/** 18/**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
33 * cvmx_helper_interface_mode_t 33 * cvmx_helper_interface_mode_t
34 */ 34 */
35 int imode; 35 int imode;
36 /* PHY mode */
37 phy_interface_t phy_mode;
36 /* List of outstanding tx buffers per queue */ 38 /* List of outstanding tx buffers per queue */
37 struct sk_buff_head tx_free_list[16]; 39 struct sk_buff_head tx_free_list[16];
38 unsigned int last_speed; 40 unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d4153414..a54286498a47 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
45{ 45{
46 unsigned char lob; 46 unsigned char lob;
47 int ret, i; 47 int ret, i;
48 struct dcon_gpio *pin = &gpios_asis[0]; 48 const struct dcon_gpio *pin = &gpios_asis[0];
49 49
50 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) { 50 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
51 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name, 51 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a96b4..952f2ab51347 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
174 174
175 pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; 175 pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
176 176
177 rtw_alloc_hwxmits(padapter); 177 res = rtw_alloc_hwxmits(padapter);
178 if (res == _FAIL)
179 goto exit;
178 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 180 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
179 181
180 for (i = 0; i < 4; i++) 182 for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
1503 return res; 1505 return res;
1504} 1506}
1505 1507
1506void rtw_alloc_hwxmits(struct adapter *padapter) 1508s32 rtw_alloc_hwxmits(struct adapter *padapter)
1507{ 1509{
1508 struct hw_xmit *hwxmits; 1510 struct hw_xmit *hwxmits;
1509 struct xmit_priv *pxmitpriv = &padapter->xmitpriv; 1511 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1512 1514
1513 pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, 1515 pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
1514 sizeof(struct hw_xmit), GFP_KERNEL); 1516 sizeof(struct hw_xmit), GFP_KERNEL);
1517 if (!pxmitpriv->hwxmits)
1518 return _FAIL;
1515 1519
1516 hwxmits = pxmitpriv->hwxmits; 1520 hwxmits = pxmitpriv->hwxmits;
1517 1521
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1519 hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; 1523 hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
1520 hwxmits[2] .sta_queue = &pxmitpriv->be_pending; 1524 hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
1521 hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; 1525 hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
1526 return _SUCCESS;
1522} 1527}
1523 1528
1524void rtw_free_hwxmits(struct adapter *padapter) 1529void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c74ea1..ba7e15fbde72 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
336void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); 336void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
337s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); 337s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
338void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); 338void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
339void rtw_alloc_hwxmits(struct adapter *padapter); 339s32 rtw_alloc_hwxmits(struct adapter *padapter);
340void rtw_free_hwxmits(struct adapter *padapter); 340void rtw_free_hwxmits(struct adapter *padapter);
341s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); 341s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
342 342
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02f7c9f..8c36acedf507 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
147 147
148static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) 148static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
149{ 149{
150 u32 val;
151 void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
152 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; 150 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
153 151
154 if (pcmd->rsp && pcmd->rspsz > 0) 152 r8712_free_cmd_obj(pcmd);
155 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
156 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
157 if (!pcmd_callback)
158 r8712_free_cmd_obj(pcmd);
159 else
160 pcmd_callback(padapter, pcmd);
161 return H2C_SUCCESS; 153 return H2C_SUCCESS;
162} 154}
163 155
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb77666d44..1ef86b8c592f 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
140static struct _cmd_callback cmd_callback[] = { 140static struct _cmd_callback cmd_callback[] = {
141 {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ 141 {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
142 {GEN_CMD_CODE(_Write_MACREG), NULL}, 142 {GEN_CMD_CODE(_Write_MACREG), NULL},
143 {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, 143 {GEN_CMD_CODE(_Read_BBREG), NULL},
144 {GEN_CMD_CODE(_Write_BBREG), NULL}, 144 {GEN_CMD_CODE(_Write_BBREG), NULL},
145 {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, 145 {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
146 {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ 146 {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61bcb469..b87f13a0b563 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
260 } 260 }
261 } 261 }
262 262
263 rtw_alloc_hwxmits(padapter); 263 res = rtw_alloc_hwxmits(padapter);
264 if (res == _FAIL)
265 goto exit;
264 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 266 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
265 267
266 for (i = 0; i < 4; i++) { 268 for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
2144 return res; 2146 return res;
2145} 2147}
2146 2148
2147void rtw_alloc_hwxmits(struct adapter *padapter) 2149s32 rtw_alloc_hwxmits(struct adapter *padapter)
2148{ 2150{
2149 struct hw_xmit *hwxmits; 2151 struct hw_xmit *hwxmits;
2150 struct xmit_priv *pxmitpriv = &padapter->xmitpriv; 2152 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
2155 2157
2156 pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); 2158 pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
2157 2159
2158 if (pxmitpriv->hwxmits == NULL) { 2160 if (!pxmitpriv->hwxmits)
2159 DBG_871X("alloc hwxmits fail!...\n"); 2161 return _FAIL;
2160 return;
2161 }
2162 2162
2163 hwxmits = pxmitpriv->hwxmits; 2163 hwxmits = pxmitpriv->hwxmits;
2164 2164
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
2204 2204
2205 } 2205 }
2206 2206
2207 2207 return _SUCCESS;
2208} 2208}
2209 2209
2210void rtw_free_hwxmits(struct adapter *padapter) 2210void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b9182b31..37f42b2f22f1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
487void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); 487void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
488 488
489 489
490void rtw_alloc_hwxmits(struct adapter *padapter); 490s32 rtw_alloc_hwxmits(struct adapter *padapter);
491void rtw_free_hwxmits(struct adapter *padapter); 491void rtw_free_hwxmits(struct adapter *padapter);
492 492
493 493
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed954abb..4cc77b2016e1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
180 180
181 rtlpriv->phydm.internal = 181 rtlpriv->phydm.internal =
182 kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); 182 kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
183 if (!rtlpriv->phydm.internal)
184 return 0;
183 185
184 _rtl_phydm_init_com_info(rtlpriv, ic, params); 186 _rtl_phydm_init_com_info(rtlpriv, ic, params);
185 187
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1382aa..cf6b7a80b753 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
743 u1_rsvd_page_loc, 3); 743 u1_rsvd_page_loc, 3);
744 744
745 skb = dev_alloc_skb(totalpacketlen); 745 skb = dev_alloc_skb(totalpacketlen);
746 if (!skb)
747 return;
746 memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, 748 memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
747 totalpacketlen); 749 totalpacketlen);
748 750
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce85655..9d85a3a1af4c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
210 return -EINVAL; 210 return -EINVAL;
211 211
212 spin_lock_irqsave(&speakup_info.spinlock, flags); 212 spin_lock_irqsave(&speakup_info.spinlock, flags);
213 synth_soft.alive = 1;
213 while (1) { 214 while (1) {
214 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); 215 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
215 if (!unicode) 216 if (synth_current() == &synth_soft) {
216 synth_buffer_skip_nonlatin1(); 217 if (!unicode)
217 if (!synth_buffer_empty() || speakup_info.flushing) 218 synth_buffer_skip_nonlatin1();
218 break; 219 if (!synth_buffer_empty() || speakup_info.flushing)
220 break;
221 }
219 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 222 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
220 if (fp->f_flags & O_NONBLOCK) { 223 if (fp->f_flags & O_NONBLOCK) {
221 finish_wait(&speakup_event, &wait); 224 finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
235 238
236 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 239 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
237 while (chars_sent <= count - bytes_per_ch) { 240 while (chars_sent <= count - bytes_per_ch) {
241 if (synth_current() != &synth_soft)
242 break;
238 if (speakup_info.flushing) { 243 if (speakup_info.flushing) {
239 speakup_info.flushing = 0; 244 speakup_info.flushing = 0;
240 ch = '\x18'; 245 ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
331 poll_wait(fp, &speakup_event, wait); 336 poll_wait(fp, &speakup_event, wait);
332 337
333 spin_lock_irqsave(&speakup_info.spinlock, flags); 338 spin_lock_irqsave(&speakup_info.spinlock, flags);
334 if (!synth_buffer_empty() || speakup_info.flushing) 339 if (synth_current() == &synth_soft &&
340 (!synth_buffer_empty() || speakup_info.flushing))
335 ret = EPOLLIN | EPOLLRDNORM; 341 ret = EPOLLIN | EPOLLRDNORM;
336 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 342 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
337 return ret; 343 return ret;
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e688878fc7..ac6a74883af4 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
74int synth_release_region(unsigned long start, unsigned long n); 74int synth_release_region(unsigned long start, unsigned long n);
75int synth_add(struct spk_synth *in_synth); 75int synth_add(struct spk_synth *in_synth);
76void synth_remove(struct spk_synth *in_synth); 76void synth_remove(struct spk_synth *in_synth);
77struct spk_synth *synth_current(void);
77 78
78extern struct speakup_info_t speakup_info; 79extern struct speakup_info_t speakup_info;
79 80
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259ee4ffc..3568bfb89912 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
481} 481}
482EXPORT_SYMBOL_GPL(synth_remove); 482EXPORT_SYMBOL_GPL(synth_remove);
483 483
484struct spk_synth *synth_current(void)
485{
486 return synth;
487}
488EXPORT_SYMBOL_GPL(synth_current);
489
484short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; 490short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf83be35..064d0db4c51e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
3513 struct device_node *fw_node; 3513 struct device_node *fw_node;
3514 const struct of_device_id *of_id; 3514 const struct of_device_id *of_id;
3515 struct vchiq_drvdata *drvdata; 3515 struct vchiq_drvdata *drvdata;
3516 struct device *vchiq_dev;
3516 int err; 3517 int err;
3517 3518
3518 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); 3519 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
3547 goto failed_platform_init; 3548 goto failed_platform_init;
3548 } 3549 }
3549 3550
3550 if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid, 3551 vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
3551 NULL, "vchiq"))) 3552 "vchiq");
3553 if (IS_ERR(vchiq_dev)) {
3554 err = PTR_ERR(vchiq_dev);
3552 goto failed_device_create; 3555 goto failed_device_create;
3556 }
3553 3557
3554 vchiq_debugfs_init(); 3558 vchiq_debugfs_init();
3555 3559
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985b58a1..c6bb4aaf9bd0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1033 return; 1033 return;
1034 } 1034 }
1035 1035
1036 MACvIntDisable(priv->PortOffset);
1037
1038 spin_lock_irqsave(&priv->lock, flags); 1036 spin_lock_irqsave(&priv->lock, flags);
1039 1037
1040 /* Read low level stats */ 1038 /* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1122 } 1120 }
1123 1121
1124 spin_unlock_irqrestore(&priv->lock, flags); 1122 spin_unlock_irqrestore(&priv->lock, flags);
1125
1126 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1127} 1123}
1128 1124
1129static void vnt_interrupt_work(struct work_struct *work) 1125static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
1133 1129
1134 if (priv->vif) 1130 if (priv->vif)
1135 vnt_interrupt_process(priv); 1131 vnt_interrupt_process(priv);
1132
1133 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1136} 1134}
1137 1135
1138static irqreturn_t vnt_interrupt(int irq, void *arg) 1136static irqreturn_t vnt_interrupt(int irq, void *arg)
1139{ 1137{
1140 struct vnt_private *priv = arg; 1138 struct vnt_private *priv = arg;
1141 1139
1142 if (priv->vif) 1140 schedule_work(&priv->interrupt_work);
1143 schedule_work(&priv->interrupt_work); 1141
1142 MACvIntDisable(priv->PortOffset);
1144 1143
1145 return IRQ_HANDLED; 1144 return IRQ_HANDLED;
1146} 1145}
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 720760cd493f..ba39647a690c 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
119 119
120static void bcm2835_thermal_debugfs(struct platform_device *pdev) 120static void bcm2835_thermal_debugfs(struct platform_device *pdev)
121{ 121{
122 struct thermal_zone_device *tz = platform_get_drvdata(pdev); 122 struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
123 struct bcm2835_thermal_data *data = tz->devdata;
124 struct debugfs_regset32 *regset; 123 struct debugfs_regset32 *regset;
125 124
126 data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL); 125 data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
266 265
267 data->tz = tz; 266 data->tz = tz;
268 267
269 platform_set_drvdata(pdev, tz); 268 platform_set_drvdata(pdev, data);
270 269
271 /* 270 /*
272 * Thermal_zone doesn't enable hwmon as default, 271 * Thermal_zone doesn't enable hwmon as default,
@@ -290,8 +289,8 @@ err_clk:
290 289
291static int bcm2835_thermal_remove(struct platform_device *pdev) 290static int bcm2835_thermal_remove(struct platform_device *pdev)
292{ 291{
293 struct thermal_zone_device *tz = platform_get_drvdata(pdev); 292 struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
294 struct bcm2835_thermal_data *data = tz->devdata; 293 struct thermal_zone_device *tz = data->tz;
295 294
296 debugfs_remove_recursive(data->debugfsdir); 295 debugfs_remove_recursive(data->debugfsdir);
297 thermal_zone_of_sensor_unregister(&pdev->dev, tz); 296 thermal_zone_of_sensor_unregister(&pdev->dev, tz);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6fff16113628..f7c1f49ec87f 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
536 struct thermal_zone_device *tz, u32 power, 536 struct thermal_zone_device *tz, u32 power,
537 unsigned long *state) 537 unsigned long *state)
538{ 538{
539 unsigned int cur_freq, target_freq; 539 unsigned int target_freq;
540 u32 last_load, normalised_power; 540 u32 last_load, normalised_power;
541 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; 541 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
542 struct cpufreq_policy *policy = cpufreq_cdev->policy; 542 struct cpufreq_policy *policy = cpufreq_cdev->policy;
543 543
544 cur_freq = cpufreq_quick_get(policy->cpu);
545 power = power > 0 ? power : 0; 544 power = power > 0 ? power : 0;
546 last_load = cpufreq_cdev->last_load ?: 1; 545 last_load = cpufreq_cdev->last_load ?: 1;
547 normalised_power = (power * 100) / last_load; 546 normalised_power = (power * 100) / last_load;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 61ca7ce3624e..5f3ed24e26ec 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
22 INT3400_THERMAL_PASSIVE_1, 22 INT3400_THERMAL_PASSIVE_1,
23 INT3400_THERMAL_ACTIVE, 23 INT3400_THERMAL_ACTIVE,
24 INT3400_THERMAL_CRITICAL, 24 INT3400_THERMAL_CRITICAL,
25 INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
26 INT3400_THERMAL_EMERGENCY_CALL_MODE,
27 INT3400_THERMAL_PASSIVE_2,
28 INT3400_THERMAL_POWER_BOSS,
29 INT3400_THERMAL_VIRTUAL_SENSOR,
30 INT3400_THERMAL_COOLING_MODE,
31 INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
25 INT3400_THERMAL_MAXIMUM_UUID, 32 INT3400_THERMAL_MAXIMUM_UUID,
26}; 33};
27 34
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
29 "42A441D6-AE6A-462b-A84B-4A8CE79027D3", 36 "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
30 "3A95C389-E4B8-4629-A526-C52C88626BAE", 37 "3A95C389-E4B8-4629-A526-C52C88626BAE",
31 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", 38 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
39 "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
40 "5349962F-71E6-431D-9AE8-0A635B710AEE",
41 "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
42 "F5A35014-C209-46A4-993A-EB56DE7530A1",
43 "6ED722A7-9240-48A5-B479-31EEF723D7CF",
44 "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
45 "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
32}; 46};
33 47
34struct int3400_thermal_priv { 48struct int3400_thermal_priv {
@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
299 313
300 platform_set_drvdata(pdev, priv); 314 platform_set_drvdata(pdev, priv);
301 315
302 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { 316 int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
303 int3400_thermal_ops.get_mode = int3400_thermal_get_mode; 317 int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
304 int3400_thermal_ops.set_mode = int3400_thermal_set_mode; 318
305 }
306 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, 319 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
307 priv, &int3400_thermal_ops, 320 priv, &int3400_thermal_ops,
308 &int3400_thermal_params, 0, 0); 321 &int3400_thermal_params, 0, 0);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 7571f7c2e7c9..ac7256b5f020 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
101 bool clamping; 101 bool clamping;
102}; 102};
103 103
104static struct powerclamp_worker_data * __percpu worker_data; 104static struct powerclamp_worker_data __percpu *worker_data;
105static struct thermal_cooling_device *cooling_dev; 105static struct thermal_cooling_device *cooling_dev;
106static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu 106static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
107 * clamping kthread worker 107 * clamping kthread worker
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
494 struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); 494 struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
495 struct kthread_worker *worker; 495 struct kthread_worker *worker;
496 496
497 worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); 497 worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
498 if (IS_ERR(worker)) 498 if (IS_ERR(worker))
499 return; 499 return;
500 500
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 5c07a61447d3..e4ea7f6aef20 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -199,6 +199,9 @@ enum {
199#define MT7622_TS1 0 199#define MT7622_TS1 0
200#define MT7622_NUM_CONTROLLER 1 200#define MT7622_NUM_CONTROLLER 1
201 201
202/* The maximum number of banks */
203#define MAX_NUM_ZONES 8
204
202/* The calibration coefficient of sensor */ 205/* The calibration coefficient of sensor */
203#define MT7622_CALIBRATION 165 206#define MT7622_CALIBRATION 165
204 207
@@ -249,7 +252,7 @@ struct mtk_thermal_data {
249 const int num_controller; 252 const int num_controller;
250 const int *controller_offset; 253 const int *controller_offset;
251 bool need_switch_bank; 254 bool need_switch_bank;
252 struct thermal_bank_cfg bank_data[]; 255 struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
253}; 256};
254 257
255struct mtk_thermal { 258struct mtk_thermal {
@@ -268,7 +271,7 @@ struct mtk_thermal {
268 s32 vts[MAX_NUM_VTS]; 271 s32 vts[MAX_NUM_VTS];
269 272
270 const struct mtk_thermal_data *conf; 273 const struct mtk_thermal_data *conf;
271 struct mtk_thermal_bank banks[]; 274 struct mtk_thermal_bank banks[MAX_NUM_ZONES];
272}; 275};
273 276
274/* MT8183 thermal sensor data */ 277/* MT8183 thermal sensor data */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 48eef552cba4..fc9399d9c082 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
666 struct exynos_tmu_data *data = p; 666 struct exynos_tmu_data *data = p;
667 int value, ret = 0; 667 int value, ret = 0;
668 668
669 if (!data || !data->tmu_read || !data->enabled) 669 if (!data || !data->tmu_read)
670 return -EINVAL; 670 return -EINVAL;
671 else if (!data->enabled) 671 else if (!data->enabled)
672 /* 672 /*
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d54818..3bdd56a1021b 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
49 struct clk *clk; 49 struct clk *clk;
50}; 50};
51 51
52static inline bool ar933x_uart_console_enabled(void)
53{
54 return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
55}
56
57static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, 52static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
58 int offset) 53 int offset)
59{ 54{
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
508 .verify_port = ar933x_uart_verify_port, 503 .verify_port = ar933x_uart_verify_port,
509}; 504};
510 505
506#ifdef CONFIG_SERIAL_AR933X_CONSOLE
511static struct ar933x_uart_port * 507static struct ar933x_uart_port *
512ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; 508ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
513 509
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
604 .index = -1, 600 .index = -1,
605 .data = &ar933x_uart_driver, 601 .data = &ar933x_uart_driver,
606}; 602};
607 603#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
608static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
609{
610 if (!ar933x_uart_console_enabled())
611 return;
612
613 ar933x_console_ports[up->port.line] = up;
614}
615 604
616static struct uart_driver ar933x_uart_driver = { 605static struct uart_driver ar933x_uart_driver = {
617 .owner = THIS_MODULE, 606 .owner = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
700 baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); 689 baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
701 up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); 690 up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
702 691
703 ar933x_uart_add_console_port(up); 692#ifdef CONFIG_SERIAL_AR933X_CONSOLE
693 ar933x_console_ports[up->port.line] = up;
694#endif
704 695
705 ret = uart_add_one_port(&ar933x_uart_driver, &up->port); 696 ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
706 if (ret) 697 if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
749{ 740{
750 int ret; 741 int ret;
751 742
752 if (ar933x_uart_console_enabled()) 743#ifdef CONFIG_SERIAL_AR933X_CONSOLE
753 ar933x_uart_driver.cons = &ar933x_uart_console; 744 ar933x_uart_driver.cons = &ar933x_uart_console;
745#endif
754 746
755 ret = uart_register_driver(&ar933x_uart_driver); 747 ret = uart_register_driver(&ar933x_uart_driver);
756 if (ret) 748 if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe24343..0b4f36905321 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
166 unsigned int pending_status; 166 unsigned int pending_status;
167 spinlock_t lock_suspended; 167 spinlock_t lock_suspended;
168 168
169 bool hd_start_rx; /* can start RX during half-duplex operation */
170
169 /* ISO7816 */ 171 /* ISO7816 */
170 unsigned int fidi_min; 172 unsigned int fidi_min;
171 unsigned int fidi_max; 173 unsigned int fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
231 __raw_writeb(value, port->membase + ATMEL_US_THR); 233 __raw_writeb(value, port->membase + ATMEL_US_THR);
232} 234}
233 235
236static inline int atmel_uart_is_half_duplex(struct uart_port *port)
237{
238 return ((port->rs485.flags & SER_RS485_ENABLED) &&
239 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
240 (port->iso7816.flags & SER_ISO7816_ENABLED);
241}
242
234#ifdef CONFIG_SERIAL_ATMEL_PDC 243#ifdef CONFIG_SERIAL_ATMEL_PDC
235static bool atmel_use_pdc_rx(struct uart_port *port) 244static bool atmel_use_pdc_rx(struct uart_port *port)
236{ 245{
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
608 /* Disable interrupts */ 617 /* Disable interrupts */
609 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 618 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
610 619
611 if (((port->rs485.flags & SER_RS485_ENABLED) && 620 if (atmel_uart_is_half_duplex(port))
612 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
613 port->iso7816.flags & SER_ISO7816_ENABLED)
614 atmel_start_rx(port); 621 atmel_start_rx(port);
622
615} 623}
616 624
617/* 625/*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
628 return; 636 return;
629 637
630 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 638 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
631 if (((port->rs485.flags & SER_RS485_ENABLED) && 639 if (atmel_uart_is_half_duplex(port))
632 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
633 port->iso7816.flags & SER_ISO7816_ENABLED)
634 atmel_stop_rx(port); 640 atmel_stop_rx(port);
635 641
636 if (atmel_use_pdc_tx(port)) 642 if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
928 */ 934 */
929 if (!uart_circ_empty(xmit)) 935 if (!uart_circ_empty(xmit))
930 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 936 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
931 else if (((port->rs485.flags & SER_RS485_ENABLED) && 937 else if (atmel_uart_is_half_duplex(port)) {
932 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 938 /*
933 port->iso7816.flags & SER_ISO7816_ENABLED) { 939 * DMA done, re-enable TXEMPTY and signal that we can stop
934 /* DMA done, stop TX, start RX for RS485 */ 940 * TX and start RX for RS485
935 atmel_start_rx(port); 941 */
942 atmel_port->hd_start_rx = true;
943 atmel_uart_writel(port, ATMEL_US_IER,
944 atmel_port->tx_done_mask);
936 } 945 }
937 946
938 spin_unlock_irqrestore(&port->lock, flags); 947 spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
1288 sg_dma_len(&atmel_port->sg_rx)/2, 1297 sg_dma_len(&atmel_port->sg_rx)/2,
1289 DMA_DEV_TO_MEM, 1298 DMA_DEV_TO_MEM,
1290 DMA_PREP_INTERRUPT); 1299 DMA_PREP_INTERRUPT);
1300 if (!desc) {
1301 dev_err(port->dev, "Preparing DMA cyclic failed\n");
1302 goto chan_err;
1303 }
1291 desc->callback = atmel_complete_rx_dma; 1304 desc->callback = atmel_complete_rx_dma;
1292 desc->callback_param = port; 1305 desc->callback_param = port;
1293 atmel_port->desc_rx = desc; 1306 atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1376 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1389 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1377 1390
1378 if (pending & atmel_port->tx_done_mask) { 1391 if (pending & atmel_port->tx_done_mask) {
1379 /* Either PDC or interrupt transmission */
1380 atmel_uart_writel(port, ATMEL_US_IDR, 1392 atmel_uart_writel(port, ATMEL_US_IDR,
1381 atmel_port->tx_done_mask); 1393 atmel_port->tx_done_mask);
1394
1395 /* Start RX if flag was set and FIFO is empty */
1396 if (atmel_port->hd_start_rx) {
1397 if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1398 & ATMEL_US_TXEMPTY))
1399 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1400
1401 atmel_port->hd_start_rx = false;
1402 atmel_start_rx(port);
1403 return;
1404 }
1405
1382 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1406 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1383 } 1407 }
1384} 1408}
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
1508 atmel_uart_writel(port, ATMEL_US_IER, 1532 atmel_uart_writel(port, ATMEL_US_IER,
1509 atmel_port->tx_done_mask); 1533 atmel_port->tx_done_mask);
1510 } else { 1534 } else {
1511 if (((port->rs485.flags & SER_RS485_ENABLED) && 1535 if (atmel_uart_is_half_duplex(port)) {
1512 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
1513 port->iso7816.flags & SER_ISO7816_ENABLED) {
1514 /* DMA done, stop TX, start RX for RS485 */ 1536 /* DMA done, stop TX, start RX for RS485 */
1515 atmel_start_rx(port); 1537 atmel_start_rx(port);
1516 } 1538 }
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e7af71..bfe5e9e034ec 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
148 char *cptr = config; 148 char *cptr = config;
149 struct console *cons; 149 struct console *cons;
150 150
151 if (!strlen(config) || isspace(config[0])) 151 if (!strlen(config) || isspace(config[0])) {
152 err = 0;
152 goto noconfig; 153 goto noconfig;
154 }
153 155
154 kgdboc_io_ops.is_console = 0; 156 kgdboc_io_ops.is_console = 0;
155 kgdb_tty_driver = NULL; 157 kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde405627..450ba6d7996c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
1415 if (spi->dev.of_node) { 1415 if (spi->dev.of_node) {
1416 const struct of_device_id *of_id = 1416 const struct of_device_id *of_id =
1417 of_match_device(max310x_dt_ids, &spi->dev); 1417 of_match_device(max310x_dt_ids, &spi->dev);
1418 if (!of_id)
1419 return -ENODEV;
1418 1420
1419 devtype = (struct max310x_devtype *)of_id->data; 1421 devtype = (struct max310x_devtype *)of_id->data;
1420 } else { 1422 } else {
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751d1ef4..7e7b1559fa36 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
810 return -EINVAL; 810 return -EINVAL;
811 } 811 }
812 812
813 if (!match)
814 return -ENODEV;
815
813 /* Assume that all UART ports have a DT alias or none has */ 816 /* Assume that all UART ports have a DT alias or none has */
814 id = of_alias_get_id(pdev->dev.of_node, "serial"); 817 id = of_alias_get_id(pdev->dev.of_node, "serial");
815 if (!pdev->dev.of_node || id < 0) 818 if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a526cce..4c188f4079b3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
1686 1686
1687 s->port.mapbase = r->start; 1687 s->port.mapbase = r->start;
1688 s->port.membase = ioremap(r->start, resource_size(r)); 1688 s->port.membase = ioremap(r->start, resource_size(r));
1689 if (!s->port.membase) {
1690 ret = -ENOMEM;
1691 goto out_disable_clks;
1692 }
1689 s->port.ops = &mxs_auart_ops; 1693 s->port.ops = &mxs_auart_ops;
1690 s->port.iotype = UPIO_MEM; 1694 s->port.iotype = UPIO_MEM;
1691 s->port.fifosize = MXS_AUART_FIFO_SIZE; 1695 s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c20219..35e5f9c5d5be 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
1050{ 1050{
1051 struct uart_port *uport; 1051 struct uart_port *uport;
1052 struct qcom_geni_serial_port *port; 1052 struct qcom_geni_serial_port *port;
1053 int baud; 1053 int baud = 9600;
1054 int bits = 8; 1054 int bits = 8;
1055 int parity = 'n'; 1055 int parity = 'n';
1056 int flow = 'n'; 1056 int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178cf3eed..09a183dfc526 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
1507 ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); 1507 ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
1508 if (ret < 0) { 1508 if (ret < 0) {
1509 pr_err("failed to init sc16is7xx i2c --> %d\n", ret); 1509 pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
1510 return ret; 1510 goto err_i2c;
1511 } 1511 }
1512#endif 1512#endif
1513 1513
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
1515 ret = spi_register_driver(&sc16is7xx_spi_uart_driver); 1515 ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
1516 if (ret < 0) { 1516 if (ret < 0) {
1517 pr_err("failed to init sc16is7xx spi --> %d\n", ret); 1517 pr_err("failed to init sc16is7xx spi --> %d\n", ret);
1518 return ret; 1518 goto err_spi;
1519 } 1519 }
1520#endif 1520#endif
1521 return ret; 1521 return ret;
1522
1523err_spi:
1524#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
1525 i2c_del_driver(&sc16is7xx_i2c_uart_driver);
1526#endif
1527err_i2c:
1528 uart_unregister_driver(&sc16is7xx_uart);
1529 return ret;
1522} 1530}
1523module_init(sc16is7xx_init); 1531module_init(sc16is7xx_init);
1524 1532
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd42b6d5..2d1c626312cd 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
838 838
839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
840 uart_write_wakeup(port); 840 uart_write_wakeup(port);
841 if (uart_circ_empty(xmit)) { 841 if (uart_circ_empty(xmit))
842 sci_stop_tx(port); 842 sci_stop_tx(port);
843 } else {
844 ctrl = serial_port_in(port, SCSCR);
845
846 if (port->type != PORT_SCI) {
847 serial_port_in(port, SCxSR); /* Dummy read */
848 sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
849 }
850 843
851 ctrl |= SCSCR_TIE;
852 serial_port_out(port, SCSCR, ctrl);
853 }
854} 844}
855 845
856/* On SH3, SCIF may read end-of-break as a space->mark char */ 846/* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..a9e12b3bc31d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
325 if (tty && C_HUPCL(tty)) 325 if (tty && C_HUPCL(tty))
326 tty_port_lower_dtr_rts(port); 326 tty_port_lower_dtr_rts(port);
327 327
328 if (port->ops->shutdown) 328 if (port->ops && port->ops->shutdown)
329 port->ops->shutdown(port); 329 port->ops->shutdown(port);
330 } 330 }
331out: 331out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
398 */ 398 */
399int tty_port_carrier_raised(struct tty_port *port) 399int tty_port_carrier_raised(struct tty_port *port)
400{ 400{
401 if (port->ops->carrier_raised == NULL) 401 if (!port->ops || !port->ops->carrier_raised)
402 return 1; 402 return 1;
403 return port->ops->carrier_raised(port); 403 return port->ops->carrier_raised(port);
404} 404}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
414 */ 414 */
415void tty_port_raise_dtr_rts(struct tty_port *port) 415void tty_port_raise_dtr_rts(struct tty_port *port)
416{ 416{
417 if (port->ops->dtr_rts) 417 if (port->ops && port->ops->dtr_rts)
418 port->ops->dtr_rts(port, 1); 418 port->ops->dtr_rts(port, 1);
419} 419}
420EXPORT_SYMBOL(tty_port_raise_dtr_rts); 420EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
429 */ 429 */
430void tty_port_lower_dtr_rts(struct tty_port *port) 430void tty_port_lower_dtr_rts(struct tty_port *port)
431{ 431{
432 if (port->ops->dtr_rts) 432 if (port->ops && port->ops->dtr_rts)
433 port->ops->dtr_rts(port, 0); 433 port->ops->dtr_rts(port, 0);
434} 434}
435EXPORT_SYMBOL(tty_port_lower_dtr_rts); 435EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
684 684
685 if (!tty_port_initialized(port)) { 685 if (!tty_port_initialized(port)) {
686 clear_bit(TTY_IO_ERROR, &tty->flags); 686 clear_bit(TTY_IO_ERROR, &tty->flags);
687 if (port->ops->activate) { 687 if (port->ops && port->ops->activate) {
688 int retval = port->ops->activate(port, tty); 688 int retval = port->ops->activate(port, tty);
689 if (retval) { 689 if (retval) {
690 mutex_unlock(&port->mutex); 690 mutex_unlock(&port->mutex);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f8960811a..ec666eb4b7b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
558 clear_bit(EVENT_RX_STALL, &acm->flags); 558 clear_bit(EVENT_RX_STALL, &acm->flags);
559 } 559 }
560 560
561 if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { 561 if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
562 tty_port_tty_wakeup(&acm->port); 562 tty_port_tty_wakeup(&acm->port);
563 clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
564 }
565} 563}
566 564
567/* 565/*
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bbc15e4..73c8e6591746 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
145 145
146 do { 146 do {
147 controller = of_find_node_with_property(controller, "phys"); 147 controller = of_find_node_with_property(controller, "phys");
148 if (!of_device_is_available(controller))
149 continue;
148 index = 0; 150 index = 0;
149 do { 151 do {
150 if (arg0 == -1) { 152 if (arg0 == -1) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181bb628..975d7c1288e3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
2742 retval = usb_phy_roothub_set_mode(hcd->phy_roothub, 2742 retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2743 PHY_MODE_USB_HOST_SS); 2743 PHY_MODE_USB_HOST_SS);
2744 if (retval) 2744 if (retval)
2745 retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2746 PHY_MODE_USB_HOST);
2747 if (retval)
2745 goto err_usb_phy_roothub_power_on; 2748 goto err_usb_phy_roothub_power_on;
2746 2749
2747 retval = usb_phy_roothub_power_on(hcd->phy_roothub); 2750 retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e403e8..8cced3609e24 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
29#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa 29#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
30#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 30#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
31#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 31#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
32#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
32#define PCI_DEVICE_ID_INTEL_GLK 0x31aa 33#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
33#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee 34#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
34#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e 35#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
305 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), 306 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
306 (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, 307 (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
307 308
309 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
310 (kernel_ulong_t) &dwc3_pci_intel_properties, },
311
308 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), 312 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
309 (kernel_ulong_t) &dwc3_pci_intel_properties, }, 313 (kernel_ulong_t) &dwc3_pci_intel_properties, },
310 314
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a5b25c..f3816a5c861e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ try_again:
391 req->complete = f_hidg_req_complete; 391 req->complete = f_hidg_req_complete;
392 req->context = hidg; 392 req->context = hidg;
393 393
394 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
395
394 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); 396 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
395 if (status < 0) { 397 if (status < 0) {
396 ERROR(hidg->func.config->cdev, 398 ERROR(hidg->func.config->cdev,
397 "usb_ep_queue error on int endpoint %zd\n", status); 399 "usb_ep_queue error on int endpoint %zd\n", status);
398 goto release_write_pending_unlocked; 400 goto release_write_pending;
399 } else { 401 } else {
400 status = count; 402 status = count;
401 } 403 }
402 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
403 404
404 return status; 405 return status;
405release_write_pending: 406release_write_pending:
406 spin_lock_irqsave(&hidg->write_spinlock, flags); 407 spin_lock_irqsave(&hidg->write_spinlock, flags);
407release_write_pending_unlocked:
408 hidg->write_pending = 0; 408 hidg->write_pending = 0;
409 spin_unlock_irqrestore(&hidg->write_spinlock, flags); 409 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
410 410
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f3126580e..c2011cd7df8c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
945 break; 945 break;
946 } 946 }
947 if (&req->req != _req) { 947 if (&req->req != _req) {
948 ep->stopped = stopped;
948 spin_unlock_irqrestore(&ep->dev->lock, flags); 949 spin_unlock_irqrestore(&ep->dev->lock, flags);
949 return -EINVAL; 950 return -EINVAL;
950 } 951 }
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f82450bf4..898339e5df10 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
866 (void) readl(&ep->dev->pci->pcimstctl); 866 (void) readl(&ep->dev->pci->pcimstctl);
867 867
868 writel(BIT(DMA_START), &dma->dmastat); 868 writel(BIT(DMA_START), &dma->dmastat);
869
870 if (!ep->is_in)
871 stop_out_naking(ep);
872} 869}
873 870
874static void start_dma(struct net2280_ep *ep, struct net2280_request *req) 871static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
907 writel(BIT(DMA_START), &dma->dmastat); 904 writel(BIT(DMA_START), &dma->dmastat);
908 return; 905 return;
909 } 906 }
907 stop_out_naking(ep);
910 } 908 }
911 909
912 tmp = dmactl_default; 910 tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1275 break; 1273 break;
1276 } 1274 }
1277 if (&req->req != _req) { 1275 if (&req->req != _req) {
1276 ep->stopped = stopped;
1278 spin_unlock_irqrestore(&ep->dev->lock, flags); 1277 spin_unlock_irqrestore(&ep->dev->lock, flags);
1279 dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", 1278 ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
1280 __func__);
1281 return -EINVAL; 1279 return -EINVAL;
1282 } 1280 }
1283 1281
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f0a20a..6343fbacd244 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
3204 printk(KERN_INFO "driver %s\n", hcd_name); 3204 printk(KERN_INFO "driver %s\n", hcd_name);
3205 workqueue = create_singlethread_workqueue("u132"); 3205 workqueue = create_singlethread_workqueue("u132");
3206 retval = platform_driver_register(&u132_platform_driver); 3206 retval = platform_driver_register(&u132_platform_driver);
3207 if (retval)
3208 destroy_workqueue(workqueue);
3209
3207 return retval; 3210 return retval;
3208} 3211}
3209 3212
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be578abb0..d932cc31711e 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
516 return -1; 516 return -1;
517 517
518 writel(0, &dbc->regs->control); 518 writel(0, &dbc->regs->control);
519 xhci_dbc_mem_cleanup(xhci);
520 dbc->state = DS_DISABLED; 519 dbc->state = DS_DISABLED;
521 520
522 return 0; 521 return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
562 ret = xhci_do_dbc_stop(xhci); 561 ret = xhci_do_dbc_stop(xhci);
563 spin_unlock_irqrestore(&dbc->lock, flags); 562 spin_unlock_irqrestore(&dbc->lock, flags);
564 563
565 if (!ret) 564 if (!ret) {
565 xhci_dbc_mem_cleanup(xhci);
566 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); 566 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
567 }
567} 568}
568 569
569static void 570static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece693655..96a740543183 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1545 port_index = max_ports; 1545 port_index = max_ports;
1546 while (port_index--) { 1546 while (port_index--) {
1547 u32 t1, t2; 1547 u32 t1, t2;
1548 1548 int retries = 10;
1549retry:
1549 t1 = readl(ports[port_index]->addr); 1550 t1 = readl(ports[port_index]->addr);
1550 t2 = xhci_port_state_to_neutral(t1); 1551 t2 = xhci_port_state_to_neutral(t1);
1551 portsc_buf[port_index] = 0; 1552 portsc_buf[port_index] = 0;
1552 1553
1553 /* Bail out if a USB3 port has a new device in link training */ 1554 /*
1554 if ((hcd->speed >= HCD_USB3) && 1555 * Give a USB3 port in link training time to finish, but don't
1556 * prevent suspend as port might be stuck
1557 */
1558 if ((hcd->speed >= HCD_USB3) && retries-- &&
1555 (t1 & PORT_PLS_MASK) == XDEV_POLLING) { 1559 (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1556 bus_state->bus_suspended = 0;
1557 spin_unlock_irqrestore(&xhci->lock, flags); 1560 spin_unlock_irqrestore(&xhci->lock, flags);
1558 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); 1561 msleep(XHCI_PORT_POLLING_LFPS_TIME);
1559 return -EBUSY; 1562 spin_lock_irqsave(&xhci->lock, flags);
1563 xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
1564 port_index);
1565 goto retry;
1560 } 1566 }
1561
1562 /* suspend ports in U0, or bail out for new connect changes */ 1567 /* suspend ports in U0, or bail out for new connect changes */
1563 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { 1568 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
1564 if ((t1 & PORT_CSC) && wake_enabled) { 1569 if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e463715779..671bce18782c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
246 if (!xhci_rcar_wait_for_pll_active(hcd)) 246 if (!xhci_rcar_wait_for_pll_active(hcd))
247 return -ETIMEDOUT; 247 return -ETIMEDOUT;
248 248
249 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
249 return xhci_rcar_download_firmware(hcd); 250 return xhci_rcar_download_firmware(hcd);
250} 251}
251 252
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c4d041..9215a28dad40 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
1647 } 1647 }
1648 } 1648 }
1649 1649
1650 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && 1650 if ((portsc & PORT_PLC) &&
1651 DEV_SUPERSPEED_ANY(portsc)) { 1651 DEV_SUPERSPEED_ANY(portsc) &&
1652 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1653 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1654 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1652 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); 1655 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1653 /* We've just brought the device into U0 through either the 1656 /* We've just brought the device into U0/1/2 through either the
1654 * Resume state after a device remote wakeup, or through the 1657 * Resume state after a device remote wakeup, or through the
1655 * U3Exit state after a host-initiated resume. If it's a device 1658 * U3Exit state after a host-initiated resume. If it's a device
1656 * initiated remote wake, don't pass up the link state change, 1659 * initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36e3012..9334cdee382a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
452 */ 452 */
453#define XHCI_DEFAULT_BESL 4 453#define XHCI_DEFAULT_BESL 4
454 454
455/*
456 * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
457 * to complete link training. usually link trainig completes much faster
458 * so check status 10 times with 36ms sleep in places we need to wait for
459 * polling to complete.
460 */
461#define XHCI_PORT_POLLING_LFPS_TIME 36
462
455/** 463/**
456 * struct xhci_intr_reg - Interrupt Register Set 464 * struct xhci_intr_reg - Interrupt Register Set
457 * @irq_pending: IMAN - Interrupt Management Register. Used to enable 465 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d1d383..04684849d683 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
547 */ 547 */
548 hub->port_swap = USB251XB_DEF_PORT_SWAP; 548 hub->port_swap = USB251XB_DEF_PORT_SWAP;
549 of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) { 549 of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
550 if ((port >= 0) && (port <= data->port_cnt)) 550 if (port <= data->port_cnt)
551 hub->port_swap |= BIT(port); 551 hub->port_swap |= BIT(port);
552 } 552 }
553 553
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
612 dev); 612 dev);
613 int err; 613 int err;
614 614
615 if (np) { 615 if (np && of_id) {
616 err = usb251xb_get_ofdata(hub, 616 err = usb251xb_get_ofdata(hub,
617 (struct usb251xb_data *)of_id->data); 617 (struct usb251xb_data *)of_id->data);
618 if (err) { 618 if (err) {
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc23486c4ed..928c2cd6fc00 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@ config USB_MTU3
6 tristate "MediaTek USB3 Dual Role controller" 6 tristate "MediaTek USB3 Dual Role controller"
7 depends on USB || USB_GADGET 7 depends on USB || USB_GADGET
8 depends on ARCH_MEDIATEK || COMPILE_TEST 8 depends on ARCH_MEDIATEK || COMPILE_TEST
9 depends on EXTCON || !EXTCON
9 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD 10 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
10 help 11 help
11 Say Y or M here if your system runs on MediaTek SoCs with 12 Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23ab0189..979bef9bfb6b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
80 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ 80 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
81 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ 81 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
82 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ 82 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
83 { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
83 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 84 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
84 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ 85 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
85 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 86 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b17471759..1d8461ae2c34 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
609 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 609 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
610 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), 610 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
611 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 611 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
612 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
613 { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
612 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, 614 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
613 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, 615 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
614 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, 616 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bedb55a1..5755f0df0025 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
567/* 567/*
568 * NovaTech product ids (FTDI_VID) 568 * NovaTech product ids (FTDI_VID)
569 */ 569 */
570#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ 570#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
571#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
572#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
571 573
572/* 574/*
573 * Synapse Wireless product ids (FTDI_VID) 575 * Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac75fbf6..18110225d506 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
366 if (!urbtrack) 366 if (!urbtrack)
367 return -ENOMEM; 367 return -ENOMEM;
368 368
369 kref_get(&mos_parport->ref_count);
370 urbtrack->mos_parport = mos_parport;
371 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); 369 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
372 if (!urbtrack->urb) { 370 if (!urbtrack->urb) {
373 kfree(urbtrack); 371 kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
388 usb_sndctrlpipe(usbdev, 0), 386 usb_sndctrlpipe(usbdev, 0),
389 (unsigned char *)urbtrack->setup, 387 (unsigned char *)urbtrack->setup,
390 NULL, 0, async_complete, urbtrack); 388 NULL, 0, async_complete, urbtrack);
389 kref_get(&mos_parport->ref_count);
390 urbtrack->mos_parport = mos_parport;
391 kref_init(&urbtrack->ref_count); 391 kref_init(&urbtrack->ref_count);
392 INIT_LIST_HEAD(&urbtrack->urblist_entry); 392 INIT_LIST_HEAD(&urbtrack->urblist_entry);
393 393
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9410f3..83869065b802 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
246#define QUECTEL_PRODUCT_EC25 0x0125 246#define QUECTEL_PRODUCT_EC25 0x0125
247#define QUECTEL_PRODUCT_BG96 0x0296 247#define QUECTEL_PRODUCT_BG96 0x0296
248#define QUECTEL_PRODUCT_EP06 0x0306 248#define QUECTEL_PRODUCT_EP06 0x0306
249#define QUECTEL_PRODUCT_EM12 0x0512
249 250
250#define CMOTECH_VENDOR_ID 0x16d8 251#define CMOTECH_VENDOR_ID 0x16d8
251#define CMOTECH_PRODUCT_6001 0x6001 252#define CMOTECH_PRODUCT_6001 0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
1066 .driver_info = RSVD(3) }, 1067 .driver_info = RSVD(3) },
1067 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1069 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1069 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1070 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
1071 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
1070 /* Quectel products using Qualcomm vendor ID */ 1072 /* Quectel products using Qualcomm vendor ID */
1071 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, 1073 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1072 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), 1074 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
1087 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), 1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
1088 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, 1090 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, 1091 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
1092 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
1093 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1094 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
1090 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1095 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1091 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1096 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1092 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1097 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
1940 .driver_info = RSVD(4) }, 1945 .driver_info = RSVD(4) },
1941 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 1946 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
1942 .driver_info = RSVD(4) }, 1947 .driver_info = RSVD(4) },
1943 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1948 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1944 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1949 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1945 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 1950 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1946 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1951 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
1952 .driver_info = RSVD(4) },
1953 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1947 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1954 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1948 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1955 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1949 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, 1956 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db091d8d..a2233d72ae7c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
37 S(SRC_ATTACHED), \ 37 S(SRC_ATTACHED), \
38 S(SRC_STARTUP), \ 38 S(SRC_STARTUP), \
39 S(SRC_SEND_CAPABILITIES), \ 39 S(SRC_SEND_CAPABILITIES), \
40 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
40 S(SRC_NEGOTIATE_CAPABILITIES), \ 41 S(SRC_NEGOTIATE_CAPABILITIES), \
41 S(SRC_TRANSITION_SUPPLY), \ 42 S(SRC_TRANSITION_SUPPLY), \
42 S(SRC_READY), \ 43 S(SRC_READY), \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
2966 /* port->hard_reset_count = 0; */ 2967 /* port->hard_reset_count = 0; */
2967 port->caps_count = 0; 2968 port->caps_count = 0;
2968 port->pd_capable = true; 2969 port->pd_capable = true;
2969 tcpm_set_state_cond(port, hard_reset_state(port), 2970 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
2970 PD_T_SEND_SOURCE_CAP); 2971 PD_T_SEND_SOURCE_CAP);
2971 } 2972 }
2972 break; 2973 break;
2974 case SRC_SEND_CAPABILITIES_TIMEOUT:
2975 /*
2976 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
2977 *
2978 * PD 2.0 sinks are supposed to accept src-capabilities with a
2979 * 3.0 header and simply ignore any src PDOs which the sink does
2980 * not understand such as PPS but some 2.0 sinks instead ignore
2981 * the entire PD_DATA_SOURCE_CAP message, causing contract
2982 * negotiation to fail.
2983 *
2984 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
2985 * sending src-capabilities with a lower PD revision to
2986 * make these broken sinks work.
2987 */
2988 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
2989 tcpm_set_state(port, HARD_RESET_SEND, 0);
2990 } else if (port->negotiated_rev > PD_REV20) {
2991 port->negotiated_rev--;
2992 port->hard_reset_count = 0;
2993 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2994 } else {
2995 tcpm_set_state(port, hard_reset_state(port), 0);
2996 }
2997 break;
2973 case SRC_NEGOTIATE_CAPABILITIES: 2998 case SRC_NEGOTIATE_CAPABILITIES:
2974 ret = tcpm_pd_check_request(port); 2999 ret = tcpm_pd_check_request(port);
2975 if (ret < 0) { 3000 if (ret < 0) {
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..6770afd40765 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
615 wcove->dev = &pdev->dev; 615 wcove->dev = &pdev->dev;
616 wcove->regmap = pmic->regmap; 616 wcove->regmap = pmic->regmap;
617 617
618 irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, 618 irq = platform_get_irq(pdev, 0);
619 platform_get_irq(pdev, 0)); 619 if (irq < 0) {
620 dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
621 return irq;
622 }
623
624 irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
620 if (irq < 0) 625 if (irq < 0)
621 return irq; 626 return irq;
622 627
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
27 27
28#define GUEST_MAPPINGS_TRIES 5 28#define GUEST_MAPPINGS_TRIES 5
29 29
30#define VBG_KERNEL_REQUEST \
31 (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
32 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
33
30/** 34/**
31 * Reserves memory in which the VMM can relocate any guest mappings 35 * Reserves memory in which the VMM can relocate any guest mappings
32 * that are floating around. 36 * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
48 int i, rc; 52 int i, rc;
49 53
50 /* Query the required space. */ 54 /* Query the required space. */
51 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); 55 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
56 VBG_KERNEL_REQUEST);
52 if (!req) 57 if (!req)
53 return; 58 return;
54 59
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
135 * Tell the host that we're going to free the memory we reserved for 140 * Tell the host that we're going to free the memory we reserved for
136 * it, the free it up. (Leak the memory if anything goes wrong here.) 141 * it, the free it up. (Leak the memory if anything goes wrong here.)
137 */ 142 */
138 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); 143 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
144 VBG_KERNEL_REQUEST);
139 if (!req) 145 if (!req)
140 return; 146 return;
141 147
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
172 struct vmmdev_guest_info2 *req2 = NULL; 178 struct vmmdev_guest_info2 *req2 = NULL;
173 int rc, ret = -ENOMEM; 179 int rc, ret = -ENOMEM;
174 180
175 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); 181 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
176 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); 182 VBG_KERNEL_REQUEST);
183 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
184 VBG_KERNEL_REQUEST);
177 if (!req1 || !req2) 185 if (!req1 || !req2)
178 goto out_free; 186 goto out_free;
179 187
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
187 req2->additions_minor = VBG_VERSION_MINOR; 195 req2->additions_minor = VBG_VERSION_MINOR;
188 req2->additions_build = VBG_VERSION_BUILD; 196 req2->additions_build = VBG_VERSION_BUILD;
189 req2->additions_revision = VBG_SVN_REV; 197 req2->additions_revision = VBG_SVN_REV;
190 /* (no features defined yet) */ 198 req2->additions_features =
191 req2->additions_features = 0; 199 VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
192 strlcpy(req2->name, VBG_VERSION_STRING, 200 strlcpy(req2->name, VBG_VERSION_STRING,
193 sizeof(req2->name)); 201 sizeof(req2->name));
194 202
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
230 struct vmmdev_guest_status *req; 238 struct vmmdev_guest_status *req;
231 int rc; 239 int rc;
232 240
233 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); 241 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
242 VBG_KERNEL_REQUEST);
234 if (!req) 243 if (!req)
235 return -ENOMEM; 244 return -ENOMEM;
236 245
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
423 struct vmmdev_heartbeat *req; 432 struct vmmdev_heartbeat *req;
424 int rc; 433 int rc;
425 434
426 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); 435 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
436 VBG_KERNEL_REQUEST);
427 if (!req) 437 if (!req)
428 return -ENOMEM; 438 return -ENOMEM;
429 439
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
457 467
458 gdev->guest_heartbeat_req = vbg_req_alloc( 468 gdev->guest_heartbeat_req = vbg_req_alloc(
459 sizeof(*gdev->guest_heartbeat_req), 469 sizeof(*gdev->guest_heartbeat_req),
460 VMMDEVREQ_GUEST_HEARTBEAT); 470 VMMDEVREQ_GUEST_HEARTBEAT,
471 VBG_KERNEL_REQUEST);
461 if (!gdev->guest_heartbeat_req) 472 if (!gdev->guest_heartbeat_req)
462 return -ENOMEM; 473 return -ENOMEM;
463 474
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
528 struct vmmdev_mask *req; 539 struct vmmdev_mask *req;
529 int rc; 540 int rc;
530 541
531 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 542 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
543 VBG_KERNEL_REQUEST);
532 if (!req) 544 if (!req)
533 return -ENOMEM; 545 return -ENOMEM;
534 546
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
567 u32 changed, previous; 579 u32 changed, previous;
568 int rc, ret = 0; 580 int rc, ret = 0;
569 581
570 /* Allocate a request buffer before taking the spinlock */ 582 /*
571 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 583 * Allocate a request buffer before taking the spinlock, when
584 * the session is being terminated the requestor is the kernel,
585 * as we're cleaning up.
586 */
587 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
588 session_termination ? VBG_KERNEL_REQUEST :
589 session->requestor);
572 if (!req) { 590 if (!req) {
573 if (!session_termination) 591 if (!session_termination)
574 return -ENOMEM; 592 return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
627 struct vmmdev_mask *req; 645 struct vmmdev_mask *req;
628 int rc; 646 int rc;
629 647
630 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 648 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
649 VBG_KERNEL_REQUEST);
631 if (!req) 650 if (!req)
632 return -ENOMEM; 651 return -ENOMEM;
633 652
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
662 u32 changed, previous; 681 u32 changed, previous;
663 int rc, ret = 0; 682 int rc, ret = 0;
664 683
665 /* Allocate a request buffer before taking the spinlock */ 684 /*
666 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 685 * Allocate a request buffer before taking the spinlock, when
686 * the session is being terminated the requestor is the kernel,
687 * as we're cleaning up.
688 */
689 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
690 session_termination ? VBG_KERNEL_REQUEST :
691 session->requestor);
667 if (!req) { 692 if (!req) {
668 if (!session_termination) 693 if (!session_termination)
669 return -ENOMEM; 694 return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
722 struct vmmdev_host_version *req; 747 struct vmmdev_host_version *req;
723 int rc, ret; 748 int rc, ret;
724 749
725 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION); 750 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
751 VBG_KERNEL_REQUEST);
726 if (!req) 752 if (!req)
727 return -ENOMEM; 753 return -ENOMEM;
728 754
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
783 809
784 gdev->mem_balloon.get_req = 810 gdev->mem_balloon.get_req =
785 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), 811 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
786 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ); 812 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
813 VBG_KERNEL_REQUEST);
787 gdev->mem_balloon.change_req = 814 gdev->mem_balloon.change_req =
788 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), 815 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
789 VMMDEVREQ_CHANGE_MEMBALLOON); 816 VMMDEVREQ_CHANGE_MEMBALLOON,
817 VBG_KERNEL_REQUEST);
790 gdev->cancel_req = 818 gdev->cancel_req =
791 vbg_req_alloc(sizeof(*(gdev->cancel_req)), 819 vbg_req_alloc(sizeof(*(gdev->cancel_req)),
792 VMMDEVREQ_HGCM_CANCEL2); 820 VMMDEVREQ_HGCM_CANCEL2,
821 VBG_KERNEL_REQUEST);
793 gdev->ack_events_req = 822 gdev->ack_events_req =
794 vbg_req_alloc(sizeof(*gdev->ack_events_req), 823 vbg_req_alloc(sizeof(*gdev->ack_events_req),
795 VMMDEVREQ_ACKNOWLEDGE_EVENTS); 824 VMMDEVREQ_ACKNOWLEDGE_EVENTS,
825 VBG_KERNEL_REQUEST);
796 gdev->mouse_status_req = 826 gdev->mouse_status_req =
797 vbg_req_alloc(sizeof(*gdev->mouse_status_req), 827 vbg_req_alloc(sizeof(*gdev->mouse_status_req),
798 VMMDEVREQ_GET_MOUSE_STATUS); 828 VMMDEVREQ_GET_MOUSE_STATUS,
829 VBG_KERNEL_REQUEST);
799 830
800 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || 831 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
801 !gdev->cancel_req || !gdev->ack_events_req || 832 !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
892 * vboxguest_linux.c calls this when userspace opens the char-device. 923 * vboxguest_linux.c calls this when userspace opens the char-device.
893 * Return: A pointer to the new session or an ERR_PTR on error. 924 * Return: A pointer to the new session or an ERR_PTR on error.
894 * @gdev: The Guest extension device. 925 * @gdev: The Guest extension device.
895 * @user: Set if this is a session for the vboxuser device. 926 * @requestor: VMMDEV_REQUESTOR_* flags
896 */ 927 */
897struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) 928struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
898{ 929{
899 struct vbg_session *session; 930 struct vbg_session *session;
900 931
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
903 return ERR_PTR(-ENOMEM); 934 return ERR_PTR(-ENOMEM);
904 935
905 session->gdev = gdev; 936 session->gdev = gdev;
906 session->user_session = user; 937 session->requestor = requestor;
907 938
908 return session; 939 return session;
909} 940}
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
924 if (!session->hgcm_client_ids[i]) 955 if (!session->hgcm_client_ids[i])
925 continue; 956 continue;
926 957
927 vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc); 958 /* requestor is kernel here, as we're cleaning up. */
959 vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
960 session->hgcm_client_ids[i], &rc);
928 } 961 }
929 962
930 kfree(session); 963 kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1152 return -EPERM; 1185 return -EPERM;
1153 } 1186 }
1154 1187
1155 if (trusted_apps_only && session->user_session) { 1188 if (trusted_apps_only &&
1189 (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
1156 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", 1190 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1157 req->request_type); 1191 req->request_type);
1158 return -EPERM; 1192 return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1209 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) 1243 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1210 return -EMFILE; 1244 return -EMFILE;
1211 1245
1212 ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id, 1246 ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
1213 &conn->hdr.rc); 1247 &client_id, &conn->hdr.rc);
1214 1248
1215 mutex_lock(&gdev->session_mutex); 1249 mutex_lock(&gdev->session_mutex);
1216 if (ret == 0 && conn->hdr.rc >= 0) { 1250 if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1251 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) 1285 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1252 return -EINVAL; 1286 return -EINVAL;
1253 1287
1254 ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc); 1288 ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
1289 &disconn->hdr.rc);
1255 1290
1256 mutex_lock(&gdev->session_mutex); 1291 mutex_lock(&gdev->session_mutex);
1257 if (ret == 0 && disconn->hdr.rc >= 0) 1292 if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1313 } 1348 }
1314 1349
1315 if (IS_ENABLED(CONFIG_COMPAT) && f32bit) 1350 if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1316 ret = vbg_hgcm_call32(gdev, client_id, 1351 ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
1317 call->function, call->timeout_ms, 1352 call->function, call->timeout_ms,
1318 VBG_IOCTL_HGCM_CALL_PARMS32(call), 1353 VBG_IOCTL_HGCM_CALL_PARMS32(call),
1319 call->parm_count, &call->hdr.rc); 1354 call->parm_count, &call->hdr.rc);
1320 else 1355 else
1321 ret = vbg_hgcm_call(gdev, client_id, 1356 ret = vbg_hgcm_call(gdev, session->requestor, client_id,
1322 call->function, call->timeout_ms, 1357 call->function, call->timeout_ms,
1323 VBG_IOCTL_HGCM_CALL_PARMS(call), 1358 VBG_IOCTL_HGCM_CALL_PARMS(call),
1324 call->parm_count, &call->hdr.rc); 1359 call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1408} 1443}
1409 1444
1410static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, 1445static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1446 struct vbg_session *session,
1411 struct vbg_ioctl_write_coredump *dump) 1447 struct vbg_ioctl_write_coredump *dump)
1412{ 1448{
1413 struct vmmdev_write_core_dump *req; 1449 struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1415 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) 1451 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1416 return -EINVAL; 1452 return -EINVAL;
1417 1453
1418 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP); 1454 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
1455 session->requestor);
1419 if (!req) 1456 if (!req)
1420 return -ENOMEM; 1457 return -ENOMEM;
1421 1458
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1476 case VBG_IOCTL_CHECK_BALLOON: 1513 case VBG_IOCTL_CHECK_BALLOON:
1477 return vbg_ioctl_check_balloon(gdev, data); 1514 return vbg_ioctl_check_balloon(gdev, data);
1478 case VBG_IOCTL_WRITE_CORE_DUMP: 1515 case VBG_IOCTL_WRITE_CORE_DUMP:
1479 return vbg_ioctl_write_core_dump(gdev, data); 1516 return vbg_ioctl_write_core_dump(gdev, session, data);
1480 } 1517 }
1481 1518
1482 /* Variable sized requests. */ 1519 /* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1508 struct vmmdev_mouse_status *req; 1545 struct vmmdev_mouse_status *req;
1509 int rc; 1546 int rc;
1510 1547
1511 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS); 1548 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
1549 VBG_KERNEL_REQUEST);
1512 if (!req) 1550 if (!req)
1513 return -ENOMEM; 1551 return -ENOMEM;
1514 1552
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
154 * host. Protected by vbg_gdev.session_mutex. 154 * host. Protected by vbg_gdev.session_mutex.
155 */ 155 */
156 u32 guest_caps; 156 u32 guest_caps;
157 /** Does this session belong to a root process or a user one? */ 157 /** VMMDEV_REQUESTOR_* flags */
158 bool user_session; 158 u32 requestor;
159 /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */ 159 /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
160 bool cancel_waiters; 160 bool cancel_waiters;
161}; 161};
162 162
163int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events); 163int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
164void vbg_core_exit(struct vbg_dev *gdev); 164void vbg_core_exit(struct vbg_dev *gdev);
165struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user); 165struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
166void vbg_core_close_session(struct vbg_session *session); 166void vbg_core_close_session(struct vbg_session *session);
167int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data); 167int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
168int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features); 168int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
172void vbg_linux_mouse_event(struct vbg_dev *gdev); 172void vbg_linux_mouse_event(struct vbg_dev *gdev);
173 173
174/* Private (non exported) functions form vboxguest_utils.c */ 174/* Private (non exported) functions form vboxguest_utils.c */
175void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); 175void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
176 u32 requestor);
176void vbg_req_free(void *req, size_t len); 177void vbg_req_free(void *req, size_t len);
177int vbg_req_perform(struct vbg_dev *gdev, void *req); 178int vbg_req_perform(struct vbg_dev *gdev, void *req);
178int vbg_hgcm_call32( 179int vbg_hgcm_call32(
179 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, 180 struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
180 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, 181 u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
181 int *vbox_status); 182 u32 parm_count, int *vbox_status);
182 183
183#endif 184#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
5 * Copyright (C) 2006-2016 Oracle Corporation 5 * Copyright (C) 2006-2016 Oracle Corporation
6 */ 6 */
7 7
8#include <linux/cred.h>
8#include <linux/input.h> 9#include <linux/input.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/miscdevice.h> 11#include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
28/** Global vbg_gdev pointer used by vbg_get/put_gdev. */ 29/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
29static struct vbg_dev *vbg_gdev; 30static struct vbg_dev *vbg_gdev;
30 31
32static u32 vbg_misc_device_requestor(struct inode *inode)
33{
34 u32 requestor = VMMDEV_REQUESTOR_USERMODE |
35 VMMDEV_REQUESTOR_CON_DONT_KNOW |
36 VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
37
38 if (from_kuid(current_user_ns(), current->cred->uid) == 0)
39 requestor |= VMMDEV_REQUESTOR_USR_ROOT;
40 else
41 requestor |= VMMDEV_REQUESTOR_USR_USER;
42
43 if (in_egroup_p(inode->i_gid))
44 requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
45
46 return requestor;
47}
48
31static int vbg_misc_device_open(struct inode *inode, struct file *filp) 49static int vbg_misc_device_open(struct inode *inode, struct file *filp)
32{ 50{
33 struct vbg_session *session; 51 struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
36 /* misc_open sets filp->private_data to our misc device */ 54 /* misc_open sets filp->private_data to our misc device */
37 gdev = container_of(filp->private_data, struct vbg_dev, misc_device); 55 gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
38 56
39 session = vbg_core_open_session(gdev, false); 57 session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
40 if (IS_ERR(session)) 58 if (IS_ERR(session))
41 return PTR_ERR(session); 59 return PTR_ERR(session);
42 60
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
53 gdev = container_of(filp->private_data, struct vbg_dev, 71 gdev = container_of(filp->private_data, struct vbg_dev,
54 misc_device_user); 72 misc_device_user);
55 73
56 session = vbg_core_open_session(gdev, false); 74 session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
75 VMMDEV_REQUESTOR_USER_DEVICE);
57 if (IS_ERR(session)) 76 if (IS_ERR(session))
58 return PTR_ERR(session); 77 return PTR_ERR(session);
59 78
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
115 req == VBG_IOCTL_VMMDEV_REQUEST_BIG; 134 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
116 135
117 if (is_vmmdev_req) 136 if (is_vmmdev_req)
118 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); 137 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
138 session->requestor);
119 else 139 else
120 buf = kmalloc(size, GFP_KERNEL); 140 buf = kmalloc(size, GFP_KERNEL);
121 if (!buf) 141 if (!buf)
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
62VBG_LOG(vbg_debug, pr_debug); 62VBG_LOG(vbg_debug, pr_debug);
63#endif 63#endif
64 64
65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) 65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
66 u32 requestor)
66{ 67{
67 struct vmmdev_request_header *req; 68 struct vmmdev_request_header *req;
68 int order = get_order(PAGE_ALIGN(len)); 69 int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
78 req->request_type = req_type; 79 req->request_type = req_type;
79 req->rc = VERR_GENERAL_FAILURE; 80 req->rc = VERR_GENERAL_FAILURE;
80 req->reserved1 = 0; 81 req->reserved1 = 0;
81 req->reserved2 = 0; 82 req->requestor = requestor;
82 83
83 return req; 84 return req;
84} 85}
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
119 return done; 120 return done;
120} 121}
121 122
122int vbg_hgcm_connect(struct vbg_dev *gdev, 123int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
123 struct vmmdev_hgcm_service_location *loc, 124 struct vmmdev_hgcm_service_location *loc,
124 u32 *client_id, int *vbox_status) 125 u32 *client_id, int *vbox_status)
125{ 126{
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
127 int rc; 128 int rc;
128 129
129 hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), 130 hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
130 VMMDEVREQ_HGCM_CONNECT); 131 VMMDEVREQ_HGCM_CONNECT, requestor);
131 if (!hgcm_connect) 132 if (!hgcm_connect)
132 return -ENOMEM; 133 return -ENOMEM;
133 134
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
153} 154}
154EXPORT_SYMBOL(vbg_hgcm_connect); 155EXPORT_SYMBOL(vbg_hgcm_connect);
155 156
156int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) 157int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
158 u32 client_id, int *vbox_status)
157{ 159{
158 struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; 160 struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
159 int rc; 161 int rc;
160 162
161 hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), 163 hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
162 VMMDEVREQ_HGCM_DISCONNECT); 164 VMMDEVREQ_HGCM_DISCONNECT,
165 requestor);
163 if (!hgcm_disconnect) 166 if (!hgcm_disconnect)
164 return -ENOMEM; 167 return -ENOMEM;
165 168
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
593 return 0; 596 return 0;
594} 597}
595 598
596int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, 599int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
597 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 600 u32 function, u32 timeout_ms,
598 u32 parm_count, int *vbox_status) 601 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
602 int *vbox_status)
599{ 603{
600 struct vmmdev_hgcm_call *call; 604 struct vmmdev_hgcm_call *call;
601 void **bounce_bufs = NULL; 605 void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
615 goto free_bounce_bufs; 619 goto free_bounce_bufs;
616 } 620 }
617 621
618 call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL); 622 call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
619 if (!call) { 623 if (!call) {
620 ret = -ENOMEM; 624 ret = -ENOMEM;
621 goto free_bounce_bufs; 625 goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
647 651
648#ifdef CONFIG_COMPAT 652#ifdef CONFIG_COMPAT
649int vbg_hgcm_call32( 653int vbg_hgcm_call32(
650 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, 654 struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
651 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, 655 u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
652 int *vbox_status) 656 u32 parm_count, int *vbox_status)
653{ 657{
654 struct vmmdev_hgcm_function_parameter *parm64 = NULL; 658 struct vmmdev_hgcm_function_parameter *parm64 = NULL;
655 u32 i, size; 659 u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
689 goto out_free; 693 goto out_free;
690 } 694 }
691 695
692 ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms, 696 ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
693 parm64, parm_count, vbox_status); 697 parm64, parm_count, vbox_status);
694 if (ret < 0) 698 if (ret < 0)
695 goto out_free; 699 goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
9#ifndef __VBOX_VERSION_H__ 9#ifndef __VBOX_VERSION_H__
10#define __VBOX_VERSION_H__ 10#define __VBOX_VERSION_H__
11 11
12/* Last synced October 4th 2017 */ 12#define VBG_VERSION_MAJOR 6
13#define VBG_VERSION_MAJOR 5 13#define VBG_VERSION_MINOR 0
14#define VBG_VERSION_MINOR 2
15#define VBG_VERSION_BUILD 0 14#define VBG_VERSION_BUILD 0
16#define VBG_SVN_REV 68940 15#define VBG_SVN_REV 127566
17#define VBG_VERSION_STRING "5.2.0" 16#define VBG_VERSION_STRING "6.0.0"
18 17
19#endif 18#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
98 s32 rc; 98 s32 rc;
99 /** Reserved field no.1. MBZ. */ 99 /** Reserved field no.1. MBZ. */
100 u32 reserved1; 100 u32 reserved1;
101 /** Reserved field no.2. MBZ. */ 101 /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
102 u32 reserved2; 102 u32 requestor;
103}; 103};
104VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24); 104VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
105 105
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
247}; 247};
248VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8); 248VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
249 249
250#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0)
251
250/** struct vmmdev_guestinfo2 - Guest information report, version 2. */ 252/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
251struct vmmdev_guest_info2 { 253struct vmmdev_guest_info2 {
252 /** Header. */ 254 /** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
259 u32 additions_build; 261 u32 additions_build;
260 /** SVN revision. */ 262 /** SVN revision. */
261 u32 additions_revision; 263 u32 additions_revision;
262 /** Feature mask, currently unused. */ 264 /** Feature mask. */
263 u32 additions_features; 265 u32 additions_features;
264 /** 266 /**
265 * The intentional meaning of this field was: 267 * The intentional meaning of this field was:
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index ca08c83168f5..0b37867b5c20 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
1515 1515
1516 xdr_encode_AFS_StoreStatus(&bp, attr); 1516 xdr_encode_AFS_StoreStatus(&bp, attr);
1517 1517
1518 *bp++ = 0; /* position of start of write */ 1518 *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */
1519 *bp++ = 0; 1519 *bp++ = htonl((u32) attr->ia_size);
1520 *bp++ = 0; /* size of write */ 1520 *bp++ = 0; /* size of write */
1521 *bp++ = 0; 1521 *bp++ = 0;
1522 *bp++ = htonl(attr->ia_size >> 32); /* new file length */ 1522 *bp++ = htonl(attr->ia_size >> 32); /* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
1564 1564
1565 xdr_encode_AFS_StoreStatus(&bp, attr); 1565 xdr_encode_AFS_StoreStatus(&bp, attr);
1566 1566
1567 *bp++ = 0; /* position of start of write */ 1567 *bp++ = htonl(attr->ia_size); /* position of start of write */
1568 *bp++ = 0; /* size of write */ 1568 *bp++ = 0; /* size of write */
1569 *bp++ = htonl(attr->ia_size); /* new file length */ 1569 *bp++ = htonl(attr->ia_size); /* new file length */
1570 1570
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 5aa57929e8c2..6e97a42d24d1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
1514 bp = xdr_encode_u32(bp, 0); /* RPC flags */ 1514 bp = xdr_encode_u32(bp, 0); /* RPC flags */
1515 bp = xdr_encode_YFSFid(bp, &vnode->fid); 1515 bp = xdr_encode_YFSFid(bp, &vnode->fid);
1516 bp = xdr_encode_YFS_StoreStatus(bp, attr); 1516 bp = xdr_encode_YFS_StoreStatus(bp, attr);
1517 bp = xdr_encode_u64(bp, 0); /* position of start of write */ 1517 bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
1518 bp = xdr_encode_u64(bp, 0); /* size of write */ 1518 bp = xdr_encode_u64(bp, 0); /* size of write */
1519 bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */ 1519 bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
1520 yfs_check_req(call, bp); 1520 yfs_check_req(call, bp);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e9faa52bb489..78d3257435c0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -336,12 +336,14 @@ static void blkdev_bio_end_io(struct bio *bio)
336 if (should_dirty) { 336 if (should_dirty) {
337 bio_check_pages_dirty(bio); 337 bio_check_pages_dirty(bio);
338 } else { 338 } else {
339 struct bio_vec *bvec; 339 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
340 int i; 340 struct bvec_iter_all iter_all;
341 struct bvec_iter_all iter_all; 341 struct bio_vec *bvec;
342 int i;
342 343
343 bio_for_each_segment_all(bvec, bio, i, iter_all) 344 bio_for_each_segment_all(bvec, bio, i, iter_all)
344 put_page(bvec->bv_page); 345 put_page(bvec->bv_page);
346 }
345 bio_put(bio); 347 bio_put(bio);
346 } 348 }
347} 349}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1d49694e6ae3..c5880329ae37 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6174,7 +6174,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
6174 * 6174 *
6175 * This is overestimating in most cases. 6175 * This is overestimating in most cases.
6176 */ 6176 */
6177 qgroup_rsv_size = outstanding_extents * fs_info->nodesize; 6177 qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
6178 6178
6179 spin_lock(&block_rsv->lock); 6179 spin_lock(&block_rsv->lock);
6180 block_rsv->size = reserve_size; 6180 block_rsv->size = reserve_size;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index eb680b715dd6..e659d9d61107 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1922,8 +1922,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
1922 int i; 1922 int i;
1923 1923
1924 /* Level sanity check */ 1924 /* Level sanity check */
1925 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL || 1925 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
1926 root_level < 0 || root_level >= BTRFS_MAX_LEVEL || 1926 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
1927 root_level < cur_level) { 1927 root_level < cur_level) {
1928 btrfs_err_rl(fs_info, 1928 btrfs_err_rl(fs_info,
1929 "%s: bad levels, cur_level=%d root_level=%d", 1929 "%s: bad levels, cur_level=%d root_level=%d",
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 1869ba8e5981..67a6f7d47402 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2430,8 +2430,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2430 bitmap_clear(rbio->dbitmap, pagenr, 1); 2430 bitmap_clear(rbio->dbitmap, pagenr, 1);
2431 kunmap(p); 2431 kunmap(p);
2432 2432
2433 for (stripe = 0; stripe < rbio->real_stripes; stripe++) 2433 for (stripe = 0; stripe < nr_data; stripe++)
2434 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); 2434 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2435 kunmap(p_page);
2435 } 2436 }
2436 2437
2437 __free_page(p_page); 2438 __free_page(p_page);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index acdad6d658f5..e4e665f422fc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
1886 } 1886 }
1887} 1887}
1888 1888
1889static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) 1889static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
1890{ 1890{
1891 struct btrfs_fs_info *fs_info = trans->fs_info;
1892
1891 /* 1893 /*
1892 * We use writeback_inodes_sb here because if we used 1894 * We use writeback_inodes_sb here because if we used
1893 * btrfs_start_delalloc_roots we would deadlock with fs freeze. 1895 * btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
1897 * from already being in a transaction and our join_transaction doesn't 1899 * from already being in a transaction and our join_transaction doesn't
1898 * have to re-take the fs freeze lock. 1900 * have to re-take the fs freeze lock.
1899 */ 1901 */
1900 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) 1902 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1901 writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC); 1903 writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
1904 } else {
1905 struct btrfs_pending_snapshot *pending;
1906 struct list_head *head = &trans->transaction->pending_snapshots;
1907
1908 /*
1909 * Flush dellaloc for any root that is going to be snapshotted.
1910 * This is done to avoid a corrupted version of files, in the
1911 * snapshots, that had both buffered and direct IO writes (even
1912 * if they were done sequentially) due to an unordered update of
1913 * the inode's size on disk.
1914 */
1915 list_for_each_entry(pending, head, list) {
1916 int ret;
1917
1918 ret = btrfs_start_delalloc_snapshot(pending->root);
1919 if (ret)
1920 return ret;
1921 }
1922 }
1902 return 0; 1923 return 0;
1903} 1924}
1904 1925
1905static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) 1926static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
1906{ 1927{
1907 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) 1928 struct btrfs_fs_info *fs_info = trans->fs_info;
1929
1930 if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
1908 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); 1931 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
1932 } else {
1933 struct btrfs_pending_snapshot *pending;
1934 struct list_head *head = &trans->transaction->pending_snapshots;
1935
1936 /*
1937 * Wait for any dellaloc that we started previously for the roots
1938 * that are going to be snapshotted. This is to avoid a corrupted
1939 * version of files in the snapshots that had both buffered and
1940 * direct IO writes (even if they were done sequentially).
1941 */
1942 list_for_each_entry(pending, head, list)
1943 btrfs_wait_ordered_extents(pending->root,
1944 U64_MAX, 0, U64_MAX);
1945 }
1909} 1946}
1910 1947
1911int btrfs_commit_transaction(struct btrfs_trans_handle *trans) 1948int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2023,7 +2060,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2023 2060
2024 extwriter_counter_dec(cur_trans, trans->type); 2061 extwriter_counter_dec(cur_trans, trans->type);
2025 2062
2026 ret = btrfs_start_delalloc_flush(fs_info); 2063 ret = btrfs_start_delalloc_flush(trans);
2027 if (ret) 2064 if (ret)
2028 goto cleanup_transaction; 2065 goto cleanup_transaction;
2029 2066
@@ -2039,7 +2076,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2039 if (ret) 2076 if (ret)
2040 goto cleanup_transaction; 2077 goto cleanup_transaction;
2041 2078
2042 btrfs_wait_delalloc_flush(fs_info); 2079 btrfs_wait_delalloc_flush(trans);
2043 2080
2044 btrfs_scrub_pause(fs_info); 2081 btrfs_scrub_pause(fs_info);
2045 /* 2082 /*
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f06454a55e00..561884f60d35 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3578,9 +3578,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3578 } 3578 }
3579 btrfs_release_path(path); 3579 btrfs_release_path(path);
3580 3580
3581 /* find the first key from this transaction again */ 3581 /*
3582 * Find the first key from this transaction again. See the note for
3583 * log_new_dir_dentries, if we're logging a directory recursively we
3584 * won't be holding its i_mutex, which means we can modify the directory
3585 * while we're logging it. If we remove an entry between our first
3586 * search and this search we'll not find the key again and can just
3587 * bail.
3588 */
3582 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3589 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3583 if (WARN_ON(ret != 0)) 3590 if (ret != 0)
3584 goto done; 3591 goto done;
3585 3592
3586 /* 3593 /*
@@ -4544,6 +4551,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4544 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4551 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4545 struct btrfs_inode_item); 4552 struct btrfs_inode_item);
4546 *size_ret = btrfs_inode_size(path->nodes[0], item); 4553 *size_ret = btrfs_inode_size(path->nodes[0], item);
4554 /*
4555 * If the in-memory inode's i_size is smaller then the inode
4556 * size stored in the btree, return the inode's i_size, so
4557 * that we get a correct inode size after replaying the log
4558 * when before a power failure we had a shrinking truncate
4559 * followed by addition of a new name (rename / new hard link).
4560 * Otherwise return the inode size from the btree, to avoid
4561 * data loss when replaying a log due to previously doing a
4562 * write that expands the inode's size and logging a new name
4563 * immediately after.
4564 */
4565 if (*size_ret > inode->vfs_inode.i_size)
4566 *size_ret = inode->vfs_inode.i_size;
4547 } 4567 }
4548 4568
4549 btrfs_release_path(path); 4569 btrfs_release_path(path);
@@ -4705,15 +4725,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4705 struct btrfs_file_extent_item); 4725 struct btrfs_file_extent_item);
4706 4726
4707 if (btrfs_file_extent_type(leaf, extent) == 4727 if (btrfs_file_extent_type(leaf, extent) ==
4708 BTRFS_FILE_EXTENT_INLINE) { 4728 BTRFS_FILE_EXTENT_INLINE)
4709 len = btrfs_file_extent_ram_bytes(leaf, extent);
4710 ASSERT(len == i_size ||
4711 (len == fs_info->sectorsize &&
4712 btrfs_file_extent_compression(leaf, extent) !=
4713 BTRFS_COMPRESS_NONE) ||
4714 (len < i_size && i_size < fs_info->sectorsize));
4715 return 0; 4729 return 0;
4716 }
4717 4730
4718 len = btrfs_file_extent_num_bytes(leaf, extent); 4731 len = btrfs_file_extent_num_bytes(leaf, extent);
4719 /* Last extent goes beyond i_size, no need to log a hole. */ 4732 /* Last extent goes beyond i_size, no need to log a hole. */
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9024eee889b9..db934ceae9c1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6407,7 +6407,7 @@ static void btrfs_end_bio(struct bio *bio)
6407 if (bio_op(bio) == REQ_OP_WRITE) 6407 if (bio_op(bio) == REQ_OP_WRITE)
6408 btrfs_dev_stat_inc_and_print(dev, 6408 btrfs_dev_stat_inc_and_print(dev,
6409 BTRFS_DEV_STAT_WRITE_ERRS); 6409 BTRFS_DEV_STAT_WRITE_ERRS);
6410 else 6410 else if (!(bio->bi_opf & REQ_RAHEAD))
6411 btrfs_dev_stat_inc_and_print(dev, 6411 btrfs_dev_stat_inc_and_print(dev,
6412 BTRFS_DEV_STAT_READ_ERRS); 6412 BTRFS_DEV_STAT_READ_ERRS);
6413 if (bio->bi_opf & REQ_PREFLUSH) 6413 if (bio->bi_opf & REQ_PREFLUSH)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e3346628efe2..2d61ddda9bf5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
524 struct inode *inode = container_of(head, struct inode, i_rcu); 524 struct inode *inode = container_of(head, struct inode, i_rcu);
525 struct ceph_inode_info *ci = ceph_inode(inode); 525 struct ceph_inode_info *ci = ceph_inode(inode);
526 526
527 kfree(ci->i_symlink);
527 kmem_cache_free(ceph_inode_cachep, ci); 528 kmem_cache_free(ceph_inode_cachep, ci);
528} 529}
529 530
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
566 } 567 }
567 } 568 }
568 569
569 kfree(ci->i_symlink);
570 while ((n = rb_first(&ci->i_fragtree)) != NULL) { 570 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
571 frag = rb_entry(n, struct ceph_inode_frag, node); 571 frag = rb_entry(n, struct ceph_inode_frag, node);
572 rb_erase(n, &ci->i_fragtree); 572 rb_erase(n, &ci->i_fragtree);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 217276b8b942..f9b71c12cc9f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1008,7 +1008,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1008 unsigned int xid; 1008 unsigned int xid;
1009 int rc; 1009 int rc;
1010 1010
1011 if (remap_flags & ~REMAP_FILE_ADVISORY) 1011 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1012 return -EINVAL; 1012 return -EINVAL;
1013 1013
1014 cifs_dbg(FYI, "clone range\n"); 1014 cifs_dbg(FYI, "clone range\n");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 142164ef1f05..5c0298b9998f 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
150extern const struct export_operations cifs_export_ops; 150extern const struct export_operations cifs_export_ops;
151#endif /* CONFIG_CIFS_NFSD_EXPORT */ 151#endif /* CONFIG_CIFS_NFSD_EXPORT */
152 152
153#define CIFS_VERSION "2.18" 153#define CIFS_VERSION "2.19"
154#endif /* _CIFSFS_H */ 154#endif /* _CIFSFS_H */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 2a6d20c0ce02..89006e044973 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2632,43 +2632,56 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
2632 struct TCP_Server_Info *server = 2632 struct TCP_Server_Info *server =
2633 tlink_tcon(wdata->cfile->tlink)->ses->server; 2633 tlink_tcon(wdata->cfile->tlink)->ses->server;
2634 2634
2635 /*
2636 * Wait for credits to resend this wdata.
2637 * Note: we are attempting to resend the whole wdata not in segments
2638 */
2639 do { 2635 do {
2640 rc = server->ops->wait_mtu_credits(server, wdata->bytes, &wsize, 2636 if (wdata->cfile->invalidHandle) {
2641 &credits); 2637 rc = cifs_reopen_file(wdata->cfile, false);
2638 if (rc == -EAGAIN)
2639 continue;
2640 else if (rc)
2641 break;
2642 }
2642 2643
2643 if (rc)
2644 goto out;
2645 2644
2646 if (wsize < wdata->bytes) { 2645 /*
2647 add_credits_and_wake_if(server, &credits, 0); 2646 * Wait for credits to resend this wdata.
2648 msleep(1000); 2647 * Note: we are attempting to resend the whole wdata not in
2649 } 2648 * segments
2650 } while (wsize < wdata->bytes); 2649 */
2650 do {
2651 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
2652 &wsize, &credits);
2653 if (rc)
2654 goto fail;
2655
2656 if (wsize < wdata->bytes) {
2657 add_credits_and_wake_if(server, &credits, 0);
2658 msleep(1000);
2659 }
2660 } while (wsize < wdata->bytes);
2661 wdata->credits = credits;
2651 2662
2652 wdata->credits = credits; 2663 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
2653 rc = -EAGAIN; 2664
2654 while (rc == -EAGAIN) { 2665 if (!rc) {
2655 rc = 0; 2666 if (wdata->cfile->invalidHandle)
2656 if (wdata->cfile->invalidHandle) 2667 rc = -EAGAIN;
2657 rc = cifs_reopen_file(wdata->cfile, false); 2668 else
2658 if (!rc) 2669 rc = server->ops->async_writev(wdata,
2659 rc = server->ops->async_writev(wdata,
2660 cifs_uncached_writedata_release); 2670 cifs_uncached_writedata_release);
2661 } 2671 }
2662 2672
2663 if (!rc) { 2673 /* If the write was successfully sent, we are done */
2664 list_add_tail(&wdata->list, wdata_list); 2674 if (!rc) {
2665 return 0; 2675 list_add_tail(&wdata->list, wdata_list);
2666 } 2676 return 0;
2677 }
2667 2678
2668 add_credits_and_wake_if(server, &wdata->credits, 0); 2679 /* Roll back credits and retry if needed */
2669out: 2680 add_credits_and_wake_if(server, &wdata->credits, 0);
2670 kref_put(&wdata->refcount, cifs_uncached_writedata_release); 2681 } while (rc == -EAGAIN);
2671 2682
2683fail:
2684 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2672 return rc; 2685 return rc;
2673} 2686}
2674 2687
@@ -2896,12 +2909,12 @@ restart_loop:
2896 wdata->bytes, &tmp_from, 2909 wdata->bytes, &tmp_from,
2897 ctx->cfile, cifs_sb, &tmp_list, 2910 ctx->cfile, cifs_sb, &tmp_list,
2898 ctx); 2911 ctx);
2912
2913 kref_put(&wdata->refcount,
2914 cifs_uncached_writedata_release);
2899 } 2915 }
2900 2916
2901 list_splice(&tmp_list, &ctx->list); 2917 list_splice(&tmp_list, &ctx->list);
2902
2903 kref_put(&wdata->refcount,
2904 cifs_uncached_writedata_release);
2905 goto restart_loop; 2918 goto restart_loop;
2906 } 2919 }
2907 } 2920 }
@@ -3348,44 +3361,55 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
3348 struct TCP_Server_Info *server = 3361 struct TCP_Server_Info *server =
3349 tlink_tcon(rdata->cfile->tlink)->ses->server; 3362 tlink_tcon(rdata->cfile->tlink)->ses->server;
3350 3363
3351 /*
3352 * Wait for credits to resend this rdata.
3353 * Note: we are attempting to resend the whole rdata not in segments
3354 */
3355 do { 3364 do {
3356 rc = server->ops->wait_mtu_credits(server, rdata->bytes, 3365 if (rdata->cfile->invalidHandle) {
3366 rc = cifs_reopen_file(rdata->cfile, true);
3367 if (rc == -EAGAIN)
3368 continue;
3369 else if (rc)
3370 break;
3371 }
3372
3373 /*
3374 * Wait for credits to resend this rdata.
3375 * Note: we are attempting to resend the whole rdata not in
3376 * segments
3377 */
3378 do {
3379 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
3357 &rsize, &credits); 3380 &rsize, &credits);
3358 3381
3359 if (rc) 3382 if (rc)
3360 goto out; 3383 goto fail;
3361 3384
3362 if (rsize < rdata->bytes) { 3385 if (rsize < rdata->bytes) {
3363 add_credits_and_wake_if(server, &credits, 0); 3386 add_credits_and_wake_if(server, &credits, 0);
3364 msleep(1000); 3387 msleep(1000);
3365 } 3388 }
3366 } while (rsize < rdata->bytes); 3389 } while (rsize < rdata->bytes);
3390 rdata->credits = credits;
3367 3391
3368 rdata->credits = credits; 3392 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3369 rc = -EAGAIN; 3393 if (!rc) {
3370 while (rc == -EAGAIN) { 3394 if (rdata->cfile->invalidHandle)
3371 rc = 0; 3395 rc = -EAGAIN;
3372 if (rdata->cfile->invalidHandle) 3396 else
3373 rc = cifs_reopen_file(rdata->cfile, true); 3397 rc = server->ops->async_readv(rdata);
3374 if (!rc) 3398 }
3375 rc = server->ops->async_readv(rdata);
3376 }
3377 3399
3378 if (!rc) { 3400 /* If the read was successfully sent, we are done */
3379 /* Add to aio pending list */ 3401 if (!rc) {
3380 list_add_tail(&rdata->list, rdata_list); 3402 /* Add to aio pending list */
3381 return 0; 3403 list_add_tail(&rdata->list, rdata_list);
3382 } 3404 return 0;
3405 }
3383 3406
3384 add_credits_and_wake_if(server, &rdata->credits, 0); 3407 /* Roll back credits and retry if needed */
3385out: 3408 add_credits_and_wake_if(server, &rdata->credits, 0);
3386 kref_put(&rdata->refcount, 3409 } while (rc == -EAGAIN);
3387 cifs_uncached_readdata_release);
3388 3410
3411fail:
3412 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3389 return rc; 3413 return rc;
3390} 3414}
3391 3415
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 924269cec135..e32c264e3adb 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -1036,7 +1036,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
1036 {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO, 1036 {STATUS_UNFINISHED_CONTEXT_DELETED, -EIO,
1037 "STATUS_UNFINISHED_CONTEXT_DELETED"}, 1037 "STATUS_UNFINISHED_CONTEXT_DELETED"},
1038 {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"}, 1038 {STATUS_NO_TGT_REPLY, -EIO, "STATUS_NO_TGT_REPLY"},
1039 {STATUS_OBJECTID_NOT_FOUND, -EIO, "STATUS_OBJECTID_NOT_FOUND"}, 1039 /* Note that ENOATTTR and ENODATA are the same errno */
1040 {STATUS_OBJECTID_NOT_FOUND, -ENODATA, "STATUS_OBJECTID_NOT_FOUND"},
1040 {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"}, 1041 {STATUS_NO_IP_ADDRESSES, -EIO, "STATUS_NO_IP_ADDRESSES"},
1041 {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO, 1042 {STATUS_WRONG_CREDENTIAL_HANDLE, -EIO,
1042 "STATUS_WRONG_CREDENTIAL_HANDLE"}, 1043 "STATUS_WRONG_CREDENTIAL_HANDLE"},
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c399e09b76e6..21ac19ff19cb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1628,9 +1628,16 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1628 iov[1].iov_base = unc_path; 1628 iov[1].iov_base = unc_path;
1629 iov[1].iov_len = unc_path_len; 1629 iov[1].iov_len = unc_path_len;
1630 1630
1631 /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */ 1631 /*
1632 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
1633 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
1634 * (Samba servers don't always set the flag so also check if null user)
1635 */
1632 if ((ses->server->dialect == SMB311_PROT_ID) && 1636 if ((ses->server->dialect == SMB311_PROT_ID) &&
1633 !smb3_encryption_required(tcon)) 1637 !smb3_encryption_required(tcon) &&
1638 !(ses->session_flags &
1639 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
1640 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
1634 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED; 1641 req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
1635 1642
1636 memset(&rqst, 0, sizeof(struct smb_rqst)); 1643 memset(&rqst, 0, sizeof(struct smb_rqst));
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index fa226de48ef3..99c4d799c24b 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -549,19 +549,19 @@ DECLARE_EVENT_CLASS(smb3_tcon_class,
549 __field(unsigned int, xid) 549 __field(unsigned int, xid)
550 __field(__u32, tid) 550 __field(__u32, tid)
551 __field(__u64, sesid) 551 __field(__u64, sesid)
552 __field(const char *, unc_name) 552 __string(name, unc_name)
553 __field(int, rc) 553 __field(int, rc)
554 ), 554 ),
555 TP_fast_assign( 555 TP_fast_assign(
556 __entry->xid = xid; 556 __entry->xid = xid;
557 __entry->tid = tid; 557 __entry->tid = tid;
558 __entry->sesid = sesid; 558 __entry->sesid = sesid;
559 __entry->unc_name = unc_name; 559 __assign_str(name, unc_name);
560 __entry->rc = rc; 560 __entry->rc = rc;
561 ), 561 ),
562 TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d", 562 TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
563 __entry->xid, __entry->sesid, __entry->tid, 563 __entry->xid, __entry->sesid, __entry->tid,
564 __entry->unc_name, __entry->rc) 564 __get_str(name), __entry->rc)
565) 565)
566 566
567#define DEFINE_SMB3_TCON_EVENT(name) \ 567#define DEFINE_SMB3_TCON_EVENT(name) \
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index a1ac7e9245ec..75a5309f2231 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -384,7 +384,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
384{ 384{
385 struct ext4_inode_info *ei = EXT4_I(inode); 385 struct ext4_inode_info *ei = EXT4_I(inode);
386 386
387 if (ext4_handle_valid(handle)) { 387 if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
388 ei->i_sync_tid = handle->h_transaction->t_tid; 388 ei->i_sync_tid = handle->h_transaction->t_tid;
389 if (datasync) 389 if (datasync)
390 ei->i_datasync_tid = handle->h_transaction->t_tid; 390 ei->i_datasync_tid = handle->h_transaction->t_tid;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 69d65d49837b..98ec11f69cd4 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -125,7 +125,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
125 struct super_block *sb = inode->i_sb; 125 struct super_block *sb = inode->i_sb;
126 int blockmask = sb->s_blocksize - 1; 126 int blockmask = sb->s_blocksize - 1;
127 127
128 if (pos >= i_size_read(inode)) 128 if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
129 return 0; 129 return 0;
130 130
131 if ((pos | iov_iter_alignment(from)) & blockmask) 131 if ((pos | iov_iter_alignment(from)) & blockmask)
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index c2225f0d31b5..2024d3fa5504 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1222,6 +1222,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
1222 ext4_lblk_t offsets[4], offsets2[4]; 1222 ext4_lblk_t offsets[4], offsets2[4];
1223 Indirect chain[4], chain2[4]; 1223 Indirect chain[4], chain2[4];
1224 Indirect *partial, *partial2; 1224 Indirect *partial, *partial2;
1225 Indirect *p = NULL, *p2 = NULL;
1225 ext4_lblk_t max_block; 1226 ext4_lblk_t max_block;
1226 __le32 nr = 0, nr2 = 0; 1227 __le32 nr = 0, nr2 = 0;
1227 int n = 0, n2 = 0; 1228 int n = 0, n2 = 0;
@@ -1263,7 +1264,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
1263 } 1264 }
1264 1265
1265 1266
1266 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 1267 partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
1267 if (nr) { 1268 if (nr) {
1268 if (partial == chain) { 1269 if (partial == chain) {
1269 /* Shared branch grows from the inode */ 1270 /* Shared branch grows from the inode */
@@ -1288,13 +1289,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
1288 partial->p + 1, 1289 partial->p + 1,
1289 (__le32 *)partial->bh->b_data+addr_per_block, 1290 (__le32 *)partial->bh->b_data+addr_per_block,
1290 (chain+n-1) - partial); 1291 (chain+n-1) - partial);
1291 BUFFER_TRACE(partial->bh, "call brelse");
1292 brelse(partial->bh);
1293 partial--; 1292 partial--;
1294 } 1293 }
1295 1294
1296end_range: 1295end_range:
1297 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); 1296 partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
1298 if (nr2) { 1297 if (nr2) {
1299 if (partial2 == chain2) { 1298 if (partial2 == chain2) {
1300 /* 1299 /*
@@ -1324,16 +1323,14 @@ end_range:
1324 (__le32 *)partial2->bh->b_data, 1323 (__le32 *)partial2->bh->b_data,
1325 partial2->p, 1324 partial2->p,
1326 (chain2+n2-1) - partial2); 1325 (chain2+n2-1) - partial2);
1327 BUFFER_TRACE(partial2->bh, "call brelse");
1328 brelse(partial2->bh);
1329 partial2--; 1326 partial2--;
1330 } 1327 }
1331 goto do_indirects; 1328 goto do_indirects;
1332 } 1329 }
1333 1330
1334 /* Punch happened within the same level (n == n2) */ 1331 /* Punch happened within the same level (n == n2) */
1335 partial = ext4_find_shared(inode, n, offsets, chain, &nr); 1332 partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
1336 partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); 1333 partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
1337 1334
1338 /* Free top, but only if partial2 isn't its subtree. */ 1335 /* Free top, but only if partial2 isn't its subtree. */
1339 if (nr) { 1336 if (nr) {
@@ -1390,11 +1387,7 @@ end_range:
1390 partial->p + 1, 1387 partial->p + 1,
1391 partial2->p, 1388 partial2->p,
1392 (chain+n-1) - partial); 1389 (chain+n-1) - partial);
1393 BUFFER_TRACE(partial->bh, "call brelse"); 1390 goto cleanup;
1394 brelse(partial->bh);
1395 BUFFER_TRACE(partial2->bh, "call brelse");
1396 brelse(partial2->bh);
1397 return 0;
1398 } 1391 }
1399 1392
1400 /* 1393 /*
@@ -1409,8 +1402,6 @@ end_range:
1409 partial->p + 1, 1402 partial->p + 1,
1410 (__le32 *)partial->bh->b_data+addr_per_block, 1403 (__le32 *)partial->bh->b_data+addr_per_block,
1411 (chain+n-1) - partial); 1404 (chain+n-1) - partial);
1412 BUFFER_TRACE(partial->bh, "call brelse");
1413 brelse(partial->bh);
1414 partial--; 1405 partial--;
1415 } 1406 }
1416 if (partial2 > chain2 && depth2 <= depth) { 1407 if (partial2 > chain2 && depth2 <= depth) {
@@ -1418,11 +1409,21 @@ end_range:
1418 (__le32 *)partial2->bh->b_data, 1409 (__le32 *)partial2->bh->b_data,
1419 partial2->p, 1410 partial2->p,
1420 (chain2+n2-1) - partial2); 1411 (chain2+n2-1) - partial2);
1421 BUFFER_TRACE(partial2->bh, "call brelse");
1422 brelse(partial2->bh);
1423 partial2--; 1412 partial2--;
1424 } 1413 }
1425 } 1414 }
1415
1416cleanup:
1417 while (p && p > chain) {
1418 BUFFER_TRACE(p->bh, "call brelse");
1419 brelse(p->bh);
1420 p--;
1421 }
1422 while (p2 && p2 > chain2) {
1423 BUFFER_TRACE(p2->bh, "call brelse");
1424 brelse(p2->bh);
1425 p2--;
1426 }
1426 return 0; 1427 return 0;
1427 1428
1428do_indirects: 1429do_indirects:
@@ -1430,7 +1431,7 @@ do_indirects:
1430 switch (offsets[0]) { 1431 switch (offsets[0]) {
1431 default: 1432 default:
1432 if (++n >= n2) 1433 if (++n >= n2)
1433 return 0; 1434 break;
1434 nr = i_data[EXT4_IND_BLOCK]; 1435 nr = i_data[EXT4_IND_BLOCK];
1435 if (nr) { 1436 if (nr) {
1436 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); 1437 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
@@ -1439,7 +1440,7 @@ do_indirects:
1439 /* fall through */ 1440 /* fall through */
1440 case EXT4_IND_BLOCK: 1441 case EXT4_IND_BLOCK:
1441 if (++n >= n2) 1442 if (++n >= n2)
1442 return 0; 1443 break;
1443 nr = i_data[EXT4_DIND_BLOCK]; 1444 nr = i_data[EXT4_DIND_BLOCK];
1444 if (nr) { 1445 if (nr) {
1445 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); 1446 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
@@ -1448,7 +1449,7 @@ do_indirects:
1448 /* fall through */ 1449 /* fall through */
1449 case EXT4_DIND_BLOCK: 1450 case EXT4_DIND_BLOCK:
1450 if (++n >= n2) 1451 if (++n >= n2)
1451 return 0; 1452 break;
1452 nr = i_data[EXT4_TIND_BLOCK]; 1453 nr = i_data[EXT4_TIND_BLOCK];
1453 if (nr) { 1454 if (nr) {
1454 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); 1455 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
@@ -1458,5 +1459,5 @@ do_indirects:
1458 case EXT4_TIND_BLOCK: 1459 case EXT4_TIND_BLOCK:
1459 ; 1460 ;
1460 } 1461 }
1461 return 0; 1462 goto cleanup;
1462} 1463}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b54b261ded36..b32a57bc5d5d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -6080,36 +6080,6 @@ out:
6080 return; 6080 return;
6081} 6081}
6082 6082
6083#if 0
6084/*
6085 * Bind an inode's backing buffer_head into this transaction, to prevent
6086 * it from being flushed to disk early. Unlike
6087 * ext4_reserve_inode_write, this leaves behind no bh reference and
6088 * returns no iloc structure, so the caller needs to repeat the iloc
6089 * lookup to mark the inode dirty later.
6090 */
6091static int ext4_pin_inode(handle_t *handle, struct inode *inode)
6092{
6093 struct ext4_iloc iloc;
6094
6095 int err = 0;
6096 if (handle) {
6097 err = ext4_get_inode_loc(inode, &iloc);
6098 if (!err) {
6099 BUFFER_TRACE(iloc.bh, "get_write_access");
6100 err = jbd2_journal_get_write_access(handle, iloc.bh);
6101 if (!err)
6102 err = ext4_handle_dirty_metadata(handle,
6103 NULL,
6104 iloc.bh);
6105 brelse(iloc.bh);
6106 }
6107 }
6108 ext4_std_error(inode->i_sb, err);
6109 return err;
6110}
6111#endif
6112
6113int ext4_change_inode_journal_flag(struct inode *inode, int val) 6083int ext4_change_inode_journal_flag(struct inode *inode, int val)
6114{ 6084{
6115 journal_t *journal; 6085 journal_t *journal;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 3c4f8bb59f8a..bab3da4f1e0d 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -1000,6 +1000,13 @@ resizefs_out:
1000 if (!blk_queue_discard(q)) 1000 if (!blk_queue_discard(q))
1001 return -EOPNOTSUPP; 1001 return -EOPNOTSUPP;
1002 1002
1003 /*
1004 * We haven't replayed the journal, so we cannot use our
1005 * block-bitmap-guided storage zapping commands.
1006 */
1007 if (test_opt(sb, NOLOAD) && ext4_has_feature_journal(sb))
1008 return -EROFS;
1009
1003 if (copy_from_user(&range, (struct fstrim_range __user *)arg, 1010 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
1004 sizeof(range))) 1011 sizeof(range)))
1005 return -EFAULT; 1012 return -EFAULT;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3d9b18505c0c..e7ae26e36c9c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -932,11 +932,18 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
932 memcpy(n_group_desc, o_group_desc, 932 memcpy(n_group_desc, o_group_desc,
933 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *)); 933 EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
934 n_group_desc[gdb_num] = gdb_bh; 934 n_group_desc[gdb_num] = gdb_bh;
935
936 BUFFER_TRACE(gdb_bh, "get_write_access");
937 err = ext4_journal_get_write_access(handle, gdb_bh);
938 if (err) {
939 kvfree(n_group_desc);
940 brelse(gdb_bh);
941 return err;
942 }
943
935 EXT4_SB(sb)->s_group_desc = n_group_desc; 944 EXT4_SB(sb)->s_group_desc = n_group_desc;
936 EXT4_SB(sb)->s_gdb_count++; 945 EXT4_SB(sb)->s_gdb_count++;
937 kvfree(o_group_desc); 946 kvfree(o_group_desc);
938 BUFFER_TRACE(gdb_bh, "get_write_access");
939 err = ext4_journal_get_write_access(handle, gdb_bh);
940 return err; 947 return err;
941} 948}
942 949
@@ -2073,6 +2080,10 @@ out:
2073 free_flex_gd(flex_gd); 2080 free_flex_gd(flex_gd);
2074 if (resize_inode != NULL) 2081 if (resize_inode != NULL)
2075 iput(resize_inode); 2082 iput(resize_inode);
2076 ext4_msg(sb, KERN_INFO, "resized filesystem to %llu", n_blocks_count); 2083 if (err)
2084 ext4_warning(sb, "error (%d) occurred during "
2085 "file system resize", err);
2086 ext4_msg(sb, KERN_INFO, "resized filesystem to %llu",
2087 ext4_blocks_count(es));
2077 return err; 2088 return err;
2078} 2089}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index f5b828bf1299..6ed4eb81e674 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -430,6 +430,12 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
430 spin_unlock(&sbi->s_md_lock); 430 spin_unlock(&sbi->s_md_lock);
431} 431}
432 432
433static bool system_going_down(void)
434{
435 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
436 || system_state == SYSTEM_RESTART;
437}
438
433/* Deal with the reporting of failure conditions on a filesystem such as 439/* Deal with the reporting of failure conditions on a filesystem such as
434 * inconsistencies detected or read IO failures. 440 * inconsistencies detected or read IO failures.
435 * 441 *
@@ -460,7 +466,12 @@ static void ext4_handle_error(struct super_block *sb)
460 if (journal) 466 if (journal)
461 jbd2_journal_abort(journal, -EIO); 467 jbd2_journal_abort(journal, -EIO);
462 } 468 }
463 if (test_opt(sb, ERRORS_RO)) { 469 /*
470 * We force ERRORS_RO behavior when system is rebooting. Otherwise we
471 * could panic during 'reboot -f' as the underlying device got already
472 * disabled.
473 */
474 if (test_opt(sb, ERRORS_RO) || system_going_down()) {
464 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only"); 475 ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
465 /* 476 /*
466 * Make sure updated value of ->s_mount_flags will be visible 477 * Make sure updated value of ->s_mount_flags will be visible
@@ -468,8 +479,7 @@ static void ext4_handle_error(struct super_block *sb)
468 */ 479 */
469 smp_wmb(); 480 smp_wmb();
470 sb->s_flags |= SB_RDONLY; 481 sb->s_flags |= SB_RDONLY;
471 } 482 } else if (test_opt(sb, ERRORS_PANIC)) {
472 if (test_opt(sb, ERRORS_PANIC)) {
473 if (EXT4_SB(sb)->s_journal && 483 if (EXT4_SB(sb)->s_journal &&
474 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) 484 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
475 return; 485 return;
diff --git a/fs/fs_parser.c b/fs/fs_parser.c
index 842e8f749db6..570d71043acf 100644
--- a/fs/fs_parser.c
+++ b/fs/fs_parser.c
@@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc)
410 for (param = desc->specs; param->name; param++) { 410 for (param = desc->specs; param->name; param++) {
411 if (param->opt == e->opt && 411 if (param->opt == e->opt &&
412 param->type != fs_param_is_enum) { 412 param->type != fs_param_is_enum) {
413 pr_err("VALIDATE %s: e[%lu] enum val for %s\n", 413 pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
414 name, e - desc->enums, param->name); 414 name, e - desc->enums, param->name);
415 good = false; 415 good = false;
416 } 416 }
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c88088d92613..bbdbd56cf2ac 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -189,17 +189,28 @@ struct sqe_submit {
189 bool needs_fixed_file; 189 bool needs_fixed_file;
190}; 190};
191 191
192/*
193 * First field must be the file pointer in all the
194 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
195 */
192struct io_poll_iocb { 196struct io_poll_iocb {
193 struct file *file; 197 struct file *file;
194 struct wait_queue_head *head; 198 struct wait_queue_head *head;
195 __poll_t events; 199 __poll_t events;
196 bool woken; 200 bool done;
197 bool canceled; 201 bool canceled;
198 struct wait_queue_entry wait; 202 struct wait_queue_entry wait;
199}; 203};
200 204
205/*
206 * NOTE! Each of the iocb union members has the file pointer
207 * as the first entry in their struct definition. So you can
208 * access the file pointer through any of the sub-structs,
209 * or directly as just 'ki_filp' in this struct.
210 */
201struct io_kiocb { 211struct io_kiocb {
202 union { 212 union {
213 struct file *file;
203 struct kiocb rw; 214 struct kiocb rw;
204 struct io_poll_iocb poll; 215 struct io_poll_iocb poll;
205 }; 216 };
@@ -214,6 +225,7 @@ struct io_kiocb {
214#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */ 225#define REQ_F_IOPOLL_COMPLETED 2 /* polled IO has completed */
215#define REQ_F_FIXED_FILE 4 /* ctx owns file */ 226#define REQ_F_FIXED_FILE 4 /* ctx owns file */
216#define REQ_F_SEQ_PREV 8 /* sequential with previous */ 227#define REQ_F_SEQ_PREV 8 /* sequential with previous */
228#define REQ_F_PREPPED 16 /* prep already done */
217 u64 user_data; 229 u64 user_data;
218 u64 error; 230 u64 error;
219 231
@@ -355,20 +367,25 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
355 } 367 }
356} 368}
357 369
358static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 ki_user_data, 370static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
371{
372 if (waitqueue_active(&ctx->wait))
373 wake_up(&ctx->wait);
374 if (waitqueue_active(&ctx->sqo_wait))
375 wake_up(&ctx->sqo_wait);
376}
377
378static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
359 long res, unsigned ev_flags) 379 long res, unsigned ev_flags)
360{ 380{
361 unsigned long flags; 381 unsigned long flags;
362 382
363 spin_lock_irqsave(&ctx->completion_lock, flags); 383 spin_lock_irqsave(&ctx->completion_lock, flags);
364 io_cqring_fill_event(ctx, ki_user_data, res, ev_flags); 384 io_cqring_fill_event(ctx, user_data, res, ev_flags);
365 io_commit_cqring(ctx); 385 io_commit_cqring(ctx);
366 spin_unlock_irqrestore(&ctx->completion_lock, flags); 386 spin_unlock_irqrestore(&ctx->completion_lock, flags);
367 387
368 if (waitqueue_active(&ctx->wait)) 388 io_cqring_ev_posted(ctx);
369 wake_up(&ctx->wait);
370 if (waitqueue_active(&ctx->sqo_wait))
371 wake_up(&ctx->sqo_wait);
372} 389}
373 390
374static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs) 391static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
@@ -382,13 +399,14 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
382static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, 399static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
383 struct io_submit_state *state) 400 struct io_submit_state *state)
384{ 401{
402 gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
385 struct io_kiocb *req; 403 struct io_kiocb *req;
386 404
387 if (!percpu_ref_tryget(&ctx->refs)) 405 if (!percpu_ref_tryget(&ctx->refs))
388 return NULL; 406 return NULL;
389 407
390 if (!state) { 408 if (!state) {
391 req = kmem_cache_alloc(req_cachep, __GFP_NOWARN); 409 req = kmem_cache_alloc(req_cachep, gfp);
392 if (unlikely(!req)) 410 if (unlikely(!req))
393 goto out; 411 goto out;
394 } else if (!state->free_reqs) { 412 } else if (!state->free_reqs) {
@@ -396,10 +414,18 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
396 int ret; 414 int ret;
397 415
398 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs)); 416 sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
399 ret = kmem_cache_alloc_bulk(req_cachep, __GFP_NOWARN, sz, 417 ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
400 state->reqs); 418
401 if (unlikely(ret <= 0)) 419 /*
402 goto out; 420 * Bulk alloc is all-or-nothing. If we fail to get a batch,
421 * retry single alloc to be on the safe side.
422 */
423 if (unlikely(ret <= 0)) {
424 state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
425 if (!state->reqs[0])
426 goto out;
427 ret = 1;
428 }
403 state->free_reqs = ret - 1; 429 state->free_reqs = ret - 1;
404 state->cur_req = 1; 430 state->cur_req = 1;
405 req = state->reqs[0]; 431 req = state->reqs[0];
@@ -411,7 +437,8 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
411 437
412 req->ctx = ctx; 438 req->ctx = ctx;
413 req->flags = 0; 439 req->flags = 0;
414 refcount_set(&req->refs, 0); 440 /* one is dropped after submission, the other at completion */
441 refcount_set(&req->refs, 2);
415 return req; 442 return req;
416out: 443out:
417 io_ring_drop_ctx_refs(ctx, 1); 444 io_ring_drop_ctx_refs(ctx, 1);
@@ -429,10 +456,16 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
429 456
430static void io_free_req(struct io_kiocb *req) 457static void io_free_req(struct io_kiocb *req)
431{ 458{
432 if (!refcount_read(&req->refs) || refcount_dec_and_test(&req->refs)) { 459 if (req->file && !(req->flags & REQ_F_FIXED_FILE))
433 io_ring_drop_ctx_refs(req->ctx, 1); 460 fput(req->file);
434 kmem_cache_free(req_cachep, req); 461 io_ring_drop_ctx_refs(req->ctx, 1);
435 } 462 kmem_cache_free(req_cachep, req);
463}
464
465static void io_put_req(struct io_kiocb *req)
466{
467 if (refcount_dec_and_test(&req->refs))
468 io_free_req(req);
436} 469}
437 470
438/* 471/*
@@ -442,44 +475,34 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
442 struct list_head *done) 475 struct list_head *done)
443{ 476{
444 void *reqs[IO_IOPOLL_BATCH]; 477 void *reqs[IO_IOPOLL_BATCH];
445 int file_count, to_free;
446 struct file *file = NULL;
447 struct io_kiocb *req; 478 struct io_kiocb *req;
479 int to_free;
448 480
449 file_count = to_free = 0; 481 to_free = 0;
450 while (!list_empty(done)) { 482 while (!list_empty(done)) {
451 req = list_first_entry(done, struct io_kiocb, list); 483 req = list_first_entry(done, struct io_kiocb, list);
452 list_del(&req->list); 484 list_del(&req->list);
453 485
454 io_cqring_fill_event(ctx, req->user_data, req->error, 0); 486 io_cqring_fill_event(ctx, req->user_data, req->error, 0);
455
456 reqs[to_free++] = req;
457 (*nr_events)++; 487 (*nr_events)++;
458 488
459 /* 489 if (refcount_dec_and_test(&req->refs)) {
460 * Batched puts of the same file, to avoid dirtying the 490 /* If we're not using fixed files, we have to pair the
461 * file usage count multiple times, if avoidable. 491 * completion part with the file put. Use regular
462 */ 492 * completions for those, only batch free for fixed
463 if (!(req->flags & REQ_F_FIXED_FILE)) { 493 * file.
464 if (!file) { 494 */
465 file = req->rw.ki_filp; 495 if (req->flags & REQ_F_FIXED_FILE) {
466 file_count = 1; 496 reqs[to_free++] = req;
467 } else if (file == req->rw.ki_filp) { 497 if (to_free == ARRAY_SIZE(reqs))
468 file_count++; 498 io_free_req_many(ctx, reqs, &to_free);
469 } else { 499 } else {
470 fput_many(file, file_count); 500 io_free_req(req);
471 file = req->rw.ki_filp;
472 file_count = 1;
473 } 501 }
474 } 502 }
475
476 if (to_free == ARRAY_SIZE(reqs))
477 io_free_req_many(ctx, reqs, &to_free);
478 } 503 }
479 io_commit_cqring(ctx);
480 504
481 if (file) 505 io_commit_cqring(ctx);
482 fput_many(file, file_count);
483 io_free_req_many(ctx, reqs, &to_free); 506 io_free_req_many(ctx, reqs, &to_free);
484} 507}
485 508
@@ -602,21 +625,14 @@ static void kiocb_end_write(struct kiocb *kiocb)
602 } 625 }
603} 626}
604 627
605static void io_fput(struct io_kiocb *req)
606{
607 if (!(req->flags & REQ_F_FIXED_FILE))
608 fput(req->rw.ki_filp);
609}
610
611static void io_complete_rw(struct kiocb *kiocb, long res, long res2) 628static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
612{ 629{
613 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); 630 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
614 631
615 kiocb_end_write(kiocb); 632 kiocb_end_write(kiocb);
616 633
617 io_fput(req);
618 io_cqring_add_event(req->ctx, req->user_data, res, 0); 634 io_cqring_add_event(req->ctx, req->user_data, res, 0);
619 io_free_req(req); 635 io_put_req(req);
620} 636}
621 637
622static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) 638static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
@@ -731,31 +747,18 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
731 const struct io_uring_sqe *sqe = s->sqe; 747 const struct io_uring_sqe *sqe = s->sqe;
732 struct io_ring_ctx *ctx = req->ctx; 748 struct io_ring_ctx *ctx = req->ctx;
733 struct kiocb *kiocb = &req->rw; 749 struct kiocb *kiocb = &req->rw;
734 unsigned ioprio, flags; 750 unsigned ioprio;
735 int fd, ret; 751 int ret;
736 752
753 if (!req->file)
754 return -EBADF;
737 /* For -EAGAIN retry, everything is already prepped */ 755 /* For -EAGAIN retry, everything is already prepped */
738 if (kiocb->ki_filp) 756 if (req->flags & REQ_F_PREPPED)
739 return 0; 757 return 0;
740 758
741 flags = READ_ONCE(sqe->flags); 759 if (force_nonblock && !io_file_supports_async(req->file))
742 fd = READ_ONCE(sqe->fd); 760 force_nonblock = false;
743 761
744 if (flags & IOSQE_FIXED_FILE) {
745 if (unlikely(!ctx->user_files ||
746 (unsigned) fd >= ctx->nr_user_files))
747 return -EBADF;
748 kiocb->ki_filp = ctx->user_files[fd];
749 req->flags |= REQ_F_FIXED_FILE;
750 } else {
751 if (s->needs_fixed_file)
752 return -EBADF;
753 kiocb->ki_filp = io_file_get(state, fd);
754 if (unlikely(!kiocb->ki_filp))
755 return -EBADF;
756 if (force_nonblock && !io_file_supports_async(kiocb->ki_filp))
757 force_nonblock = false;
758 }
759 kiocb->ki_pos = READ_ONCE(sqe->off); 762 kiocb->ki_pos = READ_ONCE(sqe->off);
760 kiocb->ki_flags = iocb_flags(kiocb->ki_filp); 763 kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
761 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp)); 764 kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
@@ -764,7 +767,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
764 if (ioprio) { 767 if (ioprio) {
765 ret = ioprio_check_cap(ioprio); 768 ret = ioprio_check_cap(ioprio);
766 if (ret) 769 if (ret)
767 goto out_fput; 770 return ret;
768 771
769 kiocb->ki_ioprio = ioprio; 772 kiocb->ki_ioprio = ioprio;
770 } else 773 } else
@@ -772,38 +775,26 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
772 775
773 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); 776 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
774 if (unlikely(ret)) 777 if (unlikely(ret))
775 goto out_fput; 778 return ret;
776 if (force_nonblock) { 779 if (force_nonblock) {
777 kiocb->ki_flags |= IOCB_NOWAIT; 780 kiocb->ki_flags |= IOCB_NOWAIT;
778 req->flags |= REQ_F_FORCE_NONBLOCK; 781 req->flags |= REQ_F_FORCE_NONBLOCK;
779 } 782 }
780 if (ctx->flags & IORING_SETUP_IOPOLL) { 783 if (ctx->flags & IORING_SETUP_IOPOLL) {
781 ret = -EOPNOTSUPP;
782 if (!(kiocb->ki_flags & IOCB_DIRECT) || 784 if (!(kiocb->ki_flags & IOCB_DIRECT) ||
783 !kiocb->ki_filp->f_op->iopoll) 785 !kiocb->ki_filp->f_op->iopoll)
784 goto out_fput; 786 return -EOPNOTSUPP;
785 787
786 req->error = 0; 788 req->error = 0;
787 kiocb->ki_flags |= IOCB_HIPRI; 789 kiocb->ki_flags |= IOCB_HIPRI;
788 kiocb->ki_complete = io_complete_rw_iopoll; 790 kiocb->ki_complete = io_complete_rw_iopoll;
789 } else { 791 } else {
790 if (kiocb->ki_flags & IOCB_HIPRI) { 792 if (kiocb->ki_flags & IOCB_HIPRI)
791 ret = -EINVAL; 793 return -EINVAL;
792 goto out_fput;
793 }
794 kiocb->ki_complete = io_complete_rw; 794 kiocb->ki_complete = io_complete_rw;
795 } 795 }
796 req->flags |= REQ_F_PREPPED;
796 return 0; 797 return 0;
797out_fput:
798 if (!(flags & IOSQE_FIXED_FILE)) {
799 /*
800 * in case of error, we didn't use this file reference. drop it.
801 */
802 if (state)
803 state->used_refs--;
804 io_file_put(state, kiocb->ki_filp);
805 }
806 return ret;
807} 798}
808 799
809static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) 800static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
@@ -864,6 +855,9 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
864 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len); 855 iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
865 if (offset) 856 if (offset)
866 iov_iter_advance(iter, offset); 857 iov_iter_advance(iter, offset);
858
859 /* don't drop a reference to these pages */
860 iter->type |= ITER_BVEC_FLAG_NO_REF;
867 return 0; 861 return 0;
868} 862}
869 863
@@ -887,7 +881,7 @@ static int io_import_iovec(struct io_ring_ctx *ctx, int rw,
887 opcode = READ_ONCE(sqe->opcode); 881 opcode = READ_ONCE(sqe->opcode);
888 if (opcode == IORING_OP_READ_FIXED || 882 if (opcode == IORING_OP_READ_FIXED ||
889 opcode == IORING_OP_WRITE_FIXED) { 883 opcode == IORING_OP_WRITE_FIXED) {
890 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); 884 int ret = io_import_fixed(ctx, rw, sqe, iter);
891 *iovec = NULL; 885 *iovec = NULL;
892 return ret; 886 return ret;
893 } 887 }
@@ -945,31 +939,29 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
945 async_list->io_end = io_end; 939 async_list->io_end = io_end;
946} 940}
947 941
948static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s, 942static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
949 bool force_nonblock, struct io_submit_state *state) 943 bool force_nonblock, struct io_submit_state *state)
950{ 944{
951 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 945 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
952 struct kiocb *kiocb = &req->rw; 946 struct kiocb *kiocb = &req->rw;
953 struct iov_iter iter; 947 struct iov_iter iter;
954 struct file *file; 948 struct file *file;
955 size_t iov_count; 949 size_t iov_count;
956 ssize_t ret; 950 int ret;
957 951
958 ret = io_prep_rw(req, s, force_nonblock, state); 952 ret = io_prep_rw(req, s, force_nonblock, state);
959 if (ret) 953 if (ret)
960 return ret; 954 return ret;
961 file = kiocb->ki_filp; 955 file = kiocb->ki_filp;
962 956
963 ret = -EBADF;
964 if (unlikely(!(file->f_mode & FMODE_READ))) 957 if (unlikely(!(file->f_mode & FMODE_READ)))
965 goto out_fput; 958 return -EBADF;
966 ret = -EINVAL;
967 if (unlikely(!file->f_op->read_iter)) 959 if (unlikely(!file->f_op->read_iter))
968 goto out_fput; 960 return -EINVAL;
969 961
970 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter); 962 ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
971 if (ret) 963 if (ret)
972 goto out_fput; 964 return ret;
973 965
974 iov_count = iov_iter_count(&iter); 966 iov_count = iov_iter_count(&iter);
975 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count); 967 ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
@@ -991,38 +983,32 @@ static ssize_t io_read(struct io_kiocb *req, const struct sqe_submit *s,
991 } 983 }
992 } 984 }
993 kfree(iovec); 985 kfree(iovec);
994out_fput:
995 /* Hold on to the file for -EAGAIN */
996 if (unlikely(ret && ret != -EAGAIN))
997 io_fput(req);
998 return ret; 986 return ret;
999} 987}
1000 988
1001static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s, 989static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
1002 bool force_nonblock, struct io_submit_state *state) 990 bool force_nonblock, struct io_submit_state *state)
1003{ 991{
1004 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 992 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1005 struct kiocb *kiocb = &req->rw; 993 struct kiocb *kiocb = &req->rw;
1006 struct iov_iter iter; 994 struct iov_iter iter;
1007 struct file *file; 995 struct file *file;
1008 size_t iov_count; 996 size_t iov_count;
1009 ssize_t ret; 997 int ret;
1010 998
1011 ret = io_prep_rw(req, s, force_nonblock, state); 999 ret = io_prep_rw(req, s, force_nonblock, state);
1012 if (ret) 1000 if (ret)
1013 return ret; 1001 return ret;
1014 1002
1015 ret = -EBADF;
1016 file = kiocb->ki_filp; 1003 file = kiocb->ki_filp;
1017 if (unlikely(!(file->f_mode & FMODE_WRITE))) 1004 if (unlikely(!(file->f_mode & FMODE_WRITE)))
1018 goto out_fput; 1005 return -EBADF;
1019 ret = -EINVAL;
1020 if (unlikely(!file->f_op->write_iter)) 1006 if (unlikely(!file->f_op->write_iter))
1021 goto out_fput; 1007 return -EINVAL;
1022 1008
1023 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter); 1009 ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
1024 if (ret) 1010 if (ret)
1025 goto out_fput; 1011 return ret;
1026 1012
1027 iov_count = iov_iter_count(&iter); 1013 iov_count = iov_iter_count(&iter);
1028 1014
@@ -1036,6 +1022,8 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
1036 1022
1037 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); 1023 ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
1038 if (!ret) { 1024 if (!ret) {
1025 ssize_t ret2;
1026
1039 /* 1027 /*
1040 * Open-code file_start_write here to grab freeze protection, 1028 * Open-code file_start_write here to grab freeze protection,
1041 * which will be released by another thread in 1029 * which will be released by another thread in
@@ -1050,14 +1038,22 @@ static ssize_t io_write(struct io_kiocb *req, const struct sqe_submit *s,
1050 SB_FREEZE_WRITE); 1038 SB_FREEZE_WRITE);
1051 } 1039 }
1052 kiocb->ki_flags |= IOCB_WRITE; 1040 kiocb->ki_flags |= IOCB_WRITE;
1053 io_rw_done(kiocb, call_write_iter(file, kiocb, &iter)); 1041
1042 ret2 = call_write_iter(file, kiocb, &iter);
1043 if (!force_nonblock || ret2 != -EAGAIN) {
1044 io_rw_done(kiocb, ret2);
1045 } else {
1046 /*
1047 * If ->needs_lock is true, we're already in async
1048 * context.
1049 */
1050 if (!s->needs_lock)
1051 io_async_list_note(WRITE, req, iov_count);
1052 ret = -EAGAIN;
1053 }
1054 } 1054 }
1055out_free: 1055out_free:
1056 kfree(iovec); 1056 kfree(iovec);
1057out_fput:
1058 /* Hold on to the file for -EAGAIN */
1059 if (unlikely(ret && ret != -EAGAIN))
1060 io_fput(req);
1061 return ret; 1057 return ret;
1062} 1058}
1063 1059
@@ -1072,29 +1068,19 @@ static int io_nop(struct io_kiocb *req, u64 user_data)
1072 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) 1068 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1073 return -EINVAL; 1069 return -EINVAL;
1074 1070
1075 /*
1076 * Twilight zone - it's possible that someone issued an opcode that
1077 * has a file attached, then got -EAGAIN on submission, and changed
1078 * the sqe before we retried it from async context. Avoid dropping
1079 * a file reference for this malicious case, and flag the error.
1080 */
1081 if (req->rw.ki_filp) {
1082 err = -EBADF;
1083 io_fput(req);
1084 }
1085 io_cqring_add_event(ctx, user_data, err, 0); 1071 io_cqring_add_event(ctx, user_data, err, 0);
1086 io_free_req(req); 1072 io_put_req(req);
1087 return 0; 1073 return 0;
1088} 1074}
1089 1075
1090static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) 1076static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1091{ 1077{
1092 struct io_ring_ctx *ctx = req->ctx; 1078 struct io_ring_ctx *ctx = req->ctx;
1093 unsigned flags;
1094 int fd;
1095 1079
1096 /* Prep already done */ 1080 if (!req->file)
1097 if (req->rw.ki_filp) 1081 return -EBADF;
1082 /* Prep already done (EAGAIN retry) */
1083 if (req->flags & REQ_F_PREPPED)
1098 return 0; 1084 return 0;
1099 1085
1100 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) 1086 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
@@ -1102,20 +1088,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1102 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) 1088 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1103 return -EINVAL; 1089 return -EINVAL;
1104 1090
1105 fd = READ_ONCE(sqe->fd); 1091 req->flags |= REQ_F_PREPPED;
1106 flags = READ_ONCE(sqe->flags);
1107
1108 if (flags & IOSQE_FIXED_FILE) {
1109 if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
1110 return -EBADF;
1111 req->rw.ki_filp = ctx->user_files[fd];
1112 req->flags |= REQ_F_FIXED_FILE;
1113 } else {
1114 req->rw.ki_filp = fget(fd);
1115 if (unlikely(!req->rw.ki_filp))
1116 return -EBADF;
1117 }
1118
1119 return 0; 1092 return 0;
1120} 1093}
1121 1094
@@ -1144,9 +1117,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1144 end > 0 ? end : LLONG_MAX, 1117 end > 0 ? end : LLONG_MAX,
1145 fsync_flags & IORING_FSYNC_DATASYNC); 1118 fsync_flags & IORING_FSYNC_DATASYNC);
1146 1119
1147 io_fput(req);
1148 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0); 1120 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
1149 io_free_req(req); 1121 io_put_req(req);
1150 return 0; 1122 return 0;
1151} 1123}
1152 1124
@@ -1204,15 +1176,16 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1204 spin_unlock_irq(&ctx->completion_lock); 1176 spin_unlock_irq(&ctx->completion_lock);
1205 1177
1206 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0); 1178 io_cqring_add_event(req->ctx, sqe->user_data, ret, 0);
1207 io_free_req(req); 1179 io_put_req(req);
1208 return 0; 1180 return 0;
1209} 1181}
1210 1182
1211static void io_poll_complete(struct io_kiocb *req, __poll_t mask) 1183static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1184 __poll_t mask)
1212{ 1185{
1213 io_cqring_add_event(req->ctx, req->user_data, mangle_poll(mask), 0); 1186 req->poll.done = true;
1214 io_fput(req); 1187 io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask), 0);
1215 io_free_req(req); 1188 io_commit_cqring(ctx);
1216} 1189}
1217 1190
1218static void io_poll_complete_work(struct work_struct *work) 1191static void io_poll_complete_work(struct work_struct *work)
@@ -1240,9 +1213,11 @@ static void io_poll_complete_work(struct work_struct *work)
1240 return; 1213 return;
1241 } 1214 }
1242 list_del_init(&req->list); 1215 list_del_init(&req->list);
1216 io_poll_complete(ctx, req, mask);
1243 spin_unlock_irq(&ctx->completion_lock); 1217 spin_unlock_irq(&ctx->completion_lock);
1244 1218
1245 io_poll_complete(req, mask); 1219 io_cqring_ev_posted(ctx);
1220 io_put_req(req);
1246} 1221}
1247 1222
1248static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, 1223static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
@@ -1253,29 +1228,25 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1253 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll); 1228 struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1254 struct io_ring_ctx *ctx = req->ctx; 1229 struct io_ring_ctx *ctx = req->ctx;
1255 __poll_t mask = key_to_poll(key); 1230 __poll_t mask = key_to_poll(key);
1256 1231 unsigned long flags;
1257 poll->woken = true;
1258 1232
1259 /* for instances that support it check for an event match first: */ 1233 /* for instances that support it check for an event match first: */
1260 if (mask) { 1234 if (mask && !(mask & poll->events))
1261 unsigned long flags; 1235 return 0;
1262 1236
1263 if (!(mask & poll->events)) 1237 list_del_init(&poll->wait.entry);
1264 return 0;
1265 1238
1266 /* try to complete the iocb inline if we can: */ 1239 if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1267 if (spin_trylock_irqsave(&ctx->completion_lock, flags)) { 1240 list_del(&req->list);
1268 list_del(&req->list); 1241 io_poll_complete(ctx, req, mask);
1269 spin_unlock_irqrestore(&ctx->completion_lock, flags); 1242 spin_unlock_irqrestore(&ctx->completion_lock, flags);
1270 1243
1271 list_del_init(&poll->wait.entry); 1244 io_cqring_ev_posted(ctx);
1272 io_poll_complete(req, mask); 1245 io_put_req(req);
1273 return 1; 1246 } else {
1274 } 1247 queue_work(ctx->sqo_wq, &req->work);
1275 } 1248 }
1276 1249
1277 list_del_init(&poll->wait.entry);
1278 queue_work(ctx->sqo_wq, &req->work);
1279 return 1; 1250 return 1;
1280} 1251}
1281 1252
@@ -1305,36 +1276,23 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1305 struct io_poll_iocb *poll = &req->poll; 1276 struct io_poll_iocb *poll = &req->poll;
1306 struct io_ring_ctx *ctx = req->ctx; 1277 struct io_ring_ctx *ctx = req->ctx;
1307 struct io_poll_table ipt; 1278 struct io_poll_table ipt;
1308 unsigned flags; 1279 bool cancel = false;
1309 __poll_t mask; 1280 __poll_t mask;
1310 u16 events; 1281 u16 events;
1311 int fd;
1312 1282
1313 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) 1283 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1314 return -EINVAL; 1284 return -EINVAL;
1315 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) 1285 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1316 return -EINVAL; 1286 return -EINVAL;
1287 if (!poll->file)
1288 return -EBADF;
1317 1289
1318 INIT_WORK(&req->work, io_poll_complete_work); 1290 INIT_WORK(&req->work, io_poll_complete_work);
1319 events = READ_ONCE(sqe->poll_events); 1291 events = READ_ONCE(sqe->poll_events);
1320 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; 1292 poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1321 1293
1322 flags = READ_ONCE(sqe->flags);
1323 fd = READ_ONCE(sqe->fd);
1324
1325 if (flags & IOSQE_FIXED_FILE) {
1326 if (unlikely(!ctx->user_files || fd >= ctx->nr_user_files))
1327 return -EBADF;
1328 poll->file = ctx->user_files[fd];
1329 req->flags |= REQ_F_FIXED_FILE;
1330 } else {
1331 poll->file = fget(fd);
1332 }
1333 if (unlikely(!poll->file))
1334 return -EBADF;
1335
1336 poll->head = NULL; 1294 poll->head = NULL;
1337 poll->woken = false; 1295 poll->done = false;
1338 poll->canceled = false; 1296 poll->canceled = false;
1339 1297
1340 ipt.pt._qproc = io_poll_queue_proc; 1298 ipt.pt._qproc = io_poll_queue_proc;
@@ -1346,56 +1304,44 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1346 INIT_LIST_HEAD(&poll->wait.entry); 1304 INIT_LIST_HEAD(&poll->wait.entry);
1347 init_waitqueue_func_entry(&poll->wait, io_poll_wake); 1305 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1348 1306
1349 /* one for removal from waitqueue, one for this function */
1350 refcount_set(&req->refs, 2);
1351
1352 mask = vfs_poll(poll->file, &ipt.pt) & poll->events; 1307 mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
1353 if (unlikely(!poll->head)) {
1354 /* we did not manage to set up a waitqueue, done */
1355 goto out;
1356 }
1357 1308
1358 spin_lock_irq(&ctx->completion_lock); 1309 spin_lock_irq(&ctx->completion_lock);
1359 spin_lock(&poll->head->lock); 1310 if (likely(poll->head)) {
1360 if (poll->woken) { 1311 spin_lock(&poll->head->lock);
1361 /* wake_up context handles the rest */ 1312 if (unlikely(list_empty(&poll->wait.entry))) {
1362 mask = 0; 1313 if (ipt.error)
1314 cancel = true;
1315 ipt.error = 0;
1316 mask = 0;
1317 }
1318 if (mask || ipt.error)
1319 list_del_init(&poll->wait.entry);
1320 else if (cancel)
1321 WRITE_ONCE(poll->canceled, true);
1322 else if (!poll->done) /* actually waiting for an event */
1323 list_add_tail(&req->list, &ctx->cancel_list);
1324 spin_unlock(&poll->head->lock);
1325 }
1326 if (mask) { /* no async, we'd stolen it */
1327 req->error = mangle_poll(mask);
1363 ipt.error = 0; 1328 ipt.error = 0;
1364 } else if (mask || ipt.error) { 1329 io_poll_complete(ctx, req, mask);
1365 /* if we get an error or a mask we are done */
1366 WARN_ON_ONCE(list_empty(&poll->wait.entry));
1367 list_del_init(&poll->wait.entry);
1368 } else {
1369 /* actually waiting for an event */
1370 list_add_tail(&req->list, &ctx->cancel_list);
1371 } 1330 }
1372 spin_unlock(&poll->head->lock);
1373 spin_unlock_irq(&ctx->completion_lock); 1331 spin_unlock_irq(&ctx->completion_lock);
1374 1332
1375out: 1333 if (mask) {
1376 if (unlikely(ipt.error)) { 1334 io_cqring_ev_posted(ctx);
1377 if (!(flags & IOSQE_FIXED_FILE)) 1335 io_put_req(req);
1378 fput(poll->file);
1379 /*
1380 * Drop one of our refs to this req, __io_submit_sqe() will
1381 * drop the other one since we're returning an error.
1382 */
1383 io_free_req(req);
1384 return ipt.error;
1385 } 1336 }
1386 1337 return ipt.error;
1387 if (mask)
1388 io_poll_complete(req, mask);
1389 io_free_req(req);
1390 return 0;
1391} 1338}
1392 1339
1393static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, 1340static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
1394 const struct sqe_submit *s, bool force_nonblock, 1341 const struct sqe_submit *s, bool force_nonblock,
1395 struct io_submit_state *state) 1342 struct io_submit_state *state)
1396{ 1343{
1397 ssize_t ret; 1344 int ret, opcode;
1398 int opcode;
1399 1345
1400 if (unlikely(s->index >= ctx->sq_entries)) 1346 if (unlikely(s->index >= ctx->sq_entries))
1401 return -EINVAL; 1347 return -EINVAL;
@@ -1524,10 +1470,13 @@ restart:
1524 break; 1470 break;
1525 cond_resched(); 1471 cond_resched();
1526 } while (1); 1472 } while (1);
1473
1474 /* drop submission reference */
1475 io_put_req(req);
1527 } 1476 }
1528 if (ret) { 1477 if (ret) {
1529 io_cqring_add_event(ctx, sqe->user_data, ret, 0); 1478 io_cqring_add_event(ctx, sqe->user_data, ret, 0);
1530 io_free_req(req); 1479 io_put_req(req);
1531 } 1480 }
1532 1481
1533 /* async context always use a copy of the sqe */ 1482 /* async context always use a copy of the sqe */
@@ -1614,11 +1563,55 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
1614 return ret; 1563 return ret;
1615} 1564}
1616 1565
1566static bool io_op_needs_file(const struct io_uring_sqe *sqe)
1567{
1568 int op = READ_ONCE(sqe->opcode);
1569
1570 switch (op) {
1571 case IORING_OP_NOP:
1572 case IORING_OP_POLL_REMOVE:
1573 return false;
1574 default:
1575 return true;
1576 }
1577}
1578
1579static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
1580 struct io_submit_state *state, struct io_kiocb *req)
1581{
1582 unsigned flags;
1583 int fd;
1584
1585 flags = READ_ONCE(s->sqe->flags);
1586 fd = READ_ONCE(s->sqe->fd);
1587
1588 if (!io_op_needs_file(s->sqe)) {
1589 req->file = NULL;
1590 return 0;
1591 }
1592
1593 if (flags & IOSQE_FIXED_FILE) {
1594 if (unlikely(!ctx->user_files ||
1595 (unsigned) fd >= ctx->nr_user_files))
1596 return -EBADF;
1597 req->file = ctx->user_files[fd];
1598 req->flags |= REQ_F_FIXED_FILE;
1599 } else {
1600 if (s->needs_fixed_file)
1601 return -EBADF;
1602 req->file = io_file_get(state, fd);
1603 if (unlikely(!req->file))
1604 return -EBADF;
1605 }
1606
1607 return 0;
1608}
1609
1617static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, 1610static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
1618 struct io_submit_state *state) 1611 struct io_submit_state *state)
1619{ 1612{
1620 struct io_kiocb *req; 1613 struct io_kiocb *req;
1621 ssize_t ret; 1614 int ret;
1622 1615
1623 /* enforce forwards compatibility on users */ 1616 /* enforce forwards compatibility on users */
1624 if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE)) 1617 if (unlikely(s->sqe->flags & ~IOSQE_FIXED_FILE))
@@ -1628,7 +1621,9 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
1628 if (unlikely(!req)) 1621 if (unlikely(!req))
1629 return -EAGAIN; 1622 return -EAGAIN;
1630 1623
1631 req->rw.ki_filp = NULL; 1624 ret = io_req_set_file(ctx, s, state, req);
1625 if (unlikely(ret))
1626 goto out;
1632 1627
1633 ret = __io_submit_sqe(ctx, req, s, true, state); 1628 ret = __io_submit_sqe(ctx, req, s, true, state);
1634 if (ret == -EAGAIN) { 1629 if (ret == -EAGAIN) {
@@ -1649,11 +1644,23 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
1649 INIT_WORK(&req->work, io_sq_wq_submit_work); 1644 INIT_WORK(&req->work, io_sq_wq_submit_work);
1650 queue_work(ctx->sqo_wq, &req->work); 1645 queue_work(ctx->sqo_wq, &req->work);
1651 } 1646 }
1652 ret = 0; 1647
1648 /*
1649 * Queued up for async execution, worker will release
1650 * submit reference when the iocb is actually
1651 * submitted.
1652 */
1653 return 0;
1653 } 1654 }
1654 } 1655 }
1656
1657out:
1658 /* drop submission reference */
1659 io_put_req(req);
1660
1661 /* and drop final reference, if we failed */
1655 if (ret) 1662 if (ret)
1656 io_free_req(req); 1663 io_put_req(req);
1657 1664
1658 return ret; 1665 return ret;
1659} 1666}
@@ -1975,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
1975 return 0; 1982 return 0;
1976 1983
1977 if (sig) { 1984 if (sig) {
1978 ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz); 1985#ifdef CONFIG_COMPAT
1986 if (in_compat_syscall())
1987 ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
1988 &ksigmask, &sigsaved, sigsz);
1989 else
1990#endif
1991 ret = set_user_sigmask(sig, &ksigmask,
1992 &sigsaved, sigsz);
1993
1979 if (ret) 1994 if (ret)
1980 return ret; 1995 return ret;
1981 } 1996 }
diff --git a/fs/iomap.c b/fs/iomap.c
index 97cb9d486a7d..abdd18e404f8 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1589,12 +1589,14 @@ static void iomap_dio_bio_end_io(struct bio *bio)
1589 if (should_dirty) { 1589 if (should_dirty) {
1590 bio_check_pages_dirty(bio); 1590 bio_check_pages_dirty(bio);
1591 } else { 1591 } else {
1592 struct bio_vec *bvec; 1592 if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
1593 int i; 1593 struct bvec_iter_all iter_all;
1594 struct bvec_iter_all iter_all; 1594 struct bio_vec *bvec;
1595 int i;
1595 1596
1596 bio_for_each_segment_all(bvec, bio, i, iter_all) 1597 bio_for_each_segment_all(bvec, bio, i, iter_all)
1597 put_page(bvec->bv_page); 1598 put_page(bvec->bv_page);
1599 }
1598 bio_put(bio); 1600 bio_put(bio);
1599 } 1601 }
1600} 1602}
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 93fb7cf0b92b..f0b5c987d6ae 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
290 290
291 WARN_ON_ONCE(host->h_server); 291 WARN_ON_ONCE(host->h_server);
292 292
293 if (refcount_dec_and_test(&host->h_count)) { 293 if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
294 WARN_ON_ONCE(!list_empty(&host->h_lockowners)); 294 WARN_ON_ONCE(!list_empty(&host->h_lockowners));
295 WARN_ON_ONCE(!list_empty(&host->h_granted)); 295 WARN_ON_ONCE(!list_empty(&host->h_granted));
296 WARN_ON_ONCE(!list_empty(&host->h_reclaim)); 296 WARN_ON_ONCE(!list_empty(&host->h_reclaim));
297 297
298 mutex_lock(&nlm_host_mutex);
299 nlm_destroy_host_locked(host); 298 nlm_destroy_host_locked(host);
300 mutex_unlock(&nlm_host_mutex); 299 mutex_unlock(&nlm_host_mutex);
301 } 300 }
diff --git a/fs/locks.c b/fs/locks.c
index eaa1cfaf73b0..71d0c6c2aac5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
1160 */ 1160 */
1161 error = -EDEADLK; 1161 error = -EDEADLK;
1162 spin_lock(&blocked_lock_lock); 1162 spin_lock(&blocked_lock_lock);
1163 /*
1164 * Ensure that we don't find any locks blocked on this
1165 * request during deadlock detection.
1166 */
1167 __locks_wake_up_blocks(request);
1163 if (likely(!posix_locks_deadlock(request, fl))) { 1168 if (likely(!posix_locks_deadlock(request, fl))) {
1164 error = FILE_LOCK_DEFERRED; 1169 error = FILE_LOCK_DEFERRED;
1165 __locks_insert_block(fl, request, 1170 __locks_insert_block(fl, request,
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index fb1cf1a4bda2..90d71fda65ce 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
453 case XPRT_TRANSPORT_RDMA: 453 case XPRT_TRANSPORT_RDMA:
454 if (retrans == NFS_UNSPEC_RETRANS) 454 if (retrans == NFS_UNSPEC_RETRANS)
455 to->to_retries = NFS_DEF_TCP_RETRANS; 455 to->to_retries = NFS_DEF_TCP_RETRANS;
456 if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0) 456 if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
457 to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10; 457 to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
458 if (to->to_initval > NFS_MAX_TCP_TIMEOUT) 458 if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
459 to->to_initval = NFS_MAX_TCP_TIMEOUT; 459 to->to_initval = NFS_MAX_TCP_TIMEOUT;
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index f9264e1922a2..6673d4ff5a2a 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1289,6 +1289,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
1289static int ff_layout_read_done_cb(struct rpc_task *task, 1289static int ff_layout_read_done_cb(struct rpc_task *task,
1290 struct nfs_pgio_header *hdr) 1290 struct nfs_pgio_header *hdr)
1291{ 1291{
1292 int new_idx = hdr->pgio_mirror_idx;
1292 int err; 1293 int err;
1293 1294
1294 trace_nfs4_pnfs_read(hdr, task->tk_status); 1295 trace_nfs4_pnfs_read(hdr, task->tk_status);
@@ -1307,7 +1308,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
1307 case -NFS4ERR_RESET_TO_PNFS: 1308 case -NFS4ERR_RESET_TO_PNFS:
1308 if (ff_layout_choose_best_ds_for_read(hdr->lseg, 1309 if (ff_layout_choose_best_ds_for_read(hdr->lseg,
1309 hdr->pgio_mirror_idx + 1, 1310 hdr->pgio_mirror_idx + 1,
1310 &hdr->pgio_mirror_idx)) 1311 &new_idx))
1311 goto out_layouterror; 1312 goto out_layouterror;
1312 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); 1313 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1313 return task->tk_status; 1314 return task->tk_status;
@@ -1320,7 +1321,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
1320 1321
1321 return 0; 1322 return 0;
1322out_layouterror: 1323out_layouterror:
1324 ff_layout_read_record_layoutstats_done(task, hdr);
1323 ff_layout_send_layouterror(hdr->lseg); 1325 ff_layout_send_layouterror(hdr->lseg);
1326 hdr->pgio_mirror_idx = new_idx;
1324out_eagain: 1327out_eagain:
1325 rpc_restart_call_prepare(task); 1328 rpc_restart_call_prepare(task);
1326 return -EAGAIN; 1329 return -EAGAIN;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4dbb0ee23432..741ff8c9c6ed 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2933,7 +2933,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2933 } 2933 }
2934 2934
2935out: 2935out:
2936 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 2936 if (!opendata->cancelled)
2937 nfs4_sequence_free_slot(&opendata->o_res.seq_res);
2937 return ret; 2938 return ret;
2938} 2939}
2939 2940
@@ -6301,7 +6302,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6301 p->arg.seqid = seqid; 6302 p->arg.seqid = seqid;
6302 p->res.seqid = seqid; 6303 p->res.seqid = seqid;
6303 p->lsp = lsp; 6304 p->lsp = lsp;
6304 refcount_inc(&lsp->ls_count);
6305 /* Ensure we don't close file until we're done freeing locks! */ 6305 /* Ensure we don't close file until we're done freeing locks! */
6306 p->ctx = get_nfs_open_context(ctx); 6306 p->ctx = get_nfs_open_context(ctx);
6307 p->l_ctx = nfs_get_lock_context(ctx); 6307 p->l_ctx = nfs_get_lock_context(ctx);
@@ -6526,7 +6526,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6526 p->res.lock_seqid = p->arg.lock_seqid; 6526 p->res.lock_seqid = p->arg.lock_seqid;
6527 p->lsp = lsp; 6527 p->lsp = lsp;
6528 p->server = server; 6528 p->server = server;
6529 refcount_inc(&lsp->ls_count);
6530 p->ctx = get_nfs_open_context(ctx); 6529 p->ctx = get_nfs_open_context(ctx);
6531 locks_init_lock(&p->fl); 6530 locks_init_lock(&p->fl);
6532 locks_copy_lock(&p->fl, fl); 6531 locks_copy_lock(&p->fl, fl);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 56992b32c6bb..a90bb19dcfa2 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -208,6 +208,7 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
208{ 208{
209 struct fanotify_event_info_fid info = { }; 209 struct fanotify_event_info_fid info = { };
210 struct file_handle handle = { }; 210 struct file_handle handle = { };
211 unsigned char bounce[FANOTIFY_INLINE_FH_LEN], *fh;
211 size_t fh_len = event->fh_len; 212 size_t fh_len = event->fh_len;
212 size_t len = fanotify_event_info_len(event); 213 size_t len = fanotify_event_info_len(event);
213 214
@@ -233,7 +234,16 @@ static int copy_fid_to_user(struct fanotify_event *event, char __user *buf)
233 234
234 buf += sizeof(handle); 235 buf += sizeof(handle);
235 len -= sizeof(handle); 236 len -= sizeof(handle);
236 if (copy_to_user(buf, fanotify_event_fh(event), fh_len)) 237 /*
238 * For an inline fh, copy through stack to exclude the copy from
239 * usercopy hardening protections.
240 */
241 fh = fanotify_event_fh(event);
242 if (fh_len <= FANOTIFY_INLINE_FH_LEN) {
243 memcpy(bounce, fh, fh_len);
244 fh = bounce;
245 }
246 if (copy_to_user(buf, fh, fh_len))
237 return -EFAULT; 247 return -EFAULT;
238 248
239 /* Pad with 0's */ 249 /* Pad with 0's */
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index e2901fbb9f76..7b53598c8804 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -519,8 +519,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
519 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); 519 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
520 if (!fsn_mark) 520 if (!fsn_mark)
521 return -ENOENT; 521 return -ENOENT;
522 else if (create) 522 else if (create) {
523 return -EEXIST; 523 ret = -EEXIST;
524 goto out;
525 }
524 526
525 i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); 527 i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
526 528
@@ -548,6 +550,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
548 /* return the wd */ 550 /* return the wd */
549 ret = i_mark->wd; 551 ret = i_mark->wd;
550 552
553out:
551 /* match the get from fsnotify_find_mark() */ 554 /* match the get from fsnotify_find_mark() */
552 fsnotify_put_mark(fsn_mark); 555 fsnotify_put_mark(fsn_mark);
553 556
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index a35259eebc56..1dc9a08e8bdc 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4719,22 +4719,23 @@ out:
4719 4719
4720/* Lock an inode and grab a bh pointing to the inode. */ 4720/* Lock an inode and grab a bh pointing to the inode. */
4721int ocfs2_reflink_inodes_lock(struct inode *s_inode, 4721int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4722 struct buffer_head **bh1, 4722 struct buffer_head **bh_s,
4723 struct inode *t_inode, 4723 struct inode *t_inode,
4724 struct buffer_head **bh2) 4724 struct buffer_head **bh_t)
4725{ 4725{
4726 struct inode *inode1; 4726 struct inode *inode1 = s_inode;
4727 struct inode *inode2; 4727 struct inode *inode2 = t_inode;
4728 struct ocfs2_inode_info *oi1; 4728 struct ocfs2_inode_info *oi1;
4729 struct ocfs2_inode_info *oi2; 4729 struct ocfs2_inode_info *oi2;
4730 struct buffer_head *bh1 = NULL;
4731 struct buffer_head *bh2 = NULL;
4730 bool same_inode = (s_inode == t_inode); 4732 bool same_inode = (s_inode == t_inode);
4733 bool need_swap = (inode1->i_ino > inode2->i_ino);
4731 int status; 4734 int status;
4732 4735
4733 /* First grab the VFS and rw locks. */ 4736 /* First grab the VFS and rw locks. */
4734 lock_two_nondirectories(s_inode, t_inode); 4737 lock_two_nondirectories(s_inode, t_inode);
4735 inode1 = s_inode; 4738 if (need_swap)
4736 inode2 = t_inode;
4737 if (inode1->i_ino > inode2->i_ino)
4738 swap(inode1, inode2); 4739 swap(inode1, inode2);
4739 4740
4740 status = ocfs2_rw_lock(inode1, 1); 4741 status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4757 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno, 4758 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
4758 (unsigned long long)oi2->ip_blkno); 4759 (unsigned long long)oi2->ip_blkno);
4759 4760
4760 if (*bh1)
4761 *bh1 = NULL;
4762 if (*bh2)
4763 *bh2 = NULL;
4764
4765 /* We always want to lock the one with the lower lockid first. */ 4761 /* We always want to lock the one with the lower lockid first. */
4766 if (oi1->ip_blkno > oi2->ip_blkno) 4762 if (oi1->ip_blkno > oi2->ip_blkno)
4767 mlog_errno(-ENOLCK); 4763 mlog_errno(-ENOLCK);
4768 4764
4769 /* lock id1 */ 4765 /* lock id1 */
4770 status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET); 4766 status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
4767 OI_LS_REFLINK_TARGET);
4771 if (status < 0) { 4768 if (status < 0) {
4772 if (status != -ENOENT) 4769 if (status != -ENOENT)
4773 mlog_errno(status); 4770 mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4776 4773
4777 /* lock id2 */ 4774 /* lock id2 */
4778 if (!same_inode) { 4775 if (!same_inode) {
4779 status = ocfs2_inode_lock_nested(inode2, bh2, 1, 4776 status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
4780 OI_LS_REFLINK_TARGET); 4777 OI_LS_REFLINK_TARGET);
4781 if (status < 0) { 4778 if (status < 0) {
4782 if (status != -ENOENT) 4779 if (status != -ENOENT)
4783 mlog_errno(status); 4780 mlog_errno(status);
4784 goto out_cl1; 4781 goto out_cl1;
4785 } 4782 }
4786 } else 4783 } else {
4787 *bh2 = *bh1; 4784 bh2 = bh1;
4785 }
4786
4787 /*
4788 * If we swapped inode order above, we have to swap the buffer heads
4789 * before passing them back to the caller.
4790 */
4791 if (need_swap)
4792 swap(bh1, bh2);
4793 *bh_s = bh1;
4794 *bh_t = bh2;
4788 4795
4789 trace_ocfs2_double_lock_end( 4796 trace_ocfs2_double_lock_end(
4790 (unsigned long long)oi1->ip_blkno, 4797 (unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
4794 4801
4795out_cl1: 4802out_cl1:
4796 ocfs2_inode_unlock(inode1, 1); 4803 ocfs2_inode_unlock(inode1, 1);
4797 brelse(*bh1); 4804 brelse(bh1);
4798 *bh1 = NULL;
4799out_rw2: 4805out_rw2:
4800 ocfs2_rw_unlock(inode2, 1); 4806 ocfs2_rw_unlock(inode2, 1);
4801out_i2: 4807out_i2:
diff --git a/fs/open.c b/fs/open.c
index 0285ce7dbd51..f1c2f855fd43 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
733 return 0; 733 return 0;
734 } 734 }
735 735
736 /* Any file opened for execve()/uselib() has to be a regular file. */
737 if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
738 error = -EACCES;
739 goto cleanup_file;
740 }
741
736 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { 742 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
737 error = get_write_access(inode); 743 error = get_write_access(inode);
738 if (unlikely(error)) 744 if (unlikely(error))
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index bbcc185062bb..f5834488b67d 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -54,6 +54,28 @@ static LIST_HEAD(kclist_head);
54static DECLARE_RWSEM(kclist_lock); 54static DECLARE_RWSEM(kclist_lock);
55static int kcore_need_update = 1; 55static int kcore_need_update = 1;
56 56
57/*
58 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
59 * Same as oldmem_pfn_is_ram in vmcore
60 */
61static int (*mem_pfn_is_ram)(unsigned long pfn);
62
63int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
64{
65 if (mem_pfn_is_ram)
66 return -EBUSY;
67 mem_pfn_is_ram = fn;
68 return 0;
69}
70
71static int pfn_is_ram(unsigned long pfn)
72{
73 if (mem_pfn_is_ram)
74 return mem_pfn_is_ram(pfn);
75 else
76 return 1;
77}
78
57/* This doesn't grab kclist_lock, so it should only be used at init time. */ 79/* This doesn't grab kclist_lock, so it should only be used at init time. */
58void __init kclist_add(struct kcore_list *new, void *addr, size_t size, 80void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
59 int type) 81 int type)
@@ -465,6 +487,11 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
465 goto out; 487 goto out;
466 } 488 }
467 m = NULL; /* skip the list anchor */ 489 m = NULL; /* skip the list anchor */
490 } else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
491 if (clear_user(buffer, tsz)) {
492 ret = -EFAULT;
493 goto out;
494 }
468 } else if (m->type == KCORE_VMALLOC) { 495 } else if (m->type == KCORE_VMALLOC) {
469 vread(buf, (char *)start, tsz); 496 vread(buf, (char *)start, tsz);
470 /* we have to zero-fill user buffer even if no read */ 497 /* we have to zero-fill user buffer even if no read */
@@ -588,7 +615,7 @@ static void __init proc_kcore_text_init(void)
588/* 615/*
589 * MODULES_VADDR has no intersection with VMALLOC_ADDR. 616 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
590 */ 617 */
591struct kcore_list kcore_modules; 618static struct kcore_list kcore_modules;
592static void __init add_modules_range(void) 619static void __init add_modules_range(void)
593{ 620{
594 if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) { 621 if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 4d598a399bbf..d65390727541 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
1626 if (--header->nreg) 1626 if (--header->nreg)
1627 return; 1627 return;
1628 1628
1629 put_links(header); 1629 if (parent)
1630 put_links(header);
1630 start_unregistering(header); 1631 start_unregistering(header);
1631 if (!--header->count) 1632 if (!--header->count)
1632 kfree_rcu(header, rcu); 1633 kfree_rcu(header, rcu);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index ae796e10f68b..e7276932e433 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1242,8 +1242,10 @@ set_size:
1242 truncate_setsize(inode, newsize); 1242 truncate_setsize(inode, newsize);
1243 down_write(&iinfo->i_data_sem); 1243 down_write(&iinfo->i_data_sem);
1244 udf_clear_extent_cache(inode); 1244 udf_clear_extent_cache(inode);
1245 udf_truncate_extents(inode); 1245 err = udf_truncate_extents(inode);
1246 up_write(&iinfo->i_data_sem); 1246 up_write(&iinfo->i_data_sem);
1247 if (err)
1248 return err;
1247 } 1249 }
1248update_time: 1250update_time:
1249 inode->i_mtime = inode->i_ctime = current_time(inode); 1251 inode->i_mtime = inode->i_ctime = current_time(inode);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index b647f0bd150c..63a47f1e1d52 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -199,7 +199,7 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
199 * for making file shorter. For making file longer, udf_extend_file() has to 199 * for making file shorter. For making file longer, udf_extend_file() has to
200 * be used. 200 * be used.
201 */ 201 */
202void udf_truncate_extents(struct inode *inode) 202int udf_truncate_extents(struct inode *inode)
203{ 203{
204 struct extent_position epos; 204 struct extent_position epos;
205 struct kernel_lb_addr eloc, neloc = {}; 205 struct kernel_lb_addr eloc, neloc = {};
@@ -224,7 +224,7 @@ void udf_truncate_extents(struct inode *inode)
224 if (etype == -1) { 224 if (etype == -1) {
225 /* We should extend the file? */ 225 /* We should extend the file? */
226 WARN_ON(byte_offset); 226 WARN_ON(byte_offset);
227 return; 227 return 0;
228 } 228 }
229 epos.offset -= adsize; 229 epos.offset -= adsize;
230 extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset); 230 extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
260 epos.block = eloc; 260 epos.block = eloc;
261 epos.bh = udf_tread(sb, 261 epos.bh = udf_tread(sb,
262 udf_get_lb_pblock(sb, &eloc, 0)); 262 udf_get_lb_pblock(sb, &eloc, 0));
263 /* Error reading indirect block? */
264 if (!epos.bh)
265 return -EIO;
263 if (elen) 266 if (elen)
264 indirect_ext_len = 267 indirect_ext_len =
265 (elen + sb->s_blocksize - 1) >> 268 (elen + sb->s_blocksize - 1) >>
@@ -283,4 +286,5 @@ void udf_truncate_extents(struct inode *inode)
283 iinfo->i_lenExtents = inode->i_size; 286 iinfo->i_lenExtents = inode->i_size;
284 287
285 brelse(epos.bh); 288 brelse(epos.bh);
289 return 0;
286} 290}
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index ee246769dee4..d89ef71887fc 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -235,7 +235,7 @@ extern struct inode *udf_new_inode(struct inode *, umode_t);
235/* truncate.c */ 235/* truncate.c */
236extern void udf_truncate_tail_extent(struct inode *); 236extern void udf_truncate_tail_extent(struct inode *);
237extern void udf_discard_prealloc(struct inode *); 237extern void udf_discard_prealloc(struct inode *);
238extern void udf_truncate_extents(struct inode *); 238extern int udf_truncate_extents(struct inode *);
239 239
240/* balloc.c */ 240/* balloc.c */
241extern void udf_free_blocks(struct super_block *, struct inode *, 241extern void udf_free_blocks(struct super_block *, struct inode *,
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 48502cb9990f..4637ae1ae91c 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
1191 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out. 1191 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1192 */ 1192 */
1193 level = be16_to_cpu(block->bb_level); 1193 level = be16_to_cpu(block->bb_level);
1194 ASSERT(level > 0); 1194 if (unlikely(level == 0)) {
1195 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
1196 return -EFSCORRUPTED;
1197 }
1195 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 1198 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1196 bno = be64_to_cpu(*pp); 1199 bno = be64_to_cpu(*pp);
1197 1200
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
4249 struct xfs_bmbt_irec *mval, /* output: map values */ 4252 struct xfs_bmbt_irec *mval, /* output: map values */
4250 int *nmap) /* i/o: mval size/count */ 4253 int *nmap) /* i/o: mval size/count */
4251{ 4254{
4255 struct xfs_bmalloca bma = {
4256 .tp = tp,
4257 .ip = ip,
4258 .total = total,
4259 };
4252 struct xfs_mount *mp = ip->i_mount; 4260 struct xfs_mount *mp = ip->i_mount;
4253 struct xfs_ifork *ifp; 4261 struct xfs_ifork *ifp;
4254 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4255 xfs_fileoff_t end; /* end of mapped file region */ 4262 xfs_fileoff_t end; /* end of mapped file region */
4256 bool eof = false; /* after the end of extents */ 4263 bool eof = false; /* after the end of extents */
4257 int error; /* error return */ 4264 int error; /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
4319 eof = true; 4326 eof = true;
4320 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev)) 4327 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4321 bma.prev.br_startoff = NULLFILEOFF; 4328 bma.prev.br_startoff = NULLFILEOFF;
4322 bma.tp = tp;
4323 bma.ip = ip;
4324 bma.total = total;
4325 bma.datatype = 0;
4326 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork); 4329 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4327 4330
4328 n = 0; 4331 n = 0;
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 6f94d1f7322d..117910db51b8 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
415 struct xfs_btree_cur *cur = bs->cur; 415 struct xfs_btree_cur *cur = bs->cur;
416 struct check_owner *co; 416 struct check_owner *co;
417 417
418 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL) 418 /*
419 * In theory, xfs_btree_get_block should only give us a null buffer
420 * pointer for the root of a root-in-inode btree type, but we need
421 * to check defensively here in case the cursor state is also screwed
422 * up.
423 */
424 if (bp == NULL) {
425 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
426 xchk_btree_set_corrupt(bs->sc, bs->cur, level);
419 return 0; 427 return 0;
428 }
420 429
421 /* 430 /*
422 * We want to cross-reference each btree block with the bnobt 431 * We want to cross-reference each btree block with the bnobt
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index f1260b4bfdee..90527b094878 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -574,6 +574,11 @@ xchk_da_btree(
574 /* Drill another level deeper. */ 574 /* Drill another level deeper. */
575 blkno = be32_to_cpu(key->before); 575 blkno = be32_to_cpu(key->before);
576 level++; 576 level++;
577 if (level >= XFS_DA_NODE_MAXDEPTH) {
578 /* Too deep! */
579 xchk_da_set_corrupt(&ds, level - 1);
580 break;
581 }
577 ds.tree_level--; 582 ds.tree_level--;
578 error = xchk_da_btree_block(&ds, level, blkno); 583 error = xchk_da_btree_block(&ds, level, blkno);
579 if (error) 584 if (error)
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 93f07edafd81..9ee2a7d02e70 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -161,6 +161,14 @@ xfs_ioc_trim(
161 return -EPERM; 161 return -EPERM;
162 if (!blk_queue_discard(q)) 162 if (!blk_queue_discard(q))
163 return -EOPNOTSUPP; 163 return -EOPNOTSUPP;
164
165 /*
166 * We haven't recovered the log, so we cannot use our bnobt-guided
167 * storage zapping commands.
168 */
169 if (mp->m_flags & XFS_MOUNT_NORECOVERY)
170 return -EROFS;
171
164 if (copy_from_user(&range, urange, sizeof(range))) 172 if (copy_from_user(&range, urange, sizeof(range)))
165 return -EFAULT; 173 return -EFAULT;
166 174
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 1f2e2845eb76..a7ceae90110e 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
529 count = iov_iter_count(from); 529 count = iov_iter_count(from);
530 530
531 /* 531 /*
532 * If we are doing unaligned IO, wait for all other IO to drain, 532 * If we are doing unaligned IO, we can't allow any other overlapping IO
533 * otherwise demote the lock if we had to take the exclusive lock 533 * in-flight at the same time or we risk data corruption. Wait for all
534 * for other reasons in xfs_file_aio_write_checks. 534 * other IO to drain before we submit. If the IO is aligned, demote the
535 * iolock if we had to take the exclusive lock in
536 * xfs_file_aio_write_checks() for other reasons.
535 */ 537 */
536 if (unaligned_io) { 538 if (unaligned_io) {
537 /* If we are going to wait for other DIO to finish, bail */ 539 /* unaligned dio always waits, bail */
538 if (iocb->ki_flags & IOCB_NOWAIT) { 540 if (iocb->ki_flags & IOCB_NOWAIT)
539 if (atomic_read(&inode->i_dio_count)) 541 return -EAGAIN;
540 return -EAGAIN; 542 inode_dio_wait(inode);
541 } else {
542 inode_dio_wait(inode);
543 }
544 } else if (iolock == XFS_IOLOCK_EXCL) { 543 } else if (iolock == XFS_IOLOCK_EXCL) {
545 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); 544 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
546 iolock = XFS_IOLOCK_SHARED; 545 iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
548 547
549 trace_xfs_file_direct_write(ip, count, iocb->ki_pos); 548 trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
550 ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io); 549 ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
550
551 /*
552 * If unaligned, this is the only IO in-flight. If it has not yet
553 * completed, wait on it before we release the iolock to prevent
554 * subsequent overlapping IO.
555 */
556 if (ret == -EIOCBQUEUED && unaligned_io)
557 inode_dio_wait(inode);
551out: 558out:
552 xfs_iunlock(ip, iolock); 559 xfs_iunlock(ip, iolock);
553 560
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 30b1ae53689f..c50542dc71e0 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -150,7 +150,10 @@
150 150
151/* Defaults for debug_level, debug and normal */ 151/* Defaults for debug_level, debug and normal */
152 152
153#ifndef ACPI_DEBUG_DEFAULT
153#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR) 154#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
155#endif
156
154#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR) 157#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
155#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL) 158#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
156 159
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 9ff328fd946a..624b90b34085 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -82,6 +82,11 @@
82#define ACPI_NO_ERROR_MESSAGES 82#define ACPI_NO_ERROR_MESSAGES
83#undef ACPI_DEBUG_OUTPUT 83#undef ACPI_DEBUG_OUTPUT
84 84
85/* Use a specific bugging default separate from ACPICA */
86
87#undef ACPI_DEBUG_DEFAULT
88#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
89
85/* External interface for __KERNEL__, stub is needed */ 90/* External interface for __KERNEL__, stub is needed */
86 91
87#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \ 92#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index d5cfc0b15b76..f6034ba774be 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
108#define AARP_RESOLVE_TIME (10 * HZ) 108#define AARP_RESOLVE_TIME (10 * HZ)
109 109
110extern struct datalink_proto *ddp_dl, *aarp_dl; 110extern struct datalink_proto *ddp_dl, *aarp_dl;
111extern void aarp_proto_init(void); 111extern int aarp_proto_init(void);
112 112
113/* Inter module exports */ 113/* Inter module exports */
114 114
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b0c814bcc7e3..cb2aa7ecafff 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -57,7 +57,6 @@ struct blk_mq_hw_ctx {
57 unsigned int queue_num; 57 unsigned int queue_num;
58 58
59 atomic_t nr_active; 59 atomic_t nr_active;
60 unsigned int nr_expired;
61 60
62 struct hlist_node cpuhp_dead; 61 struct hlist_node cpuhp_dead;
63 struct kobject kobj; 62 struct kobject kobj;
@@ -300,8 +299,6 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
300void __blk_mq_end_request(struct request *rq, blk_status_t error); 299void __blk_mq_end_request(struct request *rq, blk_status_t error);
301 300
302void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); 301void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
303void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
304 bool kick_requeue_list);
305void blk_mq_kick_requeue_list(struct request_queue *q); 302void blk_mq_kick_requeue_list(struct request_queue *q);
306void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); 303void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
307bool blk_mq_complete_request(struct request *rq); 304bool blk_mq_complete_request(struct request *rq);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d66bf5f32610..791fee35df88 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -215,6 +215,7 @@ struct bio {
215/* 215/*
216 * bio flags 216 * bio flags
217 */ 217 */
218#define BIO_NO_PAGE_REF 0 /* don't put release vec pages */
218#define BIO_SEG_VALID 1 /* bi_phys_segments valid */ 219#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
219#define BIO_CLONED 2 /* doesn't own data */ 220#define BIO_CLONED 2 /* doesn't own data */
220#define BIO_BOUNCED 3 /* bio is a bounce bio */ 221#define BIO_BOUNCED 3 /* bio is a bounce bio */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0de92b29f589..5c58a3b2bf00 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -50,6 +50,9 @@ struct blk_stat_callback;
50/* Must be consistent with blk_mq_poll_stats_bkt() */ 50/* Must be consistent with blk_mq_poll_stats_bkt() */
51#define BLK_MQ_POLL_STATS_BKTS 16 51#define BLK_MQ_POLL_STATS_BKTS 16
52 52
53/* Doing classic polling */
54#define BLK_MQ_POLL_CLASSIC -1
55
53/* 56/*
54 * Maximum number of blkcg policies allowed to be registered concurrently. 57 * Maximum number of blkcg policies allowed to be registered concurrently.
55 * Defined here to simplify include dependency. 58 * Defined here to simplify include dependency.
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a2132e09dc1c..f02367faa58d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -193,7 +193,6 @@ enum bpf_arg_type {
193 193
194 ARG_PTR_TO_CTX, /* pointer to context */ 194 ARG_PTR_TO_CTX, /* pointer to context */
195 ARG_ANYTHING, /* any (initialized) argument is ok */ 195 ARG_ANYTHING, /* any (initialized) argument is ok */
196 ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
197 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ 196 ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
198 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ 197 ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
199}; 198};
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 69f7a3449eda..7d8228d1c898 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,6 +66,46 @@ struct bpf_reg_state {
66 * same reference to the socket, to determine proper reference freeing. 66 * same reference to the socket, to determine proper reference freeing.
67 */ 67 */
68 u32 id; 68 u32 id;
69 /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
70 * from a pointer-cast helper, bpf_sk_fullsock() and
71 * bpf_tcp_sock().
72 *
73 * Consider the following where "sk" is a reference counted
74 * pointer returned from "sk = bpf_sk_lookup_tcp();":
75 *
76 * 1: sk = bpf_sk_lookup_tcp();
77 * 2: if (!sk) { return 0; }
78 * 3: fullsock = bpf_sk_fullsock(sk);
79 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
80 * 5: tp = bpf_tcp_sock(fullsock);
81 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
82 * 7: bpf_sk_release(sk);
83 * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
84 *
85 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
86 * "tp" ptr should be invalidated also. In order to do that,
87 * the reg holding "fullsock" and "sk" need to remember
88 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
89 * such that the verifier can reset all regs which have
90 * ref_obj_id matching the sk_reg->id.
91 *
92 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
93 * sk_reg->id will stay as NULL-marking purpose only.
94 * After NULL-marking is done, sk_reg->id can be reset to 0.
95 *
96 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
97 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
98 *
99 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
100 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
101 * which is the same as sk_reg->ref_obj_id.
102 *
103 * From the verifier perspective, if sk, fullsock and tp
104 * are not NULL, they are the same ptr with different
105 * reg->type. In particular, bpf_sk_release(tp) is also
106 * allowed and has the same effect as bpf_sk_release(sk).
107 */
108 u32 ref_obj_id;
69 /* For scalar types (SCALAR_VALUE), this represents our knowledge of 109 /* For scalar types (SCALAR_VALUE), this represents our knowledge of
70 * the actual value. 110 * the actual value.
71 * For pointer types, this represents the variable part of the offset 111 * For pointer types, this represents the variable part of the offset
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 9cd00a37b8d3..6db2d9a6e503 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -148,6 +148,22 @@
148#define BCM_LED_SRC_OFF 0xe /* Tied high */ 148#define BCM_LED_SRC_OFF 0xe /* Tied high */
149#define BCM_LED_SRC_ON 0xf /* Tied low */ 149#define BCM_LED_SRC_ON 0xf /* Tied low */
150 150
151/*
152 * Broadcom Multicolor LED configurations (expansion register 4)
153 */
154#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04)
155#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8)
156#define BCM_LED_MULTICOLOR_LINK_ACT 0x0
157#define BCM_LED_MULTICOLOR_SPEED 0x1
158#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2
159#define BCM_LED_MULTICOLOR_FDX 0x3
160#define BCM_LED_MULTICOLOR_OFF 0x4
161#define BCM_LED_MULTICOLOR_ON 0x5
162#define BCM_LED_MULTICOLOR_ALT 0x6
163#define BCM_LED_MULTICOLOR_FLASH 0x7
164#define BCM_LED_MULTICOLOR_LINK 0x8
165#define BCM_LED_MULTICOLOR_ACT 0x9
166#define BCM_LED_MULTICOLOR_PROGRAM 0xa
151 167
152/* 168/*
153 * BCM5482: Shadow registers 169 * BCM5482: Shadow registers
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index a420c07904bc..337d5049ff93 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
294extern int __ceph_open_session(struct ceph_client *client, 294extern int __ceph_open_session(struct ceph_client *client,
295 unsigned long started); 295 unsigned long started);
296extern int ceph_open_session(struct ceph_client *client); 296extern int ceph_open_session(struct ceph_client *client);
297int ceph_wait_for_latest_osdmap(struct ceph_client *client,
298 unsigned long timeout);
297 299
298/* pagevec.c */ 300/* pagevec.c */
299extern void ceph_release_page_vector(struct page **pages, int num_pages); 301extern void ceph_release_page_vector(struct page **pages, int num_pages);
diff --git a/include/linux/device.h b/include/linux/device.h
index b425a7ee04ce..4e6987e11f68 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -49,8 +49,6 @@ struct bus_attribute {
49 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); 49 ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
50}; 50};
51 51
52#define BUS_ATTR(_name, _mode, _show, _store) \
53 struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
54#define BUS_ATTR_RW(_name) \ 52#define BUS_ATTR_RW(_name) \
55 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) 53 struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
56#define BUS_ATTR_RO(_name) \ 54#define BUS_ATTR_RO(_name) \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ea35263eb76b..11943b60f208 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
203#define pud_huge(x) 0 203#define pud_huge(x) 0
204#define is_hugepage_only_range(mm, addr, len) 0 204#define is_hugepage_only_range(mm, addr, len) 0
205#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) 205#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
206#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
207#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ 206#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
208 src_addr, pagep) ({ BUG(); 0; }) 207 src_addr, pagep) ({ BUG(); 0; })
209#define huge_pte_offset(mm, address, sz) 0 208#define huge_pte_offset(mm, address, sz) 0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
234{ 233{
235 BUG(); 234 BUG();
236} 235}
236static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
237 struct vm_area_struct *vma, unsigned long address,
238 unsigned int flags)
239{
240 BUG();
241 return 0;
242}
237 243
238#endif /* !CONFIG_HUGETLB_PAGE */ 244#endif /* !CONFIG_HUGETLB_PAGE */
239/* 245/*
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d6160d479b14..7ae8de5ad0f2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -195,7 +195,7 @@ struct irq_data {
195 * IRQD_LEVEL - Interrupt is level triggered 195 * IRQD_LEVEL - Interrupt is level triggered
196 * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup 196 * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
197 * from suspend 197 * from suspend
198 * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process 198 * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
199 * context 199 * context
200 * IRQD_IRQ_DISABLED - Disabled state of the interrupt 200 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
201 * IRQD_IRQ_MASKED - Masked state of the interrupt 201 * IRQD_IRQ_MASKED - Masked state of the interrupt
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 626179077bb0..0f049b384ccd 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -158,8 +158,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
158 * Legacy platforms not converted to DT yet must use this to init 158 * Legacy platforms not converted to DT yet must use this to init
159 * their GIC 159 * their GIC
160 */ 160 */
161void gic_init(unsigned int nr, int start, 161void gic_init(void __iomem *dist , void __iomem *cpu);
162 void __iomem *dist , void __iomem *cpu);
163 162
164int gicv2m_init(struct fwnode_handle *parent_handle, 163int gicv2m_init(struct fwnode_handle *parent_handle,
165 struct irq_domain *parent); 164 struct irq_domain *parent);
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 8c3f8c14eeaa..da676cdbd727 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -38,22 +38,13 @@ struct vmcoredd_node {
38 38
39#ifdef CONFIG_PROC_KCORE 39#ifdef CONFIG_PROC_KCORE
40void __init kclist_add(struct kcore_list *, void *, size_t, int type); 40void __init kclist_add(struct kcore_list *, void *, size_t, int type);
41static inline 41
42void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) 42extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
43{
44 m->vaddr = (unsigned long)vaddr;
45 kclist_add(m, addr, sz, KCORE_REMAP);
46}
47#else 43#else
48static inline 44static inline
49void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) 45void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
50{ 46{
51} 47}
52
53static inline
54void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
55{
56}
57#endif 48#endif
58 49
59#endif /* _LINUX_KCORE_H */ 50#endif /* _LINUX_KCORE_H */
diff --git a/include/linux/list.h b/include/linux/list.h
index 79626b5ab36c..58aa3adf94e6 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
207} 207}
208 208
209/** 209/**
210 * list_is_first -- tests whether @ list is the first entry in list @head 210 * list_is_first -- tests whether @list is the first entry in list @head
211 * @list: the entry to test 211 * @list: the entry to test
212 * @head: the head of the list 212 * @head: the head of the list
213 */ 213 */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index b26ea9077384..0343c81d4c5f 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -557,7 +557,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
557 557
558int mlx5_core_create_dct(struct mlx5_core_dev *dev, 558int mlx5_core_create_dct(struct mlx5_core_dev *dev,
559 struct mlx5_core_dct *qp, 559 struct mlx5_core_dct *qp,
560 u32 *in, int inlen); 560 u32 *in, int inlen,
561 u32 *out, int outlen);
561int mlx5_core_create_qp(struct mlx5_core_dev *dev, 562int mlx5_core_create_qp(struct mlx5_core_dev *dev,
562 struct mlx5_core_qp *qp, 563 struct mlx5_core_qp *qp,
563 u32 *in, 564 u32 *in,
diff --git a/include/linux/net.h b/include/linux/net.h
index 651fca72286c..c606c72311d0 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -83,6 +83,12 @@ enum sock_type {
83 83
84#endif /* ARCH_HAS_SOCKET_TYPES */ 84#endif /* ARCH_HAS_SOCKET_TYPES */
85 85
86/**
87 * enum sock_shutdown_cmd - Shutdown types
88 * @SHUT_RD: shutdown receptions
89 * @SHUT_WR: shutdown transmissions
90 * @SHUT_RDWR: shutdown receptions/transmissions
91 */
86enum sock_shutdown_cmd { 92enum sock_shutdown_cmd {
87 SHUT_RD, 93 SHUT_RD,
88 SHUT_WR, 94 SHUT_WR,
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 4eb26d278046..280ae96dc4c3 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
41 41
42/* 42/*
43 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. 43 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
44 * If specified range includes migrate types other than MOVABLE or CMA,
45 * this will fail with -EBUSY.
46 *
47 * For isolating all pages in the range finally, the caller have to
48 * free all pages in the range. test_page_isolated() can be used for
49 * test it.
50 *
51 * The following flags are allowed (they can be combined in a bit mask)
52 * SKIP_HWPOISON - ignore hwpoison pages
53 * REPORT_FAILURE - report details about the failure to isolate the range
54 */ 44 */
55int 45int
56start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, 46start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
diff --git a/include/linux/parport.h b/include/linux/parport.h
index f41f1d041e2c..397607a0c0eb 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
460 void *, size_t, int); 460 void *, size_t, int);
461 461
462/* IEEE1284.3 functions */ 462/* IEEE1284.3 functions */
463#define daisy_dev_name "Device ID probe"
464extern int parport_daisy_init (struct parport *port); 463extern int parport_daisy_init (struct parport *port);
465extern void parport_daisy_fini (struct parport *port); 464extern void parport_daisy_fini (struct parport *port);
466extern struct pardevice *parport_open (int devnum, const char *name); 465extern struct pardevice *parport_open (int devnum, const char *name);
@@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
469extern void parport_daisy_deselect_all (struct parport *port); 468extern void parport_daisy_deselect_all (struct parport *port);
470extern int parport_daisy_select (struct parport *port, int daisy, int mode); 469extern int parport_daisy_select (struct parport *port, int daisy, int mode);
471 470
472#ifdef CONFIG_PARPORT_1284
473extern int daisy_drv_init(void);
474extern void daisy_drv_exit(void);
475#else
476static inline int daisy_drv_init(void)
477{
478 return 0;
479}
480
481static inline void daisy_drv_exit(void) {}
482#endif
483
484/* Lowlevel drivers _can_ call this support function to handle irqs. */ 471/* Lowlevel drivers _can_ call this support function to handle irqs. */
485static inline void parport_generic_irq(struct parport *port) 472static inline void parport_generic_irq(struct parport *port)
486{ 473{
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index a867637e172d..9e46678edb2a 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL+ */ 1/* SPDX-License-Identifier: GPL-2.0+ */
2 2
3/* 3/*
4 * AMD FCH gpio driver platform-data 4 * AMD FCH gpio driver platform-data
diff --git a/include/linux/platform_data/pca954x.h b/include/linux/platform_data/pca954x.h
deleted file mode 100644
index 1712677d5904..000000000000
--- a/include/linux/platform_data/pca954x.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 *
3 * pca954x.h - I2C multiplexer/switch support
4 *
5 * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
6 * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
7 * Michael Lawnick <michael.lawnick.ext@nsn.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24
25#ifndef _LINUX_I2C_PCA954X_H
26#define _LINUX_I2C_PCA954X_H
27
28/* Platform data for the PCA954x I2C multiplexers */
29
30/* Per channel initialisation data:
31 * @adap_id: bus number for the adapter. 0 = don't care
32 * @deselect_on_exit: set this entry to 1, if your H/W needs deselection
33 * of this channel after transaction.
34 *
35 */
36struct pca954x_platform_mode {
37 int adap_id;
38 unsigned int deselect_on_exit:1;
39 unsigned int class;
40};
41
42/* Per mux/switch data, used with i2c_register_board_info */
43struct pca954x_platform_data {
44 struct pca954x_platform_mode *modes;
45 int num_modes;
46};
47
48#endif /* _LINUX_I2C_PCA954X_H */
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 14d558146aea..20f3e3f029b9 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
330/* 330/*
331 * This one is special, since it doesn't actually clear the bit, rather it 331 * This one is special, since it doesn't actually clear the bit, rather it
332 * sets the corresponding bit in the ->cleared mask instead. Paired with 332 * sets the corresponding bit in the ->cleared mask instead. Paired with
333 * the caller doing sbitmap_batch_clear() if a given index is full, which 333 * the caller doing sbitmap_deferred_clear() if a given index is full, which
334 * will clear the previously freed entries in the corresponding ->word. 334 * will clear the previously freed entries in the corresponding ->word.
335 */ 335 */
336static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) 336static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index ae5655197698..e412c092c1e8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
418 set_thread_flag(TIF_RESTORE_SIGMASK); 418 set_thread_flag(TIF_RESTORE_SIGMASK);
419 WARN_ON(!test_thread_flag(TIF_SIGPENDING)); 419 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
420} 420}
421
422static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
423{
424 clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
425}
426
421static inline void clear_restore_sigmask(void) 427static inline void clear_restore_sigmask(void)
422{ 428{
423 clear_thread_flag(TIF_RESTORE_SIGMASK); 429 clear_thread_flag(TIF_RESTORE_SIGMASK);
424} 430}
431static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
432{
433 return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
434}
425static inline bool test_restore_sigmask(void) 435static inline bool test_restore_sigmask(void)
426{ 436{
427 return test_thread_flag(TIF_RESTORE_SIGMASK); 437 return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
439 current->restore_sigmask = true; 449 current->restore_sigmask = true;
440 WARN_ON(!test_thread_flag(TIF_SIGPENDING)); 450 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
441} 451}
452static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
453{
454 tsk->restore_sigmask = false;
455}
442static inline void clear_restore_sigmask(void) 456static inline void clear_restore_sigmask(void)
443{ 457{
444 current->restore_sigmask = false; 458 current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
447{ 461{
448 return current->restore_sigmask; 462 return current->restore_sigmask;
449} 463}
464static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
465{
466 return tsk->restore_sigmask;
467}
450static inline bool test_and_clear_restore_sigmask(void) 468static inline bool test_and_clear_restore_sigmask(void)
451{ 469{
452 if (!current->restore_sigmask) 470 if (!current->restore_sigmask)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 11b45f7ae405..9449b19c5f10 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -32,6 +32,8 @@
32#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) 32#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
33/* Use GFP_DMA memory */ 33/* Use GFP_DMA memory */
34#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) 34#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
35/* Use GFP_DMA32 memory */
36#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
35/* DEBUG: Store the last owner for bug hunting */ 37/* DEBUG: Store the last owner for bug hunting */
36#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) 38#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
37/* Panic if kmem_cache_create() fails */ 39/* Panic if kmem_cache_create() fails */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6016daeecee4..b57cd8bf96e2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -26,7 +26,7 @@ typedef __kernel_sa_family_t sa_family_t;
26/* 26/*
27 * 1003.1g requires sa_family_t and that sa_data is char. 27 * 1003.1g requires sa_family_t and that sa_data is char.
28 */ 28 */
29 29
30struct sockaddr { 30struct sockaddr {
31 sa_family_t sa_family; /* address family, AF_xxx */ 31 sa_family_t sa_family; /* address family, AF_xxx */
32 char sa_data[14]; /* 14 bytes of protocol address */ 32 char sa_data[14]; /* 14 bytes of protocol address */
@@ -44,7 +44,7 @@ struct linger {
44 * system, not 4.3. Thus msg_accrights(len) are now missing. They 44 * system, not 4.3. Thus msg_accrights(len) are now missing. They
45 * belong in an obscure libc emulation or the bin. 45 * belong in an obscure libc emulation or the bin.
46 */ 46 */
47 47
48struct msghdr { 48struct msghdr {
49 void *msg_name; /* ptr to socket address structure */ 49 void *msg_name; /* ptr to socket address structure */
50 int msg_namelen; /* size of socket address structure */ 50 int msg_namelen; /* size of socket address structure */
@@ -54,7 +54,7 @@ struct msghdr {
54 unsigned int msg_flags; /* flags on received message */ 54 unsigned int msg_flags; /* flags on received message */
55 struct kiocb *msg_iocb; /* ptr to iocb for async requests */ 55 struct kiocb *msg_iocb; /* ptr to iocb for async requests */
56}; 56};
57 57
58struct user_msghdr { 58struct user_msghdr {
59 void __user *msg_name; /* ptr to socket address structure */ 59 void __user *msg_name; /* ptr to socket address structure */
60 int msg_namelen; /* size of socket address structure */ 60 int msg_namelen; /* size of socket address structure */
@@ -122,7 +122,7 @@ struct cmsghdr {
122 * inside range, given by msg->msg_controllen before using 122 * inside range, given by msg->msg_controllen before using
123 * ancillary object DATA. --ANK (980731) 123 * ancillary object DATA. --ANK (980731)
124 */ 124 */
125 125
126static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, 126static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
127 struct cmsghdr *__cmsg) 127 struct cmsghdr *__cmsg)
128{ 128{
@@ -264,10 +264,10 @@ struct ucred {
264/* Maximum queue length specifiable by listen. */ 264/* Maximum queue length specifiable by listen. */
265#define SOMAXCONN 128 265#define SOMAXCONN 128
266 266
267/* Flags we can use with send/ and recv. 267/* Flags we can use with send/ and recv.
268 Added those for 1003.1g not all are supported yet 268 Added those for 1003.1g not all are supported yet
269 */ 269 */
270 270
271#define MSG_OOB 1 271#define MSG_OOB 1
272#define MSG_PEEK 2 272#define MSG_PEEK 2
273#define MSG_DONTROUTE 4 273#define MSG_DONTROUTE 4
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 87477e1640f9..f184af1999a8 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -23,14 +23,23 @@ struct kvec {
23}; 23};
24 24
25enum iter_type { 25enum iter_type {
26 ITER_IOVEC = 0, 26 /* set if ITER_BVEC doesn't hold a bv_page ref */
27 ITER_KVEC = 2, 27 ITER_BVEC_FLAG_NO_REF = 2,
28 ITER_BVEC = 4, 28
29 ITER_PIPE = 8, 29 /* iter types */
30 ITER_DISCARD = 16, 30 ITER_IOVEC = 4,
31 ITER_KVEC = 8,
32 ITER_BVEC = 16,
33 ITER_PIPE = 32,
34 ITER_DISCARD = 64,
31}; 35};
32 36
33struct iov_iter { 37struct iov_iter {
38 /*
39 * Bit 0 is the read/write bit, set if we're writing.
40 * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
41 * the caller isn't expecting to drop a page reference when done.
42 */
34 unsigned int type; 43 unsigned int type;
35 size_t iov_offset; 44 size_t iov_offset;
36 size_t count; 45 size_t count;
@@ -84,6 +93,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
84 return i->type & (READ | WRITE); 93 return i->type & (READ | WRITE);
85} 94}
86 95
96static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
97{
98 return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
99}
100
87/* 101/*
88 * Total number of bytes covered by an iovec. 102 * Total number of bytes covered by an iovec.
89 * 103 *
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index a240ed2a0372..ff56c443180c 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
24#define vbg_debug pr_debug 24#define vbg_debug pr_debug
25#endif 25#endif
26 26
27int vbg_hgcm_connect(struct vbg_dev *gdev, 27int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
28 struct vmmdev_hgcm_service_location *loc, 28 struct vmmdev_hgcm_service_location *loc,
29 u32 *client_id, int *vbox_status); 29 u32 *client_id, int *vbox_status);
30 30
31int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status); 31int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
32 u32 client_id, int *vbox_status);
32 33
33int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, 34int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
34 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 35 u32 function, u32 timeout_ms,
35 u32 parm_count, int *vbox_status); 36 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
37 int *vbox_status);
36 38
37/** 39/**
38 * Convert a VirtualBox status code to a standard Linux kernel return value. 40 * Convert a VirtualBox status code to a standard Linux kernel return value.
diff --git a/include/misc/charlcd.h b/include/misc/charlcd.h
index 23f61850f363..1832402324ce 100644
--- a/include/misc/charlcd.h
+++ b/include/misc/charlcd.h
@@ -35,6 +35,7 @@ struct charlcd_ops {
35}; 35};
36 36
37struct charlcd *charlcd_alloc(unsigned int drvdata_size); 37struct charlcd *charlcd_alloc(unsigned int drvdata_size);
38void charlcd_free(struct charlcd *lcd);
38 39
39int charlcd_register(struct charlcd *lcd); 40int charlcd_register(struct charlcd *lcd);
40int charlcd_unregister(struct charlcd *lcd); 41int charlcd_unregister(struct charlcd *lcd);
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c745e9ccfab2..c61a1bf4e3de 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -39,7 +39,7 @@ struct tc_action {
39 struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw; 39 struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
40 struct gnet_stats_queue __percpu *cpu_qstats; 40 struct gnet_stats_queue __percpu *cpu_qstats;
41 struct tc_cookie __rcu *act_cookie; 41 struct tc_cookie __rcu *act_cookie;
42 struct tcf_chain *goto_chain; 42 struct tcf_chain __rcu *goto_chain;
43}; 43};
44#define tcf_index common.tcfa_index 44#define tcf_index common.tcfa_index
45#define tcf_refcnt common.tcfa_refcnt 45#define tcf_refcnt common.tcfa_refcnt
@@ -90,7 +90,7 @@ struct tc_action_ops {
90 int (*lookup)(struct net *net, struct tc_action **a, u32 index); 90 int (*lookup)(struct net *net, struct tc_action **a, u32 index);
91 int (*init)(struct net *net, struct nlattr *nla, 91 int (*init)(struct net *net, struct nlattr *nla,
92 struct nlattr *est, struct tc_action **act, int ovr, 92 struct nlattr *est, struct tc_action **act, int ovr,
93 int bind, bool rtnl_held, 93 int bind, bool rtnl_held, struct tcf_proto *tp,
94 struct netlink_ext_ack *extack); 94 struct netlink_ext_ack *extack);
95 int (*walk)(struct net *, struct sk_buff *, 95 int (*walk)(struct net *, struct sk_buff *,
96 struct netlink_callback *, int, 96 struct netlink_callback *, int,
@@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
181int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); 181int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
182int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); 182int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
183 183
184int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
185 struct tcf_chain **handle,
186 struct netlink_ext_ack *newchain);
187struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
188 struct tcf_chain *newchain);
184#endif /* CONFIG_NET_CLS_ACT */ 189#endif /* CONFIG_NET_CLS_ACT */
185 190
186static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, 191static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 31284c078d06..7d1a0483a17b 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -378,6 +378,7 @@ struct tcf_chain {
378 bool flushing; 378 bool flushing;
379 const struct tcf_proto_ops *tmplt_ops; 379 const struct tcf_proto_ops *tmplt_ops;
380 void *tmplt_priv; 380 void *tmplt_priv;
381 struct rcu_head rcu;
381}; 382};
382 383
383struct tcf_block { 384struct tcf_block {
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 32ee65a30aff..1c6e6c0766ca 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
61static inline __le32 sctp_compute_cksum(const struct sk_buff *skb, 61static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
62 unsigned int offset) 62 unsigned int offset)
63{ 63{
64 struct sctphdr *sh = sctp_hdr(skb); 64 struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
65 const struct skb_checksum_ops ops = { 65 const struct skb_checksum_ops ops = {
66 .update = sctp_csum_update, 66 .update = sctp_csum_update,
67 .combine = sctp_csum_combine, 67 .combine = sctp_csum_combine,
diff --git a/include/net/sock.h b/include/net/sock.h
index 328cb7cb7b0b..8de5ee258b93 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
710 hlist_add_head_rcu(&sk->sk_node, list); 710 hlist_add_head_rcu(&sk->sk_node, list);
711} 711}
712 712
713static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
714{
715 sock_hold(sk);
716 hlist_add_tail_rcu(&sk->sk_node, list);
717}
718
713static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) 719static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
714{ 720{
715 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); 721 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index ee8d005f56fc..eb8f01c819e6 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
56 56
57static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a) 57static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
58{ 58{
59 return a->goto_chain->index; 59 return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
60} 60}
61 61
62#endif /* __NET_TC_GACT_H */ 62#endif /* __NET_TC_GACT_H */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 61cf7dbb6782..d074b6d60f8a 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -36,7 +36,6 @@ struct xdp_umem {
36 u32 headroom; 36 u32 headroom;
37 u32 chunk_size_nohr; 37 u32 chunk_size_nohr;
38 struct user_struct *user; 38 struct user_struct *user;
39 struct pid *pid;
40 unsigned long address; 39 unsigned long address;
41 refcount_t users; 40 refcount_t users;
42 struct work_struct work; 41 struct work_struct work;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 5f24b50c9e88..059dc2bedaf6 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -7,5 +7,7 @@ no-export-headers += kvm.h
7endif 7endif
8 8
9ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),) 9ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
10ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
10no-export-headers += kvm_para.h 11no-export-headers += kvm_para.h
11endif 12endif
13endif
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c38ac9a92a7..929c8e537a14 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -502,16 +502,6 @@ union bpf_attr {
502 * Return 502 * Return
503 * 0 on success, or a negative error in case of failure. 503 * 0 on success, or a negative error in case of failure.
504 * 504 *
505 * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
506 * Description
507 * Push an element *value* in *map*. *flags* is one of:
508 *
509 * **BPF_EXIST**
510 * If the queue/stack is full, the oldest element is removed to
511 * make room for this.
512 * Return
513 * 0 on success, or a negative error in case of failure.
514 *
515 * int bpf_probe_read(void *dst, u32 size, const void *src) 505 * int bpf_probe_read(void *dst, u32 size, const void *src)
516 * Description 506 * Description
517 * For tracing programs, safely attempt to read *size* bytes from 507 * For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
1435 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) 1425 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1436 * Description 1426 * Description
1437 * Equivalent to bpf_get_socket_cookie() helper that accepts 1427 * Equivalent to bpf_get_socket_cookie() helper that accepts
1438 * *skb*, but gets socket from **struct bpf_sock_addr** contex. 1428 * *skb*, but gets socket from **struct bpf_sock_addr** context.
1439 * Return 1429 * Return
1440 * A 8-byte long non-decreasing number. 1430 * A 8-byte long non-decreasing number.
1441 * 1431 *
1442 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) 1432 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1443 * Description 1433 * Description
1444 * Equivalent to bpf_get_socket_cookie() helper that accepts 1434 * Equivalent to bpf_get_socket_cookie() helper that accepts
1445 * *skb*, but gets socket from **struct bpf_sock_ops** contex. 1435 * *skb*, but gets socket from **struct bpf_sock_ops** context.
1446 * Return 1436 * Return
1447 * A 8-byte long non-decreasing number. 1437 * A 8-byte long non-decreasing number.
1448 * 1438 *
@@ -2098,52 +2088,52 @@ union bpf_attr {
2098 * Return 2088 * Return
2099 * 0 on success, or a negative error in case of failure. 2089 * 0 on success, or a negative error in case of failure.
2100 * 2090 *
2101 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) 2091 * int bpf_rc_repeat(void *ctx)
2102 * Description 2092 * Description
2103 * This helper is used in programs implementing IR decoding, to 2093 * This helper is used in programs implementing IR decoding, to
2104 * report a successfully decoded key press with *scancode*, 2094 * report a successfully decoded repeat key message. This delays
2105 * *toggle* value in the given *protocol*. The scancode will be 2095 * the generation of a key up event for previously generated
2106 * translated to a keycode using the rc keymap, and reported as 2096 * key down event.
2107 * an input key down event. After a period a key up event is
2108 * generated. This period can be extended by calling either
2109 * **bpf_rc_keydown**\ () again with the same values, or calling
2110 * **bpf_rc_repeat**\ ().
2111 * 2097 *
2112 * Some protocols include a toggle bit, in case the button was 2098 * Some IR protocols like NEC have a special IR message for
2113 * released and pressed again between consecutive scancodes. 2099 * repeating last button, for when a button is held down.
2114 * 2100 *
2115 * The *ctx* should point to the lirc sample as passed into 2101 * The *ctx* should point to the lirc sample as passed into
2116 * the program. 2102 * the program.
2117 * 2103 *
2118 * The *protocol* is the decoded protocol number (see
2119 * **enum rc_proto** for some predefined values).
2120 *
2121 * This helper is only available is the kernel was compiled with 2104 * This helper is only available is the kernel was compiled with
2122 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2105 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2123 * "**y**". 2106 * "**y**".
2124 * Return 2107 * Return
2125 * 0 2108 * 0
2126 * 2109 *
2127 * int bpf_rc_repeat(void *ctx) 2110 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2128 * Description 2111 * Description
2129 * This helper is used in programs implementing IR decoding, to 2112 * This helper is used in programs implementing IR decoding, to
2130 * report a successfully decoded repeat key message. This delays 2113 * report a successfully decoded key press with *scancode*,
2131 * the generation of a key up event for previously generated 2114 * *toggle* value in the given *protocol*. The scancode will be
2132 * key down event. 2115 * translated to a keycode using the rc keymap, and reported as
2116 * an input key down event. After a period a key up event is
2117 * generated. This period can be extended by calling either
2118 * **bpf_rc_keydown**\ () again with the same values, or calling
2119 * **bpf_rc_repeat**\ ().
2133 * 2120 *
2134 * Some IR protocols like NEC have a special IR message for 2121 * Some protocols include a toggle bit, in case the button was
2135 * repeating last button, for when a button is held down. 2122 * released and pressed again between consecutive scancodes.
2136 * 2123 *
2137 * The *ctx* should point to the lirc sample as passed into 2124 * The *ctx* should point to the lirc sample as passed into
2138 * the program. 2125 * the program.
2139 * 2126 *
2127 * The *protocol* is the decoded protocol number (see
2128 * **enum rc_proto** for some predefined values).
2129 *
2140 * This helper is only available is the kernel was compiled with 2130 * This helper is only available is the kernel was compiled with
2141 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2131 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2142 * "**y**". 2132 * "**y**".
2143 * Return 2133 * Return
2144 * 0 2134 * 0
2145 * 2135 *
2146 * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb) 2136 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
2147 * Description 2137 * Description
2148 * Return the cgroup v2 id of the socket associated with the *skb*. 2138 * Return the cgroup v2 id of the socket associated with the *skb*.
2149 * This is roughly similar to the **bpf_get_cgroup_classid**\ () 2139 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
2159 * Return 2149 * Return
2160 * The id is returned or 0 in case the id could not be retrieved. 2150 * The id is returned or 0 in case the id could not be retrieved.
2161 * 2151 *
2162 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2163 * Description
2164 * Return id of cgroup v2 that is ancestor of cgroup associated
2165 * with the *skb* at the *ancestor_level*. The root cgroup is at
2166 * *ancestor_level* zero and each step down the hierarchy
2167 * increments the level. If *ancestor_level* == level of cgroup
2168 * associated with *skb*, then return value will be same as that
2169 * of **bpf_skb_cgroup_id**\ ().
2170 *
2171 * The helper is useful to implement policies based on cgroups
2172 * that are upper in hierarchy than immediate cgroup associated
2173 * with *skb*.
2174 *
2175 * The format of returned id and helper limitations are same as in
2176 * **bpf_skb_cgroup_id**\ ().
2177 * Return
2178 * The id is returned or 0 in case the id could not be retrieved.
2179 *
2180 * u64 bpf_get_current_cgroup_id(void) 2152 * u64 bpf_get_current_cgroup_id(void)
2181 * Return 2153 * Return
2182 * A 64-bit integer containing the current cgroup id based 2154 * A 64-bit integer containing the current cgroup id based
2183 * on the cgroup within which the current task is running. 2155 * on the cgroup within which the current task is running.
2184 * 2156 *
2185 * void* get_local_storage(void *map, u64 flags) 2157 * void *bpf_get_local_storage(void *map, u64 flags)
2186 * Description 2158 * Description
2187 * Get the pointer to the local storage area. 2159 * Get the pointer to the local storage area.
2188 * The type and the size of the local storage is defined 2160 * The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
2209 * Return 2181 * Return
2210 * 0 on success, or a negative error in case of failure. 2182 * 0 on success, or a negative error in case of failure.
2211 * 2183 *
2184 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2185 * Description
2186 * Return id of cgroup v2 that is ancestor of cgroup associated
2187 * with the *skb* at the *ancestor_level*. The root cgroup is at
2188 * *ancestor_level* zero and each step down the hierarchy
2189 * increments the level. If *ancestor_level* == level of cgroup
2190 * associated with *skb*, then return value will be same as that
2191 * of **bpf_skb_cgroup_id**\ ().
2192 *
2193 * The helper is useful to implement policies based on cgroups
2194 * that are upper in hierarchy than immediate cgroup associated
2195 * with *skb*.
2196 *
2197 * The format of returned id and helper limitations are same as in
2198 * **bpf_skb_cgroup_id**\ ().
2199 * Return
2200 * The id is returned or 0 in case the id could not be retrieved.
2201 *
2212 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2202 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2213 * Description 2203 * Description
2214 * Look for TCP socket matching *tuple*, optionally in a child 2204 * Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
2289 * Return 2279 * Return
2290 * 0 on success, or a negative error in case of failure. 2280 * 0 on success, or a negative error in case of failure.
2291 * 2281 *
2282 * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
2283 * Description
2284 * Push an element *value* in *map*. *flags* is one of:
2285 *
2286 * **BPF_EXIST**
2287 * If the queue/stack is full, the oldest element is
2288 * removed to make room for this.
2289 * Return
2290 * 0 on success, or a negative error in case of failure.
2291 *
2292 * int bpf_map_pop_elem(struct bpf_map *map, void *value) 2292 * int bpf_map_pop_elem(struct bpf_map *map, void *value)
2293 * Description 2293 * Description
2294 * Pop an element from *map*. 2294 * Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
2343 * Return 2343 * Return
2344 * 0 2344 * 0
2345 * 2345 *
2346 * int bpf_spin_lock(struct bpf_spin_lock *lock)
2347 * Description
2348 * Acquire a spinlock represented by the pointer *lock*, which is
2349 * stored as part of a value of a map. Taking the lock allows to
2350 * safely update the rest of the fields in that value. The
2351 * spinlock can (and must) later be released with a call to
2352 * **bpf_spin_unlock**\ (\ *lock*\ ).
2353 *
2354 * Spinlocks in BPF programs come with a number of restrictions
2355 * and constraints:
2356 *
2357 * * **bpf_spin_lock** objects are only allowed inside maps of
2358 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
2359 * list could be extended in the future).
2360 * * BTF description of the map is mandatory.
2361 * * The BPF program can take ONE lock at a time, since taking two
2362 * or more could cause dead locks.
2363 * * Only one **struct bpf_spin_lock** is allowed per map element.
2364 * * When the lock is taken, calls (either BPF to BPF or helpers)
2365 * are not allowed.
2366 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
2367 * allowed inside a spinlock-ed region.
2368 * * The BPF program MUST call **bpf_spin_unlock**\ () to release
2369 * the lock, on all execution paths, before it returns.
2370 * * The BPF program can access **struct bpf_spin_lock** only via
2371 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
2372 * helpers. Loading or storing data into the **struct
2373 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
2374 * * To use the **bpf_spin_lock**\ () helper, the BTF description
2375 * of the map value must be a struct and have **struct
2376 * bpf_spin_lock** *anyname*\ **;** field at the top level.
2377 * Nested lock inside another struct is not allowed.
2378 * * The **struct bpf_spin_lock** *lock* field in a map value must
2379 * be aligned on a multiple of 4 bytes in that value.
2380 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
2381 * the **bpf_spin_lock** field to user space.
2382 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
2383 * a BPF program, do not update the **bpf_spin_lock** field.
2384 * * **bpf_spin_lock** cannot be on the stack or inside a
2385 * networking packet (it can only be inside of a map values).
2386 * * **bpf_spin_lock** is available to root only.
2387 * * Tracing programs and socket filter programs cannot use
2388 * **bpf_spin_lock**\ () due to insufficient preemption checks
2389 * (but this may change in the future).
2390 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
2391 * Return
2392 * 0
2393 *
2394 * int bpf_spin_unlock(struct bpf_spin_lock *lock)
2395 * Description
2396 * Release the *lock* previously locked by a call to
2397 * **bpf_spin_lock**\ (\ *lock*\ ).
2398 * Return
2399 * 0
2400 *
2346 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) 2401 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
2347 * Description 2402 * Description
2348 * This helper gets a **struct bpf_sock** pointer such 2403 * This helper gets a **struct bpf_sock** pointer such
2349 * that all the fields in bpf_sock can be accessed. 2404 * that all the fields in this **bpf_sock** can be accessed.
2350 * Return 2405 * Return
2351 * A **struct bpf_sock** pointer on success, or NULL in 2406 * A **struct bpf_sock** pointer on success, or **NULL** in
2352 * case of failure. 2407 * case of failure.
2353 * 2408 *
2354 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) 2409 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
2355 * Description 2410 * Description
2356 * This helper gets a **struct bpf_tcp_sock** pointer from a 2411 * This helper gets a **struct bpf_tcp_sock** pointer from a
2357 * **struct bpf_sock** pointer. 2412 * **struct bpf_sock** pointer.
2358 *
2359 * Return 2413 * Return
2360 * A **struct bpf_tcp_sock** pointer on success, or NULL in 2414 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
2361 * case of failure. 2415 * case of failure.
2362 * 2416 *
2363 * int bpf_skb_ecn_set_ce(struct sk_buf *skb) 2417 * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
2364 * Description 2418 * Description
2365 * Sets ECN of IP header to ce (congestion encountered) if 2419 * Set ECN (Explicit Congestion Notification) field of IP header
2366 * current value is ect (ECN capable). Works with IPv6 and IPv4. 2420 * to **CE** (Congestion Encountered) if current value is **ECT**
2367 * Return 2421 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
2368 * 1 if set, 0 if not set. 2422 * and IPv4.
2423 * Return
2424 * 1 if the **CE** flag is set (either by the current helper call
2425 * or because it was already present), 0 if it is not set.
2426 *
2427 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
2428 * Description
2429 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
2430 * **bpf_sk_release**\ () is unnecessary and not allowed.
2431 * Return
2432 * A **struct bpf_sock** pointer on success, or **NULL** in
2433 * case of failure.
2369 */ 2434 */
2370#define __BPF_FUNC_MAPPER(FN) \ 2435#define __BPF_FUNC_MAPPER(FN) \
2371 FN(unspec), \ 2436 FN(unspec), \
@@ -2465,7 +2530,8 @@ union bpf_attr {
2465 FN(spin_unlock), \ 2530 FN(spin_unlock), \
2466 FN(sk_fullsock), \ 2531 FN(sk_fullsock), \
2467 FN(tcp_sock), \ 2532 FN(tcp_sock), \
2468 FN(skb_ecn_set_ce), 2533 FN(skb_ecn_set_ce), \
2534 FN(get_listener_sock),
2469 2535
2470/* integer value in 'imm' field of BPF_CALL instruction selects which helper 2536/* integer value in 'imm' field of BPF_CALL instruction selects which helper
2471 * function eBPF program intends to call 2537 * function eBPF program intends to call
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 0e68024f36c7..26f39816af14 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
102#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32 102#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
103#endif 103#endif
104 104
105/* vmmdev_request_header.requestor defines */
106
107/* Requestor user not given. */
108#define VMMDEV_REQUESTOR_USR_NOT_GIVEN 0x00000000
109/* The kernel driver (vboxguest) is the requestor. */
110#define VMMDEV_REQUESTOR_USR_DRV 0x00000001
111/* Some other kernel driver is the requestor. */
112#define VMMDEV_REQUESTOR_USR_DRV_OTHER 0x00000002
113/* The root or a admin user is the requestor. */
114#define VMMDEV_REQUESTOR_USR_ROOT 0x00000003
115/* Regular joe user is making the request. */
116#define VMMDEV_REQUESTOR_USR_USER 0x00000006
117/* User classification mask. */
118#define VMMDEV_REQUESTOR_USR_MASK 0x00000007
119
120/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
121#define VMMDEV_REQUESTOR_KERNEL 0x00000000
122/* User mode request. */
123#define VMMDEV_REQUESTOR_USERMODE 0x00000008
124/* User or kernel mode classification mask. */
125#define VMMDEV_REQUESTOR_MODE_MASK 0x00000008
126
127/* Don't know the physical console association of the requestor. */
128#define VMMDEV_REQUESTOR_CON_DONT_KNOW 0x00000000
129/*
130 * The request originates with a process that is NOT associated with the
131 * physical console.
132 */
133#define VMMDEV_REQUESTOR_CON_NO 0x00000010
134/* Requestor process is associated with the physical console. */
135#define VMMDEV_REQUESTOR_CON_YES 0x00000020
136/* Console classification mask. */
137#define VMMDEV_REQUESTOR_CON_MASK 0x00000030
138
139/* Requestor is member of special VirtualBox user group. */
140#define VMMDEV_REQUESTOR_GRP_VBOX 0x00000080
141
142/* Note: trust level is for windows guests only, linux always uses not-given */
143/* Requestor trust level: Unspecified */
144#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000
145/* Requestor trust level: Untrusted (SID S-1-16-0) */
146#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED 0x00001000
147/* Requestor trust level: Untrusted (SID S-1-16-4096) */
148#define VMMDEV_REQUESTOR_TRUST_LOW 0x00002000
149/* Requestor trust level: Medium (SID S-1-16-8192) */
150#define VMMDEV_REQUESTOR_TRUST_MEDIUM 0x00003000
151/* Requestor trust level: Medium plus (SID S-1-16-8448) */
152#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS 0x00004000
153/* Requestor trust level: High (SID S-1-16-12288) */
154#define VMMDEV_REQUESTOR_TRUST_HIGH 0x00005000
155/* Requestor trust level: System (SID S-1-16-16384) */
156#define VMMDEV_REQUESTOR_TRUST_SYSTEM 0x00006000
157/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
158#define VMMDEV_REQUESTOR_TRUST_PROTECTED 0x00007000
159/* Requestor trust level mask */
160#define VMMDEV_REQUESTOR_TRUST_MASK 0x00007000
161
162/* Requestor is using the less trusted user device node (/dev/vboxuser) */
163#define VMMDEV_REQUESTOR_USER_DEVICE 0x00008000
164
105/** HGCM service location types. */ 165/** HGCM service location types. */
106enum vmmdev_hgcm_service_location_type { 166enum vmmdev_hgcm_service_location_type {
107 VMMDEV_HGCM_LOC_INVALID = 0, 167 VMMDEV_HGCM_LOC_INVALID = 0,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 62f6bced3a3c..afca36f53c49 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
136 136
137void *bpf_map_area_alloc(size_t size, int numa_node) 137void *bpf_map_area_alloc(size_t size, int numa_node)
138{ 138{
139 /* We definitely need __GFP_NORETRY, so OOM killer doesn't 139 /* We really just want to fail instead of triggering OOM killer
140 * trigger under memory pressure as we really just want to 140 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
141 * fail instead. 141 * which is used for lower order allocation requests.
142 *
143 * It has been observed that higher order allocation requests done by
144 * vmalloc with __GFP_NORETRY being set might fail due to not trying
145 * to reclaim memory from the page cache, thus we set
146 * __GFP_RETRY_MAYFAIL to avoid such situations.
142 */ 147 */
143 const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO; 148
149 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
144 void *area; 150 void *area;
145 151
146 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 152 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
147 area = kmalloc_node(size, GFP_USER | flags, numa_node); 153 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
154 numa_node);
148 if (area != NULL) 155 if (area != NULL)
149 return area; 156 return area;
150 } 157 }
151 158
152 return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags, 159 return __vmalloc_node_flags_caller(size, numa_node,
153 __builtin_return_address(0)); 160 GFP_KERNEL | __GFP_RETRY_MAYFAIL |
161 flags, __builtin_return_address(0));
154} 162}
155 163
156void bpf_map_area_free(void *area) 164void bpf_map_area_free(void *area)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ce166a002d16..fd502c1f71eb 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
212 int access_size; 212 int access_size;
213 s64 msize_smax_value; 213 s64 msize_smax_value;
214 u64 msize_umax_value; 214 u64 msize_umax_value;
215 int ptr_id; 215 int ref_obj_id;
216 int func_id; 216 int func_id;
217}; 217};
218 218
@@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
346 type == PTR_TO_TCP_SOCK_OR_NULL; 346 type == PTR_TO_TCP_SOCK_OR_NULL;
347} 347}
348 348
349static bool type_is_refcounted(enum bpf_reg_type type)
350{
351 return type == PTR_TO_SOCKET;
352}
353
354static bool type_is_refcounted_or_null(enum bpf_reg_type type)
355{
356 return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
357}
358
359static bool reg_is_refcounted(const struct bpf_reg_state *reg)
360{
361 return type_is_refcounted(reg->type);
362}
363
364static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) 349static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
365{ 350{
366 return reg->type == PTR_TO_MAP_VALUE && 351 return reg->type == PTR_TO_MAP_VALUE &&
367 map_value_has_spin_lock(reg->map_ptr); 352 map_value_has_spin_lock(reg->map_ptr);
368} 353}
369 354
370static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg) 355static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
371{ 356{
372 return type_is_refcounted_or_null(reg->type); 357 return type == PTR_TO_SOCKET ||
358 type == PTR_TO_SOCKET_OR_NULL ||
359 type == PTR_TO_TCP_SOCK ||
360 type == PTR_TO_TCP_SOCK_OR_NULL;
373} 361}
374 362
375static bool arg_type_is_refcounted(enum bpf_arg_type type) 363static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
376{ 364{
377 return type == ARG_PTR_TO_SOCKET; 365 return type == ARG_PTR_TO_SOCK_COMMON;
378} 366}
379 367
380/* Determine whether the function releases some resources allocated by another 368/* Determine whether the function releases some resources allocated by another
@@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
392 func_id == BPF_FUNC_sk_lookup_udp; 380 func_id == BPF_FUNC_sk_lookup_udp;
393} 381}
394 382
383static bool is_ptr_cast_function(enum bpf_func_id func_id)
384{
385 return func_id == BPF_FUNC_tcp_sock ||
386 func_id == BPF_FUNC_sk_fullsock;
387}
388
395/* string representation of 'enum bpf_reg_type' */ 389/* string representation of 'enum bpf_reg_type' */
396static const char * const reg_type_str[] = { 390static const char * const reg_type_str[] = {
397 [NOT_INIT] = "?", 391 [NOT_INIT] = "?",
@@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
466 verbose(env, ",call_%d", func(env, reg)->callsite); 460 verbose(env, ",call_%d", func(env, reg)->callsite);
467 } else { 461 } else {
468 verbose(env, "(id=%d", reg->id); 462 verbose(env, "(id=%d", reg->id);
463 if (reg_type_may_be_refcounted_or_null(t))
464 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
469 if (t != SCALAR_VALUE) 465 if (t != SCALAR_VALUE)
470 verbose(env, ",off=%d", reg->off); 466 verbose(env, ",off=%d", reg->off);
471 if (type_is_pkt_pointer(t)) 467 if (type_is_pkt_pointer(t))
@@ -2414,16 +2410,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
2414 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ 2410 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
2415 if (!type_is_sk_pointer(type)) 2411 if (!type_is_sk_pointer(type))
2416 goto err_type; 2412 goto err_type;
2417 } else if (arg_type == ARG_PTR_TO_SOCKET) { 2413 if (reg->ref_obj_id) {
2418 expected_type = PTR_TO_SOCKET; 2414 if (meta->ref_obj_id) {
2419 if (type != expected_type) 2415 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
2420 goto err_type; 2416 regno, reg->ref_obj_id,
2421 if (meta->ptr_id || !reg->id) { 2417 meta->ref_obj_id);
2422 verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n", 2418 return -EFAULT;
2423 meta->ptr_id, reg->id); 2419 }
2424 return -EFAULT; 2420 meta->ref_obj_id = reg->ref_obj_id;
2425 } 2421 }
2426 meta->ptr_id = reg->id;
2427 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { 2422 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
2428 if (meta->func_id == BPF_FUNC_spin_lock) { 2423 if (meta->func_id == BPF_FUNC_spin_lock) {
2429 if (process_spin_lock(env, regno, true)) 2424 if (process_spin_lock(env, regno, true))
@@ -2740,32 +2735,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2740 return true; 2735 return true;
2741} 2736}
2742 2737
2743static bool check_refcount_ok(const struct bpf_func_proto *fn) 2738static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
2744{ 2739{
2745 int count = 0; 2740 int count = 0;
2746 2741
2747 if (arg_type_is_refcounted(fn->arg1_type)) 2742 if (arg_type_may_be_refcounted(fn->arg1_type))
2748 count++; 2743 count++;
2749 if (arg_type_is_refcounted(fn->arg2_type)) 2744 if (arg_type_may_be_refcounted(fn->arg2_type))
2750 count++; 2745 count++;
2751 if (arg_type_is_refcounted(fn->arg3_type)) 2746 if (arg_type_may_be_refcounted(fn->arg3_type))
2752 count++; 2747 count++;
2753 if (arg_type_is_refcounted(fn->arg4_type)) 2748 if (arg_type_may_be_refcounted(fn->arg4_type))
2754 count++; 2749 count++;
2755 if (arg_type_is_refcounted(fn->arg5_type)) 2750 if (arg_type_may_be_refcounted(fn->arg5_type))
2756 count++; 2751 count++;
2757 2752
2753 /* A reference acquiring function cannot acquire
2754 * another refcounted ptr.
2755 */
2756 if (is_acquire_function(func_id) && count)
2757 return false;
2758
2758 /* We only support one arg being unreferenced at the moment, 2759 /* We only support one arg being unreferenced at the moment,
2759 * which is sufficient for the helper functions we have right now. 2760 * which is sufficient for the helper functions we have right now.
2760 */ 2761 */
2761 return count <= 1; 2762 return count <= 1;
2762} 2763}
2763 2764
2764static int check_func_proto(const struct bpf_func_proto *fn) 2765static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
2765{ 2766{
2766 return check_raw_mode_ok(fn) && 2767 return check_raw_mode_ok(fn) &&
2767 check_arg_pair_ok(fn) && 2768 check_arg_pair_ok(fn) &&
2768 check_refcount_ok(fn) ? 0 : -EINVAL; 2769 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
2769} 2770}
2770 2771
2771/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 2772/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2799,19 +2800,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2799} 2800}
2800 2801
2801static void release_reg_references(struct bpf_verifier_env *env, 2802static void release_reg_references(struct bpf_verifier_env *env,
2802 struct bpf_func_state *state, int id) 2803 struct bpf_func_state *state,
2804 int ref_obj_id)
2803{ 2805{
2804 struct bpf_reg_state *regs = state->regs, *reg; 2806 struct bpf_reg_state *regs = state->regs, *reg;
2805 int i; 2807 int i;
2806 2808
2807 for (i = 0; i < MAX_BPF_REG; i++) 2809 for (i = 0; i < MAX_BPF_REG; i++)
2808 if (regs[i].id == id) 2810 if (regs[i].ref_obj_id == ref_obj_id)
2809 mark_reg_unknown(env, regs, i); 2811 mark_reg_unknown(env, regs, i);
2810 2812
2811 bpf_for_each_spilled_reg(i, state, reg) { 2813 bpf_for_each_spilled_reg(i, state, reg) {
2812 if (!reg) 2814 if (!reg)
2813 continue; 2815 continue;
2814 if (reg_is_refcounted(reg) && reg->id == id) 2816 if (reg->ref_obj_id == ref_obj_id)
2815 __mark_reg_unknown(reg); 2817 __mark_reg_unknown(reg);
2816 } 2818 }
2817} 2819}
@@ -2820,15 +2822,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
2820 * resources. Identify all copies of the same pointer and clear the reference. 2822 * resources. Identify all copies of the same pointer and clear the reference.
2821 */ 2823 */
2822static int release_reference(struct bpf_verifier_env *env, 2824static int release_reference(struct bpf_verifier_env *env,
2823 struct bpf_call_arg_meta *meta) 2825 int ref_obj_id)
2824{ 2826{
2825 struct bpf_verifier_state *vstate = env->cur_state; 2827 struct bpf_verifier_state *vstate = env->cur_state;
2828 int err;
2826 int i; 2829 int i;
2827 2830
2831 err = release_reference_state(cur_func(env), ref_obj_id);
2832 if (err)
2833 return err;
2834
2828 for (i = 0; i <= vstate->curframe; i++) 2835 for (i = 0; i <= vstate->curframe; i++)
2829 release_reg_references(env, vstate->frame[i], meta->ptr_id); 2836 release_reg_references(env, vstate->frame[i], ref_obj_id);
2830 2837
2831 return release_reference_state(cur_func(env), meta->ptr_id); 2838 return 0;
2832} 2839}
2833 2840
2834static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 2841static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3047,7 +3054,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
3047 memset(&meta, 0, sizeof(meta)); 3054 memset(&meta, 0, sizeof(meta));
3048 meta.pkt_access = fn->pkt_access; 3055 meta.pkt_access = fn->pkt_access;
3049 3056
3050 err = check_func_proto(fn); 3057 err = check_func_proto(fn, func_id);
3051 if (err) { 3058 if (err) {
3052 verbose(env, "kernel subsystem misconfigured func %s#%d\n", 3059 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
3053 func_id_name(func_id), func_id); 3060 func_id_name(func_id), func_id);
@@ -3093,7 +3100,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
3093 return err; 3100 return err;
3094 } 3101 }
3095 } else if (is_release_function(func_id)) { 3102 } else if (is_release_function(func_id)) {
3096 err = release_reference(env, &meta); 3103 err = release_reference(env, meta.ref_obj_id);
3097 if (err) { 3104 if (err) {
3098 verbose(env, "func %s#%d reference has not been acquired before\n", 3105 verbose(env, "func %s#%d reference has not been acquired before\n",
3099 func_id_name(func_id), func_id); 3106 func_id_name(func_id), func_id);
@@ -3154,8 +3161,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
3154 3161
3155 if (id < 0) 3162 if (id < 0)
3156 return id; 3163 return id;
3157 /* For release_reference() */ 3164 /* For mark_ptr_or_null_reg() */
3158 regs[BPF_REG_0].id = id; 3165 regs[BPF_REG_0].id = id;
3166 /* For release_reference() */
3167 regs[BPF_REG_0].ref_obj_id = id;
3159 } else { 3168 } else {
3160 /* For mark_ptr_or_null_reg() */ 3169 /* For mark_ptr_or_null_reg() */
3161 regs[BPF_REG_0].id = ++env->id_gen; 3170 regs[BPF_REG_0].id = ++env->id_gen;
@@ -3170,6 +3179,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
3170 return -EINVAL; 3179 return -EINVAL;
3171 } 3180 }
3172 3181
3182 if (is_ptr_cast_function(func_id))
3183 /* For release_reference() */
3184 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
3185
3173 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 3186 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
3174 3187
3175 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 3188 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@@ -3368,7 +3381,7 @@ do_sim:
3368 *dst_reg = *ptr_reg; 3381 *dst_reg = *ptr_reg;
3369 } 3382 }
3370 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); 3383 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3371 if (!ptr_is_dst_reg) 3384 if (!ptr_is_dst_reg && ret)
3372 *dst_reg = tmp; 3385 *dst_reg = tmp;
3373 return !ret ? -EFAULT : 0; 3386 return !ret ? -EFAULT : 0;
3374} 3387}
@@ -4665,11 +4678,19 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
4665 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { 4678 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
4666 reg->type = PTR_TO_TCP_SOCK; 4679 reg->type = PTR_TO_TCP_SOCK;
4667 } 4680 }
4668 if (is_null || !(reg_is_refcounted(reg) || 4681 if (is_null) {
4669 reg_may_point_to_spin_lock(reg))) { 4682 /* We don't need id and ref_obj_id from this point
4670 /* We don't need id from this point onwards anymore, 4683 * onwards anymore, thus we should better reset it,
4671 * thus we should better reset it, so that state 4684 * so that state pruning has chances to take effect.
4672 * pruning has chances to take effect. 4685 */
4686 reg->id = 0;
4687 reg->ref_obj_id = 0;
4688 } else if (!reg_may_point_to_spin_lock(reg)) {
4689 /* For not-NULL ptr, reg->ref_obj_id will be reset
4690 * in release_reg_references().
4691 *
4692 * reg->id is still used by spin_lock ptr. Other
4693 * than spin_lock ptr type, reg->id can be reset.
4673 */ 4694 */
4674 reg->id = 0; 4695 reg->id = 0;
4675 } 4696 }
@@ -4684,11 +4705,16 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
4684{ 4705{
4685 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4706 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4686 struct bpf_reg_state *reg, *regs = state->regs; 4707 struct bpf_reg_state *reg, *regs = state->regs;
4708 u32 ref_obj_id = regs[regno].ref_obj_id;
4687 u32 id = regs[regno].id; 4709 u32 id = regs[regno].id;
4688 int i, j; 4710 int i, j;
4689 4711
4690 if (reg_is_refcounted_or_null(&regs[regno]) && is_null) 4712 if (ref_obj_id && ref_obj_id == id && is_null)
4691 release_reference_state(state, id); 4713 /* regs[regno] is in the " == NULL" branch.
4714 * No one could have freed the reference state before
4715 * doing the NULL check.
4716 */
4717 WARN_ON_ONCE(release_reference_state(state, id));
4692 4718
4693 for (i = 0; i < MAX_BPF_REG; i++) 4719 for (i = 0; i < MAX_BPF_REG; i++)
4694 mark_ptr_or_null_reg(state, &regs[i], id, is_null); 4720 mark_ptr_or_null_reg(state, &regs[i], id, is_null);
@@ -6052,15 +6078,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
6052 } 6078 }
6053 /* Propagate read liveness of registers... */ 6079 /* Propagate read liveness of registers... */
6054 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 6080 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
6055 /* We don't need to worry about FP liveness because it's read-only */ 6081 for (frame = 0; frame <= vstate->curframe; frame++) {
6056 for (i = 0; i < BPF_REG_FP; i++) { 6082 /* We don't need to worry about FP liveness, it's read-only */
6057 if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) 6083 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
6058 continue; 6084 if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
6059 if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { 6085 continue;
6060 err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i], 6086 if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
6061 &vparent->frame[vstate->curframe]->regs[i]); 6087 err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
6062 if (err) 6088 &vparent->frame[frame]->regs[i]);
6063 return err; 6089 if (err)
6090 return err;
6091 }
6064 } 6092 }
6065 } 6093 }
6066 6094
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 025f419d16f6..6754f3ecfd94 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -564,6 +564,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
564 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); 564 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
565} 565}
566 566
567static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
568{
569 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
570 return true;
571 /*
572 * When CPU hotplug is disabled, then taking the CPU down is not
573 * possible because takedown_cpu() and the architecture and
574 * subsystem specific mechanisms are not available. So the CPU
575 * which would be completely unplugged again needs to stay around
576 * in the current state.
577 */
578 return st->state <= CPUHP_BRINGUP_CPU;
579}
580
567static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, 581static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
568 enum cpuhp_state target) 582 enum cpuhp_state target)
569{ 583{
@@ -574,8 +588,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
574 st->state++; 588 st->state++;
575 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); 589 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
576 if (ret) { 590 if (ret) {
577 st->target = prev_state; 591 if (can_rollback_cpu(st)) {
578 undo_cpu_up(cpu, st); 592 st->target = prev_state;
593 undo_cpu_up(cpu, st);
594 }
579 break; 595 break;
580 } 596 }
581 } 597 }
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1032a16bd186..72d06e302e99 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7189,6 +7189,7 @@ static void perf_event_mmap_output(struct perf_event *event,
7189 struct perf_output_handle handle; 7189 struct perf_output_handle handle;
7190 struct perf_sample_data sample; 7190 struct perf_sample_data sample;
7191 int size = mmap_event->event_id.header.size; 7191 int size = mmap_event->event_id.header.size;
7192 u32 type = mmap_event->event_id.header.type;
7192 int ret; 7193 int ret;
7193 7194
7194 if (!perf_event_mmap_match(event, data)) 7195 if (!perf_event_mmap_match(event, data))
@@ -7232,6 +7233,7 @@ static void perf_event_mmap_output(struct perf_event *event,
7232 perf_output_end(&handle); 7233 perf_output_end(&handle);
7233out: 7234out:
7234 mmap_event->event_id.header.size = size; 7235 mmap_event->event_id.header.size = size;
7236 mmap_event->event_id.header.type = type;
7235} 7237}
7236 7238
7237static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 7239static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
diff --git a/kernel/futex.c b/kernel/futex.c
index c3b73b0311bc..9e40cf7be606 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3436,6 +3436,10 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int p
3436{ 3436{
3437 u32 uval, uninitialized_var(nval), mval; 3437 u32 uval, uninitialized_var(nval), mval;
3438 3438
3439 /* Futex address must be 32bit aligned */
3440 if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
3441 return -1;
3442
3439retry: 3443retry:
3440 if (get_user(uval, uaddr)) 3444 if (get_user(uval, uaddr))
3441 return -1; 3445 return -1;
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 5d5378ea0afe..f808c6a97dcc 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -84,8 +84,6 @@ EXPORT_SYMBOL(devm_request_threaded_irq);
84 * @dev: device to request interrupt for 84 * @dev: device to request interrupt for
85 * @irq: Interrupt line to allocate 85 * @irq: Interrupt line to allocate
86 * @handler: Function to be called when the IRQ occurs 86 * @handler: Function to be called when the IRQ occurs
87 * @thread_fn: function to be called in a threaded interrupt context. NULL
88 * for devices which handle everything in @handler
89 * @irqflags: Interrupt type flags 87 * @irqflags: Interrupt type flags
90 * @devname: An ascii name for the claiming device, dev_name(dev) if NULL 88 * @devname: An ascii name for the claiming device, dev_name(dev) if NULL
91 * @dev_id: A cookie passed back to the handler function 89 * @dev_id: A cookie passed back to the handler function
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9ec34a2a6638..1401afa0d58a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -196,6 +196,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
196 case IRQ_SET_MASK_OK: 196 case IRQ_SET_MASK_OK:
197 case IRQ_SET_MASK_OK_DONE: 197 case IRQ_SET_MASK_OK_DONE:
198 cpumask_copy(desc->irq_common_data.affinity, mask); 198 cpumask_copy(desc->irq_common_data.affinity, mask);
199 /* fall through */
199 case IRQ_SET_MASK_OK_NOCOPY: 200 case IRQ_SET_MASK_OK_NOCOPY:
200 irq_validate_effective_affinity(data); 201 irq_validate_effective_affinity(data);
201 irq_set_thread_affinity(desc); 202 irq_set_thread_affinity(desc);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 771e93f9c43f..6f357f4fc859 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
29#include <linux/hw_breakpoint.h> 29#include <linux/hw_breakpoint.h>
30#include <linux/cn_proc.h> 30#include <linux/cn_proc.h>
31#include <linux/compat.h> 31#include <linux/compat.h>
32#include <linux/sched/signal.h>
32 33
33/* 34/*
34 * Access another process' address space via ptrace. 35 * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
924 ret = ptrace_setsiginfo(child, &siginfo); 925 ret = ptrace_setsiginfo(child, &siginfo);
925 break; 926 break;
926 927
927 case PTRACE_GETSIGMASK: 928 case PTRACE_GETSIGMASK: {
929 sigset_t *mask;
930
928 if (addr != sizeof(sigset_t)) { 931 if (addr != sizeof(sigset_t)) {
929 ret = -EINVAL; 932 ret = -EINVAL;
930 break; 933 break;
931 } 934 }
932 935
933 if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t))) 936 if (test_tsk_restore_sigmask(child))
937 mask = &child->saved_sigmask;
938 else
939 mask = &child->blocked;
940
941 if (copy_to_user(datavp, mask, sizeof(sigset_t)))
934 ret = -EFAULT; 942 ret = -EFAULT;
935 else 943 else
936 ret = 0; 944 ret = 0;
937 945
938 break; 946 break;
947 }
939 948
940 case PTRACE_SETSIGMASK: { 949 case PTRACE_SETSIGMASK: {
941 sigset_t new_set; 950 sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
961 child->blocked = new_set; 970 child->blocked = new_set;
962 spin_unlock_irq(&child->sighand->siglock); 971 spin_unlock_irq(&child->sighand->siglock);
963 972
973 clear_tsk_restore_sigmask(child);
974
964 ret = 0; 975 ret = 0;
965 break; 976 break;
966 } 977 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ead464a0f2e5..4778c48a7fda 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6998,7 +6998,7 @@ static int __maybe_unused cpu_period_quota_parse(char *buf,
6998{ 6998{
6999 char tok[21]; /* U64_MAX */ 6999 char tok[21]; /* U64_MAX */
7000 7000
7001 if (!sscanf(buf, "%s %llu", tok, periodp)) 7001 if (sscanf(buf, "%20s %llu", tok, periodp) < 1)
7002 return -EINVAL; 7002 return -EINVAL;
7003 7003
7004 *periodp *= NSEC_PER_USEC; 7004 *periodp *= NSEC_PER_USEC;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2efe629425be..5c41ea367422 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -48,10 +48,10 @@ struct sugov_cpu {
48 48
49 bool iowait_boost_pending; 49 bool iowait_boost_pending;
50 unsigned int iowait_boost; 50 unsigned int iowait_boost;
51 unsigned int iowait_boost_max;
52 u64 last_update; 51 u64 last_update;
53 52
54 unsigned long bw_dl; 53 unsigned long bw_dl;
54 unsigned long min;
55 unsigned long max; 55 unsigned long max;
56 56
57 /* The field below is for single-CPU policies only: */ 57 /* The field below is for single-CPU policies only: */
@@ -303,8 +303,7 @@ static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
303 if (delta_ns <= TICK_NSEC) 303 if (delta_ns <= TICK_NSEC)
304 return false; 304 return false;
305 305
306 sg_cpu->iowait_boost = set_iowait_boost 306 sg_cpu->iowait_boost = set_iowait_boost ? sg_cpu->min : 0;
307 ? sg_cpu->sg_policy->policy->min : 0;
308 sg_cpu->iowait_boost_pending = set_iowait_boost; 307 sg_cpu->iowait_boost_pending = set_iowait_boost;
309 308
310 return true; 309 return true;
@@ -344,14 +343,13 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
344 343
345 /* Double the boost at each request */ 344 /* Double the boost at each request */
346 if (sg_cpu->iowait_boost) { 345 if (sg_cpu->iowait_boost) {
347 sg_cpu->iowait_boost <<= 1; 346 sg_cpu->iowait_boost =
348 if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max) 347 min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
349 sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
350 return; 348 return;
351 } 349 }
352 350
353 /* First wakeup after IO: start with minimum boost */ 351 /* First wakeup after IO: start with minimum boost */
354 sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min; 352 sg_cpu->iowait_boost = sg_cpu->min;
355} 353}
356 354
357/** 355/**
@@ -373,47 +371,38 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
373 * This mechanism is designed to boost high frequently IO waiting tasks, while 371 * This mechanism is designed to boost high frequently IO waiting tasks, while
374 * being more conservative on tasks which does sporadic IO operations. 372 * being more conservative on tasks which does sporadic IO operations.
375 */ 373 */
376static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time, 374static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
377 unsigned long *util, unsigned long *max) 375 unsigned long util, unsigned long max)
378{ 376{
379 unsigned int boost_util, boost_max; 377 unsigned long boost;
380 378
381 /* No boost currently required */ 379 /* No boost currently required */
382 if (!sg_cpu->iowait_boost) 380 if (!sg_cpu->iowait_boost)
383 return; 381 return util;
384 382
385 /* Reset boost if the CPU appears to have been idle enough */ 383 /* Reset boost if the CPU appears to have been idle enough */
386 if (sugov_iowait_reset(sg_cpu, time, false)) 384 if (sugov_iowait_reset(sg_cpu, time, false))
387 return; 385 return util;
388 386
389 /* 387 if (!sg_cpu->iowait_boost_pending) {
390 * An IO waiting task has just woken up:
391 * allow to further double the boost value
392 */
393 if (sg_cpu->iowait_boost_pending) {
394 sg_cpu->iowait_boost_pending = false;
395 } else {
396 /* 388 /*
397 * Otherwise: reduce the boost value and disable it when we 389 * No boost pending; reduce the boost value.
398 * reach the minimum.
399 */ 390 */
400 sg_cpu->iowait_boost >>= 1; 391 sg_cpu->iowait_boost >>= 1;
401 if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) { 392 if (sg_cpu->iowait_boost < sg_cpu->min) {
402 sg_cpu->iowait_boost = 0; 393 sg_cpu->iowait_boost = 0;
403 return; 394 return util;
404 } 395 }
405 } 396 }
406 397
398 sg_cpu->iowait_boost_pending = false;
399
407 /* 400 /*
408 * Apply the current boost value: a CPU is boosted only if its current 401 * @util is already in capacity scale; convert iowait_boost
409 * utilization is smaller then the current IO boost level. 402 * into the same scale so we can compare.
410 */ 403 */
411 boost_util = sg_cpu->iowait_boost; 404 boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
412 boost_max = sg_cpu->iowait_boost_max; 405 return max(boost, util);
413 if (*util * boost_max < *max * boost_util) {
414 *util = boost_util;
415 *max = boost_max;
416 }
417} 406}
418 407
419#ifdef CONFIG_NO_HZ_COMMON 408#ifdef CONFIG_NO_HZ_COMMON
@@ -460,7 +449,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
460 449
461 util = sugov_get_util(sg_cpu); 450 util = sugov_get_util(sg_cpu);
462 max = sg_cpu->max; 451 max = sg_cpu->max;
463 sugov_iowait_apply(sg_cpu, time, &util, &max); 452 util = sugov_iowait_apply(sg_cpu, time, util, max);
464 next_f = get_next_freq(sg_policy, util, max); 453 next_f = get_next_freq(sg_policy, util, max);
465 /* 454 /*
466 * Do not reduce the frequency if the CPU has not been idle 455 * Do not reduce the frequency if the CPU has not been idle
@@ -500,7 +489,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
500 489
501 j_util = sugov_get_util(j_sg_cpu); 490 j_util = sugov_get_util(j_sg_cpu);
502 j_max = j_sg_cpu->max; 491 j_max = j_sg_cpu->max;
503 sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max); 492 j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
504 493
505 if (j_util * max > j_max * util) { 494 if (j_util * max > j_max * util) {
506 util = j_util; 495 util = j_util;
@@ -837,7 +826,9 @@ static int sugov_start(struct cpufreq_policy *policy)
837 memset(sg_cpu, 0, sizeof(*sg_cpu)); 826 memset(sg_cpu, 0, sizeof(*sg_cpu));
838 sg_cpu->cpu = cpu; 827 sg_cpu->cpu = cpu;
839 sg_cpu->sg_policy = sg_policy; 828 sg_cpu->sg_policy = sg_policy;
840 sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; 829 sg_cpu->min =
830 (SCHED_CAPACITY_SCALE * policy->cpuinfo.min_freq) /
831 policy->cpuinfo.max_freq;
841 } 832 }
842 833
843 for_each_cpu(cpu, policy->cpus) { 834 for_each_cpu(cpu, policy->cpus) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ea74d43924b2..fdab7eb6f351 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8060,6 +8060,18 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
8060} 8060}
8061 8061
8062/* 8062/*
8063 * Check whether a rq has a misfit task and if it looks like we can actually
8064 * help that task: we can migrate the task to a CPU of higher capacity, or
8065 * the task's current CPU is heavily pressured.
8066 */
8067static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
8068{
8069 return rq->misfit_task_load &&
8070 (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
8071 check_cpu_capacity(rq, sd));
8072}
8073
8074/*
8063 * Group imbalance indicates (and tries to solve) the problem where balancing 8075 * Group imbalance indicates (and tries to solve) the problem where balancing
8064 * groups is inadequate due to ->cpus_allowed constraints. 8076 * groups is inadequate due to ->cpus_allowed constraints.
8065 * 8077 *
@@ -9586,35 +9598,21 @@ static void nohz_balancer_kick(struct rq *rq)
9586 if (time_before(now, nohz.next_balance)) 9598 if (time_before(now, nohz.next_balance))
9587 goto out; 9599 goto out;
9588 9600
9589 if (rq->nr_running >= 2 || rq->misfit_task_load) { 9601 if (rq->nr_running >= 2) {
9590 flags = NOHZ_KICK_MASK; 9602 flags = NOHZ_KICK_MASK;
9591 goto out; 9603 goto out;
9592 } 9604 }
9593 9605
9594 rcu_read_lock(); 9606 rcu_read_lock();
9595 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
9596 if (sds) {
9597 /*
9598 * If there is an imbalance between LLC domains (IOW we could
9599 * increase the overall cache use), we need some less-loaded LLC
9600 * domain to pull some load. Likewise, we may need to spread
9601 * load within the current LLC domain (e.g. packed SMT cores but
9602 * other CPUs are idle). We can't really know from here how busy
9603 * the others are - so just get a nohz balance going if it looks
9604 * like this LLC domain has tasks we could move.
9605 */
9606 nr_busy = atomic_read(&sds->nr_busy_cpus);
9607 if (nr_busy > 1) {
9608 flags = NOHZ_KICK_MASK;
9609 goto unlock;
9610 }
9611
9612 }
9613 9607
9614 sd = rcu_dereference(rq->sd); 9608 sd = rcu_dereference(rq->sd);
9615 if (sd) { 9609 if (sd) {
9616 if ((rq->cfs.h_nr_running >= 1) && 9610 /*
9617 check_cpu_capacity(rq, sd)) { 9611 * If there's a CFS task and the current CPU has reduced
9612 * capacity; kick the ILB to see if there's a better CPU to run
9613 * on.
9614 */
9615 if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) {
9618 flags = NOHZ_KICK_MASK; 9616 flags = NOHZ_KICK_MASK;
9619 goto unlock; 9617 goto unlock;
9620 } 9618 }
@@ -9622,6 +9620,11 @@ static void nohz_balancer_kick(struct rq *rq)
9622 9620
9623 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); 9621 sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
9624 if (sd) { 9622 if (sd) {
9623 /*
9624 * When ASYM_PACKING; see if there's a more preferred CPU
9625 * currently idle; in which case, kick the ILB to move tasks
9626 * around.
9627 */
9625 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { 9628 for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
9626 if (sched_asym_prefer(i, cpu)) { 9629 if (sched_asym_prefer(i, cpu)) {
9627 flags = NOHZ_KICK_MASK; 9630 flags = NOHZ_KICK_MASK;
@@ -9629,6 +9632,45 @@ static void nohz_balancer_kick(struct rq *rq)
9629 } 9632 }
9630 } 9633 }
9631 } 9634 }
9635
9636 sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu));
9637 if (sd) {
9638 /*
9639 * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU
9640 * to run the misfit task on.
9641 */
9642 if (check_misfit_status(rq, sd)) {
9643 flags = NOHZ_KICK_MASK;
9644 goto unlock;
9645 }
9646
9647 /*
9648 * For asymmetric systems, we do not want to nicely balance
9649 * cache use, instead we want to embrace asymmetry and only
9650 * ensure tasks have enough CPU capacity.
9651 *
9652 * Skip the LLC logic because it's not relevant in that case.
9653 */
9654 goto unlock;
9655 }
9656
9657 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
9658 if (sds) {
9659 /*
9660 * If there is an imbalance between LLC domains (IOW we could
9661 * increase the overall cache use), we need some less-loaded LLC
9662 * domain to pull some load. Likewise, we may need to spread
9663 * load within the current LLC domain (e.g. packed SMT cores but
9664 * other CPUs are idle). We can't really know from here how busy
9665 * the others are - so just get a nohz balance going if it looks
9666 * like this LLC domain has tasks we could move.
9667 */
9668 nr_busy = atomic_read(&sds->nr_busy_cpus);
9669 if (nr_busy > 1) {
9670 flags = NOHZ_KICK_MASK;
9671 goto unlock;
9672 }
9673 }
9632unlock: 9674unlock:
9633 rcu_read_unlock(); 9675 rcu_read_unlock();
9634out: 9676out:
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index dc1b6f1929f9..ac9c03dd6c7d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -89,7 +89,7 @@ struct clocksource * __init __weak clocksource_default_clock(void)
89 return &clocksource_jiffies; 89 return &clocksource_jiffies;
90} 90}
91 91
92struct clocksource refined_jiffies; 92static struct clocksource refined_jiffies;
93 93
94int register_refined_jiffies(long cycles_per_second) 94int register_refined_jiffies(long cycles_per_second)
95{ 95{
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fa79323331b2..26c8ca9bd06b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1992,7 +1992,7 @@ static void print_bug_type(void)
1992 * modifying the code. @failed should be one of either: 1992 * modifying the code. @failed should be one of either:
1993 * EFAULT - if the problem happens on reading the @ip address 1993 * EFAULT - if the problem happens on reading the @ip address
1994 * EINVAL - if what is read at @ip is not what was expected 1994 * EINVAL - if what is read at @ip is not what was expected
1995 * EPERM - if the problem happens on writting to the @ip address 1995 * EPERM - if the problem happens on writing to the @ip address
1996 */ 1996 */
1997void ftrace_bug(int failed, struct dyn_ftrace *rec) 1997void ftrace_bug(int failed, struct dyn_ftrace *rec)
1998{ 1998{
@@ -2391,7 +2391,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2391 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2391 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2392 } 2392 }
2393 2393
2394 return -1; /* unknow ftrace bug */ 2394 return -1; /* unknown ftrace bug */
2395} 2395}
2396 2396
2397void __weak ftrace_replace_code(int mod_flags) 2397void __weak ftrace_replace_code(int mod_flags)
@@ -3004,7 +3004,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
3004 int cnt; 3004 int cnt;
3005 3005
3006 if (!num_to_init) 3006 if (!num_to_init)
3007 return 0; 3007 return NULL;
3008 3008
3009 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3009 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3010 if (!pg) 3010 if (!pg)
@@ -4755,7 +4755,7 @@ static int
4755ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, 4755ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4756 int reset, int enable) 4756 int reset, int enable)
4757{ 4757{
4758 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); 4758 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
4759} 4759}
4760 4760
4761/** 4761/**
@@ -5463,7 +5463,7 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
5463 5463
5464/* 5464/*
5465 * The name "destroy_filter_files" is really a misnomer. Although 5465 * The name "destroy_filter_files" is really a misnomer. Although
5466 * in the future, it may actualy delete the files, but this is 5466 * in the future, it may actually delete the files, but this is
5467 * really intended to make sure the ops passed in are disabled 5467 * really intended to make sure the ops passed in are disabled
5468 * and that when this function returns, the caller is free to 5468 * and that when this function returns, the caller is free to
5469 * free the ops. 5469 * free the ops.
@@ -5786,7 +5786,7 @@ void ftrace_module_enable(struct module *mod)
5786 /* 5786 /*
5787 * If the tracing is enabled, go ahead and enable the record. 5787 * If the tracing is enabled, go ahead and enable the record.
5788 * 5788 *
5789 * The reason not to enable the record immediatelly is the 5789 * The reason not to enable the record immediately is the
5790 * inherent check of ftrace_make_nop/ftrace_make_call for 5790 * inherent check of ftrace_make_nop/ftrace_make_call for
5791 * correct previous instructions. Making first the NOP 5791 * correct previous instructions. Making first the NOP
5792 * conversion puts the module to the correct state, thus 5792 * conversion puts the module to the correct state, thus
diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
index dd1f43588d70..fa100ed3b4de 100644
--- a/kernel/trace/trace_dynevent.c
+++ b/kernel/trace/trace_dynevent.c
@@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
74static int create_dyn_event(int argc, char **argv) 74static int create_dyn_event(int argc, char **argv)
75{ 75{
76 struct dyn_event_operations *ops; 76 struct dyn_event_operations *ops;
77 int ret; 77 int ret = -ENODEV;
78 78
79 if (argv[0][0] == '-' || argv[0][0] == '!') 79 if (argv[0][0] == '-' || argv[0][0] == '!')
80 return dyn_event_release(argc, argv, NULL); 80 return dyn_event_release(argc, argv, NULL);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index ca46339f3009..795aa2038377 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -3713,7 +3713,6 @@ static void track_data_destroy(struct hist_trigger_data *hist_data,
3713 struct trace_event_file *file = hist_data->event_file; 3713 struct trace_event_file *file = hist_data->event_file;
3714 3714
3715 destroy_hist_field(data->track_data.track_var, 0); 3715 destroy_hist_field(data->track_data.track_var, 0);
3716 destroy_hist_field(data->track_data.var_ref, 0);
3717 3716
3718 if (data->action == ACTION_SNAPSHOT) { 3717 if (data->action == ACTION_SNAPSHOT) {
3719 struct track_data *track_data; 3718 struct track_data *track_data;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 8fbfda94a67b..6a5787233113 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -42,9 +42,9 @@ int __read_mostly watchdog_user_enabled = 1;
42int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT; 42int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
43int __read_mostly soft_watchdog_user_enabled = 1; 43int __read_mostly soft_watchdog_user_enabled = 1;
44int __read_mostly watchdog_thresh = 10; 44int __read_mostly watchdog_thresh = 10;
45int __read_mostly nmi_watchdog_available; 45static int __read_mostly nmi_watchdog_available;
46 46
47struct cpumask watchdog_allowed_mask __read_mostly; 47static struct cpumask watchdog_allowed_mask __read_mostly;
48 48
49struct cpumask watchdog_cpumask __read_mostly; 49struct cpumask watchdog_cpumask __read_mostly;
50unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); 50unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
554 554
555int lockup_detector_online_cpu(unsigned int cpu) 555int lockup_detector_online_cpu(unsigned int cpu)
556{ 556{
557 watchdog_enable(cpu); 557 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
558 watchdog_enable(cpu);
558 return 0; 559 return 0;
559} 560}
560 561
561int lockup_detector_offline_cpu(unsigned int cpu) 562int lockup_detector_offline_cpu(unsigned int cpu)
562{ 563{
563 watchdog_disable(cpu); 564 if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
565 watchdog_disable(cpu);
564 return 0; 566 return 0;
565} 567}
566 568
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4026d1871407..ddee541ea97a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4266,7 +4266,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
4266 INIT_LIST_HEAD(&wq->list); 4266 INIT_LIST_HEAD(&wq->list);
4267 4267
4268 if (alloc_and_link_pwqs(wq) < 0) 4268 if (alloc_and_link_pwqs(wq) < 0)
4269 goto err_free_wq; 4269 goto err_unreg_lockdep;
4270 4270
4271 if (wq_online && init_rescuer(wq) < 0) 4271 if (wq_online && init_rescuer(wq) < 0)
4272 goto err_destroy; 4272 goto err_destroy;
@@ -4292,9 +4292,10 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
4292 4292
4293 return wq; 4293 return wq;
4294 4294
4295err_free_wq: 4295err_unreg_lockdep:
4296 wq_unregister_lockdep(wq); 4296 wq_unregister_lockdep(wq);
4297 wq_free_lockdep(wq); 4297 wq_free_lockdep(wq);
4298err_free_wq:
4298 free_workqueue_attrs(wq->unbound_attrs); 4299 free_workqueue_attrs(wq->unbound_attrs);
4299 kfree(wq); 4300 kfree(wq);
4300 return NULL; 4301 return NULL;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 0a105d4af166..97f59abc3e92 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
416 else if (tbl->nest) 416 else if (tbl->nest)
417 err = rhashtable_rehash_alloc(ht, tbl, tbl->size); 417 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
418 418
419 if (!err) 419 if (!err || err == -EEXIST) {
420 err = rhashtable_rehash_table(ht); 420 int nerr;
421
422 nerr = rhashtable_rehash_table(ht);
423 err = err ?: nerr;
424 }
421 425
422 mutex_unlock(&ht->mutex); 426 mutex_unlock(&ht->mutex);
423 427
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 5b382c1244ed..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
591void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, 591void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
592 unsigned int cpu) 592 unsigned int cpu)
593{ 593{
594 /*
595 * Once the clear bit is set, the bit may be allocated out.
596 *
597 * Orders READ/WRITE on the asssociated instance(such as request
598 * of blk_mq) by this bit for avoiding race with re-allocation,
599 * and its pair is the memory barrier implied in __sbitmap_get_word.
600 *
601 * One invariant is that the clear bit has to be zero when the bit
602 * is in use.
603 */
604 smp_mb__before_atomic();
594 sbitmap_deferred_clear_bit(&sbq->sb, nr); 605 sbitmap_deferred_clear_bit(&sbq->sb, nr);
595 606
596 /* 607 /*
diff --git a/mm/debug.c b/mm/debug.c
index c0b31b6c3877..eee9c221280c 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
79 pr_warn("ksm "); 79 pr_warn("ksm ");
80 else if (mapping) { 80 else if (mapping) {
81 pr_warn("%ps ", mapping->a_ops); 81 pr_warn("%ps ", mapping->a_ops);
82 if (mapping->host->i_dentry.first) { 82 if (mapping->host && mapping->host->i_dentry.first) {
83 struct dentry *dentry; 83 struct dentry *dentry;
84 dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias); 84 dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
85 pr_warn("name:\"%pd\" ", dentry); 85 pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
168 mm_pgtables_bytes(mm), 168 mm_pgtables_bytes(mm),
169 mm->map_count, 169 mm->map_count,
170 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, 170 mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
171 atomic64_read(&mm->pinned_vm), 171 (u64)atomic64_read(&mm->pinned_vm),
172 mm->data_vm, mm->exec_vm, mm->stack_vm, 172 mm->data_vm, mm->exec_vm, mm->stack_vm,
173 mm->start_code, mm->end_code, mm->start_data, mm->end_data, 173 mm->start_code, mm->end_code, mm->start_data, mm->end_data,
174 mm->start_brk, mm->brk, mm->start_stack, 174 mm->start_brk, mm->brk, mm->start_stack,
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3e0c11f7d7a1..3ce956efa0cb 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
163#endif 163#endif
164 164
165#ifndef arch_kasan_set_tag 165#ifndef arch_kasan_set_tag
166#define arch_kasan_set_tag(addr, tag) ((void *)(addr)) 166static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
167{
168 return addr;
169}
167#endif 170#endif
168#ifndef arch_kasan_reset_tag 171#ifndef arch_kasan_reset_tag
169#define arch_kasan_reset_tag(addr) ((void *)(addr)) 172#define arch_kasan_reset_tag(addr) ((void *)(addr))
diff --git a/mm/memory.c b/mm/memory.c
index 47fe250307c7..ab650c21bccd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1549 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); 1549 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
1550 goto out_unlock; 1550 goto out_unlock;
1551 } 1551 }
1552 entry = *pte; 1552 entry = pte_mkyoung(*pte);
1553 goto out_mkwrite; 1553 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1554 } else 1554 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
1555 goto out_unlock; 1555 update_mmu_cache(vma, addr, pte);
1556 }
1557 goto out_unlock;
1556 } 1558 }
1557 1559
1558 /* Ok, finally just insert the thing.. */ 1560 /* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1561 else 1563 else
1562 entry = pte_mkspecial(pfn_t_pte(pfn, prot)); 1564 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
1563 1565
1564out_mkwrite:
1565 if (mkwrite) { 1566 if (mkwrite) {
1566 entry = pte_mkyoung(entry); 1567 entry = pte_mkyoung(entry);
1567 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1568 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f767582af4f8..0082d699be94 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
1576{ 1576{
1577 unsigned long pfn, nr_pages; 1577 unsigned long pfn, nr_pages;
1578 long offlined_pages; 1578 long offlined_pages;
1579 int ret, node; 1579 int ret, node, nr_isolate_pageblock;
1580 unsigned long flags; 1580 unsigned long flags;
1581 unsigned long valid_start, valid_end; 1581 unsigned long valid_start, valid_end;
1582 struct zone *zone; 1582 struct zone *zone;
@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
1602 ret = start_isolate_page_range(start_pfn, end_pfn, 1602 ret = start_isolate_page_range(start_pfn, end_pfn,
1603 MIGRATE_MOVABLE, 1603 MIGRATE_MOVABLE,
1604 SKIP_HWPOISON | REPORT_FAILURE); 1604 SKIP_HWPOISON | REPORT_FAILURE);
1605 if (ret) { 1605 if (ret < 0) {
1606 reason = "failure to isolate range"; 1606 reason = "failure to isolate range";
1607 goto failed_removal; 1607 goto failed_removal;
1608 } 1608 }
1609 nr_isolate_pageblock = ret;
1609 1610
1610 arg.start_pfn = start_pfn; 1611 arg.start_pfn = start_pfn;
1611 arg.nr_pages = nr_pages; 1612 arg.nr_pages = nr_pages;
@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
1657 /* Ok, all of our target is isolated. 1658 /* Ok, all of our target is isolated.
1658 We cannot do rollback at this point. */ 1659 We cannot do rollback at this point. */
1659 offline_isolated_pages(start_pfn, end_pfn); 1660 offline_isolated_pages(start_pfn, end_pfn);
1660 /* reset pagetype flags and makes migrate type to be MOVABLE */ 1661
1661 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 1662 /*
1663 * Onlining will reset pagetype flags and makes migrate type
1664 * MOVABLE, so just need to decrease the number of isolated
1665 * pageblocks zone counter here.
1666 */
1667 spin_lock_irqsave(&zone->lock, flags);
1668 zone->nr_isolate_pageblock -= nr_isolate_pageblock;
1669 spin_unlock_irqrestore(&zone->lock, flags);
1670
1662 /* removal success */ 1671 /* removal success */
1663 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages); 1672 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
1664 zone->present_pages -= offlined_pages; 1673 zone->present_pages -= offlined_pages;
@@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
1690 1699
1691failed_removal_isolated: 1700failed_removal_isolated:
1692 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 1701 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1702 memory_notify(MEM_CANCEL_OFFLINE, &arg);
1693failed_removal: 1703failed_removal:
1694 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", 1704 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
1695 (unsigned long long) start_pfn << PAGE_SHIFT, 1705 (unsigned long long) start_pfn << PAGE_SHIFT,
1696 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, 1706 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
1697 reason); 1707 reason);
1698 memory_notify(MEM_CANCEL_OFFLINE, &arg);
1699 /* pushback to free area */ 1708 /* pushback to free area */
1700 mem_hotplug_done(); 1709 mem_hotplug_done();
1701 return ret; 1710 return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index af171ccb56a2..2219e747df49 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT); 428 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
429} 429}
430 430
431/*
432 * queue_pages_pmd() has three possible return values:
433 * 1 - pages are placed on the right node or queued successfully.
434 * 0 - THP was split.
435 * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
436 * page was already on a node that does not follow the policy.
437 */
431static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 438static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
432 unsigned long end, struct mm_walk *walk) 439 unsigned long end, struct mm_walk *walk)
433{ 440{
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
437 unsigned long flags; 444 unsigned long flags;
438 445
439 if (unlikely(is_pmd_migration_entry(*pmd))) { 446 if (unlikely(is_pmd_migration_entry(*pmd))) {
440 ret = 1; 447 ret = -EIO;
441 goto unlock; 448 goto unlock;
442 } 449 }
443 page = pmd_page(*pmd); 450 page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
454 ret = 1; 461 ret = 1;
455 flags = qp->flags; 462 flags = qp->flags;
456 /* go to thp migration */ 463 /* go to thp migration */
457 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 464 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
465 if (!vma_migratable(walk->vma)) {
466 ret = -EIO;
467 goto unlock;
468 }
469
458 migrate_page_add(page, qp->pagelist, flags); 470 migrate_page_add(page, qp->pagelist, flags);
471 } else
472 ret = -EIO;
459unlock: 473unlock:
460 spin_unlock(ptl); 474 spin_unlock(ptl);
461out: 475out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
480 ptl = pmd_trans_huge_lock(pmd, vma); 494 ptl = pmd_trans_huge_lock(pmd, vma);
481 if (ptl) { 495 if (ptl) {
482 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 496 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
483 if (ret) 497 if (ret > 0)
484 return 0; 498 return 0;
499 else if (ret < 0)
500 return ret;
485 } 501 }
486 502
487 if (pmd_trans_unstable(pmd)) 503 if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
502 continue; 518 continue;
503 if (!queue_pages_required(page, qp)) 519 if (!queue_pages_required(page, qp))
504 continue; 520 continue;
505 migrate_page_add(page, qp->pagelist, flags); 521 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
522 if (!vma_migratable(vma))
523 break;
524 migrate_page_add(page, qp->pagelist, flags);
525 } else
526 break;
506 } 527 }
507 pte_unmap_unlock(pte - 1, ptl); 528 pte_unmap_unlock(pte - 1, ptl);
508 cond_resched(); 529 cond_resched();
509 return 0; 530 return addr != end ? -EIO : 0;
510} 531}
511 532
512static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask, 533static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
576 unsigned long endvma = vma->vm_end; 597 unsigned long endvma = vma->vm_end;
577 unsigned long flags = qp->flags; 598 unsigned long flags = qp->flags;
578 599
579 if (!vma_migratable(vma)) 600 /*
601 * Need check MPOL_MF_STRICT to return -EIO if possible
602 * regardless of vma_migratable
603 */
604 if (!vma_migratable(vma) &&
605 !(flags & MPOL_MF_STRICT))
580 return 1; 606 return 1;
581 607
582 if (endvma > end) 608 if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
603 } 629 }
604 630
605 /* queue pages from current vma */ 631 /* queue pages from current vma */
606 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) 632 if (flags & MPOL_MF_VALID)
607 return 0; 633 return 0;
608 return 1; 634 return 1;
609} 635}
diff --git a/mm/migrate.c b/mm/migrate.c
index ac6f4939bb59..663a5449367a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
248 pte = swp_entry_to_pte(entry); 248 pte = swp_entry_to_pte(entry);
249 } else if (is_device_public_page(new)) { 249 } else if (is_device_public_page(new)) {
250 pte = pte_mkdevmap(pte); 250 pte = pte_mkdevmap(pte);
251 flush_dcache_page(new);
252 } 251 }
253 } else 252 }
254 flush_dcache_page(new);
255 253
256#ifdef CONFIG_HUGETLB_PAGE 254#ifdef CONFIG_HUGETLB_PAGE
257 if (PageHuge(new)) { 255 if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
995 */ 993 */
996 if (!PageMappingFlags(page)) 994 if (!PageMappingFlags(page))
997 page->mapping = NULL; 995 page->mapping = NULL;
996
997 if (unlikely(is_zone_device_page(newpage))) {
998 if (is_device_public_page(newpage))
999 flush_dcache_page(newpage);
1000 } else
1001 flush_dcache_page(newpage);
1002
998 } 1003 }
999out: 1004out:
1000 return rc; 1005 return rc;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03fcf73d47da..d96ca5bc555b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
8233 8233
8234 ret = start_isolate_page_range(pfn_max_align_down(start), 8234 ret = start_isolate_page_range(pfn_max_align_down(start),
8235 pfn_max_align_up(end), migratetype, 0); 8235 pfn_max_align_up(end), migratetype, 0);
8236 if (ret) 8236 if (ret < 0)
8237 return ret; 8237 return ret;
8238 8238
8239 /* 8239 /*
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index ce323e56b34d..019280712e1b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
59 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. 59 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
60 * We just check MOVABLE pages. 60 * We just check MOVABLE pages.
61 */ 61 */
62 if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags)) 62 if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
63 isol_flags))
63 ret = 0; 64 ret = 0;
64 65
65 /* 66 /*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
160 return NULL; 161 return NULL;
161} 162}
162 163
163/* 164/**
164 * start_isolate_page_range() -- make page-allocation-type of range of pages 165 * start_isolate_page_range() - make page-allocation-type of range of pages to
165 * to be MIGRATE_ISOLATE. 166 * be MIGRATE_ISOLATE.
166 * @start_pfn: The lower PFN of the range to be isolated. 167 * @start_pfn: The lower PFN of the range to be isolated.
167 * @end_pfn: The upper PFN of the range to be isolated. 168 * @end_pfn: The upper PFN of the range to be isolated.
168 * @migratetype: migrate type to set in error recovery. 169 * start_pfn/end_pfn must be aligned to pageblock_order.
170 * @migratetype: Migrate type to set in error recovery.
171 * @flags: The following flags are allowed (they can be combined in
172 * a bit mask)
173 * SKIP_HWPOISON - ignore hwpoison pages
174 * REPORT_FAILURE - report details about the failure to
175 * isolate the range
169 * 176 *
170 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in 177 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
171 * the range will never be allocated. Any free pages and pages freed in the 178 * the range will never be allocated. Any free pages and pages freed in the
172 * future will not be allocated again. 179 * future will not be allocated again. If specified range includes migrate types
173 * 180 * other than MOVABLE or CMA, this will fail with -EBUSY. For isolating all
174 * start_pfn/end_pfn must be aligned to pageblock_order. 181 * pages in the range finally, the caller have to free all pages in the range.
175 * Return 0 on success and -EBUSY if any part of range cannot be isolated. 182 * test_page_isolated() can be used for test it.
176 * 183 *
177 * There is no high level synchronization mechanism that prevents two threads 184 * There is no high level synchronization mechanism that prevents two threads
178 * from trying to isolate overlapping ranges. If this happens, one thread 185 * from trying to isolate overlapping ranges. If this happens, one thread
179 * will notice pageblocks in the overlapping range already set to isolate. 186 * will notice pageblocks in the overlapping range already set to isolate.
180 * This happens in set_migratetype_isolate, and set_migratetype_isolate 187 * This happens in set_migratetype_isolate, and set_migratetype_isolate
181 * returns an error. We then clean up by restoring the migration type on 188 * returns an error. We then clean up by restoring the migration type on
182 * pageblocks we may have modified and return -EBUSY to caller. This 189 * pageblocks we may have modified and return -EBUSY to caller. This
183 * prevents two threads from simultaneously working on overlapping ranges. 190 * prevents two threads from simultaneously working on overlapping ranges.
191 *
192 * Return: the number of isolated pageblocks on success and -EBUSY if any part
193 * of range cannot be isolated.
184 */ 194 */
185int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, 195int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
186 unsigned migratetype, int flags) 196 unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
188 unsigned long pfn; 198 unsigned long pfn;
189 unsigned long undo_pfn; 199 unsigned long undo_pfn;
190 struct page *page; 200 struct page *page;
201 int nr_isolate_pageblock = 0;
191 202
192 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages)); 203 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
193 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages)); 204 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
196 pfn < end_pfn; 207 pfn < end_pfn;
197 pfn += pageblock_nr_pages) { 208 pfn += pageblock_nr_pages) {
198 page = __first_valid_page(pfn, pageblock_nr_pages); 209 page = __first_valid_page(pfn, pageblock_nr_pages);
199 if (page && 210 if (page) {
200 set_migratetype_isolate(page, migratetype, flags)) { 211 if (set_migratetype_isolate(page, migratetype, flags)) {
201 undo_pfn = pfn; 212 undo_pfn = pfn;
202 goto undo; 213 goto undo;
214 }
215 nr_isolate_pageblock++;
203 } 216 }
204 } 217 }
205 return 0; 218 return nr_isolate_pageblock;
206undo: 219undo:
207 for (pfn = start_pfn; 220 for (pfn = start_pfn;
208 pfn < undo_pfn; 221 pfn < undo_pfn;
diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..329bfe67f2ca 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ done:
2115 cachep->allocflags = __GFP_COMP; 2115 cachep->allocflags = __GFP_COMP;
2116 if (flags & SLAB_CACHE_DMA) 2116 if (flags & SLAB_CACHE_DMA)
2117 cachep->allocflags |= GFP_DMA; 2117 cachep->allocflags |= GFP_DMA;
2118 if (flags & SLAB_CACHE_DMA32)
2119 cachep->allocflags |= GFP_DMA32;
2118 if (flags & SLAB_RECLAIM_ACCOUNT) 2120 if (flags & SLAB_RECLAIM_ACCOUNT)
2119 cachep->allocflags |= __GFP_RECLAIMABLE; 2121 cachep->allocflags |= __GFP_RECLAIMABLE;
2120 cachep->size = size; 2122 cachep->size = size;
diff --git a/mm/slab.h b/mm/slab.h
index e5e6658eeacc..43ac818b8592 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
127 127
128 128
129/* Legal flag mask for kmem_cache_create(), for various configurations */ 129/* Legal flag mask for kmem_cache_create(), for various configurations */
130#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \ 130#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
131 SLAB_CACHE_DMA32 | SLAB_PANIC | \
131 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS ) 132 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
132 133
133#if defined(CONFIG_DEBUG_SLAB) 134#if defined(CONFIG_DEBUG_SLAB)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 03eeb8b7b4b1..58251ba63e4a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
53 SLAB_FAILSLAB | SLAB_KASAN) 53 SLAB_FAILSLAB | SLAB_KASAN)
54 54
55#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \ 55#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
56 SLAB_ACCOUNT) 56 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
57 57
58/* 58/*
59 * Merge control. If this is set then no merging of slab caches will occur. 59 * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 1b08fbcb7e61..d30ede89f4a6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3589 if (s->flags & SLAB_CACHE_DMA) 3589 if (s->flags & SLAB_CACHE_DMA)
3590 s->allocflags |= GFP_DMA; 3590 s->allocflags |= GFP_DMA;
3591 3591
3592 if (s->flags & SLAB_CACHE_DMA32)
3593 s->allocflags |= GFP_DMA32;
3594
3592 if (s->flags & SLAB_RECLAIM_ACCOUNT) 3595 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3593 s->allocflags |= __GFP_RECLAIMABLE; 3596 s->allocflags |= __GFP_RECLAIMABLE;
3594 3597
@@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s)
5679 */ 5682 */
5680 if (s->flags & SLAB_CACHE_DMA) 5683 if (s->flags & SLAB_CACHE_DMA)
5681 *p++ = 'd'; 5684 *p++ = 'd';
5685 if (s->flags & SLAB_CACHE_DMA32)
5686 *p++ = 'D';
5682 if (s->flags & SLAB_RECLAIM_ACCOUNT) 5687 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5683 *p++ = 'a'; 5688 *p++ = 'a';
5684 if (s->flags & SLAB_CONSISTENCY_CHECKS) 5689 if (s->flags & SLAB_CONSISTENCY_CHECKS)
diff --git a/mm/sparse.c b/mm/sparse.c
index 69904aa6165b..56e057c432f9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
567} 567}
568 568
569#ifdef CONFIG_MEMORY_HOTREMOVE 569#ifdef CONFIG_MEMORY_HOTREMOVE
570/* Mark all memory sections within the pfn range as online */ 570/* Mark all memory sections within the pfn range as offline */
571void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn) 571void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
572{ 572{
573 unsigned long pfn; 573 unsigned long pfn;
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 49a16cee2aae..420a98bf79b5 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
879 879
880static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 }; 880static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
881 881
882void __init aarp_proto_init(void) 882int __init aarp_proto_init(void)
883{ 883{
884 int rc;
885
884 aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv); 886 aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
885 if (!aarp_dl) 887 if (!aarp_dl) {
886 printk(KERN_CRIT "Unable to register AARP with SNAP.\n"); 888 printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
889 return -ENOMEM;
890 }
887 timer_setup(&aarp_timer, aarp_expire_timeout, 0); 891 timer_setup(&aarp_timer, aarp_expire_timeout, 0);
888 aarp_timer.expires = jiffies + sysctl_aarp_expiry_time; 892 aarp_timer.expires = jiffies + sysctl_aarp_expiry_time;
889 add_timer(&aarp_timer); 893 add_timer(&aarp_timer);
890 register_netdevice_notifier(&aarp_notifier); 894 rc = register_netdevice_notifier(&aarp_notifier);
895 if (rc) {
896 del_timer_sync(&aarp_timer);
897 unregister_snap_client(aarp_dl);
898 }
899 return rc;
891} 900}
892 901
893/* Remove the AARP entries associated with a device. */ 902/* Remove the AARP entries associated with a device. */
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 795fbc6c06aa..709d2542f729 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
1904EXPORT_SYMBOL(atrtr_get_dev); 1904EXPORT_SYMBOL(atrtr_get_dev);
1905EXPORT_SYMBOL(atalk_find_dev_addr); 1905EXPORT_SYMBOL(atalk_find_dev_addr);
1906 1906
1907static const char atalk_err_snap[] __initconst =
1908 KERN_CRIT "Unable to register DDP with SNAP.\n";
1909
1910/* Called by proto.c on kernel start up */ 1907/* Called by proto.c on kernel start up */
1911static int __init atalk_init(void) 1908static int __init atalk_init(void)
1912{ 1909{
@@ -1921,17 +1918,22 @@ static int __init atalk_init(void)
1921 goto out_proto; 1918 goto out_proto;
1922 1919
1923 ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv); 1920 ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
1924 if (!ddp_dl) 1921 if (!ddp_dl) {
1925 printk(atalk_err_snap); 1922 pr_crit("Unable to register DDP with SNAP.\n");
1923 goto out_sock;
1924 }
1926 1925
1927 dev_add_pack(&ltalk_packet_type); 1926 dev_add_pack(&ltalk_packet_type);
1928 dev_add_pack(&ppptalk_packet_type); 1927 dev_add_pack(&ppptalk_packet_type);
1929 1928
1930 rc = register_netdevice_notifier(&ddp_notifier); 1929 rc = register_netdevice_notifier(&ddp_notifier);
1931 if (rc) 1930 if (rc)
1932 goto out_sock; 1931 goto out_snap;
1932
1933 rc = aarp_proto_init();
1934 if (rc)
1935 goto out_dev;
1933 1936
1934 aarp_proto_init();
1935 rc = atalk_proc_init(); 1937 rc = atalk_proc_init();
1936 if (rc) 1938 if (rc)
1937 goto out_aarp; 1939 goto out_aarp;
@@ -1945,11 +1947,13 @@ out_proc:
1945 atalk_proc_exit(); 1947 atalk_proc_exit();
1946out_aarp: 1948out_aarp:
1947 aarp_cleanup_module(); 1949 aarp_cleanup_module();
1950out_dev:
1948 unregister_netdevice_notifier(&ddp_notifier); 1951 unregister_netdevice_notifier(&ddp_notifier);
1949out_sock: 1952out_snap:
1950 dev_remove_pack(&ppptalk_packet_type); 1953 dev_remove_pack(&ppptalk_packet_type);
1951 dev_remove_pack(&ltalk_packet_type); 1954 dev_remove_pack(&ltalk_packet_type);
1952 unregister_snap_client(ddp_dl); 1955 unregister_snap_client(ddp_dl);
1956out_sock:
1953 sock_unregister(PF_APPLETALK); 1957 sock_unregister(PF_APPLETALK);
1954out_proto: 1958out_proto:
1955 proto_unregister(&ddp_proto); 1959 proto_unregister(&ddp_proto);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 9d34de68571b..22afa566cbce 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
502 nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr; 502 nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
503 503
504 skb->protocol = htons(ETH_P_IP); 504 skb->protocol = htons(ETH_P_IP);
505 skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
505 506
506 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb, 507 NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
507 skb->dev, NULL, 508 skb->dev, NULL,
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 564710f88f93..e88d6641647b 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
235 nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr; 235 nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
236 236
237 skb->protocol = htons(ETH_P_IPV6); 237 skb->protocol = htons(ETH_P_IPV6);
238 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
239
238 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb, 240 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
239 skb->dev, NULL, 241 skb->dev, NULL,
240 br_nf_pre_routing_finish_ipv6); 242 br_nf_pre_routing_finish_ipv6);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 9cab80207ced..79eac465ec65 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
738} 738}
739EXPORT_SYMBOL(__ceph_open_session); 739EXPORT_SYMBOL(__ceph_open_session);
740 740
741
742int ceph_open_session(struct ceph_client *client) 741int ceph_open_session(struct ceph_client *client)
743{ 742{
744 int ret; 743 int ret;
@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
754} 753}
755EXPORT_SYMBOL(ceph_open_session); 754EXPORT_SYMBOL(ceph_open_session);
756 755
756int ceph_wait_for_latest_osdmap(struct ceph_client *client,
757 unsigned long timeout)
758{
759 u64 newest_epoch;
760 int ret;
761
762 ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
763 if (ret)
764 return ret;
765
766 if (client->osdc.osdmap->epoch >= newest_epoch)
767 return 0;
768
769 ceph_osdc_maybe_request_map(&client->osdc);
770 return ceph_monc_wait_osdmap(&client->monc, newest_epoch, timeout);
771}
772EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
757 773
758static int __init init_ceph_lib(void) 774static int __init init_ceph_lib(void)
759{ 775{
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 7e71b0df1fbc..3083988ce729 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
840 size_t bytes) 840 size_t bytes)
841{ 841{
842 struct ceph_bio_iter *it = &cursor->bio_iter; 842 struct ceph_bio_iter *it = &cursor->bio_iter;
843 struct page *page = bio_iter_page(it->bio, it->iter);
843 844
844 BUG_ON(bytes > cursor->resid); 845 BUG_ON(bytes > cursor->resid);
845 BUG_ON(bytes > bio_iter_len(it->bio, it->iter)); 846 BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
851 return false; /* no more data */ 852 return false; /* no more data */
852 } 853 }
853 854
854 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done)) 855 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
856 page == bio_iter_page(it->bio, it->iter)))
855 return false; /* more bytes to process in this segment */ 857 return false; /* more bytes to process in this segment */
856 858
857 if (!it->iter.bi_size) { 859 if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
899 size_t bytes) 901 size_t bytes)
900{ 902{
901 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs; 903 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
904 struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
902 905
903 BUG_ON(bytes > cursor->resid); 906 BUG_ON(bytes > cursor->resid);
904 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter)); 907 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
910 return false; /* no more data */ 913 return false; /* no more data */
911 } 914 }
912 915
913 if (!bytes || cursor->bvec_iter.bi_bvec_done) 916 if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
917 page == bvec_iter_page(bvecs, cursor->bvec_iter)))
914 return false; /* more bytes to process in this segment */ 918 return false; /* more bytes to process in this segment */
915 919
916 BUG_ON(cursor->last_piece); 920 BUG_ON(cursor->last_piece);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 18deb3d889c4..a53e4fbb6319 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
922 mutex_unlock(&monc->mutex); 922 mutex_unlock(&monc->mutex);
923 923
924 ret = wait_generic_request(req); 924 ret = wait_generic_request(req);
925 if (!ret)
926 /*
927 * Make sure we have the osdmap that includes the blacklist
928 * entry. This is needed to ensure that the OSDs pick up the
929 * new blacklist before processing any future requests from
930 * this client.
931 */
932 ret = ceph_wait_for_latest_osdmap(monc->client, 0);
933
925out: 934out:
926 put_generic_request(req); 935 put_generic_request(req);
927 return ret; 936 return ret;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 78e22cea4cc7..da0a29f30885 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -3897,6 +3897,11 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
3897 continue; 3897 continue;
3898 } 3898 }
3899 3899
3900 if (!devlink->ops->info_get) {
3901 idx++;
3902 continue;
3903 }
3904
3900 mutex_lock(&devlink->lock); 3905 mutex_lock(&devlink->lock);
3901 err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET, 3906 err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
3902 NETLINK_CB(cb->skb).portid, 3907 NETLINK_CB(cb->skb).portid,
diff --git a/net/core/filter.c b/net/core/filter.c
index f274620945ff..647c63a7b25b 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1796,8 +1796,6 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1796 1796
1797BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk) 1797BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
1798{ 1798{
1799 sk = sk_to_full_sk(sk);
1800
1801 return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL; 1799 return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
1802} 1800}
1803 1801
@@ -5266,7 +5264,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
5266 .func = bpf_sk_release, 5264 .func = bpf_sk_release,
5267 .gpl_only = false, 5265 .gpl_only = false,
5268 .ret_type = RET_INTEGER, 5266 .ret_type = RET_INTEGER,
5269 .arg1_type = ARG_PTR_TO_SOCKET, 5267 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5270}; 5268};
5271 5269
5272BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, 5270BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -5407,8 +5405,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
5407 5405
5408BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) 5406BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
5409{ 5407{
5410 sk = sk_to_full_sk(sk);
5411
5412 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) 5408 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
5413 return (unsigned long)sk; 5409 return (unsigned long)sk;
5414 5410
@@ -5422,6 +5418,23 @@ static const struct bpf_func_proto bpf_tcp_sock_proto = {
5422 .arg1_type = ARG_PTR_TO_SOCK_COMMON, 5418 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5423}; 5419};
5424 5420
5421BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
5422{
5423 sk = sk_to_full_sk(sk);
5424
5425 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
5426 return (unsigned long)sk;
5427
5428 return (unsigned long)NULL;
5429}
5430
5431static const struct bpf_func_proto bpf_get_listener_sock_proto = {
5432 .func = bpf_get_listener_sock,
5433 .gpl_only = false,
5434 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5435 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5436};
5437
5425BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) 5438BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
5426{ 5439{
5427 unsigned int iphdr_len; 5440 unsigned int iphdr_len;
@@ -5607,6 +5620,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5607#ifdef CONFIG_INET 5620#ifdef CONFIG_INET
5608 case BPF_FUNC_tcp_sock: 5621 case BPF_FUNC_tcp_sock:
5609 return &bpf_tcp_sock_proto; 5622 return &bpf_tcp_sock_proto;
5623 case BPF_FUNC_get_listener_sock:
5624 return &bpf_get_listener_sock_proto;
5610 case BPF_FUNC_skb_ecn_set_ce: 5625 case BPF_FUNC_skb_ecn_set_ce:
5611 return &bpf_skb_ecn_set_ce_proto; 5626 return &bpf_skb_ecn_set_ce_proto;
5612#endif 5627#endif
@@ -5702,6 +5717,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5702 return &bpf_sk_release_proto; 5717 return &bpf_sk_release_proto;
5703 case BPF_FUNC_tcp_sock: 5718 case BPF_FUNC_tcp_sock:
5704 return &bpf_tcp_sock_proto; 5719 return &bpf_tcp_sock_proto;
5720 case BPF_FUNC_get_listener_sock:
5721 return &bpf_get_listener_sock_proto;
5705#endif 5722#endif
5706 default: 5723 default:
5707 return bpf_base_func_proto(func_id); 5724 return bpf_base_func_proto(func_id);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 4ff661f6f989..f8f94303a1f5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -928,6 +928,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
928 if (error) 928 if (error)
929 return error; 929 return error;
930 930
931 dev_hold(queue->dev);
932
931 if (dev->sysfs_rx_queue_group) { 933 if (dev->sysfs_rx_queue_group) {
932 error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); 934 error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
933 if (error) { 935 if (error) {
@@ -937,7 +939,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
937 } 939 }
938 940
939 kobject_uevent(kobj, KOBJ_ADD); 941 kobject_uevent(kobj, KOBJ_ADD);
940 dev_hold(queue->dev);
941 942
942 return error; 943 return error;
943} 944}
@@ -1464,6 +1465,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
1464 if (error) 1465 if (error)
1465 return error; 1466 return error;
1466 1467
1468 dev_hold(queue->dev);
1469
1467#ifdef CONFIG_BQL 1470#ifdef CONFIG_BQL
1468 error = sysfs_create_group(kobj, &dql_group); 1471 error = sysfs_create_group(kobj, &dql_group);
1469 if (error) { 1472 if (error) {
@@ -1473,7 +1476,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
1473#endif 1476#endif
1474 1477
1475 kobject_uevent(kobj, KOBJ_ADD); 1478 kobject_uevent(kobj, KOBJ_ADD);
1476 dev_hold(queue->dev);
1477 1479
1478 return 0; 1480 return 0;
1479} 1481}
@@ -1745,16 +1747,20 @@ int netdev_register_kobject(struct net_device *ndev)
1745 1747
1746 error = device_add(dev); 1748 error = device_add(dev);
1747 if (error) 1749 if (error)
1748 return error; 1750 goto error_put_device;
1749 1751
1750 error = register_queue_kobjects(ndev); 1752 error = register_queue_kobjects(ndev);
1751 if (error) { 1753 if (error)
1752 device_del(dev); 1754 goto error_device_del;
1753 return error;
1754 }
1755 1755
1756 pm_runtime_set_memalloc_noio(dev, true); 1756 pm_runtime_set_memalloc_noio(dev, true);
1757 1757
1758 return 0;
1759
1760error_device_del:
1761 device_del(dev);
1762error_put_device:
1763 put_device(dev);
1758 return error; 1764 return error;
1759} 1765}
1760 1766
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index d5740bad5b18..57d84e9b7b6f 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
436 newnp->ipv6_mc_list = NULL; 436 newnp->ipv6_mc_list = NULL;
437 newnp->ipv6_ac_list = NULL; 437 newnp->ipv6_ac_list = NULL;
438 newnp->ipv6_fl_list = NULL; 438 newnp->ipv6_fl_list = NULL;
439 newnp->mcast_oif = inet6_iif(skb); 439 newnp->mcast_oif = inet_iif(skb);
440 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 440 newnp->mcast_hops = ip_hdr(skb)->ttl;
441 441
442 /* 442 /*
443 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 443 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/ipv6/netfilter/ip6t_srh.c b/net/ipv6/netfilter/ip6t_srh.c
index 1059894a6f4c..4cb83fb69844 100644
--- a/net/ipv6/netfilter/ip6t_srh.c
+++ b/net/ipv6/netfilter/ip6t_srh.c
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
210 psidoff = srhoff + sizeof(struct ipv6_sr_hdr) + 210 psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
211 ((srh->segments_left + 1) * sizeof(struct in6_addr)); 211 ((srh->segments_left + 1) * sizeof(struct in6_addr));
212 psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid); 212 psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
213 if (!psid)
214 return false;
213 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID, 215 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
214 ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk, 216 ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
215 &srhinfo->psid_addr))) 217 &srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
223 nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) + 225 nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
224 ((srh->segments_left - 1) * sizeof(struct in6_addr)); 226 ((srh->segments_left - 1) * sizeof(struct in6_addr));
225 nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid); 227 nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
228 if (!nsid)
229 return false;
226 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID, 230 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
227 ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk, 231 ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
228 &srhinfo->nsid_addr))) 232 &srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
233 if (srhinfo->mt_flags & IP6T_SRH_LSID) { 237 if (srhinfo->mt_flags & IP6T_SRH_LSID) {
234 lsidoff = srhoff + sizeof(struct ipv6_sr_hdr); 238 lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
235 lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid); 239 lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
240 if (!lsid)
241 return false;
236 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID, 242 if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
237 ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk, 243 ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
238 &srhinfo->lsid_addr))) 244 &srhinfo->lsid_addr)))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4ef4bbdb49d4..0302e0eb07af 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1040,14 +1040,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
1040 struct rt6_info *nrt; 1040 struct rt6_info *nrt;
1041 1041
1042 if (!fib6_info_hold_safe(rt)) 1042 if (!fib6_info_hold_safe(rt))
1043 return NULL; 1043 goto fallback;
1044 1044
1045 nrt = ip6_dst_alloc(dev_net(dev), dev, flags); 1045 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1046 if (nrt) 1046 if (!nrt) {
1047 ip6_rt_copy_init(nrt, rt);
1048 else
1049 fib6_info_release(rt); 1047 fib6_info_release(rt);
1048 goto fallback;
1049 }
1050 1050
1051 ip6_rt_copy_init(nrt, rt);
1052 return nrt;
1053
1054fallback:
1055 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1056 dst_hold(&nrt->dst);
1051 return nrt; 1057 return nrt;
1052} 1058}
1053 1059
@@ -1096,10 +1102,6 @@ restart:
1096 dst_hold(&rt->dst); 1102 dst_hold(&rt->dst);
1097 } else { 1103 } else {
1098 rt = ip6_create_rt_rcu(f6i); 1104 rt = ip6_create_rt_rcu(f6i);
1099 if (!rt) {
1100 rt = net->ipv6.ip6_null_entry;
1101 dst_hold(&rt->dst);
1102 }
1103 } 1105 }
1104 1106
1105 rcu_read_unlock(); 1107 rcu_read_unlock();
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 57ef69a10889..44d431849d39 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1110 newnp->ipv6_fl_list = NULL; 1110 newnp->ipv6_fl_list = NULL;
1111 newnp->pktoptions = NULL; 1111 newnp->pktoptions = NULL;
1112 newnp->opt = NULL; 1112 newnp->opt = NULL;
1113 newnp->mcast_oif = tcp_v6_iif(skb); 1113 newnp->mcast_oif = inet_iif(skb);
1114 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 1114 newnp->mcast_hops = ip_hdr(skb)->ttl;
1115 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); 1115 newnp->rcv_flowinfo = 0;
1116 if (np->repflow) 1116 if (np->repflow)
1117 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); 1117 newnp->flow_label = 0;
1118 1118
1119 /* 1119 /*
1120 * No need to charge this sock to the relevant IPv6 refcnt debug socks count 1120 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index dda8930f20e7..f3a8557494d6 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -140,9 +140,15 @@ static int mpls_xmit(struct sk_buff *skb)
140 if (rt) 140 if (rt)
141 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway, 141 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
142 skb); 142 skb);
143 else if (rt6) 143 else if (rt6) {
144 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway, 144 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
145 skb); 145 /* 6PE (RFC 4798) */
146 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
147 skb);
148 } else
149 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
150 skb);
151 }
146 if (err) 152 if (err)
147 net_dbg_ratelimited("%s: packet transmission failed: %d\n", 153 net_dbg_ratelimited("%s: packet transmission failed: %d\n",
148 __func__, err); 154 __func__, err);
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 5d782445d2fc..bad17bba8ba7 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -251,6 +251,10 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
251 } 251 }
252 252
253 attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST); 253 attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST);
254 if (!attr) {
255 rc = -EMSGSIZE;
256 goto err;
257 }
254 rc = ncsi_write_package_info(skb, ndp, package->id); 258 rc = ncsi_write_package_info(skb, ndp, package->id);
255 if (rc) { 259 if (rc) {
256 nla_nest_cancel(skb, attr); 260 nla_nest_cancel(skb, attr);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index d43ffb09939b..6548271209a0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1007,6 +1007,7 @@ config NETFILTER_XT_TARGET_TEE
1007 depends on NETFILTER_ADVANCED 1007 depends on NETFILTER_ADVANCED
1008 depends on IPV6 || IPV6=n 1008 depends on IPV6 || IPV6=n
1009 depends on !NF_CONNTRACK || NF_CONNTRACK 1009 depends on !NF_CONNTRACK || NF_CONNTRACK
1010 depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
1010 select NF_DUP_IPV4 1011 select NF_DUP_IPV4
1011 select NF_DUP_IPV6 if IP6_NF_IPTABLES 1012 select NF_DUP_IPV6 if IP6_NF_IPTABLES
1012 ---help--- 1013 ---help---
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index f067c6b50857..39fcc1ed18f3 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -20,9 +20,9 @@
20#include <linux/udp.h> 20#include <linux/udp.h>
21#include <linux/tcp.h> 21#include <linux/tcp.h>
22#include <linux/netfilter.h> 22#include <linux/netfilter.h>
23#include <linux/netfilter_ipv4.h>
24#include <linux/netfilter_ipv6.h>
23 25
24#include <net/route.h>
25#include <net/ip6_route.h>
26#include <net/netfilter/nf_conntrack.h> 26#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_core.h> 27#include <net/netfilter/nf_conntrack_core.h>
28#include <net/netfilter/nf_conntrack_expect.h> 28#include <net/netfilter/nf_conntrack_expect.h>
@@ -871,38 +871,33 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
871 } else if (sip_external_media) { 871 } else if (sip_external_media) {
872 struct net_device *dev = skb_dst(skb)->dev; 872 struct net_device *dev = skb_dst(skb)->dev;
873 struct net *net = dev_net(dev); 873 struct net *net = dev_net(dev);
874 struct rtable *rt; 874 struct flowi fl;
875 struct flowi4 fl4 = {};
876#if IS_ENABLED(CONFIG_IPV6)
877 struct flowi6 fl6 = {};
878#endif
879 struct dst_entry *dst = NULL; 875 struct dst_entry *dst = NULL;
880 876
877 memset(&fl, 0, sizeof(fl));
878
881 switch (nf_ct_l3num(ct)) { 879 switch (nf_ct_l3num(ct)) {
882 case NFPROTO_IPV4: 880 case NFPROTO_IPV4:
883 fl4.daddr = daddr->ip; 881 fl.u.ip4.daddr = daddr->ip;
884 rt = ip_route_output_key(net, &fl4); 882 nf_ip_route(net, &dst, &fl, false);
885 if (!IS_ERR(rt))
886 dst = &rt->dst;
887 break; 883 break;
888 884
889#if IS_ENABLED(CONFIG_IPV6)
890 case NFPROTO_IPV6: 885 case NFPROTO_IPV6:
891 fl6.daddr = daddr->in6; 886 fl.u.ip6.daddr = daddr->in6;
892 dst = ip6_route_output(net, NULL, &fl6); 887 nf_ip6_route(net, &dst, &fl, false);
893 if (dst->error) {
894 dst_release(dst);
895 dst = NULL;
896 }
897 break; 888 break;
898#endif
899 } 889 }
900 890
901 /* Don't predict any conntracks when media endpoint is reachable 891 /* Don't predict any conntracks when media endpoint is reachable
902 * through the same interface as the signalling peer. 892 * through the same interface as the signalling peer.
903 */ 893 */
904 if (dst && dst->dev == dev) 894 if (dst) {
905 return NF_ACCEPT; 895 bool external_media = (dst->dev == dev);
896
897 dst_release(dst);
898 if (external_media)
899 return NF_ACCEPT;
900 }
906 } 901 }
907 902
908 /* We need to check whether the registration exists before attempting 903 /* We need to check whether the registration exists before attempting
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 513f93118604..ef7772e976cc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2806,8 +2806,11 @@ err2:
2806 nf_tables_rule_release(&ctx, rule); 2806 nf_tables_rule_release(&ctx, rule);
2807err1: 2807err1:
2808 for (i = 0; i < n; i++) { 2808 for (i = 0; i < n; i++) {
2809 if (info[i].ops != NULL) 2809 if (info[i].ops) {
2810 module_put(info[i].ops->type->owner); 2810 module_put(info[i].ops->type->owner);
2811 if (info[i].ops->type->release_ops)
2812 info[i].ops->type->release_ops(info[i].ops);
2813 }
2811 } 2814 }
2812 kvfree(info); 2815 kvfree(info);
2813 return err; 2816 return err;
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 457a9ceb46af..8dfa798ea683 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -65,21 +65,34 @@ nla_put_failure:
65 return -1; 65 return -1;
66} 66}
67 67
68static void nft_objref_destroy(const struct nft_ctx *ctx, 68static void nft_objref_deactivate(const struct nft_ctx *ctx,
69 const struct nft_expr *expr) 69 const struct nft_expr *expr,
70 enum nft_trans_phase phase)
70{ 71{
71 struct nft_object *obj = nft_objref_priv(expr); 72 struct nft_object *obj = nft_objref_priv(expr);
72 73
74 if (phase == NFT_TRANS_COMMIT)
75 return;
76
73 obj->use--; 77 obj->use--;
74} 78}
75 79
80static void nft_objref_activate(const struct nft_ctx *ctx,
81 const struct nft_expr *expr)
82{
83 struct nft_object *obj = nft_objref_priv(expr);
84
85 obj->use++;
86}
87
76static struct nft_expr_type nft_objref_type; 88static struct nft_expr_type nft_objref_type;
77static const struct nft_expr_ops nft_objref_ops = { 89static const struct nft_expr_ops nft_objref_ops = {
78 .type = &nft_objref_type, 90 .type = &nft_objref_type,
79 .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)), 91 .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
80 .eval = nft_objref_eval, 92 .eval = nft_objref_eval,
81 .init = nft_objref_init, 93 .init = nft_objref_init,
82 .destroy = nft_objref_destroy, 94 .activate = nft_objref_activate,
95 .deactivate = nft_objref_deactivate,
83 .dump = nft_objref_dump, 96 .dump = nft_objref_dump,
84}; 97};
85 98
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index f8092926f704..a340cd8a751b 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -233,5 +233,5 @@ module_exit(nft_redir_module_exit);
233 233
234MODULE_LICENSE("GPL"); 234MODULE_LICENSE("GPL");
235MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>"); 235MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
236MODULE_ALIAS_NFT_AF_EXPR(AF_INET4, "redir"); 236MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
237MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir"); 237MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index fa61208371f8..321a0036fdf5 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
308 else if (d > 0) 308 else if (d > 0)
309 parent = parent->rb_right; 309 parent = parent->rb_right;
310 else { 310 else {
311 if (!nft_set_elem_active(&rbe->ext, genmask)) {
312 parent = parent->rb_left;
313 continue;
314 }
315 if (nft_rbtree_interval_end(rbe) && 311 if (nft_rbtree_interval_end(rbe) &&
316 !nft_rbtree_interval_end(this)) { 312 !nft_rbtree_interval_end(this)) {
317 parent = parent->rb_left; 313 parent = parent->rb_left;
@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
320 nft_rbtree_interval_end(this)) { 316 nft_rbtree_interval_end(this)) {
321 parent = parent->rb_right; 317 parent = parent->rb_right;
322 continue; 318 continue;
319 } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
320 parent = parent->rb_left;
321 continue;
323 } 322 }
324 nft_rbtree_flush(net, set, rbe); 323 nft_rbtree_flush(net, set, rbe);
325 return rbe; 324 return rbe;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 25eeb6d2a75a..f0ec068e1d02 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
366 start, end + 1, GFP_KERNEL); 366 start, end + 1, GFP_KERNEL);
367 if (family->id < 0) { 367 if (family->id < 0) {
368 err = family->id; 368 err = family->id;
369 goto errout_locked; 369 goto errout_free;
370 } 370 }
371 371
372 err = genl_validate_assign_mc_groups(family); 372 err = genl_validate_assign_mc_groups(family);
@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
385 385
386errout_remove: 386errout_remove:
387 idr_remove(&genl_fam_idr, family->id); 387 idr_remove(&genl_fam_idr, family->id);
388errout_free:
388 kfree(family->attrbuf); 389 kfree(family->attrbuf);
389errout_locked: 390errout_locked:
390 genl_unlock_all(); 391 genl_unlock_all();
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ae296273ce3d..17dcd0b5eb32 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -726,6 +726,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
726 llcp_sock->service_name = kmemdup(addr->service_name, 726 llcp_sock->service_name = kmemdup(addr->service_name,
727 llcp_sock->service_name_len, 727 llcp_sock->service_name_len,
728 GFP_KERNEL); 728 GFP_KERNEL);
729 if (!llcp_sock->service_name) {
730 ret = -ENOMEM;
731 goto sock_llcp_release;
732 }
729 733
730 nfc_llcp_sock_link(&local->connecting_sockets, sk); 734 nfc_llcp_sock_link(&local->connecting_sockets, sk);
731 735
@@ -745,10 +749,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
745 return ret; 749 return ret;
746 750
747sock_unlink: 751sock_unlink:
748 nfc_llcp_put_ssap(local, llcp_sock->ssap);
749
750 nfc_llcp_sock_unlink(&local->connecting_sockets, sk); 752 nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
751 753
754sock_llcp_release:
755 nfc_llcp_put_ssap(local, llcp_sock->ssap);
756
752put_dev: 757put_dev:
753 nfc_put_device(dev); 758 nfc_put_device(dev);
754 759
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6679e96ab1dc..9dd158ab51b3 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -448,6 +448,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
448 448
449 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 449 upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
450 0, upcall_info->cmd); 450 0, upcall_info->cmd);
451 if (!upcall) {
452 err = -EINVAL;
453 goto out;
454 }
451 upcall->dp_ifindex = dp_ifindex; 455 upcall->dp_ifindex = dp_ifindex;
452 456
453 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb); 457 err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
@@ -460,6 +464,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
460 464
461 if (upcall_info->egress_tun_info) { 465 if (upcall_info->egress_tun_info) {
462 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); 466 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
467 if (!nla) {
468 err = -EMSGSIZE;
469 goto out;
470 }
463 err = ovs_nla_put_tunnel_info(user_skb, 471 err = ovs_nla_put_tunnel_info(user_skb,
464 upcall_info->egress_tun_info); 472 upcall_info->egress_tun_info);
465 BUG_ON(err); 473 BUG_ON(err);
@@ -468,6 +476,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
468 476
469 if (upcall_info->actions_len) { 477 if (upcall_info->actions_len) {
470 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS); 478 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
479 if (!nla) {
480 err = -EMSGSIZE;
481 goto out;
482 }
471 err = ovs_nla_put_actions(upcall_info->actions, 483 err = ovs_nla_put_actions(upcall_info->actions,
472 upcall_info->actions_len, 484 upcall_info->actions_len,
473 user_skb); 485 user_skb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8376bc1c1508..9419c5cf4de5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1852,7 +1852,8 @@ oom:
1852 1852
1853static void packet_parse_headers(struct sk_buff *skb, struct socket *sock) 1853static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1854{ 1854{
1855 if (!skb->protocol && sock->type == SOCK_RAW) { 1855 if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1856 sock->type == SOCK_RAW) {
1856 skb_reset_mac_header(skb); 1857 skb_reset_mac_header(skb);
1857 skb->protocol = dev_parse_header_protocol(skb); 1858 skb->protocol = dev_parse_header_protocol(skb);
1858 } 1859 }
@@ -3243,7 +3244,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3243 } 3244 }
3244 3245
3245 mutex_lock(&net->packet.sklist_lock); 3246 mutex_lock(&net->packet.sklist_lock);
3246 sk_add_node_rcu(sk, &net->packet.sklist); 3247 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3247 mutex_unlock(&net->packet.sklist_lock); 3248 mutex_unlock(&net->packet.sklist_lock);
3248 3249
3249 preempt_disable(); 3250 preempt_disable();
@@ -4209,7 +4210,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4209 struct pgv *pg_vec; 4210 struct pgv *pg_vec;
4210 int i; 4211 int i;
4211 4212
4212 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL); 4213 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4213 if (unlikely(!pg_vec)) 4214 if (unlikely(!pg_vec))
4214 goto out; 4215 goto out;
4215 4216
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7ca57741b2fb..7849f286bb93 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
105 struct sk_buff *skb; 105 struct sk_buff *skb;
106 unsigned char *dptr; 106 unsigned char *dptr;
107 unsigned char lci1, lci2; 107 unsigned char lci1, lci2;
108 char buffer[100]; 108 int maxfaclen = 0;
109 int len, faclen = 0; 109 int len, faclen;
110 int reserve;
110 111
111 len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1; 112 reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
113 len = ROSE_MIN_LEN;
112 114
113 switch (frametype) { 115 switch (frametype) {
114 case ROSE_CALL_REQUEST: 116 case ROSE_CALL_REQUEST:
115 len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; 117 len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
116 faclen = rose_create_facilities(buffer, rose); 118 maxfaclen = 256;
117 len += faclen;
118 break; 119 break;
119 case ROSE_CALL_ACCEPTED: 120 case ROSE_CALL_ACCEPTED:
120 case ROSE_CLEAR_REQUEST: 121 case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
123 break; 124 break;
124 } 125 }
125 126
126 if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) 127 skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
128 if (!skb)
127 return; 129 return;
128 130
129 /* 131 /*
130 * Space for AX.25 header and PID. 132 * Space for AX.25 header and PID.
131 */ 133 */
132 skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1); 134 skb_reserve(skb, reserve);
133 135
134 dptr = skb_put(skb, skb_tailroom(skb)); 136 dptr = skb_put(skb, len);
135 137
136 lci1 = (rose->lci >> 8) & 0x0F; 138 lci1 = (rose->lci >> 8) & 0x0F;
137 lci2 = (rose->lci >> 0) & 0xFF; 139 lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
146 dptr += ROSE_ADDR_LEN; 148 dptr += ROSE_ADDR_LEN;
147 memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); 149 memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
148 dptr += ROSE_ADDR_LEN; 150 dptr += ROSE_ADDR_LEN;
149 memcpy(dptr, buffer, faclen); 151 faclen = rose_create_facilities(dptr, rose);
152 skb_put(skb, faclen);
150 dptr += faclen; 153 dptr += faclen;
151 break; 154 break;
152 155
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 736aa9281100..004c762c2e8d 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -335,7 +335,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
335 struct kvec iov[2]; 335 struct kvec iov[2];
336 rxrpc_serial_t serial; 336 rxrpc_serial_t serial;
337 size_t len; 337 size_t len;
338 bool lost = false;
339 int ret, opt; 338 int ret, opt;
340 339
341 _enter(",{%d}", skb->len); 340 _enter(",{%d}", skb->len);
@@ -393,14 +392,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
393 static int lose; 392 static int lose;
394 if ((lose++ & 7) == 7) { 393 if ((lose++ & 7) == 7) {
395 ret = 0; 394 ret = 0;
396 lost = true; 395 trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
396 whdr.flags, retrans, true);
397 goto done;
397 } 398 }
398 } 399 }
399 400
400 trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, 401 trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
401 retrans, lost); 402 false);
402 if (lost)
403 goto done;
404 403
405 /* send the packet with the don't fragment bit set if we currently 404 /* send the packet with the don't fragment bit set if we currently
406 * think it's small enough */ 405 * think it's small enough */
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 1b9afdee5ba9..5c02ad97ef23 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -358,8 +358,7 @@ config NET_SCH_PIE
358 help 358 help
359 Say Y here if you want to use the Proportional Integral controller 359 Say Y here if you want to use the Proportional Integral controller
360 Enhanced scheduler packet scheduling algorithm. 360 Enhanced scheduler packet scheduling algorithm.
361 For more information, please see 361 For more information, please see https://tools.ietf.org/html/rfc8033
362 http://tools.ietf.org/html/draft-pan-tsvwg-pie-00
363 362
364 To compile this driver as a module, choose M here: the module 363 To compile this driver as a module, choose M here: the module
365 will be called sch_pie. 364 will be called sch_pie.
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index aecf1bf233c8..5a87e271d35a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -28,27 +28,10 @@
28#include <net/act_api.h> 28#include <net/act_api.h>
29#include <net/netlink.h> 29#include <net/netlink.h>
30 30
31static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
32{
33 u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
34
35 if (!tp)
36 return -EINVAL;
37 a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
38 if (!a->goto_chain)
39 return -ENOMEM;
40 return 0;
41}
42
43static void tcf_action_goto_chain_fini(struct tc_action *a)
44{
45 tcf_chain_put_by_act(a->goto_chain);
46}
47
48static void tcf_action_goto_chain_exec(const struct tc_action *a, 31static void tcf_action_goto_chain_exec(const struct tc_action *a,
49 struct tcf_result *res) 32 struct tcf_result *res)
50{ 33{
51 const struct tcf_chain *chain = a->goto_chain; 34 const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
52 35
53 res->goto_tp = rcu_dereference_bh(chain->filter_chain); 36 res->goto_tp = rcu_dereference_bh(chain->filter_chain);
54} 37}
@@ -71,6 +54,51 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
71 call_rcu(&old->rcu, tcf_free_cookie_rcu); 54 call_rcu(&old->rcu, tcf_free_cookie_rcu);
72} 55}
73 56
57int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
58 struct tcf_chain **newchain,
59 struct netlink_ext_ack *extack)
60{
61 int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
62 u32 chain_index;
63
64 if (!opcode)
65 ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
66 else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
67 ret = 0;
68 if (ret) {
69 NL_SET_ERR_MSG(extack, "invalid control action");
70 goto end;
71 }
72
73 if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
74 chain_index = action & TC_ACT_EXT_VAL_MASK;
75 if (!tp || !newchain) {
76 ret = -EINVAL;
77 NL_SET_ERR_MSG(extack,
78 "can't goto NULL proto/chain");
79 goto end;
80 }
81 *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
82 if (!*newchain) {
83 ret = -ENOMEM;
84 NL_SET_ERR_MSG(extack,
85 "can't allocate goto_chain");
86 }
87 }
88end:
89 return ret;
90}
91EXPORT_SYMBOL(tcf_action_check_ctrlact);
92
93struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
94 struct tcf_chain *goto_chain)
95{
96 a->tcfa_action = action;
97 rcu_swap_protected(a->goto_chain, goto_chain, 1);
98 return goto_chain;
99}
100EXPORT_SYMBOL(tcf_action_set_ctrlact);
101
74/* XXX: For standalone actions, we don't need a RCU grace period either, because 102/* XXX: For standalone actions, we don't need a RCU grace period either, because
75 * actions are always connected to filters and filters are already destroyed in 103 * actions are always connected to filters and filters are already destroyed in
76 * RCU callbacks, so after a RCU grace period actions are already disconnected 104 * RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -78,13 +106,15 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
78 */ 106 */
79static void free_tcf(struct tc_action *p) 107static void free_tcf(struct tc_action *p)
80{ 108{
109 struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
110
81 free_percpu(p->cpu_bstats); 111 free_percpu(p->cpu_bstats);
82 free_percpu(p->cpu_bstats_hw); 112 free_percpu(p->cpu_bstats_hw);
83 free_percpu(p->cpu_qstats); 113 free_percpu(p->cpu_qstats);
84 114
85 tcf_set_action_cookie(&p->act_cookie, NULL); 115 tcf_set_action_cookie(&p->act_cookie, NULL);
86 if (p->goto_chain) 116 if (chain)
87 tcf_action_goto_chain_fini(p); 117 tcf_chain_put_by_act(chain);
88 118
89 kfree(p); 119 kfree(p);
90} 120}
@@ -654,6 +684,10 @@ repeat:
654 return TC_ACT_OK; 684 return TC_ACT_OK;
655 } 685 }
656 } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) { 686 } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
687 if (unlikely(!rcu_access_pointer(a->goto_chain))) {
688 net_warn_ratelimited("can't go to NULL chain!\n");
689 return TC_ACT_SHOT;
690 }
657 tcf_action_goto_chain_exec(a, res); 691 tcf_action_goto_chain_exec(a, res);
658 } 692 }
659 693
@@ -800,15 +834,6 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
800 return c; 834 return c;
801} 835}
802 836
803static bool tcf_action_valid(int action)
804{
805 int opcode = TC_ACT_EXT_OPCODE(action);
806
807 if (!opcode)
808 return action <= TC_ACT_VALUE_MAX;
809 return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
810}
811
812struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, 837struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
813 struct nlattr *nla, struct nlattr *est, 838 struct nlattr *nla, struct nlattr *est,
814 char *name, int ovr, int bind, 839 char *name, int ovr, int bind,
@@ -890,10 +915,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
890 /* backward compatibility for policer */ 915 /* backward compatibility for policer */
891 if (name == NULL) 916 if (name == NULL)
892 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind, 917 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
893 rtnl_held, extack); 918 rtnl_held, tp, extack);
894 else 919 else
895 err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held, 920 err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
896 extack); 921 tp, extack);
897 if (err < 0) 922 if (err < 0)
898 goto err_mod; 923 goto err_mod;
899 924
@@ -907,18 +932,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
907 if (err != ACT_P_CREATED) 932 if (err != ACT_P_CREATED)
908 module_put(a_o->owner); 933 module_put(a_o->owner);
909 934
910 if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { 935 if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
911 err = tcf_action_goto_chain_init(a, tp); 936 !rcu_access_pointer(a->goto_chain)) {
912 if (err) {
913 tcf_action_destroy_1(a, bind);
914 NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
915 return ERR_PTR(err);
916 }
917 }
918
919 if (!tcf_action_valid(a->tcfa_action)) {
920 tcf_action_destroy_1(a, bind); 937 tcf_action_destroy_1(a, bind);
921 NL_SET_ERR_MSG(extack, "Invalid control action value"); 938 NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
922 return ERR_PTR(-EINVAL); 939 return ERR_PTR(-EINVAL);
923 } 940 }
924 941
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index aa5c38d11a30..3841156aa09f 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -17,6 +17,7 @@
17 17
18#include <net/netlink.h> 18#include <net/netlink.h>
19#include <net/pkt_sched.h> 19#include <net/pkt_sched.h>
20#include <net/pkt_cls.h>
20 21
21#include <linux/tc_act/tc_bpf.h> 22#include <linux/tc_act/tc_bpf.h>
22#include <net/tc_act/tc_bpf.h> 23#include <net/tc_act/tc_bpf.h>
@@ -278,10 +279,11 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
278static int tcf_bpf_init(struct net *net, struct nlattr *nla, 279static int tcf_bpf_init(struct net *net, struct nlattr *nla,
279 struct nlattr *est, struct tc_action **act, 280 struct nlattr *est, struct tc_action **act,
280 int replace, int bind, bool rtnl_held, 281 int replace, int bind, bool rtnl_held,
281 struct netlink_ext_ack *extack) 282 struct tcf_proto *tp, struct netlink_ext_ack *extack)
282{ 283{
283 struct tc_action_net *tn = net_generic(net, bpf_net_id); 284 struct tc_action_net *tn = net_generic(net, bpf_net_id);
284 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; 285 struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
286 struct tcf_chain *goto_ch = NULL;
285 struct tcf_bpf_cfg cfg, old; 287 struct tcf_bpf_cfg cfg, old;
286 struct tc_act_bpf *parm; 288 struct tc_act_bpf *parm;
287 struct tcf_bpf *prog; 289 struct tcf_bpf *prog;
@@ -323,12 +325,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
323 return ret; 325 return ret;
324 } 326 }
325 327
328 ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
329 if (ret < 0)
330 goto release_idr;
331
326 is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS]; 332 is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
327 is_ebpf = tb[TCA_ACT_BPF_FD]; 333 is_ebpf = tb[TCA_ACT_BPF_FD];
328 334
329 if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) { 335 if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
330 ret = -EINVAL; 336 ret = -EINVAL;
331 goto out; 337 goto put_chain;
332 } 338 }
333 339
334 memset(&cfg, 0, sizeof(cfg)); 340 memset(&cfg, 0, sizeof(cfg));
@@ -336,7 +342,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
336 ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) : 342 ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
337 tcf_bpf_init_from_efd(tb, &cfg); 343 tcf_bpf_init_from_efd(tb, &cfg);
338 if (ret < 0) 344 if (ret < 0)
339 goto out; 345 goto put_chain;
340 346
341 prog = to_bpf(*act); 347 prog = to_bpf(*act);
342 348
@@ -350,10 +356,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
350 if (cfg.bpf_num_ops) 356 if (cfg.bpf_num_ops)
351 prog->bpf_num_ops = cfg.bpf_num_ops; 357 prog->bpf_num_ops = cfg.bpf_num_ops;
352 358
353 prog->tcf_action = parm->action; 359 goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
354 rcu_assign_pointer(prog->filter, cfg.filter); 360 rcu_assign_pointer(prog->filter, cfg.filter);
355 spin_unlock_bh(&prog->tcf_lock); 361 spin_unlock_bh(&prog->tcf_lock);
356 362
363 if (goto_ch)
364 tcf_chain_put_by_act(goto_ch);
365
357 if (res == ACT_P_CREATED) { 366 if (res == ACT_P_CREATED) {
358 tcf_idr_insert(tn, *act); 367 tcf_idr_insert(tn, *act);
359 } else { 368 } else {
@@ -363,9 +372,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
363 } 372 }
364 373
365 return res; 374 return res;
366out:
367 tcf_idr_release(*act, bind);
368 375
376put_chain:
377 if (goto_ch)
378 tcf_chain_put_by_act(goto_ch);
379
380release_idr:
381 tcf_idr_release(*act, bind);
369 return ret; 382 return ret;
370} 383}
371 384
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 5d24993cccfe..32ae0cd6e31c 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -21,6 +21,7 @@
21#include <net/netlink.h> 21#include <net/netlink.h>
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <net/act_api.h> 23#include <net/act_api.h>
24#include <net/pkt_cls.h>
24#include <uapi/linux/tc_act/tc_connmark.h> 25#include <uapi/linux/tc_act/tc_connmark.h>
25#include <net/tc_act/tc_connmark.h> 26#include <net/tc_act/tc_connmark.h>
26 27
@@ -97,13 +98,15 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
97static int tcf_connmark_init(struct net *net, struct nlattr *nla, 98static int tcf_connmark_init(struct net *net, struct nlattr *nla,
98 struct nlattr *est, struct tc_action **a, 99 struct nlattr *est, struct tc_action **a,
99 int ovr, int bind, bool rtnl_held, 100 int ovr, int bind, bool rtnl_held,
101 struct tcf_proto *tp,
100 struct netlink_ext_ack *extack) 102 struct netlink_ext_ack *extack)
101{ 103{
102 struct tc_action_net *tn = net_generic(net, connmark_net_id); 104 struct tc_action_net *tn = net_generic(net, connmark_net_id);
103 struct nlattr *tb[TCA_CONNMARK_MAX + 1]; 105 struct nlattr *tb[TCA_CONNMARK_MAX + 1];
106 struct tcf_chain *goto_ch = NULL;
104 struct tcf_connmark_info *ci; 107 struct tcf_connmark_info *ci;
105 struct tc_connmark *parm; 108 struct tc_connmark *parm;
106 int ret = 0; 109 int ret = 0, err;
107 110
108 if (!nla) 111 if (!nla)
109 return -EINVAL; 112 return -EINVAL;
@@ -128,7 +131,11 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
128 } 131 }
129 132
130 ci = to_connmark(*a); 133 ci = to_connmark(*a);
131 ci->tcf_action = parm->action; 134 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
135 extack);
136 if (err < 0)
137 goto release_idr;
138 tcf_action_set_ctrlact(*a, parm->action, goto_ch);
132 ci->net = net; 139 ci->net = net;
133 ci->zone = parm->zone; 140 ci->zone = parm->zone;
134 141
@@ -142,15 +149,24 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
142 tcf_idr_release(*a, bind); 149 tcf_idr_release(*a, bind);
143 return -EEXIST; 150 return -EEXIST;
144 } 151 }
152 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
153 extack);
154 if (err < 0)
155 goto release_idr;
145 /* replacing action and zone */ 156 /* replacing action and zone */
146 spin_lock_bh(&ci->tcf_lock); 157 spin_lock_bh(&ci->tcf_lock);
147 ci->tcf_action = parm->action; 158 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
148 ci->zone = parm->zone; 159 ci->zone = parm->zone;
149 spin_unlock_bh(&ci->tcf_lock); 160 spin_unlock_bh(&ci->tcf_lock);
161 if (goto_ch)
162 tcf_chain_put_by_act(goto_ch);
150 ret = 0; 163 ret = 0;
151 } 164 }
152 165
153 return ret; 166 return ret;
167release_idr:
168 tcf_idr_release(*a, bind);
169 return err;
154} 170}
155 171
156static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a, 172static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index c79aca29505e..0c77e7bdf6d5 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -33,6 +33,7 @@
33#include <net/sctp/checksum.h> 33#include <net/sctp/checksum.h>
34 34
35#include <net/act_api.h> 35#include <net/act_api.h>
36#include <net/pkt_cls.h>
36 37
37#include <linux/tc_act/tc_csum.h> 38#include <linux/tc_act/tc_csum.h>
38#include <net/tc_act/tc_csum.h> 39#include <net/tc_act/tc_csum.h>
@@ -46,12 +47,13 @@ static struct tc_action_ops act_csum_ops;
46 47
47static int tcf_csum_init(struct net *net, struct nlattr *nla, 48static int tcf_csum_init(struct net *net, struct nlattr *nla,
48 struct nlattr *est, struct tc_action **a, int ovr, 49 struct nlattr *est, struct tc_action **a, int ovr,
49 int bind, bool rtnl_held, 50 int bind, bool rtnl_held, struct tcf_proto *tp,
50 struct netlink_ext_ack *extack) 51 struct netlink_ext_ack *extack)
51{ 52{
52 struct tc_action_net *tn = net_generic(net, csum_net_id); 53 struct tc_action_net *tn = net_generic(net, csum_net_id);
53 struct tcf_csum_params *params_new; 54 struct tcf_csum_params *params_new;
54 struct nlattr *tb[TCA_CSUM_MAX + 1]; 55 struct nlattr *tb[TCA_CSUM_MAX + 1];
56 struct tcf_chain *goto_ch = NULL;
55 struct tc_csum *parm; 57 struct tc_csum *parm;
56 struct tcf_csum *p; 58 struct tcf_csum *p;
57 int ret = 0, err; 59 int ret = 0, err;
@@ -87,21 +89,27 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
87 return err; 89 return err;
88 } 90 }
89 91
92 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
93 if (err < 0)
94 goto release_idr;
95
90 p = to_tcf_csum(*a); 96 p = to_tcf_csum(*a);
91 97
92 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); 98 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
93 if (unlikely(!params_new)) { 99 if (unlikely(!params_new)) {
94 tcf_idr_release(*a, bind); 100 err = -ENOMEM;
95 return -ENOMEM; 101 goto put_chain;
96 } 102 }
97 params_new->update_flags = parm->update_flags; 103 params_new->update_flags = parm->update_flags;
98 104
99 spin_lock_bh(&p->tcf_lock); 105 spin_lock_bh(&p->tcf_lock);
100 p->tcf_action = parm->action; 106 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
101 rcu_swap_protected(p->params, params_new, 107 rcu_swap_protected(p->params, params_new,
102 lockdep_is_held(&p->tcf_lock)); 108 lockdep_is_held(&p->tcf_lock));
103 spin_unlock_bh(&p->tcf_lock); 109 spin_unlock_bh(&p->tcf_lock);
104 110
111 if (goto_ch)
112 tcf_chain_put_by_act(goto_ch);
105 if (params_new) 113 if (params_new)
106 kfree_rcu(params_new, rcu); 114 kfree_rcu(params_new, rcu);
107 115
@@ -109,6 +117,12 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
109 tcf_idr_insert(tn, *a); 117 tcf_idr_insert(tn, *a);
110 118
111 return ret; 119 return ret;
120put_chain:
121 if (goto_ch)
122 tcf_chain_put_by_act(goto_ch);
123release_idr:
124 tcf_idr_release(*a, bind);
125 return err;
112} 126}
113 127
114/** 128/**
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 93da0004e9f4..e540e31069d7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <net/netlink.h> 21#include <net/netlink.h>
22#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
23#include <net/pkt_cls.h>
23#include <linux/tc_act/tc_gact.h> 24#include <linux/tc_act/tc_gact.h>
24#include <net/tc_act/tc_gact.h> 25#include <net/tc_act/tc_gact.h>
25 26
@@ -57,10 +58,11 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
57static int tcf_gact_init(struct net *net, struct nlattr *nla, 58static int tcf_gact_init(struct net *net, struct nlattr *nla,
58 struct nlattr *est, struct tc_action **a, 59 struct nlattr *est, struct tc_action **a,
59 int ovr, int bind, bool rtnl_held, 60 int ovr, int bind, bool rtnl_held,
60 struct netlink_ext_ack *extack) 61 struct tcf_proto *tp, struct netlink_ext_ack *extack)
61{ 62{
62 struct tc_action_net *tn = net_generic(net, gact_net_id); 63 struct tc_action_net *tn = net_generic(net, gact_net_id);
63 struct nlattr *tb[TCA_GACT_MAX + 1]; 64 struct nlattr *tb[TCA_GACT_MAX + 1];
65 struct tcf_chain *goto_ch = NULL;
64 struct tc_gact *parm; 66 struct tc_gact *parm;
65 struct tcf_gact *gact; 67 struct tcf_gact *gact;
66 int ret = 0; 68 int ret = 0;
@@ -116,10 +118,13 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
116 return err; 118 return err;
117 } 119 }
118 120
121 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
122 if (err < 0)
123 goto release_idr;
119 gact = to_gact(*a); 124 gact = to_gact(*a);
120 125
121 spin_lock_bh(&gact->tcf_lock); 126 spin_lock_bh(&gact->tcf_lock);
122 gact->tcf_action = parm->action; 127 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
123#ifdef CONFIG_GACT_PROB 128#ifdef CONFIG_GACT_PROB
124 if (p_parm) { 129 if (p_parm) {
125 gact->tcfg_paction = p_parm->paction; 130 gact->tcfg_paction = p_parm->paction;
@@ -133,9 +138,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
133#endif 138#endif
134 spin_unlock_bh(&gact->tcf_lock); 139 spin_unlock_bh(&gact->tcf_lock);
135 140
141 if (goto_ch)
142 tcf_chain_put_by_act(goto_ch);
143
136 if (ret == ACT_P_CREATED) 144 if (ret == ACT_P_CREATED)
137 tcf_idr_insert(tn, *a); 145 tcf_idr_insert(tn, *a);
138 return ret; 146 return ret;
147release_idr:
148 tcf_idr_release(*a, bind);
149 return err;
139} 150}
140 151
141static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, 152static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 9b1f2b3990ee..31c6ffb6abe7 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -29,6 +29,7 @@
29#include <net/net_namespace.h> 29#include <net/net_namespace.h>
30#include <net/netlink.h> 30#include <net/netlink.h>
31#include <net/pkt_sched.h> 31#include <net/pkt_sched.h>
32#include <net/pkt_cls.h>
32#include <uapi/linux/tc_act/tc_ife.h> 33#include <uapi/linux/tc_act/tc_ife.h>
33#include <net/tc_act/tc_ife.h> 34#include <net/tc_act/tc_ife.h>
34#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
@@ -469,11 +470,12 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
469static int tcf_ife_init(struct net *net, struct nlattr *nla, 470static int tcf_ife_init(struct net *net, struct nlattr *nla,
470 struct nlattr *est, struct tc_action **a, 471 struct nlattr *est, struct tc_action **a,
471 int ovr, int bind, bool rtnl_held, 472 int ovr, int bind, bool rtnl_held,
472 struct netlink_ext_ack *extack) 473 struct tcf_proto *tp, struct netlink_ext_ack *extack)
473{ 474{
474 struct tc_action_net *tn = net_generic(net, ife_net_id); 475 struct tc_action_net *tn = net_generic(net, ife_net_id);
475 struct nlattr *tb[TCA_IFE_MAX + 1]; 476 struct nlattr *tb[TCA_IFE_MAX + 1];
476 struct nlattr *tb2[IFE_META_MAX + 1]; 477 struct nlattr *tb2[IFE_META_MAX + 1];
478 struct tcf_chain *goto_ch = NULL;
477 struct tcf_ife_params *p; 479 struct tcf_ife_params *p;
478 struct tcf_ife_info *ife; 480 struct tcf_ife_info *ife;
479 u16 ife_type = ETH_P_IFE; 481 u16 ife_type = ETH_P_IFE;
@@ -531,6 +533,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
531 } 533 }
532 534
533 ife = to_ife(*a); 535 ife = to_ife(*a);
536 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
537 if (err < 0)
538 goto release_idr;
539
534 p->flags = parm->flags; 540 p->flags = parm->flags;
535 541
536 if (parm->flags & IFE_ENCODE) { 542 if (parm->flags & IFE_ENCODE) {
@@ -563,13 +569,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
563 if (tb[TCA_IFE_METALST]) { 569 if (tb[TCA_IFE_METALST]) {
564 err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], 570 err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
565 NULL, NULL); 571 NULL, NULL);
566 if (err) { 572 if (err)
567metadata_parse_err: 573 goto metadata_parse_err;
568 tcf_idr_release(*a, bind);
569 kfree(p);
570 return err;
571 }
572
573 err = populate_metalist(ife, tb2, exists, rtnl_held); 574 err = populate_metalist(ife, tb2, exists, rtnl_held);
574 if (err) 575 if (err)
575 goto metadata_parse_err; 576 goto metadata_parse_err;
@@ -581,21 +582,20 @@ metadata_parse_err:
581 * going to bail out 582 * going to bail out
582 */ 583 */
583 err = use_all_metadata(ife, exists); 584 err = use_all_metadata(ife, exists);
584 if (err) { 585 if (err)
585 tcf_idr_release(*a, bind); 586 goto metadata_parse_err;
586 kfree(p);
587 return err;
588 }
589 } 587 }
590 588
591 if (exists) 589 if (exists)
592 spin_lock_bh(&ife->tcf_lock); 590 spin_lock_bh(&ife->tcf_lock);
593 ife->tcf_action = parm->action;
594 /* protected by tcf_lock when modifying existing action */ 591 /* protected by tcf_lock when modifying existing action */
592 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
595 rcu_swap_protected(ife->params, p, 1); 593 rcu_swap_protected(ife->params, p, 1);
596 594
597 if (exists) 595 if (exists)
598 spin_unlock_bh(&ife->tcf_lock); 596 spin_unlock_bh(&ife->tcf_lock);
597 if (goto_ch)
598 tcf_chain_put_by_act(goto_ch);
599 if (p) 599 if (p)
600 kfree_rcu(p, rcu); 600 kfree_rcu(p, rcu);
601 601
@@ -603,6 +603,13 @@ metadata_parse_err:
603 tcf_idr_insert(tn, *a); 603 tcf_idr_insert(tn, *a);
604 604
605 return ret; 605 return ret;
606metadata_parse_err:
607 if (goto_ch)
608 tcf_chain_put_by_act(goto_ch);
609release_idr:
610 kfree(p);
611 tcf_idr_release(*a, bind);
612 return err;
606} 613}
607 614
608static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, 615static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 98f5b6ea77b4..04a0b5c61194 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -97,7 +97,8 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
97 97
98static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla, 98static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
99 struct nlattr *est, struct tc_action **a, 99 struct nlattr *est, struct tc_action **a,
100 const struct tc_action_ops *ops, int ovr, int bind) 100 const struct tc_action_ops *ops, int ovr, int bind,
101 struct tcf_proto *tp)
101{ 102{
102 struct tc_action_net *tn = net_generic(net, id); 103 struct tc_action_net *tn = net_generic(net, id);
103 struct nlattr *tb[TCA_IPT_MAX + 1]; 104 struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -205,20 +206,20 @@ err1:
205 206
206static int tcf_ipt_init(struct net *net, struct nlattr *nla, 207static int tcf_ipt_init(struct net *net, struct nlattr *nla,
207 struct nlattr *est, struct tc_action **a, int ovr, 208 struct nlattr *est, struct tc_action **a, int ovr,
208 int bind, bool rtnl_held, 209 int bind, bool rtnl_held, struct tcf_proto *tp,
209 struct netlink_ext_ack *extack) 210 struct netlink_ext_ack *extack)
210{ 211{
211 return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr, 212 return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
212 bind); 213 bind, tp);
213} 214}
214 215
215static int tcf_xt_init(struct net *net, struct nlattr *nla, 216static int tcf_xt_init(struct net *net, struct nlattr *nla,
216 struct nlattr *est, struct tc_action **a, int ovr, 217 struct nlattr *est, struct tc_action **a, int ovr,
217 int bind, bool unlocked, 218 int bind, bool unlocked, struct tcf_proto *tp,
218 struct netlink_ext_ack *extack) 219 struct netlink_ext_ack *extack)
219{ 220{
220 return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr, 221 return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
221 bind); 222 bind, tp);
222} 223}
223 224
224static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a, 225static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 6692fd054617..17cc6bd4c57c 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -94,10 +94,12 @@ static struct tc_action_ops act_mirred_ops;
94static int tcf_mirred_init(struct net *net, struct nlattr *nla, 94static int tcf_mirred_init(struct net *net, struct nlattr *nla,
95 struct nlattr *est, struct tc_action **a, 95 struct nlattr *est, struct tc_action **a,
96 int ovr, int bind, bool rtnl_held, 96 int ovr, int bind, bool rtnl_held,
97 struct tcf_proto *tp,
97 struct netlink_ext_ack *extack) 98 struct netlink_ext_ack *extack)
98{ 99{
99 struct tc_action_net *tn = net_generic(net, mirred_net_id); 100 struct tc_action_net *tn = net_generic(net, mirred_net_id);
100 struct nlattr *tb[TCA_MIRRED_MAX + 1]; 101 struct nlattr *tb[TCA_MIRRED_MAX + 1];
102 struct tcf_chain *goto_ch = NULL;
101 bool mac_header_xmit = false; 103 bool mac_header_xmit = false;
102 struct tc_mirred *parm; 104 struct tc_mirred *parm;
103 struct tcf_mirred *m; 105 struct tcf_mirred *m;
@@ -157,18 +159,23 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
157 tcf_idr_release(*a, bind); 159 tcf_idr_release(*a, bind);
158 return -EEXIST; 160 return -EEXIST;
159 } 161 }
162
160 m = to_mirred(*a); 163 m = to_mirred(*a);
164 if (ret == ACT_P_CREATED)
165 INIT_LIST_HEAD(&m->tcfm_list);
166
167 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
168 if (err < 0)
169 goto release_idr;
161 170
162 spin_lock_bh(&m->tcf_lock); 171 spin_lock_bh(&m->tcf_lock);
163 m->tcf_action = parm->action;
164 m->tcfm_eaction = parm->eaction;
165 172
166 if (parm->ifindex) { 173 if (parm->ifindex) {
167 dev = dev_get_by_index(net, parm->ifindex); 174 dev = dev_get_by_index(net, parm->ifindex);
168 if (!dev) { 175 if (!dev) {
169 spin_unlock_bh(&m->tcf_lock); 176 spin_unlock_bh(&m->tcf_lock);
170 tcf_idr_release(*a, bind); 177 err = -ENODEV;
171 return -ENODEV; 178 goto put_chain;
172 } 179 }
173 mac_header_xmit = dev_is_mac_header_xmit(dev); 180 mac_header_xmit = dev_is_mac_header_xmit(dev);
174 rcu_swap_protected(m->tcfm_dev, dev, 181 rcu_swap_protected(m->tcfm_dev, dev,
@@ -177,7 +184,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
177 dev_put(dev); 184 dev_put(dev);
178 m->tcfm_mac_header_xmit = mac_header_xmit; 185 m->tcfm_mac_header_xmit = mac_header_xmit;
179 } 186 }
187 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
188 m->tcfm_eaction = parm->eaction;
180 spin_unlock_bh(&m->tcf_lock); 189 spin_unlock_bh(&m->tcf_lock);
190 if (goto_ch)
191 tcf_chain_put_by_act(goto_ch);
181 192
182 if (ret == ACT_P_CREATED) { 193 if (ret == ACT_P_CREATED) {
183 spin_lock(&mirred_list_lock); 194 spin_lock(&mirred_list_lock);
@@ -188,6 +199,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
188 } 199 }
189 200
190 return ret; 201 return ret;
202put_chain:
203 if (goto_ch)
204 tcf_chain_put_by_act(goto_ch);
205release_idr:
206 tcf_idr_release(*a, bind);
207 return err;
191} 208}
192 209
193static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, 210static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 543eab9193f1..e91bb8eb81ec 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -21,6 +21,7 @@
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/tc_act/tc_nat.h> 22#include <linux/tc_act/tc_nat.h>
23#include <net/act_api.h> 23#include <net/act_api.h>
24#include <net/pkt_cls.h>
24#include <net/icmp.h> 25#include <net/icmp.h>
25#include <net/ip.h> 26#include <net/ip.h>
26#include <net/netlink.h> 27#include <net/netlink.h>
@@ -38,10 +39,12 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
38 39
39static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est, 40static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
40 struct tc_action **a, int ovr, int bind, 41 struct tc_action **a, int ovr, int bind,
41 bool rtnl_held, struct netlink_ext_ack *extack) 42 bool rtnl_held, struct tcf_proto *tp,
43 struct netlink_ext_ack *extack)
42{ 44{
43 struct tc_action_net *tn = net_generic(net, nat_net_id); 45 struct tc_action_net *tn = net_generic(net, nat_net_id);
44 struct nlattr *tb[TCA_NAT_MAX + 1]; 46 struct nlattr *tb[TCA_NAT_MAX + 1];
47 struct tcf_chain *goto_ch = NULL;
45 struct tc_nat *parm; 48 struct tc_nat *parm;
46 int ret = 0, err; 49 int ret = 0, err;
47 struct tcf_nat *p; 50 struct tcf_nat *p;
@@ -76,6 +79,9 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
76 } else { 79 } else {
77 return err; 80 return err;
78 } 81 }
82 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
83 if (err < 0)
84 goto release_idr;
79 p = to_tcf_nat(*a); 85 p = to_tcf_nat(*a);
80 86
81 spin_lock_bh(&p->tcf_lock); 87 spin_lock_bh(&p->tcf_lock);
@@ -84,13 +90,18 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
84 p->mask = parm->mask; 90 p->mask = parm->mask;
85 p->flags = parm->flags; 91 p->flags = parm->flags;
86 92
87 p->tcf_action = parm->action; 93 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
88 spin_unlock_bh(&p->tcf_lock); 94 spin_unlock_bh(&p->tcf_lock);
95 if (goto_ch)
96 tcf_chain_put_by_act(goto_ch);
89 97
90 if (ret == ACT_P_CREATED) 98 if (ret == ACT_P_CREATED)
91 tcf_idr_insert(tn, *a); 99 tcf_idr_insert(tn, *a);
92 100
93 return ret; 101 return ret;
102release_idr:
103 tcf_idr_release(*a, bind);
104 return err;
94} 105}
95 106
96static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a, 107static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a80373878df7..287793abfaf9 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -23,6 +23,7 @@
23#include <linux/tc_act/tc_pedit.h> 23#include <linux/tc_act/tc_pedit.h>
24#include <net/tc_act/tc_pedit.h> 24#include <net/tc_act/tc_pedit.h>
25#include <uapi/linux/tc_act/tc_pedit.h> 25#include <uapi/linux/tc_act/tc_pedit.h>
26#include <net/pkt_cls.h>
26 27
27static unsigned int pedit_net_id; 28static unsigned int pedit_net_id;
28static struct tc_action_ops act_pedit_ops; 29static struct tc_action_ops act_pedit_ops;
@@ -138,10 +139,11 @@ nla_failure:
138static int tcf_pedit_init(struct net *net, struct nlattr *nla, 139static int tcf_pedit_init(struct net *net, struct nlattr *nla,
139 struct nlattr *est, struct tc_action **a, 140 struct nlattr *est, struct tc_action **a,
140 int ovr, int bind, bool rtnl_held, 141 int ovr, int bind, bool rtnl_held,
141 struct netlink_ext_ack *extack) 142 struct tcf_proto *tp, struct netlink_ext_ack *extack)
142{ 143{
143 struct tc_action_net *tn = net_generic(net, pedit_net_id); 144 struct tc_action_net *tn = net_generic(net, pedit_net_id);
144 struct nlattr *tb[TCA_PEDIT_MAX + 1]; 145 struct nlattr *tb[TCA_PEDIT_MAX + 1];
146 struct tcf_chain *goto_ch = NULL;
145 struct tc_pedit_key *keys = NULL; 147 struct tc_pedit_key *keys = NULL;
146 struct tcf_pedit_key_ex *keys_ex; 148 struct tcf_pedit_key_ex *keys_ex;
147 struct tc_pedit *parm; 149 struct tc_pedit *parm;
@@ -205,6 +207,11 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
205 goto out_free; 207 goto out_free;
206 } 208 }
207 209
210 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
211 if (err < 0) {
212 ret = err;
213 goto out_release;
214 }
208 p = to_pedit(*a); 215 p = to_pedit(*a);
209 spin_lock_bh(&p->tcf_lock); 216 spin_lock_bh(&p->tcf_lock);
210 217
@@ -214,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
214 if (!keys) { 221 if (!keys) {
215 spin_unlock_bh(&p->tcf_lock); 222 spin_unlock_bh(&p->tcf_lock);
216 ret = -ENOMEM; 223 ret = -ENOMEM;
217 goto out_release; 224 goto put_chain;
218 } 225 }
219 kfree(p->tcfp_keys); 226 kfree(p->tcfp_keys);
220 p->tcfp_keys = keys; 227 p->tcfp_keys = keys;
@@ -223,16 +230,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
223 memcpy(p->tcfp_keys, parm->keys, ksize); 230 memcpy(p->tcfp_keys, parm->keys, ksize);
224 231
225 p->tcfp_flags = parm->flags; 232 p->tcfp_flags = parm->flags;
226 p->tcf_action = parm->action; 233 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
227 234
228 kfree(p->tcfp_keys_ex); 235 kfree(p->tcfp_keys_ex);
229 p->tcfp_keys_ex = keys_ex; 236 p->tcfp_keys_ex = keys_ex;
230 237
231 spin_unlock_bh(&p->tcf_lock); 238 spin_unlock_bh(&p->tcf_lock);
239 if (goto_ch)
240 tcf_chain_put_by_act(goto_ch);
232 if (ret == ACT_P_CREATED) 241 if (ret == ACT_P_CREATED)
233 tcf_idr_insert(tn, *a); 242 tcf_idr_insert(tn, *a);
234 return ret; 243 return ret;
235 244
245put_chain:
246 if (goto_ch)
247 tcf_chain_put_by_act(goto_ch);
236out_release: 248out_release:
237 tcf_idr_release(*a, bind); 249 tcf_idr_release(*a, bind);
238out_free: 250out_free:
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8271a6263824..2b8581f6ab51 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <net/act_api.h> 22#include <net/act_api.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24#include <net/pkt_cls.h>
24 25
25struct tcf_police_params { 26struct tcf_police_params {
26 int tcfp_result; 27 int tcfp_result;
@@ -83,10 +84,12 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
83static int tcf_police_init(struct net *net, struct nlattr *nla, 84static int tcf_police_init(struct net *net, struct nlattr *nla,
84 struct nlattr *est, struct tc_action **a, 85 struct nlattr *est, struct tc_action **a,
85 int ovr, int bind, bool rtnl_held, 86 int ovr, int bind, bool rtnl_held,
87 struct tcf_proto *tp,
86 struct netlink_ext_ack *extack) 88 struct netlink_ext_ack *extack)
87{ 89{
88 int ret = 0, tcfp_result = TC_ACT_OK, err, size; 90 int ret = 0, tcfp_result = TC_ACT_OK, err, size;
89 struct nlattr *tb[TCA_POLICE_MAX + 1]; 91 struct nlattr *tb[TCA_POLICE_MAX + 1];
92 struct tcf_chain *goto_ch = NULL;
90 struct tc_police *parm; 93 struct tc_police *parm;
91 struct tcf_police *police; 94 struct tcf_police *police;
92 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 95 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
@@ -128,6 +131,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
128 tcf_idr_release(*a, bind); 131 tcf_idr_release(*a, bind);
129 return -EEXIST; 132 return -EEXIST;
130 } 133 }
134 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
135 if (err < 0)
136 goto release_idr;
131 137
132 police = to_police(*a); 138 police = to_police(*a);
133 if (parm->rate.rate) { 139 if (parm->rate.rate) {
@@ -213,12 +219,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
213 if (new->peak_present) 219 if (new->peak_present)
214 police->tcfp_ptoks = new->tcfp_mtu_ptoks; 220 police->tcfp_ptoks = new->tcfp_mtu_ptoks;
215 spin_unlock_bh(&police->tcfp_lock); 221 spin_unlock_bh(&police->tcfp_lock);
216 police->tcf_action = parm->action; 222 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
217 rcu_swap_protected(police->params, 223 rcu_swap_protected(police->params,
218 new, 224 new,
219 lockdep_is_held(&police->tcf_lock)); 225 lockdep_is_held(&police->tcf_lock));
220 spin_unlock_bh(&police->tcf_lock); 226 spin_unlock_bh(&police->tcf_lock);
221 227
228 if (goto_ch)
229 tcf_chain_put_by_act(goto_ch);
222 if (new) 230 if (new)
223 kfree_rcu(new, rcu); 231 kfree_rcu(new, rcu);
224 232
@@ -229,6 +237,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
229failure: 237failure:
230 qdisc_put_rtab(P_tab); 238 qdisc_put_rtab(P_tab);
231 qdisc_put_rtab(R_tab); 239 qdisc_put_rtab(R_tab);
240 if (goto_ch)
241 tcf_chain_put_by_act(goto_ch);
242release_idr:
232 tcf_idr_release(*a, bind); 243 tcf_idr_release(*a, bind);
233 return err; 244 return err;
234} 245}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 203e399e5c85..4060b0955c97 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -22,6 +22,7 @@
22#include <linux/tc_act/tc_sample.h> 22#include <linux/tc_act/tc_sample.h>
23#include <net/tc_act/tc_sample.h> 23#include <net/tc_act/tc_sample.h>
24#include <net/psample.h> 24#include <net/psample.h>
25#include <net/pkt_cls.h>
25 26
26#include <linux/if_arp.h> 27#include <linux/if_arp.h>
27 28
@@ -37,12 +38,13 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
37 38
38static int tcf_sample_init(struct net *net, struct nlattr *nla, 39static int tcf_sample_init(struct net *net, struct nlattr *nla,
39 struct nlattr *est, struct tc_action **a, int ovr, 40 struct nlattr *est, struct tc_action **a, int ovr,
40 int bind, bool rtnl_held, 41 int bind, bool rtnl_held, struct tcf_proto *tp,
41 struct netlink_ext_ack *extack) 42 struct netlink_ext_ack *extack)
42{ 43{
43 struct tc_action_net *tn = net_generic(net, sample_net_id); 44 struct tc_action_net *tn = net_generic(net, sample_net_id);
44 struct nlattr *tb[TCA_SAMPLE_MAX + 1]; 45 struct nlattr *tb[TCA_SAMPLE_MAX + 1];
45 struct psample_group *psample_group; 46 struct psample_group *psample_group;
47 struct tcf_chain *goto_ch = NULL;
46 struct tc_sample *parm; 48 struct tc_sample *parm;
47 u32 psample_group_num; 49 u32 psample_group_num;
48 struct tcf_sample *s; 50 struct tcf_sample *s;
@@ -79,18 +81,21 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
79 tcf_idr_release(*a, bind); 81 tcf_idr_release(*a, bind);
80 return -EEXIST; 82 return -EEXIST;
81 } 83 }
84 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
85 if (err < 0)
86 goto release_idr;
82 87
83 psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]); 88 psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
84 psample_group = psample_group_get(net, psample_group_num); 89 psample_group = psample_group_get(net, psample_group_num);
85 if (!psample_group) { 90 if (!psample_group) {
86 tcf_idr_release(*a, bind); 91 err = -ENOMEM;
87 return -ENOMEM; 92 goto put_chain;
88 } 93 }
89 94
90 s = to_sample(*a); 95 s = to_sample(*a);
91 96
92 spin_lock_bh(&s->tcf_lock); 97 spin_lock_bh(&s->tcf_lock);
93 s->tcf_action = parm->action; 98 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
94 s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]); 99 s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
95 s->psample_group_num = psample_group_num; 100 s->psample_group_num = psample_group_num;
96 RCU_INIT_POINTER(s->psample_group, psample_group); 101 RCU_INIT_POINTER(s->psample_group, psample_group);
@@ -100,10 +105,18 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
100 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); 105 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
101 } 106 }
102 spin_unlock_bh(&s->tcf_lock); 107 spin_unlock_bh(&s->tcf_lock);
108 if (goto_ch)
109 tcf_chain_put_by_act(goto_ch);
103 110
104 if (ret == ACT_P_CREATED) 111 if (ret == ACT_P_CREATED)
105 tcf_idr_insert(tn, *a); 112 tcf_idr_insert(tn, *a);
106 return ret; 113 return ret;
114put_chain:
115 if (goto_ch)
116 tcf_chain_put_by_act(goto_ch);
117release_idr:
118 tcf_idr_release(*a, bind);
119 return err;
107} 120}
108 121
109static void tcf_sample_cleanup(struct tc_action *a) 122static void tcf_sample_cleanup(struct tc_action *a)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index d54cb608dbaf..23c8ca5615e5 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -18,6 +18,7 @@
18#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
19#include <net/netlink.h> 19#include <net/netlink.h>
20#include <net/pkt_sched.h> 20#include <net/pkt_sched.h>
21#include <net/pkt_cls.h>
21 22
22#include <linux/tc_act/tc_defact.h> 23#include <linux/tc_act/tc_defact.h>
23#include <net/tc_act/tc_defact.h> 24#include <net/tc_act/tc_defact.h>
@@ -60,14 +61,26 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
60 return 0; 61 return 0;
61} 62}
62 63
63static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata, 64static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
64 struct tc_defact *p) 65 struct tc_defact *p, struct tcf_proto *tp,
66 struct netlink_ext_ack *extack)
65{ 67{
68 struct tcf_chain *goto_ch = NULL;
69 struct tcf_defact *d;
70 int err;
71
72 err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack);
73 if (err < 0)
74 return err;
75 d = to_defact(a);
66 spin_lock_bh(&d->tcf_lock); 76 spin_lock_bh(&d->tcf_lock);
67 d->tcf_action = p->action; 77 goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
68 memset(d->tcfd_defdata, 0, SIMP_MAX_DATA); 78 memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
69 nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA); 79 nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
70 spin_unlock_bh(&d->tcf_lock); 80 spin_unlock_bh(&d->tcf_lock);
81 if (goto_ch)
82 tcf_chain_put_by_act(goto_ch);
83 return 0;
71} 84}
72 85
73static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = { 86static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
@@ -78,10 +91,11 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
78static int tcf_simp_init(struct net *net, struct nlattr *nla, 91static int tcf_simp_init(struct net *net, struct nlattr *nla,
79 struct nlattr *est, struct tc_action **a, 92 struct nlattr *est, struct tc_action **a,
80 int ovr, int bind, bool rtnl_held, 93 int ovr, int bind, bool rtnl_held,
81 struct netlink_ext_ack *extack) 94 struct tcf_proto *tp, struct netlink_ext_ack *extack)
82{ 95{
83 struct tc_action_net *tn = net_generic(net, simp_net_id); 96 struct tc_action_net *tn = net_generic(net, simp_net_id);
84 struct nlattr *tb[TCA_DEF_MAX + 1]; 97 struct nlattr *tb[TCA_DEF_MAX + 1];
98 struct tcf_chain *goto_ch = NULL;
85 struct tc_defact *parm; 99 struct tc_defact *parm;
86 struct tcf_defact *d; 100 struct tcf_defact *d;
87 bool exists = false; 101 bool exists = false;
@@ -122,27 +136,37 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
122 } 136 }
123 137
124 d = to_defact(*a); 138 d = to_defact(*a);
125 ret = alloc_defdata(d, tb[TCA_DEF_DATA]); 139 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
126 if (ret < 0) { 140 extack);
127 tcf_idr_release(*a, bind); 141 if (err < 0)
128 return ret; 142 goto release_idr;
129 } 143
130 d->tcf_action = parm->action; 144 err = alloc_defdata(d, tb[TCA_DEF_DATA]);
145 if (err < 0)
146 goto put_chain;
147
148 tcf_action_set_ctrlact(*a, parm->action, goto_ch);
131 ret = ACT_P_CREATED; 149 ret = ACT_P_CREATED;
132 } else { 150 } else {
133 d = to_defact(*a);
134
135 if (!ovr) { 151 if (!ovr) {
136 tcf_idr_release(*a, bind); 152 err = -EEXIST;
137 return -EEXIST; 153 goto release_idr;
138 } 154 }
139 155
140 reset_policy(d, tb[TCA_DEF_DATA], parm); 156 err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
157 if (err)
158 goto release_idr;
141 } 159 }
142 160
143 if (ret == ACT_P_CREATED) 161 if (ret == ACT_P_CREATED)
144 tcf_idr_insert(tn, *a); 162 tcf_idr_insert(tn, *a);
145 return ret; 163 return ret;
164put_chain:
165 if (goto_ch)
166 tcf_chain_put_by_act(goto_ch);
167release_idr:
168 tcf_idr_release(*a, bind);
169 return err;
146} 170}
147 171
148static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, 172static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 65879500b688..7e1d261a31d2 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -26,6 +26,7 @@
26#include <net/ip.h> 26#include <net/ip.h>
27#include <net/ipv6.h> 27#include <net/ipv6.h>
28#include <net/dsfield.h> 28#include <net/dsfield.h>
29#include <net/pkt_cls.h>
29 30
30#include <linux/tc_act/tc_skbedit.h> 31#include <linux/tc_act/tc_skbedit.h>
31#include <net/tc_act/tc_skbedit.h> 32#include <net/tc_act/tc_skbedit.h>
@@ -96,11 +97,13 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
96static int tcf_skbedit_init(struct net *net, struct nlattr *nla, 97static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
97 struct nlattr *est, struct tc_action **a, 98 struct nlattr *est, struct tc_action **a,
98 int ovr, int bind, bool rtnl_held, 99 int ovr, int bind, bool rtnl_held,
100 struct tcf_proto *tp,
99 struct netlink_ext_ack *extack) 101 struct netlink_ext_ack *extack)
100{ 102{
101 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 103 struct tc_action_net *tn = net_generic(net, skbedit_net_id);
102 struct tcf_skbedit_params *params_new; 104 struct tcf_skbedit_params *params_new;
103 struct nlattr *tb[TCA_SKBEDIT_MAX + 1]; 105 struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
106 struct tcf_chain *goto_ch = NULL;
104 struct tc_skbedit *parm; 107 struct tc_skbedit *parm;
105 struct tcf_skbedit *d; 108 struct tcf_skbedit *d;
106 u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL; 109 u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
@@ -186,11 +189,14 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
186 return -EEXIST; 189 return -EEXIST;
187 } 190 }
188 } 191 }
192 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
193 if (err < 0)
194 goto release_idr;
189 195
190 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); 196 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
191 if (unlikely(!params_new)) { 197 if (unlikely(!params_new)) {
192 tcf_idr_release(*a, bind); 198 err = -ENOMEM;
193 return -ENOMEM; 199 goto put_chain;
194 } 200 }
195 201
196 params_new->flags = flags; 202 params_new->flags = flags;
@@ -208,16 +214,24 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
208 params_new->mask = *mask; 214 params_new->mask = *mask;
209 215
210 spin_lock_bh(&d->tcf_lock); 216 spin_lock_bh(&d->tcf_lock);
211 d->tcf_action = parm->action; 217 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
212 rcu_swap_protected(d->params, params_new, 218 rcu_swap_protected(d->params, params_new,
213 lockdep_is_held(&d->tcf_lock)); 219 lockdep_is_held(&d->tcf_lock));
214 spin_unlock_bh(&d->tcf_lock); 220 spin_unlock_bh(&d->tcf_lock);
215 if (params_new) 221 if (params_new)
216 kfree_rcu(params_new, rcu); 222 kfree_rcu(params_new, rcu);
223 if (goto_ch)
224 tcf_chain_put_by_act(goto_ch);
217 225
218 if (ret == ACT_P_CREATED) 226 if (ret == ACT_P_CREATED)
219 tcf_idr_insert(tn, *a); 227 tcf_idr_insert(tn, *a);
220 return ret; 228 return ret;
229put_chain:
230 if (goto_ch)
231 tcf_chain_put_by_act(goto_ch);
232release_idr:
233 tcf_idr_release(*a, bind);
234 return err;
221} 235}
222 236
223static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a, 237static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 7bac1d78e7a3..1d4c324d0a42 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -16,6 +16,7 @@
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <net/netlink.h> 17#include <net/netlink.h>
18#include <net/pkt_sched.h> 18#include <net/pkt_sched.h>
19#include <net/pkt_cls.h>
19 20
20#include <linux/tc_act/tc_skbmod.h> 21#include <linux/tc_act/tc_skbmod.h>
21#include <net/tc_act/tc_skbmod.h> 22#include <net/tc_act/tc_skbmod.h>
@@ -82,11 +83,13 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
82static int tcf_skbmod_init(struct net *net, struct nlattr *nla, 83static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
83 struct nlattr *est, struct tc_action **a, 84 struct nlattr *est, struct tc_action **a,
84 int ovr, int bind, bool rtnl_held, 85 int ovr, int bind, bool rtnl_held,
86 struct tcf_proto *tp,
85 struct netlink_ext_ack *extack) 87 struct netlink_ext_ack *extack)
86{ 88{
87 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 89 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
88 struct nlattr *tb[TCA_SKBMOD_MAX + 1]; 90 struct nlattr *tb[TCA_SKBMOD_MAX + 1];
89 struct tcf_skbmod_params *p, *p_old; 91 struct tcf_skbmod_params *p, *p_old;
92 struct tcf_chain *goto_ch = NULL;
90 struct tc_skbmod *parm; 93 struct tc_skbmod *parm;
91 struct tcf_skbmod *d; 94 struct tcf_skbmod *d;
92 bool exists = false; 95 bool exists = false;
@@ -153,21 +156,24 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
153 tcf_idr_release(*a, bind); 156 tcf_idr_release(*a, bind);
154 return -EEXIST; 157 return -EEXIST;
155 } 158 }
159 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
160 if (err < 0)
161 goto release_idr;
156 162
157 d = to_skbmod(*a); 163 d = to_skbmod(*a);
158 164
159 p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); 165 p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
160 if (unlikely(!p)) { 166 if (unlikely(!p)) {
161 tcf_idr_release(*a, bind); 167 err = -ENOMEM;
162 return -ENOMEM; 168 goto put_chain;
163 } 169 }
164 170
165 p->flags = lflags; 171 p->flags = lflags;
166 d->tcf_action = parm->action;
167 172
168 if (ovr) 173 if (ovr)
169 spin_lock_bh(&d->tcf_lock); 174 spin_lock_bh(&d->tcf_lock);
170 /* Protected by tcf_lock if overwriting existing action. */ 175 /* Protected by tcf_lock if overwriting existing action. */
176 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
171 p_old = rcu_dereference_protected(d->skbmod_p, 1); 177 p_old = rcu_dereference_protected(d->skbmod_p, 1);
172 178
173 if (lflags & SKBMOD_F_DMAC) 179 if (lflags & SKBMOD_F_DMAC)
@@ -183,10 +189,18 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
183 189
184 if (p_old) 190 if (p_old)
185 kfree_rcu(p_old, rcu); 191 kfree_rcu(p_old, rcu);
192 if (goto_ch)
193 tcf_chain_put_by_act(goto_ch);
186 194
187 if (ret == ACT_P_CREATED) 195 if (ret == ACT_P_CREATED)
188 tcf_idr_insert(tn, *a); 196 tcf_idr_insert(tn, *a);
189 return ret; 197 return ret;
198put_chain:
199 if (goto_ch)
200 tcf_chain_put_by_act(goto_ch);
201release_idr:
202 tcf_idr_release(*a, bind);
203 return err;
190} 204}
191 205
192static void tcf_skbmod_cleanup(struct tc_action *a) 206static void tcf_skbmod_cleanup(struct tc_action *a)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 7c6591b991d5..d5aaf90a3971 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -17,6 +17,7 @@
17#include <net/netlink.h> 17#include <net/netlink.h>
18#include <net/pkt_sched.h> 18#include <net/pkt_sched.h>
19#include <net/dst.h> 19#include <net/dst.h>
20#include <net/pkt_cls.h>
20 21
21#include <linux/tc_act/tc_tunnel_key.h> 22#include <linux/tc_act/tc_tunnel_key.h>
22#include <net/tc_act/tc_tunnel_key.h> 23#include <net/tc_act/tc_tunnel_key.h>
@@ -210,12 +211,14 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
210static int tunnel_key_init(struct net *net, struct nlattr *nla, 211static int tunnel_key_init(struct net *net, struct nlattr *nla,
211 struct nlattr *est, struct tc_action **a, 212 struct nlattr *est, struct tc_action **a,
212 int ovr, int bind, bool rtnl_held, 213 int ovr, int bind, bool rtnl_held,
214 struct tcf_proto *tp,
213 struct netlink_ext_ack *extack) 215 struct netlink_ext_ack *extack)
214{ 216{
215 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 217 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
216 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1]; 218 struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
217 struct tcf_tunnel_key_params *params_new; 219 struct tcf_tunnel_key_params *params_new;
218 struct metadata_dst *metadata = NULL; 220 struct metadata_dst *metadata = NULL;
221 struct tcf_chain *goto_ch = NULL;
219 struct tc_tunnel_key *parm; 222 struct tc_tunnel_key *parm;
220 struct tcf_tunnel_key *t; 223 struct tcf_tunnel_key *t;
221 bool exists = false; 224 bool exists = false;
@@ -359,6 +362,12 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
359 goto release_tun_meta; 362 goto release_tun_meta;
360 } 363 }
361 364
365 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
366 if (err < 0) {
367 ret = err;
368 exists = true;
369 goto release_tun_meta;
370 }
362 t = to_tunnel_key(*a); 371 t = to_tunnel_key(*a);
363 372
364 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL); 373 params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
@@ -366,23 +375,29 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
366 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters"); 375 NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
367 ret = -ENOMEM; 376 ret = -ENOMEM;
368 exists = true; 377 exists = true;
369 goto release_tun_meta; 378 goto put_chain;
370 } 379 }
371 params_new->tcft_action = parm->t_action; 380 params_new->tcft_action = parm->t_action;
372 params_new->tcft_enc_metadata = metadata; 381 params_new->tcft_enc_metadata = metadata;
373 382
374 spin_lock_bh(&t->tcf_lock); 383 spin_lock_bh(&t->tcf_lock);
375 t->tcf_action = parm->action; 384 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
376 rcu_swap_protected(t->params, params_new, 385 rcu_swap_protected(t->params, params_new,
377 lockdep_is_held(&t->tcf_lock)); 386 lockdep_is_held(&t->tcf_lock));
378 spin_unlock_bh(&t->tcf_lock); 387 spin_unlock_bh(&t->tcf_lock);
379 tunnel_key_release_params(params_new); 388 tunnel_key_release_params(params_new);
389 if (goto_ch)
390 tcf_chain_put_by_act(goto_ch);
380 391
381 if (ret == ACT_P_CREATED) 392 if (ret == ACT_P_CREATED)
382 tcf_idr_insert(tn, *a); 393 tcf_idr_insert(tn, *a);
383 394
384 return ret; 395 return ret;
385 396
397put_chain:
398 if (goto_ch)
399 tcf_chain_put_by_act(goto_ch);
400
386release_tun_meta: 401release_tun_meta:
387 if (metadata) 402 if (metadata)
388 dst_release(&metadata->dst); 403 dst_release(&metadata->dst);
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index ac0061599225..0f40d0a74423 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -15,6 +15,7 @@
15#include <linux/if_vlan.h> 15#include <linux/if_vlan.h>
16#include <net/netlink.h> 16#include <net/netlink.h>
17#include <net/pkt_sched.h> 17#include <net/pkt_sched.h>
18#include <net/pkt_cls.h>
18 19
19#include <linux/tc_act/tc_vlan.h> 20#include <linux/tc_act/tc_vlan.h>
20#include <net/tc_act/tc_vlan.h> 21#include <net/tc_act/tc_vlan.h>
@@ -105,10 +106,11 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
105static int tcf_vlan_init(struct net *net, struct nlattr *nla, 106static int tcf_vlan_init(struct net *net, struct nlattr *nla,
106 struct nlattr *est, struct tc_action **a, 107 struct nlattr *est, struct tc_action **a,
107 int ovr, int bind, bool rtnl_held, 108 int ovr, int bind, bool rtnl_held,
108 struct netlink_ext_ack *extack) 109 struct tcf_proto *tp, struct netlink_ext_ack *extack)
109{ 110{
110 struct tc_action_net *tn = net_generic(net, vlan_net_id); 111 struct tc_action_net *tn = net_generic(net, vlan_net_id);
111 struct nlattr *tb[TCA_VLAN_MAX + 1]; 112 struct nlattr *tb[TCA_VLAN_MAX + 1];
113 struct tcf_chain *goto_ch = NULL;
112 struct tcf_vlan_params *p; 114 struct tcf_vlan_params *p;
113 struct tc_vlan *parm; 115 struct tc_vlan *parm;
114 struct tcf_vlan *v; 116 struct tcf_vlan *v;
@@ -200,12 +202,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
200 return -EEXIST; 202 return -EEXIST;
201 } 203 }
202 204
205 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
206 if (err < 0)
207 goto release_idr;
208
203 v = to_vlan(*a); 209 v = to_vlan(*a);
204 210
205 p = kzalloc(sizeof(*p), GFP_KERNEL); 211 p = kzalloc(sizeof(*p), GFP_KERNEL);
206 if (!p) { 212 if (!p) {
207 tcf_idr_release(*a, bind); 213 err = -ENOMEM;
208 return -ENOMEM; 214 goto put_chain;
209 } 215 }
210 216
211 p->tcfv_action = action; 217 p->tcfv_action = action;
@@ -214,16 +220,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
214 p->tcfv_push_proto = push_proto; 220 p->tcfv_push_proto = push_proto;
215 221
216 spin_lock_bh(&v->tcf_lock); 222 spin_lock_bh(&v->tcf_lock);
217 v->tcf_action = parm->action; 223 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
218 rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock)); 224 rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
219 spin_unlock_bh(&v->tcf_lock); 225 spin_unlock_bh(&v->tcf_lock);
220 226
227 if (goto_ch)
228 tcf_chain_put_by_act(goto_ch);
221 if (p) 229 if (p)
222 kfree_rcu(p, rcu); 230 kfree_rcu(p, rcu);
223 231
224 if (ret == ACT_P_CREATED) 232 if (ret == ACT_P_CREATED)
225 tcf_idr_insert(tn, *a); 233 tcf_idr_insert(tn, *a);
226 return ret; 234 return ret;
235put_chain:
236 if (goto_ch)
237 tcf_chain_put_by_act(goto_ch);
238release_idr:
239 tcf_idr_release(*a, bind);
240 return err;
227} 241}
228 242
229static void tcf_vlan_cleanup(struct tc_action *a) 243static void tcf_vlan_cleanup(struct tc_action *a)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index dc10525e90e7..99ae30c177c7 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -367,7 +367,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
367 struct tcf_block *block = chain->block; 367 struct tcf_block *block = chain->block;
368 368
369 mutex_destroy(&chain->filter_chain_lock); 369 mutex_destroy(&chain->filter_chain_lock);
370 kfree(chain); 370 kfree_rcu(chain, rcu);
371 if (free_block) 371 if (free_block)
372 tcf_block_destroy(block); 372 tcf_block_destroy(block);
373} 373}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 1d2a12132abc..acc9b9da985f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -211,6 +211,9 @@ struct cake_sched_data {
211 u8 ack_filter; 211 u8 ack_filter;
212 u8 atm_mode; 212 u8 atm_mode;
213 213
214 u32 fwmark_mask;
215 u16 fwmark_shft;
216
214 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ 217 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
215 u16 rate_shft; 218 u16 rate_shft;
216 ktime_t time_next_packet; 219 ktime_t time_next_packet;
@@ -258,8 +261,7 @@ enum {
258 CAKE_FLAG_AUTORATE_INGRESS = BIT(1), 261 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
259 CAKE_FLAG_INGRESS = BIT(2), 262 CAKE_FLAG_INGRESS = BIT(2),
260 CAKE_FLAG_WASH = BIT(3), 263 CAKE_FLAG_WASH = BIT(3),
261 CAKE_FLAG_SPLIT_GSO = BIT(4), 264 CAKE_FLAG_SPLIT_GSO = BIT(4)
262 CAKE_FLAG_FWMARK = BIT(5)
263}; 265};
264 266
265/* COBALT operates the Codel and BLUE algorithms in parallel, in order to 267/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
@@ -1543,7 +1545,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1543 struct sk_buff *skb) 1545 struct sk_buff *skb)
1544{ 1546{
1545 struct cake_sched_data *q = qdisc_priv(sch); 1547 struct cake_sched_data *q = qdisc_priv(sch);
1546 u32 tin; 1548 u32 tin, mark;
1547 u8 dscp; 1549 u8 dscp;
1548 1550
1549 /* Tin selection: Default to diffserv-based selection, allow overriding 1551 /* Tin selection: Default to diffserv-based selection, allow overriding
@@ -1551,14 +1553,13 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
1551 */ 1553 */
1552 dscp = cake_handle_diffserv(skb, 1554 dscp = cake_handle_diffserv(skb,
1553 q->rate_flags & CAKE_FLAG_WASH); 1555 q->rate_flags & CAKE_FLAG_WASH);
1556 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
1554 1557
1555 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) 1558 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
1556 tin = 0; 1559 tin = 0;
1557 1560
1558 else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */ 1561 else if (mark && mark <= q->tin_cnt)
1559 skb->mark && 1562 tin = q->tin_order[mark - 1];
1560 skb->mark <= q->tin_cnt)
1561 tin = q->tin_order[skb->mark - 1];
1562 1563
1563 else if (TC_H_MAJ(skb->priority) == sch->handle && 1564 else if (TC_H_MAJ(skb->priority) == sch->handle &&
1564 TC_H_MIN(skb->priority) > 0 && 1565 TC_H_MIN(skb->priority) > 0 &&
@@ -2172,6 +2173,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2172 [TCA_CAKE_MPU] = { .type = NLA_U32 }, 2173 [TCA_CAKE_MPU] = { .type = NLA_U32 },
2173 [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, 2174 [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
2174 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, 2175 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
2176 [TCA_CAKE_FWMARK] = { .type = NLA_U32 },
2175}; 2177};
2176 2178
2177static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu, 2179static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2619,10 +2621,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
2619 } 2621 }
2620 2622
2621 if (tb[TCA_CAKE_FWMARK]) { 2623 if (tb[TCA_CAKE_FWMARK]) {
2622 if (!!nla_get_u32(tb[TCA_CAKE_FWMARK])) 2624 q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
2623 q->rate_flags |= CAKE_FLAG_FWMARK; 2625 q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
2624 else
2625 q->rate_flags &= ~CAKE_FLAG_FWMARK;
2626 } 2626 }
2627 2627
2628 if (q->tins) { 2628 if (q->tins) {
@@ -2784,8 +2784,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
2784 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO))) 2784 !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
2785 goto nla_put_failure; 2785 goto nla_put_failure;
2786 2786
2787 if (nla_put_u32(skb, TCA_CAKE_FWMARK, 2787 if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
2788 !!(q->rate_flags & CAKE_FLAG_FWMARK)))
2789 goto nla_put_failure; 2788 goto nla_put_failure;
2790 2789
2791 return nla_nest_end(skb, opts); 2790 return nla_nest_end(skb, opts);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6140471efd4b..9874e60c9b0d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
999 if (unlikely(addrs_size <= 0)) 999 if (unlikely(addrs_size <= 0))
1000 return -EINVAL; 1000 return -EINVAL;
1001 1001
1002 kaddrs = vmemdup_user(addrs, addrs_size); 1002 kaddrs = memdup_user(addrs, addrs_size);
1003 if (unlikely(IS_ERR(kaddrs))) 1003 if (unlikely(IS_ERR(kaddrs)))
1004 return PTR_ERR(kaddrs); 1004 return PTR_ERR(kaddrs);
1005 1005
@@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
1007 addr_buf = kaddrs; 1007 addr_buf = kaddrs;
1008 while (walk_size < addrs_size) { 1008 while (walk_size < addrs_size) {
1009 if (walk_size + sizeof(sa_family_t) > addrs_size) { 1009 if (walk_size + sizeof(sa_family_t) > addrs_size) {
1010 kvfree(kaddrs); 1010 kfree(kaddrs);
1011 return -EINVAL; 1011 return -EINVAL;
1012 } 1012 }
1013 1013
@@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
1018 * causes the address buffer to overflow return EINVAL. 1018 * causes the address buffer to overflow return EINVAL.
1019 */ 1019 */
1020 if (!af || (walk_size + af->sockaddr_len) > addrs_size) { 1020 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
1021 kvfree(kaddrs); 1021 kfree(kaddrs);
1022 return -EINVAL; 1022 return -EINVAL;
1023 } 1023 }
1024 addrcnt++; 1024 addrcnt++;
@@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
1054 } 1054 }
1055 1055
1056out: 1056out:
1057 kvfree(kaddrs); 1057 kfree(kaddrs);
1058 1058
1059 return err; 1059 return err;
1060} 1060}
@@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1329 if (unlikely(addrs_size <= 0)) 1329 if (unlikely(addrs_size <= 0))
1330 return -EINVAL; 1330 return -EINVAL;
1331 1331
1332 kaddrs = vmemdup_user(addrs, addrs_size); 1332 kaddrs = memdup_user(addrs, addrs_size);
1333 if (unlikely(IS_ERR(kaddrs))) 1333 if (unlikely(IS_ERR(kaddrs)))
1334 return PTR_ERR(kaddrs); 1334 return PTR_ERR(kaddrs);
1335 1335
@@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
1349 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); 1349 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
1350 1350
1351out_free: 1351out_free:
1352 kvfree(kaddrs); 1352 kfree(kaddrs);
1353 1353
1354 return err; 1354 return err;
1355} 1355}
@@ -2920,6 +2920,9 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
2920 return 0; 2920 return 0;
2921 } 2921 }
2922 2922
2923 if (sctp_style(sk, TCP))
2924 params.sack_assoc_id = SCTP_FUTURE_ASSOC;
2925
2923 if (params.sack_assoc_id == SCTP_FUTURE_ASSOC || 2926 if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
2924 params.sack_assoc_id == SCTP_ALL_ASSOC) { 2927 params.sack_assoc_id == SCTP_ALL_ASSOC) {
2925 if (params.sack_delay) { 2928 if (params.sack_delay) {
@@ -3024,6 +3027,9 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
3024 return 0; 3027 return 0;
3025 } 3028 }
3026 3029
3030 if (sctp_style(sk, TCP))
3031 info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
3032
3027 if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC || 3033 if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
3028 info.sinfo_assoc_id == SCTP_ALL_ASSOC) { 3034 info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
3029 sp->default_stream = info.sinfo_stream; 3035 sp->default_stream = info.sinfo_stream;
@@ -3081,6 +3087,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
3081 return 0; 3087 return 0;
3082 } 3088 }
3083 3089
3090 if (sctp_style(sk, TCP))
3091 info.snd_assoc_id = SCTP_FUTURE_ASSOC;
3092
3084 if (info.snd_assoc_id == SCTP_FUTURE_ASSOC || 3093 if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
3085 info.snd_assoc_id == SCTP_ALL_ASSOC) { 3094 info.snd_assoc_id == SCTP_ALL_ASSOC) {
3086 sp->default_stream = info.snd_sid; 3095 sp->default_stream = info.snd_sid;
@@ -3531,6 +3540,9 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
3531 return 0; 3540 return 0;
3532 } 3541 }
3533 3542
3543 if (sctp_style(sk, TCP))
3544 params.assoc_id = SCTP_FUTURE_ASSOC;
3545
3534 if (params.assoc_id == SCTP_FUTURE_ASSOC || 3546 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
3535 params.assoc_id == SCTP_ALL_ASSOC) 3547 params.assoc_id == SCTP_ALL_ASSOC)
3536 sp->default_rcv_context = params.assoc_value; 3548 sp->default_rcv_context = params.assoc_value;
@@ -3670,6 +3682,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
3670 return 0; 3682 return 0;
3671 } 3683 }
3672 3684
3685 if (sctp_style(sk, TCP))
3686 params.assoc_id = SCTP_FUTURE_ASSOC;
3687
3673 if (params.assoc_id == SCTP_FUTURE_ASSOC || 3688 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
3674 params.assoc_id == SCTP_ALL_ASSOC) 3689 params.assoc_id == SCTP_ALL_ASSOC)
3675 sp->max_burst = params.assoc_value; 3690 sp->max_burst = params.assoc_value;
@@ -3798,6 +3813,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3798 goto out; 3813 goto out;
3799 } 3814 }
3800 3815
3816 if (sctp_style(sk, TCP))
3817 authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
3818
3801 if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC || 3819 if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
3802 authkey->sca_assoc_id == SCTP_ALL_ASSOC) { 3820 authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
3803 ret = sctp_auth_set_key(ep, asoc, authkey); 3821 ret = sctp_auth_set_key(ep, asoc, authkey);
@@ -3853,6 +3871,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
3853 if (asoc) 3871 if (asoc)
3854 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3872 return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
3855 3873
3874 if (sctp_style(sk, TCP))
3875 val.scact_assoc_id = SCTP_FUTURE_ASSOC;
3876
3856 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || 3877 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
3857 val.scact_assoc_id == SCTP_ALL_ASSOC) { 3878 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3858 ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); 3879 ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
@@ -3904,6 +3925,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3904 if (asoc) 3925 if (asoc)
3905 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3926 return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
3906 3927
3928 if (sctp_style(sk, TCP))
3929 val.scact_assoc_id = SCTP_FUTURE_ASSOC;
3930
3907 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || 3931 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
3908 val.scact_assoc_id == SCTP_ALL_ASSOC) { 3932 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3909 ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); 3933 ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
@@ -3954,6 +3978,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
3954 if (asoc) 3978 if (asoc)
3955 return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); 3979 return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
3956 3980
3981 if (sctp_style(sk, TCP))
3982 val.scact_assoc_id = SCTP_FUTURE_ASSOC;
3983
3957 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || 3984 if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
3958 val.scact_assoc_id == SCTP_ALL_ASSOC) { 3985 val.scact_assoc_id == SCTP_ALL_ASSOC) {
3959 ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); 3986 ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
@@ -4169,6 +4196,9 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
4169 goto out; 4196 goto out;
4170 } 4197 }
4171 4198
4199 if (sctp_style(sk, TCP))
4200 info.pr_assoc_id = SCTP_FUTURE_ASSOC;
4201
4172 if (info.pr_assoc_id == SCTP_FUTURE_ASSOC || 4202 if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
4173 info.pr_assoc_id == SCTP_ALL_ASSOC) { 4203 info.pr_assoc_id == SCTP_ALL_ASSOC) {
4174 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); 4204 SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
@@ -4251,6 +4281,9 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
4251 goto out; 4281 goto out;
4252 } 4282 }
4253 4283
4284 if (sctp_style(sk, TCP))
4285 params.assoc_id = SCTP_FUTURE_ASSOC;
4286
4254 if (params.assoc_id == SCTP_FUTURE_ASSOC || 4287 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
4255 params.assoc_id == SCTP_ALL_ASSOC) 4288 params.assoc_id == SCTP_ALL_ASSOC)
4256 ep->strreset_enable = params.assoc_value; 4289 ep->strreset_enable = params.assoc_value;
@@ -4376,6 +4409,9 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
4376 if (asoc) 4409 if (asoc)
4377 return sctp_sched_set_sched(asoc, params.assoc_value); 4410 return sctp_sched_set_sched(asoc, params.assoc_value);
4378 4411
4412 if (sctp_style(sk, TCP))
4413 params.assoc_id = SCTP_FUTURE_ASSOC;
4414
4379 if (params.assoc_id == SCTP_FUTURE_ASSOC || 4415 if (params.assoc_id == SCTP_FUTURE_ASSOC ||
4380 params.assoc_id == SCTP_ALL_ASSOC) 4416 params.assoc_id == SCTP_ALL_ASSOC)
4381 sp->default_ss = params.assoc_value; 4417 sp->default_ss = params.assoc_value;
@@ -4541,6 +4577,9 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
4541 if (asoc) 4577 if (asoc)
4542 return sctp_assoc_ulpevent_type_set(&param, asoc); 4578 return sctp_assoc_ulpevent_type_set(&param, asoc);
4543 4579
4580 if (sctp_style(sk, TCP))
4581 param.se_assoc_id = SCTP_FUTURE_ASSOC;
4582
4544 if (param.se_assoc_id == SCTP_FUTURE_ASSOC || 4583 if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
4545 param.se_assoc_id == SCTP_ALL_ASSOC) 4584 param.se_assoc_id == SCTP_ALL_ASSOC)
4546 sctp_ulpevent_type_set(&sp->subscribe, 4585 sctp_ulpevent_type_set(&sp->subscribe,
@@ -9169,7 +9208,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
9169{ 9208{
9170 int ancestor_size = sizeof(struct inet_sock) + 9209 int ancestor_size = sizeof(struct inet_sock) +
9171 sizeof(struct sctp_sock) - 9210 sizeof(struct sctp_sock) -
9172 offsetof(struct sctp_sock, auto_asconf_list); 9211 offsetof(struct sctp_sock, pd_lobby);
9173 9212
9174 if (sk_from->sk_family == PF_INET6) 9213 if (sk_from->sk_family == PF_INET6)
9175 ancestor_size += sizeof(struct ipv6_pinfo); 9214 ancestor_size += sizeof(struct ipv6_pinfo);
@@ -9253,7 +9292,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
9253 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. 9292 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
9254 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. 9293 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
9255 */ 9294 */
9256 skb_queue_head_init(&newsp->pd_lobby);
9257 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode); 9295 atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
9258 9296
9259 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) { 9297 if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
diff --git a/net/socket.c b/net/socket.c
index 3c176a12fe48..8255f5bda0aa 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -384,6 +384,18 @@ static struct file_system_type sock_fs_type = {
384 * but we take care of internal coherence yet. 384 * but we take care of internal coherence yet.
385 */ 385 */
386 386
387/**
388 * sock_alloc_file - Bind a &socket to a &file
389 * @sock: socket
390 * @flags: file status flags
391 * @dname: protocol name
392 *
393 * Returns the &file bound with @sock, implicitly storing it
394 * in sock->file. If dname is %NULL, sets to "".
395 * On failure the return is a ERR pointer (see linux/err.h).
396 * This function uses GFP_KERNEL internally.
397 */
398
387struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) 399struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
388{ 400{
389 struct file *file; 401 struct file *file;
@@ -424,6 +436,14 @@ static int sock_map_fd(struct socket *sock, int flags)
424 return PTR_ERR(newfile); 436 return PTR_ERR(newfile);
425} 437}
426 438
439/**
440 * sock_from_file - Return the &socket bounded to @file.
441 * @file: file
442 * @err: pointer to an error code return
443 *
444 * On failure returns %NULL and assigns -ENOTSOCK to @err.
445 */
446
427struct socket *sock_from_file(struct file *file, int *err) 447struct socket *sock_from_file(struct file *file, int *err)
428{ 448{
429 if (file->f_op == &socket_file_ops) 449 if (file->f_op == &socket_file_ops)
@@ -532,11 +552,11 @@ static const struct inode_operations sockfs_inode_ops = {
532}; 552};
533 553
534/** 554/**
535 * sock_alloc - allocate a socket 555 * sock_alloc - allocate a socket
536 * 556 *
537 * Allocate a new inode and socket object. The two are bound together 557 * Allocate a new inode and socket object. The two are bound together
538 * and initialised. The socket is then returned. If we are out of inodes 558 * and initialised. The socket is then returned. If we are out of inodes
539 * NULL is returned. 559 * NULL is returned. This functions uses GFP_KERNEL internally.
540 */ 560 */
541 561
542struct socket *sock_alloc(void) 562struct socket *sock_alloc(void)
@@ -561,7 +581,7 @@ struct socket *sock_alloc(void)
561EXPORT_SYMBOL(sock_alloc); 581EXPORT_SYMBOL(sock_alloc);
562 582
563/** 583/**
564 * sock_release - close a socket 584 * sock_release - close a socket
565 * @sock: socket to close 585 * @sock: socket to close
566 * 586 *
567 * The socket is released from the protocol stack if it has a release 587 * The socket is released from the protocol stack if it has a release
@@ -617,6 +637,15 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
617} 637}
618EXPORT_SYMBOL(__sock_tx_timestamp); 638EXPORT_SYMBOL(__sock_tx_timestamp);
619 639
640/**
641 * sock_sendmsg - send a message through @sock
642 * @sock: socket
643 * @msg: message to send
644 *
645 * Sends @msg through @sock, passing through LSM.
646 * Returns the number of bytes sent, or an error code.
647 */
648
620static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) 649static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
621{ 650{
622 int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg)); 651 int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
@@ -633,6 +662,18 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
633} 662}
634EXPORT_SYMBOL(sock_sendmsg); 663EXPORT_SYMBOL(sock_sendmsg);
635 664
665/**
666 * kernel_sendmsg - send a message through @sock (kernel-space)
667 * @sock: socket
668 * @msg: message header
669 * @vec: kernel vec
670 * @num: vec array length
671 * @size: total message data size
672 *
673 * Builds the message data with @vec and sends it through @sock.
674 * Returns the number of bytes sent, or an error code.
675 */
676
636int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 677int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
637 struct kvec *vec, size_t num, size_t size) 678 struct kvec *vec, size_t num, size_t size)
638{ 679{
@@ -641,6 +682,19 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
641} 682}
642EXPORT_SYMBOL(kernel_sendmsg); 683EXPORT_SYMBOL(kernel_sendmsg);
643 684
685/**
686 * kernel_sendmsg_locked - send a message through @sock (kernel-space)
687 * @sk: sock
688 * @msg: message header
689 * @vec: output s/g array
690 * @num: output s/g array length
691 * @size: total message data size
692 *
693 * Builds the message data with @vec and sends it through @sock.
694 * Returns the number of bytes sent, or an error code.
695 * Caller must hold @sk.
696 */
697
644int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, 698int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
645 struct kvec *vec, size_t num, size_t size) 699 struct kvec *vec, size_t num, size_t size)
646{ 700{
@@ -811,6 +865,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
811} 865}
812EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); 866EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
813 867
868/**
869 * sock_recvmsg - receive a message from @sock
870 * @sock: socket
871 * @msg: message to receive
872 * @flags: message flags
873 *
874 * Receives @msg from @sock, passing through LSM. Returns the total number
875 * of bytes received, or an error.
876 */
877
814static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, 878static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
815 int flags) 879 int flags)
816{ 880{
@@ -826,20 +890,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
826EXPORT_SYMBOL(sock_recvmsg); 890EXPORT_SYMBOL(sock_recvmsg);
827 891
828/** 892/**
829 * kernel_recvmsg - Receive a message from a socket (kernel space) 893 * kernel_recvmsg - Receive a message from a socket (kernel space)
830 * @sock: The socket to receive the message from 894 * @sock: The socket to receive the message from
831 * @msg: Received message 895 * @msg: Received message
832 * @vec: Input s/g array for message data 896 * @vec: Input s/g array for message data
833 * @num: Size of input s/g array 897 * @num: Size of input s/g array
834 * @size: Number of bytes to read 898 * @size: Number of bytes to read
835 * @flags: Message flags (MSG_DONTWAIT, etc...) 899 * @flags: Message flags (MSG_DONTWAIT, etc...)
836 * 900 *
837 * On return the msg structure contains the scatter/gather array passed in the 901 * On return the msg structure contains the scatter/gather array passed in the
838 * vec argument. The array is modified so that it consists of the unfilled 902 * vec argument. The array is modified so that it consists of the unfilled
839 * portion of the original array. 903 * portion of the original array.
840 * 904 *
841 * The returned value is the total number of bytes received, or an error. 905 * The returned value is the total number of bytes received, or an error.
842 */ 906 */
907
843int kernel_recvmsg(struct socket *sock, struct msghdr *msg, 908int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
844 struct kvec *vec, size_t num, size_t size, int flags) 909 struct kvec *vec, size_t num, size_t size, int flags)
845{ 910{
@@ -1005,6 +1070,13 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
1005 * what to do with it - that's up to the protocol still. 1070 * what to do with it - that's up to the protocol still.
1006 */ 1071 */
1007 1072
1073/**
1074 * get_net_ns - increment the refcount of the network namespace
1075 * @ns: common namespace (net)
1076 *
1077 * Returns the net's common namespace.
1078 */
1079
1008struct ns_common *get_net_ns(struct ns_common *ns) 1080struct ns_common *get_net_ns(struct ns_common *ns)
1009{ 1081{
1010 return &get_net(container_of(ns, struct net, ns))->ns; 1082 return &get_net(container_of(ns, struct net, ns))->ns;
@@ -1099,6 +1171,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1099 return err; 1171 return err;
1100} 1172}
1101 1173
1174/**
1175 * sock_create_lite - creates a socket
1176 * @family: protocol family (AF_INET, ...)
1177 * @type: communication type (SOCK_STREAM, ...)
1178 * @protocol: protocol (0, ...)
1179 * @res: new socket
1180 *
1181 * Creates a new socket and assigns it to @res, passing through LSM.
1182 * The new socket initialization is not complete, see kernel_accept().
1183 * Returns 0 or an error. On failure @res is set to %NULL.
1184 * This function internally uses GFP_KERNEL.
1185 */
1186
1102int sock_create_lite(int family, int type, int protocol, struct socket **res) 1187int sock_create_lite(int family, int type, int protocol, struct socket **res)
1103{ 1188{
1104 int err; 1189 int err;
@@ -1224,6 +1309,21 @@ call_kill:
1224} 1309}
1225EXPORT_SYMBOL(sock_wake_async); 1310EXPORT_SYMBOL(sock_wake_async);
1226 1311
1312/**
1313 * __sock_create - creates a socket
1314 * @net: net namespace
1315 * @family: protocol family (AF_INET, ...)
1316 * @type: communication type (SOCK_STREAM, ...)
1317 * @protocol: protocol (0, ...)
1318 * @res: new socket
1319 * @kern: boolean for kernel space sockets
1320 *
1321 * Creates a new socket and assigns it to @res, passing through LSM.
1322 * Returns 0 or an error. On failure @res is set to %NULL. @kern must
1323 * be set to true if the socket resides in kernel space.
1324 * This function internally uses GFP_KERNEL.
1325 */
1326
1227int __sock_create(struct net *net, int family, int type, int protocol, 1327int __sock_create(struct net *net, int family, int type, int protocol,
1228 struct socket **res, int kern) 1328 struct socket **res, int kern)
1229{ 1329{
@@ -1333,12 +1433,35 @@ out_release:
1333} 1433}
1334EXPORT_SYMBOL(__sock_create); 1434EXPORT_SYMBOL(__sock_create);
1335 1435
1436/**
1437 * sock_create - creates a socket
1438 * @family: protocol family (AF_INET, ...)
1439 * @type: communication type (SOCK_STREAM, ...)
1440 * @protocol: protocol (0, ...)
1441 * @res: new socket
1442 *
1443 * A wrapper around __sock_create().
1444 * Returns 0 or an error. This function internally uses GFP_KERNEL.
1445 */
1446
1336int sock_create(int family, int type, int protocol, struct socket **res) 1447int sock_create(int family, int type, int protocol, struct socket **res)
1337{ 1448{
1338 return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); 1449 return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
1339} 1450}
1340EXPORT_SYMBOL(sock_create); 1451EXPORT_SYMBOL(sock_create);
1341 1452
1453/**
1454 * sock_create_kern - creates a socket (kernel space)
1455 * @net: net namespace
1456 * @family: protocol family (AF_INET, ...)
1457 * @type: communication type (SOCK_STREAM, ...)
1458 * @protocol: protocol (0, ...)
1459 * @res: new socket
1460 *
1461 * A wrapper around __sock_create().
1462 * Returns 0 or an error. This function internally uses GFP_KERNEL.
1463 */
1464
1342int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res) 1465int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
1343{ 1466{
1344 return __sock_create(net, family, type, protocol, res, 1); 1467 return __sock_create(net, family, type, protocol, res, 1);
@@ -3322,18 +3445,46 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
3322} 3445}
3323#endif 3446#endif
3324 3447
3448/**
3449 * kernel_bind - bind an address to a socket (kernel space)
3450 * @sock: socket
3451 * @addr: address
3452 * @addrlen: length of address
3453 *
3454 * Returns 0 or an error.
3455 */
3456
3325int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen) 3457int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
3326{ 3458{
3327 return sock->ops->bind(sock, addr, addrlen); 3459 return sock->ops->bind(sock, addr, addrlen);
3328} 3460}
3329EXPORT_SYMBOL(kernel_bind); 3461EXPORT_SYMBOL(kernel_bind);
3330 3462
3463/**
3464 * kernel_listen - move socket to listening state (kernel space)
3465 * @sock: socket
3466 * @backlog: pending connections queue size
3467 *
3468 * Returns 0 or an error.
3469 */
3470
3331int kernel_listen(struct socket *sock, int backlog) 3471int kernel_listen(struct socket *sock, int backlog)
3332{ 3472{
3333 return sock->ops->listen(sock, backlog); 3473 return sock->ops->listen(sock, backlog);
3334} 3474}
3335EXPORT_SYMBOL(kernel_listen); 3475EXPORT_SYMBOL(kernel_listen);
3336 3476
3477/**
3478 * kernel_accept - accept a connection (kernel space)
3479 * @sock: listening socket
3480 * @newsock: new connected socket
3481 * @flags: flags
3482 *
3483 * @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0.
3484 * If it fails, @newsock is guaranteed to be %NULL.
3485 * Returns 0 or an error.
3486 */
3487
3337int kernel_accept(struct socket *sock, struct socket **newsock, int flags) 3488int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
3338{ 3489{
3339 struct sock *sk = sock->sk; 3490 struct sock *sk = sock->sk;
@@ -3359,6 +3510,19 @@ done:
3359} 3510}
3360EXPORT_SYMBOL(kernel_accept); 3511EXPORT_SYMBOL(kernel_accept);
3361 3512
3513/**
3514 * kernel_connect - connect a socket (kernel space)
3515 * @sock: socket
3516 * @addr: address
3517 * @addrlen: address length
3518 * @flags: flags (O_NONBLOCK, ...)
3519 *
3520 * For datagram sockets, @addr is the addres to which datagrams are sent
3521 * by default, and the only address from which datagrams are received.
3522 * For stream sockets, attempts to connect to @addr.
3523 * Returns 0 or an error code.
3524 */
3525
3362int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, 3526int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
3363 int flags) 3527 int flags)
3364{ 3528{
@@ -3366,18 +3530,48 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
3366} 3530}
3367EXPORT_SYMBOL(kernel_connect); 3531EXPORT_SYMBOL(kernel_connect);
3368 3532
3533/**
3534 * kernel_getsockname - get the address which the socket is bound (kernel space)
3535 * @sock: socket
3536 * @addr: address holder
3537 *
3538 * Fills the @addr pointer with the address which the socket is bound.
3539 * Returns 0 or an error code.
3540 */
3541
3369int kernel_getsockname(struct socket *sock, struct sockaddr *addr) 3542int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
3370{ 3543{
3371 return sock->ops->getname(sock, addr, 0); 3544 return sock->ops->getname(sock, addr, 0);
3372} 3545}
3373EXPORT_SYMBOL(kernel_getsockname); 3546EXPORT_SYMBOL(kernel_getsockname);
3374 3547
3548/**
3549 * kernel_peername - get the address which the socket is connected (kernel space)
3550 * @sock: socket
3551 * @addr: address holder
3552 *
3553 * Fills the @addr pointer with the address which the socket is connected.
3554 * Returns 0 or an error code.
3555 */
3556
3375int kernel_getpeername(struct socket *sock, struct sockaddr *addr) 3557int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
3376{ 3558{
3377 return sock->ops->getname(sock, addr, 1); 3559 return sock->ops->getname(sock, addr, 1);
3378} 3560}
3379EXPORT_SYMBOL(kernel_getpeername); 3561EXPORT_SYMBOL(kernel_getpeername);
3380 3562
3563/**
3564 * kernel_getsockopt - get a socket option (kernel space)
3565 * @sock: socket
3566 * @level: API level (SOL_SOCKET, ...)
3567 * @optname: option tag
3568 * @optval: option value
3569 * @optlen: option length
3570 *
3571 * Assigns the option length to @optlen.
3572 * Returns 0 or an error.
3573 */
3574
3381int kernel_getsockopt(struct socket *sock, int level, int optname, 3575int kernel_getsockopt(struct socket *sock, int level, int optname,
3382 char *optval, int *optlen) 3576 char *optval, int *optlen)
3383{ 3577{
@@ -3400,6 +3594,17 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
3400} 3594}
3401EXPORT_SYMBOL(kernel_getsockopt); 3595EXPORT_SYMBOL(kernel_getsockopt);
3402 3596
3597/**
3598 * kernel_setsockopt - set a socket option (kernel space)
3599 * @sock: socket
3600 * @level: API level (SOL_SOCKET, ...)
3601 * @optname: option tag
3602 * @optval: option value
3603 * @optlen: option length
3604 *
3605 * Returns 0 or an error.
3606 */
3607
3403int kernel_setsockopt(struct socket *sock, int level, int optname, 3608int kernel_setsockopt(struct socket *sock, int level, int optname,
3404 char *optval, unsigned int optlen) 3609 char *optval, unsigned int optlen)
3405{ 3610{
@@ -3420,6 +3625,17 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
3420} 3625}
3421EXPORT_SYMBOL(kernel_setsockopt); 3626EXPORT_SYMBOL(kernel_setsockopt);
3422 3627
3628/**
3629 * kernel_sendpage - send a &page through a socket (kernel space)
3630 * @sock: socket
3631 * @page: page
3632 * @offset: page offset
3633 * @size: total size in bytes
3634 * @flags: flags (MSG_DONTWAIT, ...)
3635 *
3636 * Returns the total amount sent in bytes or an error.
3637 */
3638
3423int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3639int kernel_sendpage(struct socket *sock, struct page *page, int offset,
3424 size_t size, int flags) 3640 size_t size, int flags)
3425{ 3641{
@@ -3430,6 +3646,18 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
3430} 3646}
3431EXPORT_SYMBOL(kernel_sendpage); 3647EXPORT_SYMBOL(kernel_sendpage);
3432 3648
3649/**
3650 * kernel_sendpage_locked - send a &page through the locked sock (kernel space)
3651 * @sk: sock
3652 * @page: page
3653 * @offset: page offset
3654 * @size: total size in bytes
3655 * @flags: flags (MSG_DONTWAIT, ...)
3656 *
3657 * Returns the total amount sent in bytes or an error.
3658 * Caller must hold @sk.
3659 */
3660
3433int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, 3661int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
3434 size_t size, int flags) 3662 size_t size, int flags)
3435{ 3663{
@@ -3443,17 +3671,30 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
3443} 3671}
3444EXPORT_SYMBOL(kernel_sendpage_locked); 3672EXPORT_SYMBOL(kernel_sendpage_locked);
3445 3673
3674/**
3675 * kernel_shutdown - shut down part of a full-duplex connection (kernel space)
3676 * @sock: socket
3677 * @how: connection part
3678 *
3679 * Returns 0 or an error.
3680 */
3681
3446int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) 3682int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
3447{ 3683{
3448 return sock->ops->shutdown(sock, how); 3684 return sock->ops->shutdown(sock, how);
3449} 3685}
3450EXPORT_SYMBOL(kernel_sock_shutdown); 3686EXPORT_SYMBOL(kernel_sock_shutdown);
3451 3687
3452/* This routine returns the IP overhead imposed by a socket i.e. 3688/**
3453 * the length of the underlying IP header, depending on whether 3689 * kernel_sock_ip_overhead - returns the IP overhead imposed by a socket
3454 * this is an IPv4 or IPv6 socket and the length from IP options turned 3690 * @sk: socket
3455 * on at the socket. Assumes that the caller has a lock on the socket. 3691 *
3692 * This routine returns the IP overhead imposed by a socket i.e.
3693 * the length of the underlying IP header, depending on whether
3694 * this is an IPv4 or IPv6 socket and the length from IP options turned
3695 * on at the socket. Assumes that the caller has a lock on the socket.
3456 */ 3696 */
3697
3457u32 kernel_sock_ip_overhead(struct sock *sk) 3698u32 kernel_sock_ip_overhead(struct sock *sk)
3458{ 3699{
3459 struct inet_sock *inet; 3700 struct inet_sock *inet;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index da1a676860ca..860dcfb95ee4 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -550,6 +550,8 @@ EXPORT_SYMBOL_GPL(strp_check_rcv);
550static int __init strp_mod_init(void) 550static int __init strp_mod_init(void)
551{ 551{
552 strp_wq = create_singlethread_workqueue("kstrp"); 552 strp_wq = create_singlethread_workqueue("kstrp");
553 if (unlikely(!strp_wq))
554 return -ENOMEM;
553 555
554 return 0; 556 return 0;
555} 557}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 228970e6e52b..187d10443a15 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2311,6 +2311,15 @@ out_exit:
2311 rpc_exit(task, status); 2311 rpc_exit(task, status);
2312} 2312}
2313 2313
2314static bool
2315rpc_check_connected(const struct rpc_rqst *req)
2316{
2317 /* No allocated request or transport? return true */
2318 if (!req || !req->rq_xprt)
2319 return true;
2320 return xprt_connected(req->rq_xprt);
2321}
2322
2314static void 2323static void
2315rpc_check_timeout(struct rpc_task *task) 2324rpc_check_timeout(struct rpc_task *task)
2316{ 2325{
@@ -2322,10 +2331,11 @@ rpc_check_timeout(struct rpc_task *task)
2322 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid); 2331 dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
2323 task->tk_timeouts++; 2332 task->tk_timeouts++;
2324 2333
2325 if (RPC_IS_SOFTCONN(task)) { 2334 if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
2326 rpc_exit(task, -ETIMEDOUT); 2335 rpc_exit(task, -ETIMEDOUT);
2327 return; 2336 return;
2328 } 2337 }
2338
2329 if (RPC_IS_SOFT(task)) { 2339 if (RPC_IS_SOFT(task)) {
2330 if (clnt->cl_chatty) { 2340 if (clnt->cl_chatty) {
2331 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 2341 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 9359539907ba..732d4b57411a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -495,8 +495,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
495 int flags, struct rpc_rqst *req) 495 int flags, struct rpc_rqst *req)
496{ 496{
497 struct xdr_buf *buf = &req->rq_private_buf; 497 struct xdr_buf *buf = &req->rq_private_buf;
498 size_t want, read; 498 size_t want, uninitialized_var(read);
499 ssize_t ret; 499 ssize_t uninitialized_var(ret);
500 500
501 xs_read_header(transport, buf); 501 xs_read_header(transport, buf);
502 502
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 06fee142f09f..63f39201e41e 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -919,6 +919,9 @@ int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
919{ 919{
920 struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP); 920 struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
921 921
922 if (!group)
923 return -EMSGSIZE;
924
922 if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID, 925 if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
923 grp->type) || 926 grp->type) ||
924 nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE, 927 nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f076edb74338..7ce1e86b024f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
163 163
164void tipc_net_stop(struct net *net) 164void tipc_net_stop(struct net *net)
165{ 165{
166 u32 self = tipc_own_addr(net); 166 if (!tipc_own_id(net))
167
168 if (!self)
169 return; 167 return;
170 168
171 tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
172 rtnl_lock(); 169 rtnl_lock();
173 tipc_bearer_stop(net); 170 tipc_bearer_stop(net);
174 tipc_node_stop(net); 171 tipc_node_stop(net);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2dc4919ab23c..dd3b6dc17662 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -817,10 +817,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
817static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) 817static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
818{ 818{
819 struct tipc_link_entry *le = &n->links[bearer_id]; 819 struct tipc_link_entry *le = &n->links[bearer_id];
820 struct tipc_media_addr *maddr = NULL;
820 struct tipc_link *l = le->link; 821 struct tipc_link *l = le->link;
821 struct tipc_media_addr *maddr;
822 struct sk_buff_head xmitq;
823 int old_bearer_id = bearer_id; 822 int old_bearer_id = bearer_id;
823 struct sk_buff_head xmitq;
824 824
825 if (!l) 825 if (!l)
826 return; 826 return;
@@ -844,7 +844,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
844 tipc_node_write_unlock(n); 844 tipc_node_write_unlock(n);
845 if (delete) 845 if (delete)
846 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); 846 tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
847 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 847 if (!skb_queue_empty(&xmitq))
848 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
848 tipc_sk_rcv(n->net, &le->inputq); 849 tipc_sk_rcv(n->net, &le->inputq);
849} 850}
850 851
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3274ef625dba..b542f14ed444 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2349 return 0; 2349 return 0;
2350} 2350}
2351 2351
2352static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2353{
2354 if (addr->family != AF_TIPC)
2355 return false;
2356 if (addr->addrtype == TIPC_SERVICE_RANGE)
2357 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2358 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2359 addr->addrtype == TIPC_SOCKET_ADDR);
2360}
2361
2352/** 2362/**
2353 * tipc_connect - establish a connection to another TIPC port 2363 * tipc_connect - establish a connection to another TIPC port
2354 * @sock: socket structure 2364 * @sock: socket structure
@@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2384 if (!tipc_sk_type_connectionless(sk)) 2394 if (!tipc_sk_type_connectionless(sk))
2385 res = -EINVAL; 2395 res = -EINVAL;
2386 goto exit; 2396 goto exit;
2387 } else if (dst->family != AF_TIPC) {
2388 res = -EINVAL;
2389 } 2397 }
2390 if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME) 2398 if (!tipc_sockaddr_is_sane(dst)) {
2391 res = -EINVAL; 2399 res = -EINVAL;
2392 if (res)
2393 goto exit; 2400 goto exit;
2394 2401 }
2395 /* DGRAM/RDM connect(), just save the destaddr */ 2402 /* DGRAM/RDM connect(), just save the destaddr */
2396 if (tipc_sk_type_connectionless(sk)) { 2403 if (tipc_sk_type_connectionless(sk)) {
2397 memcpy(&tsk->peer, dest, destlen); 2404 memcpy(&tsk->peer, dest, destlen);
2398 goto exit; 2405 goto exit;
2406 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2407 res = -EINVAL;
2408 goto exit;
2399 } 2409 }
2400 2410
2401 previous = sk->sk_state; 2411 previous = sk->sk_state;
@@ -3255,6 +3265,8 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3255 peer_port = tsk_peer_port(tsk); 3265 peer_port = tsk_peer_port(tsk);
3256 3266
3257 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); 3267 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
3268 if (!nest)
3269 return -EMSGSIZE;
3258 3270
3259 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) 3271 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3260 goto msg_full; 3272 goto msg_full;
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 4a708a4e8583..b45932d78004 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -363,6 +363,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
363 struct tipc_subscription *sub; 363 struct tipc_subscription *sub;
364 364
365 if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) { 365 if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
366 s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
366 tipc_conn_delete_sub(con, s); 367 tipc_conn_delete_sub(con, s);
367 return 0; 368 return 0;
368 } 369 }
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 77520eacee8f..989e52386c35 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -193,9 +193,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
193 193
194static void xdp_umem_release(struct xdp_umem *umem) 194static void xdp_umem_release(struct xdp_umem *umem)
195{ 195{
196 struct task_struct *task;
197 struct mm_struct *mm;
198
199 xdp_umem_clear_dev(umem); 196 xdp_umem_clear_dev(umem);
200 197
201 ida_simple_remove(&umem_ida, umem->id); 198 ida_simple_remove(&umem_ida, umem->id);
@@ -214,21 +211,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
214 211
215 xdp_umem_unpin_pages(umem); 212 xdp_umem_unpin_pages(umem);
216 213
217 task = get_pid_task(umem->pid, PIDTYPE_PID);
218 put_pid(umem->pid);
219 if (!task)
220 goto out;
221 mm = get_task_mm(task);
222 put_task_struct(task);
223 if (!mm)
224 goto out;
225
226 mmput(mm);
227 kfree(umem->pages); 214 kfree(umem->pages);
228 umem->pages = NULL; 215 umem->pages = NULL;
229 216
230 xdp_umem_unaccount_pages(umem); 217 xdp_umem_unaccount_pages(umem);
231out:
232 kfree(umem); 218 kfree(umem);
233} 219}
234 220
@@ -357,7 +343,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
357 if (size_chk < 0) 343 if (size_chk < 0)
358 return -EINVAL; 344 return -EINVAL;
359 345
360 umem->pid = get_task_pid(current, PIDTYPE_PID);
361 umem->address = (unsigned long)addr; 346 umem->address = (unsigned long)addr;
362 umem->chunk_mask = ~((u64)chunk_size - 1); 347 umem->chunk_mask = ~((u64)chunk_size - 1);
363 umem->size = size; 348 umem->size = size;
@@ -373,7 +358,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
373 358
374 err = xdp_umem_account_pages(umem); 359 err = xdp_umem_account_pages(umem);
375 if (err) 360 if (err)
376 goto out; 361 return err;
377 362
378 err = xdp_umem_pin_pages(umem); 363 err = xdp_umem_pin_pages(umem);
379 if (err) 364 if (err)
@@ -392,8 +377,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
392 377
393out_account: 378out_account:
394 xdp_umem_unaccount_pages(umem); 379 xdp_umem_unaccount_pages(umem);
395out:
396 put_pid(umem->pid);
397 return err; 380 return err;
398} 381}
399 382
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 2554a15ecf2b..76ca30cc4791 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
199 "$(if $(part-of-module),1,0)" "$(@)"; 199 "$(if $(part-of-module),1,0)" "$(@)";
200recordmcount_source := $(srctree)/scripts/recordmcount.pl 200recordmcount_source := $(srctree)/scripts/recordmcount.pl
201endif # BUILD_C_RECORDMCOUNT 201endif # BUILD_C_RECORDMCOUNT
202cmd_record_mcount = \ 202cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)), \
203 if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" = \ 203 $(sub_cmd_record_mcount))
204 "$(CC_FLAGS_FTRACE)" ]; then \
205 $(sub_cmd_record_mcount) \
206 fi
207endif # CC_USING_RECORD_MCOUNT 204endif # CC_USING_RECORD_MCOUNT
208endif # CONFIG_FTRACE_MCOUNT_RECORD 205endif # CONFIG_FTRACE_MCOUNT_RECORD
209 206
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 5b756278df13..a09333fd7cef 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5977,7 +5977,7 @@ sub process {
5977 while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) { 5977 while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
5978 $specifier = $1; 5978 $specifier = $1;
5979 $extension = $2; 5979 $extension = $2;
5980 if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) { 5980 if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
5981 $bad_specifier = $specifier; 5981 $bad_specifier = $specifier;
5982 last; 5982 last;
5983 } 5983 }
diff --git a/scripts/coccinelle/free/put_device.cocci b/scripts/coccinelle/free/put_device.cocci
index 7395697e7f19..c9f071b0a0ab 100644
--- a/scripts/coccinelle/free/put_device.cocci
+++ b/scripts/coccinelle/free/put_device.cocci
@@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; }
32( id 32( id
33| (T2)dev_get_drvdata(&id->dev) 33| (T2)dev_get_drvdata(&id->dev)
34| (T3)platform_get_drvdata(id) 34| (T3)platform_get_drvdata(id)
35| &id->dev
35); 36);
36| return@p2 ...; 37| return@p2 ...;
37) 38)
diff --git a/scripts/coccinelle/misc/badty.cocci b/scripts/coccinelle/misc/badty.cocci
index 481cf301ccfc..08470362199c 100644
--- a/scripts/coccinelle/misc/badty.cocci
+++ b/scripts/coccinelle/misc/badty.cocci
@@ -1,4 +1,4 @@
1/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element 1/// Correct the size argument to alloc functions
2/// 2///
3//# This makes an effort to find cases where the argument to sizeof is wrong 3//# This makes an effort to find cases where the argument to sizeof is wrong
4//# in memory allocation functions by checking the type of the allocated memory 4//# in memory allocation functions by checking the type of the allocated memory
diff --git a/scripts/kconfig/lxdialog/inputbox.c b/scripts/kconfig/lxdialog/inputbox.c
index 611945611bf8..1dcfb288ee63 100644
--- a/scripts/kconfig/lxdialog/inputbox.c
+++ b/scripts/kconfig/lxdialog/inputbox.c
@@ -113,7 +113,8 @@ do_resize:
113 case KEY_DOWN: 113 case KEY_DOWN:
114 break; 114 break;
115 case KEY_BACKSPACE: 115 case KEY_BACKSPACE:
116 case 127: 116 case 8: /* ^H */
117 case 127: /* ^? */
117 if (pos) { 118 if (pos) {
118 wattrset(dialog, dlg.inputbox.atr); 119 wattrset(dialog, dlg.inputbox.atr);
119 if (input_x == 0) { 120 if (input_x == 0) {
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index a4670f4e825a..ac92c0ded6c5 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
1048 state->match_direction = FIND_NEXT_MATCH_UP; 1048 state->match_direction = FIND_NEXT_MATCH_UP;
1049 *ans = get_mext_match(state->pattern, 1049 *ans = get_mext_match(state->pattern,
1050 state->match_direction); 1050 state->match_direction);
1051 } else if (key == KEY_BACKSPACE || key == 127) { 1051 } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
1052 state->pattern[strlen(state->pattern)-1] = '\0'; 1052 state->pattern[strlen(state->pattern)-1] = '\0';
1053 adj_match_dir(&state->match_direction); 1053 adj_match_dir(&state->match_direction);
1054 } else 1054 } else
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 7be620a1fcdb..77f525a8617c 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
439 case KEY_F(F_EXIT): 439 case KEY_F(F_EXIT):
440 case KEY_F(F_BACK): 440 case KEY_F(F_BACK):
441 break; 441 break;
442 case 127: 442 case 8: /* ^H */
443 case 127: /* ^? */
443 case KEY_BACKSPACE: 444 case KEY_BACKSPACE:
444 if (cursor_position > 0) { 445 if (cursor_position > 0) {
445 memmove(&result[cursor_position-1], 446 memmove(&result[cursor_position-1],
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0b0d1080b1c5..f277e116e0eb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
639 info->sechdrs[sym->st_shndx].sh_offset - 639 info->sechdrs[sym->st_shndx].sh_offset -
640 (info->hdr->e_type != ET_REL ? 640 (info->hdr->e_type != ET_REL ?
641 info->sechdrs[sym->st_shndx].sh_addr : 0); 641 info->sechdrs[sym->st_shndx].sh_addr : 0);
642 crc = *crcp; 642 crc = TO_NATIVE(*crcp);
643 } 643 }
644 sym_update_crc(symname + strlen("__crc_"), mod, crc, 644 sym_update_crc(symname + strlen("__crc_"), mod, crc,
645 export); 645 export);
diff --git a/security/Kconfig b/security/Kconfig
index 1d6463fb1450..353cfef71d4e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -239,8 +239,46 @@ source "security/safesetid/Kconfig"
239 239
240source "security/integrity/Kconfig" 240source "security/integrity/Kconfig"
241 241
242choice
243 prompt "First legacy 'major LSM' to be initialized"
244 default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
245 default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
246 default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
247 default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
248 default DEFAULT_SECURITY_DAC
249
250 help
251 This choice is there only for converting CONFIG_DEFAULT_SECURITY
252 in old kernel configs to CONFIG_LSM in new kernel configs. Don't
253 change this choice unless you are creating a fresh kernel config,
254 for this choice will be ignored after CONFIG_LSM has been set.
255
256 Selects the legacy "major security module" that will be
257 initialized first. Overridden by non-default CONFIG_LSM.
258
259 config DEFAULT_SECURITY_SELINUX
260 bool "SELinux" if SECURITY_SELINUX=y
261
262 config DEFAULT_SECURITY_SMACK
263 bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
264
265 config DEFAULT_SECURITY_TOMOYO
266 bool "TOMOYO" if SECURITY_TOMOYO=y
267
268 config DEFAULT_SECURITY_APPARMOR
269 bool "AppArmor" if SECURITY_APPARMOR=y
270
271 config DEFAULT_SECURITY_DAC
272 bool "Unix Discretionary Access Controls"
273
274endchoice
275
242config LSM 276config LSM
243 string "Ordered list of enabled LSMs" 277 string "Ordered list of enabled LSMs"
278 default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
279 default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
280 default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
281 default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
244 default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor" 282 default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
245 help 283 help
246 A comma-separated list of LSMs, in initialization order. 284 A comma-separated list of LSMs, in initialization order.
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 6b576e588725..daecdfb15a9c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -828,9 +828,11 @@ void policydb_destroy(struct policydb *p)
828 hashtab_map(p->range_tr, range_tr_destroy, NULL); 828 hashtab_map(p->range_tr, range_tr_destroy, NULL);
829 hashtab_destroy(p->range_tr); 829 hashtab_destroy(p->range_tr);
830 830
831 for (i = 0; i < p->p_types.nprim; i++) 831 if (p->type_attr_map_array) {
832 ebitmap_destroy(&p->type_attr_map_array[i]); 832 for (i = 0; i < p->p_types.nprim; i++)
833 kvfree(p->type_attr_map_array); 833 ebitmap_destroy(&p->type_attr_map_array[i]);
834 kvfree(p->type_attr_map_array);
835 }
834 836
835 ebitmap_destroy(&p->filename_trans_ttypes); 837 ebitmap_destroy(&p->filename_trans_ttypes);
836 ebitmap_destroy(&p->policycaps); 838 ebitmap_destroy(&p->policycaps);
@@ -2496,10 +2498,13 @@ int policydb_read(struct policydb *p, void *fp)
2496 if (!p->type_attr_map_array) 2498 if (!p->type_attr_map_array)
2497 goto bad; 2499 goto bad;
2498 2500
2501 /* just in case ebitmap_init() becomes more than just a memset(0): */
2502 for (i = 0; i < p->p_types.nprim; i++)
2503 ebitmap_init(&p->type_attr_map_array[i]);
2504
2499 for (i = 0; i < p->p_types.nprim; i++) { 2505 for (i = 0; i < p->p_types.nprim; i++) {
2500 struct ebitmap *e = &p->type_attr_map_array[i]; 2506 struct ebitmap *e = &p->type_attr_map_array[i];
2501 2507
2502 ebitmap_init(e);
2503 if (p->policyvers >= POLICYDB_VERSION_AVTAB) { 2508 if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
2504 rc = ebitmap_read(e, fp); 2509 rc = ebitmap_read(e, fp);
2505 if (rc) 2510 if (rc)
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 57cc60722dd3..efac68556b45 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
206 * yama_task_free - check for task_pid to remove from exception list 206 * yama_task_free - check for task_pid to remove from exception list
207 * @task: task being removed 207 * @task: task being removed
208 */ 208 */
209void yama_task_free(struct task_struct *task) 209static void yama_task_free(struct task_struct *task)
210{ 210{
211 yama_ptracer_del(task, task); 211 yama_ptracer_del(task, task);
212} 212}
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
222 * Return 0 on success, -ve on error. -ENOSYS is returned when Yama 222 * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
223 * does not handle the given option. 223 * does not handle the given option.
224 */ 224 */
225int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, 225static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
226 unsigned long arg4, unsigned long arg5) 226 unsigned long arg4, unsigned long arg5)
227{ 227{
228 int rc = -ENOSYS; 228 int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
401 * 401 *
402 * Returns 0 if following the ptrace is allowed, -ve on error. 402 * Returns 0 if following the ptrace is allowed, -ve on error.
403 */ 403 */
404int yama_ptrace_traceme(struct task_struct *parent) 404static int yama_ptrace_traceme(struct task_struct *parent)
405{ 405{
406 int rc = 0; 406 int rc = 0;
407 407
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
452static int zero; 452static int zero;
453static int max_scope = YAMA_SCOPE_NO_ATTACH; 453static int max_scope = YAMA_SCOPE_NO_ATTACH;
454 454
455struct ctl_path yama_sysctl_path[] = { 455static struct ctl_path yama_sysctl_path[] = {
456 { .procname = "kernel", }, 456 { .procname = "kernel", },
457 { .procname = "yama", }, 457 { .procname = "yama", },
458 { } 458 { }
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index d5b0d7ba83c4..f6ae68017608 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
940 oss_frame_size = snd_pcm_format_physical_width(params_format(params)) * 940 oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
941 params_channels(params) / 8; 941 params_channels(params) / 8;
942 942
943 err = snd_pcm_oss_period_size(substream, params, sparams);
944 if (err < 0)
945 goto failure;
946
947 n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
948 err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
949 if (err < 0)
950 goto failure;
951
952 err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
953 runtime->oss.periods, NULL);
954 if (err < 0)
955 goto failure;
956
957 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
958
959 err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
960 if (err < 0) {
961 pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
962 goto failure;
963 }
964
943#ifdef CONFIG_SND_PCM_OSS_PLUGINS 965#ifdef CONFIG_SND_PCM_OSS_PLUGINS
944 snd_pcm_oss_plugin_clear(substream); 966 snd_pcm_oss_plugin_clear(substream);
945 if (!direct) { 967 if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
974 } 996 }
975#endif 997#endif
976 998
977 err = snd_pcm_oss_period_size(substream, params, sparams);
978 if (err < 0)
979 goto failure;
980
981 n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
982 err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
983 if (err < 0)
984 goto failure;
985
986 err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
987 runtime->oss.periods, NULL);
988 if (err < 0)
989 goto failure;
990
991 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
992
993 if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
994 pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
995 goto failure;
996 }
997
998 if (runtime->oss.trigger) { 999 if (runtime->oss.trigger) {
999 sw_params->start_threshold = 1; 1000 sw_params->start_threshold = 1;
1000 } else { 1001 } else {
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index f731f904e8cc..1d8452912b14 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
1445static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state) 1445static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
1446{ 1446{
1447 struct snd_pcm_runtime *runtime = substream->runtime; 1447 struct snd_pcm_runtime *runtime = substream->runtime;
1448 if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) 1448 switch (runtime->status->state) {
1449 case SNDRV_PCM_STATE_SUSPENDED:
1449 return -EBUSY; 1450 return -EBUSY;
1451 /* unresumable PCM state; return -EBUSY for skipping suspend */
1452 case SNDRV_PCM_STATE_OPEN:
1453 case SNDRV_PCM_STATE_SETUP:
1454 case SNDRV_PCM_STATE_DISCONNECTED:
1455 return -EBUSY;
1456 }
1450 runtime->trigger_master = substream; 1457 runtime->trigger_master = substream;
1451 return 0; 1458 return 0;
1452} 1459}
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index ee601d7f0926..c0690d1ecd55 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/nospec.h>
33#include <sound/rawmidi.h> 34#include <sound/rawmidi.h>
34#include <sound/info.h> 35#include <sound/info.h>
35#include <sound/control.h> 36#include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
601 return -ENXIO; 602 return -ENXIO;
602 if (info->stream < 0 || info->stream > 1) 603 if (info->stream < 0 || info->stream > 1)
603 return -EINVAL; 604 return -EINVAL;
605 info->stream = array_index_nospec(info->stream, 2);
604 pstr = &rmidi->streams[info->stream]; 606 pstr = &rmidi->streams[info->stream];
605 if (pstr->substream_count == 0) 607 if (pstr->substream_count == 0)
606 return -ENOENT; 608 return -ENOENT;
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index 278ebb993122..c93945917235 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@ int
617snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf) 617snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
618{ 618{
619 struct seq_oss_synth *rec; 619 struct seq_oss_synth *rec;
620 struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
620 621
621 if (dev < 0 || dev >= dp->max_synthdev) 622 if (!info)
622 return -ENXIO; 623 return -ENXIO;
623 624
624 if (dp->synths[dev].is_midi) { 625 if (info->is_midi) {
625 struct midi_info minf; 626 struct midi_info minf;
626 snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf); 627 snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
627 inf->synth_type = SYNTH_TYPE_MIDI; 628 inf->synth_type = SYNTH_TYPE_MIDI;
628 inf->synth_subtype = 0; 629 inf->synth_subtype = 0;
629 inf->nr_voices = 16; 630 inf->nr_voices = 16;
diff --git a/sound/drivers/opl3/opl3_voice.h b/sound/drivers/opl3/opl3_voice.h
index 5b02bd49fde4..4e4ecc21760b 100644
--- a/sound/drivers/opl3/opl3_voice.h
+++ b/sound/drivers/opl3/opl3_voice.h
@@ -41,7 +41,7 @@ void snd_opl3_timer_func(struct timer_list *t);
41 41
42/* Prototypes for opl3_drums.c */ 42/* Prototypes for opl3_drums.c */
43void snd_opl3_load_drums(struct snd_opl3 *opl3); 43void snd_opl3_load_drums(struct snd_opl3 *opl3);
44void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int on_off, int vel, struct snd_midi_channel *chan); 44void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off, struct snd_midi_channel *chan);
45 45
46/* Prototypes for opl3_oss.c */ 46/* Prototypes for opl3_oss.c */
47#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS) 47#if IS_ENABLED(CONFIG_SND_SEQUENCER_OSS)
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 220e61926ea4..513291ba0ab0 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -36,7 +36,7 @@ static void name_card(struct snd_motu *motu)
36 fw_csr_iterator_init(&it, motu->unit->directory); 36 fw_csr_iterator_init(&it, motu->unit->directory);
37 while (fw_csr_iterator_next(&it, &key, &val)) { 37 while (fw_csr_iterator_next(&it, &key, &val)) {
38 switch (key) { 38 switch (key) {
39 case CSR_VERSION: 39 case CSR_MODEL:
40 version = val; 40 version = val;
41 break; 41 break;
42 } 42 }
@@ -46,7 +46,7 @@ static void name_card(struct snd_motu *motu)
46 strcpy(motu->card->shortname, motu->spec->name); 46 strcpy(motu->card->shortname, motu->spec->name);
47 strcpy(motu->card->mixername, motu->spec->name); 47 strcpy(motu->card->mixername, motu->spec->name);
48 snprintf(motu->card->longname, sizeof(motu->card->longname), 48 snprintf(motu->card->longname, sizeof(motu->card->longname),
49 "MOTU %s (version:%d), GUID %08x%08x at %s, S%d", 49 "MOTU %s (version:%06x), GUID %08x%08x at %s, S%d",
50 motu->spec->name, version, 50 motu->spec->name, version,
51 fw_dev->config_rom[3], fw_dev->config_rom[4], 51 fw_dev->config_rom[3], fw_dev->config_rom[4],
52 dev_name(&motu->unit->device), 100 << fw_dev->max_speed); 52 dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
@@ -237,20 +237,20 @@ static const struct snd_motu_spec motu_audio_express = {
237#define SND_MOTU_DEV_ENTRY(model, data) \ 237#define SND_MOTU_DEV_ENTRY(model, data) \
238{ \ 238{ \
239 .match_flags = IEEE1394_MATCH_VENDOR_ID | \ 239 .match_flags = IEEE1394_MATCH_VENDOR_ID | \
240 IEEE1394_MATCH_MODEL_ID | \ 240 IEEE1394_MATCH_SPECIFIER_ID | \
241 IEEE1394_MATCH_SPECIFIER_ID, \ 241 IEEE1394_MATCH_VERSION, \
242 .vendor_id = OUI_MOTU, \ 242 .vendor_id = OUI_MOTU, \
243 .model_id = model, \
244 .specifier_id = OUI_MOTU, \ 243 .specifier_id = OUI_MOTU, \
244 .version = model, \
245 .driver_data = (kernel_ulong_t)data, \ 245 .driver_data = (kernel_ulong_t)data, \
246} 246}
247 247
248static const struct ieee1394_device_id motu_id_table[] = { 248static const struct ieee1394_device_id motu_id_table[] = {
249 SND_MOTU_DEV_ENTRY(0x101800, &motu_828mk2), 249 SND_MOTU_DEV_ENTRY(0x000003, &motu_828mk2),
250 SND_MOTU_DEV_ENTRY(0x107800, &snd_motu_spec_traveler), 250 SND_MOTU_DEV_ENTRY(0x000009, &snd_motu_spec_traveler),
251 SND_MOTU_DEV_ENTRY(0x106800, &motu_828mk3), /* FireWire only. */ 251 SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
252 SND_MOTU_DEV_ENTRY(0x100800, &motu_828mk3), /* Hybrid. */ 252 SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
253 SND_MOTU_DEV_ENTRY(0x104800, &motu_audio_express), 253 SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
254 { } 254 { }
255}; 255};
256MODULE_DEVICE_TABLE(ieee1394, motu_id_table); 256MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
diff --git a/sound/isa/sb/sb8.c b/sound/isa/sb/sb8.c
index aa2a83eb81a9..dc27a480c2d9 100644
--- a/sound/isa/sb/sb8.c
+++ b/sound/isa/sb/sb8.c
@@ -111,6 +111,10 @@ static int snd_sb8_probe(struct device *pdev, unsigned int dev)
111 111
112 /* block the 0x388 port to avoid PnP conflicts */ 112 /* block the 0x388 port to avoid PnP conflicts */
113 acard->fm_res = request_region(0x388, 4, "SoundBlaster FM"); 113 acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
114 if (!acard->fm_res) {
115 err = -EBUSY;
116 goto _err;
117 }
114 118
115 if (port[dev] != SNDRV_AUTO_PORT) { 119 if (port[dev] != SNDRV_AUTO_PORT) {
116 if ((err = snd_sbdsp_create(card, port[dev], irq[dev], 120 if ((err = snd_sbdsp_create(card, port[dev], irq[dev],
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index ea876b0b02b9..dc0084dc8550 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -1952,6 +1952,11 @@ static int snd_echo_create(struct snd_card *card,
1952 } 1952 }
1953 chip->dsp_registers = (volatile u32 __iomem *) 1953 chip->dsp_registers = (volatile u32 __iomem *)
1954 ioremap_nocache(chip->dsp_registers_phys, sz); 1954 ioremap_nocache(chip->dsp_registers_phys, sz);
1955 if (!chip->dsp_registers) {
1956 dev_err(chip->card->dev, "ioremap failed\n");
1957 snd_echo_free(chip);
1958 return -ENOMEM;
1959 }
1955 1960
1956 if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED, 1961 if (request_irq(pci->irq, snd_echo_interrupt, IRQF_SHARED,
1957 KBUILD_MODNAME, chip)) { 1962 KBUILD_MODNAME, chip)) {
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5f2005098a60..ec0b8595eb4d 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2939,6 +2939,20 @@ static int hda_codec_runtime_resume(struct device *dev)
2939#endif /* CONFIG_PM */ 2939#endif /* CONFIG_PM */
2940 2940
2941#ifdef CONFIG_PM_SLEEP 2941#ifdef CONFIG_PM_SLEEP
2942static int hda_codec_force_resume(struct device *dev)
2943{
2944 int ret;
2945
2946 /* The get/put pair below enforces the runtime resume even if the
2947 * device hasn't been used at suspend time. This trick is needed to
2948 * update the jack state change during the sleep.
2949 */
2950 pm_runtime_get_noresume(dev);
2951 ret = pm_runtime_force_resume(dev);
2952 pm_runtime_put(dev);
2953 return ret;
2954}
2955
2942static int hda_codec_pm_suspend(struct device *dev) 2956static int hda_codec_pm_suspend(struct device *dev)
2943{ 2957{
2944 dev->power.power_state = PMSG_SUSPEND; 2958 dev->power.power_state = PMSG_SUSPEND;
@@ -2948,7 +2962,7 @@ static int hda_codec_pm_suspend(struct device *dev)
2948static int hda_codec_pm_resume(struct device *dev) 2962static int hda_codec_pm_resume(struct device *dev)
2949{ 2963{
2950 dev->power.power_state = PMSG_RESUME; 2964 dev->power.power_state = PMSG_RESUME;
2951 return pm_runtime_force_resume(dev); 2965 return hda_codec_force_resume(dev);
2952} 2966}
2953 2967
2954static int hda_codec_pm_freeze(struct device *dev) 2968static int hda_codec_pm_freeze(struct device *dev)
@@ -2960,13 +2974,13 @@ static int hda_codec_pm_freeze(struct device *dev)
2960static int hda_codec_pm_thaw(struct device *dev) 2974static int hda_codec_pm_thaw(struct device *dev)
2961{ 2975{
2962 dev->power.power_state = PMSG_THAW; 2976 dev->power.power_state = PMSG_THAW;
2963 return pm_runtime_force_resume(dev); 2977 return hda_codec_force_resume(dev);
2964} 2978}
2965 2979
2966static int hda_codec_pm_restore(struct device *dev) 2980static int hda_codec_pm_restore(struct device *dev)
2967{ 2981{
2968 dev->power.power_state = PMSG_RESTORE; 2982 dev->power.power_state = PMSG_RESTORE;
2969 return pm_runtime_force_resume(dev); 2983 return hda_codec_force_resume(dev);
2970} 2984}
2971#endif /* CONFIG_PM_SLEEP */ 2985#endif /* CONFIG_PM_SLEEP */
2972 2986
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e5c49003e75f..ece256a3b48f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -947,7 +947,7 @@ static void __azx_runtime_suspend(struct azx *chip)
947 display_power(chip, false); 947 display_power(chip, false);
948} 948}
949 949
950static void __azx_runtime_resume(struct azx *chip) 950static void __azx_runtime_resume(struct azx *chip, bool from_rt)
951{ 951{
952 struct hda_intel *hda = container_of(chip, struct hda_intel, chip); 952 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
953 struct hdac_bus *bus = azx_bus(chip); 953 struct hdac_bus *bus = azx_bus(chip);
@@ -964,7 +964,7 @@ static void __azx_runtime_resume(struct azx *chip)
964 azx_init_pci(chip); 964 azx_init_pci(chip);
965 hda_intel_init_chip(chip, true); 965 hda_intel_init_chip(chip, true);
966 966
967 if (status) { 967 if (status && from_rt) {
968 list_for_each_codec(codec, &chip->bus) 968 list_for_each_codec(codec, &chip->bus)
969 if (status & (1 << codec->addr)) 969 if (status & (1 << codec->addr))
970 schedule_delayed_work(&codec->jackpoll_work, 970 schedule_delayed_work(&codec->jackpoll_work,
@@ -1016,7 +1016,7 @@ static int azx_resume(struct device *dev)
1016 chip->msi = 0; 1016 chip->msi = 0;
1017 if (azx_acquire_irq(chip, 1) < 0) 1017 if (azx_acquire_irq(chip, 1) < 0)
1018 return -EIO; 1018 return -EIO;
1019 __azx_runtime_resume(chip); 1019 __azx_runtime_resume(chip, false);
1020 snd_power_change_state(card, SNDRV_CTL_POWER_D0); 1020 snd_power_change_state(card, SNDRV_CTL_POWER_D0);
1021 1021
1022 trace_azx_resume(chip); 1022 trace_azx_resume(chip);
@@ -1081,7 +1081,7 @@ static int azx_runtime_resume(struct device *dev)
1081 chip = card->private_data; 1081 chip = card->private_data;
1082 if (!azx_has_pm_runtime(chip)) 1082 if (!azx_has_pm_runtime(chip))
1083 return 0; 1083 return 0;
1084 __azx_runtime_resume(chip); 1084 __azx_runtime_resume(chip, true);
1085 1085
1086 /* disable controller Wake Up event*/ 1086 /* disable controller Wake Up event*/
1087 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & 1087 azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
@@ -2144,10 +2144,12 @@ static struct snd_pci_quirk power_save_blacklist[] = {
2144 SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0), 2144 SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
2145 /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */ 2145 /* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
2146 SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0), 2146 SND_PCI_QUIRK(0x8086, 0x2068, "Intel NUC7i3BNB", 0),
2147 /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
2148 SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
2149 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ 2147 /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
2150 SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), 2148 SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
2149 /* https://bugzilla.redhat.com/show_bug.cgi?id=1689623 */
2150 SND_PCI_QUIRK(0x17aa, 0x367b, "Lenovo IdeaCentre B550", 0),
2151 /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
2152 SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
2151 {} 2153 {}
2152}; 2154};
2153#endif /* CONFIG_PM */ 2155#endif /* CONFIG_PM */
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 29882bda7632..e1ebc6d5f382 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
1005 unsigned int scp_resp_header; 1005 unsigned int scp_resp_header;
1006 unsigned int scp_resp_data[4]; 1006 unsigned int scp_resp_data[4];
1007 unsigned int scp_resp_count; 1007 unsigned int scp_resp_count;
1008 bool alt_firmware_present;
1009 bool startup_check_entered; 1008 bool startup_check_entered;
1010 bool dsp_reload; 1009 bool dsp_reload;
1011 1010
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
7518 bool dsp_loaded = false; 7517 bool dsp_loaded = false;
7519 struct ca0132_spec *spec = codec->spec; 7518 struct ca0132_spec *spec = codec->spec;
7520 const struct dsp_image_seg *dsp_os_image; 7519 const struct dsp_image_seg *dsp_os_image;
7521 const struct firmware *fw_entry; 7520 const struct firmware *fw_entry = NULL;
7522 /* 7521 /*
7523 * Alternate firmwares for different variants. The Recon3Di apparently 7522 * Alternate firmwares for different variants. The Recon3Di apparently
7524 * can use the default firmware, but I'll leave the option in case 7523 * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
7529 case QUIRK_R3D: 7528 case QUIRK_R3D:
7530 case QUIRK_AE5: 7529 case QUIRK_AE5:
7531 if (request_firmware(&fw_entry, DESKTOP_EFX_FILE, 7530 if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
7532 codec->card->dev) != 0) { 7531 codec->card->dev) != 0)
7533 codec_dbg(codec, "Desktop firmware not found."); 7532 codec_dbg(codec, "Desktop firmware not found.");
7534 spec->alt_firmware_present = false; 7533 else
7535 } else {
7536 codec_dbg(codec, "Desktop firmware selected."); 7534 codec_dbg(codec, "Desktop firmware selected.");
7537 spec->alt_firmware_present = true;
7538 }
7539 break; 7535 break;
7540 case QUIRK_R3DI: 7536 case QUIRK_R3DI:
7541 if (request_firmware(&fw_entry, R3DI_EFX_FILE, 7537 if (request_firmware(&fw_entry, R3DI_EFX_FILE,
7542 codec->card->dev) != 0) { 7538 codec->card->dev) != 0)
7543 codec_dbg(codec, "Recon3Di alt firmware not detected."); 7539 codec_dbg(codec, "Recon3Di alt firmware not detected.");
7544 spec->alt_firmware_present = false; 7540 else
7545 } else {
7546 codec_dbg(codec, "Recon3Di firmware selected."); 7541 codec_dbg(codec, "Recon3Di firmware selected.");
7547 spec->alt_firmware_present = true;
7548 }
7549 break; 7542 break;
7550 default: 7543 default:
7551 spec->alt_firmware_present = false;
7552 break; 7544 break;
7553 } 7545 }
7554 /* 7546 /*
7555 * Use default ctefx.bin if no alt firmware is detected, or if none 7547 * Use default ctefx.bin if no alt firmware is detected, or if none
7556 * exists for your particular codec. 7548 * exists for your particular codec.
7557 */ 7549 */
7558 if (!spec->alt_firmware_present) { 7550 if (!fw_entry) {
7559 codec_dbg(codec, "Default firmware selected."); 7551 codec_dbg(codec, "Default firmware selected.");
7560 if (request_firmware(&fw_entry, EFX_FILE, 7552 if (request_firmware(&fw_entry, EFX_FILE,
7561 codec->card->dev) != 0) 7553 codec->card->dev) != 0)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 384719d5c44e..a3fb3d4c5730 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5687,6 +5687,9 @@ enum {
5687 ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE, 5687 ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
5688 ALC225_FIXUP_WYSE_AUTO_MUTE, 5688 ALC225_FIXUP_WYSE_AUTO_MUTE,
5689 ALC225_FIXUP_WYSE_DISABLE_MIC_VREF, 5689 ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
5690 ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
5691 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
5692 ALC299_FIXUP_PREDATOR_SPK,
5690}; 5693};
5691 5694
5692static const struct hda_fixup alc269_fixups[] = { 5695static const struct hda_fixup alc269_fixups[] = {
@@ -6685,6 +6688,32 @@ static const struct hda_fixup alc269_fixups[] = {
6685 .chained = true, 6688 .chained = true,
6686 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC 6689 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6687 }, 6690 },
6691 [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
6692 .type = HDA_FIXUP_VERBS,
6693 .v.verbs = (const struct hda_verb[]) {
6694 { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
6695 { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
6696 { }
6697 },
6698 .chained = true,
6699 .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
6700 },
6701 [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
6702 .type = HDA_FIXUP_PINS,
6703 .v.pins = (const struct hda_pintbl[]) {
6704 { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
6705 { }
6706 },
6707 .chained = true,
6708 .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
6709 },
6710 [ALC299_FIXUP_PREDATOR_SPK] = {
6711 .type = HDA_FIXUP_PINS,
6712 .v.pins = (const struct hda_pintbl[]) {
6713 { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
6714 { }
6715 }
6716 },
6688}; 6717};
6689 6718
6690static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6719static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6701,9 +6730,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6701 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS), 6730 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
6702 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 6731 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
6703 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK), 6732 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
6704 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 6733 SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
6705 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 6734 SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
6706 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE), 6735 SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
6736 SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6737 SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6738 SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6739 SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
6707 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), 6740 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
6708 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), 6741 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
6709 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), 6742 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7100,6 +7133,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7100 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"}, 7133 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
7101 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"}, 7134 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
7102 {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"}, 7135 {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
7136 {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
7103 {} 7137 {}
7104}; 7138};
7105#define ALC225_STANDARD_PINS \ 7139#define ALC225_STANDARD_PINS \
@@ -7320,6 +7354,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7320 {0x14, 0x90170110}, 7354 {0x14, 0x90170110},
7321 {0x1b, 0x90a70130}, 7355 {0x1b, 0x90a70130},
7322 {0x21, 0x03211020}), 7356 {0x21, 0x03211020}),
7357 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7358 {0x12, 0x90a60130},
7359 {0x14, 0x90170110},
7360 {0x21, 0x03211020}),
7361 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7362 {0x12, 0x90a60130},
7363 {0x14, 0x90170110},
7364 {0x21, 0x04211020}),
7365 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
7366 {0x1a, 0x90a70130},
7367 {0x1b, 0x90170110},
7368 {0x21, 0x03211020}),
7323 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB, 7369 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
7324 {0x12, 0xb7a60130}, 7370 {0x12, 0xb7a60130},
7325 {0x13, 0xb8a61140}, 7371 {0x13, 0xb8a61140},
diff --git a/tools/arch/alpha/include/uapi/asm/mman.h b/tools/arch/alpha/include/uapi/asm/mman.h
index c317d3e6867a..ea6a255ae61f 100644
--- a/tools/arch/alpha/include/uapi/asm/mman.h
+++ b/tools/arch/alpha/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
27#define MAP_NONBLOCK 0x40000 27#define MAP_NONBLOCK 0x40000
28#define MAP_NORESERVE 0x10000 28#define MAP_NORESERVE 0x10000
29#define MAP_POPULATE 0x20000 29#define MAP_POPULATE 0x20000
30#define MAP_PRIVATE 0x02
31#define MAP_SHARED 0x01
32#define MAP_STACK 0x80000 30#define MAP_STACK 0x80000
33#define PROT_EXEC 0x4 31#define PROT_EXEC 0x4
34#define PROT_GROWSDOWN 0x01000000 32#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index dae1584cf017..4703d218663a 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -17,5 +17,7 @@
17 17
18#define __ARCH_WANT_RENAMEAT 18#define __ARCH_WANT_RENAMEAT
19#define __ARCH_WANT_NEW_STAT 19#define __ARCH_WANT_NEW_STAT
20#define __ARCH_WANT_SET_GET_RLIMIT
21#define __ARCH_WANT_TIME32_SYSCALLS
20 22
21#include <asm-generic/unistd.h> 23#include <asm-generic/unistd.h>
diff --git a/tools/arch/mips/include/uapi/asm/mman.h b/tools/arch/mips/include/uapi/asm/mman.h
index de2206883abc..c8acaa138d46 100644
--- a/tools/arch/mips/include/uapi/asm/mman.h
+++ b/tools/arch/mips/include/uapi/asm/mman.h
@@ -28,8 +28,6 @@
28#define MAP_NONBLOCK 0x20000 28#define MAP_NONBLOCK 0x20000
29#define MAP_NORESERVE 0x0400 29#define MAP_NORESERVE 0x0400
30#define MAP_POPULATE 0x10000 30#define MAP_POPULATE 0x10000
31#define MAP_PRIVATE 0x002
32#define MAP_SHARED 0x001
33#define MAP_STACK 0x40000 31#define MAP_STACK 0x40000
34#define PROT_EXEC 0x04 32#define PROT_EXEC 0x04
35#define PROT_GROWSDOWN 0x01000000 33#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
index 1bd78758bde9..f9fd1325f5bd 100644
--- a/tools/arch/parisc/include/uapi/asm/mman.h
+++ b/tools/arch/parisc/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
27#define MAP_NONBLOCK 0x20000 27#define MAP_NONBLOCK 0x20000
28#define MAP_NORESERVE 0x4000 28#define MAP_NORESERVE 0x4000
29#define MAP_POPULATE 0x10000 29#define MAP_POPULATE 0x10000
30#define MAP_PRIVATE 0x02
31#define MAP_SHARED 0x01
32#define MAP_STACK 0x40000 30#define MAP_STACK 0x40000
33#define PROT_EXEC 0x4 31#define PROT_EXEC 0x4
34#define PROT_GROWSDOWN 0x01000000 32#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 8c876c166ef2..26ca425f4c2c 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -463,10 +463,12 @@ struct kvm_ppc_cpu_char {
463#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58) 463#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED (1ULL << 58)
464#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57) 464#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF (1ULL << 57)
465#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56) 465#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS (1ULL << 56)
466#define KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54)
466 467
467#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63) 468#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY (1ULL << 63)
468#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62) 469#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR (1ULL << 62)
469#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61) 470#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ULL << 61)
471#define KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58)
470 472
471/* Per-vcpu XICS interrupt controller state */ 473/* Per-vcpu XICS interrupt controller state */
472#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c) 474#define KVM_REG_PPC_ICP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 6d6122524711..981ff9479648 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -344,6 +344,7 @@
344/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ 344/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
345#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ 345#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
346#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ 346#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
347#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
347#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ 348#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
348#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ 349#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
349#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ 350#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
diff --git a/tools/arch/xtensa/include/uapi/asm/mman.h b/tools/arch/xtensa/include/uapi/asm/mman.h
index 34dde6f44dae..f2b08c990afc 100644
--- a/tools/arch/xtensa/include/uapi/asm/mman.h
+++ b/tools/arch/xtensa/include/uapi/asm/mman.h
@@ -27,8 +27,6 @@
27#define MAP_NONBLOCK 0x20000 27#define MAP_NONBLOCK 0x20000
28#define MAP_NORESERVE 0x0400 28#define MAP_NORESERVE 0x0400
29#define MAP_POPULATE 0x10000 29#define MAP_POPULATE 0x10000
30#define MAP_PRIVATE 0x002
31#define MAP_SHARED 0x001
32#define MAP_STACK 0x40000 30#define MAP_STACK 0x40000
33#define PROT_EXEC 0x4 31#define PROT_EXEC 0x4
34#define PROT_GROWSDOWN 0x01000000 32#define PROT_GROWSDOWN 0x01000000
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 8ef80d65a474..d2be5a06c339 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -401,41 +401,31 @@ static int do_show(int argc, char **argv)
401 401
402static int do_dump(int argc, char **argv) 402static int do_dump(int argc, char **argv)
403{ 403{
404 unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size; 404 struct bpf_prog_info_linear *info_linear;
405 void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
406 unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
407 struct bpf_prog_linfo *prog_linfo = NULL; 405 struct bpf_prog_linfo *prog_linfo = NULL;
408 unsigned long *func_ksyms = NULL; 406 enum {DUMP_JITED, DUMP_XLATED} mode;
409 struct bpf_prog_info info = {};
410 unsigned int *func_lens = NULL;
411 const char *disasm_opt = NULL; 407 const char *disasm_opt = NULL;
412 unsigned int nr_func_ksyms; 408 struct bpf_prog_info *info;
413 unsigned int nr_func_lens;
414 struct dump_data dd = {}; 409 struct dump_data dd = {};
415 __u32 len = sizeof(info); 410 void *func_info = NULL;
416 struct btf *btf = NULL; 411 struct btf *btf = NULL;
417 unsigned int buf_size;
418 char *filepath = NULL; 412 char *filepath = NULL;
419 bool opcodes = false; 413 bool opcodes = false;
420 bool visual = false; 414 bool visual = false;
421 char func_sig[1024]; 415 char func_sig[1024];
422 unsigned char *buf; 416 unsigned char *buf;
423 bool linum = false; 417 bool linum = false;
424 __u32 *member_len; 418 __u32 member_len;
425 __u64 *member_ptr; 419 __u64 arrays;
426 ssize_t n; 420 ssize_t n;
427 int err;
428 int fd; 421 int fd;
429 422
430 if (is_prefix(*argv, "jited")) { 423 if (is_prefix(*argv, "jited")) {
431 if (disasm_init()) 424 if (disasm_init())
432 return -1; 425 return -1;
433 426 mode = DUMP_JITED;
434 member_len = &info.jited_prog_len;
435 member_ptr = &info.jited_prog_insns;
436 } else if (is_prefix(*argv, "xlated")) { 427 } else if (is_prefix(*argv, "xlated")) {
437 member_len = &info.xlated_prog_len; 428 mode = DUMP_XLATED;
438 member_ptr = &info.xlated_prog_insns;
439 } else { 429 } else {
440 p_err("expected 'xlated' or 'jited', got: %s", *argv); 430 p_err("expected 'xlated' or 'jited', got: %s", *argv);
441 return -1; 431 return -1;
@@ -474,175 +464,50 @@ static int do_dump(int argc, char **argv)
474 return -1; 464 return -1;
475 } 465 }
476 466
477 err = bpf_obj_get_info_by_fd(fd, &info, &len); 467 if (mode == DUMP_JITED)
478 if (err) { 468 arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
479 p_err("can't get prog info: %s", strerror(errno)); 469 else
480 return -1; 470 arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
481 }
482
483 if (!*member_len) {
484 p_info("no instructions returned");
485 close(fd);
486 return 0;
487 }
488 471
489 buf_size = *member_len; 472 arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
473 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
474 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
475 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
476 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
490 477
491 buf = malloc(buf_size); 478 info_linear = bpf_program__get_prog_info_linear(fd, arrays);
492 if (!buf) { 479 close(fd);
493 p_err("mem alloc failed"); 480 if (IS_ERR_OR_NULL(info_linear)) {
494 close(fd); 481 p_err("can't get prog info: %s", strerror(errno));
495 return -1; 482 return -1;
496 } 483 }
497 484
498 nr_func_ksyms = info.nr_jited_ksyms; 485 info = &info_linear->info;
499 if (nr_func_ksyms) { 486 if (mode == DUMP_JITED) {
500 func_ksyms = malloc(nr_func_ksyms * sizeof(__u64)); 487 if (info->jited_prog_len == 0) {
501 if (!func_ksyms) { 488 p_info("no instructions returned");
502 p_err("mem alloc failed");
503 close(fd);
504 goto err_free;
505 }
506 }
507
508 nr_func_lens = info.nr_jited_func_lens;
509 if (nr_func_lens) {
510 func_lens = malloc(nr_func_lens * sizeof(__u32));
511 if (!func_lens) {
512 p_err("mem alloc failed");
513 close(fd);
514 goto err_free; 489 goto err_free;
515 } 490 }
516 } 491 buf = (unsigned char *)(info->jited_prog_insns);
517 492 member_len = info->jited_prog_len;
518 nr_finfo = info.nr_func_info; 493 } else { /* DUMP_XLATED */
519 finfo_rec_size = info.func_info_rec_size; 494 if (info->xlated_prog_len == 0) {
520 if (nr_finfo && finfo_rec_size) { 495 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
521 func_info = malloc(nr_finfo * finfo_rec_size);
522 if (!func_info) {
523 p_err("mem alloc failed");
524 close(fd);
525 goto err_free; 496 goto err_free;
526 } 497 }
498 buf = (unsigned char *)info->xlated_prog_insns;
499 member_len = info->xlated_prog_len;
527 } 500 }
528 501
529 linfo_rec_size = info.line_info_rec_size; 502 if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
530 if (info.nr_line_info && linfo_rec_size && info.btf_id) {
531 nr_linfo = info.nr_line_info;
532 linfo = malloc(nr_linfo * linfo_rec_size);
533 if (!linfo) {
534 p_err("mem alloc failed");
535 close(fd);
536 goto err_free;
537 }
538 }
539
540 jited_linfo_rec_size = info.jited_line_info_rec_size;
541 if (info.nr_jited_line_info &&
542 jited_linfo_rec_size &&
543 info.nr_jited_ksyms &&
544 info.nr_jited_func_lens &&
545 info.btf_id) {
546 nr_jited_linfo = info.nr_jited_line_info;
547 jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
548 if (!jited_linfo) {
549 p_err("mem alloc failed");
550 close(fd);
551 goto err_free;
552 }
553 }
554
555 memset(&info, 0, sizeof(info));
556
557 *member_ptr = ptr_to_u64(buf);
558 *member_len = buf_size;
559 info.jited_ksyms = ptr_to_u64(func_ksyms);
560 info.nr_jited_ksyms = nr_func_ksyms;
561 info.jited_func_lens = ptr_to_u64(func_lens);
562 info.nr_jited_func_lens = nr_func_lens;
563 info.nr_func_info = nr_finfo;
564 info.func_info_rec_size = finfo_rec_size;
565 info.func_info = ptr_to_u64(func_info);
566 info.nr_line_info = nr_linfo;
567 info.line_info_rec_size = linfo_rec_size;
568 info.line_info = ptr_to_u64(linfo);
569 info.nr_jited_line_info = nr_jited_linfo;
570 info.jited_line_info_rec_size = jited_linfo_rec_size;
571 info.jited_line_info = ptr_to_u64(jited_linfo);
572
573 err = bpf_obj_get_info_by_fd(fd, &info, &len);
574 close(fd);
575 if (err) {
576 p_err("can't get prog info: %s", strerror(errno));
577 goto err_free;
578 }
579
580 if (*member_len > buf_size) {
581 p_err("too many instructions returned");
582 goto err_free;
583 }
584
585 if (info.nr_jited_ksyms > nr_func_ksyms) {
586 p_err("too many addresses returned");
587 goto err_free;
588 }
589
590 if (info.nr_jited_func_lens > nr_func_lens) {
591 p_err("too many values returned");
592 goto err_free;
593 }
594
595 if (info.nr_func_info != nr_finfo) {
596 p_err("incorrect nr_func_info %d vs. expected %d",
597 info.nr_func_info, nr_finfo);
598 goto err_free;
599 }
600
601 if (info.func_info_rec_size != finfo_rec_size) {
602 p_err("incorrect func_info_rec_size %d vs. expected %d",
603 info.func_info_rec_size, finfo_rec_size);
604 goto err_free;
605 }
606
607 if (linfo && info.nr_line_info != nr_linfo) {
608 p_err("incorrect nr_line_info %u vs. expected %u",
609 info.nr_line_info, nr_linfo);
610 goto err_free;
611 }
612
613 if (info.line_info_rec_size != linfo_rec_size) {
614 p_err("incorrect line_info_rec_size %u vs. expected %u",
615 info.line_info_rec_size, linfo_rec_size);
616 goto err_free;
617 }
618
619 if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
620 p_err("incorrect nr_jited_line_info %u vs. expected %u",
621 info.nr_jited_line_info, nr_jited_linfo);
622 goto err_free;
623 }
624
625 if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
626 p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
627 info.jited_line_info_rec_size, jited_linfo_rec_size);
628 goto err_free;
629 }
630
631 if ((member_len == &info.jited_prog_len &&
632 info.jited_prog_insns == 0) ||
633 (member_len == &info.xlated_prog_len &&
634 info.xlated_prog_insns == 0)) {
635 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
636 goto err_free;
637 }
638
639 if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
640 p_err("failed to get btf"); 503 p_err("failed to get btf");
641 goto err_free; 504 goto err_free;
642 } 505 }
643 506
644 if (nr_linfo) { 507 func_info = (void *)info->func_info;
645 prog_linfo = bpf_prog_linfo__new(&info); 508
509 if (info->nr_line_info) {
510 prog_linfo = bpf_prog_linfo__new(info);
646 if (!prog_linfo) 511 if (!prog_linfo)
647 p_info("error in processing bpf_line_info. continue without it."); 512 p_info("error in processing bpf_line_info. continue without it.");
648 } 513 }
@@ -655,9 +520,9 @@ static int do_dump(int argc, char **argv)
655 goto err_free; 520 goto err_free;
656 } 521 }
657 522
658 n = write(fd, buf, *member_len); 523 n = write(fd, buf, member_len);
659 close(fd); 524 close(fd);
660 if (n != *member_len) { 525 if (n != member_len) {
661 p_err("error writing output file: %s", 526 p_err("error writing output file: %s",
662 n < 0 ? strerror(errno) : "short write"); 527 n < 0 ? strerror(errno) : "short write");
663 goto err_free; 528 goto err_free;
@@ -665,19 +530,19 @@ static int do_dump(int argc, char **argv)
665 530
666 if (json_output) 531 if (json_output)
667 jsonw_null(json_wtr); 532 jsonw_null(json_wtr);
668 } else if (member_len == &info.jited_prog_len) { 533 } else if (mode == DUMP_JITED) {
669 const char *name = NULL; 534 const char *name = NULL;
670 535
671 if (info.ifindex) { 536 if (info->ifindex) {
672 name = ifindex_to_bfd_params(info.ifindex, 537 name = ifindex_to_bfd_params(info->ifindex,
673 info.netns_dev, 538 info->netns_dev,
674 info.netns_ino, 539 info->netns_ino,
675 &disasm_opt); 540 &disasm_opt);
676 if (!name) 541 if (!name)
677 goto err_free; 542 goto err_free;
678 } 543 }
679 544
680 if (info.nr_jited_func_lens && info.jited_func_lens) { 545 if (info->nr_jited_func_lens && info->jited_func_lens) {
681 struct kernel_sym *sym = NULL; 546 struct kernel_sym *sym = NULL;
682 struct bpf_func_info *record; 547 struct bpf_func_info *record;
683 char sym_name[SYM_MAX_NAME]; 548 char sym_name[SYM_MAX_NAME];
@@ -685,17 +550,16 @@ static int do_dump(int argc, char **argv)
685 __u64 *ksyms = NULL; 550 __u64 *ksyms = NULL;
686 __u32 *lens; 551 __u32 *lens;
687 __u32 i; 552 __u32 i;
688 553 if (info->nr_jited_ksyms) {
689 if (info.nr_jited_ksyms) {
690 kernel_syms_load(&dd); 554 kernel_syms_load(&dd);
691 ksyms = (__u64 *) info.jited_ksyms; 555 ksyms = (__u64 *) info->jited_ksyms;
692 } 556 }
693 557
694 if (json_output) 558 if (json_output)
695 jsonw_start_array(json_wtr); 559 jsonw_start_array(json_wtr);
696 560
697 lens = (__u32 *) info.jited_func_lens; 561 lens = (__u32 *) info->jited_func_lens;
698 for (i = 0; i < info.nr_jited_func_lens; i++) { 562 for (i = 0; i < info->nr_jited_func_lens; i++) {
699 if (ksyms) { 563 if (ksyms) {
700 sym = kernel_syms_search(&dd, ksyms[i]); 564 sym = kernel_syms_search(&dd, ksyms[i]);
701 if (sym) 565 if (sym)
@@ -707,7 +571,7 @@ static int do_dump(int argc, char **argv)
707 } 571 }
708 572
709 if (func_info) { 573 if (func_info) {
710 record = func_info + i * finfo_rec_size; 574 record = func_info + i * info->func_info_rec_size;
711 btf_dumper_type_only(btf, record->type_id, 575 btf_dumper_type_only(btf, record->type_id,
712 func_sig, 576 func_sig,
713 sizeof(func_sig)); 577 sizeof(func_sig));
@@ -744,49 +608,37 @@ static int do_dump(int argc, char **argv)
744 if (json_output) 608 if (json_output)
745 jsonw_end_array(json_wtr); 609 jsonw_end_array(json_wtr);
746 } else { 610 } else {
747 disasm_print_insn(buf, *member_len, opcodes, name, 611 disasm_print_insn(buf, member_len, opcodes, name,
748 disasm_opt, btf, NULL, 0, 0, false); 612 disasm_opt, btf, NULL, 0, 0, false);
749 } 613 }
750 } else if (visual) { 614 } else if (visual) {
751 if (json_output) 615 if (json_output)
752 jsonw_null(json_wtr); 616 jsonw_null(json_wtr);
753 else 617 else
754 dump_xlated_cfg(buf, *member_len); 618 dump_xlated_cfg(buf, member_len);
755 } else { 619 } else {
756 kernel_syms_load(&dd); 620 kernel_syms_load(&dd);
757 dd.nr_jited_ksyms = info.nr_jited_ksyms; 621 dd.nr_jited_ksyms = info->nr_jited_ksyms;
758 dd.jited_ksyms = (__u64 *) info.jited_ksyms; 622 dd.jited_ksyms = (__u64 *) info->jited_ksyms;
759 dd.btf = btf; 623 dd.btf = btf;
760 dd.func_info = func_info; 624 dd.func_info = func_info;
761 dd.finfo_rec_size = finfo_rec_size; 625 dd.finfo_rec_size = info->func_info_rec_size;
762 dd.prog_linfo = prog_linfo; 626 dd.prog_linfo = prog_linfo;
763 627
764 if (json_output) 628 if (json_output)
765 dump_xlated_json(&dd, buf, *member_len, opcodes, 629 dump_xlated_json(&dd, buf, member_len, opcodes,
766 linum); 630 linum);
767 else 631 else
768 dump_xlated_plain(&dd, buf, *member_len, opcodes, 632 dump_xlated_plain(&dd, buf, member_len, opcodes,
769 linum); 633 linum);
770 kernel_syms_destroy(&dd); 634 kernel_syms_destroy(&dd);
771 } 635 }
772 636
773 free(buf); 637 free(info_linear);
774 free(func_ksyms);
775 free(func_lens);
776 free(func_info);
777 free(linfo);
778 free(jited_linfo);
779 bpf_prog_linfo__free(prog_linfo);
780 return 0; 638 return 0;
781 639
782err_free: 640err_free:
783 free(buf); 641 free(info_linear);
784 free(func_ksyms);
785 free(func_lens);
786 free(func_info);
787 free(linfo);
788 free(jited_linfo);
789 bpf_prog_linfo__free(prog_linfo);
790 return -1; 642 return -1;
791} 643}
792 644
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 61e46d54a67c..8d3864b061f3 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -66,7 +66,8 @@ FEATURE_TESTS_BASIC := \
66 sched_getcpu \ 66 sched_getcpu \
67 sdt \ 67 sdt \
68 setns \ 68 setns \
69 libaio 69 libaio \
70 disassembler-four-args
70 71
71# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list 72# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
72# of all feature tests 73# of all feature tests
@@ -118,7 +119,8 @@ FEATURE_DISPLAY ?= \
118 lzma \ 119 lzma \
119 get_cpuid \ 120 get_cpuid \
120 bpf \ 121 bpf \
121 libaio 122 libaio \
123 disassembler-four-args
122 124
123# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 125# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
124# If in the future we need per-feature checks/flags for features not 126# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index e903b86b742f..7853e6d91090 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -178,6 +178,10 @@
178# include "test-reallocarray.c" 178# include "test-reallocarray.c"
179#undef main 179#undef main
180 180
181#define main main_test_disassembler_four_args
182# include "test-disassembler-four-args.c"
183#undef main
184
181int main(int argc, char *argv[]) 185int main(int argc, char *argv[])
182{ 186{
183 main_test_libpython(); 187 main_test_libpython();
@@ -219,6 +223,7 @@ int main(int argc, char *argv[])
219 main_test_setns(); 223 main_test_setns();
220 main_test_libaio(); 224 main_test_libaio();
221 main_test_reallocarray(); 225 main_test_reallocarray();
226 main_test_disassembler_four_args();
222 227
223 return 0; 228 return 0;
224} 229}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index d68eb4fb40cc..2b0e02c38870 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -4,9 +4,9 @@
4/* 4/*
5 * Check OpenCSD library version is sufficient to provide required features 5 * Check OpenCSD library version is sufficient to provide required features
6 */ 6 */
7#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0)) 7#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
8#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER) 8#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
9#error "OpenCSD >= 0.10.0 is required" 9#error "OpenCSD >= 0.11.0 is required"
10#endif 10#endif
11 11
12int main(void) 12int main(void)
diff --git a/tools/include/uapi/asm-generic/mman-common-tools.h b/tools/include/uapi/asm-generic/mman-common-tools.h
new file mode 100644
index 000000000000..af7d0d3a3182
--- /dev/null
+++ b/tools/include/uapi/asm-generic/mman-common-tools.h
@@ -0,0 +1,23 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
3#define __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
4
5#include <asm-generic/mman-common.h>
6
7/* We need this because we need to have tools/include/uapi/ included in the tools
8 * header search path to get access to stuff that is not yet in the system's
9 * copy of the files in that directory, but since this cset:
10 *
11 * 746c9398f5ac ("arch: move common mmap flags to linux/mman.h")
12 *
13 * We end up making sys/mman.h, that is in the system headers, to not find the
14 * MAP_SHARED and MAP_PRIVATE defines because they are not anymore in our copy
15 * of asm-generic/mman-common.h. So we define them here and include this header
16 * from each of the per arch mman.h headers.
17 */
18#ifndef MAP_SHARED
19#define MAP_SHARED 0x01 /* Share changes */
20#define MAP_PRIVATE 0x02 /* Changes are private */
21#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
22#endif
23#endif // __ASM_GENERIC_MMAN_COMMON_TOOLS_ONLY_H
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index e7ee32861d51..abd238d0f7a4 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -15,9 +15,7 @@
15#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 15#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
16#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ 16#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
17 17
18#define MAP_SHARED 0x01 /* Share changes */ 18/* 0x01 - 0x03 are defined in linux/mman.h */
19#define MAP_PRIVATE 0x02 /* Changes are private */
20#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
21#define MAP_TYPE 0x0f /* Mask for type of mapping */ 19#define MAP_TYPE 0x0f /* Mask for type of mapping */
22#define MAP_FIXED 0x10 /* Interpret addr exactly */ 20#define MAP_FIXED 0x10 /* Interpret addr exactly */
23#define MAP_ANONYMOUS 0x20 /* don't use a file */ 21#define MAP_ANONYMOUS 0x20 /* don't use a file */
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 653687d9771b..36c197fc44a0 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -2,7 +2,7 @@
2#ifndef __ASM_GENERIC_MMAN_H 2#ifndef __ASM_GENERIC_MMAN_H
3#define __ASM_GENERIC_MMAN_H 3#define __ASM_GENERIC_MMAN_H
4 4
5#include <asm-generic/mman-common.h> 5#include <asm-generic/mman-common-tools.h>
6 6
7#define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
8#define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 8#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index d90127298f12..dee7292e1df6 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -38,8 +38,10 @@ __SYSCALL(__NR_io_destroy, sys_io_destroy)
38__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit) 38__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
39#define __NR_io_cancel 3 39#define __NR_io_cancel 3
40__SYSCALL(__NR_io_cancel, sys_io_cancel) 40__SYSCALL(__NR_io_cancel, sys_io_cancel)
41#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
41#define __NR_io_getevents 4 42#define __NR_io_getevents 4
42__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents) 43__SC_3264(__NR_io_getevents, sys_io_getevents_time32, sys_io_getevents)
44#endif
43 45
44/* fs/xattr.c */ 46/* fs/xattr.c */
45#define __NR_setxattr 5 47#define __NR_setxattr 5
@@ -179,7 +181,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
179#define __NR_fchown 55 181#define __NR_fchown 55
180__SYSCALL(__NR_fchown, sys_fchown) 182__SYSCALL(__NR_fchown, sys_fchown)
181#define __NR_openat 56 183#define __NR_openat 56
182__SC_COMP(__NR_openat, sys_openat, compat_sys_openat) 184__SYSCALL(__NR_openat, sys_openat)
183#define __NR_close 57 185#define __NR_close 57
184__SYSCALL(__NR_close, sys_close) 186__SYSCALL(__NR_close, sys_close)
185#define __NR_vhangup 58 187#define __NR_vhangup 58
@@ -222,10 +224,12 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
222__SYSCALL(__NR3264_sendfile, sys_sendfile64) 224__SYSCALL(__NR3264_sendfile, sys_sendfile64)
223 225
224/* fs/select.c */ 226/* fs/select.c */
227#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
225#define __NR_pselect6 72 228#define __NR_pselect6 72
226__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6) 229__SC_COMP_3264(__NR_pselect6, sys_pselect6_time32, sys_pselect6, compat_sys_pselect6_time32)
227#define __NR_ppoll 73 230#define __NR_ppoll 73
228__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll) 231__SC_COMP_3264(__NR_ppoll, sys_ppoll_time32, sys_ppoll, compat_sys_ppoll_time32)
232#endif
229 233
230/* fs/signalfd.c */ 234/* fs/signalfd.c */
231#define __NR_signalfd4 74 235#define __NR_signalfd4 74
@@ -269,16 +273,20 @@ __SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
269/* fs/timerfd.c */ 273/* fs/timerfd.c */
270#define __NR_timerfd_create 85 274#define __NR_timerfd_create 85
271__SYSCALL(__NR_timerfd_create, sys_timerfd_create) 275__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
276#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
272#define __NR_timerfd_settime 86 277#define __NR_timerfd_settime 86
273__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \ 278__SC_3264(__NR_timerfd_settime, sys_timerfd_settime32, \
274 compat_sys_timerfd_settime) 279 sys_timerfd_settime)
275#define __NR_timerfd_gettime 87 280#define __NR_timerfd_gettime 87
276__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \ 281__SC_3264(__NR_timerfd_gettime, sys_timerfd_gettime32, \
277 compat_sys_timerfd_gettime) 282 sys_timerfd_gettime)
283#endif
278 284
279/* fs/utimes.c */ 285/* fs/utimes.c */
286#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
280#define __NR_utimensat 88 287#define __NR_utimensat 88
281__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat) 288__SC_3264(__NR_utimensat, sys_utimensat_time32, sys_utimensat)
289#endif
282 290
283/* kernel/acct.c */ 291/* kernel/acct.c */
284#define __NR_acct 89 292#define __NR_acct 89
@@ -309,8 +317,10 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address)
309__SYSCALL(__NR_unshare, sys_unshare) 317__SYSCALL(__NR_unshare, sys_unshare)
310 318
311/* kernel/futex.c */ 319/* kernel/futex.c */
320#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
312#define __NR_futex 98 321#define __NR_futex 98
313__SC_COMP(__NR_futex, sys_futex, compat_sys_futex) 322__SC_3264(__NR_futex, sys_futex_time32, sys_futex)
323#endif
314#define __NR_set_robust_list 99 324#define __NR_set_robust_list 99
315__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \ 325__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
316 compat_sys_set_robust_list) 326 compat_sys_set_robust_list)
@@ -319,8 +329,10 @@ __SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
319 compat_sys_get_robust_list) 329 compat_sys_get_robust_list)
320 330
321/* kernel/hrtimer.c */ 331/* kernel/hrtimer.c */
332#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
322#define __NR_nanosleep 101 333#define __NR_nanosleep 101
323__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep) 334__SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep)
335#endif
324 336
325/* kernel/itimer.c */ 337/* kernel/itimer.c */
326#define __NR_getitimer 102 338#define __NR_getitimer 102
@@ -341,23 +353,29 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
341/* kernel/posix-timers.c */ 353/* kernel/posix-timers.c */
342#define __NR_timer_create 107 354#define __NR_timer_create 107
343__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create) 355__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
356#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
344#define __NR_timer_gettime 108 357#define __NR_timer_gettime 108
345__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime) 358__SC_3264(__NR_timer_gettime, sys_timer_gettime32, sys_timer_gettime)
359#endif
346#define __NR_timer_getoverrun 109 360#define __NR_timer_getoverrun 109
347__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) 361__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
362#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
348#define __NR_timer_settime 110 363#define __NR_timer_settime 110
349__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime) 364__SC_3264(__NR_timer_settime, sys_timer_settime32, sys_timer_settime)
365#endif
350#define __NR_timer_delete 111 366#define __NR_timer_delete 111
351__SYSCALL(__NR_timer_delete, sys_timer_delete) 367__SYSCALL(__NR_timer_delete, sys_timer_delete)
368#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
352#define __NR_clock_settime 112 369#define __NR_clock_settime 112
353__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime) 370__SC_3264(__NR_clock_settime, sys_clock_settime32, sys_clock_settime)
354#define __NR_clock_gettime 113 371#define __NR_clock_gettime 113
355__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime) 372__SC_3264(__NR_clock_gettime, sys_clock_gettime32, sys_clock_gettime)
356#define __NR_clock_getres 114 373#define __NR_clock_getres 114
357__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres) 374__SC_3264(__NR_clock_getres, sys_clock_getres_time32, sys_clock_getres)
358#define __NR_clock_nanosleep 115 375#define __NR_clock_nanosleep 115
359__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \ 376__SC_3264(__NR_clock_nanosleep, sys_clock_nanosleep_time32, \
360 compat_sys_clock_nanosleep) 377 sys_clock_nanosleep)
378#endif
361 379
362/* kernel/printk.c */ 380/* kernel/printk.c */
363#define __NR_syslog 116 381#define __NR_syslog 116
@@ -388,9 +406,11 @@ __SYSCALL(__NR_sched_yield, sys_sched_yield)
388__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) 406__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
389#define __NR_sched_get_priority_min 126 407#define __NR_sched_get_priority_min 126
390__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) 408__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
409#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
391#define __NR_sched_rr_get_interval 127 410#define __NR_sched_rr_get_interval 127
392__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \ 411__SC_3264(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32, \
393 compat_sys_sched_rr_get_interval) 412 sys_sched_rr_get_interval)
413#endif
394 414
395/* kernel/signal.c */ 415/* kernel/signal.c */
396#define __NR_restart_syscall 128 416#define __NR_restart_syscall 128
@@ -411,9 +431,11 @@ __SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
411__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask) 431__SC_COMP(__NR_rt_sigprocmask, sys_rt_sigprocmask, compat_sys_rt_sigprocmask)
412#define __NR_rt_sigpending 136 432#define __NR_rt_sigpending 136
413__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending) 433__SC_COMP(__NR_rt_sigpending, sys_rt_sigpending, compat_sys_rt_sigpending)
434#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
414#define __NR_rt_sigtimedwait 137 435#define __NR_rt_sigtimedwait 137
415__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \ 436__SC_COMP_3264(__NR_rt_sigtimedwait, sys_rt_sigtimedwait_time32, \
416 compat_sys_rt_sigtimedwait) 437 sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32)
438#endif
417#define __NR_rt_sigqueueinfo 138 439#define __NR_rt_sigqueueinfo 138
418__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \ 440__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
419 compat_sys_rt_sigqueueinfo) 441 compat_sys_rt_sigqueueinfo)
@@ -467,10 +489,15 @@ __SYSCALL(__NR_uname, sys_newuname)
467__SYSCALL(__NR_sethostname, sys_sethostname) 489__SYSCALL(__NR_sethostname, sys_sethostname)
468#define __NR_setdomainname 162 490#define __NR_setdomainname 162
469__SYSCALL(__NR_setdomainname, sys_setdomainname) 491__SYSCALL(__NR_setdomainname, sys_setdomainname)
492
493#ifdef __ARCH_WANT_SET_GET_RLIMIT
494/* getrlimit and setrlimit are superseded with prlimit64 */
470#define __NR_getrlimit 163 495#define __NR_getrlimit 163
471__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit) 496__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
472#define __NR_setrlimit 164 497#define __NR_setrlimit 164
473__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit) 498__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
499#endif
500
474#define __NR_getrusage 165 501#define __NR_getrusage 165
475__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage) 502__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
476#define __NR_umask 166 503#define __NR_umask 166
@@ -481,12 +508,14 @@ __SYSCALL(__NR_prctl, sys_prctl)
481__SYSCALL(__NR_getcpu, sys_getcpu) 508__SYSCALL(__NR_getcpu, sys_getcpu)
482 509
483/* kernel/time.c */ 510/* kernel/time.c */
511#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
484#define __NR_gettimeofday 169 512#define __NR_gettimeofday 169
485__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday) 513__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
486#define __NR_settimeofday 170 514#define __NR_settimeofday 170
487__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday) 515__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
488#define __NR_adjtimex 171 516#define __NR_adjtimex 171
489__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex) 517__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
518#endif
490 519
491/* kernel/timer.c */ 520/* kernel/timer.c */
492#define __NR_getpid 172 521#define __NR_getpid 172
@@ -511,11 +540,13 @@ __SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
511__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open) 540__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
512#define __NR_mq_unlink 181 541#define __NR_mq_unlink 181
513__SYSCALL(__NR_mq_unlink, sys_mq_unlink) 542__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
543#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
514#define __NR_mq_timedsend 182 544#define __NR_mq_timedsend 182
515__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend) 545__SC_3264(__NR_mq_timedsend, sys_mq_timedsend_time32, sys_mq_timedsend)
516#define __NR_mq_timedreceive 183 546#define __NR_mq_timedreceive 183
517__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \ 547__SC_3264(__NR_mq_timedreceive, sys_mq_timedreceive_time32, \
518 compat_sys_mq_timedreceive) 548 sys_mq_timedreceive)
549#endif
519#define __NR_mq_notify 184 550#define __NR_mq_notify 184
520__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify) 551__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
521#define __NR_mq_getsetattr 185 552#define __NR_mq_getsetattr 185
@@ -536,8 +567,10 @@ __SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
536__SYSCALL(__NR_semget, sys_semget) 567__SYSCALL(__NR_semget, sys_semget)
537#define __NR_semctl 191 568#define __NR_semctl 191
538__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl) 569__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
570#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
539#define __NR_semtimedop 192 571#define __NR_semtimedop 192
540__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop) 572__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
573#endif
541#define __NR_semop 193 574#define __NR_semop 193
542__SYSCALL(__NR_semop, sys_semop) 575__SYSCALL(__NR_semop, sys_semop)
543 576
@@ -658,8 +691,10 @@ __SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
658__SYSCALL(__NR_perf_event_open, sys_perf_event_open) 691__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
659#define __NR_accept4 242 692#define __NR_accept4 242
660__SYSCALL(__NR_accept4, sys_accept4) 693__SYSCALL(__NR_accept4, sys_accept4)
694#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
661#define __NR_recvmmsg 243 695#define __NR_recvmmsg 243
662__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg) 696__SC_COMP_3264(__NR_recvmmsg, sys_recvmmsg_time32, sys_recvmmsg, compat_sys_recvmmsg_time32)
697#endif
663 698
664/* 699/*
665 * Architectures may provide up to 16 syscalls of their own 700 * Architectures may provide up to 16 syscalls of their own
@@ -667,8 +702,10 @@ __SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
667 */ 702 */
668#define __NR_arch_specific_syscall 244 703#define __NR_arch_specific_syscall 244
669 704
705#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
670#define __NR_wait4 260 706#define __NR_wait4 260
671__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4) 707__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
708#endif
672#define __NR_prlimit64 261 709#define __NR_prlimit64 261
673__SYSCALL(__NR_prlimit64, sys_prlimit64) 710__SYSCALL(__NR_prlimit64, sys_prlimit64)
674#define __NR_fanotify_init 262 711#define __NR_fanotify_init 262
@@ -678,10 +715,11 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
678#define __NR_name_to_handle_at 264 715#define __NR_name_to_handle_at 264
679__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) 716__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
680#define __NR_open_by_handle_at 265 717#define __NR_open_by_handle_at 265
681__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \ 718__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
682 compat_sys_open_by_handle_at) 719#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
683#define __NR_clock_adjtime 266 720#define __NR_clock_adjtime 266
684__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime) 721__SC_3264(__NR_clock_adjtime, sys_clock_adjtime32, sys_clock_adjtime)
722#endif
685#define __NR_syncfs 267 723#define __NR_syncfs 267
686__SYSCALL(__NR_syncfs, sys_syncfs) 724__SYSCALL(__NR_syncfs, sys_syncfs)
687#define __NR_setns 268 725#define __NR_setns 268
@@ -734,15 +772,69 @@ __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
734__SYSCALL(__NR_pkey_free, sys_pkey_free) 772__SYSCALL(__NR_pkey_free, sys_pkey_free)
735#define __NR_statx 291 773#define __NR_statx 291
736__SYSCALL(__NR_statx, sys_statx) 774__SYSCALL(__NR_statx, sys_statx)
775#if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
737#define __NR_io_pgetevents 292 776#define __NR_io_pgetevents 292
738__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) 777__SC_COMP_3264(__NR_io_pgetevents, sys_io_pgetevents_time32, sys_io_pgetevents, compat_sys_io_pgetevents)
778#endif
739#define __NR_rseq 293 779#define __NR_rseq 293
740__SYSCALL(__NR_rseq, sys_rseq) 780__SYSCALL(__NR_rseq, sys_rseq)
741#define __NR_kexec_file_load 294 781#define __NR_kexec_file_load 294
742__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) 782__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
783/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
784#if __BITS_PER_LONG == 32
785#define __NR_clock_gettime64 403
786__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
787#define __NR_clock_settime64 404
788__SYSCALL(__NR_clock_settime64, sys_clock_settime)
789#define __NR_clock_adjtime64 405
790__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime)
791#define __NR_clock_getres_time64 406
792__SYSCALL(__NR_clock_getres_time64, sys_clock_getres)
793#define __NR_clock_nanosleep_time64 407
794__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep)
795#define __NR_timer_gettime64 408
796__SYSCALL(__NR_timer_gettime64, sys_timer_gettime)
797#define __NR_timer_settime64 409
798__SYSCALL(__NR_timer_settime64, sys_timer_settime)
799#define __NR_timerfd_gettime64 410
800__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime)
801#define __NR_timerfd_settime64 411
802__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime)
803#define __NR_utimensat_time64 412
804__SYSCALL(__NR_utimensat_time64, sys_utimensat)
805#define __NR_pselect6_time64 413
806__SC_COMP(__NR_pselect6_time64, sys_pselect6, compat_sys_pselect6_time64)
807#define __NR_ppoll_time64 414
808__SC_COMP(__NR_ppoll_time64, sys_ppoll, compat_sys_ppoll_time64)
809#define __NR_io_pgetevents_time64 416
810__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents)
811#define __NR_recvmmsg_time64 417
812__SC_COMP(__NR_recvmmsg_time64, sys_recvmmsg, compat_sys_recvmmsg_time64)
813#define __NR_mq_timedsend_time64 418
814__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend)
815#define __NR_mq_timedreceive_time64 419
816__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive)
817#define __NR_semtimedop_time64 420
818__SYSCALL(__NR_semtimedop_time64, sys_semtimedop)
819#define __NR_rt_sigtimedwait_time64 421
820__SC_COMP(__NR_rt_sigtimedwait_time64, sys_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time64)
821#define __NR_futex_time64 422
822__SYSCALL(__NR_futex_time64, sys_futex)
823#define __NR_sched_rr_get_interval_time64 423
824__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
825#endif
826
827#define __NR_pidfd_send_signal 424
828__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
829#define __NR_io_uring_setup 425
830__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
831#define __NR_io_uring_enter 426
832__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
833#define __NR_io_uring_register 427
834__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
743 835
744#undef __NR_syscalls 836#undef __NR_syscalls
745#define __NR_syscalls 295 837#define __NR_syscalls 428
746 838
747/* 839/*
748 * 32 bit systems traditionally used different 840 * 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 298b2e197744..397810fa2d33 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -1486,9 +1486,73 @@ struct drm_i915_gem_context_param {
1486#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1486#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1487#define I915_CONTEXT_DEFAULT_PRIORITY 0 1487#define I915_CONTEXT_DEFAULT_PRIORITY 0
1488#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1488#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1489 /*
1490 * When using the following param, value should be a pointer to
1491 * drm_i915_gem_context_param_sseu.
1492 */
1493#define I915_CONTEXT_PARAM_SSEU 0x7
1489 __u64 value; 1494 __u64 value;
1490}; 1495};
1491 1496
1497/**
1498 * Context SSEU programming
1499 *
1500 * It may be necessary for either functional or performance reason to configure
1501 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1502 * Sub-slice/EU).
1503 *
1504 * This is done by configuring SSEU configuration using the below
1505 * @struct drm_i915_gem_context_param_sseu for every supported engine which
1506 * userspace intends to use.
1507 *
1508 * Not all GPUs or engines support this functionality in which case an error
1509 * code -ENODEV will be returned.
1510 *
1511 * Also, flexibility of possible SSEU configuration permutations varies between
1512 * GPU generations and software imposed limitations. Requesting such a
1513 * combination will return an error code of -EINVAL.
1514 *
1515 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1516 * favour of a single global setting.
1517 */
1518struct drm_i915_gem_context_param_sseu {
1519 /*
1520 * Engine class & instance to be configured or queried.
1521 */
1522 __u16 engine_class;
1523 __u16 engine_instance;
1524
1525 /*
1526 * Unused for now. Must be cleared to zero.
1527 */
1528 __u32 flags;
1529
1530 /*
1531 * Mask of slices to enable for the context. Valid values are a subset
1532 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1533 */
1534 __u64 slice_mask;
1535
1536 /*
1537 * Mask of subslices to enable for the context. Valid values are a
1538 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
1539 */
1540 __u64 subslice_mask;
1541
1542 /*
1543 * Minimum/Maximum number of EUs to enable per subslice for the
1544 * context. min_eus_per_subslice must be inferior or equal to
1545 * max_eus_per_subslice.
1546 */
1547 __u16 min_eus_per_subslice;
1548 __u16 max_eus_per_subslice;
1549
1550 /*
1551 * Unused for now. Must be cleared to zero.
1552 */
1553 __u32 rsvd;
1554};
1555
1492enum drm_i915_oa_format { 1556enum drm_i915_oa_format {
1493 I915_OA_FORMAT_A13 = 1, /* HSW only */ 1557 I915_OA_FORMAT_A13 = 1, /* HSW only */
1494 I915_OA_FORMAT_A29, /* HSW only */ 1558 I915_OA_FORMAT_A29, /* HSW only */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3c38ac9a92a7..929c8e537a14 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -502,16 +502,6 @@ union bpf_attr {
502 * Return 502 * Return
503 * 0 on success, or a negative error in case of failure. 503 * 0 on success, or a negative error in case of failure.
504 * 504 *
505 * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
506 * Description
507 * Push an element *value* in *map*. *flags* is one of:
508 *
509 * **BPF_EXIST**
510 * If the queue/stack is full, the oldest element is removed to
511 * make room for this.
512 * Return
513 * 0 on success, or a negative error in case of failure.
514 *
515 * int bpf_probe_read(void *dst, u32 size, const void *src) 505 * int bpf_probe_read(void *dst, u32 size, const void *src)
516 * Description 506 * Description
517 * For tracing programs, safely attempt to read *size* bytes from 507 * For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
1435 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) 1425 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1436 * Description 1426 * Description
1437 * Equivalent to bpf_get_socket_cookie() helper that accepts 1427 * Equivalent to bpf_get_socket_cookie() helper that accepts
1438 * *skb*, but gets socket from **struct bpf_sock_addr** contex. 1428 * *skb*, but gets socket from **struct bpf_sock_addr** context.
1439 * Return 1429 * Return
1440 * A 8-byte long non-decreasing number. 1430 * A 8-byte long non-decreasing number.
1441 * 1431 *
1442 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) 1432 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1443 * Description 1433 * Description
1444 * Equivalent to bpf_get_socket_cookie() helper that accepts 1434 * Equivalent to bpf_get_socket_cookie() helper that accepts
1445 * *skb*, but gets socket from **struct bpf_sock_ops** contex. 1435 * *skb*, but gets socket from **struct bpf_sock_ops** context.
1446 * Return 1436 * Return
1447 * A 8-byte long non-decreasing number. 1437 * A 8-byte long non-decreasing number.
1448 * 1438 *
@@ -2098,52 +2088,52 @@ union bpf_attr {
2098 * Return 2088 * Return
2099 * 0 on success, or a negative error in case of failure. 2089 * 0 on success, or a negative error in case of failure.
2100 * 2090 *
2101 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) 2091 * int bpf_rc_repeat(void *ctx)
2102 * Description 2092 * Description
2103 * This helper is used in programs implementing IR decoding, to 2093 * This helper is used in programs implementing IR decoding, to
2104 * report a successfully decoded key press with *scancode*, 2094 * report a successfully decoded repeat key message. This delays
2105 * *toggle* value in the given *protocol*. The scancode will be 2095 * the generation of a key up event for previously generated
2106 * translated to a keycode using the rc keymap, and reported as 2096 * key down event.
2107 * an input key down event. After a period a key up event is
2108 * generated. This period can be extended by calling either
2109 * **bpf_rc_keydown**\ () again with the same values, or calling
2110 * **bpf_rc_repeat**\ ().
2111 * 2097 *
2112 * Some protocols include a toggle bit, in case the button was 2098 * Some IR protocols like NEC have a special IR message for
2113 * released and pressed again between consecutive scancodes. 2099 * repeating last button, for when a button is held down.
2114 * 2100 *
2115 * The *ctx* should point to the lirc sample as passed into 2101 * The *ctx* should point to the lirc sample as passed into
2116 * the program. 2102 * the program.
2117 * 2103 *
2118 * The *protocol* is the decoded protocol number (see
2119 * **enum rc_proto** for some predefined values).
2120 *
2121 * This helper is only available is the kernel was compiled with 2104 * This helper is only available is the kernel was compiled with
2122 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2105 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2123 * "**y**". 2106 * "**y**".
2124 * Return 2107 * Return
2125 * 0 2108 * 0
2126 * 2109 *
2127 * int bpf_rc_repeat(void *ctx) 2110 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2128 * Description 2111 * Description
2129 * This helper is used in programs implementing IR decoding, to 2112 * This helper is used in programs implementing IR decoding, to
2130 * report a successfully decoded repeat key message. This delays 2113 * report a successfully decoded key press with *scancode*,
2131 * the generation of a key up event for previously generated 2114 * *toggle* value in the given *protocol*. The scancode will be
2132 * key down event. 2115 * translated to a keycode using the rc keymap, and reported as
2116 * an input key down event. After a period a key up event is
2117 * generated. This period can be extended by calling either
2118 * **bpf_rc_keydown**\ () again with the same values, or calling
2119 * **bpf_rc_repeat**\ ().
2133 * 2120 *
2134 * Some IR protocols like NEC have a special IR message for 2121 * Some protocols include a toggle bit, in case the button was
2135 * repeating last button, for when a button is held down. 2122 * released and pressed again between consecutive scancodes.
2136 * 2123 *
2137 * The *ctx* should point to the lirc sample as passed into 2124 * The *ctx* should point to the lirc sample as passed into
2138 * the program. 2125 * the program.
2139 * 2126 *
2127 * The *protocol* is the decoded protocol number (see
2128 * **enum rc_proto** for some predefined values).
2129 *
2140 * This helper is only available is the kernel was compiled with 2130 * This helper is only available is the kernel was compiled with
2141 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to 2131 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2142 * "**y**". 2132 * "**y**".
2143 * Return 2133 * Return
2144 * 0 2134 * 0
2145 * 2135 *
2146 * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb) 2136 * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
2147 * Description 2137 * Description
2148 * Return the cgroup v2 id of the socket associated with the *skb*. 2138 * Return the cgroup v2 id of the socket associated with the *skb*.
2149 * This is roughly similar to the **bpf_get_cgroup_classid**\ () 2139 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
2159 * Return 2149 * Return
2160 * The id is returned or 0 in case the id could not be retrieved. 2150 * The id is returned or 0 in case the id could not be retrieved.
2161 * 2151 *
2162 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2163 * Description
2164 * Return id of cgroup v2 that is ancestor of cgroup associated
2165 * with the *skb* at the *ancestor_level*. The root cgroup is at
2166 * *ancestor_level* zero and each step down the hierarchy
2167 * increments the level. If *ancestor_level* == level of cgroup
2168 * associated with *skb*, then return value will be same as that
2169 * of **bpf_skb_cgroup_id**\ ().
2170 *
2171 * The helper is useful to implement policies based on cgroups
2172 * that are upper in hierarchy than immediate cgroup associated
2173 * with *skb*.
2174 *
2175 * The format of returned id and helper limitations are same as in
2176 * **bpf_skb_cgroup_id**\ ().
2177 * Return
2178 * The id is returned or 0 in case the id could not be retrieved.
2179 *
2180 * u64 bpf_get_current_cgroup_id(void) 2152 * u64 bpf_get_current_cgroup_id(void)
2181 * Return 2153 * Return
2182 * A 64-bit integer containing the current cgroup id based 2154 * A 64-bit integer containing the current cgroup id based
2183 * on the cgroup within which the current task is running. 2155 * on the cgroup within which the current task is running.
2184 * 2156 *
2185 * void* get_local_storage(void *map, u64 flags) 2157 * void *bpf_get_local_storage(void *map, u64 flags)
2186 * Description 2158 * Description
2187 * Get the pointer to the local storage area. 2159 * Get the pointer to the local storage area.
2188 * The type and the size of the local storage is defined 2160 * The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
2209 * Return 2181 * Return
2210 * 0 on success, or a negative error in case of failure. 2182 * 0 on success, or a negative error in case of failure.
2211 * 2183 *
2184 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2185 * Description
2186 * Return id of cgroup v2 that is ancestor of cgroup associated
2187 * with the *skb* at the *ancestor_level*. The root cgroup is at
2188 * *ancestor_level* zero and each step down the hierarchy
2189 * increments the level. If *ancestor_level* == level of cgroup
2190 * associated with *skb*, then return value will be same as that
2191 * of **bpf_skb_cgroup_id**\ ().
2192 *
2193 * The helper is useful to implement policies based on cgroups
2194 * that are upper in hierarchy than immediate cgroup associated
2195 * with *skb*.
2196 *
2197 * The format of returned id and helper limitations are same as in
2198 * **bpf_skb_cgroup_id**\ ().
2199 * Return
2200 * The id is returned or 0 in case the id could not be retrieved.
2201 *
2212 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) 2202 * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
2213 * Description 2203 * Description
2214 * Look for TCP socket matching *tuple*, optionally in a child 2204 * Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
2289 * Return 2279 * Return
2290 * 0 on success, or a negative error in case of failure. 2280 * 0 on success, or a negative error in case of failure.
2291 * 2281 *
2282 * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
2283 * Description
2284 * Push an element *value* in *map*. *flags* is one of:
2285 *
2286 * **BPF_EXIST**
2287 * If the queue/stack is full, the oldest element is
2288 * removed to make room for this.
2289 * Return
2290 * 0 on success, or a negative error in case of failure.
2291 *
2292 * int bpf_map_pop_elem(struct bpf_map *map, void *value) 2292 * int bpf_map_pop_elem(struct bpf_map *map, void *value)
2293 * Description 2293 * Description
2294 * Pop an element from *map*. 2294 * Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
2343 * Return 2343 * Return
2344 * 0 2344 * 0
2345 * 2345 *
2346 * int bpf_spin_lock(struct bpf_spin_lock *lock)
2347 * Description
2348 * Acquire a spinlock represented by the pointer *lock*, which is
2349 * stored as part of a value of a map. Taking the lock allows to
2350 * safely update the rest of the fields in that value. The
2351 * spinlock can (and must) later be released with a call to
2352 * **bpf_spin_unlock**\ (\ *lock*\ ).
2353 *
2354 * Spinlocks in BPF programs come with a number of restrictions
2355 * and constraints:
2356 *
2357 * * **bpf_spin_lock** objects are only allowed inside maps of
2358 * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
2359 * list could be extended in the future).
2360 * * BTF description of the map is mandatory.
2361 * * The BPF program can take ONE lock at a time, since taking two
2362 * or more could cause dead locks.
2363 * * Only one **struct bpf_spin_lock** is allowed per map element.
2364 * * When the lock is taken, calls (either BPF to BPF or helpers)
2365 * are not allowed.
2366 * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
2367 * allowed inside a spinlock-ed region.
2368 * * The BPF program MUST call **bpf_spin_unlock**\ () to release
2369 * the lock, on all execution paths, before it returns.
2370 * * The BPF program can access **struct bpf_spin_lock** only via
2371 * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
2372 * helpers. Loading or storing data into the **struct
2373 * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
2374 * * To use the **bpf_spin_lock**\ () helper, the BTF description
2375 * of the map value must be a struct and have **struct
2376 * bpf_spin_lock** *anyname*\ **;** field at the top level.
2377 * Nested lock inside another struct is not allowed.
2378 * * The **struct bpf_spin_lock** *lock* field in a map value must
2379 * be aligned on a multiple of 4 bytes in that value.
2380 * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
2381 * the **bpf_spin_lock** field to user space.
2382 * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
2383 * a BPF program, do not update the **bpf_spin_lock** field.
2384 * * **bpf_spin_lock** cannot be on the stack or inside a
2385 * networking packet (it can only be inside of a map values).
2386 * * **bpf_spin_lock** is available to root only.
2387 * * Tracing programs and socket filter programs cannot use
2388 * **bpf_spin_lock**\ () due to insufficient preemption checks
2389 * (but this may change in the future).
2390 * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
2391 * Return
2392 * 0
2393 *
2394 * int bpf_spin_unlock(struct bpf_spin_lock *lock)
2395 * Description
2396 * Release the *lock* previously locked by a call to
2397 * **bpf_spin_lock**\ (\ *lock*\ ).
2398 * Return
2399 * 0
2400 *
2346 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) 2401 * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
2347 * Description 2402 * Description
2348 * This helper gets a **struct bpf_sock** pointer such 2403 * This helper gets a **struct bpf_sock** pointer such
2349 * that all the fields in bpf_sock can be accessed. 2404 * that all the fields in this **bpf_sock** can be accessed.
2350 * Return 2405 * Return
2351 * A **struct bpf_sock** pointer on success, or NULL in 2406 * A **struct bpf_sock** pointer on success, or **NULL** in
2352 * case of failure. 2407 * case of failure.
2353 * 2408 *
2354 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) 2409 * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
2355 * Description 2410 * Description
2356 * This helper gets a **struct bpf_tcp_sock** pointer from a 2411 * This helper gets a **struct bpf_tcp_sock** pointer from a
2357 * **struct bpf_sock** pointer. 2412 * **struct bpf_sock** pointer.
2358 *
2359 * Return 2413 * Return
2360 * A **struct bpf_tcp_sock** pointer on success, or NULL in 2414 * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
2361 * case of failure. 2415 * case of failure.
2362 * 2416 *
2363 * int bpf_skb_ecn_set_ce(struct sk_buf *skb) 2417 * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
2364 * Description 2418 * Description
2365 * Sets ECN of IP header to ce (congestion encountered) if 2419 * Set ECN (Explicit Congestion Notification) field of IP header
2366 * current value is ect (ECN capable). Works with IPv6 and IPv4. 2420 * to **CE** (Congestion Encountered) if current value is **ECT**
2367 * Return 2421 * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
2368 * 1 if set, 0 if not set. 2422 * and IPv4.
2423 * Return
2424 * 1 if the **CE** flag is set (either by the current helper call
2425 * or because it was already present), 0 if it is not set.
2426 *
2427 * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
2428 * Description
2429 * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
2430 * **bpf_sk_release**\ () is unnecessary and not allowed.
2431 * Return
2432 * A **struct bpf_sock** pointer on success, or **NULL** in
2433 * case of failure.
2369 */ 2434 */
2370#define __BPF_FUNC_MAPPER(FN) \ 2435#define __BPF_FUNC_MAPPER(FN) \
2371 FN(unspec), \ 2436 FN(unspec), \
@@ -2465,7 +2530,8 @@ union bpf_attr {
2465 FN(spin_unlock), \ 2530 FN(spin_unlock), \
2466 FN(sk_fullsock), \ 2531 FN(sk_fullsock), \
2467 FN(tcp_sock), \ 2532 FN(tcp_sock), \
2468 FN(skb_ecn_set_ce), 2533 FN(skb_ecn_set_ce), \
2534 FN(get_listener_sock),
2469 2535
2470/* integer value in 'imm' field of BPF_CALL instruction selects which helper 2536/* integer value in 'imm' field of BPF_CALL instruction selects which helper
2471 * function eBPF program intends to call 2537 * function eBPF program intends to call
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 6448cdd9a350..a2f8658f1c55 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -41,6 +41,7 @@
41#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */ 41#define F_SEAL_SHRINK 0x0002 /* prevent file from shrinking */
42#define F_SEAL_GROW 0x0004 /* prevent file from growing */ 42#define F_SEAL_GROW 0x0004 /* prevent file from growing */
43#define F_SEAL_WRITE 0x0008 /* prevent writes */ 43#define F_SEAL_WRITE 0x0008 /* prevent writes */
44#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
44/* (1U << 31) is reserved for signed error codes */ 45/* (1U << 31) is reserved for signed error codes */
45 46
46/* 47/*
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index a55cb8b10165..e7ad9d350a28 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -292,10 +292,11 @@ struct sockaddr_in {
292#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000) 292#define IN_LOOPBACK(a) ((((long int) (a)) & 0xff000000) == 0x7f000000)
293 293
294/* Defines for Multicast INADDR */ 294/* Defines for Multicast INADDR */
295#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */ 295#define INADDR_UNSPEC_GROUP 0xe0000000U /* 224.0.0.0 */
296#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */ 296#define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */
297#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */ 297#define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */
298#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */ 298#define INADDR_ALLSNOOPERS_GROUP 0xe000006aU /* 224.0.0.106 */
299#define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */
299#endif 300#endif
300 301
301/* <asm/byteorder.h> contains the htonl type stuff.. */ 302/* <asm/byteorder.h> contains the htonl type stuff.. */
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index d0f515d53299..fc1a64c3447b 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -12,6 +12,10 @@
12#define OVERCOMMIT_ALWAYS 1 12#define OVERCOMMIT_ALWAYS 1
13#define OVERCOMMIT_NEVER 2 13#define OVERCOMMIT_NEVER 2
14 14
15#define MAP_SHARED 0x01 /* Share changes */
16#define MAP_PRIVATE 0x02 /* Changes are private */
17#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
18
15/* 19/*
16 * Huge page size encoding when MAP_HUGETLB is specified, and a huge page 20 * Huge page size encoding when MAP_HUGETLB is specified, and a huge page
17 * size other than the default is desired. See hugetlb_encode.h. 21 * size other than the default is desired. See hugetlb_encode.h.
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 61aaacf0cfa1..5bf8e52c41fc 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -3,7 +3,7 @@
3 3
4BPF_VERSION = 0 4BPF_VERSION = 0
5BPF_PATCHLEVEL = 0 5BPF_PATCHLEVEL = 0
6BPF_EXTRAVERSION = 1 6BPF_EXTRAVERSION = 2
7 7
8MAKEFLAGS += --no-print-directory 8MAKEFLAGS += --no-print-directory
9 9
@@ -79,8 +79,6 @@ export prefix libdir src obj
79libdir_SQ = $(subst ','\'',$(libdir)) 79libdir_SQ = $(subst ','\'',$(libdir))
80libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) 80libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
81 81
82LIB_FILE = libbpf.a libbpf.so
83
84VERSION = $(BPF_VERSION) 82VERSION = $(BPF_VERSION)
85PATCHLEVEL = $(BPF_PATCHLEVEL) 83PATCHLEVEL = $(BPF_PATCHLEVEL)
86EXTRAVERSION = $(BPF_EXTRAVERSION) 84EXTRAVERSION = $(BPF_EXTRAVERSION)
@@ -88,7 +86,10 @@ EXTRAVERSION = $(BPF_EXTRAVERSION)
88OBJ = $@ 86OBJ = $@
89N = 87N =
90 88
91LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION) 89LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
90
91LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION)
92LIB_FILE = libbpf.a libbpf.so*
92 93
93# Set compile option CFLAGS 94# Set compile option CFLAGS
94ifdef EXTRA_CFLAGS 95ifdef EXTRA_CFLAGS
@@ -128,16 +129,18 @@ all:
128export srctree OUTPUT CC LD CFLAGS V 129export srctree OUTPUT CC LD CFLAGS V
129include $(srctree)/tools/build/Makefile.include 130include $(srctree)/tools/build/Makefile.include
130 131
131BPF_IN := $(OUTPUT)libbpf-in.o 132BPF_IN := $(OUTPUT)libbpf-in.o
132LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) 133VERSION_SCRIPT := libbpf.map
133VERSION_SCRIPT := libbpf.map 134
135LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
136LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
134 137
135GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \ 138GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
136 awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}') 139 awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
137VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ 140VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
138 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) 141 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
139 142
140CMD_TARGETS = $(LIB_FILE) 143CMD_TARGETS = $(LIB_TARGET)
141 144
142CXX_TEST_TARGET = $(OUTPUT)test_libbpf 145CXX_TEST_TARGET = $(OUTPUT)test_libbpf
143 146
@@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep
170 echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true 173 echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
171 $(Q)$(MAKE) $(build)=libbpf 174 $(Q)$(MAKE) $(build)=libbpf
172 175
173$(OUTPUT)libbpf.so: $(BPF_IN) 176$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
174 $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \ 177
175 $^ -o $@ 178$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
179 $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
180 -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
181 @ln -sf $(@F) $(OUTPUT)libbpf.so
182 @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
176 183
177$(OUTPUT)libbpf.a: $(BPF_IN) 184$(OUTPUT)libbpf.a: $(BPF_IN)
178 $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ 185 $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so
192 exit 1; \ 199 exit 1; \
193 fi 200 fi
194 201
202define do_install_mkdir
203 if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
204 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
205 fi
206endef
207
195define do_install 208define do_install
196 if [ ! -d '$(DESTDIR_SQ)$2' ]; then \ 209 if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
197 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \ 210 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -200,8 +213,9 @@ define do_install
200endef 213endef
201 214
202install_lib: all_cmd 215install_lib: all_cmd
203 $(call QUIET_INSTALL, $(LIB_FILE)) \ 216 $(call QUIET_INSTALL, $(LIB_TARGET)) \
204 $(call do_install,$(LIB_FILE),$(libdir_SQ)) 217 $(call do_install_mkdir,$(libdir_SQ)); \
218 cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
205 219
206install_headers: 220install_headers:
207 $(call QUIET_INSTALL, headers) \ 221 $(call QUIET_INSTALL, headers) \
@@ -219,7 +233,7 @@ config-clean:
219 233
220clean: 234clean:
221 $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \ 235 $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
222 *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS 236 *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
223 $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf 237 $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
224 238
225 239
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 5788479384ca..cef7b77eab69 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -111,6 +111,7 @@ starting from ``0.0.1``.
111 111
112Every time ABI is being changed, e.g. because a new symbol is added or 112Every time ABI is being changed, e.g. because a new symbol is added or
113semantic of existing symbol is changed, ABI version should be bumped. 113semantic of existing symbol is changed, ABI version should be bumped.
114This bump in ABI version is at most once per kernel development cycle.
114 115
115For example, if current state of ``libbpf.map`` is: 116For example, if current state of ``libbpf.map`` is:
116 117
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 1b8d8cdd3575..87e3020ac1bc 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1602,16 +1602,12 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1602/* Calculate type signature hash of ENUM. */ 1602/* Calculate type signature hash of ENUM. */
1603static __u32 btf_hash_enum(struct btf_type *t) 1603static __u32 btf_hash_enum(struct btf_type *t)
1604{ 1604{
1605 struct btf_enum *member = (struct btf_enum *)(t + 1); 1605 __u32 h;
1606 __u32 vlen = BTF_INFO_VLEN(t->info);
1607 __u32 h = btf_hash_common(t);
1608 int i;
1609 1606
1610 for (i = 0; i < vlen; i++) { 1607 /* don't hash vlen and enum members to support enum fwd resolving */
1611 h = hash_combine(h, member->name_off); 1608 h = hash_combine(0, t->name_off);
1612 h = hash_combine(h, member->val); 1609 h = hash_combine(h, t->info & ~0xffff);
1613 member++; 1610 h = hash_combine(h, t->size);
1614 }
1615 return h; 1611 return h;
1616} 1612}
1617 1613
@@ -1637,6 +1633,22 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1637 return true; 1633 return true;
1638} 1634}
1639 1635
1636static inline bool btf_is_enum_fwd(struct btf_type *t)
1637{
1638 return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
1639 BTF_INFO_VLEN(t->info) == 0;
1640}
1641
1642static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
1643{
1644 if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
1645 return btf_equal_enum(t1, t2);
1646 /* ignore vlen when comparing */
1647 return t1->name_off == t2->name_off &&
1648 (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
1649 t1->size == t2->size;
1650}
1651
1640/* 1652/*
1641 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, 1653 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1642 * as referenced type IDs equivalence is established separately during type 1654 * as referenced type IDs equivalence is established separately during type
@@ -1860,6 +1872,17 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
1860 new_id = cand_node->type_id; 1872 new_id = cand_node->type_id;
1861 break; 1873 break;
1862 } 1874 }
1875 if (d->opts.dont_resolve_fwds)
1876 continue;
1877 if (btf_compat_enum(t, cand)) {
1878 if (btf_is_enum_fwd(t)) {
1879 /* resolve fwd to full enum */
1880 new_id = cand_node->type_id;
1881 break;
1882 }
1883 /* resolve canonical enum fwd to full enum */
1884 d->map[cand_node->type_id] = type_id;
1885 }
1863 } 1886 }
1864 break; 1887 break;
1865 1888
@@ -2084,15 +2107,15 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2084 return fwd_kind == real_kind; 2107 return fwd_kind == real_kind;
2085 } 2108 }
2086 2109
2087 if (cand_type->info != canon_type->info)
2088 return 0;
2089
2090 switch (cand_kind) { 2110 switch (cand_kind) {
2091 case BTF_KIND_INT: 2111 case BTF_KIND_INT:
2092 return btf_equal_int(cand_type, canon_type); 2112 return btf_equal_int(cand_type, canon_type);
2093 2113
2094 case BTF_KIND_ENUM: 2114 case BTF_KIND_ENUM:
2095 return btf_equal_enum(cand_type, canon_type); 2115 if (d->opts.dont_resolve_fwds)
2116 return btf_equal_enum(cand_type, canon_type);
2117 else
2118 return btf_compat_enum(cand_type, canon_type);
2096 2119
2097 case BTF_KIND_FWD: 2120 case BTF_KIND_FWD:
2098 return btf_equal_common(cand_type, canon_type); 2121 return btf_equal_common(cand_type, canon_type);
@@ -2103,6 +2126,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2103 case BTF_KIND_PTR: 2126 case BTF_KIND_PTR:
2104 case BTF_KIND_TYPEDEF: 2127 case BTF_KIND_TYPEDEF:
2105 case BTF_KIND_FUNC: 2128 case BTF_KIND_FUNC:
2129 if (cand_type->info != canon_type->info)
2130 return 0;
2106 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); 2131 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2107 2132
2108 case BTF_KIND_ARRAY: { 2133 case BTF_KIND_ARRAY: {
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index d5b830d60601..11c25d9ea431 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -112,6 +112,11 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
112# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ 112# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
113#endif 113#endif
114 114
115static inline __u64 ptr_to_u64(const void *ptr)
116{
117 return (__u64) (unsigned long) ptr;
118}
119
115struct bpf_capabilities { 120struct bpf_capabilities {
116 /* v4.14: kernel support for program & map names. */ 121 /* v4.14: kernel support for program & map names. */
117 __u32 name:1; 122 __u32 name:1;
@@ -622,7 +627,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
622 bool strict = !(flags & MAPS_RELAX_COMPAT); 627 bool strict = !(flags & MAPS_RELAX_COMPAT);
623 int i, map_idx, map_def_sz, nr_maps = 0; 628 int i, map_idx, map_def_sz, nr_maps = 0;
624 Elf_Scn *scn; 629 Elf_Scn *scn;
625 Elf_Data *data; 630 Elf_Data *data = NULL;
626 Elf_Data *symbols = obj->efile.symbols; 631 Elf_Data *symbols = obj->efile.symbols;
627 632
628 if (obj->efile.maps_shndx < 0) 633 if (obj->efile.maps_shndx < 0)
@@ -835,12 +840,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
835 obj->efile.maps_shndx = idx; 840 obj->efile.maps_shndx = idx;
836 else if (strcmp(name, BTF_ELF_SEC) == 0) { 841 else if (strcmp(name, BTF_ELF_SEC) == 0) {
837 obj->btf = btf__new(data->d_buf, data->d_size); 842 obj->btf = btf__new(data->d_buf, data->d_size);
838 if (IS_ERR(obj->btf) || btf__load(obj->btf)) { 843 if (IS_ERR(obj->btf)) {
839 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 844 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
840 BTF_ELF_SEC, PTR_ERR(obj->btf)); 845 BTF_ELF_SEC, PTR_ERR(obj->btf));
841 if (!IS_ERR(obj->btf))
842 btf__free(obj->btf);
843 obj->btf = NULL; 846 obj->btf = NULL;
847 continue;
848 }
849 err = btf__load(obj->btf);
850 if (err) {
851 pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
852 BTF_ELF_SEC, err);
853 btf__free(obj->btf);
854 obj->btf = NULL;
855 err = 0;
844 } 856 }
845 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) { 857 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
846 btf_ext_data = data; 858 btf_ext_data = data;
@@ -2999,3 +3011,249 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
2999 ring_buffer_write_tail(header, data_tail); 3011 ring_buffer_write_tail(header, data_tail);
3000 return ret; 3012 return ret;
3001} 3013}
3014
3015struct bpf_prog_info_array_desc {
3016 int array_offset; /* e.g. offset of jited_prog_insns */
3017 int count_offset; /* e.g. offset of jited_prog_len */
3018 int size_offset; /* > 0: offset of rec size,
3019 * < 0: fix size of -size_offset
3020 */
3021};
3022
3023static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
3024 [BPF_PROG_INFO_JITED_INSNS] = {
3025 offsetof(struct bpf_prog_info, jited_prog_insns),
3026 offsetof(struct bpf_prog_info, jited_prog_len),
3027 -1,
3028 },
3029 [BPF_PROG_INFO_XLATED_INSNS] = {
3030 offsetof(struct bpf_prog_info, xlated_prog_insns),
3031 offsetof(struct bpf_prog_info, xlated_prog_len),
3032 -1,
3033 },
3034 [BPF_PROG_INFO_MAP_IDS] = {
3035 offsetof(struct bpf_prog_info, map_ids),
3036 offsetof(struct bpf_prog_info, nr_map_ids),
3037 -(int)sizeof(__u32),
3038 },
3039 [BPF_PROG_INFO_JITED_KSYMS] = {
3040 offsetof(struct bpf_prog_info, jited_ksyms),
3041 offsetof(struct bpf_prog_info, nr_jited_ksyms),
3042 -(int)sizeof(__u64),
3043 },
3044 [BPF_PROG_INFO_JITED_FUNC_LENS] = {
3045 offsetof(struct bpf_prog_info, jited_func_lens),
3046 offsetof(struct bpf_prog_info, nr_jited_func_lens),
3047 -(int)sizeof(__u32),
3048 },
3049 [BPF_PROG_INFO_FUNC_INFO] = {
3050 offsetof(struct bpf_prog_info, func_info),
3051 offsetof(struct bpf_prog_info, nr_func_info),
3052 offsetof(struct bpf_prog_info, func_info_rec_size),
3053 },
3054 [BPF_PROG_INFO_LINE_INFO] = {
3055 offsetof(struct bpf_prog_info, line_info),
3056 offsetof(struct bpf_prog_info, nr_line_info),
3057 offsetof(struct bpf_prog_info, line_info_rec_size),
3058 },
3059 [BPF_PROG_INFO_JITED_LINE_INFO] = {
3060 offsetof(struct bpf_prog_info, jited_line_info),
3061 offsetof(struct bpf_prog_info, nr_jited_line_info),
3062 offsetof(struct bpf_prog_info, jited_line_info_rec_size),
3063 },
3064 [BPF_PROG_INFO_PROG_TAGS] = {
3065 offsetof(struct bpf_prog_info, prog_tags),
3066 offsetof(struct bpf_prog_info, nr_prog_tags),
3067 -(int)sizeof(__u8) * BPF_TAG_SIZE,
3068 },
3069
3070};
3071
3072static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, int offset)
3073{
3074 __u32 *array = (__u32 *)info;
3075
3076 if (offset >= 0)
3077 return array[offset / sizeof(__u32)];
3078 return -(int)offset;
3079}
3080
3081static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, int offset)
3082{
3083 __u64 *array = (__u64 *)info;
3084
3085 if (offset >= 0)
3086 return array[offset / sizeof(__u64)];
3087 return -(int)offset;
3088}
3089
3090static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
3091 __u32 val)
3092{
3093 __u32 *array = (__u32 *)info;
3094
3095 if (offset >= 0)
3096 array[offset / sizeof(__u32)] = val;
3097}
3098
3099static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
3100 __u64 val)
3101{
3102 __u64 *array = (__u64 *)info;
3103
3104 if (offset >= 0)
3105 array[offset / sizeof(__u64)] = val;
3106}
3107
3108struct bpf_prog_info_linear *
3109bpf_program__get_prog_info_linear(int fd, __u64 arrays)
3110{
3111 struct bpf_prog_info_linear *info_linear;
3112 struct bpf_prog_info info = {};
3113 __u32 info_len = sizeof(info);
3114 __u32 data_len = 0;
3115 int i, err;
3116 void *ptr;
3117
3118 if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
3119 return ERR_PTR(-EINVAL);
3120
3121 /* step 1: get array dimensions */
3122 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
3123 if (err) {
3124 pr_debug("can't get prog info: %s", strerror(errno));
3125 return ERR_PTR(-EFAULT);
3126 }
3127
3128 /* step 2: calculate total size of all arrays */
3129 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3130 bool include_array = (arrays & (1UL << i)) > 0;
3131 struct bpf_prog_info_array_desc *desc;
3132 __u32 count, size;
3133
3134 desc = bpf_prog_info_array_desc + i;
3135
3136 /* kernel is too old to support this field */
3137 if (info_len < desc->array_offset + sizeof(__u32) ||
3138 info_len < desc->count_offset + sizeof(__u32) ||
3139 (desc->size_offset > 0 && info_len < desc->size_offset))
3140 include_array = false;
3141
3142 if (!include_array) {
3143 arrays &= ~(1UL << i); /* clear the bit */
3144 continue;
3145 }
3146
3147 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3148 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3149
3150 data_len += count * size;
3151 }
3152
3153 /* step 3: allocate continuous memory */
3154 data_len = roundup(data_len, sizeof(__u64));
3155 info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
3156 if (!info_linear)
3157 return ERR_PTR(-ENOMEM);
3158
3159 /* step 4: fill data to info_linear->info */
3160 info_linear->arrays = arrays;
3161 memset(&info_linear->info, 0, sizeof(info));
3162 ptr = info_linear->data;
3163
3164 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3165 struct bpf_prog_info_array_desc *desc;
3166 __u32 count, size;
3167
3168 if ((arrays & (1UL << i)) == 0)
3169 continue;
3170
3171 desc = bpf_prog_info_array_desc + i;
3172 count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3173 size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3174 bpf_prog_info_set_offset_u32(&info_linear->info,
3175 desc->count_offset, count);
3176 bpf_prog_info_set_offset_u32(&info_linear->info,
3177 desc->size_offset, size);
3178 bpf_prog_info_set_offset_u64(&info_linear->info,
3179 desc->array_offset,
3180 ptr_to_u64(ptr));
3181 ptr += count * size;
3182 }
3183
3184 /* step 5: call syscall again to get required arrays */
3185 err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
3186 if (err) {
3187 pr_debug("can't get prog info: %s", strerror(errno));
3188 free(info_linear);
3189 return ERR_PTR(-EFAULT);
3190 }
3191
3192 /* step 6: verify the data */
3193 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3194 struct bpf_prog_info_array_desc *desc;
3195 __u32 v1, v2;
3196
3197 if ((arrays & (1UL << i)) == 0)
3198 continue;
3199
3200 desc = bpf_prog_info_array_desc + i;
3201 v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
3202 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
3203 desc->count_offset);
3204 if (v1 != v2)
3205 pr_warning("%s: mismatch in element count\n", __func__);
3206
3207 v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
3208 v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
3209 desc->size_offset);
3210 if (v1 != v2)
3211 pr_warning("%s: mismatch in rec size\n", __func__);
3212 }
3213
3214 /* step 7: update info_len and data_len */
3215 info_linear->info_len = sizeof(struct bpf_prog_info);
3216 info_linear->data_len = data_len;
3217
3218 return info_linear;
3219}
3220
3221void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
3222{
3223 int i;
3224
3225 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3226 struct bpf_prog_info_array_desc *desc;
3227 __u64 addr, offs;
3228
3229 if ((info_linear->arrays & (1UL << i)) == 0)
3230 continue;
3231
3232 desc = bpf_prog_info_array_desc + i;
3233 addr = bpf_prog_info_read_offset_u64(&info_linear->info,
3234 desc->array_offset);
3235 offs = addr - ptr_to_u64(info_linear->data);
3236 bpf_prog_info_set_offset_u64(&info_linear->info,
3237 desc->array_offset, offs);
3238 }
3239}
3240
3241void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
3242{
3243 int i;
3244
3245 for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
3246 struct bpf_prog_info_array_desc *desc;
3247 __u64 addr, offs;
3248
3249 if ((info_linear->arrays & (1UL << i)) == 0)
3250 continue;
3251
3252 desc = bpf_prog_info_array_desc + i;
3253 offs = bpf_prog_info_read_offset_u64(&info_linear->info,
3254 desc->array_offset);
3255 addr = offs + ptr_to_u64(info_linear->data);
3256 bpf_prog_info_set_offset_u64(&info_linear->info,
3257 desc->array_offset, addr);
3258 }
3259}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index b4652aa1a58a..c70785cc8ef5 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -10,6 +10,7 @@
10#ifndef __LIBBPF_LIBBPF_H 10#ifndef __LIBBPF_LIBBPF_H
11#define __LIBBPF_LIBBPF_H 11#define __LIBBPF_LIBBPF_H
12 12
13#include <stdarg.h>
13#include <stdio.h> 14#include <stdio.h>
14#include <stdint.h> 15#include <stdint.h>
15#include <stdbool.h> 16#include <stdbool.h>
@@ -377,6 +378,69 @@ LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
377LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id, 378LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
378 enum bpf_prog_type prog_type, __u32 ifindex); 379 enum bpf_prog_type prog_type, __u32 ifindex);
379 380
381/*
382 * Get bpf_prog_info in continuous memory
383 *
384 * struct bpf_prog_info has multiple arrays. The user has option to choose
385 * arrays to fetch from kernel. The following APIs provide an uniform way to
386 * fetch these data. All arrays in bpf_prog_info are stored in a single
387 * continuous memory region. This makes it easy to store the info in a
388 * file.
389 *
390 * Before writing bpf_prog_info_linear to files, it is necessary to
391 * translate pointers in bpf_prog_info to offsets. Helper functions
392 * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
393 * are introduced to switch between pointers and offsets.
394 *
395 * Examples:
396 * # To fetch map_ids and prog_tags:
397 * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
398 * (1UL << BPF_PROG_INFO_PROG_TAGS);
399 * struct bpf_prog_info_linear *info_linear =
400 * bpf_program__get_prog_info_linear(fd, arrays);
401 *
402 * # To save data in file
403 * bpf_program__bpil_addr_to_offs(info_linear);
404 * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
405 *
406 * # To read data from file
407 * read(f, info_linear, <proper_size>);
408 * bpf_program__bpil_offs_to_addr(info_linear);
409 */
410enum bpf_prog_info_array {
411 BPF_PROG_INFO_FIRST_ARRAY = 0,
412 BPF_PROG_INFO_JITED_INSNS = 0,
413 BPF_PROG_INFO_XLATED_INSNS,
414 BPF_PROG_INFO_MAP_IDS,
415 BPF_PROG_INFO_JITED_KSYMS,
416 BPF_PROG_INFO_JITED_FUNC_LENS,
417 BPF_PROG_INFO_FUNC_INFO,
418 BPF_PROG_INFO_LINE_INFO,
419 BPF_PROG_INFO_JITED_LINE_INFO,
420 BPF_PROG_INFO_PROG_TAGS,
421 BPF_PROG_INFO_LAST_ARRAY,
422};
423
424struct bpf_prog_info_linear {
425 /* size of struct bpf_prog_info, when the tool is compiled */
426 __u32 info_len;
427 /* total bytes allocated for data, round up to 8 bytes */
428 __u32 data_len;
429 /* which arrays are included in data */
430 __u64 arrays;
431 struct bpf_prog_info info;
432 __u8 data[];
433};
434
435LIBBPF_API struct bpf_prog_info_linear *
436bpf_program__get_prog_info_linear(int fd, __u64 arrays);
437
438LIBBPF_API void
439bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
440
441LIBBPF_API void
442bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
443
380#ifdef __cplusplus 444#ifdef __cplusplus
381} /* extern "C" */ 445} /* extern "C" */
382#endif 446#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 778a26702a70..f3ce50500cf2 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -153,4 +153,7 @@ LIBBPF_0.0.2 {
153 xsk_socket__delete; 153 xsk_socket__delete;
154 xsk_umem__fd; 154 xsk_umem__fd;
155 xsk_socket__fd; 155 xsk_socket__fd;
156 bpf_program__get_prog_info_linear;
157 bpf_program__bpil_addr_to_offs;
158 bpf_program__bpil_offs_to_addr;
156} LIBBPF_0.0.1; 159} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index f98ac82c9aea..8d0078b65486 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -126,8 +126,8 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
126 cfg->frame_headroom = usr_cfg->frame_headroom; 126 cfg->frame_headroom = usr_cfg->frame_headroom;
127} 127}
128 128
129static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, 129static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
130 const struct xsk_socket_config *usr_cfg) 130 const struct xsk_socket_config *usr_cfg)
131{ 131{
132 if (!usr_cfg) { 132 if (!usr_cfg) {
133 cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; 133 cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
@@ -135,14 +135,19 @@ static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
135 cfg->libbpf_flags = 0; 135 cfg->libbpf_flags = 0;
136 cfg->xdp_flags = 0; 136 cfg->xdp_flags = 0;
137 cfg->bind_flags = 0; 137 cfg->bind_flags = 0;
138 return; 138 return 0;
139 } 139 }
140 140
141 if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
142 return -EINVAL;
143
141 cfg->rx_size = usr_cfg->rx_size; 144 cfg->rx_size = usr_cfg->rx_size;
142 cfg->tx_size = usr_cfg->tx_size; 145 cfg->tx_size = usr_cfg->tx_size;
143 cfg->libbpf_flags = usr_cfg->libbpf_flags; 146 cfg->libbpf_flags = usr_cfg->libbpf_flags;
144 cfg->xdp_flags = usr_cfg->xdp_flags; 147 cfg->xdp_flags = usr_cfg->xdp_flags;
145 cfg->bind_flags = usr_cfg->bind_flags; 148 cfg->bind_flags = usr_cfg->bind_flags;
149
150 return 0;
146} 151}
147 152
148int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, 153int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
@@ -557,7 +562,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
557 } 562 }
558 strncpy(xsk->ifname, ifname, IFNAMSIZ); 563 strncpy(xsk->ifname, ifname, IFNAMSIZ);
559 564
560 xsk_set_xdp_socket_config(&xsk->config, usr_config); 565 err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
566 if (err)
567 goto out_socket;
561 568
562 if (rx) { 569 if (rx) {
563 err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING, 570 err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index c9d038f91af6..53f8be0f4a1f 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
25OBJTOOL := $(OUTPUT)objtool 25OBJTOOL := $(OUTPUT)objtool
26OBJTOOL_IN := $(OBJTOOL)-in.o 26OBJTOOL_IN := $(OBJTOOL)-in.o
27 27
28LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
29LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
30
28all: $(OBJTOOL) 31all: $(OBJTOOL)
29 32
30INCLUDES := -I$(srctree)/tools/include \ 33INCLUDES := -I$(srctree)/tools/include \
31 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ 34 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
32 -I$(srctree)/tools/objtool/arch/$(ARCH)/include 35 -I$(srctree)/tools/objtool/arch/$(ARCH)/include
33WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed 36WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
34CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) 37CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
35LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS) 38LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
36 39
37# Allow old libelf to be used: 40# Allow old libelf to be used:
38elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) 41elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 0414a0d52262..5dde107083c6 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2184,9 +2184,10 @@ static void cleanup(struct objtool_file *file)
2184 elf_close(file->elf); 2184 elf_close(file->elf);
2185} 2185}
2186 2186
2187static struct objtool_file file;
2188
2187int check(const char *_objname, bool orc) 2189int check(const char *_objname, bool orc)
2188{ 2190{
2189 struct objtool_file file;
2190 int ret, warnings = 0; 2191 int ret, warnings = 0;
2191 2192
2192 objname = _objname; 2193 objname = _objname;
diff --git a/tools/perf/Documentation/Build.txt b/tools/perf/Documentation/Build.txt
index f6fc6507ba55..3766886c4bca 100644
--- a/tools/perf/Documentation/Build.txt
+++ b/tools/perf/Documentation/Build.txt
@@ -47,3 +47,27 @@ Those objects are then used in final linking:
47 47
48NOTE this description is omitting other libraries involved, only 48NOTE this description is omitting other libraries involved, only
49 focusing on build framework outcomes 49 focusing on build framework outcomes
50
513) Build with ASan or UBSan
52==========================
53 $ cd tools/perf
54 $ make DESTDIR=/usr
55 $ make DESTDIR=/usr install
56
57AddressSanitizer (or ASan) is a GCC feature that detects memory corruption bugs
58such as buffer overflows and memory leaks.
59
60 $ cd tools/perf
61 $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=address'
62 $ ASAN_OPTIONS=log_path=asan.log ./perf record -a
63
64ASan outputs all detected issues into a log file named 'asan.log.<pid>'.
65
66UndefinedBehaviorSanitizer (or UBSan) is a fast undefined behavior detector
67supported by GCC. UBSan detects undefined behaviors of programs at runtime.
68
69 $ cd tools/perf
70 $ make DEBUG=1 EXTRA_CFLAGS='-fno-omit-frame-pointer -fsanitize=undefined'
71 $ UBSAN_OPTIONS=print_stacktrace=1 ./perf record -a
72
73If UBSan detects any problem at runtime, it outputs a “runtime error:” message.
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 86f3dcc15f83..462b3cde0675 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -114,7 +114,7 @@ Given a $HOME/.perfconfig like this:
114 114
115 [report] 115 [report]
116 # Defaults 116 # Defaults
117 sort-order = comm,dso,symbol 117 sort_order = comm,dso,symbol
118 percent-limit = 0 118 percent-limit = 0
119 queue-size = 0 119 queue-size = 0
120 children = true 120 children = true
@@ -584,6 +584,20 @@ llvm.*::
584 llvm.opts:: 584 llvm.opts::
585 Options passed to llc. 585 Options passed to llc.
586 586
587samples.*::
588
589 samples.context::
590 Define how many ns worth of time to show
591 around samples in perf report sample context browser.
592
593scripts.*::
594
595 Any option defines a script that is added to the scripts menu
596 in the interactive perf browser and whose output is displayed.
597 The name of the option is the name, the value is a script command line.
598 The script gets the same options passed as a full perf script,
599 in particular -i perfdata file, --cpu, --tid
600
587SEE ALSO 601SEE ALSO
588-------- 602--------
589linkperf:perf[1] 603linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 8f0c2be34848..8fe4dffcadd0 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -495,6 +495,10 @@ overhead. You can still switch them on with:
495 495
496 --switch-output --no-no-buildid --no-no-buildid-cache 496 --switch-output --no-no-buildid --no-no-buildid-cache
497 497
498--switch-max-files=N::
499
500When rotating perf.data with --switch-output, only keep N files.
501
498--dry-run:: 502--dry-run::
499Parse options then exit. --dry-run can be used to detect errors in cmdline 503Parse options then exit. --dry-run can be used to detect errors in cmdline
500options. 504options.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 1a27bfe05039..f441baa794ce 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -105,6 +105,8 @@ OPTIONS
105 guest machine 105 guest machine
106 - sample: Number of sample 106 - sample: Number of sample
107 - period: Raw number of event count of sample 107 - period: Raw number of event count of sample
108 - time: Separate the samples by time stamp with the resolution specified by
109 --time-quantum (default 100ms). Specify with overhead and before it.
108 110
109 By default, comm, dso and symbol keys are used. 111 By default, comm, dso and symbol keys are used.
110 (i.e. --sort comm,dso,symbol) 112 (i.e. --sort comm,dso,symbol)
@@ -459,6 +461,10 @@ include::itrace.txt[]
459--socket-filter:: 461--socket-filter::
460 Only report the samples on the processor socket that match with this filter 462 Only report the samples on the processor socket that match with this filter
461 463
464--samples=N::
465 Save N individual samples for each histogram entry to show context in perf
466 report tui browser.
467
462--raw-trace:: 468--raw-trace::
463 When displaying traceevent output, do not use print fmt or plugins. 469 When displaying traceevent output, do not use print fmt or plugins.
464 470
@@ -477,6 +483,9 @@ include::itrace.txt[]
477 Please note that not all mmaps are stored, options affecting which ones 483 Please note that not all mmaps are stored, options affecting which ones
478 are include 'perf record --data', for instance. 484 are include 'perf record --data', for instance.
479 485
486--ns::
487 Show time stamps in nanoseconds.
488
480--stats:: 489--stats::
481 Display overall events statistics without any further processing. 490 Display overall events statistics without any further processing.
482 (like the one at the end of the perf report -D command) 491 (like the one at the end of the perf report -D command)
@@ -494,6 +503,10 @@ include::itrace.txt[]
494 The period/hits keywords set the base the percentage is computed 503 The period/hits keywords set the base the percentage is computed
495 on - the samples period or the number of samples (hits). 504 on - the samples period or the number of samples (hits).
496 505
506--time-quantum::
507 Configure time quantum for time sort key. Default 100ms.
508 Accepts s, us, ms, ns units.
509
497include::callchain-overhead-calculation.txt[] 510include::callchain-overhead-calculation.txt[]
498 511
499SEE ALSO 512SEE ALSO
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2e19fd7ffe35..9b0d04dd2a61 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -380,6 +380,9 @@ include::itrace.txt[]
380 Set the maximum number of program blocks to print with brstackasm for 380 Set the maximum number of program blocks to print with brstackasm for
381 each sample. 381 each sample.
382 382
383--reltime::
384 Print time stamps relative to trace start.
385
383--per-event-dump:: 386--per-event-dump::
384 Create per event files with a "perf.data.EVENT.dump" name instead of 387 Create per event files with a "perf.data.EVENT.dump" name instead of
385 printing to stdout, useful, for instance, for generating flamegraphs. 388 printing to stdout, useful, for instance, for generating flamegraphs.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4bc2085e5197..39c05f89104e 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -72,9 +72,8 @@ report::
72--all-cpus:: 72--all-cpus::
73 system-wide collection from all CPUs (default if no target is specified) 73 system-wide collection from all CPUs (default if no target is specified)
74 74
75-c:: 75--no-scale::
76--scale:: 76 Don't scale/normalize counter values
77 scale/normalize counter values
78 77
79-d:: 78-d::
80--detailed:: 79--detailed::
diff --git a/tools/perf/Documentation/tips.txt b/tools/perf/Documentation/tips.txt
index 849599f39c5e..869965d629ce 100644
--- a/tools/perf/Documentation/tips.txt
+++ b/tools/perf/Documentation/tips.txt
@@ -15,6 +15,7 @@ To see callchains in a more compact form: perf report -g folded
15Show individual samples with: perf script 15Show individual samples with: perf script
16Limit to show entries above 5% only: perf report --percent-limit 5 16Limit to show entries above 5% only: perf report --percent-limit 5
17Profiling branch (mis)predictions with: perf record -b / perf report 17Profiling branch (mis)predictions with: perf record -b / perf report
18To show assembler sample contexts use perf record -b / perf script -F +brstackinsn --xed
18Treat branches as callchains: perf report --branch-history 19Treat branches as callchains: perf report --branch-history
19To count events in every 1000 msec: perf stat -I 1000 20To count events in every 1000 msec: perf stat -I 1000
20Print event counts in CSV format with: perf stat -x, 21Print event counts in CSV format with: perf stat -x,
@@ -34,3 +35,9 @@ Show current config key-value pairs: perf config --list
34Show user configuration overrides: perf config --user --list 35Show user configuration overrides: perf config --user --list
35To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node` 36To add Node.js USDT(User-Level Statically Defined Tracing): perf buildid-cache --add `which node`
36To report cacheline events from previous recording: perf c2c report 37To report cacheline events from previous recording: perf c2c report
38To browse sample contexts use perf report --sample 10 and select in context menu
39To separate samples by time use perf report --sort time,overhead,sym
40To set sample time separation other than 100ms with --sort time use --time-quantum
41Add -I to perf report to sample register values visible in perf report context.
42To show IPC for sampling periods use perf record -e '{cycles,instructions}:S' and then browse context
43To show context switches in perf report sample context add --switch-events to perf record.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 0f11d5891301..fe3f97e342fa 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -227,6 +227,8 @@ FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
227 227
228FEATURE_CHECK_LDFLAGS-libaio = -lrt 228FEATURE_CHECK_LDFLAGS-libaio = -lrt
229 229
230FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes
231
230CFLAGS += -fno-omit-frame-pointer 232CFLAGS += -fno-omit-frame-pointer
231CFLAGS += -ggdb3 233CFLAGS += -ggdb3
232CFLAGS += -funwind-tables 234CFLAGS += -funwind-tables
@@ -713,7 +715,7 @@ else
713endif 715endif
714 716
715ifeq ($(feature-libbfd), 1) 717ifeq ($(feature-libbfd), 1)
716 EXTLIBS += -lbfd 718 EXTLIBS += -lbfd -lopcodes
717else 719else
718 # we are on a system that requires -liberty and (maybe) -lz 720 # we are on a system that requires -liberty and (maybe) -lz
719 # to link against -lbfd; test each case individually here 721 # to link against -lbfd; test each case individually here
@@ -724,12 +726,15 @@ else
724 $(call feature_check,libbfd-liberty-z) 726 $(call feature_check,libbfd-liberty-z)
725 727
726 ifeq ($(feature-libbfd-liberty), 1) 728 ifeq ($(feature-libbfd-liberty), 1)
727 EXTLIBS += -lbfd -liberty 729 EXTLIBS += -lbfd -lopcodes -liberty
730 FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
728 else 731 else
729 ifeq ($(feature-libbfd-liberty-z), 1) 732 ifeq ($(feature-libbfd-liberty-z), 1)
730 EXTLIBS += -lbfd -liberty -lz 733 EXTLIBS += -lbfd -lopcodes -liberty -lz
734 FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
731 endif 735 endif
732 endif 736 endif
737 $(call feature_check,disassembler-four-args)
733endif 738endif
734 739
735ifdef NO_DEMANGLE 740ifdef NO_DEMANGLE
@@ -808,6 +813,10 @@ ifdef HAVE_KVM_STAT_SUPPORT
808 CFLAGS += -DHAVE_KVM_STAT_SUPPORT 813 CFLAGS += -DHAVE_KVM_STAT_SUPPORT
809endif 814endif
810 815
816ifeq ($(feature-disassembler-four-args), 1)
817 CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
818endif
819
811ifeq (${IS_64_BIT}, 1) 820ifeq (${IS_64_BIT}, 1)
812 ifndef NO_PERF_READ_VDSO32 821 ifndef NO_PERF_READ_VDSO32
813 $(call feature_check,compile-32) 822 $(call feature_check,compile-32)
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 01f7555fd933..e8c9f77e9010 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -481,8 +481,8 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
481mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c 481mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
482mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh 482mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
483 483
484$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl) 484$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
485 $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@ 485 $(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
486 486
487mount_flags_array := $(beauty_outdir)/mount_flags_array.c 487mount_flags_array := $(beauty_outdir)/mount_flags_array.c
488mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh 488mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index f0b1709a5ffb..92ee0b4378d4 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -343,6 +343,12 @@
343332 common statx __x64_sys_statx 343332 common statx __x64_sys_statx
344333 common io_pgetevents __x64_sys_io_pgetevents 344333 common io_pgetevents __x64_sys_io_pgetevents
345334 common rseq __x64_sys_rseq 345334 common rseq __x64_sys_rseq
346# don't use numbers 387 through 423, add new calls after the last
347# 'common' entry
348424 common pidfd_send_signal __x64_sys_pidfd_send_signal
349425 common io_uring_setup __x64_sys_io_uring_setup
350426 common io_uring_enter __x64_sys_io_uring_enter
351427 common io_uring_register __x64_sys_io_uring_register
346 352
347# 353#
348# x32-specific system call numbers start at 512 to avoid cache impact 354# x32-specific system call numbers start at 512 to avoid cache impact
@@ -361,7 +367,7 @@
361520 x32 execve __x32_compat_sys_execve/ptregs 367520 x32 execve __x32_compat_sys_execve/ptregs
362521 x32 ptrace __x32_compat_sys_ptrace 368521 x32 ptrace __x32_compat_sys_ptrace
363522 x32 rt_sigpending __x32_compat_sys_rt_sigpending 369522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
364523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait 370523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64
365524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo 371524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
366525 x32 sigaltstack __x32_compat_sys_sigaltstack 372525 x32 sigaltstack __x32_compat_sys_sigaltstack
367526 x32 timer_create __x32_compat_sys_timer_create 373526 x32 timer_create __x32_compat_sys_timer_create
@@ -375,7 +381,7 @@
375534 x32 preadv __x32_compat_sys_preadv64 381534 x32 preadv __x32_compat_sys_preadv64
376535 x32 pwritev __x32_compat_sys_pwritev64 382535 x32 pwritev __x32_compat_sys_pwritev64
377536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo 383536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
378537 x32 recvmmsg __x32_compat_sys_recvmmsg 384537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64
379538 x32 sendmmsg __x32_compat_sys_sendmmsg 385538 x32 sendmmsg __x32_compat_sys_sendmmsg
380539 x32 process_vm_readv __x32_compat_sys_process_vm_readv 386539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
381540 x32 process_vm_writev __x32_compat_sys_process_vm_writev 387540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 7aab0be5fc5f..47f9c56e744f 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -14,5 +14,6 @@ perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
14perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o 14perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
15 15
16perf-$(CONFIG_AUXTRACE) += auxtrace.o 16perf-$(CONFIG_AUXTRACE) += auxtrace.o
17perf-$(CONFIG_AUXTRACE) += archinsn.o
17perf-$(CONFIG_AUXTRACE) += intel-pt.o 18perf-$(CONFIG_AUXTRACE) += intel-pt.o
18perf-$(CONFIG_AUXTRACE) += intel-bts.o 19perf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
new file mode 100644
index 000000000000..4237bb2e7fa2
--- /dev/null
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -0,0 +1,26 @@
1// SPDX-License-Identifier: GPL-2.0
2#include "perf.h"
3#include "archinsn.h"
4#include "util/intel-pt-decoder/insn.h"
5#include "machine.h"
6#include "thread.h"
7#include "symbol.h"
8
9void arch_fetch_insn(struct perf_sample *sample,
10 struct thread *thread,
11 struct machine *machine)
12{
13 struct insn insn;
14 int len;
15 bool is64bit = false;
16
17 if (!sample->ip)
18 return;
19 len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
20 if (len <= 0)
21 return;
22 insn_init(&insn, sample->insn, len, is64bit);
23 insn_get_length(&insn);
24 if (insn_complete(&insn) && insn.length <= len)
25 sample->insn_len = insn.length;
26}
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index 0c0a6e824934..2af067859966 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -224,7 +224,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
224 pthread_attr_t thread_attr, *attrp = NULL; 224 pthread_attr_t thread_attr, *attrp = NULL;
225 cpu_set_t cpuset; 225 cpu_set_t cpuset;
226 unsigned int i, j; 226 unsigned int i, j;
227 int ret; 227 int ret = 0;
228 228
229 if (!noaffinity) 229 if (!noaffinity)
230 pthread_attr_init(&thread_attr); 230 pthread_attr_init(&thread_attr);
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index 5a11534e96a0..fe85448abd45 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -293,7 +293,7 @@ static int do_threads(struct worker *worker, struct cpu_map *cpu)
293 pthread_attr_t thread_attr, *attrp = NULL; 293 pthread_attr_t thread_attr, *attrp = NULL;
294 cpu_set_t cpuset; 294 cpu_set_t cpuset;
295 unsigned int i, j; 295 unsigned int i, j;
296 int ret, events = EPOLLIN; 296 int ret = 0, events = EPOLLIN;
297 297
298 if (oneshot) 298 if (oneshot)
299 events |= EPOLLONESHOT; 299 events |= EPOLLONESHOT;
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index c9f98d00c0e9..a8394b4f1167 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -119,7 +119,7 @@ int cmd_list(int argc, const char **argv)
119 details_flag); 119 details_flag);
120 print_tracepoint_events(NULL, s, raw_dump); 120 print_tracepoint_events(NULL, s, raw_dump);
121 print_sdt_events(NULL, s, raw_dump); 121 print_sdt_events(NULL, s, raw_dump);
122 metricgroup__print(true, true, NULL, raw_dump, details_flag); 122 metricgroup__print(true, true, s, raw_dump, details_flag);
123 free(s); 123 free(s);
124 } 124 }
125 } 125 }
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3f7f3100336..4e2d953d4bc5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -62,6 +62,9 @@ struct switch_output {
62 unsigned long time; 62 unsigned long time;
63 const char *str; 63 const char *str;
64 bool set; 64 bool set;
65 char **filenames;
66 int num_files;
67 int cur_file;
65}; 68};
66 69
67struct record { 70struct record {
@@ -392,7 +395,7 @@ static int record__process_auxtrace(struct perf_tool *tool,
392 size_t padding; 395 size_t padding;
393 u8 pad[8] = {0}; 396 u8 pad[8] = {0};
394 397
395 if (!perf_data__is_pipe(data)) { 398 if (!perf_data__is_pipe(data) && !perf_data__is_dir(data)) {
396 off_t file_offset; 399 off_t file_offset;
397 int fd = perf_data__fd(data); 400 int fd = perf_data__fd(data);
398 int err; 401 int err;
@@ -837,6 +840,8 @@ static void record__init_features(struct record *rec)
837 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) 840 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
838 perf_header__clear_feat(&session->header, HEADER_CLOCKID); 841 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
839 842
843 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
844
840 perf_header__clear_feat(&session->header, HEADER_STAT); 845 perf_header__clear_feat(&session->header, HEADER_STAT);
841} 846}
842 847
@@ -890,6 +895,7 @@ record__switch_output(struct record *rec, bool at_exit)
890{ 895{
891 struct perf_data *data = &rec->data; 896 struct perf_data *data = &rec->data;
892 int fd, err; 897 int fd, err;
898 char *new_filename;
893 899
894 /* Same Size: "2015122520103046"*/ 900 /* Same Size: "2015122520103046"*/
895 char timestamp[] = "InvalidTimestamp"; 901 char timestamp[] = "InvalidTimestamp";
@@ -910,7 +916,7 @@ record__switch_output(struct record *rec, bool at_exit)
910 916
911 fd = perf_data__switch(data, timestamp, 917 fd = perf_data__switch(data, timestamp,
912 rec->session->header.data_offset, 918 rec->session->header.data_offset,
913 at_exit); 919 at_exit, &new_filename);
914 if (fd >= 0 && !at_exit) { 920 if (fd >= 0 && !at_exit) {
915 rec->bytes_written = 0; 921 rec->bytes_written = 0;
916 rec->session->header.data_size = 0; 922 rec->session->header.data_size = 0;
@@ -920,6 +926,21 @@ record__switch_output(struct record *rec, bool at_exit)
920 fprintf(stderr, "[ perf record: Dump %s.%s ]\n", 926 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
921 data->path, timestamp); 927 data->path, timestamp);
922 928
929 if (rec->switch_output.num_files) {
930 int n = rec->switch_output.cur_file + 1;
931
932 if (n >= rec->switch_output.num_files)
933 n = 0;
934 rec->switch_output.cur_file = n;
935 if (rec->switch_output.filenames[n]) {
936 remove(rec->switch_output.filenames[n]);
937 free(rec->switch_output.filenames[n]);
938 }
939 rec->switch_output.filenames[n] = new_filename;
940 } else {
941 free(new_filename);
942 }
943
923 /* Output tracking events */ 944 /* Output tracking events */
924 if (!at_exit) { 945 if (!at_exit) {
925 record__synthesize(rec, false); 946 record__synthesize(rec, false);
@@ -1093,7 +1114,7 @@ static int record__synthesize(struct record *rec, bool tail)
1093 return err; 1114 return err;
1094 } 1115 }
1095 1116
1096 err = perf_event__synthesize_bpf_events(tool, process_synthesized_event, 1117 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
1097 machine, opts); 1118 machine, opts);
1098 if (err < 0) 1119 if (err < 0)
1099 pr_warning("Couldn't synthesize bpf events.\n"); 1120 pr_warning("Couldn't synthesize bpf events.\n");
@@ -1116,6 +1137,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
1116 struct perf_data *data = &rec->data; 1137 struct perf_data *data = &rec->data;
1117 struct perf_session *session; 1138 struct perf_session *session;
1118 bool disabled = false, draining = false; 1139 bool disabled = false, draining = false;
1140 struct perf_evlist *sb_evlist = NULL;
1119 int fd; 1141 int fd;
1120 1142
1121 atexit(record__sig_exit); 1143 atexit(record__sig_exit);
@@ -1216,6 +1238,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
1216 goto out_child; 1238 goto out_child;
1217 } 1239 }
1218 1240
1241 if (!opts->no_bpf_event)
1242 bpf_event__add_sb_event(&sb_evlist, &session->header.env);
1243
1244 if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
1245 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1246 opts->no_bpf_event = true;
1247 }
1248
1219 err = record__synthesize(rec, false); 1249 err = record__synthesize(rec, false);
1220 if (err < 0) 1250 if (err < 0)
1221 goto out_child; 1251 goto out_child;
@@ -1466,6 +1496,9 @@ out_child:
1466 1496
1467out_delete_session: 1497out_delete_session:
1468 perf_session__delete(session); 1498 perf_session__delete(session);
1499
1500 if (!opts->no_bpf_event)
1501 perf_evlist__stop_sb_thread(sb_evlist);
1469 return status; 1502 return status;
1470} 1503}
1471 1504
@@ -1870,7 +1903,7 @@ static struct option __record_options[] = {
1870 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize, 1903 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1871 "synthesize non-sample events at the end of output"), 1904 "synthesize non-sample events at the end of output"),
1872 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"), 1905 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
1873 OPT_BOOLEAN(0, "bpf-event", &record.opts.bpf_event, "record bpf events"), 1906 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
1874 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq, 1907 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1875 "Fail if the specified frequency can't be used"), 1908 "Fail if the specified frequency can't be used"),
1876 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'", 1909 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -1968,9 +2001,11 @@ static struct option __record_options[] = {
1968 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary, 2001 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1969 "Record timestamp boundary (time of first/last samples)"), 2002 "Record timestamp boundary (time of first/last samples)"),
1970 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str, 2003 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
1971 &record.switch_output.set, "signal,size,time", 2004 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
1972 "Switch output when receive SIGUSR2 or cross size,time threshold", 2005 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
1973 "signal"), 2006 "signal"),
2007 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
2008 "Limit number of switch output generated files"),
1974 OPT_BOOLEAN(0, "dry-run", &dry_run, 2009 OPT_BOOLEAN(0, "dry-run", &dry_run,
1975 "Parse options then exit"), 2010 "Parse options then exit"),
1976#ifdef HAVE_AIO_SUPPORT 2011#ifdef HAVE_AIO_SUPPORT
@@ -2057,6 +2092,13 @@ int cmd_record(int argc, const char **argv)
2057 alarm(rec->switch_output.time); 2092 alarm(rec->switch_output.time);
2058 } 2093 }
2059 2094
2095 if (rec->switch_output.num_files) {
2096 rec->switch_output.filenames = calloc(sizeof(char *),
2097 rec->switch_output.num_files);
2098 if (!rec->switch_output.filenames)
2099 return -EINVAL;
2100 }
2101
2060 /* 2102 /*
2061 * Allow aliases to facilitate the lookup of symbols for address 2103 * Allow aliases to facilitate the lookup of symbols for address
2062 * filters. Refer to auxtrace_parse_filters(). 2104 * filters. Refer to auxtrace_parse_filters().
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index ee93c18a6685..4054eb1f98ac 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -47,9 +47,11 @@
47#include <errno.h> 47#include <errno.h>
48#include <inttypes.h> 48#include <inttypes.h>
49#include <regex.h> 49#include <regex.h>
50#include "sane_ctype.h"
50#include <signal.h> 51#include <signal.h>
51#include <linux/bitmap.h> 52#include <linux/bitmap.h>
52#include <linux/stringify.h> 53#include <linux/stringify.h>
54#include <linux/time64.h>
53#include <sys/types.h> 55#include <sys/types.h>
54#include <sys/stat.h> 56#include <sys/stat.h>
55#include <unistd.h> 57#include <unistd.h>
@@ -926,6 +928,43 @@ report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
926 return parse_callchain_report_opt(arg); 928 return parse_callchain_report_opt(arg);
927} 929}
928 930
931static int
932parse_time_quantum(const struct option *opt, const char *arg,
933 int unset __maybe_unused)
934{
935 unsigned long *time_q = opt->value;
936 char *end;
937
938 *time_q = strtoul(arg, &end, 0);
939 if (end == arg)
940 goto parse_err;
941 if (*time_q == 0) {
942 pr_err("time quantum cannot be 0");
943 return -1;
944 }
945 while (isspace(*end))
946 end++;
947 if (*end == 0)
948 return 0;
949 if (!strcmp(end, "s")) {
950 *time_q *= NSEC_PER_SEC;
951 return 0;
952 }
953 if (!strcmp(end, "ms")) {
954 *time_q *= NSEC_PER_MSEC;
955 return 0;
956 }
957 if (!strcmp(end, "us")) {
958 *time_q *= NSEC_PER_USEC;
959 return 0;
960 }
961 if (!strcmp(end, "ns"))
962 return 0;
963parse_err:
964 pr_err("Cannot parse time quantum `%s'\n", arg);
965 return -1;
966}
967
929int 968int
930report_parse_ignore_callees_opt(const struct option *opt __maybe_unused, 969report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
931 const char *arg, int unset __maybe_unused) 970 const char *arg, int unset __maybe_unused)
@@ -1044,10 +1083,9 @@ int cmd_report(int argc, const char **argv)
1044 OPT_BOOLEAN(0, "header-only", &report.header_only, 1083 OPT_BOOLEAN(0, "header-only", &report.header_only,
1045 "Show only data header."), 1084 "Show only data header."),
1046 OPT_STRING('s', "sort", &sort_order, "key[,key2...]", 1085 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1047 "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..." 1086 sort_help("sort by key(s):")),
1048 " Please refer the man page for the complete list."),
1049 OPT_STRING('F', "fields", &field_order, "key[,keys...]", 1087 OPT_STRING('F', "fields", &field_order, "key[,keys...]",
1050 "output field(s): overhead, period, sample plus all of sort keys"), 1088 sort_help("output field(s): overhead period sample ")),
1051 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization, 1089 OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
1052 "Show sample percentage for different cpu modes"), 1090 "Show sample percentage for different cpu modes"),
1053 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization, 1091 OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1120,6 +1158,8 @@ int cmd_report(int argc, const char **argv)
1120 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 1158 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1121 "Enable kernel symbol demangling"), 1159 "Enable kernel symbol demangling"),
1122 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"), 1160 OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
1161 OPT_INTEGER(0, "samples", &symbol_conf.res_sample,
1162 "Number of samples to save per histogram entry for individual browsing"),
1123 OPT_CALLBACK(0, "percent-limit", &report, "percent", 1163 OPT_CALLBACK(0, "percent-limit", &report, "percent",
1124 "Don't show entries under that percent", parse_percent_limit), 1164 "Don't show entries under that percent", parse_percent_limit),
1125 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", 1165 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
@@ -1147,6 +1187,10 @@ int cmd_report(int argc, const char **argv)
1147 OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period", 1187 OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period",
1148 "Set percent type local/global-period/hits", 1188 "Set percent type local/global-period/hits",
1149 annotate_parse_percent_type), 1189 annotate_parse_percent_type),
1190 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"),
1191 OPT_CALLBACK(0, "time-quantum", &symbol_conf.time_quantum, "time (ms|us|ns|s)",
1192 "Set time quantum for time sort key (default 100ms)",
1193 parse_time_quantum),
1150 OPT_END() 1194 OPT_END()
1151 }; 1195 };
1152 struct perf_data data = { 1196 struct perf_data data = {
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 53f78cf3113f..61cfd8f70989 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -29,10 +29,12 @@
29#include "util/time-utils.h" 29#include "util/time-utils.h"
30#include "util/path.h" 30#include "util/path.h"
31#include "print_binary.h" 31#include "print_binary.h"
32#include "archinsn.h"
32#include <linux/bitmap.h> 33#include <linux/bitmap.h>
33#include <linux/kernel.h> 34#include <linux/kernel.h>
34#include <linux/stringify.h> 35#include <linux/stringify.h>
35#include <linux/time64.h> 36#include <linux/time64.h>
37#include <sys/utsname.h>
36#include "asm/bug.h" 38#include "asm/bug.h"
37#include "util/mem-events.h" 39#include "util/mem-events.h"
38#include "util/dump-insn.h" 40#include "util/dump-insn.h"
@@ -51,6 +53,8 @@
51 53
52static char const *script_name; 54static char const *script_name;
53static char const *generate_script_lang; 55static char const *generate_script_lang;
56static bool reltime;
57static u64 initial_time;
54static bool debug_mode; 58static bool debug_mode;
55static u64 last_timestamp; 59static u64 last_timestamp;
56static u64 nr_unordered; 60static u64 nr_unordered;
@@ -58,11 +62,11 @@ static bool no_callchain;
58static bool latency_format; 62static bool latency_format;
59static bool system_wide; 63static bool system_wide;
60static bool print_flags; 64static bool print_flags;
61static bool nanosecs;
62static const char *cpu_list; 65static const char *cpu_list;
63static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); 66static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
64static struct perf_stat_config stat_config; 67static struct perf_stat_config stat_config;
65static int max_blocks; 68static int max_blocks;
69static bool native_arch;
66 70
67unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH; 71unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
68 72
@@ -684,15 +688,21 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
684 } 688 }
685 689
686 if (PRINT_FIELD(TIME)) { 690 if (PRINT_FIELD(TIME)) {
687 nsecs = sample->time; 691 u64 t = sample->time;
692 if (reltime) {
693 if (!initial_time)
694 initial_time = sample->time;
695 t = sample->time - initial_time;
696 }
697 nsecs = t;
688 secs = nsecs / NSEC_PER_SEC; 698 secs = nsecs / NSEC_PER_SEC;
689 nsecs -= secs * NSEC_PER_SEC; 699 nsecs -= secs * NSEC_PER_SEC;
690 700
691 if (nanosecs) 701 if (symbol_conf.nanosecs)
692 printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs); 702 printed += fprintf(fp, "%5lu.%09llu: ", secs, nsecs);
693 else { 703 else {
694 char sample_time[32]; 704 char sample_time[32];
695 timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time)); 705 timestamp__scnprintf_usec(t, sample_time, sizeof(sample_time));
696 printed += fprintf(fp, "%12s: ", sample_time); 706 printed += fprintf(fp, "%12s: ", sample_time);
697 } 707 }
698 } 708 }
@@ -1227,6 +1237,12 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
1227 return len + dlen; 1237 return len + dlen;
1228} 1238}
1229 1239
1240__weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
1241 struct thread *thread __maybe_unused,
1242 struct machine *machine __maybe_unused)
1243{
1244}
1245
1230static int perf_sample__fprintf_insn(struct perf_sample *sample, 1246static int perf_sample__fprintf_insn(struct perf_sample *sample,
1231 struct perf_event_attr *attr, 1247 struct perf_event_attr *attr,
1232 struct thread *thread, 1248 struct thread *thread,
@@ -1234,9 +1250,12 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
1234{ 1250{
1235 int printed = 0; 1251 int printed = 0;
1236 1252
1253 if (sample->insn_len == 0 && native_arch)
1254 arch_fetch_insn(sample, thread, machine);
1255
1237 if (PRINT_FIELD(INSNLEN)) 1256 if (PRINT_FIELD(INSNLEN))
1238 printed += fprintf(fp, " ilen: %d", sample->insn_len); 1257 printed += fprintf(fp, " ilen: %d", sample->insn_len);
1239 if (PRINT_FIELD(INSN)) { 1258 if (PRINT_FIELD(INSN) && sample->insn_len) {
1240 int i; 1259 int i;
1241 1260
1242 printed += fprintf(fp, " insn:"); 1261 printed += fprintf(fp, " insn:");
@@ -1922,6 +1941,13 @@ static int cleanup_scripting(void)
1922 return scripting_ops ? scripting_ops->stop_script() : 0; 1941 return scripting_ops ? scripting_ops->stop_script() : 0;
1923} 1942}
1924 1943
1944static bool filter_cpu(struct perf_sample *sample)
1945{
1946 if (cpu_list)
1947 return !test_bit(sample->cpu, cpu_bitmap);
1948 return false;
1949}
1950
1925static int process_sample_event(struct perf_tool *tool, 1951static int process_sample_event(struct perf_tool *tool,
1926 union perf_event *event, 1952 union perf_event *event,
1927 struct perf_sample *sample, 1953 struct perf_sample *sample,
@@ -1956,7 +1982,7 @@ static int process_sample_event(struct perf_tool *tool,
1956 if (al.filtered) 1982 if (al.filtered)
1957 goto out_put; 1983 goto out_put;
1958 1984
1959 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) 1985 if (filter_cpu(sample))
1960 goto out_put; 1986 goto out_put;
1961 1987
1962 if (scripting_ops) 1988 if (scripting_ops)
@@ -2041,9 +2067,11 @@ static int process_comm_event(struct perf_tool *tool,
2041 sample->tid = event->comm.tid; 2067 sample->tid = event->comm.tid;
2042 sample->pid = event->comm.pid; 2068 sample->pid = event->comm.pid;
2043 } 2069 }
2044 perf_sample__fprintf_start(sample, thread, evsel, 2070 if (!filter_cpu(sample)) {
2071 perf_sample__fprintf_start(sample, thread, evsel,
2045 PERF_RECORD_COMM, stdout); 2072 PERF_RECORD_COMM, stdout);
2046 perf_event__fprintf(event, stdout); 2073 perf_event__fprintf(event, stdout);
2074 }
2047 ret = 0; 2075 ret = 0;
2048out: 2076out:
2049 thread__put(thread); 2077 thread__put(thread);
@@ -2077,9 +2105,11 @@ static int process_namespaces_event(struct perf_tool *tool,
2077 sample->tid = event->namespaces.tid; 2105 sample->tid = event->namespaces.tid;
2078 sample->pid = event->namespaces.pid; 2106 sample->pid = event->namespaces.pid;
2079 } 2107 }
2080 perf_sample__fprintf_start(sample, thread, evsel, 2108 if (!filter_cpu(sample)) {
2081 PERF_RECORD_NAMESPACES, stdout); 2109 perf_sample__fprintf_start(sample, thread, evsel,
2082 perf_event__fprintf(event, stdout); 2110 PERF_RECORD_NAMESPACES, stdout);
2111 perf_event__fprintf(event, stdout);
2112 }
2083 ret = 0; 2113 ret = 0;
2084out: 2114out:
2085 thread__put(thread); 2115 thread__put(thread);
@@ -2111,9 +2141,11 @@ static int process_fork_event(struct perf_tool *tool,
2111 sample->tid = event->fork.tid; 2141 sample->tid = event->fork.tid;
2112 sample->pid = event->fork.pid; 2142 sample->pid = event->fork.pid;
2113 } 2143 }
2114 perf_sample__fprintf_start(sample, thread, evsel, 2144 if (!filter_cpu(sample)) {
2115 PERF_RECORD_FORK, stdout); 2145 perf_sample__fprintf_start(sample, thread, evsel,
2116 perf_event__fprintf(event, stdout); 2146 PERF_RECORD_FORK, stdout);
2147 perf_event__fprintf(event, stdout);
2148 }
2117 thread__put(thread); 2149 thread__put(thread);
2118 2150
2119 return 0; 2151 return 0;
@@ -2141,9 +2173,11 @@ static int process_exit_event(struct perf_tool *tool,
2141 sample->tid = event->fork.tid; 2173 sample->tid = event->fork.tid;
2142 sample->pid = event->fork.pid; 2174 sample->pid = event->fork.pid;
2143 } 2175 }
2144 perf_sample__fprintf_start(sample, thread, evsel, 2176 if (!filter_cpu(sample)) {
2145 PERF_RECORD_EXIT, stdout); 2177 perf_sample__fprintf_start(sample, thread, evsel,
2146 perf_event__fprintf(event, stdout); 2178 PERF_RECORD_EXIT, stdout);
2179 perf_event__fprintf(event, stdout);
2180 }
2147 2181
2148 if (perf_event__process_exit(tool, event, sample, machine) < 0) 2182 if (perf_event__process_exit(tool, event, sample, machine) < 0)
2149 err = -1; 2183 err = -1;
@@ -2177,9 +2211,11 @@ static int process_mmap_event(struct perf_tool *tool,
2177 sample->tid = event->mmap.tid; 2211 sample->tid = event->mmap.tid;
2178 sample->pid = event->mmap.pid; 2212 sample->pid = event->mmap.pid;
2179 } 2213 }
2180 perf_sample__fprintf_start(sample, thread, evsel, 2214 if (!filter_cpu(sample)) {
2181 PERF_RECORD_MMAP, stdout); 2215 perf_sample__fprintf_start(sample, thread, evsel,
2182 perf_event__fprintf(event, stdout); 2216 PERF_RECORD_MMAP, stdout);
2217 perf_event__fprintf(event, stdout);
2218 }
2183 thread__put(thread); 2219 thread__put(thread);
2184 return 0; 2220 return 0;
2185} 2221}
@@ -2209,9 +2245,11 @@ static int process_mmap2_event(struct perf_tool *tool,
2209 sample->tid = event->mmap2.tid; 2245 sample->tid = event->mmap2.tid;
2210 sample->pid = event->mmap2.pid; 2246 sample->pid = event->mmap2.pid;
2211 } 2247 }
2212 perf_sample__fprintf_start(sample, thread, evsel, 2248 if (!filter_cpu(sample)) {
2213 PERF_RECORD_MMAP2, stdout); 2249 perf_sample__fprintf_start(sample, thread, evsel,
2214 perf_event__fprintf(event, stdout); 2250 PERF_RECORD_MMAP2, stdout);
2251 perf_event__fprintf(event, stdout);
2252 }
2215 thread__put(thread); 2253 thread__put(thread);
2216 return 0; 2254 return 0;
2217} 2255}
@@ -2236,9 +2274,11 @@ static int process_switch_event(struct perf_tool *tool,
2236 return -1; 2274 return -1;
2237 } 2275 }
2238 2276
2239 perf_sample__fprintf_start(sample, thread, evsel, 2277 if (!filter_cpu(sample)) {
2240 PERF_RECORD_SWITCH, stdout); 2278 perf_sample__fprintf_start(sample, thread, evsel,
2241 perf_event__fprintf(event, stdout); 2279 PERF_RECORD_SWITCH, stdout);
2280 perf_event__fprintf(event, stdout);
2281 }
2242 thread__put(thread); 2282 thread__put(thread);
2243 return 0; 2283 return 0;
2244} 2284}
@@ -2259,9 +2299,11 @@ process_lost_event(struct perf_tool *tool,
2259 if (thread == NULL) 2299 if (thread == NULL)
2260 return -1; 2300 return -1;
2261 2301
2262 perf_sample__fprintf_start(sample, thread, evsel, 2302 if (!filter_cpu(sample)) {
2263 PERF_RECORD_LOST, stdout); 2303 perf_sample__fprintf_start(sample, thread, evsel,
2264 perf_event__fprintf(event, stdout); 2304 PERF_RECORD_LOST, stdout);
2305 perf_event__fprintf(event, stdout);
2306 }
2265 thread__put(thread); 2307 thread__put(thread);
2266 return 0; 2308 return 0;
2267} 2309}
@@ -2948,7 +2990,8 @@ static int check_ev_match(char *dir_name, char *scriptname,
2948 * will list all statically runnable scripts, select one, execute it and 2990 * will list all statically runnable scripts, select one, execute it and
2949 * show the output in a perf browser. 2991 * show the output in a perf browser.
2950 */ 2992 */
2951int find_scripts(char **scripts_array, char **scripts_path_array) 2993int find_scripts(char **scripts_array, char **scripts_path_array, int num,
2994 int pathlen)
2952{ 2995{
2953 struct dirent *script_dirent, *lang_dirent; 2996 struct dirent *script_dirent, *lang_dirent;
2954 char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN]; 2997 char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
@@ -2993,7 +3036,10 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
2993 /* Skip those real time scripts: xxxtop.p[yl] */ 3036 /* Skip those real time scripts: xxxtop.p[yl] */
2994 if (strstr(script_dirent->d_name, "top.")) 3037 if (strstr(script_dirent->d_name, "top."))
2995 continue; 3038 continue;
2996 sprintf(scripts_path_array[i], "%s/%s", lang_path, 3039 if (i >= num)
3040 break;
3041 snprintf(scripts_path_array[i], pathlen, "%s/%s",
3042 lang_path,
2997 script_dirent->d_name); 3043 script_dirent->d_name);
2998 temp = strchr(script_dirent->d_name, '.'); 3044 temp = strchr(script_dirent->d_name, '.');
2999 snprintf(scripts_array[i], 3045 snprintf(scripts_array[i],
@@ -3232,7 +3278,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
3232{ 3278{
3233 parse_output_fields(NULL, "+insn,-event,-period", 0); 3279 parse_output_fields(NULL, "+insn,-event,-period", 0);
3234 itrace_parse_synth_opts(opt, "i0ns", 0); 3280 itrace_parse_synth_opts(opt, "i0ns", 0);
3235 nanosecs = true; 3281 symbol_conf.nanosecs = true;
3236 return 0; 3282 return 0;
3237} 3283}
3238 3284
@@ -3250,7 +3296,7 @@ static int parse_call_trace(const struct option *opt __maybe_unused,
3250{ 3296{
3251 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0); 3297 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
3252 itrace_parse_synth_opts(opt, "cewp", 0); 3298 itrace_parse_synth_opts(opt, "cewp", 0);
3253 nanosecs = true; 3299 symbol_conf.nanosecs = true;
3254 return 0; 3300 return 0;
3255} 3301}
3256 3302
@@ -3260,7 +3306,7 @@ static int parse_callret_trace(const struct option *opt __maybe_unused,
3260{ 3306{
3261 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0); 3307 parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
3262 itrace_parse_synth_opts(opt, "crewp", 0); 3308 itrace_parse_synth_opts(opt, "crewp", 0);
3263 nanosecs = true; 3309 symbol_conf.nanosecs = true;
3264 return 0; 3310 return 0;
3265} 3311}
3266 3312
@@ -3277,6 +3323,7 @@ int cmd_script(int argc, const char **argv)
3277 .set = false, 3323 .set = false,
3278 .default_no_sample = true, 3324 .default_no_sample = true,
3279 }; 3325 };
3326 struct utsname uts;
3280 char *script_path = NULL; 3327 char *script_path = NULL;
3281 const char **__argv; 3328 const char **__argv;
3282 int i, j, err = 0; 3329 int i, j, err = 0;
@@ -3374,6 +3421,7 @@ int cmd_script(int argc, const char **argv)
3374 "Set the maximum stack depth when parsing the callchain, " 3421 "Set the maximum stack depth when parsing the callchain, "
3375 "anything beyond the specified depth will be ignored. " 3422 "anything beyond the specified depth will be ignored. "
3376 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)), 3423 "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
3424 OPT_BOOLEAN(0, "reltime", &reltime, "Show time stamps relative to start"),
3377 OPT_BOOLEAN('I', "show-info", &show_full_info, 3425 OPT_BOOLEAN('I', "show-info", &show_full_info,
3378 "display extended information from perf.data file"), 3426 "display extended information from perf.data file"),
3379 OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path, 3427 OPT_BOOLEAN('\0', "show-kernel-path", &symbol_conf.show_kernel_path,
@@ -3395,7 +3443,7 @@ int cmd_script(int argc, const char **argv)
3395 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), 3443 OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
3396 OPT_INTEGER(0, "max-blocks", &max_blocks, 3444 OPT_INTEGER(0, "max-blocks", &max_blocks,
3397 "Maximum number of code blocks to dump with brstackinsn"), 3445 "Maximum number of code blocks to dump with brstackinsn"),
3398 OPT_BOOLEAN(0, "ns", &nanosecs, 3446 OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs,
3399 "Use 9 decimal places when displaying time"), 3447 "Use 9 decimal places when displaying time"),
3400 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts", 3448 OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
3401 "Instruction Tracing options\n" ITRACE_HELP, 3449 "Instruction Tracing options\n" ITRACE_HELP,
@@ -3448,6 +3496,11 @@ int cmd_script(int argc, const char **argv)
3448 } 3496 }
3449 } 3497 }
3450 3498
3499 if (script.time_str && reltime) {
3500 fprintf(stderr, "Don't combine --reltime with --time\n");
3501 return -1;
3502 }
3503
3451 if (itrace_synth_opts.callchain && 3504 if (itrace_synth_opts.callchain &&
3452 itrace_synth_opts.callchain_sz > scripting_max_stack) 3505 itrace_synth_opts.callchain_sz > scripting_max_stack)
3453 scripting_max_stack = itrace_synth_opts.callchain_sz; 3506 scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3615,6 +3668,12 @@ int cmd_script(int argc, const char **argv)
3615 if (symbol__init(&session->header.env) < 0) 3668 if (symbol__init(&session->header.env) < 0)
3616 goto out_delete; 3669 goto out_delete;
3617 3670
3671 uname(&uts);
3672 if (!strcmp(uts.machine, session->header.env.arch) ||
3673 (!strcmp(uts.machine, "x86_64") &&
3674 !strcmp(session->header.env.arch, "i386")))
3675 native_arch = true;
3676
3618 script.session = session; 3677 script.session = session;
3619 script__setup_sample_type(&script); 3678 script__setup_sample_type(&script);
3620 3679
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7b8f09b0b8bf..49ee3c2033ec 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -718,7 +718,8 @@ static struct option stat_options[] = {
718 "system-wide collection from all CPUs"), 718 "system-wide collection from all CPUs"),
719 OPT_BOOLEAN('g', "group", &group, 719 OPT_BOOLEAN('g', "group", &group,
720 "put the counters into a counter group"), 720 "put the counters into a counter group"),
721 OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"), 721 OPT_BOOLEAN(0, "scale", &stat_config.scale,
722 "Use --no-scale to disable counter scaling for multiplexing"),
722 OPT_INCR('v', "verbose", &verbose, 723 OPT_INCR('v', "verbose", &verbose,
723 "be more verbose (show counter open errors, etc)"), 724 "be more verbose (show counter open errors, etc)"),
724 OPT_INTEGER('r', "repeat", &stat_config.run_count, 725 OPT_INTEGER('r', "repeat", &stat_config.run_count,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 231a90daa958..1999d6533d12 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1189,30 +1189,26 @@ static int __cmd_top(struct perf_top *top)
1189 pthread_t thread, thread_process; 1189 pthread_t thread, thread_process;
1190 int ret; 1190 int ret;
1191 1191
1192 top->session = perf_session__new(NULL, false, NULL);
1193 if (top->session == NULL)
1194 return -1;
1195
1196 if (!top->annotation_opts.objdump_path) { 1192 if (!top->annotation_opts.objdump_path) {
1197 ret = perf_env__lookup_objdump(&top->session->header.env, 1193 ret = perf_env__lookup_objdump(&top->session->header.env,
1198 &top->annotation_opts.objdump_path); 1194 &top->annotation_opts.objdump_path);
1199 if (ret) 1195 if (ret)
1200 goto out_delete; 1196 return ret;
1201 } 1197 }
1202 1198
1203 ret = callchain_param__setup_sample_type(&callchain_param); 1199 ret = callchain_param__setup_sample_type(&callchain_param);
1204 if (ret) 1200 if (ret)
1205 goto out_delete; 1201 return ret;
1206 1202
1207 if (perf_session__register_idle_thread(top->session) < 0) 1203 if (perf_session__register_idle_thread(top->session) < 0)
1208 goto out_delete; 1204 return ret;
1209 1205
1210 if (top->nr_threads_synthesize > 1) 1206 if (top->nr_threads_synthesize > 1)
1211 perf_set_multithreaded(); 1207 perf_set_multithreaded();
1212 1208
1213 init_process_thread(top); 1209 init_process_thread(top);
1214 1210
1215 ret = perf_event__synthesize_bpf_events(&top->tool, perf_event__process, 1211 ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
1216 &top->session->machines.host, 1212 &top->session->machines.host,
1217 &top->record_opts); 1213 &top->record_opts);
1218 if (ret < 0) 1214 if (ret < 0)
@@ -1227,13 +1223,18 @@ static int __cmd_top(struct perf_top *top)
1227 1223
1228 if (perf_hpp_list.socket) { 1224 if (perf_hpp_list.socket) {
1229 ret = perf_env__read_cpu_topology_map(&perf_env); 1225 ret = perf_env__read_cpu_topology_map(&perf_env);
1230 if (ret < 0) 1226 if (ret < 0) {
1231 goto out_err_cpu_topo; 1227 char errbuf[BUFSIZ];
1228 const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1229
1230 ui__error("Could not read the CPU topology map: %s\n", err);
1231 return ret;
1232 }
1232 } 1233 }
1233 1234
1234 ret = perf_top__start_counters(top); 1235 ret = perf_top__start_counters(top);
1235 if (ret) 1236 if (ret)
1236 goto out_delete; 1237 return ret;
1237 1238
1238 top->session->evlist = top->evlist; 1239 top->session->evlist = top->evlist;
1239 perf_session__set_id_hdr_size(top->session); 1240 perf_session__set_id_hdr_size(top->session);
@@ -1252,7 +1253,7 @@ static int __cmd_top(struct perf_top *top)
1252 ret = -1; 1253 ret = -1;
1253 if (pthread_create(&thread_process, NULL, process_thread, top)) { 1254 if (pthread_create(&thread_process, NULL, process_thread, top)) {
1254 ui__error("Could not create process thread.\n"); 1255 ui__error("Could not create process thread.\n");
1255 goto out_delete; 1256 return ret;
1256 } 1257 }
1257 1258
1258 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : 1259 if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
@@ -1296,19 +1297,7 @@ out_join:
1296out_join_thread: 1297out_join_thread:
1297 pthread_cond_signal(&top->qe.cond); 1298 pthread_cond_signal(&top->qe.cond);
1298 pthread_join(thread_process, NULL); 1299 pthread_join(thread_process, NULL);
1299out_delete:
1300 perf_session__delete(top->session);
1301 top->session = NULL;
1302
1303 return ret; 1300 return ret;
1304
1305out_err_cpu_topo: {
1306 char errbuf[BUFSIZ];
1307 const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1308
1309 ui__error("Could not read the CPU topology map: %s\n", err);
1310 goto out_delete;
1311}
1312} 1301}
1313 1302
1314static int 1303static int
@@ -1480,6 +1469,7 @@ int cmd_top(int argc, const char **argv)
1480 "Display raw encoding of assembly instructions (default)"), 1469 "Display raw encoding of assembly instructions (default)"),
1481 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 1470 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
1482 "Enable kernel symbol demangling"), 1471 "Enable kernel symbol demangling"),
1472 OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
1483 OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path", 1473 OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
1484 "objdump binary to use for disassembly and annotations"), 1474 "objdump binary to use for disassembly and annotations"),
1485 OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style", 1475 OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
@@ -1511,6 +1501,7 @@ int cmd_top(int argc, const char **argv)
1511 "number of thread to run event synthesize"), 1501 "number of thread to run event synthesize"),
1512 OPT_END() 1502 OPT_END()
1513 }; 1503 };
1504 struct perf_evlist *sb_evlist = NULL;
1514 const char * const top_usage[] = { 1505 const char * const top_usage[] = {
1515 "perf top [<options>]", 1506 "perf top [<options>]",
1516 NULL 1507 NULL
@@ -1628,8 +1619,9 @@ int cmd_top(int argc, const char **argv)
1628 annotation_config__init(); 1619 annotation_config__init();
1629 1620
1630 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); 1621 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1631 if (symbol__init(NULL) < 0) 1622 status = symbol__init(NULL);
1632 return -1; 1623 if (status < 0)
1624 goto out_delete_evlist;
1633 1625
1634 sort__setup_elide(stdout); 1626 sort__setup_elide(stdout);
1635 1627
@@ -1639,10 +1631,28 @@ int cmd_top(int argc, const char **argv)
1639 signal(SIGWINCH, winch_sig); 1631 signal(SIGWINCH, winch_sig);
1640 } 1632 }
1641 1633
1634 top.session = perf_session__new(NULL, false, NULL);
1635 if (top.session == NULL) {
1636 status = -1;
1637 goto out_delete_evlist;
1638 }
1639
1640 if (!top.record_opts.no_bpf_event)
1641 bpf_event__add_sb_event(&sb_evlist, &perf_env);
1642
1643 if (perf_evlist__start_sb_thread(sb_evlist, target)) {
1644 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
1645 opts->no_bpf_event = true;
1646 }
1647
1642 status = __cmd_top(&top); 1648 status = __cmd_top(&top);
1643 1649
1650 if (!opts->no_bpf_event)
1651 perf_evlist__stop_sb_thread(sb_evlist);
1652
1644out_delete_evlist: 1653out_delete_evlist:
1645 perf_evlist__delete(top.evlist); 1654 perf_evlist__delete(top.evlist);
1655 perf_session__delete(top.session);
1646 1656
1647 return status; 1657 return status;
1648} 1658}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 05745f3ce912..999fe9170122 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -40,5 +40,6 @@ int cmd_mem(int argc, const char **argv);
40int cmd_data(int argc, const char **argv); 40int cmd_data(int argc, const char **argv);
41int cmd_ftrace(int argc, const char **argv); 41int cmd_ftrace(int argc, const char **argv);
42 42
43int find_scripts(char **scripts_array, char **scripts_path_array); 43int find_scripts(char **scripts_array, char **scripts_path_array, int num,
44 int pathlen);
44#endif 45#endif
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 7b55613924de..c68ee06cae63 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -103,7 +103,7 @@ done
103# diff with extra ignore lines 103# diff with extra ignore lines
104check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' 104check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
105check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"' 105check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>"'
106check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common.h>"' 106check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
107check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"' 107check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
108 108
109# diff non-symmetric files 109# diff non-symmetric files
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index a11cb006f968..72df4b6fa36f 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -298,6 +298,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
298 use_pager = 1; 298 use_pager = 1;
299 commit_pager_choice(); 299 commit_pager_choice();
300 300
301 perf_env__init(&perf_env);
301 perf_env__set_cmdline(&perf_env, argc, argv); 302 perf_env__set_cmdline(&perf_env, argc, argv);
302 status = p->fn(argc, argv); 303 status = p->fn(argc, argv);
303 perf_config__exit(); 304 perf_config__exit();
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index b120e547ddc7..c59743def8d3 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -66,7 +66,7 @@ struct record_opts {
66 bool ignore_missing_thread; 66 bool ignore_missing_thread;
67 bool strict_freq; 67 bool strict_freq;
68 bool sample_id; 68 bool sample_id;
69 bool bpf_event; 69 bool no_bpf_event;
70 unsigned int freq; 70 unsigned int freq;
71 unsigned int mmap_pages; 71 unsigned int mmap_pages;
72 unsigned int auxtrace_mmap_pages; 72 unsigned int auxtrace_mmap_pages;
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 704302c3e67d..9dc2f6b70354 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -348,18 +348,6 @@
348 "PublicDescription": "" 348 "PublicDescription": ""
349 }, 349 },
350 {, 350 {,
351 "EventCode": "0x517082",
352 "EventName": "PM_CO_DISP_FAIL",
353 "BriefDescription": "CO dispatch failed due to all CO machines being busy",
354 "PublicDescription": ""
355 },
356 {,
357 "EventCode": "0x527084",
358 "EventName": "PM_CO_TM_SC_FOOTPRINT",
359 "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
360 "PublicDescription": ""
361 },
362 {,
363 "EventCode": "0x3608a", 351 "EventCode": "0x3608a",
364 "EventName": "PM_CO_USAGE", 352 "EventName": "PM_CO_USAGE",
365 "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running", 353 "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -1578,36 +1566,12 @@
1578 "PublicDescription": "" 1566 "PublicDescription": ""
1579 }, 1567 },
1580 {, 1568 {,
1581 "EventCode": "0x617082",
1582 "EventName": "PM_ISIDE_DISP",
1583 "BriefDescription": "All i-side dispatch attempts",
1584 "PublicDescription": ""
1585 },
1586 {,
1587 "EventCode": "0x627084",
1588 "EventName": "PM_ISIDE_DISP_FAIL",
1589 "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
1590 "PublicDescription": ""
1591 },
1592 {,
1593 "EventCode": "0x627086",
1594 "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
1595 "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
1596 "PublicDescription": ""
1597 },
1598 {,
1599 "EventCode": "0x4608e", 1569 "EventCode": "0x4608e",
1600 "EventName": "PM_ISIDE_L2MEMACC", 1570 "EventName": "PM_ISIDE_L2MEMACC",
1601 "BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)", 1571 "BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
1602 "PublicDescription": "" 1572 "PublicDescription": ""
1603 }, 1573 },
1604 {, 1574 {,
1605 "EventCode": "0x44608e",
1606 "EventName": "PM_ISIDE_MRU_TOUCH",
1607 "BriefDescription": "Iside L2 MRU touch",
1608 "PublicDescription": ""
1609 },
1610 {,
1611 "EventCode": "0x30ac", 1575 "EventCode": "0x30ac",
1612 "EventName": "PM_ISU_REF_FX0", 1576 "EventName": "PM_ISU_REF_FX0",
1613 "BriefDescription": "FX0 ISU reject", 1577 "BriefDescription": "FX0 ISU reject",
@@ -1734,222 +1698,36 @@
1734 "PublicDescription": "" 1698 "PublicDescription": ""
1735 }, 1699 },
1736 {, 1700 {,
1737 "EventCode": "0x417080",
1738 "EventName": "PM_L2_CASTOUT_MOD",
1739 "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
1740 "PublicDescription": ""
1741 },
1742 {,
1743 "EventCode": "0x417082",
1744 "EventName": "PM_L2_CASTOUT_SHR",
1745 "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
1746 "PublicDescription": ""
1747 },
1748 {,
1749 "EventCode": "0x27084", 1701 "EventCode": "0x27084",
1750 "EventName": "PM_L2_CHIP_PUMP", 1702 "EventName": "PM_L2_CHIP_PUMP",
1751 "BriefDescription": "RC requests that were local on chip pump attempts", 1703 "BriefDescription": "RC requests that were local on chip pump attempts",
1752 "PublicDescription": "" 1704 "PublicDescription": ""
1753 }, 1705 },
1754 {, 1706 {,
1755 "EventCode": "0x427086",
1756 "EventName": "PM_L2_DC_INV",
1757 "BriefDescription": "Dcache invalidates from L2",
1758 "PublicDescription": ""
1759 },
1760 {,
1761 "EventCode": "0x44608c",
1762 "EventName": "PM_L2_DISP_ALL_L2MISS",
1763 "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
1764 "PublicDescription": ""
1765 },
1766 {,
1767 "EventCode": "0x27086", 1707 "EventCode": "0x27086",
1768 "EventName": "PM_L2_GROUP_PUMP", 1708 "EventName": "PM_L2_GROUP_PUMP",
1769 "BriefDescription": "RC requests that were on Node Pump attempts", 1709 "BriefDescription": "RC requests that were on Node Pump attempts",
1770 "PublicDescription": "" 1710 "PublicDescription": ""
1771 }, 1711 },
1772 {, 1712 {,
1773 "EventCode": "0x626084",
1774 "EventName": "PM_L2_GRP_GUESS_CORRECT",
1775 "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
1776 "PublicDescription": ""
1777 },
1778 {,
1779 "EventCode": "0x626086",
1780 "EventName": "PM_L2_GRP_GUESS_WRONG",
1781 "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
1782 "PublicDescription": ""
1783 },
1784 {,
1785 "EventCode": "0x427084",
1786 "EventName": "PM_L2_IC_INV",
1787 "BriefDescription": "Icache Invalidates from L2",
1788 "PublicDescription": ""
1789 },
1790 {,
1791 "EventCode": "0x436088",
1792 "EventName": "PM_L2_INST",
1793 "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
1794 "PublicDescription": ""
1795 },
1796 {,
1797 "EventCode": "0x43608a",
1798 "EventName": "PM_L2_INST_MISS",
1799 "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
1800 "PublicDescription": ""
1801 },
1802 {,
1803 "EventCode": "0x416080",
1804 "EventName": "PM_L2_LD",
1805 "BriefDescription": "All successful D-side Load dispatches for this thread",
1806 "PublicDescription": ""
1807 },
1808 {,
1809 "EventCode": "0x437088",
1810 "EventName": "PM_L2_LD_DISP",
1811 "BriefDescription": "All successful load dispatches",
1812 "PublicDescription": ""
1813 },
1814 {,
1815 "EventCode": "0x43708a",
1816 "EventName": "PM_L2_LD_HIT",
1817 "BriefDescription": "All successful load dispatches that were L2 hits",
1818 "PublicDescription": ""
1819 },
1820 {,
1821 "EventCode": "0x426084",
1822 "EventName": "PM_L2_LD_MISS",
1823 "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
1824 "PublicDescription": ""
1825 },
1826 {,
1827 "EventCode": "0x616080",
1828 "EventName": "PM_L2_LOC_GUESS_CORRECT",
1829 "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
1830 "PublicDescription": ""
1831 },
1832 {,
1833 "EventCode": "0x616082",
1834 "EventName": "PM_L2_LOC_GUESS_WRONG",
1835 "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
1836 "PublicDescription": ""
1837 },
1838 {,
1839 "EventCode": "0x516080",
1840 "EventName": "PM_L2_RCLD_DISP",
1841 "BriefDescription": "L2 RC load dispatch attempt",
1842 "PublicDescription": ""
1843 },
1844 {,
1845 "EventCode": "0x516082",
1846 "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
1847 "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
1848 "PublicDescription": ""
1849 },
1850 {,
1851 "EventCode": "0x526084",
1852 "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
1853 "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
1854 "PublicDescription": ""
1855 },
1856 {,
1857 "EventCode": "0x536088",
1858 "EventName": "PM_L2_RCST_DISP",
1859 "BriefDescription": "L2 RC store dispatch attempt",
1860 "PublicDescription": ""
1861 },
1862 {,
1863 "EventCode": "0x53608a",
1864 "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
1865 "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
1866 "PublicDescription": ""
1867 },
1868 {,
1869 "EventCode": "0x54608c",
1870 "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
1871 "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
1872 "PublicDescription": ""
1873 },
1874 {,
1875 "EventCode": "0x537088",
1876 "EventName": "PM_L2_RC_ST_DONE",
1877 "BriefDescription": "RC did st to line that was Tx or Sx",
1878 "PublicDescription": ""
1879 },
1880 {,
1881 "EventCode": "0x63708a",
1882 "EventName": "PM_L2_RTY_LD",
1883 "BriefDescription": "RC retries on PB for any load from core",
1884 "PublicDescription": ""
1885 },
1886 {,
1887 "EventCode": "0x3708a", 1713 "EventCode": "0x3708a",
1888 "EventName": "PM_L2_RTY_ST", 1714 "EventName": "PM_L2_RTY_ST",
1889 "BriefDescription": "RC retries on PB for any store from core", 1715 "BriefDescription": "RC retries on PB for any store from core",
1890 "PublicDescription": "" 1716 "PublicDescription": ""
1891 }, 1717 },
1892 {, 1718 {,
1893 "EventCode": "0x54708c",
1894 "EventName": "PM_L2_SN_M_RD_DONE",
1895 "BriefDescription": "SNP dispatched for a read and was M",
1896 "PublicDescription": ""
1897 },
1898 {,
1899 "EventCode": "0x54708e",
1900 "EventName": "PM_L2_SN_M_WR_DONE",
1901 "BriefDescription": "SNP dispatched for a write and was M",
1902 "PublicDescription": ""
1903 },
1904 {,
1905 "EventCode": "0x53708a",
1906 "EventName": "PM_L2_SN_SX_I_DONE",
1907 "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
1908 "PublicDescription": ""
1909 },
1910 {,
1911 "EventCode": "0x17080", 1719 "EventCode": "0x17080",
1912 "EventName": "PM_L2_ST", 1720 "EventName": "PM_L2_ST",
1913 "BriefDescription": "All successful D-side store dispatches for this thread", 1721 "BriefDescription": "All successful D-side store dispatches for this thread",
1914 "PublicDescription": "" 1722 "PublicDescription": ""
1915 }, 1723 },
1916 {, 1724 {,
1917 "EventCode": "0x44708c",
1918 "EventName": "PM_L2_ST_DISP",
1919 "BriefDescription": "All successful store dispatches",
1920 "PublicDescription": ""
1921 },
1922 {,
1923 "EventCode": "0x44708e",
1924 "EventName": "PM_L2_ST_HIT",
1925 "BriefDescription": "All successful store dispatches that were L2Hits",
1926 "PublicDescription": ""
1927 },
1928 {,
1929 "EventCode": "0x17082", 1725 "EventCode": "0x17082",
1930 "EventName": "PM_L2_ST_MISS", 1726 "EventName": "PM_L2_ST_MISS",
1931 "BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss", 1727 "BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
1932 "PublicDescription": "" 1728 "PublicDescription": ""
1933 }, 1729 },
1934 {, 1730 {,
1935 "EventCode": "0x636088",
1936 "EventName": "PM_L2_SYS_GUESS_CORRECT",
1937 "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
1938 "PublicDescription": ""
1939 },
1940 {,
1941 "EventCode": "0x63608a",
1942 "EventName": "PM_L2_SYS_GUESS_WRONG",
1943 "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
1944 "PublicDescription": ""
1945 },
1946 {,
1947 "EventCode": "0x617080",
1948 "EventName": "PM_L2_SYS_PUMP",
1949 "BriefDescription": "RC requests that were system pump attempts",
1950 "PublicDescription": ""
1951 },
1952 {,
1953 "EventCode": "0x1e05e", 1731 "EventCode": "0x1e05e",
1954 "EventName": "PM_L2_TM_REQ_ABORT", 1732 "EventName": "PM_L2_TM_REQ_ABORT",
1955 "BriefDescription": "TM abort", 1733 "BriefDescription": "TM abort",
@@ -1962,36 +1740,12 @@
1962 "PublicDescription": "" 1740 "PublicDescription": ""
1963 }, 1741 },
1964 {, 1742 {,
1965 "EventCode": "0x23808a",
1966 "EventName": "PM_L3_CINJ",
1967 "BriefDescription": "l3 ci of cache inject",
1968 "PublicDescription": ""
1969 },
1970 {,
1971 "EventCode": "0x128084",
1972 "EventName": "PM_L3_CI_HIT",
1973 "BriefDescription": "L3 Castins Hit (total count",
1974 "PublicDescription": ""
1975 },
1976 {,
1977 "EventCode": "0x128086",
1978 "EventName": "PM_L3_CI_MISS",
1979 "BriefDescription": "L3 castins miss (total count",
1980 "PublicDescription": ""
1981 },
1982 {,
1983 "EventCode": "0x819082", 1743 "EventCode": "0x819082",
1984 "EventName": "PM_L3_CI_USAGE", 1744 "EventName": "PM_L3_CI_USAGE",
1985 "BriefDescription": "rotating sample of 16 CI or CO actives", 1745 "BriefDescription": "rotating sample of 16 CI or CO actives",
1986 "PublicDescription": "" 1746 "PublicDescription": ""
1987 }, 1747 },
1988 {, 1748 {,
1989 "EventCode": "0x438088",
1990 "EventName": "PM_L3_CO",
1991 "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
1992 "PublicDescription": ""
1993 },
1994 {,
1995 "EventCode": "0x83908b", 1749 "EventCode": "0x83908b",
1996 "EventName": "PM_L3_CO0_ALLOC", 1750 "EventName": "PM_L3_CO0_ALLOC",
1997 "BriefDescription": "lifetime, sample of CO machine 0 valid", 1751 "BriefDescription": "lifetime, sample of CO machine 0 valid",
@@ -2010,120 +1764,18 @@
2010 "PublicDescription": "" 1764 "PublicDescription": ""
2011 }, 1765 },
2012 {, 1766 {,
2013 "EventCode": "0x238088",
2014 "EventName": "PM_L3_CO_LCO",
2015 "BriefDescription": "Total L3 castouts occurred on LCO",
2016 "PublicDescription": ""
2017 },
2018 {,
2019 "EventCode": "0x28084", 1767 "EventCode": "0x28084",
2020 "EventName": "PM_L3_CO_MEM", 1768 "EventName": "PM_L3_CO_MEM",
2021 "BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)", 1769 "BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
2022 "PublicDescription": "" 1770 "PublicDescription": ""
2023 }, 1771 },
2024 {, 1772 {,
2025 "EventCode": "0xb19082",
2026 "EventName": "PM_L3_GRP_GUESS_CORRECT",
2027 "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
2028 "PublicDescription": ""
2029 },
2030 {,
2031 "EventCode": "0xb3908a",
2032 "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
2033 "BriefDescription": "Initial scope=group but data from local node. Predition too high",
2034 "PublicDescription": ""
2035 },
2036 {,
2037 "EventCode": "0xb39088",
2038 "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
2039 "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
2040 "PublicDescription": ""
2041 },
2042 {,
2043 "EventCode": "0x218080",
2044 "EventName": "PM_L3_HIT",
2045 "BriefDescription": "L3 Hits",
2046 "PublicDescription": ""
2047 },
2048 {,
2049 "EventCode": "0x138088",
2050 "EventName": "PM_L3_L2_CO_HIT",
2051 "BriefDescription": "L2 castout hits",
2052 "PublicDescription": ""
2053 },
2054 {,
2055 "EventCode": "0x13808a",
2056 "EventName": "PM_L3_L2_CO_MISS",
2057 "BriefDescription": "L2 castout miss",
2058 "PublicDescription": ""
2059 },
2060 {,
2061 "EventCode": "0x14808c",
2062 "EventName": "PM_L3_LAT_CI_HIT",
2063 "BriefDescription": "L3 Lateral Castins Hit",
2064 "PublicDescription": ""
2065 },
2066 {,
2067 "EventCode": "0x14808e",
2068 "EventName": "PM_L3_LAT_CI_MISS",
2069 "BriefDescription": "L3 Lateral Castins Miss",
2070 "PublicDescription": ""
2071 },
2072 {,
2073 "EventCode": "0x228084",
2074 "EventName": "PM_L3_LD_HIT",
2075 "BriefDescription": "L3 demand LD Hits",
2076 "PublicDescription": ""
2077 },
2078 {,
2079 "EventCode": "0x228086",
2080 "EventName": "PM_L3_LD_MISS",
2081 "BriefDescription": "L3 demand LD Miss",
2082 "PublicDescription": ""
2083 },
2084 {,
2085 "EventCode": "0x1e052", 1773 "EventCode": "0x1e052",
2086 "EventName": "PM_L3_LD_PREF", 1774 "EventName": "PM_L3_LD_PREF",
2087 "BriefDescription": "L3 Load Prefetches", 1775 "BriefDescription": "L3 Load Prefetches",
2088 "PublicDescription": "" 1776 "PublicDescription": ""
2089 }, 1777 },
2090 {, 1778 {,
2091 "EventCode": "0xb19080",
2092 "EventName": "PM_L3_LOC_GUESS_CORRECT",
2093 "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
2094 "PublicDescription": ""
2095 },
2096 {,
2097 "EventCode": "0xb29086",
2098 "EventName": "PM_L3_LOC_GUESS_WRONG",
2099 "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
2100 "PublicDescription": ""
2101 },
2102 {,
2103 "EventCode": "0x218082",
2104 "EventName": "PM_L3_MISS",
2105 "BriefDescription": "L3 Misses",
2106 "PublicDescription": ""
2107 },
2108 {,
2109 "EventCode": "0x54808c",
2110 "EventName": "PM_L3_P0_CO_L31",
2111 "BriefDescription": "l3 CO to L3.1 (lco) port 0",
2112 "PublicDescription": ""
2113 },
2114 {,
2115 "EventCode": "0x538088",
2116 "EventName": "PM_L3_P0_CO_MEM",
2117 "BriefDescription": "l3 CO to memory port 0",
2118 "PublicDescription": ""
2119 },
2120 {,
2121 "EventCode": "0x929084",
2122 "EventName": "PM_L3_P0_CO_RTY",
2123 "BriefDescription": "L3 CO received retry port 0",
2124 "PublicDescription": ""
2125 },
2126 {,
2127 "EventCode": "0xa29084", 1779 "EventCode": "0xa29084",
2128 "EventName": "PM_L3_P0_GRP_PUMP", 1780 "EventName": "PM_L3_P0_GRP_PUMP",
2129 "BriefDescription": "L3 pf sent with grp scope port 0", 1781 "BriefDescription": "L3 pf sent with grp scope port 0",
@@ -2148,120 +1800,6 @@
2148 "PublicDescription": "" 1800 "PublicDescription": ""
2149 }, 1801 },
2150 {, 1802 {,
2151 "EventCode": "0xa19080",
2152 "EventName": "PM_L3_P0_NODE_PUMP",
2153 "BriefDescription": "L3 pf sent with nodal scope port 0",
2154 "PublicDescription": ""
2155 },
2156 {,
2157 "EventCode": "0x919080",
2158 "EventName": "PM_L3_P0_PF_RTY",
2159 "BriefDescription": "L3 PF received retry port 0",
2160 "PublicDescription": ""
2161 },
2162 {,
2163 "EventCode": "0x939088",
2164 "EventName": "PM_L3_P0_SN_HIT",
2165 "BriefDescription": "L3 snoop hit port 0",
2166 "PublicDescription": ""
2167 },
2168 {,
2169 "EventCode": "0x118080",
2170 "EventName": "PM_L3_P0_SN_INV",
2171 "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
2172 "PublicDescription": ""
2173 },
2174 {,
2175 "EventCode": "0x94908c",
2176 "EventName": "PM_L3_P0_SN_MISS",
2177 "BriefDescription": "L3 snoop miss port 0",
2178 "PublicDescription": ""
2179 },
2180 {,
2181 "EventCode": "0xa39088",
2182 "EventName": "PM_L3_P0_SYS_PUMP",
2183 "BriefDescription": "L3 pf sent with sys scope port 0",
2184 "PublicDescription": ""
2185 },
2186 {,
2187 "EventCode": "0x54808e",
2188 "EventName": "PM_L3_P1_CO_L31",
2189 "BriefDescription": "l3 CO to L3.1 (lco) port 1",
2190 "PublicDescription": ""
2191 },
2192 {,
2193 "EventCode": "0x53808a",
2194 "EventName": "PM_L3_P1_CO_MEM",
2195 "BriefDescription": "l3 CO to memory port 1",
2196 "PublicDescription": ""
2197 },
2198 {,
2199 "EventCode": "0x929086",
2200 "EventName": "PM_L3_P1_CO_RTY",
2201 "BriefDescription": "L3 CO received retry port 1",
2202 "PublicDescription": ""
2203 },
2204 {,
2205 "EventCode": "0xa29086",
2206 "EventName": "PM_L3_P1_GRP_PUMP",
2207 "BriefDescription": "L3 pf sent with grp scope port 1",
2208 "PublicDescription": ""
2209 },
2210 {,
2211 "EventCode": "0x528086",
2212 "EventName": "PM_L3_P1_LCO_DATA",
2213 "BriefDescription": "lco sent with data port 1",
2214 "PublicDescription": ""
2215 },
2216 {,
2217 "EventCode": "0x518082",
2218 "EventName": "PM_L3_P1_LCO_NO_DATA",
2219 "BriefDescription": "dataless l3 lco sent port 1",
2220 "PublicDescription": ""
2221 },
2222 {,
2223 "EventCode": "0xa4908e",
2224 "EventName": "PM_L3_P1_LCO_RTY",
2225 "BriefDescription": "L3 LCO received retry port 1",
2226 "PublicDescription": ""
2227 },
2228 {,
2229 "EventCode": "0xa19082",
2230 "EventName": "PM_L3_P1_NODE_PUMP",
2231 "BriefDescription": "L3 pf sent with nodal scope port 1",
2232 "PublicDescription": ""
2233 },
2234 {,
2235 "EventCode": "0x919082",
2236 "EventName": "PM_L3_P1_PF_RTY",
2237 "BriefDescription": "L3 PF received retry port 1",
2238 "PublicDescription": ""
2239 },
2240 {,
2241 "EventCode": "0x93908a",
2242 "EventName": "PM_L3_P1_SN_HIT",
2243 "BriefDescription": "L3 snoop hit port 1",
2244 "PublicDescription": ""
2245 },
2246 {,
2247 "EventCode": "0x118082",
2248 "EventName": "PM_L3_P1_SN_INV",
2249 "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
2250 "PublicDescription": ""
2251 },
2252 {,
2253 "EventCode": "0x94908e",
2254 "EventName": "PM_L3_P1_SN_MISS",
2255 "BriefDescription": "L3 snoop miss port 1",
2256 "PublicDescription": ""
2257 },
2258 {,
2259 "EventCode": "0xa3908a",
2260 "EventName": "PM_L3_P1_SYS_PUMP",
2261 "BriefDescription": "L3 pf sent with sys scope port 1",
2262 "PublicDescription": ""
2263 },
2264 {,
2265 "EventCode": "0x84908d", 1803 "EventCode": "0x84908d",
2266 "EventName": "PM_L3_PF0_ALLOC", 1804 "EventName": "PM_L3_PF0_ALLOC",
2267 "BriefDescription": "lifetime, sample of PF machine 0 valid", 1805 "BriefDescription": "lifetime, sample of PF machine 0 valid",
@@ -2274,12 +1812,6 @@
2274 "PublicDescription": "" 1812 "PublicDescription": ""
2275 }, 1813 },
2276 {, 1814 {,
2277 "EventCode": "0x428084",
2278 "EventName": "PM_L3_PF_HIT_L3",
2279 "BriefDescription": "l3 pf hit in l3",
2280 "PublicDescription": ""
2281 },
2282 {,
2283 "EventCode": "0x18080", 1815 "EventCode": "0x18080",
2284 "EventName": "PM_L3_PF_MISS_L3", 1816 "EventName": "PM_L3_PF_MISS_L3",
2285 "BriefDescription": "L3 Prefetch missed in L3", 1817 "BriefDescription": "L3 Prefetch missed in L3",
@@ -2370,42 +1902,12 @@
2370 "PublicDescription": "" 1902 "PublicDescription": ""
2371 }, 1903 },
2372 {, 1904 {,
2373 "EventCode": "0xb29084",
2374 "EventName": "PM_L3_SYS_GUESS_CORRECT",
2375 "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
2376 "PublicDescription": ""
2377 },
2378 {,
2379 "EventCode": "0xb4908c",
2380 "EventName": "PM_L3_SYS_GUESS_WRONG",
2381 "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
2382 "PublicDescription": ""
2383 },
2384 {,
2385 "EventCode": "0x24808e",
2386 "EventName": "PM_L3_TRANS_PF",
2387 "BriefDescription": "L3 Transient prefetch",
2388 "PublicDescription": ""
2389 },
2390 {,
2391 "EventCode": "0x18081", 1905 "EventCode": "0x18081",
2392 "EventName": "PM_L3_WI0_ALLOC", 1906 "EventName": "PM_L3_WI0_ALLOC",
2393 "BriefDescription": "lifetime, sample of Write Inject machine 0 valid", 1907 "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
2394 "PublicDescription": "0.0" 1908 "PublicDescription": "0.0"
2395 }, 1909 },
2396 {, 1910 {,
2397 "EventCode": "0x418080",
2398 "EventName": "PM_L3_WI0_BUSY",
2399 "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
2400 "PublicDescription": ""
2401 },
2402 {,
2403 "EventCode": "0x418082",
2404 "EventName": "PM_L3_WI_USAGE",
2405 "BriefDescription": "rotating sample of 8 WI actives",
2406 "PublicDescription": ""
2407 },
2408 {,
2409 "EventCode": "0xc080", 1911 "EventCode": "0xc080",
2410 "EventName": "PM_LD_REF_L1_LSU0", 1912 "EventName": "PM_LD_REF_L1_LSU0",
2411 "BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject", 1913 "BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject",
@@ -3312,12 +2814,6 @@
3312 "PublicDescription": "" 2814 "PublicDescription": ""
3313 }, 2815 },
3314 {, 2816 {,
3315 "EventCode": "0x328084",
3316 "EventName": "PM_NON_TM_RST_SC",
3317 "BriefDescription": "non tm snp rst tm sc",
3318 "PublicDescription": ""
3319 },
3320 {,
3321 "EventCode": "0x2001a", 2817 "EventCode": "0x2001a",
3322 "EventName": "PM_NTCG_ALL_FIN", 2818 "EventName": "PM_NTCG_ALL_FIN",
3323 "BriefDescription": "Cycles after all instructions have finished to group completed", 2819 "BriefDescription": "Cycles after all instructions have finished to group completed",
@@ -3420,24 +2916,6 @@
3420 "PublicDescription": "" 2916 "PublicDescription": ""
3421 }, 2917 },
3422 {, 2918 {,
3423 "EventCode": "0x34808e",
3424 "EventName": "PM_RD_CLEARING_SC",
3425 "BriefDescription": "rd clearing sc",
3426 "PublicDescription": ""
3427 },
3428 {,
3429 "EventCode": "0x34808c",
3430 "EventName": "PM_RD_FORMING_SC",
3431 "BriefDescription": "rd forming sc",
3432 "PublicDescription": ""
3433 },
3434 {,
3435 "EventCode": "0x428086",
3436 "EventName": "PM_RD_HIT_PF",
3437 "BriefDescription": "rd machine hit l3 pf machine",
3438 "PublicDescription": ""
3439 },
3440 {,
3441 "EventCode": "0x20004", 2919 "EventCode": "0x20004",
3442 "EventName": "PM_REAL_SRQ_FULL", 2920 "EventName": "PM_REAL_SRQ_FULL",
3443 "BriefDescription": "Out of real srq entries", 2921 "BriefDescription": "Out of real srq entries",
@@ -3504,18 +2982,6 @@
3504 "PublicDescription": "TLBIE snoopSnoop TLBIE" 2982 "PublicDescription": "TLBIE snoopSnoop TLBIE"
3505 }, 2983 },
3506 {, 2984 {,
3507 "EventCode": "0x338088",
3508 "EventName": "PM_SNP_TM_HIT_M",
3509 "BriefDescription": "snp tm st hit m mu",
3510 "PublicDescription": ""
3511 },
3512 {,
3513 "EventCode": "0x33808a",
3514 "EventName": "PM_SNP_TM_HIT_T",
3515 "BriefDescription": "snp tm_st_hit t tn te",
3516 "PublicDescription": ""
3517 },
3518 {,
3519 "EventCode": "0x4608c", 2985 "EventCode": "0x4608c",
3520 "EventName": "PM_SN_USAGE", 2986 "EventName": "PM_SN_USAGE",
3521 "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running", 2987 "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
@@ -3534,12 +3000,6 @@
3534 "PublicDescription": "STCX executed reported at sent to nest42" 3000 "PublicDescription": "STCX executed reported at sent to nest42"
3535 }, 3001 },
3536 {, 3002 {,
3537 "EventCode": "0x717080",
3538 "EventName": "PM_ST_CAUSED_FAIL",
3539 "BriefDescription": "Non TM St caused any thread to fail",
3540 "PublicDescription": ""
3541 },
3542 {,
3543 "EventCode": "0x3090", 3003 "EventCode": "0x3090",
3544 "EventName": "PM_SWAP_CANCEL", 3004 "EventName": "PM_SWAP_CANCEL",
3545 "BriefDescription": "SWAP cancel , rtag not available", 3005 "BriefDescription": "SWAP cancel , rtag not available",
@@ -3624,18 +3084,6 @@
3624 "PublicDescription": "" 3084 "PublicDescription": ""
3625 }, 3085 },
3626 {, 3086 {,
3627 "EventCode": "0x318082",
3628 "EventName": "PM_TM_CAM_OVERFLOW",
3629 "BriefDescription": "l3 tm cam overflow during L2 co of SC",
3630 "PublicDescription": ""
3631 },
3632 {,
3633 "EventCode": "0x74708c",
3634 "EventName": "PM_TM_CAP_OVERFLOW",
3635 "BriefDescription": "TM Footprint Capactiy Overflow",
3636 "PublicDescription": ""
3637 },
3638 {,
3639 "EventCode": "0x20ba", 3087 "EventCode": "0x20ba",
3640 "EventName": "PM_TM_END_ALL", 3088 "EventName": "PM_TM_END_ALL",
3641 "BriefDescription": "Tm any tend", 3089 "BriefDescription": "Tm any tend",
@@ -3690,48 +3138,6 @@
3690 "PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42" 3138 "PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
3691 }, 3139 },
3692 {, 3140 {,
3693 "EventCode": "0x727086",
3694 "EventName": "PM_TM_FAV_CAUSED_FAIL",
3695 "BriefDescription": "TM Load (fav) caused another thread to fail",
3696 "PublicDescription": ""
3697 },
3698 {,
3699 "EventCode": "0x717082",
3700 "EventName": "PM_TM_LD_CAUSED_FAIL",
3701 "BriefDescription": "Non TM Ld caused any thread to fail",
3702 "PublicDescription": ""
3703 },
3704 {,
3705 "EventCode": "0x727084",
3706 "EventName": "PM_TM_LD_CONF",
3707 "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
3708 "PublicDescription": ""
3709 },
3710 {,
3711 "EventCode": "0x328086",
3712 "EventName": "PM_TM_RST_SC",
3713 "BriefDescription": "tm snp rst tm sc",
3714 "PublicDescription": ""
3715 },
3716 {,
3717 "EventCode": "0x318080",
3718 "EventName": "PM_TM_SC_CO",
3719 "BriefDescription": "l3 castout tm Sc line",
3720 "PublicDescription": ""
3721 },
3722 {,
3723 "EventCode": "0x73708a",
3724 "EventName": "PM_TM_ST_CAUSED_FAIL",
3725 "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
3726 "PublicDescription": ""
3727 },
3728 {,
3729 "EventCode": "0x737088",
3730 "EventName": "PM_TM_ST_CONF",
3731 "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
3732 "PublicDescription": ""
3733 },
3734 {,
3735 "EventCode": "0x20bc", 3141 "EventCode": "0x20bc",
3736 "EventName": "PM_TM_TBEGIN", 3142 "EventName": "PM_TM_TBEGIN",
3737 "BriefDescription": "Tm nested tbegin", 3143 "BriefDescription": "Tm nested tbegin",
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
new file mode 100644
index 000000000000..93ddfd8053ca
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/branch.json
@@ -0,0 +1,12 @@
1[
2 {
3 "EventName": "bp_l1_btb_correct",
4 "EventCode": "0x8a",
5 "BriefDescription": "L1 BTB Correction."
6 },
7 {
8 "EventName": "bp_l2_btb_correct",
9 "EventCode": "0x8b",
10 "BriefDescription": "L2 BTB Correction."
11 }
12]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
new file mode 100644
index 000000000000..fad4af9142cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/cache.json
@@ -0,0 +1,287 @@
1[
2 {
3 "EventName": "ic_fw32",
4 "EventCode": "0x80",
5 "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
6 },
7 {
8 "EventName": "ic_fw32_miss",
9 "EventCode": "0x81",
10 "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
11 },
12 {
13 "EventName": "ic_cache_fill_l2",
14 "EventCode": "0x82",
15 "BriefDescription": "The number of 64 byte instruction cache line was fulfilled from the L2 cache."
16 },
17 {
18 "EventName": "ic_cache_fill_sys",
19 "EventCode": "0x83",
20 "BriefDescription": "The number of 64 byte instruction cache line fulfilled from system memory or another cache."
21 },
22 {
23 "EventName": "bp_l1_tlb_miss_l2_hit",
24 "EventCode": "0x84",
25 "BriefDescription": "The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
26 },
27 {
28 "EventName": "bp_l1_tlb_miss_l2_miss",
29 "EventCode": "0x85",
30 "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs."
31 },
32 {
33 "EventName": "bp_snp_re_sync",
34 "EventCode": "0x86",
35 "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
36 },
37 {
38 "EventName": "ic_fetch_stall.ic_stall_any",
39 "EventCode": "0x87",
40 "BriefDescription": "IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
41 "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
42 "UMask": "0x4"
43 },
44 {
45 "EventName": "ic_fetch_stall.ic_stall_dq_empty",
46 "EventCode": "0x87",
47 "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
48 "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
49 "UMask": "0x2"
50 },
51 {
52 "EventName": "ic_fetch_stall.ic_stall_back_pressure",
53 "EventCode": "0x87",
54 "BriefDescription": "IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
55 "PublicDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
56 "UMask": "0x1"
57 },
58 {
59 "EventName": "ic_cache_inval.l2_invalidating_probe",
60 "EventCode": "0x8c",
61 "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS).",
62 "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to L2 invalidating probe (external or LS).",
63 "UMask": "0x2"
64 },
65 {
66 "EventName": "ic_cache_inval.fill_invalidated",
67 "EventCode": "0x8c",
68 "BriefDescription": "IC line invalidated due to overwriting fill response.",
69 "PublicDescription": "The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core. IC line invalidated due to overwriting fill response.",
70 "UMask": "0x1"
71 },
72 {
73 "EventName": "bp_tlb_rel",
74 "EventCode": "0x99",
75 "BriefDescription": "The number of ITLB reload requests."
76 },
77 {
78 "EventName": "l2_request_g1.rd_blk_l",
79 "EventCode": "0x60",
80 "BriefDescription": "Requests to L2 Group1.",
81 "PublicDescription": "Requests to L2 Group1.",
82 "UMask": "0x80"
83 },
84 {
85 "EventName": "l2_request_g1.rd_blk_x",
86 "EventCode": "0x60",
87 "BriefDescription": "Requests to L2 Group1.",
88 "PublicDescription": "Requests to L2 Group1.",
89 "UMask": "0x40"
90 },
91 {
92 "EventName": "l2_request_g1.ls_rd_blk_c_s",
93 "EventCode": "0x60",
94 "BriefDescription": "Requests to L2 Group1.",
95 "PublicDescription": "Requests to L2 Group1.",
96 "UMask": "0x20"
97 },
98 {
99 "EventName": "l2_request_g1.cacheable_ic_read",
100 "EventCode": "0x60",
101 "BriefDescription": "Requests to L2 Group1.",
102 "PublicDescription": "Requests to L2 Group1.",
103 "UMask": "0x10"
104 },
105 {
106 "EventName": "l2_request_g1.change_to_x",
107 "EventCode": "0x60",
108 "BriefDescription": "Requests to L2 Group1.",
109 "PublicDescription": "Requests to L2 Group1.",
110 "UMask": "0x8"
111 },
112 {
113 "EventName": "l2_request_g1.prefetch_l2",
114 "EventCode": "0x60",
115 "BriefDescription": "Requests to L2 Group1.",
116 "PublicDescription": "Requests to L2 Group1.",
117 "UMask": "0x4"
118 },
119 {
120 "EventName": "l2_request_g1.l2_hw_pf",
121 "EventCode": "0x60",
122 "BriefDescription": "Requests to L2 Group1.",
123 "PublicDescription": "Requests to L2 Group1.",
124 "UMask": "0x2"
125 },
126 {
127 "EventName": "l2_request_g1.other_requests",
128 "EventCode": "0x60",
129 "BriefDescription": "Events covered by l2_request_g2.",
130 "PublicDescription": "Requests to L2 Group1. Events covered by l2_request_g2.",
131 "UMask": "0x1"
132 },
133 {
134 "EventName": "l2_request_g2.group1",
135 "EventCode": "0x61",
136 "BriefDescription": "All Group 1 commands not in unit0.",
137 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. All Group 1 commands not in unit0.",
138 "UMask": "0x80"
139 },
140 {
141 "EventName": "l2_request_g2.ls_rd_sized",
142 "EventCode": "0x61",
143 "BriefDescription": "RdSized, RdSized32, RdSized64.",
144 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSized, RdSized32, RdSized64.",
145 "UMask": "0x40"
146 },
147 {
148 "EventName": "l2_request_g2.ls_rd_sized_nc",
149 "EventCode": "0x61",
150 "BriefDescription": "RdSizedNC, RdSized32NC, RdSized64NC.",
151 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous. RdSizedNC, RdSized32NC, RdSized64NC.",
152 "UMask": "0x20"
153 },
154 {
155 "EventName": "l2_request_g2.ic_rd_sized",
156 "EventCode": "0x61",
157 "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
158 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
159 "UMask": "0x10"
160 },
161 {
162 "EventName": "l2_request_g2.ic_rd_sized_nc",
163 "EventCode": "0x61",
164 "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
165 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
166 "UMask": "0x8"
167 },
168 {
169 "EventName": "l2_request_g2.smc_inval",
170 "EventCode": "0x61",
171 "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
172 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
173 "UMask": "0x4"
174 },
175 {
176 "EventName": "l2_request_g2.bus_locks_originator",
177 "EventCode": "0x61",
178 "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
179 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
180 "UMask": "0x2"
181 },
182 {
183 "EventName": "l2_request_g2.bus_locks_responses",
184 "EventCode": "0x61",
185 "BriefDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
186 "PublicDescription": "Multi-events in that LS and IF requests can be received simultaneous.",
187 "UMask": "0x1"
188 },
189 {
190 "EventName": "l2_latency.l2_cycles_waiting_on_fills",
191 "EventCode": "0x62",
192 "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
193 "PublicDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
194 "UMask": "0x1"
195 },
196 {
197 "EventName": "l2_wcb_req.wcb_write",
198 "EventCode": "0x63",
199 "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
200 "BriefDescription": "LS to L2 WCB write requests.",
201 "UMask": "0x40"
202 },
203 {
204 "EventName": "l2_wcb_req.wcb_close",
205 "EventCode": "0x63",
206 "BriefDescription": "LS to L2 WCB close requests.",
207 "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
208 "UMask": "0x20"
209 },
210 {
211 "EventName": "l2_wcb_req.zero_byte_store",
212 "EventCode": "0x63",
213 "BriefDescription": "LS to L2 WCB zero byte store requests.",
214 "PublicDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
215 "UMask": "0x4"
216 },
217 {
218 "EventName": "l2_wcb_req.cl_zero",
219 "EventCode": "0x63",
220 "PublicDescription": "LS to L2 WCB cache line zeroing requests.",
221 "BriefDescription": "LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
222 "UMask": "0x1"
223 },
224 {
225 "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
226 "EventCode": "0x64",
227 "BriefDescription": "LS ReadBlock C/S Hit.",
228 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS ReadBlock C/S Hit.",
229 "UMask": "0x80"
230 },
231 {
232 "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
233 "EventCode": "0x64",
234 "BriefDescription": "LS Read Block L Hit X.",
235 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block L Hit X.",
236 "UMask": "0x40"
237 },
238 {
239 "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
240 "EventCode": "0x64",
241 "BriefDescription": "LsRdBlkL Hit Shared.",
242 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkL Hit Shared.",
243 "UMask": "0x20"
244 },
245 {
246 "EventName": "l2_cache_req_stat.ls_rd_blk_x",
247 "EventCode": "0x64",
248 "BriefDescription": "LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
249 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LsRdBlkX/ChgToX Hit X. Count RdBlkX finding Shared as a Miss.",
250 "UMask": "0x10"
251 },
252 {
253 "EventName": "l2_cache_req_stat.ls_rd_blk_c",
254 "EventCode": "0x64",
255 "BriefDescription": "LS Read Block C S L X Change to X Miss.",
256 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. LS Read Block C S L X Change to X Miss.",
257 "UMask": "0x8"
258 },
259 {
260 "EventName": "l2_cache_req_stat.ic_fill_hit_x",
261 "EventCode": "0x64",
262 "BriefDescription": "IC Fill Hit Exclusive Stale.",
263 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Exclusive Stale.",
264 "UMask": "0x4"
265 },
266 {
267 "EventName": "l2_cache_req_stat.ic_fill_hit_s",
268 "EventCode": "0x64",
269 "BriefDescription": "IC Fill Hit Shared.",
270 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Hit Shared.",
271 "UMask": "0x2"
272 },
273 {
274 "EventName": "l2_cache_req_stat.ic_fill_miss",
275 "EventCode": "0x64",
276 "BriefDescription": "IC Fill Miss.",
277 "PublicDescription": "This event does not count accesses to the L2 cache by the L2 prefetcher, but it does count accesses by the L1 prefetcher. IC Fill Miss.",
278 "UMask": "0x1"
279 },
280 {
281 "EventName": "l2_fill_pending.l2_fill_busy",
282 "EventCode": "0x6d",
283 "BriefDescription": "Total cycles spent with one or more fill requests in flight from L2.",
284 "PublicDescription": "Total cycles spent with one or more fill requests in flight from L2.",
285 "UMask": "0x1"
286 }
287]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/core.json b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
new file mode 100644
index 000000000000..7b285b0a7f35
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/core.json
@@ -0,0 +1,134 @@
1[
2 {
3 "EventName": "ex_ret_instr",
4 "EventCode": "0xc0",
5 "BriefDescription": "Retired Instructions."
6 },
7 {
8 "EventName": "ex_ret_cops",
9 "EventCode": "0xc1",
10 "BriefDescription": "Retired Uops.",
11 "PublicDescription": "The number of uOps retired. This includes all processor activity (instructions, exceptions, interrupts, microcode assists, etc.). The number of events logged per cycle can vary from 0 to 4."
12 },
13 {
14 "EventName": "ex_ret_brn",
15 "EventCode": "0xc2",
16 "BriefDescription": "Retired Branch Instructions.",
17 "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
18 },
19 {
20 "EventName": "ex_ret_brn_misp",
21 "EventCode": "0xc3",
22 "BriefDescription": "Retired Branch Instructions Mispredicted.",
23 "PublicDescription": "The number of branch instructions retired, of any type, that were not correctly predicted. This includes those for which prediction is not attempted (far control transfers, exceptions and interrupts)."
24 },
25 {
26 "EventName": "ex_ret_brn_tkn",
27 "EventCode": "0xc4",
28 "BriefDescription": "Retired Taken Branch Instructions.",
29 "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
30 },
31 {
32 "EventName": "ex_ret_brn_tkn_misp",
33 "EventCode": "0xc5",
34 "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
35 "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
36 },
37 {
38 "EventName": "ex_ret_brn_far",
39 "EventCode": "0xc6",
40 "BriefDescription": "Retired Far Control Transfers.",
41 "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
42 },
43 {
44 "EventName": "ex_ret_brn_resync",
45 "EventCode": "0xc7",
46 "BriefDescription": "Retired Branch Resyncs.",
47 "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
48 },
49 {
50 "EventName": "ex_ret_near_ret",
51 "EventCode": "0xc8",
52 "BriefDescription": "Retired Near Returns.",
53 "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
54 },
55 {
56 "EventName": "ex_ret_near_ret_mispred",
57 "EventCode": "0xc9",
58 "BriefDescription": "Retired Near Returns Mispredicted.",
59 "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
60 },
61 {
62 "EventName": "ex_ret_brn_ind_misp",
63 "EventCode": "0xca",
64 "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
65 "PublicDescription": "Retired Indirect Branch Instructions Mispredicted."
66 },
67 {
68 "EventName": "ex_ret_mmx_fp_instr.sse_instr",
69 "EventCode": "0xcb",
70 "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
71 "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
72 "UMask": "0x4"
73 },
74 {
75 "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
76 "EventCode": "0xcb",
77 "BriefDescription": "MMX instructions.",
78 "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
79 "UMask": "0x2"
80 },
81 {
82 "EventName": "ex_ret_mmx_fp_instr.x87_instr",
83 "EventCode": "0xcb",
84 "BriefDescription": "x87 instructions.",
85 "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
86 "UMask": "0x1"
87 },
88 {
89 "EventName": "ex_ret_cond",
90 "EventCode": "0xd1",
91 "BriefDescription": "Retired Conditional Branch Instructions."
92 },
93 {
94 "EventName": "ex_ret_cond_misp",
95 "EventCode": "0xd2",
96 "BriefDescription": "Retired Conditional Branch Instructions Mispredicted."
97 },
98 {
99 "EventName": "ex_div_busy",
100 "EventCode": "0xd3",
101 "BriefDescription": "Div Cycles Busy count."
102 },
103 {
104 "EventName": "ex_div_count",
105 "EventCode": "0xd4",
106 "BriefDescription": "Div Op Count."
107 },
108 {
109 "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
110 "EventCode": "0x1cf",
111 "BriefDescription": "Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
112 "PublicDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
113 "UMask": "0x4"
114 },
115 {
116 "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
117 "EventCode": "0x1cf",
118 "BriefDescription": "Number of Ops tagged by IBS that retired.",
119 "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
120 "UMask": "0x2"
121 },
122 {
123 "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
124 "EventCode": "0x1cf",
125 "BriefDescription": "Number of Ops tagged by IBS.",
126 "PublicDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
127 "UMask": "0x1"
128 },
129 {
130 "EventName": "ex_ret_fus_brnch_inst",
131 "EventCode": "0x1d0",
132 "BriefDescription": "The number of fused retired branch instructions retired per cycle. The number of events logged per cycle can vary from 0 to 3."
133 }
134]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
new file mode 100644
index 000000000000..ea4711983d1d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/floating-point.json
@@ -0,0 +1,168 @@
1[
2 {
3 "EventName": "fpu_pipe_assignment.dual",
4 "EventCode": "0x00",
5 "BriefDescription": "Total number multi-pipe uOps.",
6 "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number multi-pipe uOps assigned to Pipe 3.",
7 "UMask": "0xf0"
8 },
9 {
10 "EventName": "fpu_pipe_assignment.total",
11 "EventCode": "0x00",
12 "BriefDescription": "Total number uOps.",
13 "PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to Pipe 3.",
14 "UMask": "0xf"
15 },
16 {
17 "EventName": "fp_sched_empty",
18 "EventCode": "0x01",
19 "BriefDescription": "This is a speculative event. The number of cycles in which the FPU scheduler is empty. Note that some Ops like FP loads bypass the scheduler."
20 },
21 {
22 "EventName": "fp_retx87_fp_ops.all",
23 "EventCode": "0x02",
24 "BriefDescription": "All Ops.",
25 "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
26 "UMask": "0x7"
27 },
28 {
29 "EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
30 "EventCode": "0x02",
31 "BriefDescription": "Divide and square root Ops.",
32 "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
33 "UMask": "0x4"
34 },
35 {
36 "EventName": "fp_retx87_fp_ops.mul_ops",
37 "EventCode": "0x02",
38 "BriefDescription": "Multiply Ops.",
39 "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
40 "UMask": "0x2"
41 },
42 {
43 "EventName": "fp_retx87_fp_ops.add_sub_ops",
44 "EventCode": "0x02",
45 "BriefDescription": "Add/subtract Ops.",
46 "PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
47 "UMask": "0x1"
48 },
49 {
50 "EventName": "fp_ret_sse_avx_ops.all",
51 "EventCode": "0x03",
52 "BriefDescription": "All FLOPS.",
53 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
54 "UMask": "0xff"
55 },
56 {
57 "EventName": "fp_ret_sse_avx_ops.dp_mult_add_flops",
58 "EventCode": "0x03",
59 "BriefDescription": "Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
60 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
61 "UMask": "0x80"
62 },
63 {
64 "EventName": "fp_ret_sse_avx_ops.dp_div_flops",
65 "EventCode": "0x03",
66 "BriefDescription": "Double precision divide/square root FLOPS.",
67 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision divide/square root FLOPS.",
68 "UMask": "0x40"
69 },
70 {
71 "EventName": "fp_ret_sse_avx_ops.dp_mult_flops",
72 "EventCode": "0x03",
73 "BriefDescription": "Double precision multiply FLOPS.",
74 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision multiply FLOPS.",
75 "UMask": "0x20"
76 },
77 {
78 "EventName": "fp_ret_sse_avx_ops.dp_add_sub_flops",
79 "EventCode": "0x03",
80 "BriefDescription": "Double precision add/subtract FLOPS.",
81 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Double precision add/subtract FLOPS.",
82 "UMask": "0x10"
83 },
84 {
85 "EventName": "fp_ret_sse_avx_ops.sp_mult_add_flops",
86 "EventCode": "0x03",
87 "BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
88 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
89 "UMask": "0x8"
90 },
91 {
92 "EventName": "fp_ret_sse_avx_ops.sp_div_flops",
93 "EventCode": "0x03",
94 "BriefDescription": "Single-precision divide/square root FLOPS.",
95 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
96 "UMask": "0x4"
97 },
98 {
99 "EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
100 "EventCode": "0x03",
101 "BriefDescription": "Single-precision multiply FLOPS.",
102 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
103 "UMask": "0x2"
104 },
105 {
106 "EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
107 "EventCode": "0x03",
108 "BriefDescription": "Single-precision add/subtract FLOPS.",
109 "PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
110 "UMask": "0x1"
111 },
112 {
113 "EventName": "fp_num_mov_elim_scal_op.optimized",
114 "EventCode": "0x04",
115 "BriefDescription": "Number of Scalar Ops optimized.",
116 "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
117 "UMask": "0x8"
118 },
119 {
120 "EventName": "fp_num_mov_elim_scal_op.opt_potential",
121 "EventCode": "0x04",
122 "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
123 "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
124 "UMask": "0x4"
125 },
126 {
127 "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
128 "EventCode": "0x04",
129 "BriefDescription": "Number of SSE Move Ops eliminated.",
130 "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
131 "UMask": "0x2"
132 },
133 {
134 "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
135 "EventCode": "0x04",
136 "BriefDescription": "Number of SSE Move Ops.",
137 "PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
138 "UMask": "0x1"
139 },
140 {
141 "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
142 "EventCode": "0x05",
143 "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
144 "PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
145 "UMask": "0x8"
146 },
147 {
148 "EventName": "fp_retired_ser_ops.x87_bot_ret",
149 "EventCode": "0x05",
150 "BriefDescription": "x87 bottom-executing uOps retired.",
151 "PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
152 "UMask": "0x4"
153 },
154 {
155 "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
156 "EventCode": "0x05",
157 "BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
158 "PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
159 "UMask": "0x2"
160 },
161 {
162 "EventName": "fp_retired_ser_ops.sse_bot_ret",
163 "EventCode": "0x05",
164 "BriefDescription": "SSE bottom-executing uOps retired.",
165 "PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
166 "UMask": "0x1"
167 }
168]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
new file mode 100644
index 000000000000..fa2d60d4def0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/memory.json
@@ -0,0 +1,162 @@
1[
2 {
3 "EventName": "ls_locks.bus_lock",
4 "EventCode": "0x25",
5 "BriefDescription": "Bus lock when a locked operation crosses a cache boundary or is done on an uncacheable memory type.",
6 "PublicDescription": "Bus lock when a locked operation crosses a cache boundary or is done on an uncacheable memory type.",
7 "UMask": "0x1"
8 },
9 {
10 "EventName": "ls_dispatch.ld_st_dispatch",
11 "EventCode": "0x29",
12 "BriefDescription": "Load-op-Stores.",
13 "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
14 "UMask": "0x4"
15 },
16 {
17 "EventName": "ls_dispatch.store_dispatch",
18 "EventCode": "0x29",
19 "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
20 "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
21 "UMask": "0x2"
22 },
23 {
24 "EventName": "ls_dispatch.ld_dispatch",
25 "EventCode": "0x29",
26 "BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
27 "PublicDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
28 "UMask": "0x1"
29 },
30 {
31 "EventName": "ls_stlf",
32 "EventCode": "0x35",
33 "BriefDescription": "Number of STLF hits."
34 },
35 {
36 "EventName": "ls_dc_accesses",
37 "EventCode": "0x40",
38 "BriefDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
39 },
40 {
41 "EventName": "ls_l1_d_tlb_miss.all",
42 "EventCode": "0x45",
43 "BriefDescription": "L1 DTLB Miss or Reload of all sizes.",
44 "PublicDescription": "L1 DTLB Miss or Reload of all sizes.",
45 "UMask": "0xff"
46 },
47 {
48 "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
49 "EventCode": "0x45",
50 "BriefDescription": "L1 DTLB Miss of a page of 1G size.",
51 "PublicDescription": "L1 DTLB Miss of a page of 1G size.",
52 "UMask": "0x80"
53 },
54 {
55 "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
56 "EventCode": "0x45",
57 "BriefDescription": "L1 DTLB Miss of a page of 2M size.",
58 "PublicDescription": "L1 DTLB Miss of a page of 2M size.",
59 "UMask": "0x40"
60 },
61 {
62 "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_miss",
63 "EventCode": "0x45",
64 "BriefDescription": "L1 DTLB Miss of a page of 32K size.",
65 "PublicDescription": "L1 DTLB Miss of a page of 32K size.",
66 "UMask": "0x20"
67 },
68 {
69 "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
70 "EventCode": "0x45",
71 "BriefDescription": "L1 DTLB Miss of a page of 4K size.",
72 "PublicDescription": "L1 DTLB Miss of a page of 4K size.",
73 "UMask": "0x10"
74 },
75 {
76 "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
77 "EventCode": "0x45",
78 "BriefDescription": "L1 DTLB Reload of a page of 1G size.",
79 "PublicDescription": "L1 DTLB Reload of a page of 1G size.",
80 "UMask": "0x8"
81 },
82 {
83 "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
84 "EventCode": "0x45",
85 "BriefDescription": "L1 DTLB Reload of a page of 2M size.",
86 "PublicDescription": "L1 DTLB Reload of a page of 2M size.",
87 "UMask": "0x4"
88 },
89 {
90 "EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
91 "EventCode": "0x45",
92 "BriefDescription": "L1 DTLB Reload of a page of 32K size.",
93 "PublicDescription": "L1 DTLB Reload of a page of 32K size.",
94 "UMask": "0x2"
95 },
96 {
97 "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
98 "EventCode": "0x45",
99 "BriefDescription": "L1 DTLB Reload of a page of 4K size.",
100 "PublicDescription": "L1 DTLB Reload of a page of 4K size.",
101 "UMask": "0x1"
102 },
103 {
104 "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_iside",
105 "EventCode": "0x46",
106 "BriefDescription": "Tablewalker allocation.",
107 "PublicDescription": "Tablewalker allocation.",
108 "UMask": "0xc"
109 },
110 {
111 "EventName": "ls_tablewalker.perf_mon_tablewalk_alloc_dside",
112 "EventCode": "0x46",
113 "BriefDescription": "Tablewalker allocation.",
114 "PublicDescription": "Tablewalker allocation.",
115 "UMask": "0x3"
116 },
117 {
118 "EventName": "ls_misal_accesses",
119 "EventCode": "0x47",
120 "BriefDescription": "Misaligned loads."
121 },
122 {
123 "EventName": "ls_pref_instr_disp.prefetch_nta",
124 "EventCode": "0x4b",
125 "BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
126 "PublicDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
127 "UMask": "0x4"
128 },
129 {
130 "EventName": "ls_pref_instr_disp.store_prefetch_w",
131 "EventCode": "0x4b",
132 "BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
133 "PublicDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
134 "UMask": "0x2"
135 },
136 {
137 "EventName": "ls_pref_instr_disp.load_prefetch_w",
138 "EventCode": "0x4b",
139 "BriefDescription": "Prefetch, Prefetch_T0_T1_T2.",
140 "PublicDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
141 "UMask": "0x1"
142 },
143 {
144 "EventName": "ls_inef_sw_pref.mab_mch_cnt",
145 "EventCode": "0x52",
146 "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
147 "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
148 "UMask": "0x2"
149 },
150 {
151 "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
152 "EventCode": "0x52",
153 "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
154 "PublicDescription": "The number of software prefetches that did not fetch data outside of the processor core.",
155 "UMask": "0x1"
156 },
157 {
158 "EventName": "ls_not_halted_cyc",
159 "EventCode": "0x76",
160 "BriefDescription": "Cycles not in Halt."
161 }
162]
diff --git a/tools/perf/pmu-events/arch/x86/amdfam17h/other.json b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
new file mode 100644
index 000000000000..b26a00d05a2e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdfam17h/other.json
@@ -0,0 +1,65 @@
1[
2 {
3 "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
4 "EventCode": "0x28a",
5 "BriefDescription": "OC to IC mode switch.",
6 "PublicDescription": "OC Mode Switch. OC to IC mode switch.",
7 "UMask": "0x2"
8 },
9 {
10 "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
11 "EventCode": "0x28a",
12 "BriefDescription": "IC to OC mode switch.",
13 "PublicDescription": "OC Mode Switch. IC to OC mode switch.",
14 "UMask": "0x1"
15 },
16 {
17 "EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
18 "EventCode": "0xaf",
19 "BriefDescription": "RETIRE Tokens unavailable.",
20 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. RETIRE Tokens unavailable.",
21 "UMask": "0x40"
22 },
23 {
24 "EventName": "de_dis_dispatch_token_stalls0.agsq_token_stall",
25 "EventCode": "0xaf",
26 "BriefDescription": "AGSQ Tokens unavailable.",
27 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
28 "UMask": "0x20"
29 },
30 {
31 "EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
32 "EventCode": "0xaf",
33 "BriefDescription": "ALU tokens total unavailable.",
34 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
35 "UMask": "0x10"
36 },
37 {
38 "EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
39 "EventCode": "0xaf",
40 "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
41 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall.",
42 "UMask": "0x8"
43 },
44 {
45 "EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
46 "EventCode": "0xaf",
47 "BriefDescription": "ALSQ 3 Tokens unavailable.",
48 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
49 "UMask": "0x4"
50 },
51 {
52 "EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
53 "EventCode": "0xaf",
54 "BriefDescription": "ALSQ 2 Tokens unavailable.",
55 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
56 "UMask": "0x2"
57 },
58 {
59 "EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
60 "EventCode": "0xaf",
61 "BriefDescription": "ALSQ 1 Tokens unavailable.",
62 "PublicDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
63 "UMask": "0x1"
64 }
65]
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index e05c2c8458fc..d6984a3017e0 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -33,3 +33,4 @@ GenuineIntel-6-25,v2,westmereep-sp,core
33GenuineIntel-6-2F,v2,westmereex,core 33GenuineIntel-6-2F,v2,westmereex,core
34GenuineIntel-6-55-[01234],v1,skylakex,core 34GenuineIntel-6-55-[01234],v1,skylakex,core
35GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core 35GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
36AuthenticAMD-23-[[:xdigit:]]+,v1,amdfam17h,core
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 390a351d15ea..c3eae1d77d36 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -10,6 +10,8 @@
10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11# more details. 11# more details.
12 12
13from __future__ import print_function
14
13import os 15import os
14import sys 16import sys
15import struct 17import struct
@@ -199,6 +201,18 @@ import datetime
199 201
200from PySide.QtSql import * 202from PySide.QtSql import *
201 203
204if sys.version_info < (3, 0):
205 def toserverstr(str):
206 return str
207 def toclientstr(str):
208 return str
209else:
210 # Assume UTF-8 server_encoding and client_encoding
211 def toserverstr(str):
212 return bytes(str, "UTF_8")
213 def toclientstr(str):
214 return bytes(str, "UTF_8")
215
202# Need to access PostgreSQL C library directly to use COPY FROM STDIN 216# Need to access PostgreSQL C library directly to use COPY FROM STDIN
203from ctypes import * 217from ctypes import *
204libpq = CDLL("libpq.so.5") 218libpq = CDLL("libpq.so.5")
@@ -234,12 +248,17 @@ perf_db_export_mode = True
234perf_db_export_calls = False 248perf_db_export_calls = False
235perf_db_export_callchains = False 249perf_db_export_callchains = False
236 250
251def printerr(*args, **kw_args):
252 print(*args, file=sys.stderr, **kw_args)
253
254def printdate(*args, **kw_args):
255 print(datetime.datetime.today(), *args, sep=' ', **kw_args)
237 256
238def usage(): 257def usage():
239 print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]" 258 printerr("Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]")
240 print >> sys.stderr, "where: columns 'all' or 'branches'" 259 printerr("where: columns 'all' or 'branches'")
241 print >> sys.stderr, " calls 'calls' => create calls and call_paths table" 260 printerr(" calls 'calls' => create calls and call_paths table")
242 print >> sys.stderr, " callchains 'callchains' => create call_paths table" 261 printerr(" callchains 'callchains' => create call_paths table")
243 raise Exception("Too few arguments") 262 raise Exception("Too few arguments")
244 263
245if (len(sys.argv) < 2): 264if (len(sys.argv) < 2):
@@ -273,7 +292,7 @@ def do_query(q, s):
273 return 292 return
274 raise Exception("Query failed: " + q.lastError().text()) 293 raise Exception("Query failed: " + q.lastError().text())
275 294
276print datetime.datetime.today(), "Creating database..." 295printdate("Creating database...")
277 296
278db = QSqlDatabase.addDatabase('QPSQL') 297db = QSqlDatabase.addDatabase('QPSQL')
279query = QSqlQuery(db) 298query = QSqlQuery(db)
@@ -506,12 +525,12 @@ do_query(query, 'CREATE VIEW samples_view AS '
506 ' FROM samples') 525 ' FROM samples')
507 526
508 527
509file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0) 528file_header = struct.pack("!11sii", b"PGCOPY\n\377\r\n\0", 0, 0)
510file_trailer = "\377\377" 529file_trailer = b"\377\377"
511 530
512def open_output_file(file_name): 531def open_output_file(file_name):
513 path_name = output_dir_name + "/" + file_name 532 path_name = output_dir_name + "/" + file_name
514 file = open(path_name, "w+") 533 file = open(path_name, "wb+")
515 file.write(file_header) 534 file.write(file_header)
516 return file 535 return file
517 536
@@ -526,13 +545,13 @@ def copy_output_file_direct(file, table_name):
526 545
527# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly 546# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
528def copy_output_file(file, table_name): 547def copy_output_file(file, table_name):
529 conn = PQconnectdb("dbname = " + dbname) 548 conn = PQconnectdb(toclientstr("dbname = " + dbname))
530 if (PQstatus(conn)): 549 if (PQstatus(conn)):
531 raise Exception("COPY FROM STDIN PQconnectdb failed") 550 raise Exception("COPY FROM STDIN PQconnectdb failed")
532 file.write(file_trailer) 551 file.write(file_trailer)
533 file.seek(0) 552 file.seek(0)
534 sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')" 553 sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
535 res = PQexec(conn, sql) 554 res = PQexec(conn, toclientstr(sql))
536 if (PQresultStatus(res) != 4): 555 if (PQresultStatus(res) != 4):
537 raise Exception("COPY FROM STDIN PQexec failed") 556 raise Exception("COPY FROM STDIN PQexec failed")
538 data = file.read(65536) 557 data = file.read(65536)
@@ -566,7 +585,7 @@ if perf_db_export_calls:
566 call_file = open_output_file("call_table.bin") 585 call_file = open_output_file("call_table.bin")
567 586
568def trace_begin(): 587def trace_begin():
569 print datetime.datetime.today(), "Writing to intermediate files..." 588 printdate("Writing to intermediate files...")
570 # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs 589 # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
571 evsel_table(0, "unknown") 590 evsel_table(0, "unknown")
572 machine_table(0, 0, "unknown") 591 machine_table(0, 0, "unknown")
@@ -582,7 +601,7 @@ def trace_begin():
582unhandled_count = 0 601unhandled_count = 0
583 602
584def trace_end(): 603def trace_end():
585 print datetime.datetime.today(), "Copying to database..." 604 printdate("Copying to database...")
586 copy_output_file(evsel_file, "selected_events") 605 copy_output_file(evsel_file, "selected_events")
587 copy_output_file(machine_file, "machines") 606 copy_output_file(machine_file, "machines")
588 copy_output_file(thread_file, "threads") 607 copy_output_file(thread_file, "threads")
@@ -597,7 +616,7 @@ def trace_end():
597 if perf_db_export_calls: 616 if perf_db_export_calls:
598 copy_output_file(call_file, "calls") 617 copy_output_file(call_file, "calls")
599 618
600 print datetime.datetime.today(), "Removing intermediate files..." 619 printdate("Removing intermediate files...")
601 remove_output_file(evsel_file) 620 remove_output_file(evsel_file)
602 remove_output_file(machine_file) 621 remove_output_file(machine_file)
603 remove_output_file(thread_file) 622 remove_output_file(thread_file)
@@ -612,7 +631,7 @@ def trace_end():
612 if perf_db_export_calls: 631 if perf_db_export_calls:
613 remove_output_file(call_file) 632 remove_output_file(call_file)
614 os.rmdir(output_dir_name) 633 os.rmdir(output_dir_name)
615 print datetime.datetime.today(), "Adding primary keys" 634 printdate("Adding primary keys")
616 do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)') 635 do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
617 do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)') 636 do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
618 do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)') 637 do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
@@ -627,7 +646,7 @@ def trace_end():
627 if perf_db_export_calls: 646 if perf_db_export_calls:
628 do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)') 647 do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
629 648
630 print datetime.datetime.today(), "Adding foreign keys" 649 printdate("Adding foreign keys")
631 do_query(query, 'ALTER TABLE threads ' 650 do_query(query, 'ALTER TABLE threads '
632 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),' 651 'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
633 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)') 652 'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
@@ -663,8 +682,8 @@ def trace_end():
663 do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') 682 do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
664 683
665 if (unhandled_count): 684 if (unhandled_count):
666 print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" 685 printdate("Warning: ", unhandled_count, " unhandled events")
667 print datetime.datetime.today(), "Done" 686 printdate("Done")
668 687
669def trace_unhandled(event_name, context, event_fields_dict): 688def trace_unhandled(event_name, context, event_fields_dict):
670 global unhandled_count 689 global unhandled_count
@@ -674,12 +693,14 @@ def sched__sched_switch(*x):
674 pass 693 pass
675 694
676def evsel_table(evsel_id, evsel_name, *x): 695def evsel_table(evsel_id, evsel_name, *x):
696 evsel_name = toserverstr(evsel_name)
677 n = len(evsel_name) 697 n = len(evsel_name)
678 fmt = "!hiqi" + str(n) + "s" 698 fmt = "!hiqi" + str(n) + "s"
679 value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name) 699 value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
680 evsel_file.write(value) 700 evsel_file.write(value)
681 701
682def machine_table(machine_id, pid, root_dir, *x): 702def machine_table(machine_id, pid, root_dir, *x):
703 root_dir = toserverstr(root_dir)
683 n = len(root_dir) 704 n = len(root_dir)
684 fmt = "!hiqiii" + str(n) + "s" 705 fmt = "!hiqiii" + str(n) + "s"
685 value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir) 706 value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
@@ -690,6 +711,7 @@ def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
690 thread_file.write(value) 711 thread_file.write(value)
691 712
692def comm_table(comm_id, comm_str, *x): 713def comm_table(comm_id, comm_str, *x):
714 comm_str = toserverstr(comm_str)
693 n = len(comm_str) 715 n = len(comm_str)
694 fmt = "!hiqi" + str(n) + "s" 716 fmt = "!hiqi" + str(n) + "s"
695 value = struct.pack(fmt, 2, 8, comm_id, n, comm_str) 717 value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
@@ -701,6 +723,9 @@ def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
701 comm_thread_file.write(value) 723 comm_thread_file.write(value)
702 724
703def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x): 725def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
726 short_name = toserverstr(short_name)
727 long_name = toserverstr(long_name)
728 build_id = toserverstr(build_id)
704 n1 = len(short_name) 729 n1 = len(short_name)
705 n2 = len(long_name) 730 n2 = len(long_name)
706 n3 = len(build_id) 731 n3 = len(build_id)
@@ -709,12 +734,14 @@ def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
709 dso_file.write(value) 734 dso_file.write(value)
710 735
711def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x): 736def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
737 symbol_name = toserverstr(symbol_name)
712 n = len(symbol_name) 738 n = len(symbol_name)
713 fmt = "!hiqiqiqiqiii" + str(n) + "s" 739 fmt = "!hiqiqiqiqiii" + str(n) + "s"
714 value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name) 740 value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
715 symbol_file.write(value) 741 symbol_file.write(value)
716 742
717def branch_type_table(branch_type, name, *x): 743def branch_type_table(branch_type, name, *x):
744 name = toserverstr(name)
718 n = len(name) 745 n = len(name)
719 fmt = "!hiii" + str(n) + "s" 746 fmt = "!hiii" + str(n) + "s"
720 value = struct.pack(fmt, 2, 4, branch_type, n, name) 747 value = struct.pack(fmt, 2, 4, branch_type, n, name)
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index eb63e6c7107f..3b71902a5a21 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -10,6 +10,8 @@
10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 10# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11# more details. 11# more details.
12 12
13from __future__ import print_function
14
13import os 15import os
14import sys 16import sys
15import struct 17import struct
@@ -60,11 +62,17 @@ perf_db_export_mode = True
60perf_db_export_calls = False 62perf_db_export_calls = False
61perf_db_export_callchains = False 63perf_db_export_callchains = False
62 64
65def printerr(*args, **keyword_args):
66 print(*args, file=sys.stderr, **keyword_args)
67
68def printdate(*args, **kw_args):
69 print(datetime.datetime.today(), *args, sep=' ', **kw_args)
70
63def usage(): 71def usage():
64 print >> sys.stderr, "Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]" 72 printerr("Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>]");
65 print >> sys.stderr, "where: columns 'all' or 'branches'" 73 printerr("where: columns 'all' or 'branches'");
66 print >> sys.stderr, " calls 'calls' => create calls and call_paths table" 74 printerr(" calls 'calls' => create calls and call_paths table");
67 print >> sys.stderr, " callchains 'callchains' => create call_paths table" 75 printerr(" callchains 'callchains' => create call_paths table");
68 raise Exception("Too few arguments") 76 raise Exception("Too few arguments")
69 77
70if (len(sys.argv) < 2): 78if (len(sys.argv) < 2):
@@ -100,7 +108,7 @@ def do_query_(q):
100 return 108 return
101 raise Exception("Query failed: " + q.lastError().text()) 109 raise Exception("Query failed: " + q.lastError().text())
102 110
103print datetime.datetime.today(), "Creating database..." 111printdate("Creating database ...")
104 112
105db_exists = False 113db_exists = False
106try: 114try:
@@ -378,7 +386,7 @@ if perf_db_export_calls:
378 call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") 386 call_query.prepare("INSERT INTO calls VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
379 387
380def trace_begin(): 388def trace_begin():
381 print datetime.datetime.today(), "Writing records..." 389 printdate("Writing records...")
382 do_query(query, 'BEGIN TRANSACTION') 390 do_query(query, 'BEGIN TRANSACTION')
383 # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs 391 # id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
384 evsel_table(0, "unknown") 392 evsel_table(0, "unknown")
@@ -397,14 +405,14 @@ unhandled_count = 0
397def trace_end(): 405def trace_end():
398 do_query(query, 'END TRANSACTION') 406 do_query(query, 'END TRANSACTION')
399 407
400 print datetime.datetime.today(), "Adding indexes" 408 printdate("Adding indexes")
401 if perf_db_export_calls: 409 if perf_db_export_calls:
402 do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)') 410 do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
403 do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)') 411 do_query(query, 'CREATE INDEX pid_idx ON calls (parent_id)')
404 412
405 if (unhandled_count): 413 if (unhandled_count):
406 print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events" 414 printdate("Warning: ", unhandled_count, " unhandled events")
407 print datetime.datetime.today(), "Done" 415 printdate("Done")
408 416
409def trace_unhandled(event_name, context, event_fields_dict): 417def trace_unhandled(event_name, context, event_fields_dict):
410 global unhandled_count 418 global unhandled_count
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
index afec9479ca7f..74ef92f1d19a 100755
--- a/tools/perf/scripts/python/exported-sql-viewer.py
+++ b/tools/perf/scripts/python/exported-sql-viewer.py
@@ -88,20 +88,39 @@
88# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip) 88# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
89# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel]) 89# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
90 90
91from __future__ import print_function
92
91import sys 93import sys
92import weakref 94import weakref
93import threading 95import threading
94import string 96import string
95import cPickle 97try:
98 # Python2
99 import cPickle as pickle
100 # size of pickled integer big enough for record size
101 glb_nsz = 8
102except ImportError:
103 import pickle
104 glb_nsz = 16
96import re 105import re
97import os 106import os
98from PySide.QtCore import * 107from PySide.QtCore import *
99from PySide.QtGui import * 108from PySide.QtGui import *
100from PySide.QtSql import * 109from PySide.QtSql import *
110pyside_version_1 = True
101from decimal import * 111from decimal import *
102from ctypes import * 112from ctypes import *
103from multiprocessing import Process, Array, Value, Event 113from multiprocessing import Process, Array, Value, Event
104 114
115# xrange is range in Python3
116try:
117 xrange
118except NameError:
119 xrange = range
120
121def printerr(*args, **keyword_args):
122 print(*args, file=sys.stderr, **keyword_args)
123
105# Data formatting helpers 124# Data formatting helpers
106 125
107def tohex(ip): 126def tohex(ip):
@@ -1004,10 +1023,6 @@ class ChildDataItemFinder():
1004 1023
1005glb_chunk_sz = 10000 1024glb_chunk_sz = 10000
1006 1025
1007# size of pickled integer big enough for record size
1008
1009glb_nsz = 8
1010
1011# Background process for SQL data fetcher 1026# Background process for SQL data fetcher
1012 1027
1013class SQLFetcherProcess(): 1028class SQLFetcherProcess():
@@ -1066,7 +1081,7 @@ class SQLFetcherProcess():
1066 return True 1081 return True
1067 if space >= glb_nsz: 1082 if space >= glb_nsz:
1068 # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer 1083 # Use 0 (or space < glb_nsz) to mean there is no more at the top of the buffer
1069 nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL) 1084 nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
1070 self.buffer[self.local_head : self.local_head + len(nd)] = nd 1085 self.buffer[self.local_head : self.local_head + len(nd)] = nd
1071 self.local_head = 0 1086 self.local_head = 0
1072 if self.local_tail - self.local_head > sz: 1087 if self.local_tail - self.local_head > sz:
@@ -1084,9 +1099,9 @@ class SQLFetcherProcess():
1084 self.wait_event.wait() 1099 self.wait_event.wait()
1085 1100
1086 def AddToBuffer(self, obj): 1101 def AddToBuffer(self, obj):
1087 d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL) 1102 d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
1088 n = len(d) 1103 n = len(d)
1089 nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL) 1104 nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
1090 sz = n + glb_nsz 1105 sz = n + glb_nsz
1091 self.WaitForSpace(sz) 1106 self.WaitForSpace(sz)
1092 pos = self.local_head 1107 pos = self.local_head
@@ -1198,12 +1213,12 @@ class SQLFetcher(QObject):
1198 pos = self.local_tail 1213 pos = self.local_tail
1199 if len(self.buffer) - pos < glb_nsz: 1214 if len(self.buffer) - pos < glb_nsz:
1200 pos = 0 1215 pos = 0
1201 n = cPickle.loads(self.buffer[pos : pos + glb_nsz]) 1216 n = pickle.loads(self.buffer[pos : pos + glb_nsz])
1202 if n == 0: 1217 if n == 0:
1203 pos = 0 1218 pos = 0
1204 n = cPickle.loads(self.buffer[0 : glb_nsz]) 1219 n = pickle.loads(self.buffer[0 : glb_nsz])
1205 pos += glb_nsz 1220 pos += glb_nsz
1206 obj = cPickle.loads(self.buffer[pos : pos + n]) 1221 obj = pickle.loads(self.buffer[pos : pos + n])
1207 self.local_tail = pos + n 1222 self.local_tail = pos + n
1208 return obj 1223 return obj
1209 1224
@@ -1512,6 +1527,19 @@ def BranchDataPrep(query):
1512 " (" + dsoname(query.value(15)) + ")") 1527 " (" + dsoname(query.value(15)) + ")")
1513 return data 1528 return data
1514 1529
1530def BranchDataPrepWA(query):
1531 data = []
1532 data.append(query.value(0))
1533 # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
1534 data.append("{:>19}".format(query.value(1)))
1535 for i in xrange(2, 8):
1536 data.append(query.value(i))
1537 data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
1538 " (" + dsoname(query.value(11)) + ")" + " -> " +
1539 tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
1540 " (" + dsoname(query.value(15)) + ")")
1541 return data
1542
1515# Branch data model 1543# Branch data model
1516 1544
1517class BranchModel(TreeModel): 1545class BranchModel(TreeModel):
@@ -1539,7 +1567,11 @@ class BranchModel(TreeModel):
1539 " AND evsel_id = " + str(self.event_id) + 1567 " AND evsel_id = " + str(self.event_id) +
1540 " ORDER BY samples.id" 1568 " ORDER BY samples.id"
1541 " LIMIT " + str(glb_chunk_sz)) 1569 " LIMIT " + str(glb_chunk_sz))
1542 self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample) 1570 if pyside_version_1 and sys.version_info[0] == 3:
1571 prep = BranchDataPrepWA
1572 else:
1573 prep = BranchDataPrep
1574 self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
1543 self.fetcher.done.connect(self.Update) 1575 self.fetcher.done.connect(self.Update)
1544 self.fetcher.Fetch(glb_chunk_sz) 1576 self.fetcher.Fetch(glb_chunk_sz)
1545 1577
@@ -2065,14 +2097,6 @@ def IsSelectable(db, table, sql = ""):
2065 return False 2097 return False
2066 return True 2098 return True
2067 2099
2068# SQL data preparation
2069
2070def SQLTableDataPrep(query, count):
2071 data = []
2072 for i in xrange(count):
2073 data.append(query.value(i))
2074 return data
2075
2076# SQL table data model item 2100# SQL table data model item
2077 2101
2078class SQLTableItem(): 2102class SQLTableItem():
@@ -2096,7 +2120,7 @@ class SQLTableModel(TableModel):
2096 self.more = True 2120 self.more = True
2097 self.populated = 0 2121 self.populated = 0
2098 self.column_headers = column_headers 2122 self.column_headers = column_headers
2099 self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): SQLTableDataPrep(x, y), self.AddSample) 2123 self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
2100 self.fetcher.done.connect(self.Update) 2124 self.fetcher.done.connect(self.Update)
2101 self.fetcher.Fetch(glb_chunk_sz) 2125 self.fetcher.Fetch(glb_chunk_sz)
2102 2126
@@ -2140,6 +2164,12 @@ class SQLTableModel(TableModel):
2140 def columnHeader(self, column): 2164 def columnHeader(self, column):
2141 return self.column_headers[column] 2165 return self.column_headers[column]
2142 2166
2167 def SQLTableDataPrep(self, query, count):
2168 data = []
2169 for i in xrange(count):
2170 data.append(query.value(i))
2171 return data
2172
2143# SQL automatic table data model 2173# SQL automatic table data model
2144 2174
2145class SQLAutoTableModel(SQLTableModel): 2175class SQLAutoTableModel(SQLTableModel):
@@ -2168,8 +2198,32 @@ class SQLAutoTableModel(SQLTableModel):
2168 QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'") 2198 QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
2169 while query.next(): 2199 while query.next():
2170 column_headers.append(query.value(0)) 2200 column_headers.append(query.value(0))
2201 if pyside_version_1 and sys.version_info[0] == 3:
2202 if table_name == "samples_view":
2203 self.SQLTableDataPrep = self.samples_view_DataPrep
2204 if table_name == "samples":
2205 self.SQLTableDataPrep = self.samples_DataPrep
2171 super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent) 2206 super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
2172 2207
2208 def samples_view_DataPrep(self, query, count):
2209 data = []
2210 data.append(query.value(0))
2211 # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
2212 data.append("{:>19}".format(query.value(1)))
2213 for i in xrange(2, count):
2214 data.append(query.value(i))
2215 return data
2216
2217 def samples_DataPrep(self, query, count):
2218 data = []
2219 for i in xrange(9):
2220 data.append(query.value(i))
2221 # Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
2222 data.append("{:>19}".format(query.value(9)))
2223 for i in xrange(10, count):
2224 data.append(query.value(i))
2225 return data
2226
2173# Base class for custom ResizeColumnsToContents 2227# Base class for custom ResizeColumnsToContents
2174 2228
2175class ResizeColumnsToContentsBase(QObject): 2229class ResizeColumnsToContentsBase(QObject):
@@ -2854,9 +2908,13 @@ class LibXED():
2854 ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0) 2908 ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
2855 if not ok: 2909 if not ok:
2856 return 0, "" 2910 return 0, ""
2911 if sys.version_info[0] == 2:
2912 result = inst.buffer.value
2913 else:
2914 result = inst.buffer.value.decode()
2857 # Return instruction length and the disassembled instruction text 2915 # Return instruction length and the disassembled instruction text
2858 # For now, assume the length is in byte 166 2916 # For now, assume the length is in byte 166
2859 return inst.xedd[166], inst.buffer.value 2917 return inst.xedd[166], result
2860 2918
2861def TryOpen(file_name): 2919def TryOpen(file_name):
2862 try: 2920 try:
@@ -2872,9 +2930,14 @@ def Is64Bit(f):
2872 header = f.read(7) 2930 header = f.read(7)
2873 f.seek(pos) 2931 f.seek(pos)
2874 magic = header[0:4] 2932 magic = header[0:4]
2875 eclass = ord(header[4]) 2933 if sys.version_info[0] == 2:
2876 encoding = ord(header[5]) 2934 eclass = ord(header[4])
2877 version = ord(header[6]) 2935 encoding = ord(header[5])
2936 version = ord(header[6])
2937 else:
2938 eclass = header[4]
2939 encoding = header[5]
2940 version = header[6]
2878 if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1: 2941 if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
2879 result = True if eclass == 2 else False 2942 result = True if eclass == 2 else False
2880 return result 2943 return result
@@ -2973,7 +3036,7 @@ class DBRef():
2973 3036
2974def Main(): 3037def Main():
2975 if (len(sys.argv) < 2): 3038 if (len(sys.argv) < 2):
2976 print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}" 3039 printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}");
2977 raise Exception("Too few arguments") 3040 raise Exception("Too few arguments")
2978 3041
2979 dbname = sys.argv[1] 3042 dbname = sys.argv[1]
@@ -2986,8 +3049,8 @@ def Main():
2986 3049
2987 is_sqlite3 = False 3050 is_sqlite3 = False
2988 try: 3051 try:
2989 f = open(dbname) 3052 f = open(dbname, "rb")
2990 if f.read(15) == "SQLite format 3": 3053 if f.read(15) == b'SQLite format 3':
2991 is_sqlite3 = True 3054 is_sqlite3 = True
2992 f.close() 3055 f.close()
2993 except: 3056 except:
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
index cb0a3138fa54..93818054ae20 100644
--- a/tools/perf/tests/attr/test-record-C0
+++ b/tools/perf/tests/attr/test-record-C0
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -C 0 kill >/dev/null 2>&1 3args = --no-bpf-event -C 0 kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-basic b/tools/perf/tests/attr/test-record-basic
index 85a23cf35ba1..b0ca42a5ecc9 100644
--- a/tools/perf/tests/attr/test-record-basic
+++ b/tools/perf/tests/attr/test-record-basic
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = kill >/dev/null 2>&1 3args = --no-bpf-event kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-any b/tools/perf/tests/attr/test-record-branch-any
index 81f839e2fad0..1a99b3ce6b89 100644
--- a/tools/perf/tests/attr/test-record-branch-any
+++ b/tools/perf/tests/attr/test-record-branch-any
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -b kill >/dev/null 2>&1 3args = --no-bpf-event -b kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any b/tools/perf/tests/attr/test-record-branch-filter-any
index 357421f4dfce..709768b508c6 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any
+++ b/tools/perf/tests/attr/test-record-branch-filter-any
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j any kill >/dev/null 2>&1 3args = --no-bpf-event -j any kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_call b/tools/perf/tests/attr/test-record-branch-filter-any_call
index dbc55f2ab845..f943221f7825 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_call
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j any_call kill >/dev/null 2>&1 3args = --no-bpf-event -j any_call kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_ret b/tools/perf/tests/attr/test-record-branch-filter-any_ret
index a0824ff8e131..fd4f5b4154a9 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-any_ret
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_ret
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j any_ret kill >/dev/null 2>&1 3args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-hv b/tools/perf/tests/attr/test-record-branch-filter-hv
index f34d6f120181..4e52d685ebe1 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-hv
+++ b/tools/perf/tests/attr/test-record-branch-filter-hv
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j hv kill >/dev/null 2>&1 3args = --no-bpf-event -j hv kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-ind_call b/tools/perf/tests/attr/test-record-branch-filter-ind_call
index b86a35232248..e08c6ab3796e 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-ind_call
+++ b/tools/perf/tests/attr/test-record-branch-filter-ind_call
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j ind_call kill >/dev/null 2>&1 3args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-k b/tools/perf/tests/attr/test-record-branch-filter-k
index d3fbc5e1858a..b4b98f84fc2f 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-k
+++ b/tools/perf/tests/attr/test-record-branch-filter-k
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j k kill >/dev/null 2>&1 3args = --no-bpf-event -j k kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-filter-u b/tools/perf/tests/attr/test-record-branch-filter-u
index a318f0dda173..fb9610edbb0d 100644
--- a/tools/perf/tests/attr/test-record-branch-filter-u
+++ b/tools/perf/tests/attr/test-record-branch-filter-u
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -j u kill >/dev/null 2>&1 3args = --no-bpf-event -j u kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-count b/tools/perf/tests/attr/test-record-count
index 34f6cc577263..5e9b9019d786 100644
--- a/tools/perf/tests/attr/test-record-count
+++ b/tools/perf/tests/attr/test-record-count
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -c 123 kill >/dev/null 2>&1 3args = --no-bpf-event -c 123 kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-data b/tools/perf/tests/attr/test-record-data
index a9cf2233b0ce..a99bb13149c2 100644
--- a/tools/perf/tests/attr/test-record-data
+++ b/tools/perf/tests/attr/test-record-data
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -d kill >/dev/null 2>&1 3args = --no-bpf-event -d kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-freq b/tools/perf/tests/attr/test-record-freq
index bf4cb459f0d5..89e29f6b2ae0 100644
--- a/tools/perf/tests/attr/test-record-freq
+++ b/tools/perf/tests/attr/test-record-freq
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -F 100 kill >/dev/null 2>&1 3args = --no-bpf-event -F 100 kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
index 0b216e69760c..5d8234d50845 100644
--- a/tools/perf/tests/attr/test-record-graph-default
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -g kill >/dev/null 2>&1 3args = --no-bpf-event -g kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-dwarf b/tools/perf/tests/attr/test-record-graph-dwarf
index da2fa73bd0a2..ae92061d611d 100644
--- a/tools/perf/tests/attr/test-record-graph-dwarf
+++ b/tools/perf/tests/attr/test-record-graph-dwarf
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = --call-graph dwarf -- kill >/dev/null 2>&1 3args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
index 625d190bb798..5630521c0b0f 100644
--- a/tools/perf/tests/attr/test-record-graph-fp
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = --call-graph fp kill >/dev/null 2>&1 3args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
index 618ba1c17474..14ee60fd3f41 100644
--- a/tools/perf/tests/attr/test-record-group
+++ b/tools/perf/tests/attr/test-record-group
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = --group -e cycles,instructions kill >/dev/null 2>&1 3args = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event-1:base-record] 6[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
index f0729c454f16..300b9f7e6d69 100644
--- a/tools/perf/tests/attr/test-record-group-sampling
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1 3args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event-1:base-record] 6[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
index 48e8bd12fe46..3ffe246e0228 100644
--- a/tools/perf/tests/attr/test-record-group1
+++ b/tools/perf/tests/attr/test-record-group1
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -e '{cycles,instructions}' kill >/dev/null 2>&1 3args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event-1:base-record] 6[event-1:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-buffering b/tools/perf/tests/attr/test-record-no-buffering
index aa3956d8fe20..583dcbb078ba 100644
--- a/tools/perf/tests/attr/test-record-no-buffering
+++ b/tools/perf/tests/attr/test-record-no-buffering
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = --no-buffering kill >/dev/null 2>&1 3args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit
index 560943decb87..15d1dc162e1c 100644
--- a/tools/perf/tests/attr/test-record-no-inherit
+++ b/tools/perf/tests/attr/test-record-no-inherit
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -i kill >/dev/null 2>&1 3args = --no-bpf-event -i kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-no-samples b/tools/perf/tests/attr/test-record-no-samples
index 8eb73ab639e0..596fbd6d5a2c 100644
--- a/tools/perf/tests/attr/test-record-no-samples
+++ b/tools/perf/tests/attr/test-record-no-samples
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -n kill >/dev/null 2>&1 3args = --no-bpf-event -n kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-period b/tools/perf/tests/attr/test-record-period
index 69bc748f0f27..119101154c5e 100644
--- a/tools/perf/tests/attr/test-record-period
+++ b/tools/perf/tests/attr/test-record-period
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -c 100 -P kill >/dev/null 2>&1 3args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-raw b/tools/perf/tests/attr/test-record-raw
index a188a614a44c..13a5f7860c78 100644
--- a/tools/perf/tests/attr/test-record-raw
+++ b/tools/perf/tests/attr/test-record-raw
@@ -1,6 +1,6 @@
1[config] 1[config]
2command = record 2command = record
3args = -R kill >/dev/null 2>&1 3args = --no-bpf-event -R kill >/dev/null 2>&1
4ret = 1 4ret = 1
5 5
6[event:base-record] 6[event:base-record]
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index 6d598cc071ae..1a9c3becf5ff 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -18,7 +18,7 @@ static void testcase(void)
18 int i; 18 int i;
19 19
20 for (i = 0; i < NR_ITERS; i++) { 20 for (i = 0; i < NR_ITERS; i++) {
21 char proc_name[10]; 21 char proc_name[15];
22 22
23 snprintf(proc_name, sizeof(proc_name), "p:%d\n", i); 23 snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
24 prctl(PR_SET_NAME, proc_name); 24 prctl(PR_SET_NAME, proc_name);
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index ea7acf403727..71f60c0f9faa 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -85,5 +85,6 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
85 if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) 85 if (perf_evsel__test_field(evsel, "target_cpu", 4, true))
86 ret = -1; 86 ret = -1;
87 87
88 perf_evsel__delete(evsel);
88 return ret; 89 return ret;
89} 90}
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
index 01f0706995a9..9acc1e80b936 100644
--- a/tools/perf/tests/expr.c
+++ b/tools/perf/tests/expr.c
@@ -19,7 +19,7 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
19 const char *p; 19 const char *p;
20 const char **other; 20 const char **other;
21 double val; 21 double val;
22 int ret; 22 int i, ret;
23 struct parse_ctx ctx; 23 struct parse_ctx ctx;
24 int num_other; 24 int num_other;
25 25
@@ -56,6 +56,9 @@ int test__expr(struct test *t __maybe_unused, int subtest __maybe_unused)
56 TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ")); 56 TEST_ASSERT_VAL("find other", !strcmp(other[1], "BAZ"));
57 TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO")); 57 TEST_ASSERT_VAL("find other", !strcmp(other[2], "BOZO"));
58 TEST_ASSERT_VAL("find other", other[3] == NULL); 58 TEST_ASSERT_VAL("find other", other[3] == NULL);
59
60 for (i = 0; i < num_other; i++)
61 free((void *)other[i]);
59 free((void *)other); 62 free((void *)other);
60 63
61 return 0; 64 return 0;
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
index c531e6deb104..493ecb611540 100644
--- a/tools/perf/tests/openat-syscall-all-cpus.c
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -45,7 +45,7 @@ int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int
45 if (IS_ERR(evsel)) { 45 if (IS_ERR(evsel)) {
46 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat"); 46 tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
47 pr_debug("%s\n", errbuf); 47 pr_debug("%s\n", errbuf);
48 goto out_thread_map_delete; 48 goto out_cpu_map_delete;
49 } 49 }
50 50
51 if (perf_evsel__open(evsel, cpus, threads) < 0) { 51 if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -119,6 +119,8 @@ out_close_fd:
119 perf_evsel__close_fd(evsel); 119 perf_evsel__close_fd(evsel);
120out_evsel_delete: 120out_evsel_delete:
121 perf_evsel__delete(evsel); 121 perf_evsel__delete(evsel);
122out_cpu_map_delete:
123 cpu_map__put(cpus);
122out_thread_map_delete: 124out_thread_map_delete:
123 thread_map__put(threads); 125 thread_map__put(threads);
124 return err; 126 return err;
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 32bac9c0d694..5f5eefcb3c74 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -1,15 +1,18 @@
1#!/bin/sh 1#!/bin/sh
2# SPDX-License-Identifier: LGPL-2.1 2# SPDX-License-Identifier: LGPL-2.1
3 3
4if [ $# -ne 2 ] ; then 4if [ $# -ne 3 ] ; then
5 [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/` 5 [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
6 linux_header_dir=tools/include/uapi/linux
6 header_dir=tools/include/uapi/asm-generic 7 header_dir=tools/include/uapi/asm-generic
7 arch_header_dir=tools/arch/${hostarch}/include/uapi/asm 8 arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
8else 9else
9 header_dir=$1 10 linux_header_dir=$1
10 arch_header_dir=$2 11 header_dir=$2
12 arch_header_dir=$3
11fi 13fi
12 14
15linux_mman=${linux_header_dir}/mman.h
13arch_mman=${arch_header_dir}/mman.h 16arch_mman=${arch_header_dir}/mman.h
14 17
15# those in egrep -vw are flags, we want just the bits 18# those in egrep -vw are flags, we want just the bits
@@ -20,6 +23,11 @@ egrep -q $regex ${arch_mman} && \
20(egrep $regex ${arch_mman} | \ 23(egrep $regex ${arch_mman} | \
21 sed -r "s/$regex/\2 \1/g" | \ 24 sed -r "s/$regex/\2 \1/g" | \
22 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n") 25 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
26egrep -q $regex ${linux_mman} && \
27(egrep $regex ${linux_mman} | \
28 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
29 sed -r "s/$regex/\2 \1/g" | \
30 xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
23([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) && 31([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
24(egrep $regex ${header_dir}/mman-common.h | \ 32(egrep $regex ${header_dir}/mman-common.h | \
25 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \ 33 egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 4f75561424ed..4ad37d8c7d6a 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -611,14 +611,16 @@ void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
611 browser->top = browser->entries; 611 browser->top = browser->entries;
612 break; 612 break;
613 case SEEK_CUR: 613 case SEEK_CUR:
614 browser->top = browser->top + browser->top_idx + offset; 614 browser->top = (char **)browser->top + offset;
615 break; 615 break;
616 case SEEK_END: 616 case SEEK_END:
617 browser->top = browser->top + browser->nr_entries - 1 + offset; 617 browser->top = (char **)browser->entries + browser->nr_entries - 1 + offset;
618 break; 618 break;
619 default: 619 default:
620 return; 620 return;
621 } 621 }
622 assert((char **)browser->top < (char **)browser->entries + browser->nr_entries);
623 assert((char **)browser->top >= (char **)browser->entries);
622} 624}
623 625
624unsigned int ui_browser__argv_refresh(struct ui_browser *browser) 626unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
@@ -630,7 +632,9 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
630 browser->top = browser->entries; 632 browser->top = browser->entries;
631 633
632 pos = (char **)browser->top; 634 pos = (char **)browser->top;
633 while (idx < browser->nr_entries) { 635 while (idx < browser->nr_entries &&
636 row < (unsigned)SLtt_Screen_Rows - 1) {
637 assert(pos < (char **)browser->entries + browser->nr_entries);
634 if (!browser->filter || !browser->filter(browser, *pos)) { 638 if (!browser->filter || !browser->filter(browser, *pos)) {
635 ui_browser__gotorc(browser, row, 0); 639 ui_browser__gotorc(browser, row, 0);
636 browser->write(browser, pos, row); 640 browser->write(browser, pos, row);
diff --git a/tools/perf/ui/browsers/Build b/tools/perf/ui/browsers/Build
index 8fee56b46502..fdf86f7981ca 100644
--- a/tools/perf/ui/browsers/Build
+++ b/tools/perf/ui/browsers/Build
@@ -3,6 +3,7 @@ perf-y += hists.o
3perf-y += map.o 3perf-y += map.o
4perf-y += scripts.o 4perf-y += scripts.o
5perf-y += header.o 5perf-y += header.o
6perf-y += res_sample.o
6 7
7CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST 8CFLAGS_annotate.o += -DENABLE_SLFUTURE_CONST
8CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST 9CFLAGS_hists.o += -DENABLE_SLFUTURE_CONST
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 35bdfd8b1e71..98d934a36d86 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -750,7 +750,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
750 continue; 750 continue;
751 case 'r': 751 case 'r':
752 { 752 {
753 script_browse(NULL); 753 script_browse(NULL, NULL);
754 continue; 754 continue;
755 } 755 }
756 case 'k': 756 case 'k':
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index aef800d97ea1..3421ecbdd3f0 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -7,6 +7,7 @@
7#include <string.h> 7#include <string.h>
8#include <linux/rbtree.h> 8#include <linux/rbtree.h>
9#include <sys/ttydefaults.h> 9#include <sys/ttydefaults.h>
10#include <linux/time64.h>
10 11
11#include "../../util/callchain.h" 12#include "../../util/callchain.h"
12#include "../../util/evsel.h" 13#include "../../util/evsel.h"
@@ -30,6 +31,7 @@
30#include "srcline.h" 31#include "srcline.h"
31#include "string2.h" 32#include "string2.h"
32#include "units.h" 33#include "units.h"
34#include "time-utils.h"
33 35
34#include "sane_ctype.h" 36#include "sane_ctype.h"
35 37
@@ -1224,6 +1226,8 @@ void hist_browser__init_hpp(void)
1224 hist_browser__hpp_color_overhead_guest_us; 1226 hist_browser__hpp_color_overhead_guest_us;
1225 perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color = 1227 perf_hpp__format[PERF_HPP__OVERHEAD_ACC].color =
1226 hist_browser__hpp_color_overhead_acc; 1228 hist_browser__hpp_color_overhead_acc;
1229
1230 res_sample_init();
1227} 1231}
1228 1232
1229static int hist_browser__show_entry(struct hist_browser *browser, 1233static int hist_browser__show_entry(struct hist_browser *browser,
@@ -2338,9 +2342,12 @@ close_file_and_continue:
2338} 2342}
2339 2343
2340struct popup_action { 2344struct popup_action {
2345 unsigned long time;
2341 struct thread *thread; 2346 struct thread *thread;
2342 struct map_symbol ms; 2347 struct map_symbol ms;
2343 int socket; 2348 int socket;
2349 struct perf_evsel *evsel;
2350 enum rstype rstype;
2344 2351
2345 int (*fn)(struct hist_browser *browser, struct popup_action *act); 2352 int (*fn)(struct hist_browser *browser, struct popup_action *act);
2346}; 2353};
@@ -2527,46 +2534,137 @@ static int
2527do_run_script(struct hist_browser *browser __maybe_unused, 2534do_run_script(struct hist_browser *browser __maybe_unused,
2528 struct popup_action *act) 2535 struct popup_action *act)
2529{ 2536{
2530 char script_opt[64]; 2537 char *script_opt;
2531 memset(script_opt, 0, sizeof(script_opt)); 2538 int len;
2539 int n = 0;
2532 2540
2541 len = 100;
2542 if (act->thread)
2543 len += strlen(thread__comm_str(act->thread));
2544 else if (act->ms.sym)
2545 len += strlen(act->ms.sym->name);
2546 script_opt = malloc(len);
2547 if (!script_opt)
2548 return -1;
2549
2550 script_opt[0] = 0;
2533 if (act->thread) { 2551 if (act->thread) {
2534 scnprintf(script_opt, sizeof(script_opt), " -c %s ", 2552 n = scnprintf(script_opt, len, " -c %s ",
2535 thread__comm_str(act->thread)); 2553 thread__comm_str(act->thread));
2536 } else if (act->ms.sym) { 2554 } else if (act->ms.sym) {
2537 scnprintf(script_opt, sizeof(script_opt), " -S %s ", 2555 n = scnprintf(script_opt, len, " -S %s ",
2538 act->ms.sym->name); 2556 act->ms.sym->name);
2539 } 2557 }
2540 2558
2541 script_browse(script_opt); 2559 if (act->time) {
2560 char start[32], end[32];
2561 unsigned long starttime = act->time;
2562 unsigned long endtime = act->time + symbol_conf.time_quantum;
2563
2564 if (starttime == endtime) { /* Display 1ms as fallback */
2565 starttime -= 1*NSEC_PER_MSEC;
2566 endtime += 1*NSEC_PER_MSEC;
2567 }
2568 timestamp__scnprintf_usec(starttime, start, sizeof start);
2569 timestamp__scnprintf_usec(endtime, end, sizeof end);
2570 n += snprintf(script_opt + n, len - n, " --time %s,%s", start, end);
2571 }
2572
2573 script_browse(script_opt, act->evsel);
2574 free(script_opt);
2542 return 0; 2575 return 0;
2543} 2576}
2544 2577
2545static int 2578static int
2546add_script_opt(struct hist_browser *browser __maybe_unused, 2579do_res_sample_script(struct hist_browser *browser __maybe_unused,
2580 struct popup_action *act)
2581{
2582 struct hist_entry *he;
2583
2584 he = hist_browser__selected_entry(browser);
2585 res_sample_browse(he->res_samples, he->num_res, act->evsel, act->rstype);
2586 return 0;
2587}
2588
2589static int
2590add_script_opt_2(struct hist_browser *browser __maybe_unused,
2547 struct popup_action *act, char **optstr, 2591 struct popup_action *act, char **optstr,
2548 struct thread *thread, struct symbol *sym) 2592 struct thread *thread, struct symbol *sym,
2593 struct perf_evsel *evsel, const char *tstr)
2549{ 2594{
2595
2550 if (thread) { 2596 if (thread) {
2551 if (asprintf(optstr, "Run scripts for samples of thread [%s]", 2597 if (asprintf(optstr, "Run scripts for samples of thread [%s]%s",
2552 thread__comm_str(thread)) < 0) 2598 thread__comm_str(thread), tstr) < 0)
2553 return 0; 2599 return 0;
2554 } else if (sym) { 2600 } else if (sym) {
2555 if (asprintf(optstr, "Run scripts for samples of symbol [%s]", 2601 if (asprintf(optstr, "Run scripts for samples of symbol [%s]%s",
2556 sym->name) < 0) 2602 sym->name, tstr) < 0)
2557 return 0; 2603 return 0;
2558 } else { 2604 } else {
2559 if (asprintf(optstr, "Run scripts for all samples") < 0) 2605 if (asprintf(optstr, "Run scripts for all samples%s", tstr) < 0)
2560 return 0; 2606 return 0;
2561 } 2607 }
2562 2608
2563 act->thread = thread; 2609 act->thread = thread;
2564 act->ms.sym = sym; 2610 act->ms.sym = sym;
2611 act->evsel = evsel;
2565 act->fn = do_run_script; 2612 act->fn = do_run_script;
2566 return 1; 2613 return 1;
2567} 2614}
2568 2615
2569static int 2616static int
2617add_script_opt(struct hist_browser *browser,
2618 struct popup_action *act, char **optstr,
2619 struct thread *thread, struct symbol *sym,
2620 struct perf_evsel *evsel)
2621{
2622 int n, j;
2623 struct hist_entry *he;
2624
2625 n = add_script_opt_2(browser, act, optstr, thread, sym, evsel, "");
2626
2627 he = hist_browser__selected_entry(browser);
2628 if (sort_order && strstr(sort_order, "time")) {
2629 char tstr[128];
2630
2631 optstr++;
2632 act++;
2633 j = sprintf(tstr, " in ");
2634 j += timestamp__scnprintf_usec(he->time, tstr + j,
2635 sizeof tstr - j);
2636 j += sprintf(tstr + j, "-");
2637 timestamp__scnprintf_usec(he->time + symbol_conf.time_quantum,
2638 tstr + j, sizeof tstr - j);
2639 n += add_script_opt_2(browser, act, optstr, thread, sym,
2640 evsel, tstr);
2641 act->time = he->time;
2642 }
2643 return n;
2644}
2645
2646static int
2647add_res_sample_opt(struct hist_browser *browser __maybe_unused,
2648 struct popup_action *act, char **optstr,
2649 struct res_sample *res_sample,
2650 struct perf_evsel *evsel,
2651 enum rstype type)
2652{
2653 if (!res_sample)
2654 return 0;
2655
2656 if (asprintf(optstr, "Show context for individual samples %s",
2657 type == A_ASM ? "with assembler" :
2658 type == A_SOURCE ? "with source" : "") < 0)
2659 return 0;
2660
2661 act->fn = do_res_sample_script;
2662 act->evsel = evsel;
2663 act->rstype = type;
2664 return 1;
2665}
2666
2667static int
2570do_switch_data(struct hist_browser *browser __maybe_unused, 2668do_switch_data(struct hist_browser *browser __maybe_unused,
2571 struct popup_action *act __maybe_unused) 2669 struct popup_action *act __maybe_unused)
2572{ 2670{
@@ -3031,7 +3129,7 @@ skip_annotation:
3031 nr_options += add_script_opt(browser, 3129 nr_options += add_script_opt(browser,
3032 &actions[nr_options], 3130 &actions[nr_options],
3033 &options[nr_options], 3131 &options[nr_options],
3034 thread, NULL); 3132 thread, NULL, evsel);
3035 } 3133 }
3036 /* 3134 /*
3037 * Note that browser->selection != NULL 3135 * Note that browser->selection != NULL
@@ -3046,11 +3144,24 @@ skip_annotation:
3046 nr_options += add_script_opt(browser, 3144 nr_options += add_script_opt(browser,
3047 &actions[nr_options], 3145 &actions[nr_options],
3048 &options[nr_options], 3146 &options[nr_options],
3049 NULL, browser->selection->sym); 3147 NULL, browser->selection->sym,
3148 evsel);
3050 } 3149 }
3051 } 3150 }
3052 nr_options += add_script_opt(browser, &actions[nr_options], 3151 nr_options += add_script_opt(browser, &actions[nr_options],
3053 &options[nr_options], NULL, NULL); 3152 &options[nr_options], NULL, NULL, evsel);
3153 nr_options += add_res_sample_opt(browser, &actions[nr_options],
3154 &options[nr_options],
3155 hist_browser__selected_entry(browser)->res_samples,
3156 evsel, A_NORMAL);
3157 nr_options += add_res_sample_opt(browser, &actions[nr_options],
3158 &options[nr_options],
3159 hist_browser__selected_entry(browser)->res_samples,
3160 evsel, A_ASM);
3161 nr_options += add_res_sample_opt(browser, &actions[nr_options],
3162 &options[nr_options],
3163 hist_browser__selected_entry(browser)->res_samples,
3164 evsel, A_SOURCE);
3054 nr_options += add_switch_opt(browser, &actions[nr_options], 3165 nr_options += add_switch_opt(browser, &actions[nr_options],
3055 &options[nr_options]); 3166 &options[nr_options]);
3056skip_scripting: 3167skip_scripting:
diff --git a/tools/perf/ui/browsers/res_sample.c b/tools/perf/ui/browsers/res_sample.c
new file mode 100644
index 000000000000..c0dd73176d42
--- /dev/null
+++ b/tools/perf/ui/browsers/res_sample.c
@@ -0,0 +1,91 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Display a menu with individual samples to browse with perf script */
3#include "util.h"
4#include "hist.h"
5#include "evsel.h"
6#include "hists.h"
7#include "sort.h"
8#include "config.h"
9#include "time-utils.h"
10#include <linux/time64.h>
11
12static u64 context_len = 10 * NSEC_PER_MSEC;
13
14static int res_sample_config(const char *var, const char *value, void *data __maybe_unused)
15{
16 if (!strcmp(var, "samples.context"))
17 return perf_config_u64(&context_len, var, value);
18 return 0;
19}
20
21void res_sample_init(void)
22{
23 perf_config(res_sample_config, NULL);
24}
25
26int res_sample_browse(struct res_sample *res_samples, int num_res,
27 struct perf_evsel *evsel, enum rstype rstype)
28{
29 char **names;
30 int i, n;
31 int choice;
32 char *cmd;
33 char pbuf[256], tidbuf[32], cpubuf[32];
34 const char *perf = perf_exe(pbuf, sizeof pbuf);
35 char trange[128], tsample[64];
36 struct res_sample *r;
37 char extra_format[256];
38
39 names = calloc(num_res, sizeof(char *));
40 if (!names)
41 return -1;
42 for (i = 0; i < num_res; i++) {
43 char tbuf[64];
44
45 timestamp__scnprintf_nsec(res_samples[i].time, tbuf, sizeof tbuf);
46 if (asprintf(&names[i], "%s: CPU %d tid %d", tbuf,
47 res_samples[i].cpu, res_samples[i].tid) < 0) {
48 while (--i >= 0)
49 free(names[i]);
50 free(names);
51 return -1;
52 }
53 }
54 choice = ui__popup_menu(num_res, names);
55 for (i = 0; i < num_res; i++)
56 free(names[i]);
57 free(names);
58
59 if (choice < 0 || choice >= num_res)
60 return -1;
61 r = &res_samples[choice];
62
63 n = timestamp__scnprintf_nsec(r->time - context_len, trange, sizeof trange);
64 trange[n++] = ',';
65 timestamp__scnprintf_nsec(r->time + context_len, trange + n, sizeof trange - n);
66
67 timestamp__scnprintf_nsec(r->time, tsample, sizeof tsample);
68
69 attr_to_script(extra_format, &evsel->attr);
70
71 if (asprintf(&cmd, "%s script %s%s --time %s %s%s %s%s --ns %s %s %s %s %s | less +/%s",
72 perf,
73 input_name ? "-i " : "",
74 input_name ? input_name : "",
75 trange,
76 r->cpu >= 0 ? "--cpu " : "",
77 r->cpu >= 0 ? (sprintf(cpubuf, "%d", r->cpu), cpubuf) : "",
78 r->tid ? "--tid " : "",
79 r->tid ? (sprintf(tidbuf, "%d", r->tid), tidbuf) : "",
80 extra_format,
81 rstype == A_ASM ? "-F +insn --xed" :
82 rstype == A_SOURCE ? "-F +srcline,+srccode" : "",
83 symbol_conf.inline_name ? "--inline" : "",
84 "--show-lost-events ",
85 r->tid ? "--show-switch-events --show-task-events " : "",
86 tsample) < 0)
87 return -1;
88 run_script(cmd);
89 free(cmd);
90 return 0;
91}
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 90a32ac69e76..27cf3ab88d13 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -1,34 +1,12 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <elf.h>
3#include <inttypes.h>
4#include <sys/ttydefaults.h>
5#include <string.h>
6#include "../../util/sort.h" 2#include "../../util/sort.h"
7#include "../../util/util.h" 3#include "../../util/util.h"
8#include "../../util/hist.h" 4#include "../../util/hist.h"
9#include "../../util/debug.h" 5#include "../../util/debug.h"
10#include "../../util/symbol.h" 6#include "../../util/symbol.h"
11#include "../browser.h" 7#include "../browser.h"
12#include "../helpline.h"
13#include "../libslang.h" 8#include "../libslang.h"
14 9#include "config.h"
15/* 2048 lines should be enough for a script output */
16#define MAX_LINES 2048
17
18/* 160 bytes for one output line */
19#define AVERAGE_LINE_LEN 160
20
21struct script_line {
22 struct list_head node;
23 char line[AVERAGE_LINE_LEN];
24};
25
26struct perf_script_browser {
27 struct ui_browser b;
28 struct list_head entries;
29 const char *script_name;
30 int nr_lines;
31};
32 10
33#define SCRIPT_NAMELEN 128 11#define SCRIPT_NAMELEN 128
34#define SCRIPT_MAX_NO 64 12#define SCRIPT_MAX_NO 64
@@ -40,149 +18,169 @@ struct perf_script_browser {
40 */ 18 */
41#define SCRIPT_FULLPATH_LEN 256 19#define SCRIPT_FULLPATH_LEN 256
42 20
21struct script_config {
22 const char **names;
23 char **paths;
24 int index;
25 const char *perf;
26 char extra_format[256];
27};
28
29void attr_to_script(char *extra_format, struct perf_event_attr *attr)
30{
31 extra_format[0] = 0;
32 if (attr->read_format & PERF_FORMAT_GROUP)
33 strcat(extra_format, " -F +metric");
34 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK)
35 strcat(extra_format, " -F +brstackinsn --xed");
36 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
37 strcat(extra_format, " -F +iregs");
38 if (attr->sample_type & PERF_SAMPLE_REGS_USER)
39 strcat(extra_format, " -F +uregs");
40 if (attr->sample_type & PERF_SAMPLE_PHYS_ADDR)
41 strcat(extra_format, " -F +phys_addr");
42}
43
44static int add_script_option(const char *name, const char *opt,
45 struct script_config *c)
46{
47 c->names[c->index] = name;
48 if (asprintf(&c->paths[c->index],
49 "%s script %s -F +metric %s %s",
50 c->perf, opt, symbol_conf.inline_name ? " --inline" : "",
51 c->extra_format) < 0)
52 return -1;
53 c->index++;
54 return 0;
55}
56
57static int scripts_config(const char *var, const char *value, void *data)
58{
59 struct script_config *c = data;
60
61 if (!strstarts(var, "scripts."))
62 return -1;
63 if (c->index >= SCRIPT_MAX_NO)
64 return -1;
65 c->names[c->index] = strdup(var + 7);
66 if (!c->names[c->index])
67 return -1;
68 if (asprintf(&c->paths[c->index], "%s %s", value,
69 c->extra_format) < 0)
70 return -1;
71 c->index++;
72 return 0;
73}
74
43/* 75/*
44 * When success, will copy the full path of the selected script 76 * When success, will copy the full path of the selected script
45 * into the buffer pointed by script_name, and return 0. 77 * into the buffer pointed by script_name, and return 0.
46 * Return -1 on failure. 78 * Return -1 on failure.
47 */ 79 */
48static int list_scripts(char *script_name) 80static int list_scripts(char *script_name, bool *custom,
81 struct perf_evsel *evsel)
49{ 82{
50 char *buf, *names[SCRIPT_MAX_NO], *paths[SCRIPT_MAX_NO]; 83 char *buf, *paths[SCRIPT_MAX_NO], *names[SCRIPT_MAX_NO];
51 int i, num, choice, ret = -1; 84 int i, num, choice;
85 int ret = 0;
86 int max_std, custom_perf;
87 char pbuf[256];
88 const char *perf = perf_exe(pbuf, sizeof pbuf);
89 struct script_config scriptc = {
90 .names = (const char **)names,
91 .paths = paths,
92 .perf = perf
93 };
94
95 script_name[0] = 0;
52 96
53 /* Preset the script name to SCRIPT_NAMELEN */ 97 /* Preset the script name to SCRIPT_NAMELEN */
54 buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN)); 98 buf = malloc(SCRIPT_MAX_NO * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN));
55 if (!buf) 99 if (!buf)
56 return ret; 100 return -1;
57 101
58 for (i = 0; i < SCRIPT_MAX_NO; i++) { 102 if (evsel)
59 names[i] = buf + i * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN); 103 attr_to_script(scriptc.extra_format, &evsel->attr);
104 add_script_option("Show individual samples", "", &scriptc);
105 add_script_option("Show individual samples with assembler", "-F +insn --xed",
106 &scriptc);
107 add_script_option("Show individual samples with source", "-F +srcline,+srccode",
108 &scriptc);
109 perf_config(scripts_config, &scriptc);
110 custom_perf = scriptc.index;
111 add_script_option("Show samples with custom perf script arguments", "", &scriptc);
112 i = scriptc.index;
113 max_std = i;
114
115 for (; i < SCRIPT_MAX_NO; i++) {
116 names[i] = buf + (i - max_std) * (SCRIPT_NAMELEN + SCRIPT_FULLPATH_LEN);
60 paths[i] = names[i] + SCRIPT_NAMELEN; 117 paths[i] = names[i] + SCRIPT_NAMELEN;
61 } 118 }
62 119
63 num = find_scripts(names, paths); 120 num = find_scripts(names + max_std, paths + max_std, SCRIPT_MAX_NO - max_std,
64 if (num > 0) { 121 SCRIPT_FULLPATH_LEN);
65 choice = ui__popup_menu(num, names); 122 if (num < 0)
66 if (choice < num && choice >= 0) { 123 num = 0;
67 strcpy(script_name, paths[choice]); 124 choice = ui__popup_menu(num + max_std, (char * const *)names);
68 ret = 0; 125 if (choice < 0) {
69 } 126 ret = -1;
127 goto out;
70 } 128 }
129 if (choice == custom_perf) {
130 char script_args[50];
131 int key = ui_browser__input_window("perf script command",
132 "Enter perf script command line (without perf script prefix)",
133 script_args, "", 0);
134 if (key != K_ENTER)
135 return -1;
136 sprintf(script_name, "%s script %s", perf, script_args);
137 } else if (choice < num + max_std) {
138 strcpy(script_name, paths[choice]);
139 }
140 *custom = choice >= max_std;
71 141
142out:
72 free(buf); 143 free(buf);
144 for (i = 0; i < max_std; i++)
145 free(paths[i]);
73 return ret; 146 return ret;
74} 147}
75 148
76static void script_browser__write(struct ui_browser *browser, 149void run_script(char *cmd)
77 void *entry, int row)
78{ 150{
79 struct script_line *sline = list_entry(entry, struct script_line, node); 151 pr_debug("Running %s\n", cmd);
80 bool current_entry = ui_browser__is_current_entry(browser, row); 152 SLang_reset_tty();
81 153 if (system(cmd) < 0)
82 ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : 154 pr_warning("Cannot run %s\n", cmd);
83 HE_COLORSET_NORMAL); 155 /*
84 156 * SLang doesn't seem to reset the whole terminal, so be more
85 ui_browser__write_nstring(browser, sline->line, browser->width); 157 * forceful to get back to the original state.
158 */
159 printf("\033[c\033[H\033[J");
160 fflush(stdout);
161 SLang_init_tty(0, 0, 0);
162 SLsmg_refresh();
86} 163}
87 164
88static int script_browser__run(struct perf_script_browser *browser) 165int script_browse(const char *script_opt, struct perf_evsel *evsel)
89{ 166{
90 int key; 167 char *cmd, script_name[SCRIPT_FULLPATH_LEN];
168 bool custom = false;
91 169
92 if (ui_browser__show(&browser->b, browser->script_name, 170 memset(script_name, 0, SCRIPT_FULLPATH_LEN);
93 "Press ESC to exit") < 0) 171 if (list_scripts(script_name, &custom, evsel))
94 return -1; 172 return -1;
95 173
96 while (1) { 174 if (asprintf(&cmd, "%s%s %s %s%s 2>&1 | less",
97 key = ui_browser__run(&browser->b, 0); 175 custom ? "perf script -s " : "",
98 176 script_name,
99 /* We can add some special key handling here if needed */ 177 script_opt ? script_opt : "",
100 break; 178 input_name ? "-i " : "",
101 } 179 input_name ? input_name : "") < 0)
102
103 ui_browser__hide(&browser->b);
104 return key;
105}
106
107
108int script_browse(const char *script_opt)
109{
110 char cmd[SCRIPT_FULLPATH_LEN*2], script_name[SCRIPT_FULLPATH_LEN];
111 char *line = NULL;
112 size_t len = 0;
113 ssize_t retlen;
114 int ret = -1, nr_entries = 0;
115 FILE *fp;
116 void *buf;
117 struct script_line *sline;
118
119 struct perf_script_browser script = {
120 .b = {
121 .refresh = ui_browser__list_head_refresh,
122 .seek = ui_browser__list_head_seek,
123 .write = script_browser__write,
124 },
125 .script_name = script_name,
126 };
127
128 INIT_LIST_HEAD(&script.entries);
129
130 /* Save each line of the output in one struct script_line object. */
131 buf = zalloc((sizeof(*sline)) * MAX_LINES);
132 if (!buf)
133 return -1; 180 return -1;
134 sline = buf;
135
136 memset(script_name, 0, SCRIPT_FULLPATH_LEN);
137 if (list_scripts(script_name))
138 goto exit;
139
140 sprintf(cmd, "perf script -s %s ", script_name);
141 181
142 if (script_opt) 182 run_script(cmd);
143 strcat(cmd, script_opt); 183 free(cmd);
144 184
145 if (input_name) { 185 return 0;
146 strcat(cmd, " -i ");
147 strcat(cmd, input_name);
148 }
149
150 strcat(cmd, " 2>&1");
151
152 fp = popen(cmd, "r");
153 if (!fp)
154 goto exit;
155
156 while ((retlen = getline(&line, &len, fp)) != -1) {
157 strncpy(sline->line, line, AVERAGE_LINE_LEN);
158
159 /* If one output line is very large, just cut it short */
160 if (retlen >= AVERAGE_LINE_LEN) {
161 sline->line[AVERAGE_LINE_LEN - 1] = '\0';
162 sline->line[AVERAGE_LINE_LEN - 2] = '\n';
163 }
164 list_add_tail(&sline->node, &script.entries);
165
166 if (script.b.width < retlen)
167 script.b.width = retlen;
168
169 if (nr_entries++ >= MAX_LINES - 1)
170 break;
171 sline++;
172 }
173
174 if (script.b.width > AVERAGE_LINE_LEN)
175 script.b.width = AVERAGE_LINE_LEN;
176
177 free(line);
178 pclose(fp);
179
180 script.nr_lines = nr_entries;
181 script.b.nr_entries = nr_entries;
182 script.b.entries = &script.entries;
183
184 ret = script_browser__run(&script);
185exit:
186 free(buf);
187 return ret;
188} 186}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 5f6dbbf5d749..c8b01176c9e1 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -10,6 +10,10 @@
10#include <errno.h> 10#include <errno.h>
11#include <inttypes.h> 11#include <inttypes.h>
12#include <libgen.h> 12#include <libgen.h>
13#include <bpf/bpf.h>
14#include <bpf/btf.h>
15#include <bpf/libbpf.h>
16#include <linux/btf.h>
13#include "util.h" 17#include "util.h"
14#include "ui/ui.h" 18#include "ui/ui.h"
15#include "sort.h" 19#include "sort.h"
@@ -24,6 +28,7 @@
24#include "annotate.h" 28#include "annotate.h"
25#include "evsel.h" 29#include "evsel.h"
26#include "evlist.h" 30#include "evlist.h"
31#include "bpf-event.h"
27#include "block-range.h" 32#include "block-range.h"
28#include "string2.h" 33#include "string2.h"
29#include "arch/common.h" 34#include "arch/common.h"
@@ -31,6 +36,7 @@
31#include <pthread.h> 36#include <pthread.h>
32#include <linux/bitops.h> 37#include <linux/bitops.h>
33#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <bpf/libbpf.h>
34 40
35/* FIXME: For the HE_COLORSET */ 41/* FIXME: For the HE_COLORSET */
36#include "ui/browser.h" 42#include "ui/browser.h"
@@ -1615,6 +1621,9 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
1615 " --vmlinux vmlinux\n", build_id_msg ?: ""); 1621 " --vmlinux vmlinux\n", build_id_msg ?: "");
1616 } 1622 }
1617 break; 1623 break;
1624 case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
1625 scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
1626 break;
1618 default: 1627 default:
1619 scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); 1628 scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
1620 break; 1629 break;
@@ -1674,6 +1683,156 @@ fallback:
1674 return 0; 1683 return 0;
1675} 1684}
1676 1685
1686#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
1687#define PACKAGE "perf"
1688#include <bfd.h>
1689#include <dis-asm.h>
1690
1691static int symbol__disassemble_bpf(struct symbol *sym,
1692 struct annotate_args *args)
1693{
1694 struct annotation *notes = symbol__annotation(sym);
1695 struct annotation_options *opts = args->options;
1696 struct bpf_prog_info_linear *info_linear;
1697 struct bpf_prog_linfo *prog_linfo = NULL;
1698 struct bpf_prog_info_node *info_node;
1699 int len = sym->end - sym->start;
1700 disassembler_ftype disassemble;
1701 struct map *map = args->ms.map;
1702 struct disassemble_info info;
1703 struct dso *dso = map->dso;
1704 int pc = 0, count, sub_id;
1705 struct btf *btf = NULL;
1706 char tpath[PATH_MAX];
1707 size_t buf_size;
1708 int nr_skip = 0;
1709 int ret = -1;
1710 char *buf;
1711 bfd *bfdf;
1712 FILE *s;
1713
1714 if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
1715 return -1;
1716
1717 pr_debug("%s: handling sym %s addr %lx len %lx\n", __func__,
1718 sym->name, sym->start, sym->end - sym->start);
1719
1720 memset(tpath, 0, sizeof(tpath));
1721 perf_exe(tpath, sizeof(tpath));
1722
1723 bfdf = bfd_openr(tpath, NULL);
1724 assert(bfdf);
1725 assert(bfd_check_format(bfdf, bfd_object));
1726
1727 s = open_memstream(&buf, &buf_size);
1728 if (!s)
1729 goto out;
1730 init_disassemble_info(&info, s,
1731 (fprintf_ftype) fprintf);
1732
1733 info.arch = bfd_get_arch(bfdf);
1734 info.mach = bfd_get_mach(bfdf);
1735
1736 info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
1737 dso->bpf_prog.id);
1738 if (!info_node)
1739 goto out;
1740 info_linear = info_node->info_linear;
1741 sub_id = dso->bpf_prog.sub_id;
1742
1743 info.buffer = (void *)(info_linear->info.jited_prog_insns);
1744 info.buffer_length = info_linear->info.jited_prog_len;
1745
1746 if (info_linear->info.nr_line_info)
1747 prog_linfo = bpf_prog_linfo__new(&info_linear->info);
1748
1749 if (info_linear->info.btf_id) {
1750 struct btf_node *node;
1751
1752 node = perf_env__find_btf(dso->bpf_prog.env,
1753 info_linear->info.btf_id);
1754 if (node)
1755 btf = btf__new((__u8 *)(node->data),
1756 node->data_size);
1757 }
1758
1759 disassemble_init_for_target(&info);
1760
1761#ifdef DISASM_FOUR_ARGS_SIGNATURE
1762 disassemble = disassembler(info.arch,
1763 bfd_big_endian(bfdf),
1764 info.mach,
1765 bfdf);
1766#else
1767 disassemble = disassembler(bfdf);
1768#endif
1769 assert(disassemble);
1770
1771 fflush(s);
1772 do {
1773 const struct bpf_line_info *linfo = NULL;
1774 struct disasm_line *dl;
1775 size_t prev_buf_size;
1776 const char *srcline;
1777 u64 addr;
1778
1779 addr = pc + ((u64 *)(info_linear->info.jited_ksyms))[sub_id];
1780 count = disassemble(pc, &info);
1781
1782 if (prog_linfo)
1783 linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
1784 addr, sub_id,
1785 nr_skip);
1786
1787 if (linfo && btf) {
1788 srcline = btf__name_by_offset(btf, linfo->line_off);
1789 nr_skip++;
1790 } else
1791 srcline = NULL;
1792
1793 fprintf(s, "\n");
1794 prev_buf_size = buf_size;
1795 fflush(s);
1796
1797 if (!opts->hide_src_code && srcline) {
1798 args->offset = -1;
1799 args->line = strdup(srcline);
1800 args->line_nr = 0;
1801 args->ms.sym = sym;
1802 dl = disasm_line__new(args);
1803 if (dl) {
1804 annotation_line__add(&dl->al,
1805 &notes->src->source);
1806 }
1807 }
1808
1809 args->offset = pc;
1810 args->line = buf + prev_buf_size;
1811 args->line_nr = 0;
1812 args->ms.sym = sym;
1813 dl = disasm_line__new(args);
1814 if (dl)
1815 annotation_line__add(&dl->al, &notes->src->source);
1816
1817 pc += count;
1818 } while (count > 0 && pc < len);
1819
1820 ret = 0;
1821out:
1822 free(prog_linfo);
1823 free(btf);
1824 fclose(s);
1825 bfd_close(bfdf);
1826 return ret;
1827}
1828#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
1829static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
1830 struct annotate_args *args __maybe_unused)
1831{
1832 return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
1833}
1834#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
1835
1677static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) 1836static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1678{ 1837{
1679 struct annotation_options *opts = args->options; 1838 struct annotation_options *opts = args->options;
@@ -1701,7 +1860,9 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1701 pr_debug("annotating [%p] %30s : [%p] %30s\n", 1860 pr_debug("annotating [%p] %30s : [%p] %30s\n",
1702 dso, dso->long_name, sym, sym->name); 1861 dso, dso->long_name, sym, sym->name);
1703 1862
1704 if (dso__is_kcore(dso)) { 1863 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
1864 return symbol__disassemble_bpf(sym, args);
1865 } else if (dso__is_kcore(dso)) {
1705 kce.kcore_filename = symfs_filename; 1866 kce.kcore_filename = symfs_filename;
1706 kce.addr = map__rip_2objdump(map, sym->start); 1867 kce.addr = map__rip_2objdump(map, sym->start);
1707 kce.offs = sym->start; 1868 kce.offs = sym->start;
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index df34fe483164..5bc0cf655d37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -369,6 +369,7 @@ enum symbol_disassemble_errno {
369 __SYMBOL_ANNOTATE_ERRNO__START = -10000, 369 __SYMBOL_ANNOTATE_ERRNO__START = -10000,
370 370
371 SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START, 371 SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START,
372 SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
372 373
373 __SYMBOL_ANNOTATE_ERRNO__END, 374 __SYMBOL_ANNOTATE_ERRNO__END,
374}; 375};
diff --git a/tools/perf/util/archinsn.h b/tools/perf/util/archinsn.h
new file mode 100644
index 000000000000..448cbb6b8d7e
--- /dev/null
+++ b/tools/perf/util/archinsn.h
@@ -0,0 +1,12 @@
1#ifndef INSN_H
2#define INSN_H 1
3
4struct perf_sample;
5struct machine;
6struct thread;
7
8void arch_fetch_insn(struct perf_sample *sample,
9 struct thread *thread,
10 struct machine *machine);
11
12#endif
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 028c8ec1f62a..2a4a0da35632 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -3,11 +3,17 @@
3#include <stdlib.h> 3#include <stdlib.h>
4#include <bpf/bpf.h> 4#include <bpf/bpf.h>
5#include <bpf/btf.h> 5#include <bpf/btf.h>
6#include <bpf/libbpf.h>
6#include <linux/btf.h> 7#include <linux/btf.h>
8#include <linux/err.h>
7#include "bpf-event.h" 9#include "bpf-event.h"
8#include "debug.h" 10#include "debug.h"
9#include "symbol.h" 11#include "symbol.h"
10#include "machine.h" 12#include "machine.h"
13#include "env.h"
14#include "session.h"
15#include "map.h"
16#include "evlist.h"
11 17
12#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) 18#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
13 19
@@ -21,15 +27,122 @@ static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
21 return ret; 27 return ret;
22} 28}
23 29
30static int machine__process_bpf_event_load(struct machine *machine,
31 union perf_event *event,
32 struct perf_sample *sample __maybe_unused)
33{
34 struct bpf_prog_info_linear *info_linear;
35 struct bpf_prog_info_node *info_node;
36 struct perf_env *env = machine->env;
37 int id = event->bpf_event.id;
38 unsigned int i;
39
40 /* perf-record, no need to handle bpf-event */
41 if (env == NULL)
42 return 0;
43
44 info_node = perf_env__find_bpf_prog_info(env, id);
45 if (!info_node)
46 return 0;
47 info_linear = info_node->info_linear;
48
49 for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
50 u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
51 u64 addr = addrs[i];
52 struct map *map;
53
54 map = map_groups__find(&machine->kmaps, addr);
55
56 if (map) {
57 map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
58 map->dso->bpf_prog.id = id;
59 map->dso->bpf_prog.sub_id = i;
60 map->dso->bpf_prog.env = env;
61 }
62 }
63 return 0;
64}
65
24int machine__process_bpf_event(struct machine *machine __maybe_unused, 66int machine__process_bpf_event(struct machine *machine __maybe_unused,
25 union perf_event *event, 67 union perf_event *event,
26 struct perf_sample *sample __maybe_unused) 68 struct perf_sample *sample __maybe_unused)
27{ 69{
28 if (dump_trace) 70 if (dump_trace)
29 perf_event__fprintf_bpf_event(event, stdout); 71 perf_event__fprintf_bpf_event(event, stdout);
72
73 switch (event->bpf_event.type) {
74 case PERF_BPF_EVENT_PROG_LOAD:
75 return machine__process_bpf_event_load(machine, event, sample);
76
77 case PERF_BPF_EVENT_PROG_UNLOAD:
78 /*
79 * Do not free bpf_prog_info and btf of the program here,
80 * as annotation still need them. They will be freed at
81 * the end of the session.
82 */
83 break;
84 default:
85 pr_debug("unexpected bpf_event type of %d\n",
86 event->bpf_event.type);
87 break;
88 }
30 return 0; 89 return 0;
31} 90}
32 91
92static int perf_env__fetch_btf(struct perf_env *env,
93 u32 btf_id,
94 struct btf *btf)
95{
96 struct btf_node *node;
97 u32 data_size;
98 const void *data;
99
100 data = btf__get_raw_data(btf, &data_size);
101
102 node = malloc(data_size + sizeof(struct btf_node));
103 if (!node)
104 return -1;
105
106 node->id = btf_id;
107 node->data_size = data_size;
108 memcpy(node->data, data, data_size);
109
110 perf_env__insert_btf(env, node);
111 return 0;
112}
113
114static int synthesize_bpf_prog_name(char *buf, int size,
115 struct bpf_prog_info *info,
116 struct btf *btf,
117 u32 sub_id)
118{
119 u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
120 void *func_infos = (void *)(uintptr_t)(info->func_info);
121 u32 sub_prog_cnt = info->nr_jited_ksyms;
122 const struct bpf_func_info *finfo;
123 const char *short_name = NULL;
124 const struct btf_type *t;
125 int name_len;
126
127 name_len = snprintf(buf, size, "bpf_prog_");
128 name_len += snprintf_hex(buf + name_len, size - name_len,
129 prog_tags[sub_id], BPF_TAG_SIZE);
130 if (btf) {
131 finfo = func_infos + sub_id * info->func_info_rec_size;
132 t = btf__type_by_id(btf, finfo->type_id);
133 short_name = btf__name_by_offset(btf, t->name_off);
134 } else if (sub_id == 0 && sub_prog_cnt == 1) {
135 /* no subprog */
136 if (info->name[0])
137 short_name = info->name;
138 } else
139 short_name = "F";
140 if (short_name)
141 name_len += snprintf(buf + name_len, size - name_len,
142 "_%s", short_name);
143 return name_len;
144}
145
33/* 146/*
34 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf 147 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
35 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And 148 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
@@ -40,7 +153,7 @@ int machine__process_bpf_event(struct machine *machine __maybe_unused,
40 * -1 for failures; 153 * -1 for failures;
41 * -2 for lack of kernel support. 154 * -2 for lack of kernel support.
42 */ 155 */
43static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool, 156static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
44 perf_event__handler_t process, 157 perf_event__handler_t process,
45 struct machine *machine, 158 struct machine *machine,
46 int fd, 159 int fd,
@@ -49,102 +162,71 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
49{ 162{
50 struct ksymbol_event *ksymbol_event = &event->ksymbol_event; 163 struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
51 struct bpf_event *bpf_event = &event->bpf_event; 164 struct bpf_event *bpf_event = &event->bpf_event;
52 u32 sub_prog_cnt, i, func_info_rec_size = 0; 165 struct bpf_prog_info_linear *info_linear;
53 u8 (*prog_tags)[BPF_TAG_SIZE] = NULL; 166 struct perf_tool *tool = session->tool;
54 struct bpf_prog_info info = { .type = 0, }; 167 struct bpf_prog_info_node *info_node;
55 u32 info_len = sizeof(info); 168 struct bpf_prog_info *info;
56 void *func_infos = NULL;
57 u64 *prog_addrs = NULL;
58 struct btf *btf = NULL; 169 struct btf *btf = NULL;
59 u32 *prog_lens = NULL; 170 struct perf_env *env;
60 bool has_btf = false; 171 u32 sub_prog_cnt, i;
61 char errbuf[512];
62 int err = 0; 172 int err = 0;
173 u64 arrays;
174
175 /*
176 * for perf-record and perf-report use header.env;
177 * otherwise, use global perf_env.
178 */
179 env = session->data ? &session->header.env : &perf_env;
63 180
64 /* Call bpf_obj_get_info_by_fd() to get sizes of arrays */ 181 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
65 err = bpf_obj_get_info_by_fd(fd, &info, &info_len); 182 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
183 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
184 arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
185 arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
186 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
187 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
66 188
67 if (err) { 189 info_linear = bpf_program__get_prog_info_linear(fd, arrays);
68 pr_debug("%s: failed to get BPF program info: %s, aborting\n", 190 if (IS_ERR_OR_NULL(info_linear)) {
69 __func__, str_error_r(errno, errbuf, sizeof(errbuf))); 191 info_linear = NULL;
192 pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
70 return -1; 193 return -1;
71 } 194 }
72 if (info_len < offsetof(struct bpf_prog_info, prog_tags)) { 195
196 if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
73 pr_debug("%s: the kernel is too old, aborting\n", __func__); 197 pr_debug("%s: the kernel is too old, aborting\n", __func__);
74 return -2; 198 return -2;
75 } 199 }
76 200
201 info = &info_linear->info;
202
77 /* number of ksyms, func_lengths, and tags should match */ 203 /* number of ksyms, func_lengths, and tags should match */
78 sub_prog_cnt = info.nr_jited_ksyms; 204 sub_prog_cnt = info->nr_jited_ksyms;
79 if (sub_prog_cnt != info.nr_prog_tags || 205 if (sub_prog_cnt != info->nr_prog_tags ||
80 sub_prog_cnt != info.nr_jited_func_lens) 206 sub_prog_cnt != info->nr_jited_func_lens)
81 return -1; 207 return -1;
82 208
83 /* check BTF func info support */ 209 /* check BTF func info support */
84 if (info.btf_id && info.nr_func_info && info.func_info_rec_size) { 210 if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
85 /* btf func info number should be same as sub_prog_cnt */ 211 /* btf func info number should be same as sub_prog_cnt */
86 if (sub_prog_cnt != info.nr_func_info) { 212 if (sub_prog_cnt != info->nr_func_info) {
87 pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__); 213 pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
88 return -1; 214 err = -1;
89 } 215 goto out;
90 if (btf__get_from_id(info.btf_id, &btf)) {
91 pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info.btf_id);
92 return -1;
93 } 216 }
94 func_info_rec_size = info.func_info_rec_size; 217 if (btf__get_from_id(info->btf_id, &btf)) {
95 func_infos = calloc(sub_prog_cnt, func_info_rec_size); 218 pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
96 if (!func_infos) { 219 err = -1;
97 pr_debug("%s: failed to allocate memory for func_infos, aborting\n", __func__); 220 btf = NULL;
98 return -1; 221 goto out;
99 } 222 }
100 has_btf = true; 223 perf_env__fetch_btf(env, info->btf_id, btf);
101 }
102
103 /*
104 * We need address, length, and tag for each sub program.
105 * Allocate memory and call bpf_obj_get_info_by_fd() again
106 */
107 prog_addrs = calloc(sub_prog_cnt, sizeof(u64));
108 if (!prog_addrs) {
109 pr_debug("%s: failed to allocate memory for prog_addrs, aborting\n", __func__);
110 goto out;
111 }
112 prog_lens = calloc(sub_prog_cnt, sizeof(u32));
113 if (!prog_lens) {
114 pr_debug("%s: failed to allocate memory for prog_lens, aborting\n", __func__);
115 goto out;
116 }
117 prog_tags = calloc(sub_prog_cnt, BPF_TAG_SIZE);
118 if (!prog_tags) {
119 pr_debug("%s: failed to allocate memory for prog_tags, aborting\n", __func__);
120 goto out;
121 }
122
123 memset(&info, 0, sizeof(info));
124 info.nr_jited_ksyms = sub_prog_cnt;
125 info.nr_jited_func_lens = sub_prog_cnt;
126 info.nr_prog_tags = sub_prog_cnt;
127 info.jited_ksyms = ptr_to_u64(prog_addrs);
128 info.jited_func_lens = ptr_to_u64(prog_lens);
129 info.prog_tags = ptr_to_u64(prog_tags);
130 info_len = sizeof(info);
131 if (has_btf) {
132 info.nr_func_info = sub_prog_cnt;
133 info.func_info_rec_size = func_info_rec_size;
134 info.func_info = ptr_to_u64(func_infos);
135 }
136
137 err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
138 if (err) {
139 pr_debug("%s: failed to get BPF program info, aborting\n", __func__);
140 goto out;
141 } 224 }
142 225
143 /* Synthesize PERF_RECORD_KSYMBOL */ 226 /* Synthesize PERF_RECORD_KSYMBOL */
144 for (i = 0; i < sub_prog_cnt; i++) { 227 for (i = 0; i < sub_prog_cnt; i++) {
145 const struct bpf_func_info *finfo; 228 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
146 const char *short_name = NULL; 229 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
147 const struct btf_type *t;
148 int name_len; 230 int name_len;
149 231
150 *ksymbol_event = (struct ksymbol_event){ 232 *ksymbol_event = (struct ksymbol_event){
@@ -157,26 +239,9 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
157 .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF, 239 .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
158 .flags = 0, 240 .flags = 0,
159 }; 241 };
160 name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
161 "bpf_prog_");
162 name_len += snprintf_hex(ksymbol_event->name + name_len,
163 KSYM_NAME_LEN - name_len,
164 prog_tags[i], BPF_TAG_SIZE);
165 if (has_btf) {
166 finfo = func_infos + i * info.func_info_rec_size;
167 t = btf__type_by_id(btf, finfo->type_id);
168 short_name = btf__name_by_offset(btf, t->name_off);
169 } else if (i == 0 && sub_prog_cnt == 1) {
170 /* no subprog */
171 if (info.name[0])
172 short_name = info.name;
173 } else
174 short_name = "F";
175 if (short_name)
176 name_len += snprintf(ksymbol_event->name + name_len,
177 KSYM_NAME_LEN - name_len,
178 "_%s", short_name);
179 242
243 name_len = synthesize_bpf_prog_name(ksymbol_event->name,
244 KSYM_NAME_LEN, info, btf, i);
180 ksymbol_event->header.size += PERF_ALIGN(name_len + 1, 245 ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
181 sizeof(u64)); 246 sizeof(u64));
182 247
@@ -186,8 +251,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
186 machine, process); 251 machine, process);
187 } 252 }
188 253
189 /* Synthesize PERF_RECORD_BPF_EVENT */ 254 if (!opts->no_bpf_event) {
190 if (opts->bpf_event) { 255 /* Synthesize PERF_RECORD_BPF_EVENT */
191 *bpf_event = (struct bpf_event){ 256 *bpf_event = (struct bpf_event){
192 .header = { 257 .header = {
193 .type = PERF_RECORD_BPF_EVENT, 258 .type = PERF_RECORD_BPF_EVENT,
@@ -195,25 +260,38 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_tool *tool,
195 }, 260 },
196 .type = PERF_BPF_EVENT_PROG_LOAD, 261 .type = PERF_BPF_EVENT_PROG_LOAD,
197 .flags = 0, 262 .flags = 0,
198 .id = info.id, 263 .id = info->id,
199 }; 264 };
200 memcpy(bpf_event->tag, prog_tags[i], BPF_TAG_SIZE); 265 memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
201 memset((void *)event + event->header.size, 0, machine->id_hdr_size); 266 memset((void *)event + event->header.size, 0, machine->id_hdr_size);
202 event->header.size += machine->id_hdr_size; 267 event->header.size += machine->id_hdr_size;
268
269 /* save bpf_prog_info to env */
270 info_node = malloc(sizeof(struct bpf_prog_info_node));
271 if (!info_node) {
272 err = -1;
273 goto out;
274 }
275
276 info_node->info_linear = info_linear;
277 perf_env__insert_bpf_prog_info(env, info_node);
278 info_linear = NULL;
279
280 /*
281 * process after saving bpf_prog_info to env, so that
282 * required information is ready for look up
283 */
203 err = perf_tool__process_synth_event(tool, event, 284 err = perf_tool__process_synth_event(tool, event,
204 machine, process); 285 machine, process);
205 } 286 }
206 287
207out: 288out:
208 free(prog_tags); 289 free(info_linear);
209 free(prog_lens);
210 free(prog_addrs);
211 free(func_infos);
212 free(btf); 290 free(btf);
213 return err ? -1 : 0; 291 return err ? -1 : 0;
214} 292}
215 293
216int perf_event__synthesize_bpf_events(struct perf_tool *tool, 294int perf_event__synthesize_bpf_events(struct perf_session *session,
217 perf_event__handler_t process, 295 perf_event__handler_t process,
218 struct machine *machine, 296 struct machine *machine,
219 struct record_opts *opts) 297 struct record_opts *opts)
@@ -247,7 +325,7 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
247 continue; 325 continue;
248 } 326 }
249 327
250 err = perf_event__synthesize_one_bpf_prog(tool, process, 328 err = perf_event__synthesize_one_bpf_prog(session, process,
251 machine, fd, 329 machine, fd,
252 event, opts); 330 event, opts);
253 close(fd); 331 close(fd);
@@ -261,3 +339,142 @@ int perf_event__synthesize_bpf_events(struct perf_tool *tool,
261 free(event); 339 free(event);
262 return err; 340 return err;
263} 341}
342
343static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
344{
345 struct bpf_prog_info_linear *info_linear;
346 struct bpf_prog_info_node *info_node;
347 struct btf *btf = NULL;
348 u64 arrays;
349 u32 btf_id;
350 int fd;
351
352 fd = bpf_prog_get_fd_by_id(id);
353 if (fd < 0)
354 return;
355
356 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
357 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
358 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
359 arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
360 arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
361 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
362 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
363
364 info_linear = bpf_program__get_prog_info_linear(fd, arrays);
365 if (IS_ERR_OR_NULL(info_linear)) {
366 pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
367 goto out;
368 }
369
370 btf_id = info_linear->info.btf_id;
371
372 info_node = malloc(sizeof(struct bpf_prog_info_node));
373 if (info_node) {
374 info_node->info_linear = info_linear;
375 perf_env__insert_bpf_prog_info(env, info_node);
376 } else
377 free(info_linear);
378
379 if (btf_id == 0)
380 goto out;
381
382 if (btf__get_from_id(btf_id, &btf)) {
383 pr_debug("%s: failed to get BTF of id %u, aborting\n",
384 __func__, btf_id);
385 goto out;
386 }
387 perf_env__fetch_btf(env, btf_id, btf);
388
389out:
390 free(btf);
391 close(fd);
392}
393
394static int bpf_event__sb_cb(union perf_event *event, void *data)
395{
396 struct perf_env *env = data;
397
398 if (event->header.type != PERF_RECORD_BPF_EVENT)
399 return -1;
400
401 switch (event->bpf_event.type) {
402 case PERF_BPF_EVENT_PROG_LOAD:
403 perf_env__add_bpf_info(env, event->bpf_event.id);
404
405 case PERF_BPF_EVENT_PROG_UNLOAD:
406 /*
407 * Do not free bpf_prog_info and btf of the program here,
408 * as annotation still need them. They will be freed at
409 * the end of the session.
410 */
411 break;
412 default:
413 pr_debug("unexpected bpf_event type of %d\n",
414 event->bpf_event.type);
415 break;
416 }
417
418 return 0;
419}
420
421int bpf_event__add_sb_event(struct perf_evlist **evlist,
422 struct perf_env *env)
423{
424 struct perf_event_attr attr = {
425 .type = PERF_TYPE_SOFTWARE,
426 .config = PERF_COUNT_SW_DUMMY,
427 .sample_id_all = 1,
428 .watermark = 1,
429 .bpf_event = 1,
430 .size = sizeof(attr), /* to capture ABI version */
431 };
432
433 /*
434 * Older gcc versions don't support designated initializers, like above,
435 * for unnamed union members, such as the following:
436 */
437 attr.wakeup_watermark = 1;
438
439 return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
440}
441
442void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
443 struct perf_env *env,
444 FILE *fp)
445{
446 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
447 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
448 char name[KSYM_NAME_LEN];
449 struct btf *btf = NULL;
450 u32 sub_prog_cnt, i;
451
452 sub_prog_cnt = info->nr_jited_ksyms;
453 if (sub_prog_cnt != info->nr_prog_tags ||
454 sub_prog_cnt != info->nr_jited_func_lens)
455 return;
456
457 if (info->btf_id) {
458 struct btf_node *node;
459
460 node = perf_env__find_btf(env, info->btf_id);
461 if (node)
462 btf = btf__new((__u8 *)(node->data),
463 node->data_size);
464 }
465
466 if (sub_prog_cnt == 1) {
467 synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
468 fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
469 info->id, name, prog_addrs[0], prog_lens[0]);
470 return;
471 }
472
473 fprintf(fp, "# bpf_prog_info %u:\n", info->id);
474 for (i = 0; i < sub_prog_cnt; i++) {
475 synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
476
477 fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
478 i, name, prog_addrs[i], prog_lens[i]);
479 }
480}
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 7890067e1a37..04c33b3bfe28 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -3,22 +3,45 @@
3#define __PERF_BPF_EVENT_H 3#define __PERF_BPF_EVENT_H
4 4
5#include <linux/compiler.h> 5#include <linux/compiler.h>
6#include <linux/rbtree.h>
7#include <pthread.h>
8#include <api/fd/array.h>
6#include "event.h" 9#include "event.h"
10#include <stdio.h>
7 11
8struct machine; 12struct machine;
9union perf_event; 13union perf_event;
14struct perf_env;
10struct perf_sample; 15struct perf_sample;
11struct perf_tool;
12struct record_opts; 16struct record_opts;
17struct evlist;
18struct target;
19
20struct bpf_prog_info_node {
21 struct bpf_prog_info_linear *info_linear;
22 struct rb_node rb_node;
23};
24
25struct btf_node {
26 struct rb_node rb_node;
27 u32 id;
28 u32 data_size;
29 char data[];
30};
13 31
14#ifdef HAVE_LIBBPF_SUPPORT 32#ifdef HAVE_LIBBPF_SUPPORT
15int machine__process_bpf_event(struct machine *machine, union perf_event *event, 33int machine__process_bpf_event(struct machine *machine, union perf_event *event,
16 struct perf_sample *sample); 34 struct perf_sample *sample);
17 35
18int perf_event__synthesize_bpf_events(struct perf_tool *tool, 36int perf_event__synthesize_bpf_events(struct perf_session *session,
19 perf_event__handler_t process, 37 perf_event__handler_t process,
20 struct machine *machine, 38 struct machine *machine,
21 struct record_opts *opts); 39 struct record_opts *opts);
40int bpf_event__add_sb_event(struct perf_evlist **evlist,
41 struct perf_env *env);
42void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
43 struct perf_env *env,
44 FILE *fp);
22#else 45#else
23static inline int machine__process_bpf_event(struct machine *machine __maybe_unused, 46static inline int machine__process_bpf_event(struct machine *machine __maybe_unused,
24 union perf_event *event __maybe_unused, 47 union perf_event *event __maybe_unused,
@@ -27,12 +50,25 @@ static inline int machine__process_bpf_event(struct machine *machine __maybe_unu
27 return 0; 50 return 0;
28} 51}
29 52
30static inline int perf_event__synthesize_bpf_events(struct perf_tool *tool __maybe_unused, 53static inline int perf_event__synthesize_bpf_events(struct perf_session *session __maybe_unused,
31 perf_event__handler_t process __maybe_unused, 54 perf_event__handler_t process __maybe_unused,
32 struct machine *machine __maybe_unused, 55 struct machine *machine __maybe_unused,
33 struct record_opts *opts __maybe_unused) 56 struct record_opts *opts __maybe_unused)
34{ 57{
35 return 0; 58 return 0;
36} 59}
60
61static inline int bpf_event__add_sb_event(struct perf_evlist **evlist __maybe_unused,
62 struct perf_env *env __maybe_unused)
63{
64 return 0;
65}
66
67static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused,
68 struct perf_env *env __maybe_unused,
69 FILE *fp __maybe_unused)
70{
71
72}
37#endif // HAVE_LIBBPF_SUPPORT 73#endif // HAVE_LIBBPF_SUPPORT
38#endif 74#endif
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff0d17920ed..0c5517a8d0b7 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -185,6 +185,7 @@ char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
185 return bf; 185 return bf;
186} 186}
187 187
188/* The caller is responsible to free the returned buffer. */
188char *build_id_cache__origname(const char *sbuild_id) 189char *build_id_cache__origname(const char *sbuild_id)
189{ 190{
190 char *linkname; 191 char *linkname;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index fa092511c52b..7e3c1b60120c 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -633,11 +633,10 @@ static int collect_config(const char *var, const char *value,
633 } 633 }
634 634
635 ret = set_value(item, value); 635 ret = set_value(item, value);
636 return ret;
637 636
638out_free: 637out_free:
639 free(key); 638 free(key);
640 return -1; 639 return ret;
641} 640}
642 641
643int perf_config_set__collect(struct perf_config_set *set, const char *file_name, 642int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index ba4c623cd8de..39fe21e1cf93 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -387,6 +387,7 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
387 break; 387 break;
388 case OCSD_INSTR_ISB: 388 case OCSD_INSTR_ISB:
389 case OCSD_INSTR_DSB_DMB: 389 case OCSD_INSTR_DSB_DMB:
390 case OCSD_INSTR_WFI_WFE:
390 case OCSD_INSTR_OTHER: 391 case OCSD_INSTR_OTHER:
391 default: 392 default:
392 packet->last_instr_taken_branch = false; 393 packet->last_instr_taken_branch = false;
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index e098e189f93e..6a64f713710d 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -14,6 +14,7 @@
14#include "data.h" 14#include "data.h"
15#include "util.h" 15#include "util.h"
16#include "debug.h" 16#include "debug.h"
17#include "header.h"
17 18
18static void close_dir(struct perf_data_file *files, int nr) 19static void close_dir(struct perf_data_file *files, int nr)
19{ 20{
@@ -34,12 +35,16 @@ int perf_data__create_dir(struct perf_data *data, int nr)
34 struct perf_data_file *files = NULL; 35 struct perf_data_file *files = NULL;
35 int i, ret = -1; 36 int i, ret = -1;
36 37
38 if (WARN_ON(!data->is_dir))
39 return -EINVAL;
40
37 files = zalloc(nr * sizeof(*files)); 41 files = zalloc(nr * sizeof(*files));
38 if (!files) 42 if (!files)
39 return -ENOMEM; 43 return -ENOMEM;
40 44
41 data->dir.files = files; 45 data->dir.version = PERF_DIR_VERSION;
42 data->dir.nr = nr; 46 data->dir.files = files;
47 data->dir.nr = nr;
43 48
44 for (i = 0; i < nr; i++) { 49 for (i = 0; i < nr; i++) {
45 struct perf_data_file *file = &files[i]; 50 struct perf_data_file *file = &files[i];
@@ -69,6 +74,13 @@ int perf_data__open_dir(struct perf_data *data)
69 DIR *dir; 74 DIR *dir;
70 int nr = 0; 75 int nr = 0;
71 76
77 if (WARN_ON(!data->is_dir))
78 return -EINVAL;
79
80 /* The version is provided by DIR_FORMAT feature. */
81 if (WARN_ON(data->dir.version != PERF_DIR_VERSION))
82 return -1;
83
72 dir = opendir(data->path); 84 dir = opendir(data->path);
73 if (!dir) 85 if (!dir)
74 return -EINVAL; 86 return -EINVAL;
@@ -118,6 +130,26 @@ out_err:
118 return ret; 130 return ret;
119} 131}
120 132
133int perf_data__update_dir(struct perf_data *data)
134{
135 int i;
136
137 if (WARN_ON(!data->is_dir))
138 return -EINVAL;
139
140 for (i = 0; i < data->dir.nr; i++) {
141 struct perf_data_file *file = &data->dir.files[i];
142 struct stat st;
143
144 if (fstat(file->fd, &st))
145 return -1;
146
147 file->size = st.st_size;
148 }
149
150 return 0;
151}
152
121static bool check_pipe(struct perf_data *data) 153static bool check_pipe(struct perf_data *data)
122{ 154{
123 struct stat st; 155 struct stat st;
@@ -173,6 +205,16 @@ static int check_backup(struct perf_data *data)
173 return 0; 205 return 0;
174} 206}
175 207
208static bool is_dir(struct perf_data *data)
209{
210 struct stat st;
211
212 if (stat(data->path, &st))
213 return false;
214
215 return (st.st_mode & S_IFMT) == S_IFDIR;
216}
217
176static int open_file_read(struct perf_data *data) 218static int open_file_read(struct perf_data *data)
177{ 219{
178 struct stat st; 220 struct stat st;
@@ -254,6 +296,30 @@ static int open_file_dup(struct perf_data *data)
254 return open_file(data); 296 return open_file(data);
255} 297}
256 298
299static int open_dir(struct perf_data *data)
300{
301 int ret;
302
303 /*
304 * So far we open only the header, so we can read the data version and
305 * layout.
306 */
307 if (asprintf(&data->file.path, "%s/header", data->path) < 0)
308 return -1;
309
310 if (perf_data__is_write(data) &&
311 mkdir(data->path, S_IRWXU) < 0)
312 return -1;
313
314 ret = open_file(data);
315
316 /* Cleanup whatever we managed to create so far. */
317 if (ret && perf_data__is_write(data))
318 rm_rf_perf_data(data->path);
319
320 return ret;
321}
322
257int perf_data__open(struct perf_data *data) 323int perf_data__open(struct perf_data *data)
258{ 324{
259 if (check_pipe(data)) 325 if (check_pipe(data))
@@ -265,11 +331,18 @@ int perf_data__open(struct perf_data *data)
265 if (check_backup(data)) 331 if (check_backup(data))
266 return -1; 332 return -1;
267 333
268 return open_file_dup(data); 334 if (perf_data__is_read(data))
335 data->is_dir = is_dir(data);
336
337 return perf_data__is_dir(data) ?
338 open_dir(data) : open_file_dup(data);
269} 339}
270 340
271void perf_data__close(struct perf_data *data) 341void perf_data__close(struct perf_data *data)
272{ 342{
343 if (perf_data__is_dir(data))
344 perf_data__close_dir(data);
345
273 zfree(&data->file.path); 346 zfree(&data->file.path);
274 close(data->file.fd); 347 close(data->file.fd);
275} 348}
@@ -288,9 +361,9 @@ ssize_t perf_data__write(struct perf_data *data,
288 361
289int perf_data__switch(struct perf_data *data, 362int perf_data__switch(struct perf_data *data,
290 const char *postfix, 363 const char *postfix,
291 size_t pos, bool at_exit) 364 size_t pos, bool at_exit,
365 char **new_filepath)
292{ 366{
293 char *new_filepath;
294 int ret; 367 int ret;
295 368
296 if (check_pipe(data)) 369 if (check_pipe(data))
@@ -298,15 +371,15 @@ int perf_data__switch(struct perf_data *data,
298 if (perf_data__is_read(data)) 371 if (perf_data__is_read(data))
299 return -EINVAL; 372 return -EINVAL;
300 373
301 if (asprintf(&new_filepath, "%s.%s", data->path, postfix) < 0) 374 if (asprintf(new_filepath, "%s.%s", data->path, postfix) < 0)
302 return -ENOMEM; 375 return -ENOMEM;
303 376
304 /* 377 /*
305 * Only fire a warning, don't return error, continue fill 378 * Only fire a warning, don't return error, continue fill
306 * original file. 379 * original file.
307 */ 380 */
308 if (rename(data->path, new_filepath)) 381 if (rename(data->path, *new_filepath))
309 pr_warning("Failed to rename %s to %s\n", data->path, new_filepath); 382 pr_warning("Failed to rename %s to %s\n", data->path, *new_filepath);
310 383
311 if (!at_exit) { 384 if (!at_exit) {
312 close(data->file.fd); 385 close(data->file.fd);
@@ -323,6 +396,22 @@ int perf_data__switch(struct perf_data *data,
323 } 396 }
324 ret = data->file.fd; 397 ret = data->file.fd;
325out: 398out:
326 free(new_filepath);
327 return ret; 399 return ret;
328} 400}
401
402unsigned long perf_data__size(struct perf_data *data)
403{
404 u64 size = data->file.size;
405 int i;
406
407 if (!data->is_dir)
408 return size;
409
410 for (i = 0; i < data->dir.nr; i++) {
411 struct perf_data_file *file = &data->dir.files[i];
412
413 size += file->size;
414 }
415
416 return size;
417}
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index 14b47be2bd69..259868a39019 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -19,10 +19,12 @@ struct perf_data {
19 const char *path; 19 const char *path;
20 struct perf_data_file file; 20 struct perf_data_file file;
21 bool is_pipe; 21 bool is_pipe;
22 bool is_dir;
22 bool force; 23 bool force;
23 enum perf_data_mode mode; 24 enum perf_data_mode mode;
24 25
25 struct { 26 struct {
27 u64 version;
26 struct perf_data_file *files; 28 struct perf_data_file *files;
27 int nr; 29 int nr;
28 } dir; 30 } dir;
@@ -43,14 +45,14 @@ static inline int perf_data__is_pipe(struct perf_data *data)
43 return data->is_pipe; 45 return data->is_pipe;
44} 46}
45 47
46static inline int perf_data__fd(struct perf_data *data) 48static inline bool perf_data__is_dir(struct perf_data *data)
47{ 49{
48 return data->file.fd; 50 return data->is_dir;
49} 51}
50 52
51static inline unsigned long perf_data__size(struct perf_data *data) 53static inline int perf_data__fd(struct perf_data *data)
52{ 54{
53 return data->file.size; 55 return data->file.fd;
54} 56}
55 57
56int perf_data__open(struct perf_data *data); 58int perf_data__open(struct perf_data *data);
@@ -68,9 +70,11 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
68 */ 70 */
69int perf_data__switch(struct perf_data *data, 71int perf_data__switch(struct perf_data *data,
70 const char *postfix, 72 const char *postfix,
71 size_t pos, bool at_exit); 73 size_t pos, bool at_exit, char **new_filepath);
72 74
73int perf_data__create_dir(struct perf_data *data, int nr); 75int perf_data__create_dir(struct perf_data *data, int nr);
74int perf_data__open_dir(struct perf_data *data); 76int perf_data__open_dir(struct perf_data *data);
75void perf_data__close_dir(struct perf_data *data); 77void perf_data__close_dir(struct perf_data *data);
78int perf_data__update_dir(struct perf_data *data);
79unsigned long perf_data__size(struct perf_data *data);
76#endif /* __PERF_DATA_H */ 80#endif /* __PERF_DATA_H */
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index ba58ba603b69..e059976d9d93 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -184,6 +184,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
184 case DSO_BINARY_TYPE__KALLSYMS: 184 case DSO_BINARY_TYPE__KALLSYMS:
185 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 185 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
186 case DSO_BINARY_TYPE__JAVA_JIT: 186 case DSO_BINARY_TYPE__JAVA_JIT:
187 case DSO_BINARY_TYPE__BPF_PROG_INFO:
187 case DSO_BINARY_TYPE__NOT_FOUND: 188 case DSO_BINARY_TYPE__NOT_FOUND:
188 ret = -1; 189 ret = -1;
189 break; 190 break;
@@ -1141,28 +1142,34 @@ void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1141 1142
1142static void dso__set_basename(struct dso *dso) 1143static void dso__set_basename(struct dso *dso)
1143{ 1144{
1144 /* 1145 char *base, *lname;
1145 * basename() may modify path buffer, so we must pass 1146 int tid;
1146 * a copy.
1147 */
1148 char *base, *lname = strdup(dso->long_name);
1149 1147
1150 if (!lname) 1148 if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
1151 return; 1149 if (asprintf(&base, "[JIT] tid %d", tid) < 0)
1152 1150 return;
1153 /* 1151 } else {
1154 * basename() may return a pointer to internal 1152 /*
1155 * storage which is reused in subsequent calls 1153 * basename() may modify path buffer, so we must pass
1156 * so copy the result. 1154 * a copy.
1157 */ 1155 */
1158 base = strdup(basename(lname)); 1156 lname = strdup(dso->long_name);
1157 if (!lname)
1158 return;
1159 1159
1160 free(lname); 1160 /*
1161 * basename() may return a pointer to internal
1162 * storage which is reused in subsequent calls
1163 * so copy the result.
1164 */
1165 base = strdup(basename(lname));
1161 1166
1162 if (!base) 1167 free(lname);
1163 return;
1164 1168
1165 dso__set_short_name(dso, base, true); 1169 if (!base)
1170 return;
1171 }
1172 dso__set_short_name(dso, base, true);
1166} 1173}
1167 1174
1168int dso__name_len(const struct dso *dso) 1175int dso__name_len(const struct dso *dso)
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index bb417c54c25a..6e3f63781e51 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -14,6 +14,7 @@
14 14
15struct machine; 15struct machine;
16struct map; 16struct map;
17struct perf_env;
17 18
18enum dso_binary_type { 19enum dso_binary_type {
19 DSO_BINARY_TYPE__KALLSYMS = 0, 20 DSO_BINARY_TYPE__KALLSYMS = 0,
@@ -35,6 +36,7 @@ enum dso_binary_type {
35 DSO_BINARY_TYPE__KCORE, 36 DSO_BINARY_TYPE__KCORE,
36 DSO_BINARY_TYPE__GUEST_KCORE, 37 DSO_BINARY_TYPE__GUEST_KCORE,
37 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 38 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
39 DSO_BINARY_TYPE__BPF_PROG_INFO,
38 DSO_BINARY_TYPE__NOT_FOUND, 40 DSO_BINARY_TYPE__NOT_FOUND,
39}; 41};
40 42
@@ -189,6 +191,12 @@ struct dso {
189 u64 debug_frame_offset; 191 u64 debug_frame_offset;
190 u64 eh_frame_hdr_offset; 192 u64 eh_frame_hdr_offset;
191 } data; 193 } data;
194 /* bpf prog information */
195 struct {
196 u32 id;
197 u32 sub_id;
198 struct perf_env *env;
199 } bpf_prog;
192 200
193 union { /* Tool specific area */ 201 union { /* Tool specific area */
194 void *priv; 202 void *priv;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c23779e271a..c6351b557bb0 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,15 +3,163 @@
3#include "env.h" 3#include "env.h"
4#include "sane_ctype.h" 4#include "sane_ctype.h"
5#include "util.h" 5#include "util.h"
6#include "bpf-event.h"
6#include <errno.h> 7#include <errno.h>
7#include <sys/utsname.h> 8#include <sys/utsname.h>
9#include <bpf/libbpf.h>
8 10
9struct perf_env perf_env; 11struct perf_env perf_env;
10 12
13void perf_env__insert_bpf_prog_info(struct perf_env *env,
14 struct bpf_prog_info_node *info_node)
15{
16 __u32 prog_id = info_node->info_linear->info.id;
17 struct bpf_prog_info_node *node;
18 struct rb_node *parent = NULL;
19 struct rb_node **p;
20
21 down_write(&env->bpf_progs.lock);
22 p = &env->bpf_progs.infos.rb_node;
23
24 while (*p != NULL) {
25 parent = *p;
26 node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
27 if (prog_id < node->info_linear->info.id) {
28 p = &(*p)->rb_left;
29 } else if (prog_id > node->info_linear->info.id) {
30 p = &(*p)->rb_right;
31 } else {
32 pr_debug("duplicated bpf prog info %u\n", prog_id);
33 goto out;
34 }
35 }
36
37 rb_link_node(&info_node->rb_node, parent, p);
38 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
39 env->bpf_progs.infos_cnt++;
40out:
41 up_write(&env->bpf_progs.lock);
42}
43
44struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
45 __u32 prog_id)
46{
47 struct bpf_prog_info_node *node = NULL;
48 struct rb_node *n;
49
50 down_read(&env->bpf_progs.lock);
51 n = env->bpf_progs.infos.rb_node;
52
53 while (n) {
54 node = rb_entry(n, struct bpf_prog_info_node, rb_node);
55 if (prog_id < node->info_linear->info.id)
56 n = n->rb_left;
57 else if (prog_id > node->info_linear->info.id)
58 n = n->rb_right;
59 else
60 break;
61 }
62
63 up_read(&env->bpf_progs.lock);
64 return node;
65}
66
67void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
68{
69 struct rb_node *parent = NULL;
70 __u32 btf_id = btf_node->id;
71 struct btf_node *node;
72 struct rb_node **p;
73
74 down_write(&env->bpf_progs.lock);
75 p = &env->bpf_progs.btfs.rb_node;
76
77 while (*p != NULL) {
78 parent = *p;
79 node = rb_entry(parent, struct btf_node, rb_node);
80 if (btf_id < node->id) {
81 p = &(*p)->rb_left;
82 } else if (btf_id > node->id) {
83 p = &(*p)->rb_right;
84 } else {
85 pr_debug("duplicated btf %u\n", btf_id);
86 goto out;
87 }
88 }
89
90 rb_link_node(&btf_node->rb_node, parent, p);
91 rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
92 env->bpf_progs.btfs_cnt++;
93out:
94 up_write(&env->bpf_progs.lock);
95}
96
97struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
98{
99 struct btf_node *node = NULL;
100 struct rb_node *n;
101
102 down_read(&env->bpf_progs.lock);
103 n = env->bpf_progs.btfs.rb_node;
104
105 while (n) {
106 node = rb_entry(n, struct btf_node, rb_node);
107 if (btf_id < node->id)
108 n = n->rb_left;
109 else if (btf_id > node->id)
110 n = n->rb_right;
111 else
112 break;
113 }
114
115 up_read(&env->bpf_progs.lock);
116 return node;
117}
118
119/* purge data in bpf_progs.infos tree */
120static void perf_env__purge_bpf(struct perf_env *env)
121{
122 struct rb_root *root;
123 struct rb_node *next;
124
125 down_write(&env->bpf_progs.lock);
126
127 root = &env->bpf_progs.infos;
128 next = rb_first(root);
129
130 while (next) {
131 struct bpf_prog_info_node *node;
132
133 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
134 next = rb_next(&node->rb_node);
135 rb_erase(&node->rb_node, root);
136 free(node);
137 }
138
139 env->bpf_progs.infos_cnt = 0;
140
141 root = &env->bpf_progs.btfs;
142 next = rb_first(root);
143
144 while (next) {
145 struct btf_node *node;
146
147 node = rb_entry(next, struct btf_node, rb_node);
148 next = rb_next(&node->rb_node);
149 rb_erase(&node->rb_node, root);
150 free(node);
151 }
152
153 env->bpf_progs.btfs_cnt = 0;
154
155 up_write(&env->bpf_progs.lock);
156}
157
11void perf_env__exit(struct perf_env *env) 158void perf_env__exit(struct perf_env *env)
12{ 159{
13 int i; 160 int i;
14 161
162 perf_env__purge_bpf(env);
15 zfree(&env->hostname); 163 zfree(&env->hostname);
16 zfree(&env->os_release); 164 zfree(&env->os_release);
17 zfree(&env->version); 165 zfree(&env->version);
@@ -38,6 +186,13 @@ void perf_env__exit(struct perf_env *env)
38 zfree(&env->memory_nodes); 186 zfree(&env->memory_nodes);
39} 187}
40 188
189void perf_env__init(struct perf_env *env)
190{
191 env->bpf_progs.infos = RB_ROOT;
192 env->bpf_progs.btfs = RB_ROOT;
193 init_rwsem(&env->bpf_progs.lock);
194}
195
41int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]) 196int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
42{ 197{
43 int i; 198 int i;
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d01b8355f4ca..4f8e2b485c01 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -3,7 +3,9 @@
3#define __PERF_ENV_H 3#define __PERF_ENV_H
4 4
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/rbtree.h>
6#include "cpumap.h" 7#include "cpumap.h"
8#include "rwsem.h"
7 9
8struct cpu_topology_map { 10struct cpu_topology_map {
9 int socket_id; 11 int socket_id;
@@ -64,8 +66,23 @@ struct perf_env {
64 struct memory_node *memory_nodes; 66 struct memory_node *memory_nodes;
65 unsigned long long memory_bsize; 67 unsigned long long memory_bsize;
66 u64 clockid_res_ns; 68 u64 clockid_res_ns;
69
70 /*
71 * bpf_info_lock protects bpf rbtrees. This is needed because the
72 * trees are accessed by different threads in perf-top
73 */
74 struct {
75 struct rw_semaphore lock;
76 struct rb_root infos;
77 u32 infos_cnt;
78 struct rb_root btfs;
79 u32 btfs_cnt;
80 } bpf_progs;
67}; 81};
68 82
83struct bpf_prog_info_node;
84struct btf_node;
85
69extern struct perf_env perf_env; 86extern struct perf_env perf_env;
70 87
71void perf_env__exit(struct perf_env *env); 88void perf_env__exit(struct perf_env *env);
@@ -80,4 +97,11 @@ const char *perf_env__arch(struct perf_env *env);
80const char *perf_env__raw_arch(struct perf_env *env); 97const char *perf_env__raw_arch(struct perf_env *env);
81int perf_env__nr_cpus_avail(struct perf_env *env); 98int perf_env__nr_cpus_avail(struct perf_env *env);
82 99
100void perf_env__init(struct perf_env *env);
101void perf_env__insert_bpf_prog_info(struct perf_env *env,
102 struct bpf_prog_info_node *info_node);
103struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
104 __u32 prog_id);
105void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
106struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
83#endif /* __PERF_ENV_H */ 107#endif /* __PERF_ENV_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index ed20f4379956..6689378ee577 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -19,6 +19,7 @@
19#include "debug.h" 19#include "debug.h"
20#include "units.h" 20#include "units.h"
21#include "asm/bug.h" 21#include "asm/bug.h"
22#include "bpf-event.h"
22#include <signal.h> 23#include <signal.h>
23#include <unistd.h> 24#include <unistd.h>
24 25
@@ -230,35 +231,6 @@ void perf_evlist__set_leader(struct perf_evlist *evlist)
230 } 231 }
231} 232}
232 233
233void perf_event_attr__set_max_precise_ip(struct perf_event_attr *pattr)
234{
235 struct perf_event_attr attr = {
236 .type = PERF_TYPE_HARDWARE,
237 .config = PERF_COUNT_HW_CPU_CYCLES,
238 .exclude_kernel = 1,
239 .precise_ip = 3,
240 };
241
242 event_attr_init(&attr);
243
244 /*
245 * Unnamed union member, not supported as struct member named
246 * initializer in older compilers such as gcc 4.4.7
247 */
248 attr.sample_period = 1;
249
250 while (attr.precise_ip != 0) {
251 int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
252 if (fd != -1) {
253 close(fd);
254 break;
255 }
256 --attr.precise_ip;
257 }
258
259 pattr->precise_ip = attr.precise_ip;
260}
261
262int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise) 234int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise)
263{ 235{
264 struct perf_evsel *evsel = perf_evsel__new_cycles(precise); 236 struct perf_evsel *evsel = perf_evsel__new_cycles(precise);
@@ -1856,3 +1828,121 @@ struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
1856 } 1828 }
1857 return leader; 1829 return leader;
1858} 1830}
1831
1832int perf_evlist__add_sb_event(struct perf_evlist **evlist,
1833 struct perf_event_attr *attr,
1834 perf_evsel__sb_cb_t cb,
1835 void *data)
1836{
1837 struct perf_evsel *evsel;
1838 bool new_evlist = (*evlist) == NULL;
1839
1840 if (*evlist == NULL)
1841 *evlist = perf_evlist__new();
1842 if (*evlist == NULL)
1843 return -1;
1844
1845 if (!attr->sample_id_all) {
1846 pr_warning("enabling sample_id_all for all side band events\n");
1847 attr->sample_id_all = 1;
1848 }
1849
1850 evsel = perf_evsel__new_idx(attr, (*evlist)->nr_entries);
1851 if (!evsel)
1852 goto out_err;
1853
1854 evsel->side_band.cb = cb;
1855 evsel->side_band.data = data;
1856 perf_evlist__add(*evlist, evsel);
1857 return 0;
1858
1859out_err:
1860 if (new_evlist) {
1861 perf_evlist__delete(*evlist);
1862 *evlist = NULL;
1863 }
1864 return -1;
1865}
1866
1867static void *perf_evlist__poll_thread(void *arg)
1868{
1869 struct perf_evlist *evlist = arg;
1870 bool draining = false;
1871 int i;
1872
1873 while (draining || !(evlist->thread.done)) {
1874 if (draining)
1875 draining = false;
1876 else if (evlist->thread.done)
1877 draining = true;
1878
1879 if (!draining)
1880 perf_evlist__poll(evlist, 1000);
1881
1882 for (i = 0; i < evlist->nr_mmaps; i++) {
1883 struct perf_mmap *map = &evlist->mmap[i];
1884 union perf_event *event;
1885
1886 if (perf_mmap__read_init(map))
1887 continue;
1888 while ((event = perf_mmap__read_event(map)) != NULL) {
1889 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1890
1891 if (evsel && evsel->side_band.cb)
1892 evsel->side_band.cb(event, evsel->side_band.data);
1893 else
1894 pr_warning("cannot locate proper evsel for the side band event\n");
1895
1896 perf_mmap__consume(map);
1897 }
1898 perf_mmap__read_done(map);
1899 }
1900 }
1901 return NULL;
1902}
1903
1904int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
1905 struct target *target)
1906{
1907 struct perf_evsel *counter;
1908
1909 if (!evlist)
1910 return 0;
1911
1912 if (perf_evlist__create_maps(evlist, target))
1913 goto out_delete_evlist;
1914
1915 evlist__for_each_entry(evlist, counter) {
1916 if (perf_evsel__open(counter, evlist->cpus,
1917 evlist->threads) < 0)
1918 goto out_delete_evlist;
1919 }
1920
1921 if (perf_evlist__mmap(evlist, UINT_MAX))
1922 goto out_delete_evlist;
1923
1924 evlist__for_each_entry(evlist, counter) {
1925 if (perf_evsel__enable(counter))
1926 goto out_delete_evlist;
1927 }
1928
1929 evlist->thread.done = 0;
1930 if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
1931 goto out_delete_evlist;
1932
1933 return 0;
1934
1935out_delete_evlist:
1936 perf_evlist__delete(evlist);
1937 evlist = NULL;
1938 return -1;
1939}
1940
1941void perf_evlist__stop_sb_thread(struct perf_evlist *evlist)
1942{
1943 if (!evlist)
1944 return;
1945 evlist->thread.done = 1;
1946 pthread_join(evlist->thread.th, NULL);
1947 perf_evlist__delete(evlist);
1948}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 744906dd4887..6a94785b9100 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,10 @@ struct perf_evlist {
54 struct perf_sample *sample); 54 struct perf_sample *sample);
55 u64 first_sample_time; 55 u64 first_sample_time;
56 u64 last_sample_time; 56 u64 last_sample_time;
57 struct {
58 pthread_t th;
59 volatile int done;
60 } thread;
57}; 61};
58 62
59struct perf_evsel_str_handler { 63struct perf_evsel_str_handler {
@@ -87,6 +91,14 @@ int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
87 91
88int perf_evlist__add_dummy(struct perf_evlist *evlist); 92int perf_evlist__add_dummy(struct perf_evlist *evlist);
89 93
94int perf_evlist__add_sb_event(struct perf_evlist **evlist,
95 struct perf_event_attr *attr,
96 perf_evsel__sb_cb_t cb,
97 void *data);
98int perf_evlist__start_sb_thread(struct perf_evlist *evlist,
99 struct target *target);
100void perf_evlist__stop_sb_thread(struct perf_evlist *evlist);
101
90int perf_evlist__add_newtp(struct perf_evlist *evlist, 102int perf_evlist__add_newtp(struct perf_evlist *evlist,
91 const char *sys, const char *name, void *handler); 103 const char *sys, const char *name, void *handler);
92 104
@@ -303,8 +315,6 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
303void perf_evlist__set_tracking_event(struct perf_evlist *evlist, 315void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
304 struct perf_evsel *tracking_evsel); 316 struct perf_evsel *tracking_evsel);
305 317
306void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
307
308struct perf_evsel * 318struct perf_evsel *
309perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str); 319perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
310 320
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 3bbf73e979c0..66d066f18b5b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -295,7 +295,6 @@ struct perf_evsel *perf_evsel__new_cycles(bool precise)
295 if (!precise) 295 if (!precise)
296 goto new_event; 296 goto new_event;
297 297
298 perf_event_attr__set_max_precise_ip(&attr);
299 /* 298 /*
300 * Now let the usual logic to set up the perf_event_attr defaults 299 * Now let the usual logic to set up the perf_event_attr defaults
301 * to kick in when we return and before perf_evsel__open() is called. 300 * to kick in when we return and before perf_evsel__open() is called.
@@ -305,6 +304,8 @@ new_event:
305 if (evsel == NULL) 304 if (evsel == NULL)
306 goto out; 305 goto out;
307 306
307 evsel->precise_max = true;
308
308 /* use asprintf() because free(evsel) assumes name is allocated */ 309 /* use asprintf() because free(evsel) assumes name is allocated */
309 if (asprintf(&evsel->name, "cycles%s%s%.*s", 310 if (asprintf(&evsel->name, "cycles%s%s%.*s",
310 (attr.precise_ip || attr.exclude_kernel) ? ":" : "", 311 (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
@@ -1036,7 +1037,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
1036 attr->mmap2 = track && !perf_missing_features.mmap2; 1037 attr->mmap2 = track && !perf_missing_features.mmap2;
1037 attr->comm = track; 1038 attr->comm = track;
1038 attr->ksymbol = track && !perf_missing_features.ksymbol; 1039 attr->ksymbol = track && !perf_missing_features.ksymbol;
1039 attr->bpf_event = track && opts->bpf_event && 1040 attr->bpf_event = track && !opts->no_bpf_event &&
1040 !perf_missing_features.bpf_event; 1041 !perf_missing_features.bpf_event;
1041 1042
1042 if (opts->record_namespaces) 1043 if (opts->record_namespaces)
@@ -1083,7 +1084,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
1083 } 1084 }
1084 1085
1085 if (evsel->precise_max) 1086 if (evsel->precise_max)
1086 perf_event_attr__set_max_precise_ip(attr); 1087 attr->precise_ip = 3;
1087 1088
1088 if (opts->all_user) { 1089 if (opts->all_user) {
1089 attr->exclude_kernel = 1; 1090 attr->exclude_kernel = 1;
@@ -1292,6 +1293,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
1292{ 1293{
1293 assert(list_empty(&evsel->node)); 1294 assert(list_empty(&evsel->node));
1294 assert(evsel->evlist == NULL); 1295 assert(evsel->evlist == NULL);
1296 perf_evsel__free_counts(evsel);
1295 perf_evsel__free_fd(evsel); 1297 perf_evsel__free_fd(evsel);
1296 perf_evsel__free_id(evsel); 1298 perf_evsel__free_id(evsel);
1297 perf_evsel__free_config_terms(evsel); 1299 perf_evsel__free_config_terms(evsel);
@@ -1342,10 +1344,9 @@ void perf_counts_values__scale(struct perf_counts_values *count,
1342 count->val = 0; 1344 count->val = 0;
1343 } else if (count->run < count->ena) { 1345 } else if (count->run < count->ena) {
1344 scaled = 1; 1346 scaled = 1;
1345 count->val = (u64)((double) count->val * count->ena / count->run + 0.5); 1347 count->val = (u64)((double) count->val * count->ena / count->run);
1346 } 1348 }
1347 } else 1349 }
1348 count->ena = count->run = 0;
1349 1350
1350 if (pscaled) 1351 if (pscaled)
1351 *pscaled = scaled; 1352 *pscaled = scaled;
@@ -1749,6 +1750,59 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
1749 return true; 1750 return true;
1750} 1751}
1751 1752
1753static void display_attr(struct perf_event_attr *attr)
1754{
1755 if (verbose >= 2) {
1756 fprintf(stderr, "%.60s\n", graph_dotted_line);
1757 fprintf(stderr, "perf_event_attr:\n");
1758 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1759 fprintf(stderr, "%.60s\n", graph_dotted_line);
1760 }
1761}
1762
1763static int perf_event_open(struct perf_evsel *evsel,
1764 pid_t pid, int cpu, int group_fd,
1765 unsigned long flags)
1766{
1767 int precise_ip = evsel->attr.precise_ip;
1768 int fd;
1769
1770 while (1) {
1771 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
1772 pid, cpu, group_fd, flags);
1773
1774 fd = sys_perf_event_open(&evsel->attr, pid, cpu, group_fd, flags);
1775 if (fd >= 0)
1776 break;
1777
1778 /*
1779 * Do quick precise_ip fallback if:
1780 * - there is precise_ip set in perf_event_attr
1781 * - maximum precise is requested
1782 * - sys_perf_event_open failed with ENOTSUP error,
1783 * which is associated with wrong precise_ip
1784 */
1785 if (!precise_ip || !evsel->precise_max || (errno != ENOTSUP))
1786 break;
1787
1788 /*
1789 * We tried all the precise_ip values, and it's
1790 * still failing, so leave it to standard fallback.
1791 */
1792 if (!evsel->attr.precise_ip) {
1793 evsel->attr.precise_ip = precise_ip;
1794 break;
1795 }
1796
1797 pr_debug2("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
1798 evsel->attr.precise_ip--;
1799 pr_debug2("decreasing precise_ip by one (%d)\n", evsel->attr.precise_ip);
1800 display_attr(&evsel->attr);
1801 }
1802
1803 return fd;
1804}
1805
1752int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, 1806int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
1753 struct thread_map *threads) 1807 struct thread_map *threads)
1754{ 1808{
@@ -1824,12 +1878,7 @@ retry_sample_id:
1824 if (perf_missing_features.sample_id_all) 1878 if (perf_missing_features.sample_id_all)
1825 evsel->attr.sample_id_all = 0; 1879 evsel->attr.sample_id_all = 0;
1826 1880
1827 if (verbose >= 2) { 1881 display_attr(&evsel->attr);
1828 fprintf(stderr, "%.60s\n", graph_dotted_line);
1829 fprintf(stderr, "perf_event_attr:\n");
1830 perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL);
1831 fprintf(stderr, "%.60s\n", graph_dotted_line);
1832 }
1833 1882
1834 for (cpu = 0; cpu < cpus->nr; cpu++) { 1883 for (cpu = 0; cpu < cpus->nr; cpu++) {
1835 1884
@@ -1841,13 +1890,10 @@ retry_sample_id:
1841 1890
1842 group_fd = get_group_fd(evsel, cpu, thread); 1891 group_fd = get_group_fd(evsel, cpu, thread);
1843retry_open: 1892retry_open:
1844 pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
1845 pid, cpus->map[cpu], group_fd, flags);
1846
1847 test_attr__ready(); 1893 test_attr__ready();
1848 1894
1849 fd = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu], 1895 fd = perf_event_open(evsel, pid, cpus->map[cpu],
1850 group_fd, flags); 1896 group_fd, flags);
1851 1897
1852 FD(evsel, cpu, thread) = fd; 1898 FD(evsel, cpu, thread) = fd;
1853 1899
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index cc578e02e08f..0f2c6c93d721 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -73,6 +73,8 @@ struct perf_evsel_config_term {
73 73
74struct perf_stat_evsel; 74struct perf_stat_evsel;
75 75
76typedef int (perf_evsel__sb_cb_t)(union perf_event *event, void *data);
77
76/** struct perf_evsel - event selector 78/** struct perf_evsel - event selector
77 * 79 *
78 * @evlist - evlist this evsel is in, if it is in one. 80 * @evlist - evlist this evsel is in, if it is in one.
@@ -151,6 +153,10 @@ struct perf_evsel {
151 bool collect_stat; 153 bool collect_stat;
152 bool weak_group; 154 bool weak_group;
153 const char *pmu_name; 155 const char *pmu_name;
156 struct {
157 perf_evsel__sb_cb_t *cb;
158 void *data;
159 } side_band;
154}; 160};
155 161
156union u64_swap { 162union u64_swap {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 01b324c275b9..b9e693825873 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -18,6 +18,7 @@
18#include <sys/utsname.h> 18#include <sys/utsname.h>
19#include <linux/time64.h> 19#include <linux/time64.h>
20#include <dirent.h> 20#include <dirent.h>
21#include <bpf/libbpf.h>
21 22
22#include "evlist.h" 23#include "evlist.h"
23#include "evsel.h" 24#include "evsel.h"
@@ -40,6 +41,7 @@
40#include "time-utils.h" 41#include "time-utils.h"
41#include "units.h" 42#include "units.h"
42#include "cputopo.h" 43#include "cputopo.h"
44#include "bpf-event.h"
43 45
44#include "sane_ctype.h" 46#include "sane_ctype.h"
45 47
@@ -861,6 +863,104 @@ static int write_clockid(struct feat_fd *ff,
861 sizeof(ff->ph->env.clockid_res_ns)); 863 sizeof(ff->ph->env.clockid_res_ns));
862} 864}
863 865
866static int write_dir_format(struct feat_fd *ff,
867 struct perf_evlist *evlist __maybe_unused)
868{
869 struct perf_session *session;
870 struct perf_data *data;
871
872 session = container_of(ff->ph, struct perf_session, header);
873 data = session->data;
874
875 if (WARN_ON(!perf_data__is_dir(data)))
876 return -1;
877
878 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
879}
880
881#ifdef HAVE_LIBBPF_SUPPORT
882static int write_bpf_prog_info(struct feat_fd *ff,
883 struct perf_evlist *evlist __maybe_unused)
884{
885 struct perf_env *env = &ff->ph->env;
886 struct rb_root *root;
887 struct rb_node *next;
888 int ret;
889
890 down_read(&env->bpf_progs.lock);
891
892 ret = do_write(ff, &env->bpf_progs.infos_cnt,
893 sizeof(env->bpf_progs.infos_cnt));
894 if (ret < 0)
895 goto out;
896
897 root = &env->bpf_progs.infos;
898 next = rb_first(root);
899 while (next) {
900 struct bpf_prog_info_node *node;
901 size_t len;
902
903 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
904 next = rb_next(&node->rb_node);
905 len = sizeof(struct bpf_prog_info_linear) +
906 node->info_linear->data_len;
907
908 /* before writing to file, translate address to offset */
909 bpf_program__bpil_addr_to_offs(node->info_linear);
910 ret = do_write(ff, node->info_linear, len);
911 /*
912 * translate back to address even when do_write() fails,
913 * so that this function never changes the data.
914 */
915 bpf_program__bpil_offs_to_addr(node->info_linear);
916 if (ret < 0)
917 goto out;
918 }
919out:
920 up_read(&env->bpf_progs.lock);
921 return ret;
922}
923#else // HAVE_LIBBPF_SUPPORT
924static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
925 struct perf_evlist *evlist __maybe_unused)
926{
927 return 0;
928}
929#endif // HAVE_LIBBPF_SUPPORT
930
931static int write_bpf_btf(struct feat_fd *ff,
932 struct perf_evlist *evlist __maybe_unused)
933{
934 struct perf_env *env = &ff->ph->env;
935 struct rb_root *root;
936 struct rb_node *next;
937 int ret;
938
939 down_read(&env->bpf_progs.lock);
940
941 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
942 sizeof(env->bpf_progs.btfs_cnt));
943
944 if (ret < 0)
945 goto out;
946
947 root = &env->bpf_progs.btfs;
948 next = rb_first(root);
949 while (next) {
950 struct btf_node *node;
951
952 node = rb_entry(next, struct btf_node, rb_node);
953 next = rb_next(&node->rb_node);
954 ret = do_write(ff, &node->id,
955 sizeof(u32) * 2 + node->data_size);
956 if (ret < 0)
957 goto out;
958 }
959out:
960 up_read(&env->bpf_progs.lock);
961 return ret;
962}
963
864static int cpu_cache_level__sort(const void *a, const void *b) 964static int cpu_cache_level__sort(const void *a, const void *b)
865{ 965{
866 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 966 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1341,6 +1441,63 @@ static void print_clockid(struct feat_fd *ff, FILE *fp)
1341 ff->ph->env.clockid_res_ns * 1000); 1441 ff->ph->env.clockid_res_ns * 1000);
1342} 1442}
1343 1443
1444static void print_dir_format(struct feat_fd *ff, FILE *fp)
1445{
1446 struct perf_session *session;
1447 struct perf_data *data;
1448
1449 session = container_of(ff->ph, struct perf_session, header);
1450 data = session->data;
1451
1452 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1453}
1454
1455static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1456{
1457 struct perf_env *env = &ff->ph->env;
1458 struct rb_root *root;
1459 struct rb_node *next;
1460
1461 down_read(&env->bpf_progs.lock);
1462
1463 root = &env->bpf_progs.infos;
1464 next = rb_first(root);
1465
1466 while (next) {
1467 struct bpf_prog_info_node *node;
1468
1469 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1470 next = rb_next(&node->rb_node);
1471
1472 bpf_event__print_bpf_prog_info(&node->info_linear->info,
1473 env, fp);
1474 }
1475
1476 up_read(&env->bpf_progs.lock);
1477}
1478
1479static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1480{
1481 struct perf_env *env = &ff->ph->env;
1482 struct rb_root *root;
1483 struct rb_node *next;
1484
1485 down_read(&env->bpf_progs.lock);
1486
1487 root = &env->bpf_progs.btfs;
1488 next = rb_first(root);
1489
1490 while (next) {
1491 struct btf_node *node;
1492
1493 node = rb_entry(next, struct btf_node, rb_node);
1494 next = rb_next(&node->rb_node);
1495 fprintf(fp, "# btf info of id %u\n", node->id);
1496 }
1497
1498 up_read(&env->bpf_progs.lock);
1499}
1500
1344static void free_event_desc(struct perf_evsel *events) 1501static void free_event_desc(struct perf_evsel *events)
1345{ 1502{
1346 struct perf_evsel *evsel; 1503 struct perf_evsel *evsel;
@@ -2373,6 +2530,139 @@ static int process_clockid(struct feat_fd *ff,
2373 return 0; 2530 return 0;
2374} 2531}
2375 2532
2533static int process_dir_format(struct feat_fd *ff,
2534 void *_data __maybe_unused)
2535{
2536 struct perf_session *session;
2537 struct perf_data *data;
2538
2539 session = container_of(ff->ph, struct perf_session, header);
2540 data = session->data;
2541
2542 if (WARN_ON(!perf_data__is_dir(data)))
2543 return -1;
2544
2545 return do_read_u64(ff, &data->dir.version);
2546}
2547
2548#ifdef HAVE_LIBBPF_SUPPORT
2549static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2550{
2551 struct bpf_prog_info_linear *info_linear;
2552 struct bpf_prog_info_node *info_node;
2553 struct perf_env *env = &ff->ph->env;
2554 u32 count, i;
2555 int err = -1;
2556
2557 if (ff->ph->needs_swap) {
2558 pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
2559 return 0;
2560 }
2561
2562 if (do_read_u32(ff, &count))
2563 return -1;
2564
2565 down_write(&env->bpf_progs.lock);
2566
2567 for (i = 0; i < count; ++i) {
2568 u32 info_len, data_len;
2569
2570 info_linear = NULL;
2571 info_node = NULL;
2572 if (do_read_u32(ff, &info_len))
2573 goto out;
2574 if (do_read_u32(ff, &data_len))
2575 goto out;
2576
2577 if (info_len > sizeof(struct bpf_prog_info)) {
2578 pr_warning("detected invalid bpf_prog_info\n");
2579 goto out;
2580 }
2581
2582 info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2583 data_len);
2584 if (!info_linear)
2585 goto out;
2586 info_linear->info_len = sizeof(struct bpf_prog_info);
2587 info_linear->data_len = data_len;
2588 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2589 goto out;
2590 if (__do_read(ff, &info_linear->info, info_len))
2591 goto out;
2592 if (info_len < sizeof(struct bpf_prog_info))
2593 memset(((void *)(&info_linear->info)) + info_len, 0,
2594 sizeof(struct bpf_prog_info) - info_len);
2595
2596 if (__do_read(ff, info_linear->data, data_len))
2597 goto out;
2598
2599 info_node = malloc(sizeof(struct bpf_prog_info_node));
2600 if (!info_node)
2601 goto out;
2602
2603 /* after reading from file, translate offset to address */
2604 bpf_program__bpil_offs_to_addr(info_linear);
2605 info_node->info_linear = info_linear;
2606 perf_env__insert_bpf_prog_info(env, info_node);
2607 }
2608
2609 return 0;
2610out:
2611 free(info_linear);
2612 free(info_node);
2613 up_write(&env->bpf_progs.lock);
2614 return err;
2615}
2616#else // HAVE_LIBBPF_SUPPORT
/* Stub: without libbpf the BPF prog info section is silently skipped. */
2617static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2618{
2619 return 0;
2620}
2621#endif // HAVE_LIBBPF_SUPPORT
2622
2623static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2624{
2625 struct perf_env *env = &ff->ph->env;
2626 u32 count, i;
2627
2628 if (ff->ph->needs_swap) {
2629 pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2630 return 0;
2631 }
2632
2633 if (do_read_u32(ff, &count))
2634 return -1;
2635
2636 down_write(&env->bpf_progs.lock);
2637
2638 for (i = 0; i < count; ++i) {
2639 struct btf_node *node;
2640 u32 id, data_size;
2641
2642 if (do_read_u32(ff, &id))
2643 return -1;
2644 if (do_read_u32(ff, &data_size))
2645 return -1;
2646
2647 node = malloc(sizeof(struct btf_node) + data_size);
2648 if (!node)
2649 return -1;
2650
2651 node->id = id;
2652 node->data_size = data_size;
2653
2654 if (__do_read(ff, node->data, data_size)) {
2655 free(node);
2656 return -1;
2657 }
2658
2659 perf_env__insert_btf(env, node);
2660 }
2661
2662 up_write(&env->bpf_progs.lock);
2663 return 0;
2664}
2665
2376struct feature_ops { 2666struct feature_ops {
2377 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist); 2667 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
2378 void (*print)(struct feat_fd *ff, FILE *fp); 2668 void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2432,7 +2722,10 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2432 FEAT_OPN(CACHE, cache, true), 2722 FEAT_OPN(CACHE, cache, true),
2433 FEAT_OPR(SAMPLE_TIME, sample_time, false), 2723 FEAT_OPR(SAMPLE_TIME, sample_time, false),
2434 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), 2724 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
2435 FEAT_OPR(CLOCKID, clockid, false) 2725 FEAT_OPR(CLOCKID, clockid, false),
2726 FEAT_OPN(DIR_FORMAT, dir_format, false),
2727 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
2728 FEAT_OPR(BPF_BTF, bpf_btf, false),
2436}; 2729};
2437 2730
2438struct header_print_data { 2731struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 0d553ddca0a3..386da49e1bfa 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -39,6 +39,9 @@ enum {
39 HEADER_SAMPLE_TIME, 39 HEADER_SAMPLE_TIME,
40 HEADER_MEM_TOPOLOGY, 40 HEADER_MEM_TOPOLOGY,
41 HEADER_CLOCKID, 41 HEADER_CLOCKID,
42 HEADER_DIR_FORMAT,
43 HEADER_BPF_PROG_INFO,
44 HEADER_BPF_BTF,
42 HEADER_LAST_FEATURE, 45 HEADER_LAST_FEATURE,
43 HEADER_FEAT_BITS = 256, 46 HEADER_FEAT_BITS = 256,
44}; 47};
@@ -48,6 +51,10 @@ enum perf_header_version {
48 PERF_HEADER_VERSION_2, 51 PERF_HEADER_VERSION_2,
49}; 52};
50 53
54enum perf_dir_version {
55 PERF_DIR_VERSION = 1,
56};
57
51struct perf_file_section { 58struct perf_file_section {
52 u64 offset; 59 u64 offset;
53 u64 size; 60 u64 size;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index f9eb95bf3938..7ace7a10054d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -19,6 +19,7 @@
19#include <math.h> 19#include <math.h>
20#include <inttypes.h> 20#include <inttypes.h>
21#include <sys/param.h> 21#include <sys/param.h>
22#include <linux/time64.h>
22 23
23static bool hists__filter_entry_by_dso(struct hists *hists, 24static bool hists__filter_entry_by_dso(struct hists *hists,
24 struct hist_entry *he); 25 struct hist_entry *he);
@@ -192,6 +193,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
192 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); 193 hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
193 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); 194 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
194 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); 195 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
196 hists__new_col_len(hists, HISTC_TIME, 12);
195 197
196 if (h->srcline) { 198 if (h->srcline) {
197 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header)); 199 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
@@ -246,6 +248,14 @@ static void he_stat__add_cpumode_period(struct he_stat *he_stat,
246 } 248 }
247} 249}
248 250
251static long hist_time(unsigned long htime)
252{
253 unsigned long time_quantum = symbol_conf.time_quantum;
254 if (time_quantum)
255 return (htime / time_quantum) * time_quantum;
256 return htime;
257}
258
249static void he_stat__add_period(struct he_stat *he_stat, u64 period, 259static void he_stat__add_period(struct he_stat *he_stat, u64 period,
250 u64 weight) 260 u64 weight)
251{ 261{
@@ -426,6 +436,13 @@ static int hist_entry__init(struct hist_entry *he,
426 goto err_rawdata; 436 goto err_rawdata;
427 } 437 }
428 438
439 if (symbol_conf.res_sample) {
440 he->res_samples = calloc(sizeof(struct res_sample),
441 symbol_conf.res_sample);
442 if (!he->res_samples)
443 goto err_srcline;
444 }
445
429 INIT_LIST_HEAD(&he->pairs.node); 446 INIT_LIST_HEAD(&he->pairs.node);
430 thread__get(he->thread); 447 thread__get(he->thread);
431 he->hroot_in = RB_ROOT_CACHED; 448 he->hroot_in = RB_ROOT_CACHED;
@@ -436,6 +453,9 @@ static int hist_entry__init(struct hist_entry *he,
436 453
437 return 0; 454 return 0;
438 455
456err_srcline:
457 free(he->srcline);
458
439err_rawdata: 459err_rawdata:
440 free(he->raw_data); 460 free(he->raw_data);
441 461
@@ -593,6 +613,32 @@ out:
593 return he; 613 return he;
594} 614}
595 615
/*
 * Return a uniformly distributed value in [0, high) using rejection
 * sampling: values of random() below the "remainder" threshold are
 * discarded so that the modulo does not bias the result.
 * NOTE(review): the threshold assumes a full unsigned range while
 * random() only spans [0, RAND_MAX] — residual bias is tiny; confirm
 * this is acceptable for sample selection.
 */
static unsigned random_max(unsigned high)
{
	unsigned reject_below = -high % high;
	unsigned r;

	do {
		r = random();
	} while (r < reject_below);
	return r % high;
}
625
/*
 * Record a (time, cpu, tid) reference for this hist entry so individual
 * samples can be browsed later.  Slots fill sequentially until
 * symbol_conf.res_sample entries exist; after that each new sample
 * overwrites a uniformly random slot.
 * NOTE(review): once full this always stores the newest sample, biasing
 * retention toward recent samples (unlike classic reservoir sampling,
 * which would keep it only with probability k/n) — presumably intended;
 * confirm.
 */
626static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
627{
628 struct res_sample *r;
629 int j;
630
631 if (he->num_res < symbol_conf.res_sample) {
632 j = he->num_res++;
633 } else {
634 j = random_max(symbol_conf.res_sample);
635 }
636 r = &he->res_samples[j];
637 r->time = sample->time;
638 r->cpu = sample->cpu;
639 r->tid = sample->tid;
640}
641
596static struct hist_entry* 642static struct hist_entry*
597__hists__add_entry(struct hists *hists, 643__hists__add_entry(struct hists *hists,
598 struct addr_location *al, 644 struct addr_location *al,
@@ -635,10 +681,13 @@ __hists__add_entry(struct hists *hists,
635 .raw_data = sample->raw_data, 681 .raw_data = sample->raw_data,
636 .raw_size = sample->raw_size, 682 .raw_size = sample->raw_size,
637 .ops = ops, 683 .ops = ops,
684 .time = hist_time(sample->time),
638 }, *he = hists__findnew_entry(hists, &entry, al, sample_self); 685 }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
639 686
640 if (!hists->has_callchains && he && he->callchain_size != 0) 687 if (!hists->has_callchains && he && he->callchain_size != 0)
641 hists->has_callchains = true; 688 hists->has_callchains = true;
689 if (he && symbol_conf.res_sample)
690 hists__res_sample(he, sample);
642 return he; 691 return he;
643} 692}
644 693
@@ -1062,8 +1111,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1062 1111
1063 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent, 1112 err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1064 iter->evsel, al, max_stack_depth); 1113 iter->evsel, al, max_stack_depth);
1065 if (err) 1114 if (err) {
1115 map__put(alm);
1066 return err; 1116 return err;
1117 }
1067 1118
1068 err = iter->ops->prepare_entry(iter, al); 1119 err = iter->ops->prepare_entry(iter, al);
1069 if (err) 1120 if (err)
@@ -1162,6 +1213,7 @@ void hist_entry__delete(struct hist_entry *he)
1162 mem_info__zput(he->mem_info); 1213 mem_info__zput(he->mem_info);
1163 } 1214 }
1164 1215
1216 zfree(&he->res_samples);
1165 zfree(&he->stat_acc); 1217 zfree(&he->stat_acc);
1166 free_srcline(he->srcline); 1218 free_srcline(he->srcline);
1167 if (he->srcfile && he->srcfile[0]) 1219 if (he->srcfile && he->srcfile[0])
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 4af27fbab24f..76ff6c6d03b8 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -31,6 +31,7 @@ enum hist_filter {
31 31
32enum hist_column { 32enum hist_column {
33 HISTC_SYMBOL, 33 HISTC_SYMBOL,
34 HISTC_TIME,
34 HISTC_DSO, 35 HISTC_DSO,
35 HISTC_THREAD, 36 HISTC_THREAD,
36 HISTC_COMM, 37 HISTC_COMM,
@@ -432,9 +433,18 @@ struct hist_browser_timer {
432}; 433};
433 434
434struct annotation_options; 435struct annotation_options;
436struct res_sample;
437
438enum rstype {
439 A_NORMAL,
440 A_ASM,
441 A_SOURCE
442};
435 443
436#ifdef HAVE_SLANG_SUPPORT 444#ifdef HAVE_SLANG_SUPPORT
437#include "../ui/keysyms.h" 445#include "../ui/keysyms.h"
446void attr_to_script(char *buf, struct perf_event_attr *attr);
447
438int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel, 448int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
439 struct hist_browser_timer *hbt, 449 struct hist_browser_timer *hbt,
440 struct annotation_options *annotation_opts); 450 struct annotation_options *annotation_opts);
@@ -449,7 +459,13 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
449 struct perf_env *env, 459 struct perf_env *env,
450 bool warn_lost_event, 460 bool warn_lost_event,
451 struct annotation_options *annotation_options); 461 struct annotation_options *annotation_options);
452int script_browse(const char *script_opt); 462
463int script_browse(const char *script_opt, struct perf_evsel *evsel);
464
465void run_script(char *cmd);
466int res_sample_browse(struct res_sample *res_samples, int num_res,
467 struct perf_evsel *evsel, enum rstype rstype);
468void res_sample_init(void);
453#else 469#else
454static inline 470static inline
455int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, 471int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
@@ -478,11 +494,22 @@ static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
478 return 0; 494 return 0;
479} 495}
480 496
481static inline int script_browse(const char *script_opt __maybe_unused) 497static inline int script_browse(const char *script_opt __maybe_unused,
498 struct perf_evsel *evsel __maybe_unused)
482{ 499{
483 return 0; 500 return 0;
484} 501}
485 502
503static inline int res_sample_browse(struct res_sample *res_samples __maybe_unused,
504 int num_res __maybe_unused,
505 struct perf_evsel *evsel __maybe_unused,
506 enum rstype rstype __maybe_unused)
507{
508 return 0;
509}
510
511static inline void res_sample_init(void) {}
512
486#define K_LEFT -1000 513#define K_LEFT -1000
487#define K_RIGHT -2000 514#define K_RIGHT -2000
488#define K_SWITCH_INPUT_DATA -3000 515#define K_SWITCH_INPUT_DATA -3000
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 6e03db142091..872fab163585 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
251 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d)) 251 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
252 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n / 252 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
253 decoder->tsc_ctc_ratio_d; 253 decoder->tsc_ctc_ratio_d;
254
255 /*
256 * Allow for timestamps appearing to backwards because a TSC
257 * packet has slipped past a MTC packet, so allow 2 MTC ticks
258 * or ...
259 */
260 decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
261 decoder->tsc_ctc_ratio_n,
262 decoder->tsc_ctc_ratio_d);
263 } 254 }
264 /* ... or 0x100 paranoia */ 255
265 if (decoder->tsc_slip < 0x100) 256 /*
266 decoder->tsc_slip = 0x100; 257 * A TSC packet can slip past MTC packets so that the timestamp appears
258 * to go backwards. One estimate is that can be up to about 40 CPU
259 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
260 * slippage an order of magnitude more to be on the safe side.
261 */
262 decoder->tsc_slip = 0x10000;
267 263
268 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift); 264 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
269 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n); 265 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 61959aba7e27..3c520baa198c 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1421,6 +1421,20 @@ static void machine__set_kernel_mmap(struct machine *machine,
1421 machine->vmlinux_map->end = ~0ULL; 1421 machine->vmlinux_map->end = ~0ULL;
1422} 1422}
1423 1423
/*
 * Update the kernel map's address range.  The map is removed from kmaps
 * before the range changes and re-inserted afterwards so the tree's
 * ordering by start address stays valid.
 */
1424static void machine__update_kernel_mmap(struct machine *machine,
1425 u64 start, u64 end)
1426{
1427 struct map *map = machine__kernel_map(machine);
1428
	/* hold an extra reference: removal from kmaps drops one */
1429 map__get(map);
1430 map_groups__remove(&machine->kmaps, map);
1431
1432 machine__set_kernel_mmap(machine, start, end);
1433
1434 map_groups__insert(&machine->kmaps, map);
1435 map__put(map);
1436}
1437
1424int machine__create_kernel_maps(struct machine *machine) 1438int machine__create_kernel_maps(struct machine *machine)
1425{ 1439{
1426 struct dso *kernel = machine__get_kernel(machine); 1440 struct dso *kernel = machine__get_kernel(machine);
@@ -1453,17 +1467,11 @@ int machine__create_kernel_maps(struct machine *machine)
1453 goto out_put; 1467 goto out_put;
1454 } 1468 }
1455 1469
1456 /* we have a real start address now, so re-order the kmaps */ 1470 /*
1457 map = machine__kernel_map(machine); 1471 * we have a real start address now, so re-order the kmaps
1458 1472 * assume it's the last in the kmaps
1459 map__get(map); 1473 */
1460 map_groups__remove(&machine->kmaps, map); 1474 machine__update_kernel_mmap(machine, addr, ~0ULL);
1461
1462 /* assume it's the last in the kmaps */
1463 machine__set_kernel_mmap(machine, addr, ~0ULL);
1464
1465 map_groups__insert(&machine->kmaps, map);
1466 map__put(map);
1467 } 1475 }
1468 1476
1469 if (machine__create_extra_kernel_maps(machine, kernel)) 1477 if (machine__create_extra_kernel_maps(machine, kernel))
@@ -1599,7 +1607,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
1599 if (strstr(kernel->long_name, "vmlinux")) 1607 if (strstr(kernel->long_name, "vmlinux"))
1600 dso__set_short_name(kernel, "[kernel.vmlinux]", false); 1608 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1601 1609
1602 machine__set_kernel_mmap(machine, event->mmap.start, 1610 machine__update_kernel_mmap(machine, event->mmap.start,
1603 event->mmap.start + event->mmap.len); 1611 event->mmap.start + event->mmap.len);
1604 1612
1605 /* 1613 /*
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index fbeb0c6efaa6..e32628cd20a7 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -577,10 +577,25 @@ static void __maps__purge(struct maps *maps)
577 } 577 }
578} 578}
579 579
/*
 * Empty the by-name rbtree, dropping the reference each node held.
 * The successor is fetched before the current node is erased so
 * iteration stays valid.  Caller holds maps->lock for writing.
 */
580static void __maps__purge_names(struct maps *maps)
581{
582 struct rb_root *root = &maps->names;
583 struct rb_node *next = rb_first(root);
584
585 while (next) {
586 struct map *pos = rb_entry(next, struct map, rb_node_name);
587
588 next = rb_next(&pos->rb_node_name);
589 rb_erase_init(&pos->rb_node_name, root);
590 map__put(pos);
591 }
592}
593
580static void maps__exit(struct maps *maps) 594static void maps__exit(struct maps *maps)
581{ 595{
582 down_write(&maps->lock); 596 down_write(&maps->lock);
583 __maps__purge(maps); 597 __maps__purge(maps);
598 __maps__purge_names(maps);
584 up_write(&maps->lock); 599 up_write(&maps->lock);
585} 600}
586 601
@@ -917,6 +932,9 @@ static void __maps__remove(struct maps *maps, struct map *map)
917{ 932{
918 rb_erase_init(&map->rb_node, &maps->entries); 933 rb_erase_init(&map->rb_node, &maps->entries);
919 map__put(map); 934 map__put(map);
935
936 rb_erase_init(&map->rb_node_name, &maps->names);
937 map__put(map);
920} 938}
921 939
922void maps__remove(struct maps *maps, struct map *map) 940void maps__remove(struct maps *maps, struct map *map)
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index ea523d3b248f..989fed6f43b5 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -270,6 +270,8 @@ static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
270 "FINAL", 270 "FINAL",
271 "ROUND", 271 "ROUND",
272 "HALF ", 272 "HALF ",
273 "TOP ",
274 "TIME ",
273 }; 275 };
274 int err; 276 int err;
275 bool show_progress = false; 277 bool show_progress = false;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4dcc01b2532c..5ef4939408f2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2271,6 +2271,7 @@ static bool is_event_supported(u8 type, unsigned config)
2271 perf_evsel__delete(evsel); 2271 perf_evsel__delete(evsel);
2272 } 2272 }
2273 2273
2274 thread_map__put(tmap);
2274 return ret; 2275 return ret;
2275} 2276}
2276 2277
@@ -2341,6 +2342,7 @@ void print_sdt_events(const char *subsys_glob, const char *event_glob,
2341 printf(" %-50s [%s]\n", buf, "SDT event"); 2342 printf(" %-50s [%s]\n", buf, "SDT event");
2342 free(buf); 2343 free(buf);
2343 } 2344 }
2345 free(path);
2344 } else 2346 } else
2345 printf(" %-50s [%s]\n", nd->s, "SDT event"); 2347 printf(" %-50s [%s]\n", nd->s, "SDT event");
2346 if (nd2) { 2348 if (nd2) {
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 6199a3174ab9..e0429f4ef335 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -732,10 +732,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
732 732
733 if (!is_arm_pmu_core(name)) { 733 if (!is_arm_pmu_core(name)) {
734 pname = pe->pmu ? pe->pmu : "cpu"; 734 pname = pe->pmu ? pe->pmu : "cpu";
735
736 /*
737 * uncore alias may be from different PMU
738 * with common prefix
739 */
740 if (pmu_is_uncore(name) &&
741 !strncmp(pname, name, strlen(pname)))
742 goto new_alias;
743
735 if (strcmp(pname, name)) 744 if (strcmp(pname, name))
736 continue; 745 continue;
737 } 746 }
738 747
748new_alias:
739 /* need type casts to override 'const' */ 749 /* need type casts to override 'const' */
740 __perf_pmu__new_alias(head, NULL, (char *)pe->name, 750 __perf_pmu__new_alias(head, NULL, (char *)pe->name,
741 (char *)pe->desc, (char *)pe->event, 751 (char *)pe->desc, (char *)pe->event,
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a1b8d9649ca7..198e09ff611e 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -160,8 +160,10 @@ static struct map *kernel_get_module_map(const char *module)
160 if (module && strchr(module, '/')) 160 if (module && strchr(module, '/'))
161 return dso__new_map(module); 161 return dso__new_map(module);
162 162
163 if (!module) 163 if (!module) {
164 module = "kernel"; 164 pos = machine__kernel_map(host_machine);
165 return map__get(pos);
166 }
165 167
166 for (pos = maps__first(maps); pos; pos = map__next(pos)) { 168 for (pos = maps__first(maps); pos; pos = map__next(pos)) {
167 /* short_name is "[module]" */ 169 /* short_name is "[module]" */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index db643f3c2b95..b17f1c9bc965 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -132,6 +132,7 @@ struct perf_session *perf_session__new(struct perf_data *data,
132 ordered_events__init(&session->ordered_events, 132 ordered_events__init(&session->ordered_events,
133 ordered_events__deliver_event, NULL); 133 ordered_events__deliver_event, NULL);
134 134
135 perf_env__init(&session->header.env);
135 if (data) { 136 if (data) {
136 if (perf_data__open(data)) 137 if (perf_data__open(data))
137 goto out_delete; 138 goto out_delete;
@@ -152,6 +153,10 @@ struct perf_session *perf_session__new(struct perf_data *data,
152 } 153 }
153 154
154 perf_evlist__init_trace_event_sample_raw(session->evlist); 155 perf_evlist__init_trace_event_sample_raw(session->evlist);
156
157 /* Open the directory data. */
158 if (data->is_dir && perf_data__open_dir(data))
159 goto out_delete;
155 } 160 }
156 } else { 161 } else {
157 session->machines.host.env = &perf_env; 162 session->machines.host.env = &perf_env;
@@ -1843,10 +1848,17 @@ fetch_mmaped_event(struct perf_session *session,
1843#define NUM_MMAPS 128 1848#define NUM_MMAPS 128
1844#endif 1849#endif
1845 1850
1851struct reader;
1852
1853typedef s64 (*reader_cb_t)(struct perf_session *session,
1854 union perf_event *event,
1855 u64 file_offset);
1856
1846struct reader { 1857struct reader {
1847 int fd; 1858 int fd;
1848 u64 data_size; 1859 u64 data_size;
1849 u64 data_offset; 1860 u64 data_offset;
1861 reader_cb_t process;
1850}; 1862};
1851 1863
1852static int 1864static int
@@ -1917,7 +1929,7 @@ more:
1917 size = event->header.size; 1929 size = event->header.size;
1918 1930
1919 if (size < sizeof(struct perf_event_header) || 1931 if (size < sizeof(struct perf_event_header) ||
1920 (skip = perf_session__process_event(session, event, file_pos)) < 0) { 1932 (skip = rd->process(session, event, file_pos)) < 0) {
1921 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1933 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1922 file_offset + head, event->header.size, 1934 file_offset + head, event->header.size,
1923 event->header.type); 1935 event->header.type);
@@ -1943,12 +1955,20 @@ out:
1943 return err; 1955 return err;
1944} 1956}
1945 1957
/*
 * Default reader callback: forward the mmaped event straight to the
 * ordinary per-session event processing.
 */
1958static s64 process_simple(struct perf_session *session,
1959 union perf_event *event,
1960 u64 file_offset)
1961{
1962 return perf_session__process_event(session, event, file_offset);
1963}
1964
1946static int __perf_session__process_events(struct perf_session *session) 1965static int __perf_session__process_events(struct perf_session *session)
1947{ 1966{
1948 struct reader rd = { 1967 struct reader rd = {
1949 .fd = perf_data__fd(session->data), 1968 .fd = perf_data__fd(session->data),
1950 .data_size = session->header.data_size, 1969 .data_size = session->header.data_size,
1951 .data_offset = session->header.data_offset, 1970 .data_offset = session->header.data_offset,
1971 .process = process_simple,
1952 }; 1972 };
1953 struct ordered_events *oe = &session->ordered_events; 1973 struct ordered_events *oe = &session->ordered_events;
1954 struct perf_tool *tool = session->tool; 1974 struct perf_tool *tool = session->tool;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index d2299e912e59..5d2518e89fc4 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -3,6 +3,7 @@
3#include <inttypes.h> 3#include <inttypes.h>
4#include <regex.h> 4#include <regex.h>
5#include <linux/mman.h> 5#include <linux/mman.h>
6#include <linux/time64.h>
6#include "sort.h" 7#include "sort.h"
7#include "hist.h" 8#include "hist.h"
8#include "comm.h" 9#include "comm.h"
@@ -12,9 +13,11 @@
12#include "evsel.h" 13#include "evsel.h"
13#include "evlist.h" 14#include "evlist.h"
14#include "strlist.h" 15#include "strlist.h"
16#include "strbuf.h"
15#include <traceevent/event-parse.h> 17#include <traceevent/event-parse.h>
16#include "mem-events.h" 18#include "mem-events.h"
17#include "annotate.h" 19#include "annotate.h"
20#include "time-utils.h"
18#include <linux/kernel.h> 21#include <linux/kernel.h>
19 22
20regex_t parent_regex; 23regex_t parent_regex;
@@ -654,6 +657,42 @@ struct sort_entry sort_socket = {
654 .se_width_idx = HISTC_SOCKET, 657 .se_width_idx = HISTC_SOCKET,
655}; 658};
656 659
660/* --sort time */
661
/*
 * --sort time comparator: difference of the (quantized) sample times.
 * Computed as right - left, matching the convention of the other
 * sort__*_cmp helpers in this file.
 */
662static int64_t
663sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
664{
665 return right->time - left->time;
666}
667
/*
 * Format the entry's timestamp into bf for the Time sort column.
 * With symbol_conf.nanosecs set, prints seconds.nanoseconds directly;
 * otherwise defers to timestamp__scnprintf_usec() for usec precision.
 * Returns the number of characters written by repsep_snprintf().
 */
668static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
669 size_t size, unsigned int width)
670{
671 unsigned long secs;
672 unsigned long long nsecs;
673 char he_time[32];
674
675 nsecs = he->time;
676 secs = nsecs / NSEC_PER_SEC;
677 nsecs -= secs * NSEC_PER_SEC;
678
679 if (symbol_conf.nanosecs)
680 snprintf(he_time, sizeof he_time, "%5lu.%09llu: ",
681 secs, nsecs);
682 else
683 timestamp__scnprintf_usec(he->time, he_time,
684 sizeof(he_time));
685
686 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
687}
688
689struct sort_entry sort_time = {
690 .se_header = "Time",
691 .se_cmp = sort__time_cmp,
692 .se_snprintf = hist_entry__time_snprintf,
693 .se_width_idx = HISTC_TIME,
694};
695
657/* --sort trace */ 696/* --sort trace */
658 697
659static char *get_trace_output(struct hist_entry *he) 698static char *get_trace_output(struct hist_entry *he)
@@ -1634,6 +1673,7 @@ static struct sort_dimension common_sort_dimensions[] = {
1634 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), 1673 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
1635 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), 1674 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
1636 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), 1675 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
1676 DIM(SORT_TIME, "time", sort_time),
1637}; 1677};
1638 1678
1639#undef DIM 1679#undef DIM
@@ -3068,3 +3108,54 @@ void reset_output_field(void)
3068 reset_dimensions(); 3108 reset_dimensions();
3069 perf_hpp__reset_output_field(&perf_hpp_list); 3109 perf_hpp__reset_output_field(&perf_hpp_list);
3070} 3110}
3111
3112#define INDENT (3*8 + 1)
3113
/*
 * Append one sort-key name to the help string, wrapping to a new
 * indented line once the running line length (*llen) reaches 75.
 */
3114static void add_key(struct strbuf *sb, const char *str, int *llen)
3115{
3116 if (*llen >= 75) {
3117 strbuf_addstr(sb, "\n\t\t\t ");
3118 *llen = INDENT;
3119 }
3120 strbuf_addf(sb, " %s", str);
3121 *llen += strlen(str) + 1;
3122}
3123
3124static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
3125 int *llen)
3126{
3127 int i;
3128
3129 for (i = 0; i < n; i++)
3130 add_key(sb, s[i].name, llen);
3131}
3132
3133static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
3134 int *llen)
3135{
3136 int i;
3137
3138 for (i = 0; i < n; i++)
3139 add_key(sb, s[i].name, llen);
3140}
3141
/*
 * Build a wrapped help string listing every available sort key, prefixed
 * by the given text.  Returns a heap-allocated string detached from the
 * strbuf; the caller owns it.
 * NOTE(review): callers appear to keep it for the lifetime of the
 * process (usage text), so it is presumably never freed — confirm.
 */
3142const char *sort_help(const char *prefix)
3143{
3144 struct strbuf sb;
3145 char *s;
3146 int len = strlen(prefix) + INDENT;
3147
3148 strbuf_init(&sb, 300);
3149 strbuf_addstr(&sb, prefix);
3150 add_hpp_sort_string(&sb, hpp_sort_dimensions,
3151 ARRAY_SIZE(hpp_sort_dimensions), &len);
3152 add_sort_string(&sb, common_sort_dimensions,
3153 ARRAY_SIZE(common_sort_dimensions), &len);
3154 add_sort_string(&sb, bstack_sort_dimensions,
3155 ARRAY_SIZE(bstack_sort_dimensions), &len);
3156 add_sort_string(&sb, memory_sort_dimensions,
3157 ARRAY_SIZE(memory_sort_dimensions), &len);
3158 s = strbuf_detach(&sb, NULL);
3159 strbuf_release(&sb);
3160 return s;
3161}
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 2fbee0b1011c..ce376a73f964 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -47,6 +47,12 @@ extern struct sort_entry sort_srcline;
47extern enum sort_type sort__first_dimension; 47extern enum sort_type sort__first_dimension;
48extern const char default_mem_sort_order[]; 48extern const char default_mem_sort_order[];
49 49
50struct res_sample {
51 u64 time;
52 int cpu;
53 int tid;
54};
55
50struct he_stat { 56struct he_stat {
51 u64 period; 57 u64 period;
52 u64 period_sys; 58 u64 period_sys;
@@ -135,10 +141,13 @@ struct hist_entry {
135 char *srcfile; 141 char *srcfile;
136 struct symbol *parent; 142 struct symbol *parent;
137 struct branch_info *branch_info; 143 struct branch_info *branch_info;
144 long time;
138 struct hists *hists; 145 struct hists *hists;
139 struct mem_info *mem_info; 146 struct mem_info *mem_info;
140 void *raw_data; 147 void *raw_data;
141 u32 raw_size; 148 u32 raw_size;
149 int num_res;
150 struct res_sample *res_samples;
142 void *trace_output; 151 void *trace_output;
143 struct perf_hpp_list *hpp_list; 152 struct perf_hpp_list *hpp_list;
144 struct hist_entry *parent_he; 153 struct hist_entry *parent_he;
@@ -231,6 +240,7 @@ enum sort_type {
231 SORT_DSO_SIZE, 240 SORT_DSO_SIZE,
232 SORT_CGROUP_ID, 241 SORT_CGROUP_ID,
233 SORT_SYM_IPC_NULL, 242 SORT_SYM_IPC_NULL,
243 SORT_TIME,
234 244
235 /* branch stack specific sort keys */ 245 /* branch stack specific sort keys */
236 __SORT_BRANCH_STACK, 246 __SORT_BRANCH_STACK,
@@ -286,6 +296,8 @@ void reset_output_field(void);
286void sort__setup_elide(FILE *fp); 296void sort__setup_elide(FILE *fp);
287void perf_hpp__set_elide(int idx, bool elide); 297void perf_hpp__set_elide(int idx, bool elide);
288 298
299const char *sort_help(const char *prefix);
300
289int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset); 301int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
290 302
291bool is_strict_order(const char *order); 303bool is_strict_order(const char *order);
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 4d40515307b8..2856cc9d5a31 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -291,10 +291,8 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel
291 break; 291 break;
292 case AGGR_GLOBAL: 292 case AGGR_GLOBAL:
293 aggr->val += count->val; 293 aggr->val += count->val;
294 if (config->scale) { 294 aggr->ena += count->ena;
295 aggr->ena += count->ena; 295 aggr->run += count->run;
296 aggr->run += count->run;
297 }
298 case AGGR_UNSET: 296 case AGGR_UNSET:
299 default: 297 default:
300 break; 298 break;
@@ -442,10 +440,8 @@ int create_perf_stat_counter(struct perf_evsel *evsel,
442 struct perf_event_attr *attr = &evsel->attr; 440 struct perf_event_attr *attr = &evsel->attr;
443 struct perf_evsel *leader = evsel->leader; 441 struct perf_evsel *leader = evsel->leader;
444 442
445 if (config->scale) { 443 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
446 attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | 444 PERF_FORMAT_TOTAL_TIME_RUNNING;
447 PERF_FORMAT_TOTAL_TIME_RUNNING;
448 }
449 445
450 /* 446 /*
451 * The event is part of non trivial group, let's enable 447 * The event is part of non trivial group, let's enable
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 758bf5f74e6e..5cbad55cd99d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -6,6 +6,7 @@
6#include <string.h> 6#include <string.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/mman.h> 8#include <linux/mman.h>
9#include <linux/time64.h>
9#include <sys/types.h> 10#include <sys/types.h>
10#include <sys/stat.h> 11#include <sys/stat.h>
11#include <sys/param.h> 12#include <sys/param.h>
@@ -39,15 +40,18 @@ int vmlinux_path__nr_entries;
39char **vmlinux_path; 40char **vmlinux_path;
40 41
41struct symbol_conf symbol_conf = { 42struct symbol_conf symbol_conf = {
43 .nanosecs = false,
42 .use_modules = true, 44 .use_modules = true,
43 .try_vmlinux_path = true, 45 .try_vmlinux_path = true,
44 .demangle = true, 46 .demangle = true,
45 .demangle_kernel = false, 47 .demangle_kernel = false,
46 .cumulate_callchain = true, 48 .cumulate_callchain = true,
49 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
47 .show_hist_headers = true, 50 .show_hist_headers = true,
48 .symfs = "", 51 .symfs = "",
49 .event_group = true, 52 .event_group = true,
50 .inline_name = true, 53 .inline_name = true,
54 .res_sample = 0,
51}; 55};
52 56
53static enum dso_binary_type binary_type_symtab[] = { 57static enum dso_binary_type binary_type_symtab[] = {
@@ -1451,6 +1455,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1451 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: 1455 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1452 return true; 1456 return true;
1453 1457
1458 case DSO_BINARY_TYPE__BPF_PROG_INFO:
1454 case DSO_BINARY_TYPE__NOT_FOUND: 1459 case DSO_BINARY_TYPE__NOT_FOUND:
1455 default: 1460 default:
1456 return false; 1461 return false;
diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h
index fffea68c1203..6c55fa6fccec 100644
--- a/tools/perf/util/symbol_conf.h
+++ b/tools/perf/util/symbol_conf.h
@@ -8,6 +8,7 @@ struct strlist;
8struct intlist; 8struct intlist;
9 9
10struct symbol_conf { 10struct symbol_conf {
11 bool nanosecs;
11 unsigned short priv_size; 12 unsigned short priv_size;
12 bool try_vmlinux_path, 13 bool try_vmlinux_path,
13 init_annotation, 14 init_annotation,
@@ -55,6 +56,7 @@ struct symbol_conf {
55 *sym_list_str, 56 *sym_list_str,
56 *col_width_list_str, 57 *col_width_list_str,
57 *bt_stop_list_str; 58 *bt_stop_list_str;
59 unsigned long time_quantum;
58 struct strlist *dso_list, 60 struct strlist *dso_list,
59 *comm_list, 61 *comm_list,
60 *sym_list, 62 *sym_list,
@@ -66,6 +68,7 @@ struct symbol_conf {
66 struct intlist *pid_list, 68 struct intlist *pid_list,
67 *tid_list; 69 *tid_list;
68 const char *symfs; 70 const char *symfs;
71 int res_sample;
69}; 72};
70 73
71extern struct symbol_conf symbol_conf; 74extern struct symbol_conf symbol_conf;
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
index 0f53baec660e..20663a460df3 100644
--- a/tools/perf/util/time-utils.c
+++ b/tools/perf/util/time-utils.c
@@ -453,6 +453,14 @@ int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
453 return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec); 453 return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
454} 454}
455 455
456int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz)
457{
458 u64 sec = timestamp / NSEC_PER_SEC,
459 nsec = timestamp % NSEC_PER_SEC;
460
461 return scnprintf(buf, sz, "%" PRIu64 ".%09" PRIu64, sec, nsec);
462}
463
456int fetch_current_timestamp(char *buf, size_t sz) 464int fetch_current_timestamp(char *buf, size_t sz)
457{ 465{
458 struct timeval tv; 466 struct timeval tv;
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
index b923de44e36f..72a42ea1d513 100644
--- a/tools/perf/util/time-utils.h
+++ b/tools/perf/util/time-utils.h
@@ -30,6 +30,7 @@ int perf_time__parse_for_ranges(const char *str, struct perf_session *session,
30 int *range_size, int *range_num); 30 int *range_size, int *range_num);
31 31
32int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz); 32int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
33int timestamp__scnprintf_nsec(u64 timestamp, char *buf, size_t sz);
33 34
34int fetch_current_timestamp(char *buf, size_t sz); 35int fetch_current_timestamp(char *buf, size_t sz);
35 36
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 9327c0ddc3a5..c3fad065c89c 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -5077,6 +5077,9 @@ int fork_it(char **argv)
5077 signal(SIGQUIT, SIG_IGN); 5077 signal(SIGQUIT, SIG_IGN);
5078 if (waitpid(child_pid, &status, 0) == -1) 5078 if (waitpid(child_pid, &status, 0) == -1)
5079 err(status, "waitpid"); 5079 err(status, "waitpid");
5080
5081 if (WIFEXITED(status))
5082 status = WEXITSTATUS(status);
5080 } 5083 }
5081 /* 5084 /*
5082 * n.b. fork_it() does not check for errors from for_all_cpus() 5085 * n.b. fork_it() does not check for errors from for_all_cpus()
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index c9433a496d54..c81fc350f7ad 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
180 (void *) BPF_FUNC_sk_fullsock; 180 (void *) BPF_FUNC_sk_fullsock;
181static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) = 181static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
182 (void *) BPF_FUNC_tcp_sock; 182 (void *) BPF_FUNC_tcp_sock;
183static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
184 (void *) BPF_FUNC_get_listener_sock;
183static int (*bpf_skb_ecn_set_ce)(void *ctx) = 185static int (*bpf_skb_ecn_set_ce)(void *ctx) =
184 (void *) BPF_FUNC_skb_ecn_set_ce; 186 (void *) BPF_FUNC_skb_ecn_set_ce;
185 187
diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c
index 90f8a206340a..ee99368c595c 100644
--- a/tools/testing/selftests/bpf/prog_tests/map_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c
@@ -37,7 +37,7 @@ void test_map_lock(void)
37 const char *file = "./test_map_lock.o"; 37 const char *file = "./test_map_lock.o";
38 int prog_fd, map_fd[2], vars[17] = {}; 38 int prog_fd, map_fd[2], vars[17] = {};
39 pthread_t thread_id[6]; 39 pthread_t thread_id[6];
40 struct bpf_object *obj; 40 struct bpf_object *obj = NULL;
41 int err = 0, key = 0, i; 41 int err = 0, key = 0, i;
42 void *ret; 42 void *ret;
43 43
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
index 9a573a9675d7..114ebe6a438e 100644
--- a/tools/testing/selftests/bpf/prog_tests/spinlock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -5,7 +5,7 @@ void test_spinlock(void)
5{ 5{
6 const char *file = "./test_spin_lock.o"; 6 const char *file = "./test_spin_lock.o";
7 pthread_t thread_id[4]; 7 pthread_t thread_id[4];
8 struct bpf_object *obj; 8 struct bpf_object *obj = NULL;
9 int prog_fd; 9 int prog_fd;
10 int err = 0, i; 10 int err = 0, i;
11 void *ret; 11 void *ret;
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
index de1a43e8f610..37328f148538 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -8,38 +8,51 @@
8#include "bpf_helpers.h" 8#include "bpf_helpers.h"
9#include "bpf_endian.h" 9#include "bpf_endian.h"
10 10
11enum bpf_array_idx { 11enum bpf_addr_array_idx {
12 SRV_IDX, 12 ADDR_SRV_IDX,
13 CLI_IDX, 13 ADDR_CLI_IDX,
14 __NR_BPF_ARRAY_IDX, 14 __NR_BPF_ADDR_ARRAY_IDX,
15};
16
17enum bpf_result_array_idx {
18 EGRESS_SRV_IDX,
19 EGRESS_CLI_IDX,
20 INGRESS_LISTEN_IDX,
21 __NR_BPF_RESULT_ARRAY_IDX,
22};
23
24enum bpf_linum_array_idx {
25 EGRESS_LINUM_IDX,
26 INGRESS_LINUM_IDX,
27 __NR_BPF_LINUM_ARRAY_IDX,
15}; 28};
16 29
17struct bpf_map_def SEC("maps") addr_map = { 30struct bpf_map_def SEC("maps") addr_map = {
18 .type = BPF_MAP_TYPE_ARRAY, 31 .type = BPF_MAP_TYPE_ARRAY,
19 .key_size = sizeof(__u32), 32 .key_size = sizeof(__u32),
20 .value_size = sizeof(struct sockaddr_in6), 33 .value_size = sizeof(struct sockaddr_in6),
21 .max_entries = __NR_BPF_ARRAY_IDX, 34 .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
22}; 35};
23 36
24struct bpf_map_def SEC("maps") sock_result_map = { 37struct bpf_map_def SEC("maps") sock_result_map = {
25 .type = BPF_MAP_TYPE_ARRAY, 38 .type = BPF_MAP_TYPE_ARRAY,
26 .key_size = sizeof(__u32), 39 .key_size = sizeof(__u32),
27 .value_size = sizeof(struct bpf_sock), 40 .value_size = sizeof(struct bpf_sock),
28 .max_entries = __NR_BPF_ARRAY_IDX, 41 .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
29}; 42};
30 43
31struct bpf_map_def SEC("maps") tcp_sock_result_map = { 44struct bpf_map_def SEC("maps") tcp_sock_result_map = {
32 .type = BPF_MAP_TYPE_ARRAY, 45 .type = BPF_MAP_TYPE_ARRAY,
33 .key_size = sizeof(__u32), 46 .key_size = sizeof(__u32),
34 .value_size = sizeof(struct bpf_tcp_sock), 47 .value_size = sizeof(struct bpf_tcp_sock),
35 .max_entries = __NR_BPF_ARRAY_IDX, 48 .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
36}; 49};
37 50
38struct bpf_map_def SEC("maps") linum_map = { 51struct bpf_map_def SEC("maps") linum_map = {
39 .type = BPF_MAP_TYPE_ARRAY, 52 .type = BPF_MAP_TYPE_ARRAY,
40 .key_size = sizeof(__u32), 53 .key_size = sizeof(__u32),
41 .value_size = sizeof(__u32), 54 .value_size = sizeof(__u32),
42 .max_entries = 1, 55 .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
43}; 56};
44 57
45static bool is_loopback6(__u32 *a6) 58static bool is_loopback6(__u32 *a6)
@@ -100,18 +113,20 @@ static void tpcpy(struct bpf_tcp_sock *dst,
100 113
101#define RETURN { \ 114#define RETURN { \
102 linum = __LINE__; \ 115 linum = __LINE__; \
103 bpf_map_update_elem(&linum_map, &idx0, &linum, 0); \ 116 bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
104 return 1; \ 117 return 1; \
105} 118}
106 119
107SEC("cgroup_skb/egress") 120SEC("cgroup_skb/egress")
108int read_sock_fields(struct __sk_buff *skb) 121int egress_read_sock_fields(struct __sk_buff *skb)
109{ 122{
110 __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx; 123 __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
111 struct sockaddr_in6 *srv_sa6, *cli_sa6; 124 struct sockaddr_in6 *srv_sa6, *cli_sa6;
112 struct bpf_tcp_sock *tp, *tp_ret; 125 struct bpf_tcp_sock *tp, *tp_ret;
113 struct bpf_sock *sk, *sk_ret; 126 struct bpf_sock *sk, *sk_ret;
114 __u32 linum, idx0 = 0; 127 __u32 linum, linum_idx;
128
129 linum_idx = EGRESS_LINUM_IDX;
115 130
116 sk = skb->sk; 131 sk = skb->sk;
117 if (!sk || sk->state == 10) 132 if (!sk || sk->state == 10)
@@ -132,14 +147,55 @@ int read_sock_fields(struct __sk_buff *skb)
132 RETURN; 147 RETURN;
133 148
134 if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port)) 149 if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
135 idx = srv_idx; 150 result_idx = EGRESS_SRV_IDX;
136 else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port)) 151 else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
137 idx = cli_idx; 152 result_idx = EGRESS_CLI_IDX;
138 else 153 else
139 RETURN; 154 RETURN;
140 155
141 sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx); 156 sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
142 tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx); 157 tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
158 if (!sk_ret || !tp_ret)
159 RETURN;
160
161 skcpy(sk_ret, sk);
162 tpcpy(tp_ret, tp);
163
164 RETURN;
165}
166
167SEC("cgroup_skb/ingress")
168int ingress_read_sock_fields(struct __sk_buff *skb)
169{
170 __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
171 struct bpf_tcp_sock *tp, *tp_ret;
172 struct bpf_sock *sk, *sk_ret;
173 struct sockaddr_in6 *srv_sa6;
174 __u32 linum, linum_idx;
175
176 linum_idx = INGRESS_LINUM_IDX;
177
178 sk = skb->sk;
179 if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
180 RETURN;
181
182 srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
183 if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
184 RETURN;
185
186 if (sk->state != 10 && sk->state != 12)
187 RETURN;
188
189 sk = bpf_get_listener_sock(sk);
190 if (!sk)
191 RETURN;
192
193 tp = bpf_tcp_sock(sk);
194 if (!tp)
195 RETURN;
196
197 sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
198 tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
143 if (!sk_ret || !tp_ret) 199 if (!sk_ret || !tp_ret)
144 RETURN; 200 RETURN;
145 201
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 38797aa627a7..23e3b314ca60 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5874,6 +5874,50 @@ const struct btf_dedup_test dedup_tests[] = {
5874 .dont_resolve_fwds = false, 5874 .dont_resolve_fwds = false,
5875 }, 5875 },
5876}, 5876},
5877{
5878 .descr = "dedup: enum fwd resolution",
5879 .input = {
5880 .raw_types = {
5881 /* [1] fwd enum 'e1' before full enum */
5882 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
5883 /* [2] full enum 'e1' after fwd */
5884 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
5885 BTF_ENUM_ENC(NAME_NTH(2), 123),
5886 /* [3] full enum 'e2' before fwd */
5887 BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
5888 BTF_ENUM_ENC(NAME_NTH(4), 456),
5889 /* [4] fwd enum 'e2' after full enum */
5890 BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
5891 /* [5] incompatible fwd enum with different size */
5892 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
5893 /* [6] incompatible full enum with different value */
5894 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
5895 BTF_ENUM_ENC(NAME_NTH(2), 321),
5896 BTF_END_RAW,
5897 },
5898 BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
5899 },
5900 .expect = {
5901 .raw_types = {
5902 /* [1] full enum 'e1' */
5903 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
5904 BTF_ENUM_ENC(NAME_NTH(2), 123),
5905 /* [2] full enum 'e2' */
5906 BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
5907 BTF_ENUM_ENC(NAME_NTH(4), 456),
5908 /* [3] incompatible fwd enum with different size */
5909 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
5910 /* [4] incompatible full enum with different value */
5911 BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
5912 BTF_ENUM_ENC(NAME_NTH(2), 321),
5913 BTF_END_RAW,
5914 },
5915 BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
5916 },
5917 .opts = {
5918 .dont_resolve_fwds = false,
5919 },
5920},
5877 5921
5878}; 5922};
5879 5923
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
index bc8943938bf5..dcae7f664dce 100644
--- a/tools/testing/selftests/bpf/test_sock_fields.c
+++ b/tools/testing/selftests/bpf/test_sock_fields.c
@@ -16,10 +16,23 @@
16#include "cgroup_helpers.h" 16#include "cgroup_helpers.h"
17#include "bpf_rlimit.h" 17#include "bpf_rlimit.h"
18 18
19enum bpf_array_idx { 19enum bpf_addr_array_idx {
20 SRV_IDX, 20 ADDR_SRV_IDX,
21 CLI_IDX, 21 ADDR_CLI_IDX,
22 __NR_BPF_ARRAY_IDX, 22 __NR_BPF_ADDR_ARRAY_IDX,
23};
24
25enum bpf_result_array_idx {
26 EGRESS_SRV_IDX,
27 EGRESS_CLI_IDX,
28 INGRESS_LISTEN_IDX,
29 __NR_BPF_RESULT_ARRAY_IDX,
30};
31
32enum bpf_linum_array_idx {
33 EGRESS_LINUM_IDX,
34 INGRESS_LINUM_IDX,
35 __NR_BPF_LINUM_ARRAY_IDX,
23}; 36};
24 37
25#define CHECK(condition, tag, format...) ({ \ 38#define CHECK(condition, tag, format...) ({ \
@@ -41,8 +54,16 @@ static int linum_map_fd;
41static int addr_map_fd; 54static int addr_map_fd;
42static int tp_map_fd; 55static int tp_map_fd;
43static int sk_map_fd; 56static int sk_map_fd;
44static __u32 srv_idx = SRV_IDX; 57
45static __u32 cli_idx = CLI_IDX; 58static __u32 addr_srv_idx = ADDR_SRV_IDX;
59static __u32 addr_cli_idx = ADDR_CLI_IDX;
60
61static __u32 egress_srv_idx = EGRESS_SRV_IDX;
62static __u32 egress_cli_idx = EGRESS_CLI_IDX;
63static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
64
65static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
66static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
46 67
47static void init_loopback6(struct sockaddr_in6 *sa6) 68static void init_loopback6(struct sockaddr_in6 *sa6)
48{ 69{
@@ -93,29 +114,46 @@ static void print_tp(const struct bpf_tcp_sock *tp)
93 114
94static void check_result(void) 115static void check_result(void)
95{ 116{
96 struct bpf_tcp_sock srv_tp, cli_tp; 117 struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
97 struct bpf_sock srv_sk, cli_sk; 118 struct bpf_sock srv_sk, cli_sk, listen_sk;
98 __u32 linum, idx0 = 0; 119 __u32 ingress_linum, egress_linum;
99 int err; 120 int err;
100 121
101 err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum); 122 err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
123 &egress_linum);
102 CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)", 124 CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
103 "err:%d errno:%d", err, errno); 125 "err:%d errno:%d", err, errno);
104 126
105 err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk); 127 err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
106 CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)", 128 &ingress_linum);
129 CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
130 "err:%d errno:%d", err, errno);
131
132 err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
133 CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
134 "err:%d errno:%d", err, errno);
135 err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
136 CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
137 "err:%d errno:%d", err, errno);
138
139 err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
140 CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
107 "err:%d errno:%d", err, errno); 141 "err:%d errno:%d", err, errno);
108 err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp); 142 err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
109 CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)", 143 CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
110 "err:%d errno:%d", err, errno); 144 "err:%d errno:%d", err, errno);
111 145
112 err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk); 146 err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
113 CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)", 147 CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
114 "err:%d errno:%d", err, errno); 148 "err:%d errno:%d", err, errno);
115 err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp); 149 err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
116 CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)", 150 CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
117 "err:%d errno:%d", err, errno); 151 "err:%d errno:%d", err, errno);
118 152
153 printf("listen_sk: ");
154 print_sk(&listen_sk);
155 printf("\n");
156
119 printf("srv_sk: "); 157 printf("srv_sk: ");
120 print_sk(&srv_sk); 158 print_sk(&srv_sk);
121 printf("\n"); 159 printf("\n");
@@ -124,6 +162,10 @@ static void check_result(void)
124 print_sk(&cli_sk); 162 print_sk(&cli_sk);
125 printf("\n"); 163 printf("\n");
126 164
165 printf("listen_tp: ");
166 print_tp(&listen_tp);
167 printf("\n");
168
127 printf("srv_tp: "); 169 printf("srv_tp: ");
128 print_tp(&srv_tp); 170 print_tp(&srv_tp);
129 printf("\n"); 171 printf("\n");
@@ -132,6 +174,19 @@ static void check_result(void)
132 print_tp(&cli_tp); 174 print_tp(&cli_tp);
133 printf("\n"); 175 printf("\n");
134 176
177 CHECK(listen_sk.state != 10 ||
178 listen_sk.family != AF_INET6 ||
179 listen_sk.protocol != IPPROTO_TCP ||
180 memcmp(listen_sk.src_ip6, &in6addr_loopback,
181 sizeof(listen_sk.src_ip6)) ||
182 listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
183 listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
184 listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
185 listen_sk.dst_port,
186 "Unexpected listen_sk",
187 "Check listen_sk output. ingress_linum:%u",
188 ingress_linum);
189
135 CHECK(srv_sk.state == 10 || 190 CHECK(srv_sk.state == 10 ||
136 !srv_sk.state || 191 !srv_sk.state ||
137 srv_sk.family != AF_INET6 || 192 srv_sk.family != AF_INET6 ||
@@ -142,7 +197,8 @@ static void check_result(void)
142 sizeof(srv_sk.dst_ip6)) || 197 sizeof(srv_sk.dst_ip6)) ||
143 srv_sk.src_port != ntohs(srv_sa6.sin6_port) || 198 srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
144 srv_sk.dst_port != cli_sa6.sin6_port, 199 srv_sk.dst_port != cli_sa6.sin6_port,
145 "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum); 200 "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
201 egress_linum);
146 202
147 CHECK(cli_sk.state == 10 || 203 CHECK(cli_sk.state == 10 ||
148 !cli_sk.state || 204 !cli_sk.state ||
@@ -154,21 +210,31 @@ static void check_result(void)
154 sizeof(cli_sk.dst_ip6)) || 210 sizeof(cli_sk.dst_ip6)) ||
155 cli_sk.src_port != ntohs(cli_sa6.sin6_port) || 211 cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
156 cli_sk.dst_port != srv_sa6.sin6_port, 212 cli_sk.dst_port != srv_sa6.sin6_port,
157 "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum); 213 "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
214 egress_linum);
215
216 CHECK(listen_tp.data_segs_out ||
217 listen_tp.data_segs_in ||
218 listen_tp.total_retrans ||
219 listen_tp.bytes_acked,
220 "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
221 ingress_linum);
158 222
159 CHECK(srv_tp.data_segs_out != 1 || 223 CHECK(srv_tp.data_segs_out != 1 ||
160 srv_tp.data_segs_in || 224 srv_tp.data_segs_in ||
161 srv_tp.snd_cwnd != 10 || 225 srv_tp.snd_cwnd != 10 ||
162 srv_tp.total_retrans || 226 srv_tp.total_retrans ||
163 srv_tp.bytes_acked != DATA_LEN, 227 srv_tp.bytes_acked != DATA_LEN,
164 "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum); 228 "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
229 egress_linum);
165 230
166 CHECK(cli_tp.data_segs_out || 231 CHECK(cli_tp.data_segs_out ||
167 cli_tp.data_segs_in != 1 || 232 cli_tp.data_segs_in != 1 ||
168 cli_tp.snd_cwnd != 10 || 233 cli_tp.snd_cwnd != 10 ||
169 cli_tp.total_retrans || 234 cli_tp.total_retrans ||
170 cli_tp.bytes_received != DATA_LEN, 235 cli_tp.bytes_received != DATA_LEN,
171 "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum); 236 "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
237 egress_linum);
172} 238}
173 239
174static void test(void) 240static void test(void)
@@ -211,10 +277,10 @@ static void test(void)
211 err, errno); 277 err, errno);
212 278
213 /* Update addr_map with srv_sa6 and cli_sa6 */ 279 /* Update addr_map with srv_sa6 and cli_sa6 */
214 err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0); 280 err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
215 CHECK(err, "map_update", "err:%d errno:%d", err, errno); 281 CHECK(err, "map_update", "err:%d errno:%d", err, errno);
216 282
217 err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0); 283 err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
218 CHECK(err, "map_update", "err:%d errno:%d", err, errno); 284 CHECK(err, "map_update", "err:%d errno:%d", err, errno);
219 285
220 /* Connect from cli_sa6 to srv_sa6 */ 286 /* Connect from cli_sa6 to srv_sa6 */
@@ -273,9 +339,9 @@ int main(int argc, char **argv)
273 struct bpf_prog_load_attr attr = { 339 struct bpf_prog_load_attr attr = {
274 .file = "test_sock_fields_kern.o", 340 .file = "test_sock_fields_kern.o",
275 .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 341 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
276 .expected_attach_type = BPF_CGROUP_INET_EGRESS,
277 }; 342 };
278 int cgroup_fd, prog_fd, err; 343 int cgroup_fd, egress_fd, ingress_fd, err;
344 struct bpf_program *ingress_prog;
279 struct bpf_object *obj; 345 struct bpf_object *obj;
280 struct bpf_map *map; 346 struct bpf_map *map;
281 347
@@ -293,12 +359,24 @@ int main(int argc, char **argv)
293 err = join_cgroup(TEST_CGROUP); 359 err = join_cgroup(TEST_CGROUP);
294 CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno); 360 CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
295 361
296 err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); 362 err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
297 CHECK(err, "bpf_prog_load_xattr()", "err:%d", err); 363 CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
298 364
299 err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0); 365 ingress_prog = bpf_object__find_program_by_title(obj,
366 "cgroup_skb/ingress");
367 CHECK(!ingress_prog,
368 "bpf_object__find_program_by_title(cgroup_skb/ingress)",
369 "not found");
370 ingress_fd = bpf_program__fd(ingress_prog);
371
372 err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
300 CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)", 373 CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)",
301 "err:%d errno%d", err, errno); 374 "err:%d errno%d", err, errno);
375
376 err = bpf_prog_attach(ingress_fd, cgroup_fd,
377 BPF_CGROUP_INET_INGRESS, 0);
378 CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_INGRESS)",
379 "err:%d errno%d", err, errno);
302 close(cgroup_fd); 380 close(cgroup_fd);
303 381
304 map = bpf_object__find_map_by_name(obj, "addr_map"); 382 map = bpf_object__find_map_by_name(obj, "addr_map");
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 4004891afa9c..f2ccae39ee66 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -1940,3 +1940,28 @@
1940 .errstr = "!read_ok", 1940 .errstr = "!read_ok",
1941 .result = REJECT, 1941 .result = REJECT,
1942}, 1942},
1943{
1944 "calls: cross frame pruning - liveness propagation",
1945 .insns = {
1946 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1947 BPF_MOV64_IMM(BPF_REG_8, 0),
1948 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1949 BPF_MOV64_IMM(BPF_REG_8, 1),
1950 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
1951 BPF_MOV64_IMM(BPF_REG_9, 0),
1952 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
1953 BPF_MOV64_IMM(BPF_REG_9, 1),
1954 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
1955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
1956 BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
1957 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
1958 BPF_MOV64_IMM(BPF_REG_0, 0),
1959 BPF_EXIT_INSN(),
1960 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1961 BPF_EXIT_INSN(),
1962 },
1963 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
1964 .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
1965 .errstr = "!read_ok",
1966 .result = REJECT,
1967},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 3ed3593bd8b6..923f2110072d 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -605,3 +605,171 @@
605 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 605 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
606 .result = ACCEPT, 606 .result = ACCEPT,
607}, 607},
608{
609 "reference tracking: use ptr from bpf_tcp_sock() after release",
610 .insns = {
611 BPF_SK_LOOKUP,
612 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
613 BPF_EXIT_INSN(),
614 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
615 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
616 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
617 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
618 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
619 BPF_EMIT_CALL(BPF_FUNC_sk_release),
620 BPF_EXIT_INSN(),
621 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
623 BPF_EMIT_CALL(BPF_FUNC_sk_release),
624 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
625 BPF_EXIT_INSN(),
626 },
627 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
628 .result = REJECT,
629 .errstr = "invalid mem access",
630},
631{
632 "reference tracking: use ptr from bpf_sk_fullsock() after release",
633 .insns = {
634 BPF_SK_LOOKUP,
635 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
636 BPF_EXIT_INSN(),
637 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
638 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
639 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
640 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
642 BPF_EMIT_CALL(BPF_FUNC_sk_release),
643 BPF_EXIT_INSN(),
644 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
646 BPF_EMIT_CALL(BPF_FUNC_sk_release),
647 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
648 BPF_EXIT_INSN(),
649 },
650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
651 .result = REJECT,
652 .errstr = "invalid mem access",
653},
654{
655 "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
656 .insns = {
657 BPF_SK_LOOKUP,
658 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
659 BPF_EXIT_INSN(),
660 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
661 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
662 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
663 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
664 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
665 BPF_EMIT_CALL(BPF_FUNC_sk_release),
666 BPF_EXIT_INSN(),
667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
668 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
669 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
670 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
671 BPF_EMIT_CALL(BPF_FUNC_sk_release),
672 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
673 BPF_EXIT_INSN(),
674 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
675 BPF_EXIT_INSN(),
676 },
677 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
678 .result = REJECT,
679 .errstr = "invalid mem access",
680},
681{
682 "reference tracking: use sk after bpf_sk_release(tp)",
683 .insns = {
684 BPF_SK_LOOKUP,
685 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
686 BPF_EXIT_INSN(),
687 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
688 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
689 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
690 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
692 BPF_EMIT_CALL(BPF_FUNC_sk_release),
693 BPF_EXIT_INSN(),
694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
695 BPF_EMIT_CALL(BPF_FUNC_sk_release),
696 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
697 BPF_EXIT_INSN(),
698 },
699 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
700 .result = REJECT,
701 .errstr = "invalid mem access",
702},
703{
704 "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
705 .insns = {
706 BPF_SK_LOOKUP,
707 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
708 BPF_EXIT_INSN(),
709 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
710 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
711 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
712 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
714 BPF_EMIT_CALL(BPF_FUNC_sk_release),
715 BPF_EXIT_INSN(),
716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
717 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
718 BPF_EMIT_CALL(BPF_FUNC_sk_release),
719 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
720 BPF_EXIT_INSN(),
721 },
722 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
723 .result = ACCEPT,
724},
725{
726 "reference tracking: bpf_sk_release(listen_sk)",
727 .insns = {
728 BPF_SK_LOOKUP,
729 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
730 BPF_EXIT_INSN(),
731 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
732 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
733 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
734 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
736 BPF_EMIT_CALL(BPF_FUNC_sk_release),
737 BPF_EXIT_INSN(),
738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
739 BPF_EMIT_CALL(BPF_FUNC_sk_release),
740 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
741 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
742 BPF_EMIT_CALL(BPF_FUNC_sk_release),
743 BPF_EXIT_INSN(),
744 },
745 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
746 .result = REJECT,
747 .errstr = "reference has not been acquired before",
748},
749{
750 /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
751 "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
752 .insns = {
753 BPF_SK_LOOKUP,
754 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
755 BPF_EXIT_INSN(),
756 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
758 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
759 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
761 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
762 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
763 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
765 BPF_EMIT_CALL(BPF_FUNC_sk_release),
766 BPF_EXIT_INSN(),
767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
768 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
769 BPF_EMIT_CALL(BPF_FUNC_sk_release),
770 BPF_EXIT_INSN(),
771 },
772 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
773 .result = REJECT,
774 .errstr = "invalid mem access",
775},
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 0ddfdf76aba5..416436231fab 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -342,7 +342,7 @@
342 }, 342 },
343 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 343 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
344 .result = REJECT, 344 .result = REJECT,
345 .errstr = "type=sock_common expected=sock", 345 .errstr = "reference has not been acquired before",
346}, 346},
347{ 347{
348 "bpf_sk_release(bpf_sk_fullsock(skb->sk))", 348 "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
@@ -380,5 +380,5 @@
380 }, 380 },
381 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 381 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
382 .result = REJECT, 382 .result = REJECT,
383 .errstr = "type=tcp_sock expected=sock", 383 .errstr = "reference has not been acquired before",
384}, 384},
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 3c1f4bdf9000..7514fcea91a7 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -29,8 +29,8 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
29INSTALL_HDR_PATH = $(top_srcdir)/usr 29INSTALL_HDR_PATH = $(top_srcdir)/usr
30LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/ 30LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
31LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include 31LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
32CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I.. 32CFLAGS += -O2 -g -std=gnu99 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
33LDFLAGS += -pthread 33LDFLAGS += -pthread -no-pie
34 34
35# After inclusion, $(OUTPUT) is defined and 35# After inclusion, $(OUTPUT) is defined and
36# $(TEST_GEN_PROGS) starts with $(OUTPUT)/ 36# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a84785b02557..07b71ad9734a 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -102,6 +102,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
102struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid); 102struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
103void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 103void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
104int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid); 104int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
105void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
105void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, 106void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
106 struct kvm_mp_state *mp_state); 107 struct kvm_mp_state *mp_state);
107void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs); 108void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index b52cfdefecbf..efa0aad8b3c6 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1121,6 +1121,22 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1121 return rc; 1121 return rc;
1122} 1122}
1123 1123
1124void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
1125{
1126 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1127 int ret;
1128
1129 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1130
1131 vcpu->state->immediate_exit = 1;
1132 ret = ioctl(vcpu->fd, KVM_RUN, NULL);
1133 vcpu->state->immediate_exit = 0;
1134
1135 TEST_ASSERT(ret == -1 && errno == EINTR,
1136 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1137 ret, errno);
1138}
1139
1124/* 1140/*
1125 * VM VCPU Set MP State 1141 * VM VCPU Set MP State
1126 * 1142 *
diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index d503a51fad30..7c2c4d4055a8 100644
--- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -87,22 +87,25 @@ int main(int argc, char *argv[])
87 while (1) { 87 while (1) {
88 rc = _vcpu_run(vm, VCPU_ID); 88 rc = _vcpu_run(vm, VCPU_ID);
89 89
90 if (run->exit_reason == KVM_EXIT_IO) { 90 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
91 switch (get_ucall(vm, VCPU_ID, &uc)) { 91 "Unexpected exit reason: %u (%s),\n",
92 case UCALL_SYNC: 92 run->exit_reason,
93 /* emulate hypervisor clearing CR4.OSXSAVE */ 93 exit_reason_str(run->exit_reason));
94 vcpu_sregs_get(vm, VCPU_ID, &sregs); 94
95 sregs.cr4 &= ~X86_CR4_OSXSAVE; 95 switch (get_ucall(vm, VCPU_ID, &uc)) {
96 vcpu_sregs_set(vm, VCPU_ID, &sregs); 96 case UCALL_SYNC:
97 break; 97 /* emulate hypervisor clearing CR4.OSXSAVE */
98 case UCALL_ABORT: 98 vcpu_sregs_get(vm, VCPU_ID, &sregs);
99 TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit."); 99 sregs.cr4 &= ~X86_CR4_OSXSAVE;
100 break; 100 vcpu_sregs_set(vm, VCPU_ID, &sregs);
101 case UCALL_DONE: 101 break;
102 goto done; 102 case UCALL_ABORT:
103 default: 103 TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
104 TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); 104 break;
105 } 105 case UCALL_DONE:
106 goto done;
107 default:
108 TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
106 } 109 }
107 } 110 }
108 111
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 4b3f556265f1..30f75856cf39 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -134,6 +134,11 @@ int main(int argc, char *argv[])
134 134
135 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 135 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
136 136
137 if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
138 fprintf(stderr, "immediate_exit not available, skipping test\n");
139 exit(KSFT_SKIP);
140 }
141
137 /* Create VM */ 142 /* Create VM */
138 vm = vm_create_default(VCPU_ID, 0, guest_code); 143 vm = vm_create_default(VCPU_ID, 0, guest_code);
139 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 144 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -156,8 +161,6 @@ int main(int argc, char *argv[])
156 stage, run->exit_reason, 161 stage, run->exit_reason,
157 exit_reason_str(run->exit_reason)); 162 exit_reason_str(run->exit_reason));
158 163
159 memset(&regs1, 0, sizeof(regs1));
160 vcpu_regs_get(vm, VCPU_ID, &regs1);
161 switch (get_ucall(vm, VCPU_ID, &uc)) { 164 switch (get_ucall(vm, VCPU_ID, &uc)) {
162 case UCALL_ABORT: 165 case UCALL_ABORT:
163 TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], 166 TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
@@ -176,6 +179,17 @@ int main(int argc, char *argv[])
176 uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx", 179 uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
177 stage, (ulong)uc.args[1]); 180 stage, (ulong)uc.args[1]);
178 181
182 /*
183 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
184 * guest state is consistent only after userspace re-enters the
185 * kernel with KVM_RUN. Complete IO prior to migrating state
186 * to a new VM.
187 */
188 vcpu_run_complete_io(vm, VCPU_ID);
189
190 memset(&regs1, 0, sizeof(regs1));
191 vcpu_regs_get(vm, VCPU_ID, &regs1);
192
179 state = vcpu_save_state(vm, VCPU_ID); 193 state = vcpu_save_state(vm, VCPU_ID);
180 kvm_vm_release(vm); 194 kvm_vm_release(vm);
181 195
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 5970cee6d05f..b074ea9b6fe8 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -286,5 +286,30 @@
286 "teardown": [ 286 "teardown": [
287 "$TC action flush action bpf" 287 "$TC action flush action bpf"
288 ] 288 ]
289 },
290 {
291 "id": "b8a1",
292 "name": "Replace bpf action with invalid goto_chain control",
293 "category": [
294 "actions",
295 "bpf"
296 ],
297 "setup": [
298 [
299 "$TC actions flush action bpf",
300 0,
301 1,
302 255
303 ],
304 "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90"
305 ],
306 "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0",
307 "expExitCode": "255",
308 "verifyCmd": "$TC action list action bpf",
309 "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90",
310 "matchCount": "1",
311 "teardown": [
312 "$TC action flush action bpf"
313 ]
289 } 314 }
290] 315]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
index 13147a1f5731..cadde8f41fcd 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
@@ -287,5 +287,30 @@
287 "teardown": [ 287 "teardown": [
288 "$TC actions flush action connmark" 288 "$TC actions flush action connmark"
289 ] 289 ]
290 },
291 {
292 "id": "c506",
293 "name": "Replace connmark with invalid goto chain control",
294 "category": [
295 "actions",
296 "connmark"
297 ],
298 "setup": [
299 [
300 "$TC actions flush action connmark",
301 0,
302 1,
303 255
304 ],
305 "$TC actions add action connmark pass index 90"
306 ],
307 "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0",
308 "expExitCode": "255",
309 "verifyCmd": "$TC actions get action connmark index 90",
310 "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref",
311 "matchCount": "1",
312 "teardown": [
313 "$TC actions flush action connmark"
314 ]
290 } 315 }
291] 316]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
index a022792d392a..ddabb2fbb7c7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
@@ -500,5 +500,30 @@
500 "matchPattern": "^[ \t]+index [0-9]+ ref", 500 "matchPattern": "^[ \t]+index [0-9]+ ref",
501 "matchCount": "0", 501 "matchCount": "0",
502 "teardown": [] 502 "teardown": []
503 },
504 {
505 "id": "d128",
506 "name": "Replace csum action with invalid goto chain control",
507 "category": [
508 "actions",
509 "csum"
510 ],
511 "setup": [
512 [
513 "$TC actions flush action csum",
514 0,
515 1,
516 255
517 ],
518 "$TC actions add action csum iph index 90"
519 ],
520 "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0",
521 "expExitCode": "255",
522 "verifyCmd": "$TC actions get action csum index 90",
523 "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref",
524 "matchCount": "1",
525 "teardown": [
526 "$TC actions flush action csum"
527 ]
503 } 528 }
504] 529]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
index 89189a03ce3d..814b7a8a478b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
@@ -560,5 +560,30 @@
560 "teardown": [ 560 "teardown": [
561 "$TC actions flush action gact" 561 "$TC actions flush action gact"
562 ] 562 ]
563 },
564 {
565 "id": "ca89",
566 "name": "Replace gact action with invalid goto chain control",
567 "category": [
568 "actions",
569 "gact"
570 ],
571 "setup": [
572 [
573 "$TC actions flush action gact",
574 0,
575 1,
576 255
577 ],
578 "$TC actions add action pass random determ drop 2 index 90"
579 ],
580 "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0",
581 "expExitCode": "255",
582 "verifyCmd": "$TC actions list action gact",
583 "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref",
584 "matchCount": "1",
585 "teardown": [
586 "$TC actions flush action gact"
587 ]
563 } 588 }
564] 589]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 0da3545cabdb..c13a68b98fc7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -1060,5 +1060,30 @@
1060 "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4", 1060 "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4",
1061 "matchCount": "0", 1061 "matchCount": "0",
1062 "teardown": [] 1062 "teardown": []
1063 },
1064 {
1065 "id": "a0e2",
1066 "name": "Replace ife encode action with invalid goto chain control",
1067 "category": [
1068 "actions",
1069 "ife"
1070 ],
1071 "setup": [
1072 [
1073 "$TC actions flush action ife",
1074 0,
1075 1,
1076 255
1077 ],
1078 "$TC actions add action ife encode allow mark pass index 90"
1079 ],
1080 "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0",
1081 "expExitCode": "255",
1082 "verifyCmd": "$TC actions get action ife index 90",
1083 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref",
1084 "matchCount": "1",
1085 "teardown": [
1086 "$TC actions flush action ife"
1087 ]
1063 } 1088 }
1064] 1089]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
index db49fd0f8445..6e5fb3d25681 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
@@ -434,5 +434,30 @@
434 "teardown": [ 434 "teardown": [
435 "$TC actions flush action mirred" 435 "$TC actions flush action mirred"
436 ] 436 ]
437 },
438 {
439 "id": "2a9a",
440 "name": "Replace mirred action with invalid goto chain control",
441 "category": [
442 "actions",
443 "mirred"
444 ],
445 "setup": [
446 [
447 "$TC actions flush action mirred",
448 0,
449 1,
450 255
451 ],
452 "$TC actions add action mirred ingress mirror dev lo drop index 90"
453 ],
454 "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0",
455 "expExitCode": "255",
456 "verifyCmd": "$TC actions get action mirred index 90",
457 "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref",
458 "matchCount": "1",
459 "teardown": [
460 "$TC actions flush action mirred"
461 ]
437 } 462 }
438] 463]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
index 0080dc2fd41c..bc12c1ccad30 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
@@ -589,5 +589,30 @@
589 "teardown": [ 589 "teardown": [
590 "$TC actions flush action nat" 590 "$TC actions flush action nat"
591 ] 591 ]
592 },
593 {
594 "id": "4b12",
595 "name": "Replace nat action with invalid goto chain control",
596 "category": [
597 "actions",
598 "nat"
599 ],
600 "setup": [
601 [
602 "$TC actions flush action nat",
603 0,
604 1,
605 255
606 ],
607 "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90"
608 ],
609 "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0",
610 "expExitCode": "255",
611 "verifyCmd": "$TC actions get action nat index 90",
612 "matchPattern": "action order [0-9]+: nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref",
613 "matchCount": "1",
614 "teardown": [
615 "$TC actions flush action nat"
616 ]
592 } 617 }
593] 618]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
new file mode 100644
index 000000000000..b73ceb9e28b1
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
@@ -0,0 +1,51 @@
1[
2 {
3 "id": "319a",
4 "name": "Add pedit action that mangles IP TTL",
5 "category": [
6 "actions",
7 "pedit"
8 ],
9 "setup": [
10 [
11 "$TC actions flush action pedit",
12 0,
13 1,
14 255
15 ]
16 ],
17 "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10",
18 "expExitCode": "0",
19 "verifyCmd": "$TC actions ls action pedit",
20 "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 1 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff",
21 "matchCount": "1",
22 "teardown": [
23 "$TC actions flush action pedit"
24 ]
25 },
26 {
27 "id": "7e67",
28 "name": "Replace pedit action with invalid goto chain",
29 "category": [
30 "actions",
31 "pedit"
32 ],
33 "setup": [
34 [
35 "$TC actions flush action pedit",
36 0,
37 1,
38 255
39 ],
40 "$TC actions add action pedit ex munge ip ttl set 10 pass index 90"
41 ],
42 "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0",
43 "expExitCode": "255",
44 "verifyCmd": "$TC actions ls action pedit",
45 "matchPattern": "action order [0-9]+: pedit action pass keys 1.*index 90 ref.*key #0 at ipv4\\+8: val 0a000000 mask 00ffffff",
46 "matchCount": "1",
47 "teardown": [
48 "$TC actions flush action pedit"
49 ]
50 }
51]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index 4086a50a670e..b8268da5adaa 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -739,5 +739,30 @@
739 "teardown": [ 739 "teardown": [
740 "$TC actions flush action police" 740 "$TC actions flush action police"
741 ] 741 ]
742 },
743 {
744 "id": "689e",
745 "name": "Replace police action with invalid goto chain control",
746 "category": [
747 "actions",
748 "police"
749 ],
750 "setup": [
751 [
752 "$TC actions flush action police",
753 0,
754 1,
755 255
756 ],
757 "$TC actions add action police rate 3mbit burst 250k drop index 90"
758 ],
759 "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0",
760 "expExitCode": "255",
761 "verifyCmd": "$TC actions get action police index 90",
762 "matchPattern": "action order [0-9]*: police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop",
763 "matchCount": "1",
764 "teardown": [
765 "$TC actions flush action police"
766 ]
742 } 767 }
743] 768]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
index 3aca33c00039..27f0acaed880 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
@@ -584,5 +584,30 @@
584 "teardown": [ 584 "teardown": [
585 "$TC actions flush action sample" 585 "$TC actions flush action sample"
586 ] 586 ]
587 },
588 {
589 "id": "0a6e",
590 "name": "Replace sample action with invalid goto chain control",
591 "category": [
592 "actions",
593 "sample"
594 ],
595 "setup": [
596 [
597 "$TC actions flush action sample",
598 0,
599 1,
600 255
601 ],
602 "$TC actions add action sample rate 1024 group 4 pass index 90"
603 ],
604 "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0",
605 "expExitCode": "255",
606 "verifyCmd": "$TC actions list action sample",
607 "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90",
608 "matchCount": "1",
609 "teardown": [
610 "$TC actions flush action sample"
611 ]
587 } 612 }
588] 613]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
index e89a7aa4012d..8e8c1ae12260 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
@@ -126,5 +126,30 @@
126 "teardown": [ 126 "teardown": [
127 "" 127 ""
128 ] 128 ]
129 },
130 {
131 "id": "b776",
132 "name": "Replace simple action with invalid goto chain control",
133 "category": [
134 "actions",
135 "simple"
136 ],
137 "setup": [
138 [
139 "$TC actions flush action simple",
140 0,
141 1,
142 255
143 ],
144 "$TC actions add action simple sdata \"hello\" pass index 90"
145 ],
146 "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index 90 cookie c1a0c1a0",
147 "expExitCode": "255",
148 "verifyCmd": "$TC actions list action simple",
149 "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref",
150 "matchCount": "1",
151 "teardown": [
152 "$TC actions flush action simple"
153 ]
129 } 154 }
130] 155]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index 5aaf593b914a..ecd96eda7f6a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -484,5 +484,30 @@
484 "teardown": [ 484 "teardown": [
485 "$TC actions flush action skbedit" 485 "$TC actions flush action skbedit"
486 ] 486 ]
487 },
488 {
489 "id": "1b2b",
490 "name": "Replace skbedit action with invalid goto_chain control",
491 "category": [
492 "actions",
493 "skbedit"
494 ],
495 "setup": [
496 [
497 "$TC actions flush action skbedit",
498 0,
499 1,
500 255
501 ],
502 "$TC actions add action skbedit ptype host pass index 90"
503 ],
504 "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0",
505 "expExitCode": "255",
506 "verifyCmd": "$TC actions list action skbedit",
507 "matchPattern": "action order [0-9]*: skbedit ptype host pass.*index 90 ref",
508 "matchCount": "1",
509 "teardown": [
510 "$TC actions flush action skbedit"
511 ]
487 } 512 }
488] 513]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
index fe3326e939c1..6eb4c4f97060 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
@@ -392,5 +392,30 @@
392 "teardown": [ 392 "teardown": [
393 "$TC actions flush action skbmod" 393 "$TC actions flush action skbmod"
394 ] 394 ]
395 },
396 {
397 "id": "b651",
398 "name": "Replace skbmod action with invalid goto_chain control",
399 "category": [
400 "actions",
401 "skbmod"
402 ],
403 "setup": [
404 [
405 "$TC actions flush action skbmod",
406 0,
407 1,
408 255
409 ],
410 "$TC actions add action skbmod set etype 0x1111 pass index 90"
411 ],
412 "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0",
413 "expExitCode": "255",
414 "verifyCmd": "$TC actions ls action skbmod",
415 "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref",
416 "matchCount": "1",
417 "teardown": [
418 "$TC actions flush action skbmod"
419 ]
395 } 420 }
396] 421]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index e7e15a7336b6..28453a445fdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -884,5 +884,30 @@
884 "teardown": [ 884 "teardown": [
885 "$TC actions flush action tunnel_key" 885 "$TC actions flush action tunnel_key"
886 ] 886 ]
887 },
888 {
889 "id": "8242",
890 "name": "Replace tunnel_key set action with invalid goto chain",
891 "category": [
892 "actions",
893 "tunnel_key"
894 ],
895 "setup": [
896 [
897 "$TC actions flush action tunnel_key",
898 0,
899 1,
900 255
901 ],
902 "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90"
903 ],
904 "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0",
905 "expExitCode": "255",
906 "verifyCmd": "$TC actions get action tunnel_key index 90",
907 "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref",
908 "matchCount": "1",
909 "teardown": [
910 "$TC actions flush action tunnel_key"
911 ]
887 } 912 }
888] 913]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index 69ea09eefffc..cc7c7d758008 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -688,5 +688,30 @@
688 "teardown": [ 688 "teardown": [
689 "$TC actions flush action vlan" 689 "$TC actions flush action vlan"
690 ] 690 ]
691 },
692 {
693 "id": "e394",
694 "name": "Replace vlan push action with invalid goto chain control",
695 "category": [
696 "actions",
697 "vlan"
698 ],
699 "setup": [
700 [
701 "$TC actions flush action vlan",
702 0,
703 1,
704 255
705 ],
706 "$TC actions add action vlan push id 500 pass index 90"
707 ],
708 "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0",
709 "expExitCode": "255",
710 "verifyCmd": "$TC actions get action vlan index 90",
711 "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref",
712 "matchCount": "1",
713 "teardown": [
714 "$TC actions flush action vlan"
715 ]
691 } 716 }
692] 717]
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 264d92da3240..370bd6c5e6cb 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -222,7 +222,7 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
222 } 222 }
223 } 223 }
224 224
225 if (used_lrs) { 225 if (used_lrs || cpu_if->its_vpe.its_vm) {
226 int i; 226 int i;
227 u32 elrsr; 227 u32 elrsr;
228 228
@@ -247,7 +247,7 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
247 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs; 247 u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
248 int i; 248 int i;
249 249
250 if (used_lrs) { 250 if (used_lrs || cpu_if->its_vpe.its_vm) {
251 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2); 251 write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
252 252
253 for (i = 0; i < used_lrs; i++) 253 for (i = 0; i < used_lrs; i++)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index ffd7acdceac7..27c958306449 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -102,8 +102,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
102 * @addr: IPA 102 * @addr: IPA
103 * @pmd: pmd pointer for IPA 103 * @pmd: pmd pointer for IPA
104 * 104 *
105 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all 105 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs.
106 * pages in the range dirty.
107 */ 106 */
108static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) 107static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
109{ 108{
@@ -121,8 +120,7 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
121 * @addr: IPA 120 * @addr: IPA
122 * @pud: pud pointer for IPA 121 * @pud: pud pointer for IPA
123 * 122 *
124 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all 123 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs.
125 * pages in the range dirty.
126 */ 124 */
127static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) 125static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
128{ 126{
@@ -899,9 +897,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
899 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. 897 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
900 * @kvm: The KVM struct pointer for the VM. 898 * @kvm: The KVM struct pointer for the VM.
901 * 899 *
902 * Allocates only the stage-2 HW PGD level table(s) (can support either full 900 * Allocates only the stage-2 HW PGD level table(s) of size defined by
903 * 40-bit input addresses or limited to 32-bit input addresses). Clears the 901 * stage2_pgd_size(kvm).
904 * allocated pages.
905 * 902 *
906 * Note we don't need locking here as this is only called when the VM is 903 * Note we don't need locking here as this is only called when the VM is
907 * created, which can only be done once. 904 * created, which can only be done once.
@@ -1067,25 +1064,43 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1067{ 1064{
1068 pmd_t *pmd, old_pmd; 1065 pmd_t *pmd, old_pmd;
1069 1066
1067retry:
1070 pmd = stage2_get_pmd(kvm, cache, addr); 1068 pmd = stage2_get_pmd(kvm, cache, addr);
1071 VM_BUG_ON(!pmd); 1069 VM_BUG_ON(!pmd);
1072 1070
1073 old_pmd = *pmd; 1071 old_pmd = *pmd;
1072 /*
1073 * Multiple vcpus faulting on the same PMD entry, can
1074 * lead to them sequentially updating the PMD with the
1075 * same value. Following the break-before-make
1076 * (pmd_clear() followed by tlb_flush()) process can
1077 * hinder forward progress due to refaults generated
1078 * on missing translations.
1079 *
1080 * Skip updating the page table if the entry is
1081 * unchanged.
1082 */
1083 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1084 return 0;
1085
1074 if (pmd_present(old_pmd)) { 1086 if (pmd_present(old_pmd)) {
1075 /* 1087 /*
1076 * Multiple vcpus faulting on the same PMD entry, can 1088 * If we already have PTE level mapping for this block,
1077 * lead to them sequentially updating the PMD with the 1089 * we must unmap it to avoid inconsistent TLB state and
1078 * same value. Following the break-before-make 1090 * leaking the table page. We could end up in this situation
1079 * (pmd_clear() followed by tlb_flush()) process can 1091 * if the memory slot was marked for dirty logging and was
1080 * hinder forward progress due to refaults generated 1092 * reverted, leaving PTE level mappings for the pages accessed
1081 * on missing translations. 1093 * during the period. So, unmap the PTE level mapping for this
1094 * block and retry, as we could have released the upper level
1095 * table in the process.
1082 * 1096 *
1083 * Skip updating the page table if the entry is 1097 * Normal THP split/merge follows mmu_notifier callbacks and do
1084 * unchanged. 1098 * get handled accordingly.
1085 */ 1099 */
1086 if (pmd_val(old_pmd) == pmd_val(*new_pmd)) 1100 if (!pmd_thp_or_huge(old_pmd)) {
1087 return 0; 1101 unmap_stage2_range(kvm, addr & S2_PMD_MASK, S2_PMD_SIZE);
1088 1102 goto retry;
1103 }
1089 /* 1104 /*
1090 * Mapping in huge pages should only happen through a 1105 * Mapping in huge pages should only happen through a
1091 * fault. If a page is merged into a transparent huge 1106 * fault. If a page is merged into a transparent huge
@@ -1097,8 +1112,7 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1097 * should become splitting first, unmapped, merged, 1112 * should become splitting first, unmapped, merged,
1098 * and mapped back in on-demand. 1113 * and mapped back in on-demand.
1099 */ 1114 */
1100 VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); 1115 WARN_ON_ONCE(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1101
1102 pmd_clear(pmd); 1116 pmd_clear(pmd);
1103 kvm_tlb_flush_vmid_ipa(kvm, addr); 1117 kvm_tlb_flush_vmid_ipa(kvm, addr);
1104 } else { 1118 } else {
@@ -1114,6 +1128,7 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
1114{ 1128{
1115 pud_t *pudp, old_pud; 1129 pud_t *pudp, old_pud;
1116 1130
1131retry:
1117 pudp = stage2_get_pud(kvm, cache, addr); 1132 pudp = stage2_get_pud(kvm, cache, addr);
1118 VM_BUG_ON(!pudp); 1133 VM_BUG_ON(!pudp);
1119 1134
@@ -1121,14 +1136,23 @@ static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cac
1121 1136
1122 /* 1137 /*
1123 * A large number of vcpus faulting on the same stage 2 entry, 1138 * A large number of vcpus faulting on the same stage 2 entry,
1124 * can lead to a refault due to the 1139 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
1125 * stage2_pud_clear()/tlb_flush(). Skip updating the page 1140 * Skip updating the page tables if there is no change.
1126 * tables if there is no change.
1127 */ 1141 */
1128 if (pud_val(old_pud) == pud_val(*new_pudp)) 1142 if (pud_val(old_pud) == pud_val(*new_pudp))
1129 return 0; 1143 return 0;
1130 1144
1131 if (stage2_pud_present(kvm, old_pud)) { 1145 if (stage2_pud_present(kvm, old_pud)) {
1146 /*
1147 * If we already have table level mapping for this block, unmap
1148 * the range for this block and retry.
1149 */
1150 if (!stage2_pud_huge(kvm, old_pud)) {
1151 unmap_stage2_range(kvm, addr & S2_PUD_MASK, S2_PUD_SIZE);
1152 goto retry;
1153 }
1154
1155 WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
1132 stage2_pud_clear(kvm, pudp); 1156 stage2_pud_clear(kvm, pudp);
1133 kvm_tlb_flush_vmid_ipa(kvm, addr); 1157 kvm_tlb_flush_vmid_ipa(kvm, addr);
1134 } else { 1158 } else {
@@ -1451,13 +1475,11 @@ static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1451} 1475}
1452 1476
1453/** 1477/**
1454 * stage2_wp_puds - write protect PGD range 1478 * stage2_wp_puds - write protect PGD range
1455 * @pgd: pointer to pgd entry 1479 * @pgd: pointer to pgd entry
1456 * @addr: range start address 1480 * @addr: range start address
1457 * @end: range end address 1481 * @end: range end address
1458 * 1482 */
1459 * Process PUD entries, for a huge PUD we cause a panic.
1460 */
1461static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd, 1483static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1462 phys_addr_t addr, phys_addr_t end) 1484 phys_addr_t addr, phys_addr_t end)
1463{ 1485{
@@ -1594,8 +1616,9 @@ static void kvm_send_hwpoison_signal(unsigned long address,
1594 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); 1616 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1595} 1617}
1596 1618
1597static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot, 1619static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
1598 unsigned long hva) 1620 unsigned long hva,
1621 unsigned long map_size)
1599{ 1622{
1600 gpa_t gpa_start; 1623 gpa_t gpa_start;
1601 hva_t uaddr_start, uaddr_end; 1624 hva_t uaddr_start, uaddr_end;
@@ -1610,34 +1633,34 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
1610 1633
1611 /* 1634 /*
1612 * Pages belonging to memslots that don't have the same alignment 1635 * Pages belonging to memslots that don't have the same alignment
1613 * within a PMD for userspace and IPA cannot be mapped with stage-2 1636 * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2
1614 * PMD entries, because we'll end up mapping the wrong pages. 1637 * PMD/PUD entries, because we'll end up mapping the wrong pages.
1615 * 1638 *
1616 * Consider a layout like the following: 1639 * Consider a layout like the following:
1617 * 1640 *
1618 * memslot->userspace_addr: 1641 * memslot->userspace_addr:
1619 * +-----+--------------------+--------------------+---+ 1642 * +-----+--------------------+--------------------+---+
1620 * |abcde|fgh Stage-1 PMD | Stage-1 PMD tv|xyz| 1643 * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz|
1621 * +-----+--------------------+--------------------+---+ 1644 * +-----+--------------------+--------------------+---+
1622 * 1645 *
1623 * memslot->base_gfn << PAGE_SIZE: 1646 * memslot->base_gfn << PAGE_SIZE:
1624 * +---+--------------------+--------------------+-----+ 1647 * +---+--------------------+--------------------+-----+
1625 * |abc|def Stage-2 PMD | Stage-2 PMD |tvxyz| 1648 * |abc|def Stage-2 block | Stage-2 block |tvxyz|
1626 * +---+--------------------+--------------------+-----+ 1649 * +---+--------------------+--------------------+-----+
1627 * 1650 *
1628 * If we create those stage-2 PMDs, we'll end up with this incorrect 1651 * If we create those stage-2 blocks, we'll end up with this incorrect
1629 * mapping: 1652 * mapping:
1630 * d -> f 1653 * d -> f
1631 * e -> g 1654 * e -> g
1632 * f -> h 1655 * f -> h
1633 */ 1656 */
1634 if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK)) 1657 if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1)))
1635 return false; 1658 return false;
1636 1659
1637 /* 1660 /*
1638 * Next, let's make sure we're not trying to map anything not covered 1661 * Next, let's make sure we're not trying to map anything not covered
1639 * by the memslot. This means we have to prohibit PMD size mappings 1662 * by the memslot. This means we have to prohibit block size mappings
1640 * for the beginning and end of a non-PMD aligned and non-PMD sized 1663 * for the beginning and end of a non-block aligned and non-block sized
1641 * memory slot (illustrated by the head and tail parts of the 1664 * memory slot (illustrated by the head and tail parts of the
1642 * userspace view above containing pages 'abcde' and 'xyz', 1665 * userspace view above containing pages 'abcde' and 'xyz',
1643 * respectively). 1666 * respectively).
@@ -1646,8 +1669,8 @@ static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
1646 * userspace_addr or the base_gfn, as both are equally aligned (per 1669 * userspace_addr or the base_gfn, as both are equally aligned (per
1647 * the check above) and equally sized. 1670 * the check above) and equally sized.
1648 */ 1671 */
1649 return (hva & S2_PMD_MASK) >= uaddr_start && 1672 return (hva & ~(map_size - 1)) >= uaddr_start &&
1650 (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end; 1673 (hva & ~(map_size - 1)) + map_size <= uaddr_end;
1651} 1674}
1652 1675
1653static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 1676static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1676,12 +1699,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1676 return -EFAULT; 1699 return -EFAULT;
1677 } 1700 }
1678 1701
1679 if (!fault_supports_stage2_pmd_mappings(memslot, hva))
1680 force_pte = true;
1681
1682 if (logging_active)
1683 force_pte = true;
1684
1685 /* Let's check if we will get back a huge page backed by hugetlbfs */ 1702 /* Let's check if we will get back a huge page backed by hugetlbfs */
1686 down_read(&current->mm->mmap_sem); 1703 down_read(&current->mm->mmap_sem);
1687 vma = find_vma_intersection(current->mm, hva, hva + 1); 1704 vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1692,6 +1709,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1692 } 1709 }
1693 1710
1694 vma_pagesize = vma_kernel_pagesize(vma); 1711 vma_pagesize = vma_kernel_pagesize(vma);
1712 if (logging_active ||
1713 !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
1714 force_pte = true;
1715 vma_pagesize = PAGE_SIZE;
1716 }
1717
1695 /* 1718 /*
1696 * The stage2 has a minimum of 2 level table (For arm64 see 1719 * The stage2 has a minimum of 2 level table (For arm64 see
1697 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can 1720 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
@@ -1699,11 +1722,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1699 * As for PUD huge maps, we must make sure that we have at least 1722 * As for PUD huge maps, we must make sure that we have at least
1700 * 3 levels, i.e, PMD is not folded. 1723 * 3 levels, i.e, PMD is not folded.
1701 */ 1724 */
1702 if ((vma_pagesize == PMD_SIZE || 1725 if (vma_pagesize == PMD_SIZE ||
1703 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && 1726 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
1704 !force_pte) {
1705 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; 1727 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1706 }
1707 up_read(&current->mm->mmap_sem); 1728 up_read(&current->mm->mmap_sem);
1708 1729
1709 /* We need minimum second+third level pages */ 1730 /* We need minimum second+third level pages */
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index ab3f47745d9c..44ceaccb18cf 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -754,8 +754,9 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
754 u64 indirect_ptr, type = GITS_BASER_TYPE(baser); 754 u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
755 phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser); 755 phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
756 int esz = GITS_BASER_ENTRY_SIZE(baser); 756 int esz = GITS_BASER_ENTRY_SIZE(baser);
757 int index; 757 int index, idx;
758 gfn_t gfn; 758 gfn_t gfn;
759 bool ret;
759 760
760 switch (type) { 761 switch (type) {
761 case GITS_BASER_TYPE_DEVICE: 762 case GITS_BASER_TYPE_DEVICE:
@@ -782,7 +783,8 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
782 783
783 if (eaddr) 784 if (eaddr)
784 *eaddr = addr; 785 *eaddr = addr;
785 return kvm_is_visible_gfn(its->dev->kvm, gfn); 786
787 goto out;
786 } 788 }
787 789
788 /* calculate and check the index into the 1st level */ 790 /* calculate and check the index into the 1st level */
@@ -812,7 +814,12 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
812 814
813 if (eaddr) 815 if (eaddr)
814 *eaddr = indirect_ptr; 816 *eaddr = indirect_ptr;
815 return kvm_is_visible_gfn(its->dev->kvm, gfn); 817
818out:
819 idx = srcu_read_lock(&its->dev->kvm->srcu);
820 ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
821 srcu_read_unlock(&its->dev->kvm->srcu, idx);
822 return ret;
816} 823}
817 824
818static int vgic_its_alloc_collection(struct vgic_its *its, 825static int vgic_its_alloc_collection(struct vgic_its *its,
@@ -1729,8 +1736,8 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev)
1729 kfree(its); 1736 kfree(its);
1730} 1737}
1731 1738
1732int vgic_its_has_attr_regs(struct kvm_device *dev, 1739static int vgic_its_has_attr_regs(struct kvm_device *dev,
1733 struct kvm_device_attr *attr) 1740 struct kvm_device_attr *attr)
1734{ 1741{
1735 const struct vgic_register_region *region; 1742 const struct vgic_register_region *region;
1736 gpa_t offset = attr->attr; 1743 gpa_t offset = attr->attr;
@@ -1750,9 +1757,9 @@ int vgic_its_has_attr_regs(struct kvm_device *dev,
1750 return 0; 1757 return 0;
1751} 1758}
1752 1759
1753int vgic_its_attr_regs_access(struct kvm_device *dev, 1760static int vgic_its_attr_regs_access(struct kvm_device *dev,
1754 struct kvm_device_attr *attr, 1761 struct kvm_device_attr *attr,
1755 u64 *reg, bool is_write) 1762 u64 *reg, bool is_write)
1756{ 1763{
1757 const struct vgic_register_region *region; 1764 const struct vgic_register_region *region;
1758 struct vgic_its *its; 1765 struct vgic_its *its;
@@ -1919,7 +1926,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
1919 ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | 1926 ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
1920 ite->collection->collection_id; 1927 ite->collection->collection_id;
1921 val = cpu_to_le64(val); 1928 val = cpu_to_le64(val);
1922 return kvm_write_guest(kvm, gpa, &val, ite_esz); 1929 return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
1923} 1930}
1924 1931
1925/** 1932/**
@@ -2066,7 +2073,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2066 (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | 2073 (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2067 (dev->num_eventid_bits - 1)); 2074 (dev->num_eventid_bits - 1));
2068 val = cpu_to_le64(val); 2075 val = cpu_to_le64(val);
2069 return kvm_write_guest(kvm, ptr, &val, dte_esz); 2076 return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
2070} 2077}
2071 2078
2072/** 2079/**
@@ -2246,7 +2253,7 @@ static int vgic_its_save_cte(struct vgic_its *its,
2246 ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | 2253 ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2247 collection->collection_id); 2254 collection->collection_id);
2248 val = cpu_to_le64(val); 2255 val = cpu_to_le64(val);
2249 return kvm_write_guest(its->dev->kvm, gpa, &val, esz); 2256 return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2250} 2257}
2251 2258
2252static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) 2259static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
@@ -2317,7 +2324,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
2317 */ 2324 */
2318 val = 0; 2325 val = 0;
2319 BUG_ON(cte_esz > sizeof(val)); 2326 BUG_ON(cte_esz > sizeof(val));
2320 ret = kvm_write_guest(its->dev->kvm, gpa, &val, cte_esz); 2327 ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2321 return ret; 2328 return ret;
2322} 2329}
2323 2330
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 408a78eb6a97..9f87e58dbd4a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -358,7 +358,7 @@ retry:
358 if (status) { 358 if (status) {
359 /* clear consumed data */ 359 /* clear consumed data */
360 val &= ~(1 << bit_nr); 360 val &= ~(1 << bit_nr);
361 ret = kvm_write_guest(kvm, ptr, &val, 1); 361 ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
362 if (ret) 362 if (ret)
363 return ret; 363 return ret;
364 } 364 }
@@ -409,7 +409,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
409 else 409 else
410 val &= ~(1 << bit_nr); 410 val &= ~(1 << bit_nr);
411 411
412 ret = kvm_write_guest(kvm, ptr, &val, 1); 412 ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
413 if (ret) 413 if (ret)
414 return ret; 414 return ret;
415 } 415 }
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index abd9c7352677..3af69f2a3866 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -867,15 +867,21 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
867 * either observe the new interrupt before or after doing this check, 867 * either observe the new interrupt before or after doing this check,
868 * and introducing additional synchronization mechanism doesn't change 868 * and introducing additional synchronization mechanism doesn't change
869 * this. 869 * this.
870 *
871 * Note that we still need to go through the whole thing if anything
872 * can be directly injected (GICv4).
870 */ 873 */
871 if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) 874 if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
875 !vgic_supports_direct_msis(vcpu->kvm))
872 return; 876 return;
873 877
874 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 878 DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
875 879
876 raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); 880 if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
877 vgic_flush_lr_state(vcpu); 881 raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
878 raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); 882 vgic_flush_lr_state(vcpu);
883 raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
884 }
879 885
880 if (can_access_vgic_from_kernel()) 886 if (can_access_vgic_from_kernel())
881 vgic_restore_state(vcpu); 887 vgic_restore_state(vcpu);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 4325250afd72..001aeda4c154 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -214,9 +214,9 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
214 214
215 if (flags & EPOLLHUP) { 215 if (flags & EPOLLHUP) {
216 /* The eventfd is closing, detach from KVM */ 216 /* The eventfd is closing, detach from KVM */
217 unsigned long flags; 217 unsigned long iflags;
218 218
219 spin_lock_irqsave(&kvm->irqfds.lock, flags); 219 spin_lock_irqsave(&kvm->irqfds.lock, iflags);
220 220
221 /* 221 /*
222 * We must check if someone deactivated the irqfd before 222 * We must check if someone deactivated the irqfd before
@@ -230,7 +230,7 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
230 if (irqfd_is_active(irqfd)) 230 if (irqfd_is_active(irqfd))
231 irqfd_deactivate(irqfd); 231 irqfd_deactivate(irqfd);
232 232
233 spin_unlock_irqrestore(&kvm->irqfds.lock, flags); 233 spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
234 } 234 }
235 235
236 return 0; 236 return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f25aa98a94df..55fe8e20d8fd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2905,6 +2905,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
2905{ 2905{
2906 struct kvm_device *dev = filp->private_data; 2906 struct kvm_device *dev = filp->private_data;
2907 2907
2908 if (dev->kvm->mm != current->mm)
2909 return -EIO;
2910
2908 switch (ioctl) { 2911 switch (ioctl) {
2909 case KVM_SET_DEVICE_ATTR: 2912 case KVM_SET_DEVICE_ATTR:
2910 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg); 2913 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);