-rw-r--r-- .clang-format | 17
-rw-r--r-- .mailmap | 5
-rw-r--r-- Documentation/PCI/index.rst | 2
-rw-r--r-- Documentation/PCI/pciebus-howto.rst (renamed from Documentation/PCI/picebus-howto.rst) | 0
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 14
-rw-r--r-- Documentation/admin-guide/sysctl/net.rst | 29
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/iommu/mediatek,iommu.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/ksz.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/net/macb.txt | 4
-rw-r--r-- Documentation/networking/tls-offload.rst | 18
-rw-r--r-- Documentation/networking/tuntap.txt | 4
-rw-r--r-- Documentation/process/embargoed-hardware-issues.rst | 279
-rw-r--r-- Documentation/process/index.rst | 1
-rw-r--r-- MAINTAINERS | 59
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arc/boot/dts/Makefile | 3
-rw-r--r-- arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r-- arch/arc/include/asm/linkage.h | 8
-rw-r--r-- arch/arc/include/asm/mach_desc.h | 3
-rw-r--r-- arch/arc/kernel/mcip.c | 60
-rw-r--r-- arch/arc/kernel/unwind.c | 5
-rw-r--r-- arch/arc/mm/dma.c | 2
-rw-r--r-- arch/arc/plat-hsdk/platform.c | 87
-rw-r--r-- arch/arm/Kconfig | 4
-rw-r--r-- arch/arm/boot/dts/am33xx-l4.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/am4372.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/am437x-l4.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/am571x-idk.dts | 7
-rw-r--r-- arch/arm/boot/dts/am572x-idk.dts | 7
-rw-r--r-- arch/arm/boot/dts/am574x-idk.dts | 7
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts | 7
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15-revc.dts | 7
-rw-r--r-- arch/arm/boot/dts/dra7-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/dra7-l4.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi | 50
-rw-r--r-- arch/arm/boot/dts/vf610-bk4.dts | 4
-rw-r--r-- arch/arm/lib/backtrace.S | 2
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq-handler.S | 3
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq.c | 4
-rw-r--r-- arch/arm/mach-omap2/Makefile | 2
-rw-r--r-- arch/arm/mach-omap2/omap-iommu.c | 43
-rw-r--r-- arch/arm/mach-omap2/omap4-common.c | 3
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 3
-rw-r--r-- arch/arm/mach-rpc/riscpc.c | 1
-rw-r--r-- arch/arm/mm/Kconfig | 4
-rw-r--r-- arch/arm/mm/init.c | 8
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts | 6
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12a.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts | 1
-rw-r--r-- arch/arm64/boot/dts/renesas/hihope-common.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77995-draak.dts | 6
-rw-r--r-- arch/ia64/include/asm/iommu.h | 2
-rw-r--r-- arch/ia64/kernel/pci-dma.c | 2
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-sli-defs.h | 1
-rw-r--r-- arch/nds32/kernel/signal.c | 2
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 3
-rw-r--r-- arch/powerpc/kernel/process.c | 21
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 6
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 6
-rw-r--r-- arch/powerpc/mm/nohash/tlb.c | 1
-rw-r--r-- arch/riscv/include/asm/fixmap.h | 4
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 12
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 12
-rw-r--r-- arch/um/include/shared/timer-internal.h | 14
-rw-r--r-- arch/um/kernel/process.c | 2
-rw-r--r-- arch/um/kernel/time.c | 16
-rw-r--r-- arch/x86/Makefile | 1
-rw-r--r-- arch/x86/boot/compressed/pgtable_64.c | 13
-rw-r--r-- arch/x86/events/amd/ibs.c | 13
-rw-r--r-- arch/x86/events/core.c | 2
-rw-r--r-- arch/x86/events/intel/core.c | 6
-rw-r--r-- arch/x86/hyperv/mmu.c | 8
-rw-r--r-- arch/x86/include/asm/bootparam_utils.h | 3
-rw-r--r-- arch/x86/include/asm/ftrace.h | 1
-rw-r--r-- arch/x86/include/asm/intel-family.h | 15
-rw-r--r-- arch/x86/include/asm/iommu.h | 1
-rw-r--r-- arch/x86/include/asm/msr-index.h | 1
-rw-r--r-- arch/x86/include/asm/nospec-branch.h | 2
-rw-r--r-- arch/x86/include/asm/perf_event.h | 12
-rw-r--r-- arch/x86/include/asm/uaccess.h | 4
-rw-r--r-- arch/x86/kernel/apic/apic.c | 68
-rw-r--r-- arch/x86/kernel/apic/bigsmp_32.c | 24
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 8
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 66
-rw-r--r-- arch/x86/kernel/pci-dma.c | 20
-rw-r--r-- arch/x86/kernel/uprobes.c | 17
-rw-r--r-- arch/x86/kvm/hyperv.c | 5
-rw-r--r-- arch/x86/kvm/lapic.c | 5
-rw-r--r-- arch/x86/kvm/mmu.c | 33
-rw-r--r-- arch/x86/kvm/svm.c | 9
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 1
-rw-r--r-- arch/x86/kvm/x86.c | 9
-rw-r--r-- arch/x86/mm/pageattr.c | 26
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 9
-rw-r--r-- arch/x86/power/cpu.c | 86
-rw-r--r-- drivers/atm/Kconfig | 2
-rw-r--r-- drivers/auxdisplay/ht16k33.c | 4
-rw-r--r-- drivers/block/drbd/drbd_main.c | 2
-rw-r--r-- drivers/block/rbd.c | 11
-rw-r--r-- drivers/bluetooth/btqca.c | 29
-rw-r--r-- drivers/bluetooth/btqca.h | 7
-rw-r--r-- drivers/bluetooth/btusb.c | 4
-rw-r--r-- drivers/bluetooth/hci_qca.c | 9
-rw-r--r-- drivers/bus/hisi_lpc.c | 47
-rw-r--r-- drivers/bus/ti-sysc.c | 24
-rw-r--r-- drivers/clk/clk.c | 49
-rw-r--r-- drivers/clk/samsung/clk-exynos5-subcmu.c | 16
-rw-r--r-- drivers/clk/samsung/clk-exynos5-subcmu.h | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 7
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 162
-rw-r--r-- drivers/clk/socfpga/clk-periph-s10.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 8
-rw-r--r-- drivers/dma/fsldma.c | 1
-rw-r--r-- drivers/dma/sh/rcar-dmac.c | 28
-rw-r--r-- drivers/dma/sprd-dma.c | 10
-rw-r--r-- drivers/dma/ti/dma-crossbar.c | 4
-rw-r--r-- drivers/dma/ti/omap-dma.c | 4
-rw-r--r-- drivers/fpga/altera-ps-spi.c | 11
-rw-r--r-- drivers/fsi/fsi-scom.c | 8
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 15
-rw-r--r-- drivers/gpio/gpiolib-of.c | 2
-rw-r--r-- drivers/gpio/gpiolib.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 66
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 11
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 11
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c | 19
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h | 3
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 5
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 30
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h | 1
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 2
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 54
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm.c | 5
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 55
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 24
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c | 12
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/output.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 20
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.c | 6
-rw-r--r-- drivers/gpu/drm/selftests/drm_cmdline_selftests.h | 7
-rw-r--r-- drivers/gpu/drm/selftests/test-drm_cmdline_parser.c | 130
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon.c | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 8
-rw-r--r-- drivers/hid/hid-cp2112.c | 8
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 22
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r-- drivers/hid/wacom_wac.c | 7
-rw-r--r-- drivers/hv/channel.c | 2
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 2
-rw-r--r-- drivers/hwtracing/intel_th/pci.c | 10
-rw-r--r-- drivers/hwtracing/stm/core.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-bcm-iproc.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-designware-slave.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 15
-rw-r--r-- drivers/i2c/busses/i2c-mt65xx.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-piix4.c | 12
-rw-r--r-- drivers/i2c/i2c-core-base.c | 2
-rw-r--r-- drivers/infiniband/core/cma.c | 6
-rw-r--r-- drivers/infiniband/core/counters.c | 10
-rw-r--r-- drivers/infiniband/core/nldev.c | 3
-rw-r--r-- drivers/infiniband/core/restrack.c | 15
-rw-r--r-- drivers/infiniband/core/umem.c | 7
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 8
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 11
-rw-r--r-- drivers/infiniband/hw/hfi1/fault.c | 12
-rw-r--r-- drivers/infiniband/hw/hfi1/tid_rdma.c | 76
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/mem.c | 5
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 14
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 17
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 24
-rw-r--r-- drivers/infiniband/sw/siw/siw.h | 8
-rw-r--r-- drivers/infiniband/sw/siw/siw_cm.c | 113
-rw-r--r-- drivers/infiniband/sw/siw/siw_cq.c | 5
-rw-r--r-- drivers/infiniband/sw/siw/siw_mem.c | 14
-rw-r--r-- drivers/infiniband/sw/siw/siw_mem.h | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp.c | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp_rx.c | 26
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp_tx.c | 80
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 40
-rw-r--r-- drivers/input/serio/hyperv-keyboard.c | 35
-rw-r--r-- drivers/iommu/Kconfig | 1
-rw-r--r-- drivers/iommu/Makefile | 3
-rw-r--r-- drivers/iommu/amd_iommu.c | 135
-rw-r--r-- drivers/iommu/amd_iommu.h | 14
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 5
-rw-r--r-- drivers/iommu/amd_iommu_quirks.c | 92
-rw-r--r-- drivers/iommu/amd_iommu_types.h | 9
-rw-r--r-- drivers/iommu/dma-iommu.c | 7
-rw-r--r-- drivers/iommu/dmar.c | 77
-rw-r--r-- drivers/iommu/exynos-iommu.c | 6
-rw-r--r-- drivers/iommu/intel-iommu.c | 411
-rw-r--r-- drivers/iommu/intel-svm.c | 36
-rw-r--r-- drivers/iommu/intel-trace.c | 14
-rw-r--r-- drivers/iommu/intel_irq_remapping.c | 6
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 88
-rw-r--r-- drivers/iommu/iommu.c | 193
-rw-r--r-- drivers/iommu/iova.c | 4
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 78
-rw-r--r-- drivers/iommu/msm_iommu.c | 1
-rw-r--r-- drivers/iommu/mtk_iommu.c | 168
-rw-r--r-- drivers/iommu/mtk_iommu.h | 21
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 6
-rw-r--r-- drivers/iommu/omap-iommu.c | 322
-rw-r--r-- drivers/iommu/omap-iommu.h | 9
-rw-r--r-- drivers/iommu/qcom_iommu.c | 11
-rw-r--r-- drivers/md/dm-bufio.c | 4
-rw-r--r-- drivers/md/dm-dust.c | 11
-rw-r--r-- drivers/md/dm-integrity.c | 15
-rw-r--r-- drivers/md/dm-kcopyd.c | 5
-rw-r--r-- drivers/md/dm-raid.c | 2
-rw-r--r-- drivers/md/dm-table.c | 5
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 68
-rw-r--r-- drivers/md/dm-zoned-reclaim.c | 47
-rw-r--r-- drivers/md/dm-zoned-target.c | 68
-rw-r--r-- drivers/md/dm-zoned.h | 11
-rw-r--r-- drivers/md/persistent-data/dm-btree.c | 31
-rw-r--r-- drivers/md/persistent-data/dm-space-map-metadata.c | 2
-rw-r--r-- drivers/memory/mtk-smi.c | 268
-rw-r--r-- drivers/mfd/rk808.c | 6
-rw-r--r-- drivers/misc/lkdtm/bugs.c | 4
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 2
-rw-r--r-- drivers/misc/mei/pci-me.c | 2
-rw-r--r-- drivers/misc/vmw_balloon.c | 10
-rw-r--r-- drivers/misc/vmw_vmci/vmci_doorbell.c | 6
-rw-r--r-- drivers/mmc/core/mmc_ops.c | 2
-rw-r--r-- drivers/mmc/core/sd.c | 6
-rw-r--r-- drivers/mmc/host/sdhci-cadence.c | 1
-rw-r--r-- drivers/mmc/host/sdhci-of-at91.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-sprd.c | 30
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 14
-rw-r--r-- drivers/mtd/hyperbus/Kconfig | 1
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 2
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_spi.c | 1
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 1
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 4
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 10
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 5
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 6
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.h | 4
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_ptp.c | 5
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 17
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 97
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 138
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ace.c | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_flower.c | 12
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 14
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 1
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 8
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 5
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 9
-rw-r--r-- drivers/net/ieee802154/mac802154_hwsim.c | 8
-rw-r--r-- drivers/net/netdevsim/dev.c | 63
-rw-r--r-- drivers/net/netdevsim/fib.c | 102
-rw-r--r-- drivers/net/netdevsim/netdev.c | 9
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 10
-rw-r--r-- drivers/net/phy/at803x.c | 32
-rw-r--r-- drivers/net/phy/phy-c45.c | 40
-rw-r--r-- drivers/net/phy/phy.c | 2
-rw-r--r-- drivers/net/phy/phy_device.c | 12
-rw-r--r-- drivers/net/team/team.c | 2
-rw-r--r-- drivers/net/usb/cx82310_eth.c | 3
-rw-r--r-- drivers/net/usb/kalmia.c | 6
-rw-r--r-- drivers/net/usb/lan78xx.c | 8
-rw-r--r-- drivers/net/usb/r8152.c | 10
-rw-r--r-- drivers/net/virtio_net.c | 2
-rw-r--r-- drivers/net/wimax/i2400m/fw.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 33
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 21
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 20
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 9
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 13
-rw-r--r-- drivers/net/xen-netback/netback.c | 2
-rw-r--r-- drivers/nvdimm/pfn_devs.c | 5
-rw-r--r-- drivers/nvme/host/core.c | 14
-rw-r--r-- drivers/nvme/host/multipath.c | 1
-rw-r--r-- drivers/nvme/host/nvme.h | 5
-rw-r--r-- drivers/nvme/host/pci.c | 3
-rw-r--r-- drivers/pci/quirks.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_ishtp.c | 4
-rw-r--r-- drivers/power/supply/ab8500_charger.c | 1
-rw-r--r-- drivers/s390/net/qeth_core.h | 1
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 24
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 15
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 11
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 3
-rw-r--r-- drivers/soc/ixp4xx/Kconfig | 4
-rw-r--r-- drivers/soc/qcom/qcom-geni-se.c | 6
-rw-r--r-- drivers/soc/ti/pm33xx.c | 19
-rw-r--r-- drivers/target/target_core_user.c | 9
-rw-r--r-- drivers/usb/chipidea/udc.c | 32
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 16
-rw-r--r-- drivers/usb/class/usbtmc.c | 3
-rw-r--r-- drivers/usb/core/hcd-pci.c | 30
-rw-r--r-- drivers/usb/gadget/udc/lpc32xx_udc.c | 2
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 15
-rw-r--r-- drivers/usb/host/xhci-rcar.c | 2
-rw-r--r-- drivers/usb/host/xhci-tegra.c | 10
-rw-r--r-- drivers/usb/storage/realtek_cr.c | 15
-rw-r--r-- drivers/usb/storage/unusual_devs.h | 2
-rw-r--r-- drivers/usb/typec/tcpm/tcpm.c | 2
-rw-r--r-- drivers/vhost/test.c | 13
-rw-r--r-- drivers/vhost/vhost.c | 520
-rw-r--r-- drivers/vhost/vhost.h | 41
-rw-r--r-- drivers/video/fbdev/acornfb.c | 1
-rw-r--r-- drivers/watchdog/wdt285.c | 2
-rw-r--r-- drivers/xen/swiotlb-xen.c | 8
-rw-r--r-- fs/afs/cell.c | 4
-rw-r--r-- fs/afs/dir.c | 3
-rw-r--r-- fs/afs/yfsclient.c | 2
-rw-r--r-- fs/ceph/addr.c | 5
-rw-r--r-- fs/ceph/caps.c | 5
-rw-r--r-- fs/ceph/inode.c | 7
-rw-r--r-- fs/ceph/locks.c | 3
-rw-r--r-- fs/ceph/snap.c | 4
-rw-r--r-- fs/ceph/super.h | 2
-rw-r--r-- fs/ceph/xattr.c | 19
-rw-r--r-- fs/cifs/cifsfs.h | 2
-rw-r--r-- fs/cifs/cifsproto.h | 1
-rw-r--r-- fs/cifs/cifssmb.c | 197
-rw-r--r-- fs/cifs/connect.c | 31
-rw-r--r-- fs/cifs/dir.c | 5
-rw-r--r-- fs/cifs/misc.c | 22
-rw-r--r-- fs/cifs/sess.c | 26
-rw-r--r-- fs/configfs/configfs_internal.h | 15
-rw-r--r-- fs/configfs/dir.c | 137
-rw-r--r-- fs/configfs/file.c | 280
-rw-r--r-- fs/io_uring.c | 66
-rw-r--r-- fs/nfs/dir.c | 2
-rw-r--r-- fs/nfs/direct.c | 27
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 28
-rw-r--r-- fs/nfs/inode.c | 35
-rw-r--r-- fs/nfs/internal.h | 10
-rw-r--r-- fs/nfs/nfs4file.c | 12
-rw-r--r-- fs/nfs/pagelist.c | 19
-rw-r--r-- fs/nfs/pnfs_nfs.c | 15
-rw-r--r-- fs/nfs/proc.c | 7
-rw-r--r-- fs/nfs/read.c | 35
-rw-r--r-- fs/nfs/write.c | 38
-rw-r--r-- fs/nfsd/nfscache.c | 2
-rw-r--r-- fs/nfsd/nfsctl.c | 19
-rw-r--r-- fs/read_write.c | 49
-rw-r--r-- fs/ubifs/budget.c | 2
-rw-r--r-- fs/ubifs/orphan.c | 2
-rw-r--r-- fs/ubifs/super.c | 4
-rw-r--r-- fs/userfaultfd.c | 25
-rw-r--r-- fs/xfs/xfs_ioctl32.c | 56
-rw-r--r-- fs/xfs/xfs_iops.c | 1
-rw-r--r-- fs/xfs/xfs_pnfs.c | 2
-rw-r--r-- fs/xfs/xfs_reflink.c | 63
-rw-r--r-- include/dt-bindings/memory/mt8183-larb-port.h | 130
-rw-r--r-- include/linux/amd-iommu.h | 12
-rw-r--r-- include/linux/blk_types.h | 5
-rw-r--r-- include/linux/ceph/buffer.h | 3
-rw-r--r-- include/linux/compiler.h | 8
-rw-r--r-- include/linux/dma-contiguous.h | 5
-rw-r--r-- include/linux/gpio.h | 24
-rw-r--r-- include/linux/input/elan-i2c-ids.h | 2
-rw-r--r-- include/linux/intel-iommu.h | 5
-rw-r--r-- include/linux/io-pgtable.h | 9
-rw-r--r-- include/linux/iommu.h | 16
-rw-r--r-- include/linux/key.h | 8
-rw-r--r-- include/linux/logic_pio.h | 1
-rw-r--r-- include/linux/mlx5/device.h | 4
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r-- include/linux/mmzone.h | 5
-rw-r--r-- include/linux/netfilter/nf_conntrack_h323_types.h | 5
-rw-r--r-- include/linux/omap-iommu.h | 15
-rw-r--r-- include/linux/phy.h | 1
-rw-r--r-- include/linux/platform_data/iommu-omap.h | 4
-rw-r--r-- include/linux/signal.h | 15
-rw-r--r-- include/linux/skbuff.h | 8
-rw-r--r-- include/linux/socket.h | 3
-rw-r--r-- include/linux/sunrpc/sched.h | 1
-rw-r--r-- include/linux/swiotlb.h | 8
-rw-r--r-- include/linux/timekeeper_internal.h | 5
-rw-r--r-- include/linux/trace_events.h | 1
-rw-r--r-- include/math-emu/op-common.h | 5
-rw-r--r-- include/net/act_api.h | 4
-rw-r--r-- include/net/addrconf.h | 2
-rw-r--r-- include/net/bluetooth/hci_core.h | 1
-rw-r--r-- include/net/inet_frag.h | 2
-rw-r--r-- include/net/net_namespace.h | 3
-rw-r--r-- include/net/netfilter/nf_tables.h | 9
-rw-r--r-- include/net/netfilter/nf_tables_offload.h | 2
-rw-r--r-- include/net/netlink.h | 5
-rw-r--r-- include/net/nexthop.h | 6
-rw-r--r-- include/net/pkt_cls.h | 2
-rw-r--r-- include/net/psample.h | 1
-rw-r--r-- include/net/route.h | 2
-rw-r--r-- include/net/sock.h | 10
-rw-r--r-- include/rdma/restrack.h | 3
-rw-r--r-- include/soc/arc/mcip.h | 11
-rw-r--r-- include/soc/mediatek/smi.h | 5
-rw-r--r-- include/trace/events/intel_iommu.h | 106
-rw-r--r-- include/trace/events/rxrpc.h | 65
-rw-r--r-- include/uapi/linux/bpf.h | 4
-rw-r--r-- include/uapi/linux/jffs2.h | 5
-rw-r--r-- include/uapi/linux/netfilter/xt_nfacct.h | 5
-rw-r--r-- include/uapi/linux/rds.h | 2
-rw-r--r-- kernel/bpf/core.c | 8
-rw-r--r-- kernel/bpf/syscall.c | 30
-rw-r--r-- kernel/bpf/verifier.c | 9
-rw-r--r-- kernel/dma/contiguous.c | 8
-rw-r--r-- kernel/dma/direct.c | 12
-rw-r--r-- kernel/dma/swiotlb.c | 34
-rw-r--r-- kernel/irq/irqdesc.c | 15
-rw-r--r-- kernel/kallsyms.c | 6
-rw-r--r-- kernel/kprobes.c | 8
-rw-r--r-- kernel/module.c | 4
-rw-r--r-- kernel/sched/core.c | 83
-rw-r--r-- kernel/sched/fair.c | 5
-rw-r--r-- kernel/sched/psi.c | 8
-rw-r--r-- kernel/signal.c | 5
-rw-r--r-- kernel/time/timekeeping.c | 5
-rw-r--r-- kernel/time/vsyscall.c | 22
-rw-r--r-- kernel/trace/ftrace.c | 17
-rw-r--r-- kernel/trace/trace.c | 26
-rw-r--r-- kernel/trace/trace_events.c | 2
-rw-r--r-- kernel/trace/trace_probe.c | 3
-rw-r--r-- lib/kfifo.c | 3
-rw-r--r-- lib/logic_pio.c | 73
-rw-r--r-- mm/balloon_compaction.c | 3
-rw-r--r-- mm/huge_memory.c | 4
-rw-r--r-- mm/kasan/common.c | 10
-rw-r--r-- mm/memcontrol.c | 87
-rw-r--r-- mm/page_alloc.c | 19
-rw-r--r-- mm/vmscan.c | 5
-rw-r--r-- mm/z3fold.c | 90
-rw-r--r-- mm/zsmalloc.c | 80
-rw-r--r-- net/batman-adv/bat_iv_ogm.c | 20
-rw-r--r-- net/batman-adv/bat_v_ogm.c | 18
-rw-r--r-- net/batman-adv/multicast.c | 8
-rw-r--r-- net/batman-adv/netlink.c | 2
-rw-r--r-- net/bluetooth/hci_core.c | 1
-rw-r--r-- net/bluetooth/hci_debugfs.c | 31
-rw-r--r-- net/bluetooth/hidp/core.c | 9
-rw-r--r-- net/bluetooth/l2cap_core.c | 2
-rw-r--r-- net/bridge/netfilter/ebtables.c | 8
-rw-r--r-- net/bridge/netfilter/nft_meta_bridge.c | 2
-rw-r--r-- net/ceph/crypto.c | 6
-rw-r--r-- net/ceph/osd_client.c | 9
-rw-r--r-- net/core/filter.c | 8
-rw-r--r-- net/core/flow_dissector.c | 2
-rw-r--r-- net/core/netpoll.c | 6
-rw-r--r-- net/core/sock.c | 50
-rw-r--r-- net/core/sock_diag.c | 3
-rw-r--r-- net/core/stream.c | 16
-rw-r--r-- net/dsa/switch.c | 3
-rw-r--r-- net/dsa/tag_8021q.c | 2
-rw-r--r-- net/ieee802154/6lowpan/reassembly.c | 2
-rw-r--r-- net/ieee802154/socket.c | 2
-rw-r--r-- net/ipv4/fib_trie.c | 2
-rw-r--r-- net/ipv4/icmp.c | 10
-rw-r--r-- net/ipv4/igmp.c | 4
-rw-r--r-- net/ipv4/inet_fragment.c | 39
-rw-r--r-- net/ipv4/ip_fragment.c | 8
-rw-r--r-- net/ipv4/route.c | 17
-rw-r--r-- net/ipv4/tcp.c | 33
-rw-r--r-- net/ipv4/tcp_bpf.c | 6
-rw-r--r-- net/ipv4/tcp_output.c | 6
-rw-r--r-- net/ipv6/addrconf.c | 19
-rw-r--r-- net/ipv6/mcast.c | 5
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r-- net/ipv6/reassembly.c | 2
-rw-r--r-- net/mac80211/cfg.c | 9
-rw-r--r-- net/mac80211/rx.c | 6
-rw-r--r-- net/mpls/mpls_iptunnel.c | 8
-rw-r--r-- net/ncsi/ncsi-cmd.c | 13
-rw-r--r-- net/ncsi/ncsi-rsp.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 16
-rw-r--r-- net/netfilter/nf_conntrack_ftp.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 5
-rw-r--r-- net/netfilter/nf_flow_table_core.c | 43
-rw-r--r-- net/netfilter/nf_flow_table_ip.c | 44
-rw-r--r-- net/netfilter/nf_tables_api.c | 19
-rw-r--r-- net/netfilter/nf_tables_offload.c | 17
-rw-r--r-- net/netfilter/nft_flow_offload.c | 15
-rw-r--r-- net/netfilter/xt_nfacct.c | 36
-rw-r--r-- net/netfilter/xt_physdev.c | 6
-rw-r--r-- net/openvswitch/conntrack.c | 20
-rw-r--r-- net/openvswitch/flow.c | 160
-rw-r--r-- net/openvswitch/flow.h | 1
-rw-r--r-- net/packet/af_packet.c | 7
-rw-r--r-- net/psample/psample.c | 2
-rw-r--r-- net/rds/ib.c | 16
-rw-r--r-- net/rds/ib.h | 1
-rw-r--r-- net/rds/ib_cm.c | 3
-rw-r--r-- net/rds/rdma_transport.c | 10
-rw-r--r-- net/rds/recv.c | 5
-rw-r--r-- net/rxrpc/af_rxrpc.c | 9
-rw-r--r-- net/rxrpc/ar-internal.h | 25
-rw-r--r-- net/rxrpc/call_event.c | 23
-rw-r--r-- net/rxrpc/call_object.c | 33
-rw-r--r-- net/rxrpc/conn_client.c | 44
-rw-r--r-- net/rxrpc/conn_event.c | 6
-rw-r--r-- net/rxrpc/conn_object.c | 2
-rw-r--r-- net/rxrpc/input.c | 359
-rw-r--r-- net/rxrpc/local_event.c | 4
-rw-r--r-- net/rxrpc/local_object.c | 104
-rw-r--r-- net/rxrpc/output.c | 9
-rw-r--r-- net/rxrpc/peer_event.c | 10
-rw-r--r-- net/rxrpc/protocol.h | 9
-rw-r--r-- net/rxrpc/recvmsg.c | 53
-rw-r--r-- net/rxrpc/rxkad.c | 32
-rw-r--r-- net/rxrpc/sendmsg.c | 13
-rw-r--r-- net/rxrpc/skbuff.c | 40
-rw-r--r-- net/sched/act_bpf.c | 2
-rw-r--r-- net/sched/act_connmark.c | 2
-rw-r--r-- net/sched/act_csum.c | 2
-rw-r--r-- net/sched/act_ct.c | 2
-rw-r--r-- net/sched/act_ctinfo.c | 2
-rw-r--r-- net/sched/act_gact.c | 2
-rw-r--r-- net/sched/act_ife.c | 2
-rw-r--r-- net/sched/act_ipt.c | 11
-rw-r--r-- net/sched/act_mirred.c | 2
-rw-r--r-- net/sched/act_mpls.c | 2
-rw-r--r-- net/sched/act_nat.c | 2
-rw-r--r-- net/sched/act_pedit.c | 2
-rw-r--r-- net/sched/act_police.c | 2
-rw-r--r-- net/sched/act_sample.c | 8
-rw-r--r-- net/sched/act_simple.c | 2
-rw-r--r-- net/sched/act_skbedit.c | 14
-rw-r--r-- net/sched/act_skbmod.c | 2
-rw-r--r-- net/sched/act_tunnel_key.c | 2
-rw-r--r-- net/sched/act_vlan.c | 2
-rw-r--r-- net/sched/sch_cbs.c | 19
-rw-r--r-- net/sched/sch_generic.c | 19
-rw-r--r-- net/sched/sch_taprio.c | 34
-rw-r--r-- net/sctp/sm_sideeffect.c | 2
-rw-r--r-- net/sctp/stream.c | 1
-rw-r--r-- net/smc/smc_tx.c | 6
-rw-r--r-- net/sunrpc/clnt.c | 47
-rw-r--r-- net/sunrpc/xprt.c | 7
-rw-r--r-- net/tipc/addr.c | 1
-rw-r--r-- net/tipc/link.c | 92
-rw-r--r-- net/tipc/msg.h | 8
-rw-r--r-- net/tls/tls_device.c | 9
-rw-r--r-- net/tls/tls_main.c | 2
-rw-r--r-- net/wireless/reg.c | 2
-rw-r--r-- net/wireless/util.c | 23
-rw-r--r-- net/xdp/xdp_umem.c | 4
-rw-r--r-- net/xfrm/xfrm_policy.c | 4
-rw-r--r-- security/keys/request_key.c | 2
-rw-r--r-- security/keys/request_key_auth.c | 6
-rw-r--r-- sound/core/seq/seq_clientmgr.c | 3
-rw-r--r-- sound/core/seq/seq_fifo.c | 17
-rw-r--r-- sound/core/seq/seq_fifo.h | 2
-rw-r--r-- sound/firewire/oxfw/oxfw-pcm.c | 2
-rw-r--r-- sound/pci/hda/hda_auto_parser.c | 4
-rw-r--r-- sound/pci/hda/hda_generic.c | 3
-rw-r--r-- sound/pci/hda/hda_generic.h | 1
-rw-r--r-- sound/pci/hda/patch_ca0132.c | 1
-rw-r--r-- sound/pci/hda/patch_conexant.c | 17
-rw-r--r-- sound/pci/hda/patch_realtek.c | 17
-rw-r--r-- sound/usb/line6/pcm.c | 18
-rw-r--r-- sound/usb/mixer.c | 36
-rw-r--r-- sound/usb/mixer_quirks.c | 8
-rw-r--r-- sound/usb/pcm.c | 1
-rw-r--r-- tools/bpf/bpftool/common.c | 8
-rw-r--r-- tools/bpf/bpftool/prog.c | 4
-rw-r--r-- tools/hv/hv_kvp_daemon.c | 2
-rw-r--r-- tools/include/uapi/linux/bpf.h | 11
-rw-r--r-- tools/lib/bpf/libbpf.c | 33
-rw-r--r-- tools/power/x86/turbostat/Makefile | 3
-rw-r--r-- tools/power/x86/turbostat/turbostat.c | 101
-rw-r--r-- tools/power/x86/x86_energy_perf_policy/Makefile | 3
-rw-r--r-- tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 | 2
-rw-r--r-- tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c | 28
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 6
-rw-r--r-- tools/testing/selftests/bpf/config | 1
-rw-r--r-- tools/testing/selftests/bpf/test_btf_dump.c | 7
-rw-r--r-- tools/testing/selftests/bpf/test_cgroup_storage.c | 6
-rw-r--r-- tools/testing/selftests/bpf/test_sock.c | 7
-rw-r--r-- tools/testing/selftests/bpf/verifier/loops1.c | 28
-rw-r--r-- tools/testing/selftests/kvm/include/evmcs.h | 2
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/processor.c | 16
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/vmx.c | 20
-rw-r--r-- tools/testing/selftests/kvm/x86_64/evmcs_test.c | 15
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 12
-rw-r--r-- tools/testing/selftests/kvm/x86_64/platform_info_test.c | 2
-rw-r--r-- tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c | 32
-rwxr-xr-x tools/testing/selftests/net/tcp_fastopen_backup_key.sh | 2
-rwxr-xr-x tools/testing/selftests/netfilter/nft_flowtable.sh | 48
-rw-r--r-- tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py | 22
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json | 47
-rw-r--r-- virt/kvm/arm/mmio.c | 7
-rw-r--r-- virt/kvm/arm/vgic/vgic-init.c | 30
-rw-r--r-- virt/kvm/arm/vgic/vgic-mmio.c | 18
-rw-r--r-- virt/kvm/arm/vgic/vgic-v2.c | 5
-rw-r--r-- virt/kvm/arm/vgic/vgic-v3.c | 5
-rw-r--r-- virt/kvm/arm/vgic/vgic.c | 7
676 files changed, 8542 insertions(+), 4340 deletions(-)
diff --git a/.clang-format b/.clang-format
index 2ffd69afc1a8..196ca317bd1f 100644
--- a/.clang-format
+++ b/.clang-format
@@ -107,10 +107,13 @@ ForEachMacros:
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
   - 'device_for_each_child_node'
+  - 'dma_fence_chain_for_each'
   - 'drm_atomic_crtc_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane_state'
   - 'drm_atomic_for_each_plane_damage'
+  - 'drm_client_for_each_connector_iter'
+  - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
@@ -126,6 +129,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -153,6 +157,8 @@ ForEachMacros:
   - 'for_each_cpu_not'
   - 'for_each_cpu_wrap'
   - 'for_each_dev_addr'
+  - 'for_each_dev_scope'
+  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -169,6 +175,8 @@ ForEachMacros:
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
   - 'for_each_fib6_walker_rt'
+  - 'for_each_free_mem_pfn_range_in_zone'
+  - 'for_each_free_mem_pfn_range_in_zone_from'
   - 'for_each_free_mem_range'
   - 'for_each_free_mem_range_reverse'
   - 'for_each_func_rsrc'
@@ -178,6 +186,7 @@ ForEachMacros:
   - 'for_each_ip_tunnel_rcu'
   - 'for_each_irq_nr'
   - 'for_each_link_codecs'
+  - 'for_each_link_platforms'
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
@@ -302,7 +311,10 @@ ForEachMacros:
   - 'ide_port_for_each_present_dev'
   - 'idr_for_each_entry'
   - 'idr_for_each_entry_continue'
+  - 'idr_for_each_entry_continue_ul'
   - 'idr_for_each_entry_ul'
+  - 'in_dev_for_each_ifa_rcu'
+  - 'in_dev_for_each_ifa_rtnl'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
   - 'key_for_each'
@@ -343,8 +355,6 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
-  - 'mp_bvec_for_each_page'
-  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -381,18 +391,19 @@ ForEachMacros:
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
   - 'rbtree_postorder_for_each_entry_safe'
+  - 'rdma_for_each_block'
   - 'rdma_for_each_port'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
   - 'rhl_for_each_rcu'
   - 'rht_for_each'
-  - 'rht_for_each_from'
   - 'rht_for_each_entry'
   - 'rht_for_each_entry_from'
   - 'rht_for_each_entry_rcu'
   - 'rht_for_each_entry_rcu_from'
   - 'rht_for_each_entry_safe'
+  - 'rht_for_each_from'
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
diff --git a/.mailmap b/.mailmap
index acba1a6163f1..afaad605284a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
@@ -160,6 +163,8 @@ Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.co
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index f4c6121868c3..6768305e4c26 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -9,7 +9,7 @@ Linux PCI Bus Subsystem
    :numbered:
 
    pci
-   picebus-howto
+   pciebus-howto
    pci-iov-howto
    msi-howto
    acpi-info
diff --git a/Documentation/PCI/picebus-howto.rst b/Documentation/PCI/pciebus-howto.rst
index f882ff62c51f..f882ff62c51f 100644
--- a/Documentation/PCI/picebus-howto.rst
+++ b/Documentation/PCI/pciebus-howto.rst
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 47d981a86e2f..d31ffa110461 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1732,6 +1732,11 @@
 			Note that using this option lowers the security
 			provided by tboot because it makes the system
 			vulnerable to DMA attacks.
+		nobounce [Default off]
+			Disable bounce buffers for untrusted devices such as
+			Thunderbolt devices. This treats untrusted devices as
+			trusted ones, and hence might expose the system to
+			security risks from DMA attacks.
 
 	intel_idle.max_cstate=	[KNL,HW,ACPI,X86]
 			0 disables intel_idle and fall back on acpi_idle.
@@ -1811,7 +1816,7 @@
 			synchronously.
 
 	iommu.passthrough=
-			[ARM64] Configure DMA to bypass the IOMMU by default.
+			[ARM64, X86] Configure DMA to bypass the IOMMU by default.
 			Format: { "0" | "1" }
 			0 - Use IOMMU translation for DMA.
 			1 - Bypass the IOMMU for DMA.
@@ -4090,6 +4095,13 @@
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.
 
+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+				advertisement of RDRAND support (this affects
+				certain AMD processors because of buggy BIOS
+				support, specifically around the suspend/resume
+				path).
+
 	rdt=		[HW,X86,RDT]
 			Turn on/off individual RDT features. List is:
 			cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
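
For concreteness, the two new parameters added above could appear together on a boot command line like the following (an illustrative line composed for this note, not taken from the patch):

	rdrand=force iommu.passthrough=1

Here rdrand=force restores the RDRAND advertisement on the affected AMD parts, and iommu.passthrough=1 now puts DMA into IOMMU-bypass mode on x86 as well as arm64.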
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index a7d44e71019d..287b98708a40 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net
  802       E802 protocol         ax25       AX25
  ethernet  Ethernet protocol     rose       X.25 PLP layer
  ipv4      IP version 4          x25        X.25 protocol
- ipx       IPX                   token-ring IBM token ring
  bridge    Bridging              decnet     DEC net
  ipv6      IP version 6          tipc       TIPC
  ========= =================== = ========== ==================
@@ -401,33 +400,7 @@ interface.
 (network) that the route leads to, the router (may be directly connected), the
 route flags, and the device the route is using.
 
-
-5. IPX
-------
-
-The IPX protocol has no tunable values in proc/sys/net.
-
-The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
-socket giving the local and remote addresses in Novell format (that is
-network:node:port). In accordance with the strange Novell tradition,
-everything but the port is in hex. Not_Connected is displayed for sockets that
-are not tied to a specific remote address. The Tx and Rx queue sizes indicate
-the number of bytes pending for transmission and reception. The state
-indicates the state the socket is in and the uid is the owning uid of the
-socket.
-
-The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
-it gives the network number, the node number, and indicates if the network is
-the primary network. It also indicates which device it is bound to (or
-Internal for internal networks) and the Frame Type if appropriate. Linux
-supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
-IPX.
-
-The /proc/net/ipx_route table holds a list of IPX routes. For each route it
-gives the destination network, the router node (or Directly) and the network
-address of the router (or Connected) for internal networks.
-
-6. TIPC
+5. TIPC
 -------
 
 tipc_rmem
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 09fc02b99845..a5c1db95b3ec 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -1,20 +1,30 @@
 * ARC-HS Interrupt Distribution Unit
 
-This optional 2nd level interrupt controller can be used in SMP configurations for
-dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+This optional 2nd level interrupt controller can be used in SMP configurations
+for dynamic IRQ routing, load balancing of common/external IRQs towards core
+intc.
 
 Properties:
 
 - compatible: "snps,archs-idu-intc"
 - interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
+- #interrupt-cells: Must be <1> or <2>.
 
-  Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
-  of the particular interrupt line of IDU corresponds to the line N+24 of the
-  core interrupt controller.
+  Value of the first cell specifies the "common" IRQ from peripheral to IDU.
+  Number N of the particular interrupt line of IDU corresponds to the line N+24
+  of the core interrupt controller.
 
-  intc accessed via the special ARC AUX register interface, hence "reg" property
-  is not specified.
+  The (optional) second cell specifies any of the following flags:
+    - bits[3:0] trigger type and level flags
+        1 = low-to-high edge triggered
+        2 = NOT SUPPORTED (high-to-low edge triggered)
+        4 = active high level-sensitive <<< DEFAULT
+        8 = NOT SUPPORTED (active low level-sensitive)
+  When no second cell is specified, the interrupt is assumed to be level
+  sensitive.
+
+  The interrupt controller is accessed via the special ARC AUX register
+  interface, hence "reg" property is not specified.
 
 Example:
 	core_intc: core-interrupt-controller {
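
To illustrate the new optional second cell, a peripheral hanging off the IDU could encode a trigger flag like this (a hypothetical sketch: the idu_intc label, node name, address and IRQ number are all invented for this note):

	ethernet@f0003000 {
		compatible = "snps,arc-emac";
		reg = <0xf0003000 0x1000>;
		interrupt-parent = <&idu_intc>;
		/* common IRQ 2 on the IDU; flag 1 = low-to-high edge triggered */
		interrupts = <2 1>;
	};

With the single-cell form (interrupts = <2>;) the line would default to level-sensitive, per the binding text above.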
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
index 6922db598def..ce59a505f5a4 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
@@ -11,10 +11,23 @@ ARM Short-Descriptor translation table format for address translation.
              |
          m4u (Multimedia Memory Management Unit)
              |
+         +--------+
+         |        |
+     gals0-rx  gals1-rx     (Global Async Local Sync rx)
+         |        |
+         |        |
+     gals0-tx  gals1-tx     (Global Async Local Sync tx)
+         |        |          Some SoCs may have GALS.
+         +--------+
+             |
          SMI Common(Smart Multimedia Interface Common)
              |
          +----------------+-------
          |                |
+         |             gals-rx       There may be GALS in some larbs.
+         |                |
+         |                |
+         |             gals-tx
          |                |
      SMI larb0        SMI larb1   ... SoCs have several SMI local arbiter(larb).
      (display)         (vdec)
@@ -36,6 +49,10 @@ each local arbiter.
 like display, video decode, and camera. And there are different ports
 in each larb. Take a example, There are many ports like MC, PP, VLD in the
 video decode local arbiter, all these ports are according to the video HW.
+  In some SoCs, there may be a GALS (Global Async Local Sync) module between
+smi-common and m4u, and an additional GALS module between smi-larb and
+smi-common. GALS can be seen as an "asynchronous FIFO" which helps
+synchronize modules running at different clock frequencies.
 
 Required properties:
 - compatible : must be one of the following string:
@@ -44,18 +61,25 @@ Required properties:
44 "mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses 61 "mediatek,mt7623-m4u", "mediatek,mt2701-m4u" for mt7623 which uses
45 generation one m4u HW. 62 generation one m4u HW.
46 "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW. 63 "mediatek,mt8173-m4u" for mt8173 which uses generation two m4u HW.
64 "mediatek,mt8183-m4u" for mt8183 which uses generation two m4u HW.
47- reg : m4u register base and size. 65- reg : m4u register base and size.
48- interrupts : the interrupt of m4u. 66- interrupts : the interrupt of m4u.
49- clocks : must contain one entry for each clock-names. 67- clocks : must contain one entry for each clock-names.
50- clock-names : must be "bclk", It is the block clock of m4u. 68- clock-names : Only 1 optional clock:
69 - "bclk": the block clock of m4u.
70 Here is the list which require this "bclk":
71 - mt2701, mt2712, mt7623 and mt8173.
72 Note that m4u use the EMI clock which always has been enabled before kernel
73 if there is no this "bclk".
51- mediatek,larbs : List of phandle to the local arbiters in the current Socs. 74- mediatek,larbs : List of phandle to the local arbiters in the current Socs.
52 Refer to bindings/memory-controllers/mediatek,smi-larb.txt. It must sort 75 Refer to bindings/memory-controllers/mediatek,smi-larb.txt. It must sort
53 according to the local arbiter index, like larb0, larb1, larb2... 76 according to the local arbiter index, like larb0, larb1, larb2...
54- iommu-cells : must be 1. This is the mtk_m4u_id according to the HW. 77- iommu-cells : must be 1. This is the mtk_m4u_id according to the HW.
55 Specifies the mtk_m4u_id as defined in 78 Specifies the mtk_m4u_id as defined in
56 dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623 79 dt-binding/memory/mt2701-larb-port.h for mt2701, mt7623
57 dt-binding/memory/mt2712-larb-port.h for mt2712, and 80 dt-binding/memory/mt2712-larb-port.h for mt2712,
58 dt-binding/memory/mt8173-larb-port.h for mt8173. 81 dt-binding/memory/mt8173-larb-port.h for mt8173, and
82 dt-binding/memory/mt8183-larb-port.h for mt8183.
59 83
60Example: 84Example:
61 iommu: iommu@10205000 { 85 iommu: iommu@10205000 {
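
Because the binding uses a single iommu cell (the mtk_m4u_id), a master device references the m4u with one port macro. A hedged sketch of a consumer node (the node name, address and macro are assumptions for this note; the port macro would come from the matching mtXXXX-larb-port.h header):

	display-overlay@14007000 {
		compatible = "mediatek,mt8173-disp-ovl";
		reg = <0x14007000 0x1000>;
		/* one cell: the m4u port ID for this master */
		iommus = <&iommu M4U_PORT_DISP_OVL0>;
	};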
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
index e937ddd871a6..b478ade4da65 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-common.txt
@@ -2,9 +2,10 @@ SMI (Smart Multimedia Interface) Common
 
 The hardware block diagram please check bindings/iommu/mediatek,iommu.txt
 
-Mediatek SMI have two generations of HW architecture, mt2712 and mt8173 use
-the second generation of SMI HW while mt2701 uses the first generation HW of
-SMI.
+Mediatek SMI has two generations of HW architecture; here is the list of
+which generation the SoCs use:
+generation 1: mt2701 and mt7623.
+generation 2: mt2712, mt8173 and mt8183.
 
 There's slight differences between the two SMI, for generation 2, the
 register which control the iommu port is at each larb's register base. But
@@ -19,6 +20,7 @@ Required properties:
19 "mediatek,mt2712-smi-common" 20 "mediatek,mt2712-smi-common"
20 "mediatek,mt7623-smi-common", "mediatek,mt2701-smi-common" 21 "mediatek,mt7623-smi-common", "mediatek,mt2701-smi-common"
21 "mediatek,mt8173-smi-common" 22 "mediatek,mt8173-smi-common"
23 "mediatek,mt8183-smi-common"
22- reg : the register and size of the SMI block. 24- reg : the register and size of the SMI block.
23- power-domains : a phandle to the power domain of this local arbiter. 25- power-domains : a phandle to the power domain of this local arbiter.
24- clocks : Must contain an entry for each entry in clock-names. 26- clocks : Must contain an entry for each entry in clock-names.
@@ -30,6 +32,10 @@ Required properties:
 	     They may be the same if both source clocks are the same.
   - "async" : asynchronous clock, it help transform the smi clock into the emi
 	      clock domain, this clock is only needed by generation 1 smi HW.
+  and these 2 optional clocks for generation 2 smi HW:
+  - "gals0": the path0 clock of GALS (Global Async Local Sync).
+  - "gals1": the path1 clock of GALS (Global Async Local Sync).
+  Here is the list of SoCs which have this GALS: mt8183.
 
 Example:
 	smi_common: smi@14022000 {
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
index 94eddcae77ab..4b369b3e1a69 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
@@ -8,6 +8,7 @@ Required properties:
8 "mediatek,mt2712-smi-larb" 8 "mediatek,mt2712-smi-larb"
9 "mediatek,mt7623-smi-larb", "mediatek,mt2701-smi-larb" 9 "mediatek,mt7623-smi-larb", "mediatek,mt2701-smi-larb"
10 "mediatek,mt8173-smi-larb" 10 "mediatek,mt8173-smi-larb"
11 "mediatek,mt8183-smi-larb"
11- reg : the register and size of this local arbiter. 12- reg : the register and size of this local arbiter.
12- mediatek,smi : a phandle to the smi_common node. 13- mediatek,smi : a phandle to the smi_common node.
13- power-domains : a phandle to the power domain of this local arbiter. 14- power-domains : a phandle to the power domain of this local arbiter.
@@ -16,6 +17,9 @@ Required properties:
16 - "apb" : Advanced Peripheral Bus clock, It's the clock for setting 17 - "apb" : Advanced Peripheral Bus clock, It's the clock for setting
17 the register. 18 the register.
18 - "smi" : It's the clock for transfer data and command. 19 - "smi" : It's the clock for transfer data and command.
20 and this optional clock name:
21 - "gals": the clock for GALS(Global Async Local Sync).
22 Here is the list which has this GALS: mt8183.
19 23
20Required property for mt2701, mt2712 and mt7623: 24Required property for mt2701, mt2712 and mt7623:
21- mediatek,larb-id :the hardware id of this larb. 25- mediatek,larb-id :the hardware id of this larb.
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index 4ac21cef370e..113e7ac79aad 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -12,6 +12,7 @@ Required properties:
12 - "microchip,ksz8565" 12 - "microchip,ksz8565"
13 - "microchip,ksz9893" 13 - "microchip,ksz9893"
14 - "microchip,ksz9563" 14 - "microchip,ksz9563"
15 - "microchip,ksz8563"
15 16
16Optional properties: 17Optional properties:
17 18
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 63c73fafe26d..0b61a90f1592 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -15,10 +15,10 @@ Required properties:
15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. 15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. 16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. 17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
18 Use "sifive,fu540-macb" for SiFive FU540-C000 SoC. 18 Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
19 Or the generic form: "cdns,emac". 19 Or the generic form: "cdns,emac".
20- reg: Address and length of the register set for the device 20- reg: Address and length of the register set for the device
21 For "sifive,fu540-macb", second range is required to specify the 21 For "sifive,fu540-c000-gem", second range is required to specify the
22 address and length of the registers for GEMGXL Management block. 22 address and length of the registers for GEMGXL Management block.
23- interrupts: Should contain macb interrupt 23- interrupts: Should contain macb interrupt
24- phy-mode: See ethernet.txt file in the same directory. 24- phy-mode: See ethernet.txt file in the same directory.
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index b70b70dc4524..0dd3f748239f 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -506,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags.
 These flags will be acted upon accordingly by the core ``ktls`` code.
 TLS device feature flags only control adding of new TLS connection
 offloads, old connections will remain active after flags are cleared.
-
-Known bugs
-==========
-
-skb_orphan() leaks clear text
------------------------------
-
-Currently drivers depend on the :c:member:`sk` member of
-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
-encryption. Any operation which removes or does not preserve the socket
-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
-will cause the driver to miss the packets and lead to clear text leaks.
-
-Redirects leak clear text
--------------------------
-
-In the RX direction, if segment has already been decrypted by the device
-and it gets redirected or mirrored - clear text will be transmitted out.
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index 949d5dcdd9a3..0104830d5075 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical
 media, receives them from user space program and instead of sending
 packets via physical media sends them to the user space program.
 
-Let's say that you configured IPX on the tap0, then whenever
-the kernel sends an IPX packet to tap0, it is passed to the application
+Let's say that you configured IPv6 on the tap0, then whenever
+the kernel sends an IPv6 packet to tap0, it is passed to the application
 (VTun for example). The application encrypts, compresses and sends it to
 the other side over TCP or UDP. The application on the other side decompresses
 and decrypts the data received and writes the packet to the TAP device,
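
The paragraph above describes the user-space half of the TAP data path. As a minimal sketch (written for this note, not part of the patch or of tuntap.txt itself), an application like VTun attaches to tap0 roughly as follows; error handling is reduced to the essentials:

	/* Attach to an existing TAP interface and return a file descriptor
	 * whose read()/write() calls carry that interface's frames. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/if.h>
	#include <linux/if_tun.h>

	int tap_attach(const char *name)
	{
		struct ifreq ifr;
		int fd = open("/dev/net/tun", O_RDWR);

		if (fd < 0)
			return -1;

		memset(&ifr, 0, sizeof(ifr));
		/* IFF_TAP: ethernet frames; IFF_NO_PI: no packet-info header */
		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

		if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}

After tap_attach("tap0") succeeds, every IPv6 packet the kernel routes to tap0 appears as a frame on a read() of the returned descriptor, which is exactly the hand-off the text describes.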
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
new file mode 100644
index 000000000000..402636356fbe
--- /dev/null
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -0,0 +1,279 @@
1Embargoed hardware issues
2=========================
3
4Scope
5-----
6
7Hardware issues which result in security problems are a different category
8of security bugs than pure software bugs which only affect the Linux
9kernel.
10
11Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
12differently because they usually affect all Operating Systems ("OS") and
13therefore need coordination across different OS vendors, distributions,
14hardware vendors and other parties. For some of the issues, software
15mitigations can depend on microcode or firmware updates, which need further
16coordination.
17
18.. _Contact:
19
20Contact
21-------
22
23The Linux kernel hardware security team is separate from the regular Linux
24kernel security team.
25
26The team only handles the coordination of embargoed hardware security
27issues. Reports of pure software security bugs in the Linux kernel are not
28handled by this team and the reporter will be guided to contact the regular
29Linux kernel security team (:ref:`Documentation/admin-guide/
30<securitybugs>`) instead.
31
32The team can be contacted by email at <hardware-security@kernel.org>. This
33is a private list of security officers who will help you to coordinate an
34issue according to our documented process.
35
36The list is encrypted and email to the list can be encrypted with either
37PGP or S/MIME and must be signed with the reporter's PGP key or S/MIME
38certificate. The list's PGP key and S/MIME certificate are available from
39https://www.kernel.org/....
40
41While hardware security issues are often handled by the affected hardware
42vendor, we welcome contact from researchers or individuals who have
43identified a potential hardware flaw.
44
45Hardware security officers
46^^^^^^^^^^^^^^^^^^^^^^^^^^
47
48The current team of hardware security officers:
49
50 - Linus Torvalds (Linux Foundation Fellow)
51 - Greg Kroah-Hartman (Linux Foundation Fellow)
52 - Thomas Gleixner (Linux Foundation Fellow)
53
54Operation of mailing-lists
55^^^^^^^^^^^^^^^^^^^^^^^^^^
56
57The encrypted mailing-lists which are used in our process are hosted on
58Linux Foundation's IT infrastructure. By providing this service, Linux
59Foundation's director of IT Infrastructure security technically has the
60ability to access the embargoed information, but is bound to
61confidentiality by his employment contract. Linux Foundation's director of
62IT Infrastructure security is also responsible for the kernel.org
63infrastructure.
64
65The Linux Foundation's current director of IT Infrastructure security is
66Konstantin Ryabitsev.
67
68
69Non-disclosure agreements
70-------------------------
71
72The Linux kernel hardware security team is not a formal body and is therefore
73unable to enter into any non-disclosure agreements. The kernel community
74is aware of the sensitive nature of such issues and offers a Memorandum of
75Understanding instead.
76
77
78Memorandum of Understanding
79---------------------------
80
81The Linux kernel community has a deep understanding of the requirement to
82keep hardware security issues under embargo for coordination between
83different OS vendors, distributors, hardware vendors and other parties.
84
85The Linux kernel community has successfully handled hardware security
86issues in the past and has the necessary mechanisms in place to allow
87community-compliant development under embargo restrictions.
88
89The Linux kernel community has a dedicated hardware security team for
90initial contact, which oversees the process of handling such issues under
91embargo rules.
92
93The hardware security team identifies the developers (domain experts) who
94will form the initial response team for a particular issue. The initial
95response team can bring in further developers (domain experts) to address
96the issue in the best technical way.
97
98All involved developers pledge to adhere to the embargo rules and to keep
99the received information confidential. Violation of the pledge will lead to
100immediate exclusion from the current issue and removal from all related
101mailing-lists. In addition, the hardware security team will also exclude
102the offender from future issues. The impact of this consequence is a highly
103effective deterrent in our community. In case a violation happens, the
104hardware security team will inform the involved parties immediately. If you
105or anyone becomes aware of a potential violation, please report it
106immediately to the Hardware security officers.
107
108
109Process
110^^^^^^^
111
112Due to the globally distributed nature of Linux kernel development,
113face-to-face meetings are rarely feasible for addressing hardware security
114issues. Phone conferences are hard to coordinate due to time zones and
115other factors and should only be used when absolutely necessary. Encrypted
116email has been proven to be the most effective and secure communication
117method for these types of issues.
118
119Start of Disclosure
120"""""""""""""""""""
121
122Disclosure starts by contacting the Linux kernel hardware security team by
123email. This initial contact should contain a description of the problem and
124a list of any known affected hardware. If your organization builds or
125distributes the affected hardware, we encourage you to also consider what
126other hardware could be affected.
127
128The hardware security team will provide an incident-specific encrypted
129mailing-list which will be used for initial discussion with the reporter,
130further disclosure and coordination.
131
132The hardware security team will provide the disclosing party a list of
133developers (domain experts) who should be informed initially about the
134issue after confirming with the developers that they will adhere to this
135Memorandum of Understanding and the documented process. These developers
136form the initial response team and will be responsible for handling the
137issue after initial contact. The hardware security team supports the
138response team, but is not necessarily involved in the mitigation
139development process.
140
141While individual developers might be covered by a non-disclosure agreement
142via their employer, they cannot enter individual non-disclosure agreements
143in their role as Linux kernel developers. They will, however, agree to
144adhere to this documented process and the Memorandum of Understanding.
145
146
147Disclosure
148""""""""""
149
150The disclosing party provides detailed information to the initial response
151team via the specific encrypted mailing-list.
152
153In our experience, the technical documentation of these issues is usually
154a sufficient starting point and further technical clarification is best
155done via email.
156
157Mitigation development
158""""""""""""""""""""""
159
160The initial response team sets up an encrypted mailing-list or repurposes
161an existing one if appropriate. The disclosing party should provide a list
162of contacts for all other parties who have already been, or should be,
163informed about the issue. The response team contacts these parties so they
164can name experts who should be subscribed to the mailing-list.
165
166Using a mailing-list is close to the normal Linux development process and
167has been successfully used in developing mitigations for various hardware
168security issues in the past.
169
170The mailing-list operates in the same way as normal Linux development.
171Patches are posted, discussed and reviewed and, if agreed upon, applied to a
172non-public git repository which is only accessible to the participating
173developers via a secure connection. The repository contains the main
174development branch against the mainline kernel and backport branches for
175stable kernel versions as necessary.
176
177The initial response team will identify further experts from the Linux
178kernel developer community as needed and inform the disclosing party about
179their participation. Bringing in experts can happen at any point in the
180development process and often needs to be handled in a timely manner.
181
182Coordinated release
183"""""""""""""""""""
184
185The involved parties will negotiate the date and time when the embargo
186ends. At that point the prepared mitigations are integrated into the
187relevant kernel trees and published.
188
189While we understand that hardware security issues need coordinated embargo
190time, the embargo time should be constrained to the minimum time which is
191required for all involved parties to develop, test and prepare the
192mitigations. Extending the embargo artificially to meet conference talk
193dates or for other non-technical reasons creates more work and burden for
194the involved developers and response teams, as the patches need to be kept
195up to date in order to follow the ongoing upstream kernel development,
196which might introduce conflicting changes.
197
198CVE assignment
199""""""""""""""
200
201Neither the hardware security team nor the initial response team assign
202CVEs, nor are CVEs required for the development process. If CVEs are
203provided by the disclosing party they can be used for documentation
204purposes.
205
206Process ambassadors
207-------------------
208
209For assistance with this process, we have established ambassadors in various
210organizations, who can answer questions about or provide guidance on the
211reporting process and further handling. Ambassadors are not involved in the
212disclosure of a particular issue, unless requested by a response team or by
213an involved disclosed party. The current ambassadors list:
214
215 ============= ========================================================
216 ARM
217 AMD
218 IBM
219 Intel
220 Qualcomm Trilok Soni <tsoni@codeaurora.org>
221
222 Microsoft Sasha Levin <sashal@kernel.org>
223 VMware
224 Xen Andrew Cooper <andrew.cooper3@citrix.com>
225
226 Canonical Tyler Hicks <tyhicks@canonical.com>
227 Debian Ben Hutchings <ben@decadent.org.uk>
228 Oracle Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
229 Red Hat Josh Poimboeuf <jpoimboe@redhat.com>
230 SUSE Jiri Kosina <jkosina@suse.cz>
231
232 Amazon
233 Google Kees Cook <keescook@chromium.org>
234 ============= ========================================================
235
236If you want your organization to be added to the ambassadors list, please
237contact the hardware security team. The nominated ambassador has to
238understand and support our process fully and is ideally well connected in
239the Linux kernel community.
240
241Encrypted mailing-lists
242-----------------------
243
244We use encrypted mailing-lists for communication. The operating principle
245of these lists is that email sent to the list is encrypted either with the
246list's PGP key or with the list's S/MIME certificate. The mailing-list
247software decrypts the email and re-encrypts it individually for each
248subscriber with the subscriber's PGP key or S/MIME certificate. Details
249about the mailing-list software and the setup which is used to ensure the
250security of the lists and protection of the data can be found here:
251https://www.kernel.org/....
252
253List keys
254^^^^^^^^^
255
256For initial contact see :ref:`Contact`. For incident-specific mailing-lists
257the key and S/MIME certificate are conveyed to the subscribers by email
258sent from the specific list.
259
260Subscription to incident-specific lists
261^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
262
263Subscription is handled by the response teams. Disclosed parties who want
264to participate in the communication send a list of potential subscribers to
265the response team so that it can validate subscription requests.
266
267Each subscriber needs to send a subscription request to the response team
268by email. The email must be signed with the subscriber's PGP key or S/MIME
269certificate. If a PGP key is used, it must be available from a public key
270server and is ideally connected to the Linux kernel's PGP web of trust. See
271also: https://www.kernel.org/signature.html.
272
273The response team verifies that the subscription request is valid and adds
274the subscriber to the list. After subscription the subscriber will receive
275email from the mailing-list which is signed either with the list's PGP key
276or the list's S/MIME certificate. The subscriber's email client can extract
277the PGP key or the S/MIME certificate from the signature so the subscriber
278can send encrypted email to the list.
279
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 878ebfda7eef..e2c9ffc682c5 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -45,6 +45,7 @@ Other guides to the community that are of interest to most developers are:
45 submit-checklist 45 submit-checklist
46 kernel-docs 46 kernel-docs
47 deprecated 47 deprecated
48 embargoed-hardware-issues
48 49
49These are some overall technical guides that have been put here for now for 50These are some overall technical guides that have been put here for now for
50lack of a better place. 51lack of a better place.
diff --git a/MAINTAINERS b/MAINTAINERS
index cf04f72ca79f..ae8536cba798 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
183M: Heiner Kallweit <hkallweit1@gmail.com> 183M: Heiner Kallweit <hkallweit1@gmail.com>
184L: netdev@vger.kernel.org 184L: netdev@vger.kernel.org
185S: Maintained 185S: Maintained
186F: drivers/net/ethernet/realtek/r8169.c 186F: drivers/net/ethernet/realtek/r8169*
187 187
1888250/16?50 (AND CLONE UARTS) SERIAL DRIVER 1888250/16?50 (AND CLONE UARTS) SERIAL DRIVER
189M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 189M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -683,7 +683,7 @@ S: Maintained
683F: drivers/crypto/sunxi-ss/ 683F: drivers/crypto/sunxi-ss/
684 684
685ALLWINNER VPU DRIVER 685ALLWINNER VPU DRIVER
686M: Maxime Ripard <maxime.ripard@bootlin.com> 686M: Maxime Ripard <mripard@kernel.org>
687M: Paul Kocialkowski <paul.kocialkowski@bootlin.com> 687M: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
688L: linux-media@vger.kernel.org 688L: linux-media@vger.kernel.org
689S: Maintained 689S: Maintained
@@ -1407,7 +1407,7 @@ S: Maintained
1407F: drivers/clk/sunxi/ 1407F: drivers/clk/sunxi/
1408 1408
1409ARM/Allwinner sunXi SoC support 1409ARM/Allwinner sunXi SoC support
1410M: Maxime Ripard <maxime.ripard@bootlin.com> 1410M: Maxime Ripard <mripard@kernel.org>
1411M: Chen-Yu Tsai <wens@csie.org> 1411M: Chen-Yu Tsai <wens@csie.org>
1412L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1412L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1413S: Maintained 1413S: Maintained
@@ -3576,7 +3576,7 @@ F: Documentation/filesystems/caching/cachefiles.txt
3576F: fs/cachefiles/ 3576F: fs/cachefiles/
3577 3577
3578CADENCE MIPI-CSI2 BRIDGES 3578CADENCE MIPI-CSI2 BRIDGES
3579M: Maxime Ripard <maxime.ripard@bootlin.com> 3579M: Maxime Ripard <mripard@kernel.org>
3580L: linux-media@vger.kernel.org 3580L: linux-media@vger.kernel.org
3581S: Maintained 3581S: Maintained
3582F: Documentation/devicetree/bindings/media/cdns,*.txt 3582F: Documentation/devicetree/bindings/media/cdns,*.txt
@@ -5294,7 +5294,7 @@ F: include/linux/vga*
5294 5294
5295DRM DRIVERS AND MISC GPU PATCHES 5295DRM DRIVERS AND MISC GPU PATCHES
5296M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> 5296M: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
5297M: Maxime Ripard <maxime.ripard@bootlin.com> 5297M: Maxime Ripard <mripard@kernel.org>
5298M: Sean Paul <sean@poorly.run> 5298M: Sean Paul <sean@poorly.run>
5299W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html 5299W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
5300S: Maintained 5300S: Maintained
@@ -5307,7 +5307,7 @@ F: include/uapi/drm/drm*
5307F: include/linux/vga* 5307F: include/linux/vga*
5308 5308
5309DRM DRIVERS FOR ALLWINNER A10 5309DRM DRIVERS FOR ALLWINNER A10
5310M: Maxime Ripard <maxime.ripard@bootlin.com> 5310M: Maxime Ripard <mripard@kernel.org>
5311L: dri-devel@lists.freedesktop.org 5311L: dri-devel@lists.freedesktop.org
5312S: Supported 5312S: Supported
5313F: drivers/gpu/drm/sun4i/ 5313F: drivers/gpu/drm/sun4i/
@@ -6064,7 +6064,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
6064M: Heiner Kallweit <hkallweit1@gmail.com> 6064M: Heiner Kallweit <hkallweit1@gmail.com>
6065L: netdev@vger.kernel.org 6065L: netdev@vger.kernel.org
6066S: Maintained 6066S: Maintained
6067F: Documentation/ABI/testing/sysfs-bus-mdio 6067F: Documentation/ABI/testing/sysfs-class-net-phydev
6068F: Documentation/devicetree/bindings/net/ethernet-phy.yaml 6068F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
6069F: Documentation/devicetree/bindings/net/mdio* 6069F: Documentation/devicetree/bindings/net/mdio*
6070F: Documentation/networking/phy.rst 6070F: Documentation/networking/phy.rst
@@ -7512,7 +7512,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
7512M: Gregory CLEMENT <gregory.clement@bootlin.com> 7512M: Gregory CLEMENT <gregory.clement@bootlin.com>
7513L: linux-i2c@vger.kernel.org 7513L: linux-i2c@vger.kernel.org
7514S: Maintained 7514S: Maintained
7515F: Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt 7515F: Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
7516F: drivers/i2c/busses/i2c-mv64xxx.c 7516F: drivers/i2c/busses/i2c-mv64xxx.c
7517 7517
7518I2C OVER PARALLEL PORT 7518I2C OVER PARALLEL PORT
@@ -8453,11 +8453,6 @@ S: Maintained
8453F: fs/io_uring.c 8453F: fs/io_uring.c
8454F: include/uapi/linux/io_uring.h 8454F: include/uapi/linux/io_uring.h
8455 8455
8456IP MASQUERADING
8457M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
8458S: Maintained
8459F: net/ipv4/netfilter/ipt_MASQUERADE.c
8460
8461IPMI SUBSYSTEM 8456IPMI SUBSYSTEM
8462M: Corey Minyard <minyard@acm.org> 8457M: Corey Minyard <minyard@acm.org>
8463L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers) 8458L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -8831,14 +8826,6 @@ F: virt/kvm/*
8831F: tools/kvm/ 8826F: tools/kvm/
8832F: tools/testing/selftests/kvm/ 8827F: tools/testing/selftests/kvm/
8833 8828
8834KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
8835M: Joerg Roedel <joro@8bytes.org>
8836L: kvm@vger.kernel.org
8837W: http://www.linux-kvm.org/
8838S: Maintained
8839F: arch/x86/include/asm/svm.h
8840F: arch/x86/kvm/svm.c
8841
8842KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64) 8829KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
8843M: Marc Zyngier <maz@kernel.org> 8830M: Marc Zyngier <maz@kernel.org>
8844R: James Morse <james.morse@arm.com> 8831R: James Morse <james.morse@arm.com>
@@ -8881,7 +8868,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
8881M: Janosch Frank <frankja@linux.ibm.com> 8868M: Janosch Frank <frankja@linux.ibm.com>
8882R: David Hildenbrand <david@redhat.com> 8869R: David Hildenbrand <david@redhat.com>
8883R: Cornelia Huck <cohuck@redhat.com> 8870R: Cornelia Huck <cohuck@redhat.com>
8884L: linux-s390@vger.kernel.org 8871L: kvm@vger.kernel.org
8885W: http://www.ibm.com/developerworks/linux/linux390/ 8872W: http://www.ibm.com/developerworks/linux/linux390/
8886T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git 8873T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
8887S: Supported 8874S: Supported
@@ -8896,6 +8883,11 @@ F: tools/testing/selftests/kvm/*/s390x/
8896KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86) 8883KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
8897M: Paolo Bonzini <pbonzini@redhat.com> 8884M: Paolo Bonzini <pbonzini@redhat.com>
8898M: Radim Krčmář <rkrcmar@redhat.com> 8885M: Radim Krčmář <rkrcmar@redhat.com>
8886R: Sean Christopherson <sean.j.christopherson@intel.com>
8887R: Vitaly Kuznetsov <vkuznets@redhat.com>
8888R: Wanpeng Li <wanpengli@tencent.com>
8889R: Jim Mattson <jmattson@google.com>
8890R: Joerg Roedel <joro@8bytes.org>
8899L: kvm@vger.kernel.org 8891L: kvm@vger.kernel.org
8900W: http://www.linux-kvm.org 8892W: http://www.linux-kvm.org
8901T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git 8893T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8903,8 +8895,12 @@ S: Supported
8903F: arch/x86/kvm/ 8895F: arch/x86/kvm/
8904F: arch/x86/kvm/*/ 8896F: arch/x86/kvm/*/
8905F: arch/x86/include/uapi/asm/kvm* 8897F: arch/x86/include/uapi/asm/kvm*
8898F: arch/x86/include/uapi/asm/vmx.h
8899F: arch/x86/include/uapi/asm/svm.h
8906F: arch/x86/include/asm/kvm* 8900F: arch/x86/include/asm/kvm*
8907F: arch/x86/include/asm/pvclock-abi.h 8901F: arch/x86/include/asm/pvclock-abi.h
8902F: arch/x86/include/asm/svm.h
8903F: arch/x86/include/asm/vmx.h
8908F: arch/x86/kernel/kvm.c 8904F: arch/x86/kernel/kvm.c
8909F: arch/x86/kernel/kvmclock.c 8905F: arch/x86/kernel/kvmclock.c
8910 8906
@@ -9232,6 +9228,18 @@ F: include/linux/nd.h
9232F: include/linux/libnvdimm.h 9228F: include/linux/libnvdimm.h
9233F: include/uapi/linux/ndctl.h 9229F: include/uapi/linux/ndctl.h
9234 9230
9231LICENSES and SPDX stuff
9232M: Thomas Gleixner <tglx@linutronix.de>
9233M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
9234L: linux-spdx@vger.kernel.org
9235S: Maintained
9236T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
9237F: COPYING
9238F: Documentation/process/license-rules.rst
9239F: LICENSES/
9240F: scripts/spdxcheck-test.sh
9241F: scripts/spdxcheck.py
9242
9235LIGHTNVM PLATFORM SUPPORT 9243LIGHTNVM PLATFORM SUPPORT
9236M: Matias Bjorling <mb@lightnvm.io> 9244M: Matias Bjorling <mb@lightnvm.io>
9237W: http://github/OpenChannelSSD 9245W: http://github/OpenChannelSSD
@@ -11084,7 +11092,7 @@ NET_FAILOVER MODULE
11084M: Sridhar Samudrala <sridhar.samudrala@intel.com> 11092M: Sridhar Samudrala <sridhar.samudrala@intel.com>
11085L: netdev@vger.kernel.org 11093L: netdev@vger.kernel.org
11086S: Supported 11094S: Supported
11087F: driver/net/net_failover.c 11095F: drivers/net/net_failover.c
11088F: include/net/net_failover.h 11096F: include/net/net_failover.h
11089F: Documentation/networking/net_failover.rst 11097F: Documentation/networking/net_failover.rst
11090 11098
@@ -14476,6 +14484,7 @@ F: drivers/net/phy/phylink.c
14476F: drivers/net/phy/sfp* 14484F: drivers/net/phy/sfp*
14477F: include/linux/phylink.h 14485F: include/linux/phylink.h
14478F: include/linux/sfp.h 14486F: include/linux/sfp.h
14487K: phylink
14479 14488
14480SGI GRU DRIVER 14489SGI GRU DRIVER
14481M: Dimitri Sivanich <sivanich@sgi.com> 14490M: Dimitri Sivanich <sivanich@sgi.com>
@@ -14881,9 +14890,9 @@ F: include/linux/arm_sdei.h
14881F: include/uapi/linux/arm_sdei.h 14890F: include/uapi/linux/arm_sdei.h
14882 14891
14883SOFTWARE RAID (Multiple Disks) SUPPORT 14892SOFTWARE RAID (Multiple Disks) SUPPORT
14884M: Shaohua Li <shli@kernel.org> 14893M: Song Liu <song@kernel.org>
14885L: linux-raid@vger.kernel.org 14894L: linux-raid@vger.kernel.org
14886T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git 14895T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
14887S: Supported 14896S: Supported
14888F: drivers/md/Makefile 14897F: drivers/md/Makefile
14889F: drivers/md/Kconfig 14898F: drivers/md/Kconfig
diff --git a/Makefile b/Makefile
index 9fa18613566f..9b08f6383a52 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 3 3PATCHLEVEL = 3
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc5 5EXTRAVERSION = -rc8
6NAME = Bobtail Squid 6NAME = Bobtail Squid
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index a83c4f5e928b..8483a86c743d 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb
12# for CONFIG_OF_ALL_DTBS test 12# for CONFIG_OF_ALL_DTBS test
13dtstree := $(srctree)/$(src) 13dtstree := $(srctree)/$(src)
14dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) 14dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
15
16# board-specific dtc flags
17DTC_FLAGS_hsdk += --pad 20
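The --pad flag matters because the HSDK platform code later in this patch edits the flattened DT at boot: adding a property to a fully packed blob fails. A hedged illustration (add_dma_coherent() is illustrative only, not from this patch; it mirrors the fdt_setprop() call used in plat-hsdk/platform.c below):

  #include <linux/libfdt.h>
  #include <linux/printk.h>

  /* Adding a property needs free space in the blob; without the dtc
   * padding above, libfdt would return -FDT_ERR_NOSPACE here. */
  static int add_dma_coherent(void *fdt, int node)
  {
          int err = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);

          if (err == -FDT_ERR_NOSPACE)
                  pr_err("FDT has no free space; rebuild with dtc --pad\n");
          return err;
  }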
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index f5ae394ebe06..41b16f21beec 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -256,7 +256,7 @@
256 256
257.macro FAKE_RET_FROM_EXCPN 257.macro FAKE_RET_FROM_EXCPN
258 lr r9, [status32] 258 lr r9, [status32]
259 bic r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK) 259 bic r9, r9, STATUS_AE_MASK
260 or r9, r9, STATUS_IE_MASK 260 or r9, r9, STATUS_IE_MASK
261 kflag r9 261 kflag r9
262.endm 262.endm
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index a0eeb9f8f0a9..d9ee43c6b7db 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -62,15 +62,15 @@
62#else /* !__ASSEMBLY__ */ 62#else /* !__ASSEMBLY__ */
63 63
64#ifdef CONFIG_ARC_HAS_ICCM 64#ifdef CONFIG_ARC_HAS_ICCM
65#define __arcfp_code __attribute__((__section__(".text.arcfp"))) 65#define __arcfp_code __section(.text.arcfp)
66#else 66#else
67#define __arcfp_code __attribute__((__section__(".text"))) 67#define __arcfp_code __section(.text)
68#endif 68#endif
69 69
70#ifdef CONFIG_ARC_HAS_DCCM 70#ifdef CONFIG_ARC_HAS_DCCM
71#define __arcfp_data __attribute__((__section__(".data.arcfp"))) 71#define __arcfp_data __section(.data.arcfp)
72#else 72#else
73#define __arcfp_data __attribute__((__section__(".data"))) 73#define __arcfp_data __section(.data)
74#endif 74#endif
75 75
76#endif /* __ASSEMBLY__ */ 76#endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 8ac0e2ac3e70..73746ed5b834 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
53 */ 53 */
54#define MACHINE_START(_type, _name) \ 54#define MACHINE_START(_type, _name) \
55static const struct machine_desc __mach_desc_##_type \ 55static const struct machine_desc __mach_desc_##_type \
56__used \ 56__used __section(.arch.info.init) = { \
57__attribute__((__section__(".arch.info.init"))) = { \
58 .name = _name, 57 .name = _name,
59 58
60#define MACHINE_END \ 59#define MACHINE_END \
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 18b493dfb3a8..abf9398cc333 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
202 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask); 202 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
203} 203}
204 204
205static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, 205static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
206 unsigned int distr) 206 bool set_distr, unsigned int distr)
207{ 207{
208 union { 208 union {
209 unsigned int word; 209 unsigned int word;
@@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
212 }; 212 };
213 } data; 213 } data;
214 214
215 data.distr = distr; 215 data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
216 data.lvl = lvl; 216 if (set_distr)
217 data.distr = distr;
218 if (set_lvl)
219 data.lvl = lvl;
217 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word); 220 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
218} 221}
219 222
@@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data)
240 raw_spin_unlock_irqrestore(&mcip_lock, flags); 243 raw_spin_unlock_irqrestore(&mcip_lock, flags);
241} 244}
242 245
246static void idu_irq_ack(struct irq_data *data)
247{
248 unsigned long flags;
249
250 raw_spin_lock_irqsave(&mcip_lock, flags);
251 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
252 raw_spin_unlock_irqrestore(&mcip_lock, flags);
253}
254
255static void idu_irq_mask_ack(struct irq_data *data)
256{
257 unsigned long flags;
258
259 raw_spin_lock_irqsave(&mcip_lock, flags);
260 __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
261 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
262 raw_spin_unlock_irqrestore(&mcip_lock, flags);
263}
264
243static int 265static int
244idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, 266idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
245 bool force) 267 bool force)
@@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
263 else 285 else
264 distribution_mode = IDU_M_DISTRI_RR; 286 distribution_mode = IDU_M_DISTRI_RR;
265 287
266 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode); 288 idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
267 289
268 raw_spin_unlock_irqrestore(&mcip_lock, flags); 290 raw_spin_unlock_irqrestore(&mcip_lock, flags);
269 291
270 return IRQ_SET_MASK_OK; 292 return IRQ_SET_MASK_OK;
271} 293}
272 294
295static int idu_irq_set_type(struct irq_data *data, u32 type)
296{
297 unsigned long flags;
298
299 /*
300 * ARCv2 IDU HW does not support inverse polarity, so these are the
301 * only interrupt types supported.
302 */
303 if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
304 return -EINVAL;
305
306 raw_spin_lock_irqsave(&mcip_lock, flags);
307
308 idu_set_mode(data->hwirq, true,
309 type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
310 IDU_M_TRIG_LEVEL,
311 false, 0);
312
313 raw_spin_unlock_irqrestore(&mcip_lock, flags);
314
315 return 0;
316}
317
273static void idu_irq_enable(struct irq_data *data) 318static void idu_irq_enable(struct irq_data *data)
274{ 319{
275 /* 320 /*
@@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = {
289 .name = "MCIP IDU Intc", 334 .name = "MCIP IDU Intc",
290 .irq_mask = idu_irq_mask, 335 .irq_mask = idu_irq_mask,
291 .irq_unmask = idu_irq_unmask, 336 .irq_unmask = idu_irq_unmask,
337 .irq_ack = idu_irq_ack,
338 .irq_mask_ack = idu_irq_mask_ack,
292 .irq_enable = idu_irq_enable, 339 .irq_enable = idu_irq_enable,
340 .irq_set_type = idu_irq_set_type,
293#ifdef CONFIG_SMP 341#ifdef CONFIG_SMP
294 .irq_set_affinity = idu_irq_set_affinity, 342 .irq_set_affinity = idu_irq_set_affinity,
295#endif 343#endif
@@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
317} 365}
318 366
319static const struct irq_domain_ops idu_irq_ops = { 367static const struct irq_domain_ops idu_irq_ops = {
320 .xlate = irq_domain_xlate_onecell, 368 .xlate = irq_domain_xlate_onetwocell,
321 .map = idu_irq_map, 369 .map = idu_irq_map,
322}; 370};
323 371
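With the irq_set_type callback and the one/two-cell xlate added above, consumers can now request an IDU-routed interrupt as rising-edge rather than the previous implicit level trigger. A hedged usage sketch (my_isr() and "my-dev" are hypothetical, not part of this patch):

  #include <linux/interrupt.h>

  static irqreturn_t my_isr(int irq, void *dev_id)
  {
          /* device-specific handling would go here */
          return IRQ_HANDLED;
  }

  /* IRQF_TRIGGER_RISING now reaches idu_irq_set_type(), which programs
   * IDU_M_TRIG_EDGE instead of the former unconditional level mode. */
  static int my_request(unsigned int irq, void *dev)
  {
          return request_irq(irq, my_isr, IRQF_TRIGGER_RISING,
                             "my-dev", dev);
  }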
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index c2663fce7f6c..dc05a63516f5 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,6 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
572#else 572#else
573 BUILD_BUG_ON(sizeof(u32) != sizeof(value)); 573 BUILD_BUG_ON(sizeof(u32) != sizeof(value));
574#endif 574#endif
575 /* Fall through */
575 case DW_EH_PE_native: 576 case DW_EH_PE_native:
576 if (end < (const void *)(ptr.pul + 1)) 577 if (end < (const void *)(ptr.pul + 1))
577 return 0; 578 return 0;
@@ -826,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
826 case DW_CFA_def_cfa: 827 case DW_CFA_def_cfa:
827 state->cfa.reg = get_uleb128(&ptr.p8, end); 828 state->cfa.reg = get_uleb128(&ptr.p8, end);
828 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg); 829 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
829 /*nobreak*/ 830 /* fall through */
830 case DW_CFA_def_cfa_offset: 831 case DW_CFA_def_cfa_offset:
831 state->cfa.offs = get_uleb128(&ptr.p8, end); 832 state->cfa.offs = get_uleb128(&ptr.p8, end);
832 unw_debug("cfa_def_cfa_offset: 0x%lx ", 833 unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -834,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
834 break; 835 break;
835 case DW_CFA_def_cfa_sf: 836 case DW_CFA_def_cfa_sf:
836 state->cfa.reg = get_uleb128(&ptr.p8, end); 837 state->cfa.reg = get_uleb128(&ptr.p8, end);
837 /*nobreak */ 838 /* fall through */
838 case DW_CFA_def_cfa_offset_sf: 839 case DW_CFA_def_cfa_offset_sf:
839 state->cfa.offs = get_sleb128(&ptr.p8, end) 840 state->cfa.offs = get_sleb128(&ptr.p8, end)
840 * state->dataAlign; 841 * state->dataAlign;
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 62c210e7ee4c..70a3fbe79fba 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
101 if (is_isa_arcv2() && ioc_enable && coherent) 101 if (is_isa_arcv2() && ioc_enable && coherent)
102 dev->dma_coherent = true; 102 dev->dma_coherent = true;
103 103
104 dev_info(dev, "use %sncoherent DMA ops\n", 104 dev_info(dev, "use %scoherent DMA ops\n",
105 dev->dma_coherent ? "" : "non"); 105 dev->dma_coherent ? "" : "non");
106} 106}
107 107
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 7dd2dd335cf6..0b961a2a10b8 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -6,11 +6,15 @@
6 */ 6 */
7 7
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/of_fdt.h>
10#include <linux/libfdt.h>
9#include <linux/smp.h> 11#include <linux/smp.h>
10#include <asm/arcregs.h> 12#include <asm/arcregs.h>
11#include <asm/io.h> 13#include <asm/io.h>
12#include <asm/mach_desc.h> 14#include <asm/mach_desc.h>
13 15
16int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
17
14#define ARC_CCM_UNUSED_ADDR 0x60000000 18#define ARC_CCM_UNUSED_ADDR 0x60000000
15 19
16static void __init hsdk_init_per_cpu(unsigned int cpu) 20static void __init hsdk_init_per_cpu(unsigned int cpu)
@@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void)
97 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); 101 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
98} 102}
99 103
104static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
105{
106 void *fdt = initial_boot_params;
107 const void *prop;
108 int node, ret;
109 bool dt_coh_set;
110
111 node = fdt_path_offset(fdt, path);
112 if (node < 0)
113 goto tweak_fail;
114
115 prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
116 if (!prop && ret != -FDT_ERR_NOTFOUND)
117 goto tweak_fail;
118
119 dt_coh_set = ret != -FDT_ERR_NOTFOUND;
120 ret = 0;
121
122 /* need to remove "dma-coherent" property */
123 if (dt_coh_set && !coherent)
124 ret = fdt_delprop(fdt, node, "dma-coherent");
125
126 /* need to set "dma-coherent" property */
127 if (!dt_coh_set && coherent)
128 ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
129
130 if (ret < 0)
131 goto tweak_fail;
132
133 return 0;
134
135tweak_fail:
136 pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
137 return -EFAULT;
138}
139
100enum hsdk_axi_masters { 140enum hsdk_axi_masters {
101 M_HS_CORE = 0, 141 M_HS_CORE = 0,
102 M_HS_RTT, 142 M_HS_RTT,
@@ -162,6 +202,39 @@ enum hsdk_axi_masters {
162#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180)) 202#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
163#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194)) 203#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
164 204
205static void __init hsdk_init_memory_bridge_axi_dmac(void)
206{
207 bool coherent = !!arc_hsdk_axi_dmac_coherent;
208 u32 axi_m_slv1, axi_m_oft1;
209
210 /*
211 * Don't tweak memory bridge configuration if we failed to tweak the DTB
212 * as we will end up in an inconsistent state.
213 */
214 if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
215 return;
216
217 if (coherent) {
218 axi_m_slv1 = 0x77999999;
219 axi_m_oft1 = 0x76DCBA98;
220 } else {
221 axi_m_slv1 = 0x77777777;
222 axi_m_oft1 = 0x76543210;
223 }
224
225 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
226 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
227 writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
228 writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
229 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
230
231 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
233 writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
234 writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
235 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
236}
237
165static void __init hsdk_init_memory_bridge(void) 238static void __init hsdk_init_memory_bridge(void)
166{ 239{
167 u32 reg; 240 u32 reg;
@@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void)
227 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU)); 300 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
228 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU)); 301 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
229 302
230 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
231 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
233 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
234 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
235
236 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
237 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
238 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
239 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
240 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
241
242 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS)); 303 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
243 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS)); 304 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
244 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS)); 305 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
245 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS)); 306 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
246 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS)); 307 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
247 308
309 hsdk_init_memory_bridge_axi_dmac();
310
248 /* 311 /*
249 * PAE remapping for DMA clients does not work due to an RTL bug, so 312 * PAE remapping for DMA clients does not work due to an RTL bug, so
250 * CREG_PAE register must be programmed to all zeroes, otherwise it 313 * CREG_PAE register must be programmed to all zeroes, otherwise it
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 33b00579beff..24360211534a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,6 +7,8 @@ config ARM
7 select ARCH_HAS_BINFMT_FLAT 7 select ARCH_HAS_BINFMT_FLAT
8 select ARCH_HAS_DEBUG_VIRTUAL if MMU 8 select ARCH_HAS_DEBUG_VIRTUAL if MMU
9 select ARCH_HAS_DEVMEM_IS_ALLOWED 9 select ARCH_HAS_DEVMEM_IS_ALLOWED
10 select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
11 select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
10 select ARCH_HAS_ELF_RANDOMIZE 12 select ARCH_HAS_ELF_RANDOMIZE
11 select ARCH_HAS_FORTIFY_SOURCE 13 select ARCH_HAS_FORTIFY_SOURCE
12 select ARCH_HAS_KEEPINITRD 14 select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
18 select ARCH_HAS_SET_MEMORY 20 select ARCH_HAS_SET_MEMORY
19 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL 21 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
20 select ARCH_HAS_STRICT_MODULE_RWX if MMU 22 select ARCH_HAS_STRICT_MODULE_RWX if MMU
23 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
24 select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
21 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU 25 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
22 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 26 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
23 select ARCH_HAVE_CUSTOM_GPIO_H 27 select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index ced1a19d5f89..46849d6ecb3e 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -185,7 +185,7 @@
185 uart0: serial@0 { 185 uart0: serial@0 {
186 compatible = "ti,am3352-uart", "ti,omap3-uart"; 186 compatible = "ti,am3352-uart", "ti,omap3-uart";
187 clock-frequency = <48000000>; 187 clock-frequency = <48000000>;
188 reg = <0x0 0x2000>; 188 reg = <0x0 0x1000>;
189 interrupts = <72>; 189 interrupts = <72>;
190 status = "disabled"; 190 status = "disabled";
191 dmas = <&edma 26 0>, <&edma 27 0>; 191 dmas = <&edma 26 0>, <&edma 27 0>;
@@ -934,7 +934,7 @@
934 uart1: serial@0 { 934 uart1: serial@0 {
935 compatible = "ti,am3352-uart", "ti,omap3-uart"; 935 compatible = "ti,am3352-uart", "ti,omap3-uart";
936 clock-frequency = <48000000>; 936 clock-frequency = <48000000>;
937 reg = <0x0 0x2000>; 937 reg = <0x0 0x1000>;
938 interrupts = <73>; 938 interrupts = <73>;
939 status = "disabled"; 939 status = "disabled";
940 dmas = <&edma 28 0>, <&edma 29 0>; 940 dmas = <&edma 28 0>, <&edma 29 0>;
@@ -966,7 +966,7 @@
966 uart2: serial@0 { 966 uart2: serial@0 {
967 compatible = "ti,am3352-uart", "ti,omap3-uart"; 967 compatible = "ti,am3352-uart", "ti,omap3-uart";
968 clock-frequency = <48000000>; 968 clock-frequency = <48000000>;
969 reg = <0x0 0x2000>; 969 reg = <0x0 0x1000>;
970 interrupts = <74>; 970 interrupts = <74>;
971 status = "disabled"; 971 status = "disabled";
972 dmas = <&edma 30 0>, <&edma 31 0>; 972 dmas = <&edma 30 0>, <&edma 31 0>;
@@ -1614,7 +1614,7 @@
1614 uart3: serial@0 { 1614 uart3: serial@0 {
1615 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1615 compatible = "ti,am3352-uart", "ti,omap3-uart";
1616 clock-frequency = <48000000>; 1616 clock-frequency = <48000000>;
1617 reg = <0x0 0x2000>; 1617 reg = <0x0 0x1000>;
1618 interrupts = <44>; 1618 interrupts = <44>;
1619 status = "disabled"; 1619 status = "disabled";
1620 }; 1620 };
@@ -1644,7 +1644,7 @@
1644 uart4: serial@0 { 1644 uart4: serial@0 {
1645 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1645 compatible = "ti,am3352-uart", "ti,omap3-uart";
1646 clock-frequency = <48000000>; 1646 clock-frequency = <48000000>;
1647 reg = <0x0 0x2000>; 1647 reg = <0x0 0x1000>;
1648 interrupts = <45>; 1648 interrupts = <45>;
1649 status = "disabled"; 1649 status = "disabled";
1650 }; 1650 };
@@ -1674,7 +1674,7 @@
1674 uart5: serial@0 { 1674 uart5: serial@0 {
1675 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1675 compatible = "ti,am3352-uart", "ti,omap3-uart";
1676 clock-frequency = <48000000>; 1676 clock-frequency = <48000000>;
1677 reg = <0x0 0x2000>; 1677 reg = <0x0 0x1000>;
1678 interrupts = <46>; 1678 interrupts = <46>;
1679 status = "disabled"; 1679 status = "disabled";
1680 }; 1680 };
@@ -1758,6 +1758,8 @@
1758 1758
1759 target-module@cc000 { /* 0x481cc000, ap 60 46.0 */ 1759 target-module@cc000 { /* 0x481cc000, ap 60 46.0 */
1760 compatible = "ti,sysc-omap4", "ti,sysc"; 1760 compatible = "ti,sysc-omap4", "ti,sysc";
1761 reg = <0xcc020 0x4>;
1762 reg-names = "rev";
1761 ti,hwmods = "d_can0"; 1763 ti,hwmods = "d_can0";
1762 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1764 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1763 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>, 1765 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
@@ -1780,6 +1782,8 @@
1780 1782
1781 target-module@d0000 { /* 0x481d0000, ap 62 42.0 */ 1783 target-module@d0000 { /* 0x481d0000, ap 62 42.0 */
1782 compatible = "ti,sysc-omap4", "ti,sysc"; 1784 compatible = "ti,sysc-omap4", "ti,sysc";
1785 reg = <0xd0020 0x4>;
1786 reg-names = "rev";
1783 ti,hwmods = "d_can1"; 1787 ti,hwmods = "d_can1";
1784 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1788 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1785 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>, 1789 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index e5c2f71a7c77..fb6b8aa12cc5 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -234,13 +234,33 @@
234 interrupt-names = "edma3_tcerrint"; 234 interrupt-names = "edma3_tcerrint";
235 }; 235 };
236 236
237 mmc3: mmc@47810000 { 237 target-module@47810000 {
238 compatible = "ti,omap4-hsmmc"; 238 compatible = "ti,sysc-omap2", "ti,sysc";
239 ti,hwmods = "mmc3"; 239 ti,hwmods = "mmc3";
240 ti,needs-special-reset; 240 reg = <0x478102fc 0x4>,
241 interrupts = <29>; 241 <0x47810110 0x4>,
242 reg = <0x47810000 0x1000>; 242 <0x47810114 0x4>;
243 status = "disabled"; 243 reg-names = "rev", "sysc", "syss";
244 ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
245 SYSC_OMAP2_ENAWAKEUP |
246 SYSC_OMAP2_SOFTRESET |
247 SYSC_OMAP2_AUTOIDLE)>;
248 ti,sysc-sidle = <SYSC_IDLE_FORCE>,
249 <SYSC_IDLE_NO>,
250 <SYSC_IDLE_SMART>;
251 ti,syss-mask = <1>;
252 clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
253 clock-names = "fck";
254 #address-cells = <1>;
255 #size-cells = <1>;
256 ranges = <0x0 0x47810000 0x1000>;
257
258 mmc3: mmc@0 {
259 compatible = "ti,omap4-hsmmc";
260 ti,needs-special-reset;
261 interrupts = <29>;
262 reg = <0x0 0x1000>;
263 };
244 }; 264 };
245 265
246 usb: usb@47400000 { 266 usb: usb@47400000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 55aff4db9c7c..848e2a8884e2 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -228,13 +228,33 @@
228 interrupt-names = "edma3_tcerrint"; 228 interrupt-names = "edma3_tcerrint";
229 }; 229 };
230 230
231 mmc3: mmc@47810000 { 231 target-module@47810000 {
232 compatible = "ti,omap4-hsmmc"; 232 compatible = "ti,sysc-omap2", "ti,sysc";
233 reg = <0x47810000 0x1000>;
234 ti,hwmods = "mmc3"; 233 ti,hwmods = "mmc3";
235 ti,needs-special-reset; 234 reg = <0x478102fc 0x4>,
236 interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; 235 <0x47810110 0x4>,
237 status = "disabled"; 236 <0x47810114 0x4>;
237 reg-names = "rev", "sysc", "syss";
238 ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
239 SYSC_OMAP2_ENAWAKEUP |
240 SYSC_OMAP2_SOFTRESET |
241 SYSC_OMAP2_AUTOIDLE)>;
242 ti,sysc-sidle = <SYSC_IDLE_FORCE>,
243 <SYSC_IDLE_NO>,
244 <SYSC_IDLE_SMART>;
245 ti,syss-mask = <1>;
246 clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
247 clock-names = "fck";
248 #address-cells = <1>;
249 #size-cells = <1>;
250 ranges = <0x0 0x47810000 0x1000>;
251
252 mmc3: mmc@0 {
253 compatible = "ti,omap4-hsmmc";
254 ti,needs-special-reset;
255 interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
256 reg = <0x0 0x1000>;
257 };
238 }; 258 };
239 259
240 sham: sham@53100000 { 260 sham: sham@53100000 {
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index 989cb60b9029..04bee4ff9dcb 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -1574,6 +1574,8 @@
1574 1574
1575 target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ 1575 target-module@cc000 { /* 0x481cc000, ap 50 46.0 */
1576 compatible = "ti,sysc-omap4", "ti,sysc"; 1576 compatible = "ti,sysc-omap4", "ti,sysc";
1577 reg = <0xcc020 0x4>;
1578 reg-names = "rev";
1577 ti,hwmods = "d_can0"; 1579 ti,hwmods = "d_can0";
1578 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1580 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1579 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; 1581 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
@@ -1593,6 +1595,8 @@
1593 1595
1594 target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ 1596 target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */
1595 compatible = "ti,sysc-omap4", "ti,sysc"; 1597 compatible = "ti,sysc-omap4", "ti,sysc";
1598 reg = <0xd0020 0x4>;
1599 reg-names = "rev";
1596 ti,hwmods = "d_can1"; 1600 ti,hwmods = "d_can1";
1597 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1601 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1598 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; 1602 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 1d5e99964bbf..0aaacea1d887 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -175,14 +175,9 @@
175}; 175};
176 176
177&mmc1 { 177&mmc1 {
178 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 178 pinctrl-names = "default", "hs";
179 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 179 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
180 pinctrl-1 = <&mmc1_pins_hs>; 180 pinctrl-1 = <&mmc1_pins_hs>;
181 pinctrl-2 = <&mmc1_pins_sdr12>;
182 pinctrl-3 = <&mmc1_pins_sdr25>;
183 pinctrl-4 = <&mmc1_pins_sdr50>;
184 pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
185 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
186}; 181};
187 182
188&mmc2 { 183&mmc2 {
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index c65d7f6d3b5a..ea1c119feaa5 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 20 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
27}; 22};
28 23
29&mmc2 { 24&mmc2 {
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index dc5141c35610..7935d70874ce 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -24,14 +24,9 @@
24}; 24};
25 25
26&mmc1 { 26&mmc1 {
27 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 27 pinctrl-names = "default", "hs";
28 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 28 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
29 pinctrl-1 = <&mmc1_pins_hs>; 29 pinctrl-1 = <&mmc1_pins_hs>;
30 pinctrl-2 = <&mmc1_pins_default>;
31 pinctrl-3 = <&mmc1_pins_hs>;
32 pinctrl-4 = <&mmc1_pins_sdr50>;
33 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
34 pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
35}; 30};
36 31
37&mmc2 { 32&mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index d02f5fa61e5f..bc76f1705c0f 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -379,7 +379,7 @@
379 }; 379 };
380}; 380};
381 381
382&gpio7 { 382&gpio7_target {
383 ti,no-reset-on-init; 383 ti,no-reset-on-init;
384 ti,no-idle-on-init; 384 ti,no-idle-on-init;
385}; 385};
@@ -430,6 +430,7 @@
430 430
431 bus-width = <4>; 431 bus-width = <4>;
432 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ 432 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
433 no-1-8-v;
433}; 434};
434 435
435&mmc2 { 436&mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
index a374b5cd6db0..7b113b52c3fb 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default>; 20 pinctrl-0 = <&mmc1_pins_default>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
27 vmmc-supply = <&vdd_3v3>; 22 vmmc-supply = <&vdd_3v3>;
28 vqmmc-supply = <&ldo1_reg>; 23 vqmmc-supply = <&ldo1_reg>;
29}; 24};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
index 4badd2144db9..30c500b15b21 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default>; 20 pinctrl-0 = <&mmc1_pins_default>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
27 vmmc-supply = <&vdd_3v3>; 22 vmmc-supply = <&vdd_3v3>;
28 vqmmc-supply = <&ldo1_reg>; 23 vqmmc-supply = <&ldo1_reg>;
29}; 24};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 714e971b912a..de7f85efaa51 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -498,7 +498,7 @@
498 phy-supply = <&ldousb_reg>; 498 phy-supply = <&ldousb_reg>;
499}; 499};
500 500
501&gpio7 { 501&gpio7_target {
502 ti,no-reset-on-init; 502 ti,no-reset-on-init;
503 ti,no-idle-on-init; 503 ti,no-idle-on-init;
504}; 504};
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 23faedec08ab..21e5914fdd62 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1261,7 +1261,7 @@
1261 }; 1261 };
1262 }; 1262 };
1263 1263
1264 target-module@51000 { /* 0x48051000, ap 45 2e.0 */ 1264 gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */
1265 compatible = "ti,sysc-omap2", "ti,sysc"; 1265 compatible = "ti,sysc-omap2", "ti,sysc";
1266 ti,hwmods = "gpio7"; 1266 ti,hwmods = "gpio7";
1267 reg = <0x51000 0x4>, 1267 reg = <0x51000 0x4>,
@@ -3025,7 +3025,7 @@
3025 3025
3026 target-module@80000 { /* 0x48480000, ap 31 16.0 */ 3026 target-module@80000 { /* 0x48480000, ap 31 16.0 */
3027 compatible = "ti,sysc-omap4", "ti,sysc"; 3027 compatible = "ti,sysc-omap4", "ti,sysc";
3028 reg = <0x80000 0x4>; 3028 reg = <0x80020 0x4>;
3029 reg-names = "rev"; 3029 reg-names = "rev";
3030 clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>; 3030 clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
3031 clock-names = "fck"; 3031 clock-names = "fck";
@@ -4577,7 +4577,7 @@
4577 4577
4578 target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */ 4578 target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */
4579 compatible = "ti,sysc-omap4", "ti,sysc"; 4579 compatible = "ti,sysc-omap4", "ti,sysc";
4580 reg = <0xc000 0x4>; 4580 reg = <0xc020 0x4>;
4581 reg-names = "rev"; 4581 reg-names = "rev";
4582 clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>; 4582 clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
4583 clock-names = "fck"; 4583 clock-names = "fck";
diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
index 28ebb4eb884a..214b9e6de2c3 100644
--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
@@ -32,7 +32,7 @@
32 * 32 *
33 * Datamanual Revisions: 33 * Datamanual Revisions:
34 * 34 *
35 * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 35 * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
36 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 36 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
37 * 37 *
38 */ 38 */
@@ -229,45 +229,45 @@
229 229
230 mmc3_pins_default: mmc3_pins_default { 230 mmc3_pins_default: mmc3_pins_default {
231 pinctrl-single,pins = < 231 pinctrl-single,pins = <
232 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 232 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
233 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 233 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
234 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 234 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
235 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 235 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
236 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 236 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
237 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 237 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
238 >; 238 >;
239 }; 239 };
240 240
241 mmc3_pins_hs: mmc3_pins_hs { 241 mmc3_pins_hs: mmc3_pins_hs {
242 pinctrl-single,pins = < 242 pinctrl-single,pins = <
243 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 243 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
244 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 244 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
245 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 245 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
246 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 246 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
247 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 247 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
248 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 248 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
249 >; 249 >;
250 }; 250 };
251 251
252 mmc3_pins_sdr12: mmc3_pins_sdr12 { 252 mmc3_pins_sdr12: mmc3_pins_sdr12 {
253 pinctrl-single,pins = < 253 pinctrl-single,pins = <
254 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 254 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
255 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 255 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
256 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 256 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
257 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 257 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
258 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 258 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
259 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 259 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
260 >; 260 >;
261 }; 261 };
262 262
263 mmc3_pins_sdr25: mmc3_pins_sdr25 { 263 mmc3_pins_sdr25: mmc3_pins_sdr25 {
264 pinctrl-single,pins = < 264 pinctrl-single,pins = <
265 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 265 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
266 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 266 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
267 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 267 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
268 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 268 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
269 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 269 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
270 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 270 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
271 >; 271 >;
272 }; 272 };
273 273
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts
index 3fa0cbe456db..0f3870d3b099 100644
--- a/arch/arm/boot/dts/vf610-bk4.dts
+++ b/arch/arm/boot/dts/vf610-bk4.dts
@@ -246,13 +246,13 @@
246 reg = <0>; 246 reg = <0>;
247 }; 247 };
248 248
249 n25q128a13_2: flash@1 { 249 n25q128a13_2: flash@2 {
250 compatible = "n25q128a13", "jedec,spi-nor"; 250 compatible = "n25q128a13", "jedec,spi-nor";
251 #address-cells = <1>; 251 #address-cells = <1>;
252 #size-cells = <1>; 252 #size-cells = <1>;
253 spi-max-frequency = <66000000>; 253 spi-max-frequency = <66000000>;
254 spi-rx-bus-width = <2>; 254 spi-rx-bus-width = <2>;
255 reg = <1>; 255 reg = <2>;
256 }; 256 };
257}; 257};
258 258
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 1d5210eb4776..582925238d65 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -66,7 +66,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
66 66
67 1003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists, 67 1003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
68 ldr r3, .Ldsi+4 @ adjust saved 'pc' back one 68 ldr r3, .Ldsi+4 @ adjust saved 'pc' back one
69 teq r3, r2, lsr #10 @ instruction 69 teq r3, r2, lsr #11 @ instruction
70 subne r0, sv_pc, #4 @ allow for mov 70 subne r0, sv_pc, #4 @ allow for mov
71 subeq r0, sv_pc, #8 @ allow for mov + stmia 71 subeq r0, sv_pc, #8 @ allow for mov + stmia
72 72
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 81159af44862..14a6c3eb3298 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -126,6 +126,8 @@ restart:
126 orr r11, r11, r13 @ mask all requested interrupts 126 orr r11, r11, r13 @ mask all requested interrupts
127 str r11, [r12, #OMAP1510_GPIO_INT_MASK] 127 str r11, [r12, #OMAP1510_GPIO_INT_MASK]
128 128
129 str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
130
129 ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set? 131 ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
130 beq hksw @ no - try next source 132 beq hksw @ no - try next source
131 133
@@ -133,7 +135,6 @@ restart:
133 @@@@@@@@@@@@@@@@@@@@@@ 135 @@@@@@@@@@@@@@@@@@@@@@
134 @ Keyboard clock FIQ mode interrupt handler 136 @ Keyboard clock FIQ mode interrupt handler
135 @ r10 now contains KEYBRD_CLK_MASK, use it 137 @ r10 now contains KEYBRD_CLK_MASK, use it
136 str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
137 bic r11, r11, r10 @ unmask it 138 bic r11, r11, r10 @ unmask it
138 str r11, [r12, #OMAP1510_GPIO_INT_MASK] 139 str r11, [r12, #OMAP1510_GPIO_INT_MASK]
139 140
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index 43899fa56674..0254eb9cf8c6 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -70,9 +70,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
70 * interrupts default to since commit 80ac93c27441 70 * interrupts default to since commit 80ac93c27441
71 * requires interrupt already acked and unmasked. 71 * requires interrupt already acked and unmasked.
72 */ 72 */
73 if (irq_chip->irq_ack) 73 if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
74 irq_chip->irq_ack(d);
75 if (irq_chip->irq_unmask)
76 irq_chip->irq_unmask(d); 74 irq_chip->irq_unmask(d);
77 } 75 }
78 for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) 76 for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 600650551621..d4f11c5070ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -229,3 +229,5 @@ include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORC
229 $(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h 229 $(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
230 230
231 targets += pm-asm-offsets.s 231 targets += pm-asm-offsets.s
232
233 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
new file mode 100644
index 000000000000..f1a6ece8108e
--- /dev/null
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP IOMMU quirks for various TI SoCs
 *
 * Copyright (C) 2015-2019 Texas Instruments Incorporated - http://www.ti.com/
 * Suman Anna <s-anna@ti.com>
 */

#include <linux/platform_device.h>
#include <linux/err.h>

#include "omap_hwmod.h"
#include "omap_device.h"
#include "powerdomain.h"

int omap_iommu_set_pwrdm_constraint(struct platform_device *pdev, bool request,
                                    u8 *pwrst)
{
        struct powerdomain *pwrdm;
        struct omap_device *od;
        u8 next_pwrst;

        od = to_omap_device(pdev);
        if (!od)
                return -ENODEV;

        if (od->hwmods_cnt != 1)
                return -EINVAL;

        pwrdm = omap_hwmod_get_pwrdm(od->hwmods[0]);
        if (!pwrdm)
                return -EINVAL;

        if (request)
                *pwrst = pwrdm_read_next_pwrst(pwrdm);

        if (*pwrst > PWRDM_POWER_RET)
                return 0;

        next_pwrst = request ? PWRDM_POWER_ON : *pwrst;

        return pwrdm_set_next_pwrst(pwrdm, next_pwrst);
}
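
The helper above is intended to let the OMAP IOMMU driver raise its power domain to ON while the MMU registers are programmed and restore the saved target state afterwards. A minimal caller sketch, assuming the driver reaches it through a platform-data hook; the wrapper names here are illustrative, not from this series:

static u8 saved_pwrst;

/* request=true: remember the current target state, then force ON */
static int mmu_pm_prepare(struct platform_device *pdev)
{
        return omap_iommu_set_pwrdm_constraint(pdev, true, &saved_pwrst);
}

/* request=false: restore the state saved in mmu_pm_prepare() */
static int mmu_pm_complete(struct platform_device *pdev)
{
        return omap_iommu_set_pwrdm_constraint(pdev, false, &saved_pwrst);
}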
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index f9c02f9f1c92..5c3845730dbf 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
127 struct device_node *np; 127 struct device_node *np;
128 struct gen_pool *sram_pool; 128 struct gen_pool *sram_pool;
129 129
130 if (!soc_is_omap44xx() && !soc_is_omap54xx())
131 return 0;
132
130 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); 133 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
131 if (!np) 134 if (!np)
132 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", 135 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 4a5b4aee6615..1ec21e9ba1e9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
379static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { 379static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
380 .rev_offs = 0x0, 380 .rev_offs = 0x0,
381 .sysc_offs = 0x4, 381 .sysc_offs = 0x4,
382 .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, 382 .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
383 SYSC_HAS_RESET_STATUS,
383 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), 384 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
384 .sysc_fields = &omap_hwmod_sysc_type2, 385 .sysc_fields = &omap_hwmod_sysc_type2,
385}; 386};
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 0ce56ad754ce..ea2c84214bac 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
46 switch (tag->u.acorn.vram_pages) { 46 switch (tag->u.acorn.vram_pages) {
47 case 512: 47 case 512:
48 vram_size += PAGE_SIZE * 256; 48 vram_size += PAGE_SIZE * 256;
49 /* Fall through - ??? */
49 case 256: 50 case 256:
50 vram_size += PAGE_SIZE * 256; 51 vram_size += PAGE_SIZE * 256;
51 default: 52 default:
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54cd7ed90ba..c1222c0e9fd3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -664,10 +664,6 @@ config ARM_LPAE
664 !CPU_32v4 && !CPU_32v3 664 !CPU_32v4 && !CPU_32v3
665 select PHYS_ADDR_T_64BIT 665 select PHYS_ADDR_T_64BIT
666 select SWIOTLB 666 select SWIOTLB
667 select ARCH_HAS_DMA_COHERENT_TO_PFN
668 select ARCH_HAS_DMA_MMAP_PGPROT
669 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
670 select ARCH_HAS_SYNC_DMA_FOR_CPU
671 help 667 help
672 Say Y if you have an ARMv7 processor supporting the LPAE page 668 Say Y if you have an ARMv7 processor supporting the LPAE page
673 table format and you would like to access memory beyond the 669 table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 16d373d587c4..b4be3baa83d4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -175,6 +175,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
175#ifdef CONFIG_HAVE_ARCH_PFN_VALID 175#ifdef CONFIG_HAVE_ARCH_PFN_VALID
176int pfn_valid(unsigned long pfn) 176int pfn_valid(unsigned long pfn)
177{ 177{
178 phys_addr_t addr = __pfn_to_phys(pfn);
179
180 if (__phys_to_pfn(addr) != pfn)
181 return 0;
182
178 return memblock_is_map_memory(__pfn_to_phys(pfn)); 183 return memblock_is_map_memory(__pfn_to_phys(pfn));
179} 184}
180EXPORT_SYMBOL(pfn_valid); 185EXPORT_SYMBOL(pfn_valid);
@@ -628,7 +633,8 @@ static void update_sections_early(struct section_perm perms[], int n)
628 if (t->flags & PF_KTHREAD) 633 if (t->flags & PF_KTHREAD)
629 continue; 634 continue;
630 for_each_thread(t, s) 635 for_each_thread(t, s)
631 set_section_perms(perms, n, true, s->mm); 636 if (s->mm)
637 set_section_perms(perms, n, true, s->mm);
632 } 638 }
633 set_section_perms(perms, n, true, current->active_mm); 639 set_section_perms(perms, n, true, current->active_mm);
634 set_section_perms(perms, n, true, &init_mm); 640 set_section_perms(perms, n, true, &init_mm);
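
The pfn_valid() hunk rejects any pfn whose physical address does not survive a round-trip through __pfn_to_phys()/__phys_to_pfn(), which happens when the shift overflows phys_addr_t. A standalone sketch of the failure mode, assuming a 32-bit phys_addr_t for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint32_t phys_addr_t;                   /* assumed: 32-bit physical */

#define __pfn_to_phys(pfn)  ((phys_addr_t)(pfn) << PAGE_SHIFT)
#define __phys_to_pfn(addr) ((unsigned long)((addr) >> PAGE_SHIFT))

int main(void)
{
        unsigned long pfn = 0x100001;           /* << 12 overflows 32 bits */
        phys_addr_t addr = __pfn_to_phys(pfn);

        /* the round-trip fails, so such a pfn must be reported invalid */
        printf("%d\n", __phys_to_pfn(addr) != pfn);     /* prints 1 */
        return 0;
}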
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index c7a87368850b..12aa7eaeaf68 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -339,6 +339,12 @@
339 pinctrl-names = "default"; 339 pinctrl-names = "default";
340}; 340};
341 341
342&ir {
343 status = "okay";
344 pinctrl-0 = <&remote_input_ao_pins>;
345 pinctrl-names = "default";
346};
347
342&pwm_ef { 348&pwm_ef {
343 status = "okay"; 349 status = "okay";
344 pinctrl-0 = <&pwm_e_pins>; 350 pinctrl-0 = <&pwm_e_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index f8d43e3dcf20..1785552d450c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -2386,6 +2386,7 @@
2386 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; 2386 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
2387 clock-names = "ddr"; 2387 clock-names = "ddr";
2388 phys = <&usb2_phy1>; 2388 phys = <&usb2_phy1>;
2389 phy-names = "usb2-phy";
2389 dr_mode = "peripheral"; 2390 dr_mode = "peripheral";
2390 g-rx-fifo-size = <192>; 2391 g-rx-fifo-size = <192>;
2391 g-np-tx-fifo-size = <128>; 2392 g-np-tx-fifo-size = <128>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
index 81780ffcc7f0..4e916e1f71f7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
@@ -53,6 +53,7 @@
53 53
54 gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>; 54 gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>;
55 enable-active-high; 55 enable-active-high;
56 regulator-always-on;
56 }; 57 };
57 58
58 tf_io: gpio-regulator-tf_io { 59 tf_io: gpio-regulator-tf_io {
diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
index 3311a982fff8..23fd0224ca90 100644
--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
@@ -279,6 +279,7 @@
279 mmc-hs200-1_8v; 279 mmc-hs200-1_8v;
280 non-removable; 280 non-removable;
281 fixed-emmc-driver-type = <1>; 281 fixed-emmc-driver-type = <1>;
282 status = "okay";
282}; 283};
283 284
284&usb_extal_clk { 285&usb_extal_clk {
diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
index 0711170b26b1..3aa2564dfdc2 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
@@ -97,7 +97,7 @@
97 reg = <0x0 0x48000000 0x0 0x18000000>; 97 reg = <0x0 0x48000000 0x0 0x18000000>;
98 }; 98 };
99 99
100 reg_1p8v: regulator0 { 100 reg_1p8v: regulator-1p8v {
101 compatible = "regulator-fixed"; 101 compatible = "regulator-fixed";
102 regulator-name = "fixed-1.8V"; 102 regulator-name = "fixed-1.8V";
103 regulator-min-microvolt = <1800000>; 103 regulator-min-microvolt = <1800000>;
@@ -106,7 +106,7 @@
106 regulator-always-on; 106 regulator-always-on;
107 }; 107 };
108 108
109 reg_3p3v: regulator1 { 109 reg_3p3v: regulator-3p3v {
110 compatible = "regulator-fixed"; 110 compatible = "regulator-fixed";
111 regulator-name = "fixed-3.3V"; 111 regulator-name = "fixed-3.3V";
112 regulator-min-microvolt = <3300000>; 112 regulator-min-microvolt = <3300000>;
@@ -115,7 +115,7 @@
115 regulator-always-on; 115 regulator-always-on;
116 }; 116 };
117 117
118 reg_12p0v: regulator1 { 118 reg_12p0v: regulator-12p0v {
119 compatible = "regulator-fixed"; 119 compatible = "regulator-fixed";
120 regulator-name = "D12.0V"; 120 regulator-name = "D12.0V";
121 regulator-min-microvolt = <12000000>; 121 regulator-min-microvolt = <12000000>;
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 7429a72f3f92..92aceef63710 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -8,10 +8,8 @@
8extern void no_iommu_init(void); 8extern void no_iommu_init(void);
9#ifdef CONFIG_INTEL_IOMMU 9#ifdef CONFIG_INTEL_IOMMU
10extern int force_iommu, no_iommu; 10extern int force_iommu, no_iommu;
11extern int iommu_pass_through;
12extern int iommu_detected; 11extern int iommu_detected;
13#else 12#else
14#define iommu_pass_through (0)
15#define no_iommu (1) 13#define no_iommu (1)
16#define iommu_detected (0) 14#define iommu_detected (0)
17#endif 15#endif
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index fe988c49f01c..f5d49cd3fbb0 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -22,8 +22,6 @@ int force_iommu __read_mostly = 1;
22int force_iommu __read_mostly; 22int force_iommu __read_mostly;
23#endif 23#endif
24 24
25int iommu_pass_through;
26
27static int __init pci_iommu_init(void) 25static int __init pci_iommu_init(void)
28{ 26{
29 if (iommu_detected) 27 if (iommu_detected)
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index 52cf96ea43e5..cbc7cdae1c6a 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
46 case OCTEON_CN78XX & OCTEON_FAMILY_MASK: 46 case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
47 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) 47 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
48 return 0x0000000000003CB0ull; 48 return 0x0000000000003CB0ull;
49 /* Else, fall through */
49 default: 50 default:
50 return 0x0000000000023CB0ull; 51 return 0x0000000000023CB0ull;
51 } 52 }
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index fe61513982b4..330b19fcd990 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,6 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
316 regs->uregs[0] = -EINTR; 316 regs->uregs[0] = -EINTR;
317 break; 317 break;
318 } 318 }
319 /* Else, fall through */
319 case -ERESTARTNOINTR: 320 case -ERESTARTNOINTR:
320 regs->uregs[0] = regs->orig_r0; 321 regs->uregs[0] = regs->orig_r0;
321 regs->ipc -= 4; 322 regs->ipc -= 4;
@@ -360,6 +361,7 @@ static void do_signal(struct pt_regs *regs)
360 switch (regs->uregs[0]) { 361 switch (regs->uregs[0]) {
361 case -ERESTART_RESTARTBLOCK: 362 case -ERESTART_RESTARTBLOCK:
362 regs->uregs[15] = __NR_restart_syscall; 363 regs->uregs[15] = __NR_restart_syscall;
364 /* Fall through */
363 case -ERESTARTNOHAND: 365 case -ERESTARTNOHAND:
364 case -ERESTARTSYS: 366 case -ERESTARTSYS:
365 case -ERESTARTNOINTR: 367 case -ERESTARTNOINTR:
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a39b079e73f2..6d58c1739b42 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
2#ifndef _PARISC_PGTABLE_H 2#ifndef _PARISC_PGTABLE_H
3#define _PARISC_PGTABLE_H 3#define _PARISC_PGTABLE_H
4 4
5#include <asm/page.h>
5#include <asm-generic/4level-fixup.h> 6#include <asm-generic/4level-fixup.h>
6 7
7#include <asm/fixmap.h> 8#include <asm/fixmap.h>
@@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
98 99
99#endif /* !__ASSEMBLY__ */ 100#endif /* !__ASSEMBLY__ */
100 101
101#include <asm/page.h>
102
103#define pte_ERROR(e) \ 102#define pte_ERROR(e) \
104 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 103 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
105#define pmd_ERROR(e) \ 104#define pmd_ERROR(e) \
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8fc4de0d22b4..7a84c9f1778e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
101 } 101 }
102} 102}
103 103
104static bool tm_active_with_fp(struct task_struct *tsk)
105{
106 return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
107 (tsk->thread.ckpt_regs.msr & MSR_FP);
108}
109
110static bool tm_active_with_altivec(struct task_struct *tsk)
111{
112 return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
113 (tsk->thread.ckpt_regs.msr & MSR_VEC);
114}
115#else 104#else
116static inline void check_if_tm_restore_required(struct task_struct *tsk) { } 105static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
117static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
118static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
119#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 106#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
120 107
121bool strict_msr_control; 108bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
252 239
253static int restore_fp(struct task_struct *tsk) 240static int restore_fp(struct task_struct *tsk)
254{ 241{
255 if (tsk->thread.load_fp || tm_active_with_fp(tsk)) { 242 if (tsk->thread.load_fp) {
256 load_fp_state(&current->thread.fp_state); 243 load_fp_state(&current->thread.fp_state);
257 current->thread.load_fp++; 244 current->thread.load_fp++;
258 return 1; 245 return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
334 321
335static int restore_altivec(struct task_struct *tsk) 322static int restore_altivec(struct task_struct *tsk)
336{ 323{
337 if (cpu_has_feature(CPU_FTR_ALTIVEC) && 324 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
338 (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
339 load_vr_state(&tsk->thread.vr_state); 325 load_vr_state(&tsk->thread.vr_state);
340 tsk->thread.used_vr = 1; 326 tsk->thread.used_vr = 1;
341 tsk->thread.load_vec++; 327 tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
497 if (!tsk->thread.regs) 483 if (!tsk->thread.regs)
498 return; 484 return;
499 485
486 check_if_tm_restore_required(tsk);
487
500 usermsr = tsk->thread.regs->msr; 488 usermsr = tsk->thread.regs->msr;
501 489
502 if ((usermsr & msr_all_available) == 0) 490 if ((usermsr & msr_all_available) == 0)
503 return; 491 return;
504 492
505 msr_check_and_set(msr_all_available); 493 msr_check_and_set(msr_all_available);
506 check_if_tm_restore_required(tsk);
507 494
508 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); 495 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
509 496
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index e99a14798ab0..c4b606fe73eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -660,8 +660,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
660 } 660 }
661 tce = be64_to_cpu(tce); 661 tce = be64_to_cpu(tce);
662 662
663 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) 663 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
664 return H_PARAMETER; 664 ret = H_PARAMETER;
665 goto unlock_exit;
666 }
665 667
666 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 668 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
667 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, 669 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index f50bbeedfc66..b4f20f13b860 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
556 unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); 556 unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
557 557
558 ua = 0; 558 ua = 0;
559 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) 559 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
560 return H_PARAMETER; 560 ret = H_PARAMETER;
561 goto unlock_exit;
562 }
561 563
562 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 564 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
563 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, 565 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
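
Both kvmppc hunks turn a bare return H_PARAMETER into a jump to the function's common exit label, so locks taken earlier in the hypercall are dropped on the error path too. The shape of the idiom, sketched standalone with stub helpers:

#include <stdbool.h>

static void lock(void)   { }
static void unlock(void) { }
static bool bad_parameter(void) { return true; }

static long put_tce_indirect_like(void)
{
        long ret = 0;                   /* H_SUCCESS */

        lock();
        if (bad_parameter()) {
                ret = -1;               /* H_PARAMETER */
                goto unlock_exit;       /* not a bare return: lock is held */
        }
        /* ... map the entries ... */
unlock_exit:
        unlock();
        return ret;
}

int main(void) { return put_tce_indirect_like() == -1 ? 0 : 1; }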
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index d4acf6fa0596..bf60983a58c7 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
630#ifdef CONFIG_PPC_FSL_BOOK3E 630#ifdef CONFIG_PPC_FSL_BOOK3E
631 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { 631 if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
632 unsigned int num_cams; 632 unsigned int num_cams;
633 int __maybe_unused cpu = smp_processor_id();
634 bool map = true; 633 bool map = true;
635 634
636 /* use a quarter of the TLBCAM for bolted linear map */ 635 /* use a quarter of the TLBCAM for bolted linear map */
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 9c66033c3a54..161f28d04a07 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -30,10 +30,6 @@ enum fixed_addresses {
30 __end_of_fixed_addresses 30 __end_of_fixed_addresses
31}; 31};
32 32
33#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
34#define FIXADDR_TOP (VMALLOC_START)
35#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
36
37#define FIXMAP_PAGE_IO PAGE_KERNEL 33#define FIXMAP_PAGE_IO PAGE_KERNEL
38 34
39#define __early_set_fixmap __set_fixmap 35#define __early_set_fixmap __set_fixmap
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index a364aba23d55..c24a083b3e12 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -420,14 +420,22 @@ static inline void pgtable_cache_init(void)
420#define VMALLOC_END (PAGE_OFFSET - 1) 420#define VMALLOC_END (PAGE_OFFSET - 1)
421#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) 421#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
422 422
423#define FIXADDR_TOP VMALLOC_START
424#ifdef CONFIG_64BIT
425#define FIXADDR_SIZE PMD_SIZE
426#else
427#define FIXADDR_SIZE PGDIR_SIZE
428#endif
429#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
430
423/* 431/*
424 * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32. 432 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
425 * Note that PGDIR_SIZE must evenly divide TASK_SIZE. 433 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
426 */ 434 */
427#ifdef CONFIG_64BIT 435#ifdef CONFIG_64BIT
428#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2) 436#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
429#else 437#else
430#define TASK_SIZE VMALLOC_START 438#define TASK_SIZE FIXADDR_START
431#endif 439#endif
432 440
433#include <asm-generic/pgtable.h> 441#include <asm-generic/pgtable.h>
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e636728ab452..955eb355c2fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
863 break; 863 break;
864 case BPF_ALU64 | BPF_NEG: /* dst = -dst */ 864 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
865 /* lcgr %dst,%dst */ 865 /* lcgr %dst,%dst */
866 EMIT4(0xb9130000, dst_reg, dst_reg); 866 EMIT4(0xb9030000, dst_reg, dst_reg);
867 break; 867 break;
868 /* 868 /*
869 * BPF_FROM_BE/LE 869 * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
1049 /* llgf %w1,map.max_entries(%b2) */ 1049 /* llgf %w1,map.max_entries(%b2) */
1050 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, 1050 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
1051 offsetof(struct bpf_array, map.max_entries)); 1051 offsetof(struct bpf_array, map.max_entries));
1052 /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ 1052 /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
1053 EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, 1053 EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
1054 REG_W1, 0, 0xa); 1054 REG_W1, 0, 0xa);
1055 1055
1056 /* 1056 /*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
1076 * goto out; 1076 * goto out;
1077 */ 1077 */
1078 1078
1079 /* sllg %r1,%b3,3: %r1 = index * 8 */ 1079 /* llgfr %r1,%b3: %r1 = (u32) index */
1080 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); 1080 EMIT4(0xb9160000, REG_1, BPF_REG_3);
1081 /* sllg %r1,%r1,3: %r1 *= 8 */
1082 EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
1081 /* lg %r1,prog(%b2,%r1) */ 1083 /* lg %r1,prog(%b2,%r1) */
1082 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, 1084 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
1083 REG_1, offsetof(struct bpf_array, ptrs)); 1085 REG_1, offsetof(struct bpf_array, ptrs));
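
The jited sequence now matches BPF tail-call semantics, where the index is compared and scaled as a 32-bit value (clrj instead of clgrj, plus an llgfr zero-extension before the shift). The equivalent logic in C, runnable standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t index = 0x100000002ULL;        /* upper half must be ignored */
        uint32_t max_entries = 16;

        /* compare as u32 ... */
        if ((uint32_t)index >= max_entries) {
                puts("out");
                return 0;
        }
        /* ... and scale only after zero-extending to 32 bits */
        printf("slot offset: %llu\n",
               (unsigned long long)((uint64_t)(uint32_t)index * 8));  /* 16 */
        return 0;
}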
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 8574338bf23b..9991ec2371e4 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns)
34 time_travel_time = ns; 34 time_travel_time = ns;
35} 35}
36 36
37static inline void time_travel_set_timer(enum time_travel_timer_mode mode, 37static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
38 unsigned long long expiry)
39{ 38{
40 time_travel_timer_mode = mode; 39 time_travel_timer_mode = mode;
40}
41
42static inline void time_travel_set_timer_expiry(unsigned long long expiry)
43{
41 time_travel_timer_expiry = expiry; 44 time_travel_timer_expiry = expiry;
42} 45}
43#else 46#else
@@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns)
50{ 53{
51} 54}
52 55
53static inline void time_travel_set_timer(enum time_travel_timer_mode mode, 56static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
54 unsigned long long expiry) 57{
58}
59
60static inline void time_travel_set_timer_expiry(unsigned long long expiry)
55{ 61{
56} 62}
57 63
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 67c0d1a860e9..6bede7888fc2 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration)
213 if (time_travel_timer_mode != TT_TMR_DISABLED || 213 if (time_travel_timer_mode != TT_TMR_DISABLED ||
214 time_travel_timer_expiry < next) { 214 time_travel_timer_expiry < next) {
215 if (time_travel_timer_mode == TT_TMR_ONESHOT) 215 if (time_travel_timer_mode == TT_TMR_ONESHOT)
216 time_travel_set_timer(TT_TMR_DISABLED, 0); 216 time_travel_set_timer_mode(TT_TMR_DISABLED);
217 /* 217 /*
218 * time_travel_time will be adjusted in the timer 218 * time_travel_time will be adjusted in the timer
219 * IRQ handler so it works even when the signal 219 * IRQ handler so it works even when the signal
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 6a051b078359..234757233355 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
50static int itimer_shutdown(struct clock_event_device *evt) 50static int itimer_shutdown(struct clock_event_device *evt)
51{ 51{
52 if (time_travel_mode != TT_MODE_OFF) 52 if (time_travel_mode != TT_MODE_OFF)
53 time_travel_set_timer(TT_TMR_DISABLED, 0); 53 time_travel_set_timer_mode(TT_TMR_DISABLED);
54 54
55 if (time_travel_mode != TT_MODE_INFCPU) 55 if (time_travel_mode != TT_MODE_INFCPU)
56 os_timer_disable(); 56 os_timer_disable();
@@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt)
62{ 62{
63 unsigned long long interval = NSEC_PER_SEC / HZ; 63 unsigned long long interval = NSEC_PER_SEC / HZ;
64 64
65 if (time_travel_mode != TT_MODE_OFF) 65 if (time_travel_mode != TT_MODE_OFF) {
66 time_travel_set_timer(TT_TMR_PERIODIC, 66 time_travel_set_timer_mode(TT_TMR_PERIODIC);
67 time_travel_time + interval); 67 time_travel_set_timer_expiry(time_travel_time + interval);
68 }
68 69
69 if (time_travel_mode != TT_MODE_INFCPU) 70 if (time_travel_mode != TT_MODE_INFCPU)
70 os_timer_set_interval(interval); 71 os_timer_set_interval(interval);
@@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta,
77{ 78{
78 delta += 1; 79 delta += 1;
79 80
80 if (time_travel_mode != TT_MODE_OFF) 81 if (time_travel_mode != TT_MODE_OFF) {
81 time_travel_set_timer(TT_TMR_ONESHOT, 82 time_travel_set_timer_mode(TT_TMR_ONESHOT);
82 time_travel_time + delta); 83 time_travel_set_timer_expiry(time_travel_time + delta);
84 }
83 85
84 if (time_travel_mode != TT_MODE_INFCPU) 86 if (time_travel_mode != TT_MODE_INFCPU)
85 return os_timer_one_shot(delta); 87 return os_timer_one_shot(delta);
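
Splitting time_travel_set_timer() into separate mode and expiry setters lets itimer_shutdown() disable the timer without clobbering the stored expiry, and keeps the two-field updates explicit at the other call sites. The shape in miniature:

enum tmr_mode { TMR_DISABLED, TMR_ONESHOT, TMR_PERIODIC };

static enum tmr_mode timer_mode;
static unsigned long long timer_expiry;

static void set_timer_mode(enum tmr_mode m)        { timer_mode = m; }
static void set_timer_expiry(unsigned long long e) { timer_expiry = e; }

static void shutdown_timer(void)
{
        /* the old combined set_timer(TMR_DISABLED, 0) also zeroed the
         * expiry; now the mode can change independently */
        set_timer_mode(TMR_DISABLED);
}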
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 56e748a7679f..94df0868804b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
38 38
39REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding) 39REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
40REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector) 40REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
41REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
41REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4)) 42REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
42export REALMODE_CFLAGS 43export REALMODE_CFLAGS
43 44
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 5f2d03067ae5..c8862696a47b 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -72,6 +72,8 @@ static unsigned long find_trampoline_placement(void)
72 72
73 /* Find the first usable memory region under bios_start. */ 73 /* Find the first usable memory region under bios_start. */
74 for (i = boot_params->e820_entries - 1; i >= 0; i--) { 74 for (i = boot_params->e820_entries - 1; i >= 0; i--) {
75 unsigned long new = bios_start;
76
75 entry = &boot_params->e820_table[i]; 77 entry = &boot_params->e820_table[i];
76 78
77 /* Skip all entries above bios_start. */ 79 /* Skip all entries above bios_start. */
@@ -84,15 +86,20 @@ static unsigned long find_trampoline_placement(void)
84 86
85 /* Adjust bios_start to the end of the entry if needed. */ 87 /* Adjust bios_start to the end of the entry if needed. */
86 if (bios_start > entry->addr + entry->size) 88 if (bios_start > entry->addr + entry->size)
87 bios_start = entry->addr + entry->size; 89 new = entry->addr + entry->size;
88 90
89 /* Keep bios_start page-aligned. */ 91 /* Keep bios_start page-aligned. */
90 bios_start = round_down(bios_start, PAGE_SIZE); 92 new = round_down(new, PAGE_SIZE);
91 93
92 /* Skip the entry if it's too small. */ 94 /* Skip the entry if it's too small. */
93 if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr) 95 if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
94 continue; 96 continue;
95 97
98 /* Protect against underflow. */
99 if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
100 break;
101
102 bios_start = new;
96 break; 103 break;
97 } 104 }
98 105
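
find_trampoline_placement() now computes each candidate into a local variable and only commits it after checking that subtracting TRAMPOLINE_32BIT_SIZE cannot wrap below zero. The unsigned-underflow guard in isolation (plain C; "new" is only a keyword in C++):

#include <stdio.h>

#define TRAMPOLINE_32BIT_SIZE 0x2000UL

int main(void)
{
        unsigned long bios_start = 0x9f000;
        unsigned long new = 0x1000;   /* entry end below the trampoline size */

        /* new - TRAMPOLINE_32BIT_SIZE wraps to a huge value here, so the
         * check fires and the stale candidate is rejected */
        if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
                puts("underflow detected, keep previous bios_start");
        return 0;
}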
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 62f317c9113a..5b35b7ea5d72 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -661,10 +661,17 @@ fail:
661 661
662 throttle = perf_event_overflow(event, &data, &regs); 662 throttle = perf_event_overflow(event, &data, &regs);
663out: 663out:
664 if (throttle) 664 if (throttle) {
665 perf_ibs_stop(event, 0); 665 perf_ibs_stop(event, 0);
666 else 666 } else {
667 perf_ibs_enable_event(perf_ibs, hwc, period >> 4); 667 period >>= 4;
668
669 if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
670 (*config & IBS_OP_CNT_CTL))
671 period |= *config & IBS_OP_CUR_CNT_RAND;
672
673 perf_ibs_enable_event(perf_ibs, hwc, period);
674 }
668 675
669 perf_event_update_userpage(event); 676 perf_event_update_userpage(event);
670 677
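
When IBS_OP_CNT_CTL (op-count mode) is active, the low seven bits of the current count are randomized by hardware, so the restart path now re-seeds them from the saved config instead of writing zeroes. The mask arithmetic, checked standalone (the config value is a stand-in, not a real MSR image):

#include <stdint.h>
#include <stdio.h>

#define IBS_OP_CUR_CNT          (0xFFF80ULL << 32)
#define IBS_OP_CUR_CNT_RAND     (0x0007FULL << 32)

int main(void)
{
        uint64_t config = 0x0123456789abcdefULL;   /* stand-in MSR image */
        uint64_t period = 0x8000 >> 4;             /* period >>= 4, as above */

        period |= config & IBS_OP_CUR_CNT_RAND;    /* keep bits 38:32 */
        printf("%#llx\n", (unsigned long long)period);  /* 0x6700000800 */
        return 0;
}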
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81b005e4c7d9..325959d19d9a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event)
1236 * Add a single event to the PMU. 1236 * Add a single event to the PMU.
1237 * 1237 *
1238 * The event is added to the group of enabled events 1238 * The event is added to the group of enabled events
1239 * but only if it can be scehduled with existing events. 1239 * but only if it can be scheduled with existing events.
1240 */ 1240 */
1241static int x86_pmu_add(struct perf_event *event, int flags) 1241static int x86_pmu_add(struct perf_event *event, int flags)
1242{ 1242{
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 648260b5f367..e4c2cb65ea50 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
3572 return left; 3572 return left;
3573} 3573}
3574 3574
3575static u64 nhm_limit_period(struct perf_event *event, u64 left)
3576{
3577 return max(left, 32ULL);
3578}
3579
3575PMU_FORMAT_ATTR(event, "config:0-7" ); 3580PMU_FORMAT_ATTR(event, "config:0-7" );
3576PMU_FORMAT_ATTR(umask, "config:8-15" ); 3581PMU_FORMAT_ATTR(umask, "config:8-15" );
3577PMU_FORMAT_ATTR(edge, "config:18" ); 3582PMU_FORMAT_ATTR(edge, "config:18" );
@@ -4606,6 +4611,7 @@ __init int intel_pmu_init(void)
4606 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; 4611 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
4607 x86_pmu.enable_all = intel_pmu_nhm_enable_all; 4612 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
4608 x86_pmu.extra_regs = intel_nehalem_extra_regs; 4613 x86_pmu.extra_regs = intel_nehalem_extra_regs;
4614 x86_pmu.limit_period = nhm_limit_period;
4609 4615
4610 mem_attr = nhm_mem_events_attrs; 4616 mem_attr = nhm_mem_events_attrs;
4611 4617
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index e65d7fe6489f..5208ba49c89a 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
37 * Lower 12 bits encode the number of additional 37 * Lower 12 bits encode the number of additional
38 * pages to flush (in addition to the 'cur' page). 38 * pages to flush (in addition to the 'cur' page).
39 */ 39 */
40 if (diff >= HV_TLB_FLUSH_UNIT) 40 if (diff >= HV_TLB_FLUSH_UNIT) {
41 gva_list[gva_n] |= ~PAGE_MASK; 41 gva_list[gva_n] |= ~PAGE_MASK;
42 else if (diff) 42 cur += HV_TLB_FLUSH_UNIT;
43 } else if (diff) {
43 gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT; 44 gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
45 cur = end;
46 }
44 47
45 cur += HV_TLB_FLUSH_UNIT;
46 gva_n++; 48 gva_n++;
47 49
48 } while (cur < end); 50 } while (cur < end);
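
fill_gva_list() previously advanced cur by a full flush unit even when only the remainder had been encoded, which can wrap cur past zero for ranges ending near the top of the address space and hang the loop. The fixed advancement, demonstrated standalone (the unit size is assumed for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define HV_TLB_FLUSH_UNIT (4096UL << PAGE_SHIFT)        /* 16M, assumed */

int main(void)
{
        unsigned long end = ~0UL - 0x1000;      /* near the top of the space */
        unsigned long cur = end - 0x5000;

        do {
                unsigned long diff = end - cur;

                if (diff >= HV_TLB_FLUSH_UNIT)
                        cur += HV_TLB_FLUSH_UNIT;   /* full unit encoded */
                else
                        cur = end;      /* partial unit: stop here; the old
                                         * cur += HV_TLB_FLUSH_UNIT wrapped */
        } while (cur < end);

        puts("terminated");
        return 0;
}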
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index f5e90a849bca..981fe923a59f 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -59,7 +59,6 @@ static void sanitize_boot_params(struct boot_params *boot_params)
59 BOOT_PARAM_PRESERVE(apm_bios_info), 59 BOOT_PARAM_PRESERVE(apm_bios_info),
60 BOOT_PARAM_PRESERVE(tboot_addr), 60 BOOT_PARAM_PRESERVE(tboot_addr),
61 BOOT_PARAM_PRESERVE(ist_info), 61 BOOT_PARAM_PRESERVE(ist_info),
62 BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
63 BOOT_PARAM_PRESERVE(hd0_info), 62 BOOT_PARAM_PRESERVE(hd0_info),
64 BOOT_PARAM_PRESERVE(hd1_info), 63 BOOT_PARAM_PRESERVE(hd1_info),
65 BOOT_PARAM_PRESERVE(sys_desc_table), 64 BOOT_PARAM_PRESERVE(sys_desc_table),
@@ -71,6 +70,8 @@ static void sanitize_boot_params(struct boot_params *boot_params)
71 BOOT_PARAM_PRESERVE(eddbuf_entries), 70 BOOT_PARAM_PRESERVE(eddbuf_entries),
72 BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries), 71 BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
73 BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer), 72 BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
73 BOOT_PARAM_PRESERVE(secure_boot),
74 BOOT_PARAM_PRESERVE(hdr),
74 BOOT_PARAM_PRESERVE(e820_table), 75 BOOT_PARAM_PRESERVE(e820_table),
75 BOOT_PARAM_PRESERVE(eddbuf), 76 BOOT_PARAM_PRESERVE(eddbuf),
76 }; 77 };
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 287f1f7b2e52..c38a66661576 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -16,7 +16,6 @@
16#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR 16#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19extern void mcount(void);
20extern atomic_t modifying_ftrace_code; 19extern atomic_t modifying_ftrace_code;
21extern void __fentry__(void); 20extern void __fentry__(void);
22 21
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 0278aa66ef62..fe7c205233f1 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -11,6 +11,21 @@
11 * While adding a new CPUID for a new microarchitecture, add a new 11 * While adding a new CPUID for a new microarchitecture, add a new
12 * group to keep logically sorted out in chronological order. Within 12 * group to keep logically sorted out in chronological order. Within
13 * that group keep the CPUID for the variants sorted by model number. 13 * that group keep the CPUID for the variants sorted by model number.
14 *
15 * The defined symbol names have the following form:
16 * INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
17 * where:
18 * OPTFAMILY Describes the family of CPUs that this belongs to. Default
19 * is assumed to be "_CORE" (and should be omitted). Other values
20 * currently in use are _ATOM and _XEON_PHI
21 * MICROARCH Is the code name for the micro-architecture for this core.
22 * N.B. Not the platform name.
23 * OPTDIFF If needed, a short string to differentiate by market segment.
24 * Exact strings here will vary over time. _DESKTOP, _MOBILE, and
25 * _X (short for Xeon server) should be used when they are
26 * appropriate.
27 *
28 * The #define line may optionally include a comment including platform names.
14 */ 29 */
15 30
16#define INTEL_FAM6_CORE_YONAH 0x0E 31#define INTEL_FAM6_CORE_YONAH 0x0E
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index baedab8ac538..b91623d521d9 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -4,7 +4,6 @@
4 4
5extern int force_iommu, no_iommu; 5extern int force_iommu, no_iommu;
6extern int iommu_detected; 6extern int iommu_detected;
7extern int iommu_pass_through;
8 7
9/* 10 seconds */ 8/* 10 seconds */
10#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) 9#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6b4fc2788078..271d837d69a8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -381,6 +381,7 @@
381#define MSR_AMD64_PATCH_LEVEL 0x0000008b 381#define MSR_AMD64_PATCH_LEVEL 0x0000008b
382#define MSR_AMD64_TSC_RATIO 0xc0000104 382#define MSR_AMD64_TSC_RATIO 0xc0000104
383#define MSR_AMD64_NB_CFG 0xc001001f 383#define MSR_AMD64_NB_CFG 0xc001001f
384#define MSR_AMD64_CPUID_FN_1 0xc0011004
384#define MSR_AMD64_PATCH_LOADER 0xc0010020 385#define MSR_AMD64_PATCH_LOADER 0xc0010020
385#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 386#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
386#define MSR_AMD64_OSVW_STATUS 0xc0010141 387#define MSR_AMD64_OSVW_STATUS 0xc0010141
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 109f974f9835..80bc209c0708 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -192,7 +192,7 @@
192 " lfence;\n" \ 192 " lfence;\n" \
193 " jmp 902b;\n" \ 193 " jmp 902b;\n" \
194 " .align 16\n" \ 194 " .align 16\n" \
195 "903: addl $4, %%esp;\n" \ 195 "903: lea 4(%%esp), %%esp;\n" \
196 " pushl %[thunk_target];\n" \ 196 " pushl %[thunk_target];\n" \
197 " ret;\n" \ 197 " ret;\n" \
198 " .align 16\n" \ 198 " .align 16\n" \
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1392d5e6e8d6..ee26e9215f18 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -252,16 +252,20 @@ struct pebs_lbr {
252#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) 252#define IBSCTL_LVT_OFFSET_VALID (1ULL<<8)
253#define IBSCTL_LVT_OFFSET_MASK 0x0F 253#define IBSCTL_LVT_OFFSET_MASK 0x0F
254 254
255/* ibs fetch bits/masks */ 255/* IBS fetch bits/masks */
256#define IBS_FETCH_RAND_EN (1ULL<<57) 256#define IBS_FETCH_RAND_EN (1ULL<<57)
257#define IBS_FETCH_VAL (1ULL<<49) 257#define IBS_FETCH_VAL (1ULL<<49)
258#define IBS_FETCH_ENABLE (1ULL<<48) 258#define IBS_FETCH_ENABLE (1ULL<<48)
259#define IBS_FETCH_CNT 0xFFFF0000ULL 259#define IBS_FETCH_CNT 0xFFFF0000ULL
260#define IBS_FETCH_MAX_CNT 0x0000FFFFULL 260#define IBS_FETCH_MAX_CNT 0x0000FFFFULL
261 261
262/* ibs op bits/masks */ 262/*
263/* lower 4 bits of the current count are ignored: */ 263 * IBS op bits/masks
264#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) 264 * The lower 7 bits of the current count are random bits
265 * preloaded by hardware and ignored in software
266 */
267#define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
268#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
265#define IBS_OP_CNT_CTL (1ULL<<19) 269#define IBS_OP_CNT_CTL (1ULL<<19)
266#define IBS_OP_VAL (1ULL<<18) 270#define IBS_OP_VAL (1ULL<<18)
267#define IBS_OP_ENABLE (1ULL<<17) 271#define IBS_OP_ENABLE (1ULL<<17)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 9c4435307ff8..35c225ede0e4 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -444,8 +444,10 @@ __pu_label: \
444({ \ 444({ \
445 int __gu_err; \ 445 int __gu_err; \
446 __inttype(*(ptr)) __gu_val; \ 446 __inttype(*(ptr)) __gu_val; \
447 __typeof__(ptr) __gu_ptr = (ptr); \
448 __typeof__(size) __gu_size = (size); \
447 __uaccess_begin_nospec(); \ 449 __uaccess_begin_nospec(); \
448 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ 450 __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
449 __uaccess_end(); \ 451 __uaccess_end(); \
450 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 452 (x) = (__force __typeof__(*(ptr)))__gu_val; \
451 __builtin_expect(__gu_err, 0); \ 453 __builtin_expect(__gu_err, 0); \
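
Caching ptr and size in __gu_ptr/__gu_size makes the macro evaluate its arguments exactly once, which matters when callers pass expressions with side effects. A standalone illustration of the double-evaluation hazard (gcc/clang, uses __typeof__):

#include <stdio.h>

static int calls;
static int buf[2] = { 1, 2 };

static int *next_ptr(void) { return &buf[calls++]; }  /* side-effecting arg */

#define BAD_GET(x, ptr)  do { *(x) = *(ptr); *(x) += *(ptr); } while (0)
#define GOOD_GET(x, ptr) do { __typeof__(ptr) __p = (ptr); \
                              *(x) = *__p; *(x) += *__p; } while (0)

int main(void)
{
        int out;

        calls = 0; BAD_GET(&out, next_ptr());   /* argument ran twice */
        printf("bad:  out=%d calls=%d\n", out, calls);  /* out=3 calls=2 */

        calls = 0; GOOD_GET(&out, next_ptr());  /* argument ran once */
        printf("good: out=%d calls=%d\n", out, calls);  /* out=2 calls=1 */
        return 0;
}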
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f5291362da1a..aa5495d0f478 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
722static __initdata unsigned long lapic_cal_j1, lapic_cal_j2; 722static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
723 723
724/* 724/*
725 * Temporary interrupt handler. 725 * Temporary interrupt handler and polled calibration function.
726 */ 726 */
727static void __init lapic_cal_handler(struct clock_event_device *dev) 727static void __init lapic_cal_handler(struct clock_event_device *dev)
728{ 728{
@@ -851,7 +851,8 @@ bool __init apic_needs_pit(void)
851static int __init calibrate_APIC_clock(void) 851static int __init calibrate_APIC_clock(void)
852{ 852{
853 struct clock_event_device *levt = this_cpu_ptr(&lapic_events); 853 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
854 void (*real_handler)(struct clock_event_device *dev); 854 u64 tsc_perj = 0, tsc_start = 0;
855 unsigned long jif_start;
855 unsigned long deltaj; 856 unsigned long deltaj;
856 long delta, deltatsc; 857 long delta, deltatsc;
857 int pm_referenced = 0; 858 int pm_referenced = 0;
@@ -878,28 +879,64 @@ static int __init calibrate_APIC_clock(void)
878 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n" 879 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
879 "calibrating APIC timer ...\n"); 880 "calibrating APIC timer ...\n");
880 881
882 /*
883 * There are platforms w/o global clockevent devices. Instead of
884 * making the calibration conditional on that, use a polling based
885 * approach everywhere.
886 */
881 local_irq_disable(); 887 local_irq_disable();
882 888
883 /* Replace the global interrupt handler */
884 real_handler = global_clock_event->event_handler;
885 global_clock_event->event_handler = lapic_cal_handler;
886
887 /* 889 /*
888 * Setup the APIC counter to maximum. There is no way the lapic 890 * Setup the APIC counter to maximum. There is no way the lapic
889 * can underflow in the 100ms detection time frame 891 * can underflow in the 100ms detection time frame
890 */ 892 */
891 __setup_APIC_LVTT(0xffffffff, 0, 0); 893 __setup_APIC_LVTT(0xffffffff, 0, 0);
892 894
893 /* Let the interrupts run */ 895 /*
896 * Methods to terminate the calibration loop:
897 * 1) Global clockevent if available (jiffies)
898 * 2) TSC if available and frequency is known
899 */
900 jif_start = READ_ONCE(jiffies);
901
902 if (tsc_khz) {
903 tsc_start = rdtsc();
904 tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
905 }
906
907 /*
908 * Enable interrupts so the tick can fire, if a global
909 * clockevent device is available
910 */
894 local_irq_enable(); 911 local_irq_enable();
895 912
896 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) 913 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
897 cpu_relax(); 914 /* Wait for a tick to elapse */
915 while (1) {
916 if (tsc_khz) {
917 u64 tsc_now = rdtsc();
918 if ((tsc_now - tsc_start) >= tsc_perj) {
919 tsc_start += tsc_perj;
920 break;
921 }
922 } else {
923 unsigned long jif_now = READ_ONCE(jiffies);
898 924
899 local_irq_disable(); 925 if (time_after(jif_now, jif_start)) {
926 jif_start = jif_now;
927 break;
928 }
929 }
930 cpu_relax();
931 }
900 932
901 /* Restore the real event handler */ 933 /* Invoke the calibration routine */
902 global_clock_event->event_handler = real_handler; 934 local_irq_disable();
935 lapic_cal_handler(NULL);
936 local_irq_enable();
937 }
938
939 local_irq_disable();
903 940
904 /* Build delta t1-t2 as apic timer counts down */ 941 /* Build delta t1-t2 as apic timer counts down */
905 delta = lapic_cal_t1 - lapic_cal_t2; 942 delta = lapic_cal_t1 - lapic_cal_t2;
@@ -943,10 +980,11 @@ static int __init calibrate_APIC_clock(void)
943 levt->features &= ~CLOCK_EVT_FEAT_DUMMY; 980 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
944 981
945 /* 982 /*
946 * PM timer calibration failed or not turned on 983 * PM timer calibration failed or not turned on so lets try APIC
947 * so lets try APIC timer based calibration 984 * timer based calibration, if a global clockevent device is
985 * available.
948 */ 986 */
949 if (!pm_referenced) { 987 if (!pm_referenced && global_clock_event) {
950 apic_printk(APIC_VERBOSE, "... verify APIC timer\n"); 988 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
951 989
952 /* 990 /*
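
The calibration above no longer borrows the global clockevent's handler; each loop iteration busy-waits for one tick using the TSC when its frequency is known, falling back to watching jiffies, then invokes lapic_cal_handler() by hand. The polling skeleton, sketched standalone with libc clock()/time() as stand-ins for TSC and jiffies:

#include <stdio.h>
#include <time.h>

/* wait roughly one tick with whichever reference is available */
static void wait_one_tick(int have_fine_clock)
{
        if (have_fine_clock) {
                clock_t start = clock();
                while (clock() - start < CLOCKS_PER_SEC / 100)
                        ;                       /* ~10 ms busy wait */
        } else {
                time_t start = time(NULL);
                while (time(NULL) == start)
                        ;                       /* coarse fallback */
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                wait_one_tick(1);
                puts("sample");                 /* calibration sample point */
        }
        return 0;
}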
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index afee386ff711..caedd8d60d36 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
38 return early_per_cpu(x86_cpu_to_apicid, cpu); 38 return early_per_cpu(x86_cpu_to_apicid, cpu);
39} 39}
40 40
41static inline unsigned long calculate_ldr(int cpu)
42{
43 unsigned long val, id;
44
45 val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
46 id = per_cpu(x86_bios_cpu_apicid, cpu);
47 val |= SET_APIC_LOGICAL_ID(id);
48
49 return val;
50}
51
52/* 41/*
53 * Set up the logical destination ID. 42 * bigsmp enables physical destination mode
54 * 43 * and doesn't use LDR and DFR
55 * Intel recommends to set DFR, LDR and TPR before enabling
56 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
57 * document number 292116). So here it goes...
58 */ 44 */
59static void bigsmp_init_apic_ldr(void) 45static void bigsmp_init_apic_ldr(void)
60{ 46{
61 unsigned long val;
62 int cpu = smp_processor_id();
63
64 apic_write(APIC_DFR, APIC_DFR_FLAT);
65 val = calculate_ldr(cpu);
66 apic_write(APIC_LDR, val);
67} 47}
68 48
69static void bigsmp_setup_apic_routing(void) 49static void bigsmp_setup_apic_routing(void)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c7bb6c69f21c..d6af97fd170a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
2438 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use 2438 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
2439 * gsi_top if ioapic_dynirq_base hasn't been initialized yet. 2439 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
2440 */ 2440 */
2441 return ioapic_initialized ? ioapic_dynirq_base : gsi_top; 2441 if (!ioapic_initialized)
2442 return gsi_top;
2443 /*
2444 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
2445 * updated. So simply return @from if ioapic_dynirq_base == 0.
2446 */
2447 return ioapic_dynirq_base ? : from;
2442} 2448}
2443 2449
2444#ifdef CONFIG_X86_32 2450#ifdef CONFIG_X86_32
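
The "? :" with an omitted middle operand in the hunk above is the GNU "elvis" extension: a ?: b evaluates a once and yields it when nonzero, otherwise b. Quick standalone demo (gcc/clang):

#include <stdio.h>

static unsigned int base;       /* 0 = "never updated", as on DT machines */

static unsigned int lower_bound(unsigned int from)
{
        return base ?: from;    /* base if nonzero, otherwise from */
}

int main(void)
{
        printf("%u\n", lower_bound(24));        /* base == 0 -> 24 */
        base = 64;
        printf("%u\n", lower_bound(24));        /* -> 64 */
        return 0;
}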
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8d4e50428b68..68c363c341bf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
804 msr_set_bit(MSR_AMD64_DE_CFG, 31); 804 msr_set_bit(MSR_AMD64_DE_CFG, 31);
805} 805}
806 806
807static bool rdrand_force;
808
809static int __init rdrand_cmdline(char *str)
810{
811 if (!str)
812 return -EINVAL;
813
814 if (!strcmp(str, "force"))
815 rdrand_force = true;
816 else
817 return -EINVAL;
818
819 return 0;
820}
821early_param("rdrand", rdrand_cmdline);
822
823static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
824{
825 /*
826 * Saving of the MSR used to hide the RDRAND support during
827 * suspend/resume is done by arch/x86/power/cpu.c, which is
828 * dependent on CONFIG_PM_SLEEP.
829 */
830 if (!IS_ENABLED(CONFIG_PM_SLEEP))
831 return;
832
833 /*
834 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
835 * RDRAND support using the CPUID function directly.
836 */
837 if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
838 return;
839
840 msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
841
842 /*
843 * Verify that the CPUID change has occurred in case the kernel is
844 * running virtualized and the hypervisor doesn't support the MSR.
845 */
846 if (cpuid_ecx(1) & BIT(30)) {
847 pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
848 return;
849 }
850
851 clear_cpu_cap(c, X86_FEATURE_RDRAND);
852 pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
853}
854
855static void init_amd_jg(struct cpuinfo_x86 *c)
856{
857 /*
858 * Some BIOS implementations do not restore proper RDRAND support
859 * across suspend and resume. Check on whether to hide the RDRAND
860 * instruction support via CPUID.
861 */
862 clear_rdrand_cpuid_bit(c);
863}
864
807static void init_amd_bd(struct cpuinfo_x86 *c) 865static void init_amd_bd(struct cpuinfo_x86 *c)
808{ 866{
809 u64 value; 867 u64 value;
@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
818 wrmsrl_safe(MSR_F15H_IC_CFG, value); 876 wrmsrl_safe(MSR_F15H_IC_CFG, value);
819 } 877 }
820 } 878 }
879
880 /*
881 * Some BIOS implementations do not restore proper RDRAND support
882 * across suspend and resume. Check on whether to hide the RDRAND
883 * instruction support via CPUID.
884 */
885 clear_rdrand_cpuid_bit(c);
821} 886}
822 887
823static void init_amd_zn(struct cpuinfo_x86 *c) 888static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
860 case 0x10: init_amd_gh(c); break; 925 case 0x10: init_amd_gh(c); break;
861 case 0x12: init_amd_ln(c); break; 926 case 0x12: init_amd_ln(c); break;
862 case 0x15: init_amd_bd(c); break; 927 case 0x15: init_amd_bd(c); break;
928 case 0x16: init_amd_jg(c); break;
863 case 0x17: init_amd_zn(c); break; 929 case 0x17: init_amd_zn(c); break;
864 } 930 }
865 931
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index f62b498b18fb..fa4352dce491 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -1,6 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/dma-direct.h> 2#include <linux/dma-direct.h>
3#include <linux/dma-debug.h> 3#include <linux/dma-debug.h>
4#include <linux/iommu.h>
4#include <linux/dmar.h> 5#include <linux/dmar.h>
5#include <linux/export.h> 6#include <linux/export.h>
6#include <linux/memblock.h> 7#include <linux/memblock.h>
@@ -34,21 +35,6 @@ int no_iommu __read_mostly;
34/* Set this to 1 if there is a HW IOMMU in the system */ 35/* Set this to 1 if there is a HW IOMMU in the system */
35int iommu_detected __read_mostly = 0; 36int iommu_detected __read_mostly = 0;
36 37
37/*
38 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
39 * If this variable is 1, IOMMU implementations do no DMA translation for
40 * devices and allow every device to access to whole physical memory. This is
41 * useful if a user wants to use an IOMMU only for KVM device assignment to
42 * guests and not for driver dma translation.
43 * It is also possible to disable by default in kernel config, and enable with
44 * iommu=nopt at boot time.
45 */
46#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
47int iommu_pass_through __read_mostly = 1;
48#else
49int iommu_pass_through __read_mostly;
50#endif
51
52extern struct iommu_table_entry __iommu_table[], __iommu_table_end[]; 38extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
53 39
54void __init pci_iommu_alloc(void) 40void __init pci_iommu_alloc(void)
@@ -120,9 +106,9 @@ static __init int iommu_setup(char *p)
120 swiotlb = 1; 106 swiotlb = 1;
121#endif 107#endif
122 if (!strncmp(p, "pt", 2)) 108 if (!strncmp(p, "pt", 2))
123 iommu_pass_through = 1; 109 iommu_set_default_passthrough(true);
124 if (!strncmp(p, "nopt", 4)) 110 if (!strncmp(p, "nopt", 4))
125 iommu_pass_through = 0; 111 iommu_set_default_translated(true);
126 112
127 gart_parse_options(p); 113 gart_parse_options(p);
128 114
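With the global iommu_pass_through flag gone, the boot option now calls into the IOMMU core; the parsing itself is two independent prefix checks. A standalone sketch of that pt/nopt toggle (default_passthrough and iommu_parse are stand-ins, not kernel API):

/* Minimal model of the "iommu=pt" / "iommu=nopt" toggle above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool default_passthrough;

static void iommu_parse(const char *p)
{
        /* independent checks, mirroring the strncmp pairs above;
         * "nopt" does not match the "pt" prefix, so order is safe */
        if (!strncmp(p, "pt", 2))
                default_passthrough = true;
        if (!strncmp(p, "nopt", 4))
                default_passthrough = false;
}

int main(void)
{
        iommu_parse("pt");
        printf("passthrough=%d\n", default_passthrough);   /* 1 */
        iommu_parse("nopt");
        printf("passthrough=%d\n", default_passthrough);   /* 0 */
        return 0;
}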
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index d8359ebeea70..8cd745ef8c7b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -508,9 +508,12 @@ struct uprobe_xol_ops {
508 void (*abort)(struct arch_uprobe *, struct pt_regs *); 508 void (*abort)(struct arch_uprobe *, struct pt_regs *);
509}; 509};
510 510
511static inline int sizeof_long(void) 511static inline int sizeof_long(struct pt_regs *regs)
512{ 512{
513 return in_ia32_syscall() ? 4 : 8; 513 /*
514 * Check registers for mode as in_xxx_syscall() does not apply here.
515 */
516 return user_64bit_mode(regs) ? 8 : 4;
514} 517}
515 518
516static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) 519static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
@@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
521 524
522static int emulate_push_stack(struct pt_regs *regs, unsigned long val) 525static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
523{ 526{
524 unsigned long new_sp = regs->sp - sizeof_long(); 527 unsigned long new_sp = regs->sp - sizeof_long(regs);
525 528
526 if (copy_to_user((void __user *)new_sp, &val, sizeof_long())) 529 if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
527 return -EFAULT; 530 return -EFAULT;
528 531
529 regs->sp = new_sp; 532 regs->sp = new_sp;
@@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
556 long correction = utask->vaddr - utask->xol_vaddr; 559 long correction = utask->vaddr - utask->xol_vaddr;
557 regs->ip += correction; 560 regs->ip += correction;
558 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { 561 } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
559 regs->sp += sizeof_long(); /* Pop incorrect return address */ 562 regs->sp += sizeof_long(regs); /* Pop incorrect return address */
560 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen)) 563 if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
561 return -ERESTART; 564 return -ERESTART;
562 } 565 }
@@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
675 * "call" insn was executed out-of-line. Just restore ->sp and restart. 678 * "call" insn was executed out-of-line. Just restore ->sp and restart.
676 * We could also restore ->ip and try to call branch_emulate_op() again. 679 * We could also restore ->ip and try to call branch_emulate_op() again.
677 */ 680 */
678 regs->sp += sizeof_long(); 681 regs->sp += sizeof_long(regs);
679 return -ERESTART; 682 return -ERESTART;
680} 683}
681 684
@@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
1056unsigned long 1059unsigned long
1057arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs) 1060arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
1058{ 1061{
1059 int rasize = sizeof_long(), nleft; 1062 int rasize = sizeof_long(regs), nleft;
1060 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */ 1063 unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
1061 1064
1062 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize)) 1065 if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
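The fix replaces in_ia32_syscall(), which is only meaningful while inside a syscall, with a check of the probed task's saved register state: a uprobe can fire at any user instruction, so the word size must come from the regs. A sketch under that assumption (fake_regs and user_64bit stand in for pt_regs and the kernel's user_64bit_mode() predicate):

/* Sketch: pick the stack word size from the probed task's mode. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_regs {
        uint64_t sp;
        int user_64bit;         /* 1: 64-bit task, 0: 32-bit task */
};

static int sizeof_long(const struct fake_regs *regs)
{
        return regs->user_64bit ? 8 : 4;
}

/* Model of emulate_push_stack(): push a return address of the
 * task's native word size. */
static void emulate_push(struct fake_regs *regs, uint8_t *stack, uint64_t val)
{
        int rasize = sizeof_long(regs);

        regs->sp -= rasize;
        memcpy(stack + regs->sp, &val, rasize); /* little-endian host */
}

int main(void)
{
        uint8_t stack[64] = { 0 };
        struct fake_regs regs = { .sp = 64, .user_64bit = 0 };

        emulate_push(&regs, stack, 0x08048000);
        printf("sp=%llu (moved by 4 for a 32-bit task)\n",
               (unsigned long long)regs.sp);
        return 0;
}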
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c10a8b10b203..fff790a3f4ee 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
1781int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, 1781int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1782 struct kvm_cpuid_entry2 __user *entries) 1782 struct kvm_cpuid_entry2 __user *entries)
1783{ 1783{
1784 uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu); 1784 uint16_t evmcs_ver = 0;
1785 struct kvm_cpuid_entry2 cpuid_entries[] = { 1785 struct kvm_cpuid_entry2 cpuid_entries[] = {
1786 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, 1786 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
1787 { .function = HYPERV_CPUID_INTERFACE }, 1787 { .function = HYPERV_CPUID_INTERFACE },
@@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1793 }; 1793 };
1794 int i, nent = ARRAY_SIZE(cpuid_entries); 1794 int i, nent = ARRAY_SIZE(cpuid_entries);
1795 1795
1796 if (kvm_x86_ops->nested_get_evmcs_version)
1797 evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
1798
1796 /* Skip NESTED_FEATURES if eVMCS is not supported */ 1799 /* Skip NESTED_FEATURES if eVMCS is not supported */
1797 if (!evmcs_ver) 1800 if (!evmcs_ver)
1798 --nent; 1801 --nent;
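The crash being fixed: SVM (further down in this series) now leaves nested_get_evmcs_version NULL instead of supplying a stub, so every caller must treat the callback as optional. A self-contained sketch of the guarded-callback pattern (all names illustrative):

/* Guarded optional callback: an ops-table member may be NULL. */
#include <stdint.h>
#include <stdio.h>

struct vcpu;                    /* opaque for the sketch */

struct x86_ops {
        uint16_t (*nested_get_evmcs_version)(struct vcpu *v);
};

static uint16_t vmx_get_evmcs_version(struct vcpu *v) { (void)v; return 1; }

static uint16_t query_evmcs(const struct x86_ops *ops, struct vcpu *v)
{
        uint16_t ver = 0;       /* 0 == "not supported" */

        if (ops->nested_get_evmcs_version)
                ver = ops->nested_get_evmcs_version(v);
        return ver;
}

int main(void)
{
        struct x86_ops svm_like = { .nested_get_evmcs_version = NULL };
        struct x86_ops vmx_like = { .nested_get_evmcs_version = vmx_get_evmcs_version };

        printf("svm: %u, vmx: %u\n", query_evmcs(&svm_like, NULL),
               query_evmcs(&vmx_like, NULL));      /* svm: 0, vmx: 1 */
        return 0;
}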
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 685d17c11461..e904ff06a83d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
216 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) 216 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
217 new->phys_map[xapic_id] = apic; 217 new->phys_map[xapic_id] = apic;
218 218
219 if (!kvm_apic_sw_enabled(apic))
220 continue;
221
219 ldr = kvm_lapic_get_reg(apic, APIC_LDR); 222 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
220 223
221 if (apic_x2apic_mode(apic)) { 224 if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
258 static_key_slow_dec_deferred(&apic_sw_disabled); 261 static_key_slow_dec_deferred(&apic_sw_disabled);
259 else 262 else
260 static_key_slow_inc(&apic_sw_disabled.key); 263 static_key_slow_inc(&apic_sw_disabled.key);
264
265 recalculate_apic_map(apic->vcpu->kvm);
261 } 266 }
262} 267}
263 268
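Two coupled changes here: the map builder skips software-disabled APICs, whose LDR contents may be stale, and apic_set_spiv() now rebuilds the map whenever the software-enable bit flips, so re-enabled APICs reappear. A compact model of that invariant (structures are illustrative, not KVM's):

/* Rebuild a logical-ID lookup map, skipping disabled entries, and
 * rebuild on every enable/disable transition. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NCPUS 4

struct apic {
        bool sw_enabled;
        unsigned int ldr;       /* one destination bit per cpu here */
};

static struct apic apics[NCPUS];
static struct apic *logical_map[NCPUS];

static void recalculate_map(void)
{
        memset(logical_map, 0, sizeof(logical_map));
        for (int i = 0; i < NCPUS; i++) {
                if (!apics[i].sw_enabled)
                        continue;       /* stale LDR: never route here */
                for (int bit = 0; bit < NCPUS; bit++)
                        if (apics[i].ldr & (1u << bit))
                                logical_map[bit] = &apics[i];
        }
}

static void set_sw_enabled(struct apic *a, bool on)
{
        if (a->sw_enabled != on) {
                a->sw_enabled = on;
                recalculate_map();      /* mirror of apic_set_spiv() */
        }
}

int main(void)
{
        apics[0].ldr = 1u << 2;
        set_sw_enabled(&apics[0], true);
        printf("slot 2 -> %s\n", logical_map[2] ? "apic 0" : "none");
        set_sw_enabled(&apics[0], false);
        printf("slot 2 -> %s\n", logical_map[2] ? "apic 0" : "none");
        return 0;
}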
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24843cf49579..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5653 struct kvm_memory_slot *slot, 5653 struct kvm_memory_slot *slot,
5654 struct kvm_page_track_notifier_node *node) 5654 struct kvm_page_track_notifier_node *node)
5655{ 5655{
5656 struct kvm_mmu_page *sp; 5656 kvm_mmu_zap_all(kvm);
5657 LIST_HEAD(invalid_list);
5658 unsigned long i;
5659 bool flush;
5660 gfn_t gfn;
5661
5662 spin_lock(&kvm->mmu_lock);
5663
5664 if (list_empty(&kvm->arch.active_mmu_pages))
5665 goto out_unlock;
5666
5667 flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
5668
5669 for (i = 0; i < slot->npages; i++) {
5670 gfn = slot->base_gfn + i;
5671
5672 for_each_valid_sp(kvm, sp, gfn) {
5673 if (sp->gfn != gfn)
5674 continue;
5675
5676 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
5677 }
5678 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5679 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5680 flush = false;
5681 cond_resched_lock(&kvm->mmu_lock);
5682 }
5683 }
5684 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
5685
5686out_unlock:
5687 spin_unlock(&kvm->mmu_lock);
5688} 5657}
5689 5658
5690void kvm_mmu_init_vm(struct kvm *kvm) 5659void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d685491fce4d..e0368076a1ef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1714 if (!entry) 1714 if (!entry)
1715 return -EINVAL; 1715 return -EINVAL;
1716 1716
1717 new_entry = READ_ONCE(*entry);
1718 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & 1717 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1719 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) | 1718 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1720 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK); 1719 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
@@ -7129,12 +7128,6 @@ failed:
7129 return ret; 7128 return ret;
7130} 7129}
7131 7130
7132static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
7133{
7134 /* Not supported */
7135 return 0;
7136}
7137
7138static int nested_enable_evmcs(struct kvm_vcpu *vcpu, 7131static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
7139 uint16_t *vmcs_version) 7132 uint16_t *vmcs_version)
7140{ 7133{
@@ -7333,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7333 .mem_enc_unreg_region = svm_unregister_enc_region, 7326 .mem_enc_unreg_region = svm_unregister_enc_region,
7334 7327
7335 .nested_enable_evmcs = nested_enable_evmcs, 7328 .nested_enable_evmcs = nested_enable_evmcs,
7336 .nested_get_evmcs_version = nested_get_evmcs_version, 7329 .nested_get_evmcs_version = NULL,
7337 7330
7338 .need_emulation_on_page_fault = svm_need_emulation_on_page_fault, 7331 .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
7339}; 7332};
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 42ed3faa6af8..c030c96fc81a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7797,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
7797 .set_nested_state = NULL, 7797 .set_nested_state = NULL,
7798 .get_vmcs12_pages = NULL, 7798 .get_vmcs12_pages = NULL,
7799 .nested_enable_evmcs = NULL, 7799 .nested_enable_evmcs = NULL,
7800 .nested_get_evmcs_version = NULL,
7800 .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault, 7801 .need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
7801}; 7802};
7802 7803
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 93b0bd45ac73..290c3c3efb87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6594,12 +6594,13 @@ restart:
6594 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); 6594 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
6595 toggle_interruptibility(vcpu, ctxt->interruptibility); 6595 toggle_interruptibility(vcpu, ctxt->interruptibility);
6596 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 6596 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
6597 kvm_rip_write(vcpu, ctxt->eip);
6598 if (r == EMULATE_DONE && ctxt->tf)
6599 kvm_vcpu_do_singlestep(vcpu, &r);
6600 if (!ctxt->have_exception || 6597 if (!ctxt->have_exception ||
6601 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 6598 exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
6599 kvm_rip_write(vcpu, ctxt->eip);
6600 if (r == EMULATE_DONE && ctxt->tf)
6601 kvm_vcpu_do_singlestep(vcpu, &r);
6602 __kvm_set_rflags(vcpu, ctxt->eflags); 6602 __kvm_set_rflags(vcpu, ctxt->eflags);
6603 }
6603 6604
6604 /* 6605 /*
6605 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will 6606 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
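The RIP write, single-step injection, and RFLAGS write now happen only when emulation raised no exception or a trap-like one; previously a fault-like exception still advanced RIP past the faulting instruction. A sketch of the commit rule (types and names are illustrative):

/* Only commit the emulator's next-IP and flags back to the vCPU
 * when emulation did not raise a fault-like exception. */
#include <stdio.h>

enum excpt_class { EXCPT_NONE, EXCPT_TRAP, EXCPT_FAULT };

struct vcpu_state { unsigned long ip, flags; };
struct emul_ctxt  { unsigned long eip, eflags; enum excpt_class exc; };

static void commit_emulation(struct vcpu_state *v, const struct emul_ctxt *c)
{
        /* Traps (e.g. #DB) are delivered after the insn completes, so
         * the new IP is correct; faults must re-execute the old IP. */
        if (c->exc == EXCPT_NONE || c->exc == EXCPT_TRAP) {
                v->ip = c->eip;
                v->flags = c->eflags;
        }
}

int main(void)
{
        struct vcpu_state v = { .ip = 0x1000, .flags = 0x2 };
        struct emul_ctxt fault = { .eip = 0x1003, .eflags = 0x2,
                                   .exc = EXCPT_FAULT };

        commit_emulation(&v, &fault);
        printf("ip=0x%lx (unchanged: fault re-executes)\n", v.ip);
        return 0;
}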
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6a9a77a403c9..e14e95ea7338 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
516 */ 516 */
517static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, 517static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
518 unsigned long pfn, unsigned long npg, 518 unsigned long pfn, unsigned long npg,
519 int warnlvl) 519 unsigned long lpsize, int warnlvl)
520{ 520{
521 pgprotval_t forbidden, res; 521 pgprotval_t forbidden, res;
522 unsigned long end; 522 unsigned long end;
@@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
535 check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); 535 check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
536 forbidden = res; 536 forbidden = res;
537 537
538 res = protect_kernel_text_ro(start, end); 538 /*
539 check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); 539 * Special case to preserve a large page. If the change spawns the
540 forbidden |= res; 540 * full large page mapping then there is no point to split it
541 * up. Happens with ftrace and is going to be removed once ftrace
542 * switched to text_poke().
543 */
544 if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
545 res = protect_kernel_text_ro(start, end);
546 check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
547 forbidden |= res;
548 }
541 549
542 /* Check the PFN directly */ 550 /* Check the PFN directly */
543 res = protect_pci_bios(pfn, pfn + npg - 1); 551 res = protect_pci_bios(pfn, pfn + npg - 1);
@@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
819 * extra conditional required here. 827 * extra conditional required here.
820 */ 828 */
821 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages, 829 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
822 CPA_CONFLICT); 830 psize, CPA_CONFLICT);
823 831
824 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) { 832 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
825 /* 833 /*
@@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
855 * protection requirement in the large page. 863 * protection requirement in the large page.
856 */ 864 */
857 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, 865 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
858 CPA_DETECT); 866 psize, CPA_DETECT);
859 867
860 /* 868 /*
861 * If there is a conflict, split the large page. 869 * If there is a conflict, split the large page.
@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
906 if (!cpa->force_static_prot) 914 if (!cpa->force_static_prot)
907 goto set; 915 goto set;
908 916
909 prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT); 917 /* Hand in lpsize = 0 to enforce the protection mechanism */
918 prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
910 919
911 if (pgprot_val(prot) == pgprot_val(ref_prot)) 920 if (pgprot_val(prot) == pgprot_val(ref_prot))
912 goto set; 921 goto set;
@@ -1503,7 +1512,8 @@ repeat:
1503 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); 1512 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1504 1513
1505 cpa_inc_4k_install(); 1514 cpa_inc_4k_install();
1506 new_prot = static_protections(new_prot, address, pfn, 1, 1515 /* Hand in lpsize = 0 to enforce the protection mechanism */
1516 new_prot = static_protections(new_prot, address, pfn, 1, 0,
1507 CPA_PROTECT); 1517 CPA_PROTECT);
1508 1518
1509 new_prot = pgprot_clear_protnone_bits(new_prot); 1519 new_prot = pgprot_clear_protnone_bits(new_prot);
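The new lpsize parameter encodes "this check covers exactly one aligned large page"; only then is the Text-RO check skipped, so ftrace's whole-page permission flips stop forcing a split, while lpsize = 0 preserves the old always-enforce behavior. The predicate, as a standalone sketch assuming 4K base pages and a 2M large page:

/* Skip the RO check only for an exact, aligned large-page range. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool covers_whole_large_page(unsigned long start, unsigned long npg,
                                    unsigned long lpsize)
{
        /* lpsize == 0 can never equal npg * PAGE_SIZE, so it always
         * enforces, matching the "Hand in lpsize = 0" call sites. */
        return lpsize == npg * PAGE_SIZE && !(start & (lpsize - 1));
}

int main(void)
{
        unsigned long lp = 512 * PAGE_SIZE;     /* 2M large page */

        /* whole aligned 2M page: check skipped, mapping preserved */
        printf("%d\n", covers_whole_large_page(0x200000, 512, lp)); /* 1 */
        /* partial range: must still enforce and possibly split */
        printf("%d\n", covers_whole_large_page(0x200000, 256, lp)); /* 0 */
        /* lpsize = 0: enforcement always on */
        printf("%d\n", covers_whole_large_page(0x200000, 512, 0));  /* 0 */
        return 0;
}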
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eaaed5bfc4a4..991549a1c5f3 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
390 390
391 emit_prologue(&prog, bpf_prog->aux->stack_depth, 391 emit_prologue(&prog, bpf_prog->aux->stack_depth,
392 bpf_prog_was_classic(bpf_prog)); 392 bpf_prog_was_classic(bpf_prog));
393 addrs[0] = prog - temp;
393 394
394 for (i = 0; i < insn_cnt; i++, insn++) { 395 for (i = 1; i <= insn_cnt; i++, insn++) {
395 const s32 imm32 = insn->imm; 396 const s32 imm32 = insn->imm;
396 u32 dst_reg = insn->dst_reg; 397 u32 dst_reg = insn->dst_reg;
397 u32 src_reg = insn->src_reg; 398 u32 src_reg = insn->src_reg;
@@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1105 extra_pass = true; 1106 extra_pass = true;
1106 goto skip_init_addrs; 1107 goto skip_init_addrs;
1107 } 1108 }
1108 addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL); 1109 addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
1109 if (!addrs) { 1110 if (!addrs) {
1110 prog = orig_prog; 1111 prog = orig_prog;
1111 goto out_addrs; 1112 goto out_addrs;
@@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1115 * Before first pass, make a rough estimation of addrs[] 1116 * Before first pass, make a rough estimation of addrs[]
1116 * each BPF instruction is translated to less than 64 bytes 1117 * each BPF instruction is translated to less than 64 bytes
1117 */ 1118 */
1118 for (proglen = 0, i = 0; i < prog->len; i++) { 1119 for (proglen = 0, i = 0; i <= prog->len; i++) {
1119 proglen += 64; 1120 proglen += 64;
1120 addrs[i] = proglen; 1121 addrs[i] = proglen;
1121 } 1122 }
@@ -1180,7 +1181,7 @@ out_image:
1180 1181
1181 if (!image || !prog->is_func || extra_pass) { 1182 if (!image || !prog->is_func || extra_pass) {
1182 if (image) 1183 if (image)
1183 bpf_prog_fill_jited_linfo(prog, addrs); 1184 bpf_prog_fill_jited_linfo(prog, addrs + 1);
1184out_addrs: 1185out_addrs:
1185 kfree(addrs); 1186 kfree(addrs);
1186 kfree(jit_data); 1187 kfree(jit_data);
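After the fix, addrs[] holds end offsets: addrs[0] is the end of the prologue and addrs[i] the end of instruction i, so a branch to instruction 1 resolves to the first byte after the prologue instead of offset 0 — hence the extra array slot and the addrs + 1 passed to bpf_prog_fill_jited_linfo(). A sketch of the convention with made-up instruction sizes:

/* addrs[] convention after the fix: end offsets, prologue at [0]. */
#include <stdio.h>

int main(void)
{
        int insn_len[] = { 3, 5, 2 };   /* pretend JITed sizes */
        int n = 3, prologue = 7;
        int addrs[1 + 3];               /* one extra slot, as above */

        addrs[0] = prologue;
        for (int i = 1; i <= n; i++)
                addrs[i] = addrs[i - 1] + insn_len[i - 1];

        /* the start of insn t is addrs[t - 1]; t == 1 now lands
         * right after the prologue instead of at offset 0 */
        printf("insn 1 starts at %d\n", addrs[0]);      /* 7 */
        printf("insn 3 starts at %d\n", addrs[2]);      /* 15 */
        return 0;
}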
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 24b079e94bc2..c9ef6a7a4a1a 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -12,6 +12,7 @@
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/perf_event.h> 13#include <linux/perf_event.h>
14#include <linux/tboot.h> 14#include <linux/tboot.h>
15#include <linux/dmi.h>
15 16
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
17#include <asm/proto.h> 18#include <asm/proto.h>
@@ -23,7 +24,7 @@
23#include <asm/debugreg.h> 24#include <asm/debugreg.h>
24#include <asm/cpu.h> 25#include <asm/cpu.h>
25#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
26#include <linux/dmi.h> 27#include <asm/cpu_device_id.h>
27 28
28#ifdef CONFIG_X86_32 29#ifdef CONFIG_X86_32
29__visible unsigned long saved_context_ebx; 30__visible unsigned long saved_context_ebx;
@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)
397 398
398core_initcall(bsp_pm_check_init); 399core_initcall(bsp_pm_check_init);
399 400
400static int msr_init_context(const u32 *msr_id, const int total_num) 401static int msr_build_context(const u32 *msr_id, const int num)
401{ 402{
402 int i = 0; 403 struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
403 struct saved_msr *msr_array; 404 struct saved_msr *msr_array;
405 int total_num;
406 int i, j;
404 407
405 if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) { 408 total_num = saved_msrs->num + num;
406 pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
407 return -EINVAL;
408 }
409 409
410 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); 410 msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
411 if (!msr_array) { 411 if (!msr_array) {
@@ -413,19 +413,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
413 return -ENOMEM; 413 return -ENOMEM;
414 } 414 }
415 415
416 for (i = 0; i < total_num; i++) { 416 if (saved_msrs->array) {
417 msr_array[i].info.msr_no = msr_id[i]; 417 /*
418 * Multiple callbacks can invoke this function, so copy any
419 * MSR save requests from previous invocations.
420 */
421 memcpy(msr_array, saved_msrs->array,
422 sizeof(struct saved_msr) * saved_msrs->num);
423
424 kfree(saved_msrs->array);
425 }
426
427 for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
428 msr_array[i].info.msr_no = msr_id[j];
418 msr_array[i].valid = false; 429 msr_array[i].valid = false;
419 msr_array[i].info.reg.q = 0; 430 msr_array[i].info.reg.q = 0;
420 } 431 }
421 saved_context.saved_msrs.num = total_num; 432 saved_msrs->num = total_num;
422 saved_context.saved_msrs.array = msr_array; 433 saved_msrs->array = msr_array;
423 434
424 return 0; 435 return 0;
425} 436}
426 437
427/* 438/*
428 * The following section is a quirk framework for problematic BIOSen: 439 * The following sections are a quirk framework for problematic BIOSen:
429 * Sometimes MSRs are modified by the BIOSen after suspended to 440 * Sometimes MSRs are modified by the BIOSen after suspended to
430 * RAM, this might cause unexpected behavior after wakeup. 441 * RAM, this might cause unexpected behavior after wakeup.
431 * Thus we save/restore these specified MSRs across suspend/resume 442 * Thus we save/restore these specified MSRs across suspend/resume
@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
440 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; 451 u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
441 452
442 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); 453 pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
443 return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); 454 return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
444} 455}
445 456
446static const struct dmi_system_id msr_save_dmi_table[] = { 457static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
455 {} 466 {}
456}; 467};
457 468
469static int msr_save_cpuid_features(const struct x86_cpu_id *c)
470{
471 u32 cpuid_msr_id[] = {
472 MSR_AMD64_CPUID_FN_1,
473 };
474
475 pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
476 c->family);
477
478 return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
479}
480
481static const struct x86_cpu_id msr_save_cpu_table[] = {
482 {
483 .vendor = X86_VENDOR_AMD,
484 .family = 0x15,
485 .model = X86_MODEL_ANY,
486 .feature = X86_FEATURE_ANY,
487 .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
488 },
489 {
490 .vendor = X86_VENDOR_AMD,
491 .family = 0x16,
492 .model = X86_MODEL_ANY,
493 .feature = X86_FEATURE_ANY,
494 .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
495 },
496 {}
497};
498
499typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
500static int pm_cpu_check(const struct x86_cpu_id *c)
501{
502 const struct x86_cpu_id *m;
503 int ret = 0;
504
505 m = x86_match_cpu(msr_save_cpu_table);
506 if (m) {
507 pm_cpu_match_t fn;
508
509 fn = (pm_cpu_match_t)m->driver_data;
510 ret = fn(m);
511 }
512
513 return ret;
514}
515
458static int pm_check_save_msr(void) 516static int pm_check_save_msr(void)
459{ 517{
460 dmi_check_system(msr_save_dmi_table); 518 dmi_check_system(msr_save_dmi_table);
519 pm_cpu_check(msr_save_cpu_table);
520
461 return 0; 521 return 0;
462} 522}
463 523
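msr_build_context() may now run once per matching quirk — the DMI table and the new x86_match_cpu() table can both fire — so it grows the saved array and preserves earlier requests instead of erroring out on the second call. A userspace model of the append pattern (plain malloc/free stand in for the kernel allocators; MSR numbers are illustrative):

/* Append-on-each-call growth: keep old entries, add the new IDs. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct saved { unsigned int msr_no; };

static struct saved *arr;
static int num;

static int build_context(const unsigned int *ids, int n)
{
        int total = num + n;
        struct saved *tmp = malloc(total * sizeof(*tmp));

        if (!tmp)
                return -1;
        if (arr) {
                memcpy(tmp, arr, num * sizeof(*tmp)); /* keep old entries */
                free(arr);
        }
        for (int i = num, j = 0; i < total; i++, j++)
                tmp[i].msr_no = ids[j];
        arr = tmp;
        num = total;
        return 0;
}

int main(void)
{
        unsigned int dmi_ids[] = { 0x19a };        /* illustrative */
        unsigned int cpu_ids[] = { 0xc0011004 };   /* illustrative */

        build_context(dmi_ids, 1);      /* first quirk */
        build_context(cpu_ids, 1);      /* second quirk appends */
        printf("num=%d first=%#x last=%#x\n", num, arr[0].msr_no,
               arr[num - 1].msr_no);
        free(arr);
        return 0;
}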
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa577437..8c37294f1d1e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
200 make the card work). 200 make the card work).
201 201
202config ATM_NICSTAR_USE_IDT77105 202config ATM_NICSTAR_USE_IDT77105
203 bool "Use IDT77015 PHY driver (25Mbps)" 203 bool "Use IDT77105 PHY driver (25Mbps)"
204 depends on ATM_NICSTAR 204 depends on ATM_NICSTAR
205 help 205 help
206 Support for the PHYsical layer chip in ForeRunner LE25 cards. In 206 Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 9c0bb771751d..a2fcde582e2a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -74,7 +74,7 @@ struct ht16k33_priv {
74 struct ht16k33_fbdev fbdev; 74 struct ht16k33_fbdev fbdev;
75}; 75};
76 76
77static struct fb_fix_screeninfo ht16k33_fb_fix = { 77static const struct fb_fix_screeninfo ht16k33_fb_fix = {
78 .id = DRIVER_NAME, 78 .id = DRIVER_NAME,
79 .type = FB_TYPE_PACKED_PIXELS, 79 .type = FB_TYPE_PACKED_PIXELS,
80 .visual = FB_VISUAL_MONO10, 80 .visual = FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = {
85 .accel = FB_ACCEL_NONE, 85 .accel = FB_ACCEL_NONE,
86}; 86};
87 87
88static struct fb_var_screeninfo ht16k33_fb_var = { 88static const struct fb_var_screeninfo ht16k33_fb_var = {
89 .xres = HT16K33_MATRIX_LED_MAX_ROWS, 89 .xres = HT16K33_MATRIX_LED_MAX_ROWS,
90 .yres = HT16K33_MATRIX_LED_MAX_COLS, 90 .yres = HT16K33_MATRIX_LED_MAX_COLS,
91 .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS, 91 .xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9bd4ddd12b25..5b248763a672 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
322 thi->name[0], 322 thi->name[0],
323 resource->name); 323 resource->name);
324 324
325 allow_kernel_signal(DRBD_SIGKILL);
326 allow_kernel_signal(SIGXCPU);
325restart: 327restart:
326 retval = thi->function(thi); 328 retval = thi->function(thi);
327 329
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3327192bb71f..c8fb886aebd4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3038,6 +3038,17 @@ again:
3038 } 3038 }
3039 return true; 3039 return true;
3040 case RBD_OBJ_READ_PARENT: 3040 case RBD_OBJ_READ_PARENT:
3041 /*
3042 * The parent image is read only up to the overlap -- zero-fill
3043 * from the overlap to the end of the request.
3044 */
3045 if (!*result) {
3046 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
3047
3048 if (obj_overlap < obj_req->ex.oe_len)
3049 rbd_obj_zero_range(obj_req, obj_overlap,
3050 obj_req->ex.oe_len - obj_overlap);
3051 }
3041 return true; 3052 return true;
3042 default: 3053 default:
3043 BUG(); 3054 BUG();
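A clone's parent image backs a read only up to the overlap point, so everything past it must be returned as zeros rather than whatever the buffer held. The rule, reduced to a sketch:

/* Zero-fill the tail of a request that extends past the overlap. */
#include <stdio.h>
#include <string.h>

static void zero_tail(unsigned char *buf, unsigned int req_len,
                      unsigned int obj_overlap)
{
        if (obj_overlap < req_len)
                memset(buf + obj_overlap, 0, req_len - obj_overlap);
}

int main(void)
{
        unsigned char buf[8];

        memset(buf, 0xab, sizeof(buf)); /* stale data past the overlap */
        zero_tail(buf, 8, 5);           /* parent covers first 5 bytes */
        for (int i = 0; i < 8; i++)
                printf("%02x ", buf[i]);
        printf("\n");                   /* ab ab ab ab ab 00 00 00 */
        return 0;
}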
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8b33128dccee..0875470a7806 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
99 return 0; 99 return 0;
100} 100}
101 101
102int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
103{
104 struct sk_buff *skb;
105 int err;
106
107 bt_dev_dbg(hdev, "QCA pre shutdown cmd");
108
109 skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
110 NULL, HCI_INIT_TIMEOUT);
111 if (IS_ERR(skb)) {
112 err = PTR_ERR(skb);
113 bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
114 return err;
115 }
116
117 kfree_skb(skb);
118
119 return 0;
120}
121EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
122
102static void qca_tlv_check_data(struct rome_config *config, 123static void qca_tlv_check_data(struct rome_config *config,
103 const struct firmware *fw) 124 const struct firmware *fw)
104{ 125{
@@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
119 BT_DBG("Length\t\t : %d bytes", length); 140 BT_DBG("Length\t\t : %d bytes", length);
120 141
121 config->dnld_mode = ROME_SKIP_EVT_NONE; 142 config->dnld_mode = ROME_SKIP_EVT_NONE;
143 config->dnld_type = ROME_SKIP_EVT_NONE;
122 144
123 switch (config->type) { 145 switch (config->type) {
124 case TLV_TYPE_PATCH: 146 case TLV_TYPE_PATCH:
@@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
268 290
269 evt = skb_put(skb, sizeof(*evt)); 291 evt = skb_put(skb, sizeof(*evt));
270 evt->ncmd = 1; 292 evt->ncmd = 1;
271 evt->opcode = QCA_HCI_CC_OPCODE; 293 evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
272 294
273 skb_put_u8(skb, QCA_HCI_CC_SUCCESS); 295 skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
274 296
@@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
323 */ 345 */
324 if (config->dnld_type == ROME_SKIP_EVT_VSE_CC || 346 if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
325 config->dnld_type == ROME_SKIP_EVT_VSE) 347 config->dnld_type == ROME_SKIP_EVT_VSE)
326 return qca_inject_cmd_complete_event(hdev); 348 ret = qca_inject_cmd_complete_event(hdev);
327 349
328out: 350out:
329 release_firmware(fw); 351 release_firmware(fw);
@@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
388 return err; 410 return err;
389 } 411 }
390 412
413 /* Give the controller some time to get ready to receive the NVM */
414 msleep(10);
415
391 /* Download NVM configuration */ 416 /* Download NVM configuration */
392 config.type = TLV_TYPE_NVM; 417 config.type = TLV_TYPE_NVM;
393 if (firmware_name) 418 if (firmware_name)
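Besides the new pre-shutdown command and the dnld_type initialization, note the opcode fix: HCI is little-endian on the wire, so the 16-bit value must go through cpu_to_le16() rather than being stored in host byte order. A host-independent sketch of that store (the opcode value is illustrative):

/* Store a 16-bit HCI opcode in little-endian wire order on any host,
 * which is what the cpu_to_le16() conversion above guarantees. */
#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;        /* low byte first on the wire */
        p[1] = v >> 8;
}

int main(void)
{
        uint8_t evt[2];

        put_le16(evt, 0xfc00);  /* illustrative vendor opcode */
        printf("%02x %02x\n", evt[0], evt[1]);  /* 00 fc */
        return 0;
}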
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 6a291a7a5d96..69c5315a65fd 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
13#define EDL_PATCH_TLV_REQ_CMD (0x1E) 13#define EDL_PATCH_TLV_REQ_CMD (0x1E)
14#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) 14#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
15#define MAX_SIZE_PER_TLV_SEGMENT (243) 15#define MAX_SIZE_PER_TLV_SEGMENT (243)
16#define QCA_PRE_SHUTDOWN_CMD (0xFC08)
16 17
17#define EDL_CMD_REQ_RES_EVT (0x00) 18#define EDL_CMD_REQ_RES_EVT (0x00)
18#define EDL_PATCH_VER_RES_EVT (0x19) 19#define EDL_PATCH_VER_RES_EVT (0x19)
@@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
135 const char *firmware_name); 136 const char *firmware_name);
136int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); 137int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
137int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); 138int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
139int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
138static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type) 140static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
139{ 141{
140 return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998; 142 return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
167{ 169{
168 return false; 170 return false;
169} 171}
172
173static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
174{
175 return -EOPNOTSUPP;
176}
170#endif 177#endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3876fee6ad13..5cf0734eb31b 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
2762 fw_size = fw->size; 2762 fw_size = fw->size;
2763 2763
2764 /* The size of patch header is 30 bytes, should be skip */ 2764 /* The size of patch header is 30 bytes, should be skip */
2765 if (fw_size < 30) 2765 if (fw_size < 30) {
2766 err = -EINVAL;
2766 goto err_release_fw; 2767 goto err_release_fw;
2768 }
2767 2769
2768 fw_size -= 30; 2770 fw_size -= 30;
2769 fw_ptr += 30; 2771 fw_ptr += 30;
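The bug class here: jumping to a shared cleanup label while err still holds 0 from the last successful call makes the failure path report success. Reduced to a sketch (-22 stands in for -EINVAL):

/* Set the error code before jumping to a shared cleanup label. */
#include <stdio.h>

static int load_patch(int fw_size)
{
        int err = 0;            /* last operation "succeeded" */

        if (fw_size < 30) {
                err = -22;      /* without this, we'd return 0 */
                goto release;
        }
        /* ... consume the firmware ... */
release:
        /* release_firmware(fw); */
        return err;
}

int main(void)
{
        printf("short fw -> %d\n", load_patch(10)); /* -22, not 0 */
        return 0;
}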
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 82a0a3691a63..9a970fd1975a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
705 unsigned long flags; 705 unsigned long flags;
706 struct qca_data *qca = hu->priv; 706 struct qca_data *qca = hu->priv;
707 707
708 BT_DBG("hu %p want to sleep", hu); 708 BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
709 709
710 spin_lock_irqsave(&qca->hci_ibs_lock, flags); 710 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
711 711
@@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
720 break; 720 break;
721 721
722 case HCI_IBS_RX_ASLEEP: 722 case HCI_IBS_RX_ASLEEP:
723 /* Fall through */ 723 break;
724 724
725 default: 725 default:
726 /* Any other state is illegal */ 726 /* Any other state is illegal */
@@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
912 if (hdr->evt == HCI_EV_VENDOR) 912 if (hdr->evt == HCI_EV_VENDOR)
913 complete(&qca->drop_ev_comp); 913 complete(&qca->drop_ev_comp);
914 914
915 kfree(skb); 915 kfree_skb(skb);
916 916
917 return 0; 917 return 0;
918 } 918 }
@@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev)
1386{ 1386{
1387 struct hci_uart *hu = hci_get_drvdata(hdev); 1387 struct hci_uart *hu = hci_get_drvdata(hdev);
1388 1388
1389 /* Perform pre shutdown command */
1390 qca_send_pre_shutdown_cmd(hdev);
1391
1389 qca_power_shutdown(hu); 1392 qca_power_shutdown(hu);
1390 return 0; 1393 return 0;
1391} 1394}
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 19d7b6ff2f17..20c957185af2 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
456 size_t pdata_size; 456 size_t pdata_size;
457}; 457};
458 458
459static void hisi_lpc_acpi_remove(struct device *hostdev)
460{
461 struct acpi_device *adev = ACPI_COMPANION(hostdev);
462 struct acpi_device *child;
463
464 device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
465
466 list_for_each_entry(child, &adev->children, node)
467 acpi_device_clear_enumerated(child);
468}
469
459/* 470/*
460 * hisi_lpc_acpi_probe - probe children for ACPI FW 471 * hisi_lpc_acpi_probe - probe children for ACPI FW
461 * @hostdev: LPC host device pointer 472 * @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
555 return 0; 566 return 0;
556 567
557fail: 568fail:
558 device_for_each_child(hostdev, NULL, 569 hisi_lpc_acpi_remove(hostdev);
559 hisi_lpc_acpi_remove_subdev);
560 return ret; 570 return ret;
561} 571}
562 572
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
569{ 579{
570 return -ENODEV; 580 return -ENODEV;
571} 581}
582
583static void hisi_lpc_acpi_remove(struct device *hostdev)
584{
585}
572#endif // CONFIG_ACPI 586#endif // CONFIG_ACPI
573 587
574/* 588/*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
606 range->fwnode = dev->fwnode; 620 range->fwnode = dev->fwnode;
607 range->flags = LOGIC_PIO_INDIRECT; 621 range->flags = LOGIC_PIO_INDIRECT;
608 range->size = PIO_INDIRECT_SIZE; 622 range->size = PIO_INDIRECT_SIZE;
623 range->hostdata = lpcdev;
624 range->ops = &hisi_lpc_ops;
625 lpcdev->io_host = range;
609 626
610 ret = logic_pio_register_range(range); 627 ret = logic_pio_register_range(range);
611 if (ret) { 628 if (ret) {
612 dev_err(dev, "register IO range failed (%d)!\n", ret); 629 dev_err(dev, "register IO range failed (%d)!\n", ret);
613 return ret; 630 return ret;
614 } 631 }
615 lpcdev->io_host = range;
616 632
617 /* register the LPC host PIO resources */ 633 /* register the LPC host PIO resources */
618 if (acpi_device) 634 if (acpi_device)
619 ret = hisi_lpc_acpi_probe(dev); 635 ret = hisi_lpc_acpi_probe(dev);
620 else 636 else
621 ret = of_platform_populate(dev->of_node, NULL, NULL, dev); 637 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
622 if (ret) 638 if (ret) {
639 logic_pio_unregister_range(range);
623 return ret; 640 return ret;
641 }
624 642
625 lpcdev->io_host->hostdata = lpcdev; 643 dev_set_drvdata(dev, lpcdev);
626 lpcdev->io_host->ops = &hisi_lpc_ops;
627 644
628 io_end = lpcdev->io_host->io_start + lpcdev->io_host->size; 645 io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
629 dev_info(dev, "registered range [%pa - %pa]\n", 646 dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
632 return ret; 649 return ret;
633} 650}
634 651
652static int hisi_lpc_remove(struct platform_device *pdev)
653{
654 struct device *dev = &pdev->dev;
655 struct acpi_device *acpi_device = ACPI_COMPANION(dev);
656 struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
657 struct logic_pio_hwaddr *range = lpcdev->io_host;
658
659 if (acpi_device)
660 hisi_lpc_acpi_remove(dev);
661 else
662 of_platform_depopulate(dev);
663
664 logic_pio_unregister_range(range);
665
666 return 0;
667}
668
635static const struct of_device_id hisi_lpc_of_match[] = { 669static const struct of_device_id hisi_lpc_of_match[] = {
636 { .compatible = "hisilicon,hip06-lpc", }, 670 { .compatible = "hisilicon,hip06-lpc", },
637 { .compatible = "hisilicon,hip07-lpc", }, 671 { .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
645 .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match), 679 .acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
646 }, 680 },
647 .probe = hisi_lpc_probe, 681 .probe = hisi_lpc_probe,
682 .remove = hisi_lpc_remove,
648}; 683};
649builtin_platform_driver(hisi_lpc_driver); 684builtin_platform_driver(hisi_lpc_driver);
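The reshuffled probe now fills in hostdata and ops before publishing the range, unregisters it if child enumeration fails, and gains a matching remove path. The unwind shape, as a standalone sketch (all calls are stand-ins for the driver's real API):

/* Probe/remove symmetry: initialize before publishing, and unwind
 * in reverse order on failure. */
#include <stdio.h>

static int register_range(void)    { puts("range registered");   return 0; }
static void unregister_range(void) { puts("range unregistered"); }
static int populate_children(int fail) { return fail ? -5 : 0; }

static int probe(int fail_children)
{
        int ret = register_range();

        if (ret)
                return ret;

        ret = populate_children(fail_children);
        if (ret) {
                unregister_range();     /* undo what probe published */
                return ret;
        }
        return 0;
}

int main(void)
{
        printf("probe -> %d\n", probe(1));      /* -5, range cleaned up */
        return 0;
}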
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e6deabd8305d..2db474ab4c6b 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
949 *best_mode = SYSC_IDLE_SMART_WKUP; 949 *best_mode = SYSC_IDLE_SMART_WKUP;
950 else if (idlemodes & BIT(SYSC_IDLE_SMART)) 950 else if (idlemodes & BIT(SYSC_IDLE_SMART))
951 *best_mode = SYSC_IDLE_SMART; 951 *best_mode = SYSC_IDLE_SMART;
952 else if (idlemodes & SYSC_IDLE_FORCE) 952 else if (idlemodes & BIT(SYSC_IDLE_FORCE))
953 *best_mode = SYSC_IDLE_FORCE; 953 *best_mode = SYSC_IDLE_FORCE;
954 else 954 else
955 return -EINVAL; 955 return -EINVAL;
@@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1267 SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), 1267 SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
1268 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, 1268 SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
1269 0xffff00f0, 0), 1269 0xffff00f0, 0),
1270 SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0), 1270 SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
1271 SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
1271 SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), 1272 SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
1272 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), 1273 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
1273 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), 1274 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
@@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
1692 if (error) 1693 if (error)
1693 return 0; 1694 return 0;
1694 1695
1695 if (val) 1696 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
1696 ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
1697 else
1698 ddata->cfg.sysc_val = ddata->cap->sysc_mask;
1699 1697
1700 return 0; 1698 return 0;
1701} 1699}
@@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev)
2385 2383
2386 error = sysc_init_dts_quirks(ddata); 2384 error = sysc_init_dts_quirks(ddata);
2387 if (error) 2385 if (error)
2388 goto unprepare; 2386 return error;
2389 2387
2390 error = sysc_map_and_check_registers(ddata); 2388 error = sysc_map_and_check_registers(ddata);
2391 if (error) 2389 if (error)
2392 goto unprepare; 2390 return error;
2393 2391
2394 error = sysc_init_sysc_mask(ddata); 2392 error = sysc_init_sysc_mask(ddata);
2395 if (error) 2393 if (error)
2396 goto unprepare; 2394 return error;
2397 2395
2398 error = sysc_init_idlemodes(ddata); 2396 error = sysc_init_idlemodes(ddata);
2399 if (error) 2397 if (error)
2400 goto unprepare; 2398 return error;
2401 2399
2402 error = sysc_init_syss_mask(ddata); 2400 error = sysc_init_syss_mask(ddata);
2403 if (error) 2401 if (error)
2404 goto unprepare; 2402 return error;
2405 2403
2406 error = sysc_init_pdata(ddata); 2404 error = sysc_init_pdata(ddata);
2407 if (error) 2405 if (error)
2408 goto unprepare; 2406 return error;
2409 2407
2410 sysc_init_early_quirks(ddata); 2408 sysc_init_early_quirks(ddata);
2411 2409
@@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev)
2415 2413
2416 error = sysc_init_resets(ddata); 2414 error = sysc_init_resets(ddata);
2417 if (error) 2415 if (error)
2418 return error; 2416 goto unprepare;
2419 2417
2420 error = sysc_init_module(ddata); 2418 error = sysc_init_module(ddata);
2421 if (error) 2419 if (error)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0990703ce54..1c46babeb093 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
324 return NULL; 324 return NULL;
325} 325}
326 326
327#ifdef CONFIG_OF
328static int of_parse_clkspec(const struct device_node *np, int index,
329 const char *name, struct of_phandle_args *out_args);
330static struct clk_hw *
331of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
332#else
333static inline int of_parse_clkspec(const struct device_node *np, int index,
334 const char *name,
335 struct of_phandle_args *out_args)
336{
337 return -ENOENT;
338}
339static inline struct clk_hw *
340of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
341{
342 return ERR_PTR(-ENOENT);
343}
344#endif
345
327/** 346/**
328 * clk_core_get - Find the clk_core parent of a clk 347 * clk_core_get - Find the clk_core parent of a clk
329 * @core: clk to find parent of 348 * @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
355 * }; 374 * };
356 * 375 *
357 * Returns: -ENOENT when the provider can't be found or the clk doesn't 376 * Returns: -ENOENT when the provider can't be found or the clk doesn't
358 * exist in the provider. -EINVAL when the name can't be found. NULL when the 377 * exist in the provider or the name can't be found in the DT node or
359 * provider knows about the clk but it isn't provided on this system. 378 * in a clkdev lookup. NULL when the provider knows about the clk but it
379 * isn't provided on this system.
360 * A valid clk_core pointer when the clk can be found in the provider. 380 * A valid clk_core pointer when the clk can be found in the provider.
361 */ 381 */
362static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index) 382static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
367 struct device *dev = core->dev; 387 struct device *dev = core->dev;
368 const char *dev_id = dev ? dev_name(dev) : NULL; 388 const char *dev_id = dev ? dev_name(dev) : NULL;
369 struct device_node *np = core->of_node; 389 struct device_node *np = core->of_node;
390 struct of_phandle_args clkspec;
370 391
371 if (np && (name || index >= 0)) 392 if (np && (name || index >= 0) &&
372 hw = of_clk_get_hw(np, index, name); 393 !of_parse_clkspec(np, index, name, &clkspec)) {
373 394 hw = of_clk_get_hw_from_clkspec(&clkspec);
374 /* 395 of_node_put(clkspec.np);
375 * If the DT search above couldn't find the provider or the provider 396 } else if (name) {
376 * didn't know about this clk, fallback to looking up via clkdev based 397 /*
377 * clk_lookups 398 * If the DT search above couldn't find the provider fallback to
378 */ 399 * looking up via clkdev based clk_lookups.
379 if (PTR_ERR(hw) == -ENOENT && name) 400 */
380 hw = clk_find_hw(dev_id, name); 401 hw = clk_find_hw(dev_id, name);
402 }
381 403
382 if (IS_ERR(hw)) 404 if (IS_ERR(hw))
383 return ERR_CAST(hw); 405 return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
401 parent = ERR_PTR(-EPROBE_DEFER); 423 parent = ERR_PTR(-EPROBE_DEFER);
402 } else { 424 } else {
403 parent = clk_core_get(core, index); 425 parent = clk_core_get(core, index);
404 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT) 426 if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
405 parent = clk_core_lookup(entry->name); 427 parent = clk_core_lookup(entry->name);
406 } 428 }
407 429
@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
1632 break; 1654 break;
1633 1655
1634 /* Fallback to comparing globally unique names */ 1656 /* Fallback to comparing globally unique names */
1635 if (!strcmp(parent->name, core->parents[i].name)) 1657 if (core->parents[i].name &&
1658 !strcmp(parent->name, core->parents[i].name))
1636 break; 1659 break;
1637 } 1660 }
1638 1661
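Alongside the reworked DT-vs-clkdev fallback, clk_fetch_parent_index() now guards the name comparison: parents registered without a globally unique name have a NULL .name, and strcmp() on NULL is undefined behavior. The guard, in isolation:

/* NULL-guard the name comparison before falling back to strcmp(). */
#include <stdio.h>
#include <string.h>

struct parent { const char *name; };

static int match(const struct parent *p, const char *want)
{
        return p->name && !strcmp(p->name, want);
}

int main(void)
{
        struct parent unnamed = { NULL }, named = { "osc" };

        printf("%d %d\n", match(&unnamed, "osc"), match(&named, "osc"));
        return 0;       /* 0 1, no crash on the unnamed entry */
}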
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125d..65c82d922b05 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
14#include "clk-exynos5-subcmu.h" 14#include "clk-exynos5-subcmu.h"
15 15
16static struct samsung_clk_provider *ctx; 16static struct samsung_clk_provider *ctx;
17static const struct exynos5_subcmu_info *cmu; 17static const struct exynos5_subcmu_info **cmu;
18static int nr_cmus; 18static int nr_cmus;
19 19
20static void exynos5_subcmu_clk_save(void __iomem *base, 20static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
56 * when OF-core populates all device-tree nodes. 56 * when OF-core populates all device-tree nodes.
57 */ 57 */
58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus, 58void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
59 const struct exynos5_subcmu_info *_cmu) 59 const struct exynos5_subcmu_info **_cmu)
60{ 60{
61 ctx = _ctx; 61 ctx = _ctx;
62 cmu = _cmu; 62 cmu = _cmu;
63 nr_cmus = _nr_cmus; 63 nr_cmus = _nr_cmus;
64 64
65 for (; _nr_cmus--; _cmu++) { 65 for (; _nr_cmus--; _cmu++) {
66 exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks, 66 exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
67 _cmu->nr_gate_clks); 67 (*_cmu)->nr_gate_clks);
68 exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs, 68 exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
69 _cmu->nr_suspend_regs); 69 (*_cmu)->nr_suspend_regs);
70 } 70 }
71} 71}
72 72
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
163 if (of_property_read_string(np, "label", &name) < 0) 163 if (of_property_read_string(np, "label", &name) < 0)
164 continue; 164 continue;
165 for (i = 0; i < nr_cmus; i++) 165 for (i = 0; i < nr_cmus; i++)
166 if (strcmp(cmu[i].pd_name, name) == 0) 166 if (strcmp(cmu[i]->pd_name, name) == 0)
167 exynos5_clk_register_subcmu(&pdev->dev, 167 exynos5_clk_register_subcmu(&pdev->dev,
168 &cmu[i], np); 168 cmu[i], np);
169 } 169 }
170 return 0; 170 return 0;
171} 171}
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de..9ae5356f25aa 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
21}; 21};
22 22
23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus, 23void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
24 const struct exynos5_subcmu_info *cmu); 24 const struct exynos5_subcmu_info **cmu);
25 25
26#endif 26#endif
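Turning the sub-CMU table into an array of pointers lets the Exynos5800 list (added further down) extend the common Exynos5x descriptors without duplicating them: each const descriptor is defined once and referenced from both tables. A sketch of the sharing pattern:

/* Two SoC variants share const sub-CMU descriptors; the larger
 * table simply appends one more pointer. */
#include <stdio.h>

struct subcmu_info { const char *pd_name; };

static const struct subcmu_info disp = { "DISP" };
static const struct subcmu_info mau  = { "MAU"  };

static const struct subcmu_info *exynos5x[]   = { &disp };
static const struct subcmu_info *exynos5800[] = { &disp, &mau };

static void init(const struct subcmu_info **cmu, int n)
{
        for (int i = 0; i < n; i++)
                printf("init %s\n", cmu[i]->pd_name);
}

#define N(a) (int)(sizeof(a) / sizeof((a)[0]))

int main(void)
{
        init(exynos5x, N(exynos5x));     /* DISP */
        init(exynos5800, N(exynos5800)); /* DISP, MAU: shared + extra */
        return 0;
}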
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b896881768..931c70a4da19 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
681 .pd_name = "DISP1", 681 .pd_name = "DISP1",
682}; 682};
683 683
684static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
685 &exynos5250_disp_subcmu,
686};
687
684static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = { 688static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
685 /* sorted in descending order */ 689 /* sorted in descending order */
686 /* PLL_36XX_RATE(rate, m, p, s, k) */ 690 /* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
843 847
844 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs, 848 samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
845 ARRAY_SIZE(exynos5250_clk_regs)); 849 ARRAY_SIZE(exynos5250_clk_regs));
846 exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu); 850 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
851 exynos5250_subcmus);
847 852
848 samsung_clk_of_add_provider(np, ctx); 853 samsung_clk_of_add_provider(np, ctx);
849 854
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 01bca5a498b2..7670cc596c74 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
534 GATE_BUS_TOP, 24, 0, 0), 534 GATE_BUS_TOP, 24, 0, 0),
535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", 535 GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0), 536 GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
537 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
538 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
539}; 537};
540 538
541static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = { 539static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
577 575
578static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = { 576static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
579 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), 577 GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
578 /* Maudio Block */
580 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", 579 GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
581 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0), 580 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
581 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
582 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
583 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
584 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
582}; 585};
583 586
584static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = { 587static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
890 /* GSCL Block */ 893 /* GSCL Block */
891 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), 894 DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
892 895
893 /* MSCL Block */
894 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
895
896 /* PSGEN */ 896 /* PSGEN */
897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1), 897 DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), 898 DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", 1017 GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), 1018 GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
1019 1019
1020 /* Maudio Block */
1021 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1022 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1023 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1024 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1025
1026 /* FSYS Block */ 1020 /* FSYS Block */
1027 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), 1021 GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
1028 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), 1022 GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
1162 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl", 1156 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
1163 GATE_IP_GSCL1, 17, 0, 0), 1157 GATE_IP_GSCL1, 17, 0, 0),
1164 1158
1165 /* MSCL Block */
1166 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1167 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1168 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1169 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1170 GATE_IP_MSCL, 8, 0, 0),
1171 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1172 GATE_IP_MSCL, 9, 0, 0),
1173 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1174 GATE_IP_MSCL, 10, 0, 0),
1175
1176 /* ISP */ 1159 /* ISP */
1177 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp", 1160 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
1178 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0), 1161 GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
1281 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */ 1264 { DIV4_RATIO, 0, 0x3 }, /* DIV dout_mfc_blk */
1282}; 1265};
1283 1266
1284static const struct exynos5_subcmu_info exynos5x_subcmus[] = { 1267static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
1285 { 1268 /* MSCL Block */
1286 .div_clks = exynos5x_disp_div_clks, 1269 GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
1287 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks), 1270 GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
1288 .gate_clks = exynos5x_disp_gate_clks, 1271 GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
1289 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks), 1272 GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
1290 .suspend_regs = exynos5x_disp_suspend_regs, 1273 GATE_IP_MSCL, 8, 0, 0),
1291 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs), 1274 GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
1292 .pd_name = "DISP", 1275 GATE_IP_MSCL, 9, 0, 0),
1293 }, { 1276 GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
1294 .div_clks = exynos5x_gsc_div_clks, 1277 GATE_IP_MSCL, 10, 0, 0),
1295 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks), 1278};
1296 .gate_clks = exynos5x_gsc_gate_clks, 1279
1297 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks), 1280static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
1298 .suspend_regs = exynos5x_gsc_suspend_regs, 1281 DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
1299 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs), 1282};
1300 .pd_name = "GSC", 1283
1301 }, { 1284static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
1302 .div_clks = exynos5x_mfc_div_clks, 1285 { GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
1303 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks), 1286 { SRC_TOP3, 0, BIT(4) }, /* MUX mout_user_aclk400_mscl */
1304 .gate_clks = exynos5x_mfc_gate_clks, 1287 { DIV2_RATIO0, 0, 0x30000000 }, /* DIV dout_mscl_blk */
1305 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks), 1288};
1306 .suspend_regs = exynos5x_mfc_suspend_regs, 1289
1307 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs), 1290static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
1308 .pd_name = "MFC", 1291 GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
1309 }, 1292 SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
1293 GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
1294 GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
1295 GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
1296 GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
1297};
1298
1299static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
1300 { SRC_TOP9, 0, BIT(8) }, /* MUX mout_user_mau_epll */
1301};
1302
1303static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
1304 .div_clks = exynos5x_disp_div_clks,
1305 .nr_div_clks = ARRAY_SIZE(exynos5x_disp_div_clks),
1306 .gate_clks = exynos5x_disp_gate_clks,
1307 .nr_gate_clks = ARRAY_SIZE(exynos5x_disp_gate_clks),
1308 .suspend_regs = exynos5x_disp_suspend_regs,
1309 .nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
1310 .pd_name = "DISP",
1311};
1312
1313static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
1314 .div_clks = exynos5x_gsc_div_clks,
1315 .nr_div_clks = ARRAY_SIZE(exynos5x_gsc_div_clks),
1316 .gate_clks = exynos5x_gsc_gate_clks,
1317 .nr_gate_clks = ARRAY_SIZE(exynos5x_gsc_gate_clks),
1318 .suspend_regs = exynos5x_gsc_suspend_regs,
1319 .nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
1320 .pd_name = "GSC",
1321};
1322
1323static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
1324 .div_clks = exynos5x_mfc_div_clks,
1325 .nr_div_clks = ARRAY_SIZE(exynos5x_mfc_div_clks),
1326 .gate_clks = exynos5x_mfc_gate_clks,
1327 .nr_gate_clks = ARRAY_SIZE(exynos5x_mfc_gate_clks),
1328 .suspend_regs = exynos5x_mfc_suspend_regs,
1329 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
1330 .pd_name = "MFC",
1331};
1332
1333static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
1334 .div_clks = exynos5x_mscl_div_clks,
1335 .nr_div_clks = ARRAY_SIZE(exynos5x_mscl_div_clks),
1336 .gate_clks = exynos5x_mscl_gate_clks,
1337 .nr_gate_clks = ARRAY_SIZE(exynos5x_mscl_gate_clks),
1338 .suspend_regs = exynos5x_mscl_suspend_regs,
1339 .nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
1340 .pd_name = "MSC",
1341};
1342
1343static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
1344 .gate_clks = exynos5800_mau_gate_clks,
1345 .nr_gate_clks = ARRAY_SIZE(exynos5800_mau_gate_clks),
1346 .suspend_regs = exynos5800_mau_suspend_regs,
1347 .nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
1348 .pd_name = "MAU",
1349};
1350
1351static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
1352 &exynos5x_disp_subcmu,
1353 &exynos5x_gsc_subcmu,
1354 &exynos5x_mfc_subcmu,
1355 &exynos5x_mscl_subcmu,
1356};
1357
1358static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
1359 &exynos5x_disp_subcmu,
1360 &exynos5x_gsc_subcmu,
1361 &exynos5x_mfc_subcmu,
1362 &exynos5x_mscl_subcmu,
1363 &exynos5800_mau_subcmu,
1310}; 1364};
1311 1365
1312static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = { 1366static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
1539 samsung_clk_extended_sleep_init(reg_base, 1593 samsung_clk_extended_sleep_init(reg_base,
1540 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs), 1594 exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
1541 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc)); 1595 exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
1542 if (soc == EXYNOS5800) 1596
1597 if (soc == EXYNOS5800) {
1543 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs, 1598 samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
1544 ARRAY_SIZE(exynos5800_clk_regs)); 1599 ARRAY_SIZE(exynos5800_clk_regs));
1545 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus), 1600
1546 exynos5x_subcmus); 1601 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
1602 exynos5800_subcmus);
1603 } else {
1604 exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
1605 exynos5x_subcmus);
1606 }
1547 1607
1548 samsung_clk_of_add_provider(np, ctx); 1608 samsung_clk_of_add_provider(np, ctx);
1549} 1609}
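
The clk-exynos5420 hunks above move the MSCL gate and divider clocks out of the always-registered lists into their own sub-CMU (with matching suspend registers), and rework the sub-CMU table from a single array of structs into named const descriptors referenced through pointer arrays, so Exynos5800 can reuse the four common entries and append its extra MAU sub-CMU. A minimal standalone sketch of the pointer-array pattern (names are illustrative, not the driver's):

    #include <stdio.h>

    struct subcmu_info { const char *pd_name; };

    static const struct subcmu_info disp = { "DISP" };
    static const struct subcmu_info mau  = { "MAU" };

    /* the common SoC lists only what it has ... */
    static const struct subcmu_info *base_subcmus[] = { &disp };
    /* ... and the superset SoC shares the same descriptors */
    static const struct subcmu_info *ext_subcmus[]  = { &disp, &mau };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static void init_subcmus(unsigned int n, const struct subcmu_info **info)
    {
        for (unsigned int i = 0; i < n; i++)
            printf("registering sub-CMU for power domain %s\n",
                   info[i]->pd_name);
    }

    int main(void)
    {
        int is_5800 = 1; /* pretend probing found an Exynos5800 */

        if (is_5800)
            init_subcmus(ARRAY_SIZE(ext_subcmus), ext_subcmus);
        else
            init_subcmus(ARRAY_SIZE(base_subcmus), base_subcmus);
        return 0;
    }
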
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 5c50e723ecae..1a191eeeebba 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
38 if (socfpgaclk->fixed_div) { 38 if (socfpgaclk->fixed_div) {
39 div = socfpgaclk->fixed_div; 39 div = socfpgaclk->fixed_div;
40 } else { 40 } else {
41 if (!socfpgaclk->bypass_reg) 41 if (socfpgaclk->hw.reg)
42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); 42 div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
43 } 43 }
44 44
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index f79eede71c62..edefa669153f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
540 unsigned long flags; 540 unsigned long flags;
541 unsigned int i; 541 unsigned int i;
542 542
543 /* If there's no device there's nothing to do */
544 if (!ccp)
545 return 0;
546
543 spin_lock_irqsave(&ccp->cmd_lock, flags); 547 spin_lock_irqsave(&ccp->cmd_lock, flags);
544 548
545 ccp->suspending = 1; 549 ccp->suspending = 1;
@@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp)
564 unsigned long flags; 568 unsigned long flags;
565 unsigned int i; 569 unsigned int i;
566 570
571 /* If there's no device there's nothing to do */
572 if (!ccp)
573 return 0;
574
567 spin_lock_irqsave(&ccp->cmd_lock, flags); 575 spin_lock_irqsave(&ccp->cmd_lock, flags);
568 576
569 ccp->suspending = 0; 577 ccp->suspending = 0;
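
The ccp-dev.c hunks add the same early return to both power-management callbacks: on parts where the security processor is present but no CCP queues were configured, the ccp pointer is NULL, and taking cmd_lock would have dereferenced it on suspend/resume. A standalone sketch of the guard (names illustrative):

    #include <stdio.h>

    struct ccp_device { int suspending; };

    static int ccp_suspend(struct ccp_device *ccp)
    {
        /* If there's no device there's nothing to do */
        if (!ccp)
            return 0;

        ccp->suspending = 1; /* the real code holds cmd_lock around this */
        return 0;
    }

    int main(void)
    {
        printf("no device: %d\n", ccp_suspend(NULL)); /* succeeds as a no-op */
        return 0;
    }
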
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1163 switch (chan->feature & FSL_DMA_IP_MASK) { 1163 switch (chan->feature & FSL_DMA_IP_MASK) {
1164 case FSL_DMA_IP_85XX: 1164 case FSL_DMA_IP_85XX:
1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; 1165 chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
1166 /* Fall through */
1166 case FSL_DMA_IP_83XX: 1167 case FSL_DMA_IP_83XX:
1167 chan->toggle_ext_start = fsl_chan_toggle_ext_start; 1168 chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1168 chan->set_src_loop_size = fsl_chan_set_src_loop_size; 1169 chan->set_src_loop_size = fsl_chan_set_src_loop_size;
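
The fsldma hunk only adds a comment, but it is what builds with -Wimplicit-fallthrough look for: the 85XX case deliberately continues into the 83XX case so 85XX channels also receive the common setup, and the annotation records that the missing break is intentional. A runnable illustration:

    #include <stdio.h>

    int main(void)
    {
        int ip = 85;

        switch (ip) {
        case 85:
            printf("85xx-only setup\n");
            /* Fall through - 85xx needs the 83xx setup too */
        case 83:
            printf("common 83xx setup\n");
            break;
        }
        return 0;
    }
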
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 9c41a4e42575..1072c450c37a 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
192 * @iomem: remapped I/O memory base 192 * @iomem: remapped I/O memory base
193 * @n_channels: number of available channels 193 * @n_channels: number of available channels
194 * @channels: array of DMAC channels 194 * @channels: array of DMAC channels
195 * @channels_mask: bitfield of which DMA channels are managed by this driver
195 * @modules: bitmask of client modules in use 196 * @modules: bitmask of client modules in use
196 */ 197 */
197struct rcar_dmac { 198struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {
202 203
203 unsigned int n_channels; 204 unsigned int n_channels;
204 struct rcar_dmac_chan *channels; 205 struct rcar_dmac_chan *channels;
206 unsigned int channels_mask;
205 207
206 DECLARE_BITMAP(modules, 256); 208 DECLARE_BITMAP(modules, 256);
207}; 209};
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
438 u16 dmaor; 440 u16 dmaor;
439 441
440 /* Clear all channels and enable the DMAC globally. */ 442 /* Clear all channels and enable the DMAC globally. */
441 rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0)); 443 rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
442 rcar_dmac_write(dmac, RCAR_DMAOR, 444 rcar_dmac_write(dmac, RCAR_DMAOR,
443 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); 445 RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
444 446
@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
814 for (i = 0; i < dmac->n_channels; ++i) { 816 for (i = 0; i < dmac->n_channels; ++i) {
815 struct rcar_dmac_chan *chan = &dmac->channels[i]; 817 struct rcar_dmac_chan *chan = &dmac->channels[i];
816 818
819 if (!(dmac->channels_mask & BIT(i)))
820 continue;
821
817 /* Stop and reinitialize the channel. */ 822 /* Stop and reinitialize the channel. */
818 spin_lock_irq(&chan->lock); 823 spin_lock_irq(&chan->lock);
819 rcar_dmac_chan_halt(chan); 824 rcar_dmac_chan_halt(chan);
@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
1776 return 0; 1781 return 0;
1777} 1782}
1778 1783
1784#define RCAR_DMAC_MAX_CHANNELS 32
1785
1779static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) 1786static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1780{ 1787{
1781 struct device_node *np = dev->of_node; 1788 struct device_node *np = dev->of_node;
@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
1787 return ret; 1794 return ret;
1788 } 1795 }
1789 1796
1790 if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { 1797 /* The hardware and driver don't support more than 32 bits in CHCLR */
1798 if (dmac->n_channels <= 0 ||
1799 dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
1791 dev_err(dev, "invalid number of channels %u\n", 1800 dev_err(dev, "invalid number of channels %u\n",
1792 dmac->n_channels); 1801 dmac->n_channels);
1793 return -EINVAL; 1802 return -EINVAL;
1794 } 1803 }
1795 1804
1805 dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
1806
1796 return 0; 1807 return 0;
1797} 1808}
1798 1809
@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1802 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | 1813 DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
1803 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | 1814 DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
1804 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; 1815 DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
1805 unsigned int channels_offset = 0;
1806 struct dma_device *engine; 1816 struct dma_device *engine;
1807 struct rcar_dmac *dmac; 1817 struct rcar_dmac *dmac;
1808 struct resource *mem; 1818 struct resource *mem;
@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1831 * level we can't disable it selectively, so ignore channel 0 for now if 1841 * level we can't disable it selectively, so ignore channel 0 for now if
1832 * the device is part of an IOMMU group. 1842 * the device is part of an IOMMU group.
1833 */ 1843 */
1834 if (device_iommu_mapped(&pdev->dev)) { 1844 if (device_iommu_mapped(&pdev->dev))
1835 dmac->n_channels--; 1845 dmac->channels_mask &= ~BIT(0);
1836 channels_offset = 1;
1837 }
1838 1846
1839 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, 1847 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1840 sizeof(*dmac->channels), GFP_KERNEL); 1848 sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1892 INIT_LIST_HEAD(&engine->channels); 1900 INIT_LIST_HEAD(&engine->channels);
1893 1901
1894 for (i = 0; i < dmac->n_channels; ++i) { 1902 for (i = 0; i < dmac->n_channels; ++i) {
1895 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], 1903 if (!(dmac->channels_mask & BIT(i)))
1896 i + channels_offset); 1904 continue;
1905
1906 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
1897 if (ret < 0) 1907 if (ret < 0)
1898 goto error; 1908 goto error;
1899 } 1909 }
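
The rcar-dmac rework replaces the old IOMMU workaround (decrement n_channels and shift every index by one) with a channels_mask bitfield: channel 0 keeps its number but is simply skipped by probe, stop and the CHCLR write, and the former ad-hoc limit of 100 channels becomes an explicit 32 because CHCLR holds one bit per channel. A standalone sketch of the bookkeeping:

    #include <stdio.h>

    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define BIT(n) (1u << (n))

    int main(void)
    {
        unsigned int n_channels = 8;
        unsigned int channels_mask = GENMASK(n_channels - 1, 0);
        int behind_iommu = 1; /* pretend the device sits in an IOMMU group */

        if (behind_iommu)
            channels_mask &= ~BIT(0); /* channel 0 unused, not renumbered */

        for (unsigned int i = 0; i < n_channels; i++) {
            if (!(channels_mask & BIT(i)))
                continue;
            printf("probing channel %u\n", i);
        }
        return 0;
    }
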
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index baac476c8622..525dc7338fe3 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
908 struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); 908 struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
909 struct dma_slave_config *slave_cfg = &schan->slave_cfg; 909 struct dma_slave_config *slave_cfg = &schan->slave_cfg;
910 dma_addr_t src = 0, dst = 0; 910 dma_addr_t src = 0, dst = 0;
911 dma_addr_t start_src = 0, start_dst = 0;
911 struct sprd_dma_desc *sdesc; 912 struct sprd_dma_desc *sdesc;
912 struct scatterlist *sg; 913 struct scatterlist *sg;
913 u32 len = 0; 914 u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
954 dst = sg_dma_address(sg); 955 dst = sg_dma_address(sg);
955 } 956 }
956 957
958 if (!i) {
959 start_src = src;
960 start_dst = dst;
961 }
962
957 /* 963 /*
958 * The link-list mode needs at least 2 link-list 964 * The link-list mode needs at least 2 link-list
959 * configurations. If there is only one sg, it doesn't 965 * configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
970 } 976 }
971 } 977 }
972 978
973 ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, 979 ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
974 dir, flags, slave_cfg); 980 start_dst, len, dir, flags, slave_cfg);
975 if (ret) { 981 if (ret) {
976 kfree(sdesc); 982 kfree(sdesc);
977 return NULL; 983 return NULL;
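
In sprd-dma, the scatter-gather loop leaves src/dst pointing at the last list entry, but the single sprd_dma_fill_desc() call after the loop must be programmed with the first entry's addresses; the fix snapshots them on iteration 0. The bug in miniature:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sg_addr[] = { 0x1000, 0x2000, 0x3000 }; /* stand-in sg list */
        unsigned long src = 0, start_src = 0;

        for (int i = 0; i < 3; i++) {
            src = sg_addr[i];
            if (!i)
                start_src = src;
        }
        /* the descriptor must be filled with start_src, not src */
        printf("last entry %#lx, first entry %#lx\n", src, start_src);
        return 0;
    }
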
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index ad2f0a4cd6a4..f255056696ee 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
391 391
392 ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, 392 ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
393 nelm * 2); 393 nelm * 2);
394 if (ret) 394 if (ret) {
395 kfree(rsv_events);
395 return ret; 396 return ret;
397 }
396 398
397 for (i = 0; i < nelm; i++) { 399 for (i = 0; i < nelm; i++) {
398 ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], 400 ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba27802efcd0..d07c0d5de7a2 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev)
1540 1540
1541 rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, 1541 rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
1542 IRQF_SHARED, "omap-dma-engine", od); 1542 IRQF_SHARED, "omap-dma-engine", od);
1543 if (rc) 1543 if (rc) {
1544 omap_dma_free(od);
1544 return rc; 1545 return rc;
1546 }
1545 } 1547 }
1546 1548
1547 if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) 1549 if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
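
The two TI hunks (dma-crossbar and omap-dma) fix the same class of bug: an early return on a failed call skipped the cleanup of what had already been allocated, leaking rsv_events in the crossbar driver and the whole omap_dma state in the other. A standalone reduction:

    #include <stdlib.h>

    static int read_property(int *buf)
    {
        (void)buf;
        return -1; /* simulate the failing property read */
    }

    static int parse(void)
    {
        int *rsv_events = malloc(16 * sizeof(*rsv_events));

        if (!rsv_events)
            return -1;

        if (read_property(rsv_events)) {
            free(rsv_events); /* without this, the buffer leaked */
            return -1;
        }

        free(rsv_events);
        return 0;
    }

    int main(void)
    {
        parse();
        return 0;
    }
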
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index a13f224303c6..0221dee8dd4c 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr,
210 return -EIO; 210 return -EIO;
211 } 211 }
212 212
213 if (!IS_ERR(conf->confd)) { 213 if (conf->confd) {
214 if (!gpiod_get_raw_value_cansleep(conf->confd)) { 214 if (!gpiod_get_raw_value_cansleep(conf->confd)) {
215 dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); 215 dev_err(&mgr->dev, "CONF_DONE is inactive!\n");
216 return -EIO; 216 return -EIO;
@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi)
289 return PTR_ERR(conf->status); 289 return PTR_ERR(conf->status);
290 } 290 }
291 291
292 conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); 292 conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
293 if (IS_ERR(conf->confd)) { 293 if (IS_ERR(conf->confd)) {
294 dev_warn(&spi->dev, "Not using confd gpio: %ld\n", 294 dev_err(&spi->dev, "Failed to get confd gpio: %ld\n",
295 PTR_ERR(conf->confd)); 295 PTR_ERR(conf->confd));
296 return PTR_ERR(conf->confd);
297 } else if (!conf->confd) {
298 dev_warn(&spi->dev, "Not using confd gpio");
296 } 299 }
297 300
298 /* Register manager with unique name */ 301 /* Register manager with unique name */
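
The altera-ps-spi change switches to devm_gpiod_get_optional(), which distinguishes "the confd line is not wired up" (a NULL descriptor, safe to continue) from a real lookup failure (an ERR_PTR, which must now be propagated, e.g. for probe deferral); the completion check above likewise becomes a plain NULL test instead of IS_ERR(). Condensed form of the idiom, excerpted from the hunk:

    conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN);
    if (IS_ERR(conf->confd))
        return PTR_ERR(conf->confd);                  /* real error: propagate */
    if (!conf->confd)
        dev_warn(&spi->dev, "Not using confd gpio");  /* absent: carry on */
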
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index 343153d47e5b..004dc03ccf09 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -38,8 +38,7 @@
38#define SCOM_STATUS_PIB_RESP_MASK 0x00007000 38#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
39#define SCOM_STATUS_PIB_RESP_SHIFT 12 39#define SCOM_STATUS_PIB_RESP_SHIFT 12
40 40
41#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_ERR_SUMMARY | \ 41#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
42 SCOM_STATUS_PROTECTION | \
43 SCOM_STATUS_PARITY | \ 42 SCOM_STATUS_PARITY | \
44 SCOM_STATUS_PIB_ABORT | \ 43 SCOM_STATUS_PIB_ABORT | \
45 SCOM_STATUS_PIB_RESP_MASK) 44 SCOM_STATUS_PIB_RESP_MASK)
@@ -251,11 +250,6 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
251 /* Return -EBUSY on PIB abort to force a retry */ 250 /* Return -EBUSY on PIB abort to force a retry */
252 if (status & SCOM_STATUS_PIB_ABORT) 251 if (status & SCOM_STATUS_PIB_ABORT)
253 return -EBUSY; 252 return -EBUSY;
254 if (status & SCOM_STATUS_ERR_SUMMARY) {
255 fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
256 sizeof(uint32_t));
257 return -EIO;
258 }
259 return 0; 253 return 0;
260} 254}
261 255
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 378b206d2dc9..48fea4c68e8d 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
604 u8 new_irqs; 604 u8 new_irqs;
605 int level, i; 605 int level, i;
606 u8 invert_irq_mask[MAX_BANK]; 606 u8 invert_irq_mask[MAX_BANK];
607 int reg_direction[MAX_BANK]; 607 u8 reg_direction[MAX_BANK];
608 608
609 regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, 609 pca953x_read_regs(chip, chip->regs->direction, reg_direction);
610 NBANK(chip));
611 610
612 if (chip->driver_data & PCA_PCAL) { 611 if (chip->driver_data & PCA_PCAL) {
613 /* Enable latch on interrupt-enabled inputs */ 612 /* Enable latch on interrupt-enabled inputs */
@@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
679 bool pending_seen = false; 678 bool pending_seen = false;
680 bool trigger_seen = false; 679 bool trigger_seen = false;
681 u8 trigger[MAX_BANK]; 680 u8 trigger[MAX_BANK];
682 int reg_direction[MAX_BANK]; 681 u8 reg_direction[MAX_BANK];
683 int ret, i; 682 int ret, i;
684 683
685 if (chip->driver_data & PCA_PCAL) { 684 if (chip->driver_data & PCA_PCAL) {
@@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
710 return false; 709 return false;
711 710
712 /* Remove output pins from the equation */ 711 /* Remove output pins from the equation */
713 regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, 712 pca953x_read_regs(chip, chip->regs->direction, reg_direction);
714 NBANK(chip));
715 for (i = 0; i < NBANK(chip); i++) 713 for (i = 0; i < NBANK(chip); i++)
716 cur_stat[i] &= reg_direction[i]; 714 cur_stat[i] &= reg_direction[i];
717 715
@@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
768{ 766{
769 struct i2c_client *client = chip->client; 767 struct i2c_client *client = chip->client;
770 struct irq_chip *irq_chip = &chip->irq_chip; 768 struct irq_chip *irq_chip = &chip->irq_chip;
771 int reg_direction[MAX_BANK]; 769 u8 reg_direction[MAX_BANK];
772 int ret, i; 770 int ret, i;
773 771
774 if (!client->irq) 772 if (!client->irq)
@@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
789 * interrupt. We have to rely on the previous read for 787 * interrupt. We have to rely on the previous read for
790 * this purpose. 788 * this purpose.
791 */ 789 */
792 regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, 790 pca953x_read_regs(chip, chip->regs->direction, reg_direction);
793 NBANK(chip));
794 for (i = 0; i < NBANK(chip); i++) 791 for (i = 0; i < NBANK(chip); i++)
795 chip->irq_stat[i] &= reg_direction[i]; 792 chip->irq_stat[i] &= reg_direction[i];
796 mutex_init(&chip->irq_lock); 793 mutex_init(&chip->irq_lock);
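
The pca953x fix is about element width: the direction registers are one byte per bank, and the read helper fills one byte per bank, so reading into an int reg_direction[MAX_BANK] packed several banks' bytes into the first int and left the remaining elements stale. A runnable demonstration of the mismatch (helper name hypothetical):

    #include <stdio.h>
    #include <string.h>

    static void read_regs_u8(unsigned char *buf, int n)
    {
        for (int i = 0; i < n; i++)
            buf[i] = 0xa0 | i; /* fake per-bank register contents */
    }

    int main(void)
    {
        int wrong[4];
        unsigned char right[4];

        memset(wrong, 0, sizeof(wrong));
        read_regs_u8((unsigned char *)wrong, 4); /* 4 bytes land in wrong[0] */
        read_regs_u8(right, 4);

        printf("wrong[1] = %#x (expected 0xa1), right[1] = %#x\n",
               wrong[1], right[1]);
        return 0;
    }
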
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 567fb98c0892..9762dd6d99fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
363 /* Special handling for SPI GPIOs if used */ 363 /* Special handling for SPI GPIOs if used */
364 if (IS_ERR(desc)) 364 if (IS_ERR(desc))
365 desc = of_find_spi_gpio(dev, con_id, &of_flags); 365 desc = of_find_spi_gpio(dev, con_id, &of_flags);
366 if (IS_ERR(desc)) { 366 if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
367 /* This quirk looks up flags and all */ 367 /* This quirk looks up flags and all */
368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags); 368 desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
369 if (!IS_ERR(desc)) 369 if (!IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f497003f119c..cca749010cd0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; 1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1094 lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; 1094 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
1095 GPIOLINE_FLAG_IS_OUT);
1095 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) 1096 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
1096 lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; 1097 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
1098 GPIOLINE_FLAG_IS_OUT);
1097 1099
1098 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) 1100 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
1099 return -EFAULT; 1101 return -EFAULT;
@@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1371 if (status) 1373 if (status)
1372 goto err_remove_from_list; 1374 goto err_remove_from_list;
1373 1375
1374 status = gpiochip_irqchip_init_valid_mask(chip);
1375 if (status)
1376 goto err_remove_from_list;
1377
1378 status = gpiochip_alloc_valid_mask(chip); 1376 status = gpiochip_alloc_valid_mask(chip);
1379 if (status) 1377 if (status)
1380 goto err_remove_irqchip_mask; 1378 goto err_remove_from_list;
1381
1382 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1383 if (status)
1384 goto err_free_gpiochip_mask;
1385 1379
1386 status = of_gpiochip_add(chip); 1380 status = of_gpiochip_add(chip);
1387 if (status) 1381 if (status)
1388 goto err_remove_chip; 1382 goto err_free_gpiochip_mask;
1389 1383
1390 status = gpiochip_init_valid_mask(chip); 1384 status = gpiochip_init_valid_mask(chip);
1391 if (status) 1385 if (status)
@@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1411 1405
1412 machine_gpiochip_add(chip); 1406 machine_gpiochip_add(chip);
1413 1407
1408 status = gpiochip_irqchip_init_valid_mask(chip);
1409 if (status)
1410 goto err_remove_acpi_chip;
1411
1412 status = gpiochip_add_irqchip(chip, lock_key, request_key);
1413 if (status)
1414 goto err_remove_irqchip_mask;
1415
1414 /* 1416 /*
1415 * By first adding the chardev, and then adding the device, 1417 * By first adding the chardev, and then adding the device,
1416 * we get a device node entry in sysfs under 1418 * we get a device node entry in sysfs under
@@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
1422 if (gpiolib_initialized) { 1424 if (gpiolib_initialized) {
1423 status = gpiochip_setup_dev(gdev); 1425 status = gpiochip_setup_dev(gdev);
1424 if (status) 1426 if (status)
1425 goto err_remove_acpi_chip; 1427 goto err_remove_irqchip;
1426 } 1428 }
1427 return 0; 1429 return 0;
1428 1430
1431err_remove_irqchip:
1432 gpiochip_irqchip_remove(chip);
1433err_remove_irqchip_mask:
1434 gpiochip_irqchip_free_valid_mask(chip);
1429err_remove_acpi_chip: 1435err_remove_acpi_chip:
1430 acpi_gpiochip_remove(chip); 1436 acpi_gpiochip_remove(chip);
1431err_remove_of_chip: 1437err_remove_of_chip:
1432 gpiochip_free_hogs(chip); 1438 gpiochip_free_hogs(chip);
1433 of_gpiochip_remove(chip); 1439 of_gpiochip_remove(chip);
1434err_remove_chip:
1435 gpiochip_irqchip_remove(chip);
1436err_free_gpiochip_mask: 1440err_free_gpiochip_mask:
1437 gpiochip_free_valid_mask(chip); 1441 gpiochip_free_valid_mask(chip);
1438err_remove_irqchip_mask:
1439 gpiochip_irqchip_free_valid_mask(chip);
1440err_remove_from_list: 1442err_remove_from_list:
1441 spin_lock_irqsave(&gpio_lock, flags); 1443 spin_lock_irqsave(&gpio_lock, flags);
1442 list_del(&gdev->list); 1444 list_del(&gdev->list);
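
The gpiolib.c reshuffle moves the irqchip valid-mask setup and gpiochip_add_irqchip() to after machine_gpiochip_add(), once the chip is otherwise fully registered, and rewrites the error labels so that teardown again mirrors construction exactly, one label per completed step in reverse order. The general unwind idiom, runnable:

    #include <stdio.h>

    static int fail_at; /* which step fails, for demonstration */

    static int step(int n)  { return n == fail_at ? -1 : 0; }
    static void undo(int n) { printf("undo step %d\n", n); }

    static int setup(void)
    {
        int status;

        status = step(1);
        if (status)
            return status;
        status = step(2);
        if (status)
            goto err_undo_1;
        status = step(3);
        if (status)
            goto err_undo_2;
        return 0;

    err_undo_2: /* unwind strictly mirrors init order */
        undo(2);
    err_undo_1:
        undo(1);
        return status;
    }

    int main(void)
    {
        fail_at = 3;
        setup(); /* prints: undo step 2, then undo step 1 */
        return 0;
    }
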
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 9b384a94d2f3..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
574 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, 574 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
575 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 575 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
576 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, 578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, 579 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0, 0, 0, 0, 0 }, 580 { 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e4094f842e7..8b26c970a3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1143 num_deps = chunk->length_dw * 4 / 1143 num_deps = chunk->length_dw * 4 /
1144 sizeof(struct drm_amdgpu_cs_chunk_sem); 1144 sizeof(struct drm_amdgpu_cs_chunk_sem);
1145 1145
1146 if (p->post_deps)
1147 return -EINVAL;
1148
1146 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1149 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1147 GFP_KERNEL); 1150 GFP_KERNEL);
1148 p->num_post_deps = 0; 1151 p->num_post_deps = 0;
@@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
1166 1169
1167 1170
1168static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p, 1171static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
1169 struct amdgpu_cs_chunk 1172 struct amdgpu_cs_chunk *chunk)
1170 *chunk)
1171{ 1173{
1172 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps; 1174 struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
1173 unsigned num_deps; 1175 unsigned num_deps;
@@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
1177 num_deps = chunk->length_dw * 4 / 1179 num_deps = chunk->length_dw * 4 /
1178 sizeof(struct drm_amdgpu_cs_chunk_syncobj); 1180 sizeof(struct drm_amdgpu_cs_chunk_syncobj);
1179 1181
1182 if (p->post_deps)
1183 return -EINVAL;
1184
1180 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps), 1185 p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
1181 GFP_KERNEL); 1186 GFP_KERNEL);
1182 p->num_post_deps = 0; 1187 p->num_post_deps = 0;
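
Both syncobj-out parsers now refuse a repeated chunk: without the guard, a malformed submission carrying two such chunks made the second kmalloc_array() overwrite p->post_deps and reset the count, leaking the first allocation and whatever references were already stored in it. The guard, excerpted:

    if (p->post_deps)
        return -EINVAL; /* chunk seen twice: reject instead of leaking */

    p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                 GFP_KERNEL);
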
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..7398b4850649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
534 struct drm_sched_entity *entity) 534 struct drm_sched_entity *entity)
535{ 535{
536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); 536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
537 unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); 537 struct dma_fence *other;
538 struct dma_fence *other = centity->fences[idx]; 538 unsigned idx;
539 long r;
539 540
540 if (other) { 541 spin_lock(&ctx->ring_lock);
541 signed long r; 542 idx = centity->sequence & (amdgpu_sched_jobs - 1);
542 r = dma_fence_wait(other, true); 543 other = dma_fence_get(centity->fences[idx]);
543 if (r < 0) { 544 spin_unlock(&ctx->ring_lock);
544 if (r != -ERESTARTSYS)
545 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
546 545
547 return r; 546 if (!other)
548 } 547 return 0;
549 }
550 548
551 return 0; 549 r = dma_fence_wait(other, true);
550 if (r < 0 && r != -ERESTARTSYS)
551 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
552
553 dma_fence_put(other);
554 return r;
552} 555}
553 556
554void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) 557void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
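
The amdgpu_ctx_wait_prev_fence() rewrite closes a race: the fence slot is now read and referenced while ring_lock is held, so a concurrent submission that recycles centity->fences[idx] can no longer drop the last reference while this thread sleeps in dma_fence_wait(). The take-a-reference-under-the-lock pattern, excerpted:

    spin_lock(&ctx->ring_lock);
    other = dma_fence_get(centity->fences[idx]);
    spin_unlock(&ctx->ring_lock);

    if (!other)
        return 0;

    r = dma_fence_wait(other, true);
    /* error reporting omitted here */
    dma_fence_put(other);
    return r;
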
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 04b8ac4432c7..c066e1d3f981 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -596,14 +596,18 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
596 case CHIP_VEGA20: 596 case CHIP_VEGA20:
597 break; 597 break;
598 case CHIP_RAVEN: 598 case CHIP_RAVEN:
599 if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 599 if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
600 break; 600 &&((adev->gfx.rlc_fw_version != 106 &&
601 if ((adev->gfx.rlc_fw_version != 106 && 601 adev->gfx.rlc_fw_version < 531) ||
602 adev->gfx.rlc_fw_version < 531) || 602 (adev->gfx.rlc_fw_version == 53815) ||
603 (adev->gfx.rlc_fw_version == 53815) || 603 (adev->gfx.rlc_feature_version < 1) ||
604 (adev->gfx.rlc_feature_version < 1) || 604 !adev->gfx.rlc.is_rlc_v2_1))
605 !adev->gfx.rlc.is_rlc_v2_1)
606 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 605 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
606
607 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
608 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
609 AMD_PG_SUPPORT_CP |
610 AMD_PG_SUPPORT_RLC_SMU_HS;
607 break; 611 break;
608 default: 612 default:
609 break; 613 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..9922bce3fd89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
552 AMD_CG_SUPPORT_BIF_LS; 552 AMD_CG_SUPPORT_BIF_LS;
553 adev->pg_flags = AMD_PG_SUPPORT_VCN | 553 adev->pg_flags = AMD_PG_SUPPORT_VCN |
554 AMD_PG_SUPPORT_VCN_DPG | 554 AMD_PG_SUPPORT_VCN_DPG |
555 AMD_PG_SUPPORT_MMHUB |
556 AMD_PG_SUPPORT_ATHUB; 555 AMD_PG_SUPPORT_ATHUB;
557 adev->external_rev_id = adev->rev_id + 0x1; 556 adev->external_rev_id = adev->rev_id + 0x1;
558 break; 557 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..04fbf05d7176 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)
992 992
993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; 993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
994 } 994 }
995
996 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
997 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
998 AMD_PG_SUPPORT_CP |
999 AMD_PG_SUPPORT_RLC_SMU_HS;
1000 break; 995 break;
1001 default: 996 default:
1002 /* FIXME: not supported yet */ 997 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..45be7a2132bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
3131convert_color_depth_from_display_info(const struct drm_connector *connector, 3131convert_color_depth_from_display_info(const struct drm_connector *connector,
3132 const struct drm_connector_state *state) 3132 const struct drm_connector_state *state)
3133{ 3133{
3134 uint32_t bpc = connector->display_info.bpc; 3134 uint8_t bpc = (uint8_t)connector->display_info.bpc;
3135
3136 /* Assume 8 bpc by default if no bpc is specified. */
3137 bpc = bpc ? bpc : 8;
3135 3138
3136 if (!state) 3139 if (!state)
3137 state = connector->state; 3140 state = connector->state;
3138 3141
3139 if (state) { 3142 if (state) {
3140 bpc = state->max_bpc; 3143 /*
3144 * Cap display bpc based on the user requested value.
3145 *
 3146	 * The value for state->max_bpc may not be correctly updated
3147 * depending on when the connector gets added to the state
3148 * or if this was called outside of atomic check, so it
3149 * can't be used directly.
3150 */
3151 bpc = min(bpc, state->max_requested_bpc);
3152
3141 /* Round down to the nearest even number. */ 3153 /* Round down to the nearest even number. */
3142 bpc = bpc - (bpc & 1); 3154 bpc = bpc - (bpc & 1);
3143 } 3155 }
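
The display hunk stops trusting state->max_bpc directly: the EDID-reported depth (defaulting to 8 when unknown) is clamped by the user's max_requested_bpc and then rounded down to an even value. A runnable reduction of the clamp:

    #include <stdio.h>

    static unsigned int effective_bpc(unsigned int edid_bpc,
                                      unsigned int max_requested)
    {
        unsigned int bpc = edid_bpc ? edid_bpc : 8; /* assume 8 if unknown */

        if (bpc > max_requested)
            bpc = max_requested;
        return bpc - (bpc & 1); /* round down to the nearest even number */
    }

    int main(void)
    {
        printf("%u\n", effective_bpc(0, 16));  /* -> 8  */
        printf("%u\n", effective_bpc(12, 10)); /* -> 10 */
        return 0;
    }
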
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f27c6fbb192e..90c4e87ac5ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
2101 if (ret) 2101 if (ret)
2102 return ret; 2102 return ret;
2103 2103
2104 *query = metrics_table.CurrSocketPower << 8; 2104 /* For the 40.46 release, they changed the value name */
2105 if (hwmgr->smu_version == 0x282e00)
2106 *query = metrics_table.AverageSocketPower << 8;
2107 else
2108 *query = metrics_table.CurrSocketPower << 8;
2105 2109
2106 return ret; 2110 return ret;
2107} 2111}
@@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
2349 data->dpm_table.soc_table.dpm_state.soft_max_level = 2353 data->dpm_table.soc_table.dpm_state.soft_max_level =
2350 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2354 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2351 2355
2352 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2356 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2357 FEATURE_DPM_UCLK_MASK |
2358 FEATURE_DPM_SOCCLK_MASK);
2353 PP_ASSERT_WITH_CODE(!ret, 2359 PP_ASSERT_WITH_CODE(!ret,
2354 "Failed to upload boot level to highest!", 2360 "Failed to upload boot level to highest!",
2355 return ret); 2361 return ret);
2356 2362
2357 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2363 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2364 FEATURE_DPM_UCLK_MASK |
2365 FEATURE_DPM_SOCCLK_MASK);
2358 PP_ASSERT_WITH_CODE(!ret, 2366 PP_ASSERT_WITH_CODE(!ret,
2359 "Failed to upload dpm max level to highest!", 2367 "Failed to upload dpm max level to highest!",
2360 return ret); 2368 return ret);
@@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2387 data->dpm_table.soc_table.dpm_state.soft_max_level = 2395 data->dpm_table.soc_table.dpm_state.soft_max_level =
2388 data->dpm_table.soc_table.dpm_levels[soft_level].value; 2396 data->dpm_table.soc_table.dpm_levels[soft_level].value;
2389 2397
2390 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2398 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2399 FEATURE_DPM_UCLK_MASK |
2400 FEATURE_DPM_SOCCLK_MASK);
2391 PP_ASSERT_WITH_CODE(!ret, 2401 PP_ASSERT_WITH_CODE(!ret,
2392 "Failed to upload boot level to highest!", 2402 "Failed to upload boot level to highest!",
2393 return ret); 2403 return ret);
2394 2404
2395 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2405 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2406 FEATURE_DPM_UCLK_MASK |
2407 FEATURE_DPM_SOCCLK_MASK);
2396 PP_ASSERT_WITH_CODE(!ret, 2408 PP_ASSERT_WITH_CODE(!ret,
2397 "Failed to upload dpm max level to highest!", 2409 "Failed to upload dpm max level to highest!",
2398 return ret); 2410 return ret);
@@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2403 2415
2404static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2416static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2405{ 2417{
2418 struct vega20_hwmgr *data =
2419 (struct vega20_hwmgr *)(hwmgr->backend);
2420 uint32_t soft_min_level, soft_max_level;
2406 int ret = 0; 2421 int ret = 0;
2407 2422
2408 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2423 /* gfxclk soft min/max settings */
2424 soft_min_level =
2425 vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2426 soft_max_level =
2427 vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2428
2429 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2430 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2431 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2432 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2433
2434 /* uclk soft min/max settings */
2435 soft_min_level =
2436 vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2437 soft_max_level =
2438 vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2439
2440 data->dpm_table.mem_table.dpm_state.soft_min_level =
2441 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2442 data->dpm_table.mem_table.dpm_state.soft_max_level =
2443 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2444
2445 /* socclk soft min/max settings */
2446 soft_min_level =
2447 vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2448 soft_max_level =
2449 vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2450
2451 data->dpm_table.soc_table.dpm_state.soft_min_level =
2452 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2453 data->dpm_table.soc_table.dpm_state.soft_max_level =
2454 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2455
2456 ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2457 FEATURE_DPM_UCLK_MASK |
2458 FEATURE_DPM_SOCCLK_MASK);
2409 PP_ASSERT_WITH_CODE(!ret, 2459 PP_ASSERT_WITH_CODE(!ret,
2410 "Failed to upload DPM Bootup Levels!", 2460 "Failed to upload DPM Bootup Levels!",
2411 return ret); 2461 return ret);
2412 2462
2413 ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); 2463 ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
2464 FEATURE_DPM_UCLK_MASK |
2465 FEATURE_DPM_SOCCLK_MASK);
2414 PP_ASSERT_WITH_CODE(!ret, 2466 PP_ASSERT_WITH_CODE(!ret,
2415 "Failed to upload DPM Max Levels!", 2467 "Failed to upload DPM Max Levels!",
2416 return ret); 2468 return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a0f52c86d8c7..a78b2e295895 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -907,8 +907,6 @@ struct smu_funcs
907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) 907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
908#define smu_set_azalia_d3_pme(smu) \ 908#define smu_set_azalia_d3_pme(smu) \
909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) 909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
910#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
911 ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
912#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ 910#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
913 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) 911 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
914#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ 912#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 5fde5cf65b42..53097961bf2b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
326 struct amdgpu_device *adev = smu->adev; 326 struct amdgpu_device *adev = smu->adev;
327 const struct smc_firmware_header_v1_0 *hdr; 327 const struct smc_firmware_header_v1_0 *hdr;
328 int ret, index; 328 int ret, index;
329 uint32_t size; 329 uint32_t size = 0;
330 uint16_t atom_table_size;
330 uint8_t frev, crev; 331 uint8_t frev, crev;
331 void *table; 332 void *table;
332 uint16_t version_major, version_minor; 333 uint16_t version_major, version_minor;
@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
354 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 355 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
355 powerplayinfo); 356 powerplayinfo);
356 357
357 ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev, 358 ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
358 (uint8_t **)&table); 359 (uint8_t **)&table);
359 if (ret) 360 if (ret)
360 return ret; 361 return ret;
362 size = atom_table_size;
361 } 363 }
362 364
363 if (!smu->smu_table.power_play_table) 365 if (!smu->smu_table.power_play_table)
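
The smu_v11_0 fix repairs stack corruption: the table lookup stores a 16-bit size, so passing (uint16_t *)&size for a uint32_t wrote only half the variable and left the other half as stale stack data (and on big endian it would be the wrong half); the fix reads into a genuine uint16_t and widens it. A standalone demonstration (the cast also violates strict aliasing; the kernel builds with -fno-strict-aliasing, plain C compilers may not):

    #include <stdint.h>
    #include <stdio.h>

    static void get_table_size(uint16_t *sz) { *sz = 0x1234; }

    int main(void)
    {
        uint32_t size = 0xdeadbeef; /* stands in for stale stack contents */
        uint16_t atom_table_size;

        get_table_size((uint16_t *)&size); /* old, broken pattern */
        printf("aliased: %#x\n", (unsigned int)size); /* 0xdead1234 on LE */

        get_table_size(&atom_table_size); /* fixed pattern */
        size = atom_table_size;
        printf("widened: %#x\n", (unsigned int)size); /* 0x1234 */
        return 0;
    }
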
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dd6fd1c8bf24..6a14497257e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3050,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
3050 3050
3051static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) 3051static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
3052{ 3052{
3053 uint32_t smu_version;
3053 int ret = 0; 3054 int ret = 0;
3054 SmuMetrics_t metrics; 3055 SmuMetrics_t metrics;
3055 3056
@@ -3060,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
3060 if (ret) 3061 if (ret)
3061 return ret; 3062 return ret;
3062 3063
3063 *value = metrics.CurrSocketPower << 8; 3064 ret = smu_get_smc_version(smu, NULL, &smu_version);
3065 if (ret)
3066 return ret;
3067
3068 /* For the 40.46 release, they changed the value name */
3069 if (smu_version == 0x282e00)
3070 *value = metrics.AverageSocketPower << 8;
3071 else
3072 *value = metrics.CurrSocketPower << 8;
3064 3073
3065 return 0; 3074 return 0;
3066} 3075}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..9d4d5075cc64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
8#include <linux/iommu.h> 8#include <linux/iommu.h>
9#include <linux/of_device.h> 9#include <linux/of_device.h>
10#include <linux/of_graph.h> 10#include <linux/of_graph.h>
11#include <linux/of_reserved_mem.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
13#ifdef CONFIG_DEBUG_FS 14#ifdef CONFIG_DEBUG_FS
@@ -126,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
126 pipe->of_output_port = 127 pipe->of_output_port =
127 of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); 128 of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
128 129
129 pipe->of_node = np; 130 pipe->of_node = of_node_get(np);
130 131
131 return 0; 132 return 0;
132} 133}
@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
143 return mdev->irq; 144 return mdev->irq;
144 } 145 }
145 146
147 /* Get the optional framebuffer memory resource */
148 ret = of_reserved_mem_device_init(dev);
149 if (ret && ret != -ENODEV)
150 return ret;
151 ret = 0;
152
146 for_each_available_child_of_node(np, child) { 153 for_each_available_child_of_node(np, child) {
147 if (of_node_cmp(child->name, "pipeline") == 0) { 154 if (of_node_cmp(child->name, "pipeline") == 0) {
148 ret = komeda_parse_pipe_dt(mdev, child); 155 ret = komeda_parse_pipe_dt(mdev, child);
@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
289 296
290 mdev->n_pipelines = 0; 297 mdev->n_pipelines = 0;
291 298
299 of_reserved_mem_device_release(dev);
300
292 if (funcs && funcs->cleanup) 301 if (funcs && funcs->cleanup)
293 funcs->cleanup(mdev); 302 funcs->cleanup(mdev);
294 303
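
komeda_dev.c now takes a reference on the pipeline DT node it keeps and wires up an optional reserved-memory region for the framebuffer; note the -ENODEV handling, which means "no memory-region property was given" and is deliberately not treated as an error. The optional-resource idiom, excerpted from the hunk:

    /* Get the optional framebuffer memory resource */
    ret = of_reserved_mem_device_init(dev);
    if (ret && ret != -ENODEV) /* -ENODEV: nothing declared, carry on */
        return ret;
    ret = 0;
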
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
35 return NULL; 35 return NULL;
36} 36}
37 37
38u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
39{
40 u32 bpp;
41
42 switch (info->format) {
43 case DRM_FORMAT_YUV420_8BIT:
44 bpp = 12;
45 break;
46 case DRM_FORMAT_YUV420_10BIT:
47 bpp = 15;
48 break;
49 default:
50 bpp = info->cpp[0] * 8;
51 break;
52 }
53
54 return bpp;
55}
56
38/* Two assumptions 57/* Two assumptions
39 * 1. RGB always has YTR 58 * 1. RGB always has YTR
40 * 2. Tiled RGB always has SC 59 * 2. Tiled RGB always has SC
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
97komeda_get_format_caps(struct komeda_format_caps_table *table, 97komeda_get_format_caps(struct komeda_format_caps_table *table,
98 u32 fourcc, u64 modifier); 98 u32 fourcc, u64 modifier);
99 99
100u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
101 u64 modifier);
102
100u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, 103u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
101 u32 layer_type, u32 *n_fmts); 104 u32 layer_type, u32 *n_fmts);
102 105
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
43 struct drm_framebuffer *fb = &kfb->base; 43 struct drm_framebuffer *fb = &kfb->base;
44 const struct drm_format_info *info = fb->format; 44 const struct drm_format_info *info = fb->format;
45 struct drm_gem_object *obj; 45 struct drm_gem_object *obj;
46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks; 46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
47 u64 min_size; 47 u64 min_size;
48 48
49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); 49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, 88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
89 alignment_header); 89 alignment_header);
90 90
91 bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
91 kfb->afbc_size = kfb->offset_payload + n_blocks * 92 kfb->afbc_size = kfb->offset_payload + n_blocks *
92 ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS, 93 ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
93 AFBC_SUPERBLK_ALIGNMENT); 94 AFBC_SUPERBLK_ALIGNMENT);
94 min_size = kfb->afbc_size + fb->offsets[0]; 95 min_size = kfb->afbc_size + fb->offsets[0];
95 if (min_size > obj->size) { 96 if (min_size > obj->size) {
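
The two komeda hunks above fix AFBC payload sizing for the packed YUV420 formats, whose per-plane cpp[] cannot express their real rates: with 4:2:0 subsampling the packed stream is 12 bits per pixel for DRM_FORMAT_YUV420_8BIT (8-bit luma plus two quarter-resolution chroma planes) and 15 for the 10-bit variant, so the superblock payload must be computed from bits and divided by 8. Runnable arithmetic check (assuming the usual 16x16, 256-pixel AFBC superblock):

    #include <stdio.h>

    #define AFBC_SUPERBLK_PIXELS 256 /* 16x16 superblock */

    static unsigned int payload_bytes(unsigned int bpp)
    {
        return bpp * AFBC_SUPERBLK_PIXELS / 8;
    }

    int main(void)
    {
        printf("byte-based 8 bpp guess: %u bytes\n", payload_bytes(8));  /* 256 */
        printf("actual 12 bpp payload : %u bytes\n", payload_bytes(12)); /* 384 */
        return 0;
    }
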
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..69d9e26c60c8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
14#include <drm/drm_gem_cma_helper.h> 14#include <drm/drm_gem_cma_helper.h>
15#include <drm/drm_gem_framebuffer_helper.h> 15#include <drm/drm_gem_framebuffer_helper.h>
16#include <drm/drm_irq.h> 16#include <drm/drm_irq.h>
17#include <drm/drm_probe_helper.h>
17#include <drm/drm_vblank.h> 18#include <drm/drm_vblank.h>
18 19
19#include "komeda_dev.h" 20#include "komeda_dev.h"
@@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
146 struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st); 147 struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
147 struct komeda_plane_state *kplane_st; 148 struct komeda_plane_state *kplane_st;
148 struct drm_plane_state *plane_st; 149 struct drm_plane_state *plane_st;
149 struct drm_framebuffer *fb;
150 struct drm_plane *plane; 150 struct drm_plane *plane;
151 struct list_head zorder_list; 151 struct list_head zorder_list;
152 int order = 0, err; 152 int order = 0, err;
@@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
172 172
173 list_for_each_entry(kplane_st, &zorder_list, zlist_node) { 173 list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
174 plane_st = &kplane_st->base; 174 plane_st = &kplane_st->base;
175 fb = plane_st->fb;
176 plane = plane_st->plane; 175 plane = plane_st->plane;
177 176
178 plane_st->normalized_zpos = order++; 177 plane_st->normalized_zpos = order++;
@@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
205 struct drm_atomic_state *state) 204 struct drm_atomic_state *state)
206{ 205{
207 struct drm_crtc *crtc; 206 struct drm_crtc *crtc;
208 struct drm_crtc_state *old_crtc_st, *new_crtc_st; 207 struct drm_crtc_state *new_crtc_st;
209 int i, err; 208 int i, err;
210 209
211 err = drm_atomic_helper_check_modeset(dev, state); 210 err = drm_atomic_helper_check_modeset(dev, state);
@@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
216 * so need to add all affected_planes (even unchanged) to 215 * so need to add all affected_planes (even unchanged) to
217 * drm_atomic_state. 216 * drm_atomic_state.
218 */ 217 */
219 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) { 218 for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
220 err = drm_atomic_add_affected_planes(state, crtc); 219 err = drm_atomic_add_affected_planes(state, crtc);
221 if (err) 220 if (err)
222 return err; 221 return err;
@@ -307,24 +306,33 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
307 komeda_kms_irq_handler, IRQF_SHARED, 306 komeda_kms_irq_handler, IRQF_SHARED,
308 drm->driver->name, drm); 307 drm->driver->name, drm);
309 if (err) 308 if (err)
310 goto cleanup_mode_config; 309 goto free_component_binding;
311 310
312 err = mdev->funcs->enable_irq(mdev); 311 err = mdev->funcs->enable_irq(mdev);
313 if (err) 312 if (err)
314 goto cleanup_mode_config; 313 goto free_component_binding;
315 314
316 drm->irq_enabled = true; 315 drm->irq_enabled = true;
317 316
317 drm_kms_helper_poll_init(drm);
318
318 err = drm_dev_register(drm, 0); 319 err = drm_dev_register(drm, 0);
319 if (err) 320 if (err)
320 goto cleanup_mode_config; 321 goto free_interrupts;
321 322
322 return kms; 323 return kms;
323 324
324cleanup_mode_config: 325free_interrupts:
326 drm_kms_helper_poll_fini(drm);
325 drm->irq_enabled = false; 327 drm->irq_enabled = false;
328 mdev->funcs->disable_irq(mdev);
329free_component_binding:
330 component_unbind_all(mdev->dev, drm);
331cleanup_mode_config:
326 drm_mode_config_cleanup(drm); 332 drm_mode_config_cleanup(drm);
327 komeda_kms_cleanup_private_objs(kms); 333 komeda_kms_cleanup_private_objs(kms);
334 drm->dev_private = NULL;
335 drm_dev_put(drm);
328free_kms: 336free_kms:
329 kfree(kms); 337 kfree(kms);
330 return ERR_PTR(err); 338 return ERR_PTR(err);
@@ -335,12 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
335 struct drm_device *drm = &kms->base; 343 struct drm_device *drm = &kms->base;
336 struct komeda_dev *mdev = drm->dev_private; 344 struct komeda_dev *mdev = drm->dev_private;
337 345
346 drm_dev_unregister(drm);
347 drm_kms_helper_poll_fini(drm);
348 drm_atomic_helper_shutdown(drm);
338 drm->irq_enabled = false; 349 drm->irq_enabled = false;
339 mdev->funcs->disable_irq(mdev); 350 mdev->funcs->disable_irq(mdev);
340 drm_dev_unregister(drm);
341 component_unbind_all(mdev->dev, drm); 351 component_unbind_all(mdev->dev, drm);
342 komeda_kms_cleanup_private_objs(kms);
343 drm_mode_config_cleanup(drm); 352 drm_mode_config_cleanup(drm);
353 komeda_kms_cleanup_private_objs(kms);
344 drm->dev_private = NULL; 354 drm->dev_private = NULL;
345 drm_dev_put(drm); 355 drm_dev_put(drm);
346} 356}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..14b683164544 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
480 struct seq_file *sf); 480 struct seq_file *sf);
481 481
482/* component APIs */ 482/* component APIs */
483extern __printf(10, 11)
483struct komeda_component * 484struct komeda_component *
484komeda_component_add(struct komeda_pipeline *pipe, 485komeda_component_add(struct komeda_pipeline *pipe,
485 size_t comp_sz, u32 id, u32 hw_id, 486 size_t comp_sz, u32 id, u32 hw_id,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 617e1f7b8472..2851cac94d86 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
148 if (!kcrtc->master->wb_layer) 148 if (!kcrtc->master->wb_layer)
149 return 0; 149 return 0;
150 150
151 kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL); 151 kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
152 if (!kwb_conn) 152 if (!kwb_conn)
153 return -ENOMEM; 153 return -ENOMEM;
154 154
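
The writeback-connector fix is a classic sizeof-of-the-wrong-object bug: kzalloc(sizeof(*wb_conn)) allocated only the embedded base connector, so writes to the container's extra members landed past the end of the allocation; sizeof(*kwb_conn) sizes the actual container. Demonstration of the size gap (member names illustrative):

    #include <stdio.h>

    struct wb_conn  { void *base; };
    struct kwb_conn { struct wb_conn wb; void *wb_layer; void *extra_state; };

    int main(void)
    {
        printf("allocated %zu bytes, container needs %zu bytes\n",
               sizeof(struct wb_conn), sizeof(struct kwb_conn));
        return 0;
    }
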
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0369e690f36..0d23bf729e9f 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
1454} 1454}
1455 1455
1456static int drm_mode_parse_cmdline_extra(const char *str, int length, 1456static int drm_mode_parse_cmdline_extra(const char *str, int length,
1457 bool freestanding,
1457 const struct drm_connector *connector, 1458 const struct drm_connector *connector,
1458 struct drm_cmdline_mode *mode) 1459 struct drm_cmdline_mode *mode)
1459{ 1460{
@@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
1462 for (i = 0; i < length; i++) { 1463 for (i = 0; i < length; i++) {
1463 switch (str[i]) { 1464 switch (str[i]) {
1464 case 'i': 1465 case 'i':
1466 if (freestanding)
1467 return -EINVAL;
1468
1465 mode->interlace = true; 1469 mode->interlace = true;
1466 break; 1470 break;
1467 case 'm': 1471 case 'm':
1472 if (freestanding)
1473 return -EINVAL;
1474
1468 mode->margins = true; 1475 mode->margins = true;
1469 break; 1476 break;
1470 case 'D': 1477 case 'D':
@@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
1542 if (extras) { 1549 if (extras) {
1543 int ret = drm_mode_parse_cmdline_extra(end_ptr + i, 1550 int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
1544 1, 1551 1,
1552 false,
1545 connector, 1553 connector,
1546 mode); 1554 mode);
1547 if (ret) 1555 if (ret)
@@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
1669 return 0; 1677 return 0;
1670} 1678}
1671 1679
1680static const char *drm_named_modes_whitelist[] = {
1681 "NTSC",
1682 "PAL",
1683};
1684
1685static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
1686{
1687 int i;
1688
1689 for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
1690 if (!strncmp(mode, drm_named_modes_whitelist[i], size))
1691 return true;
1692
1693 return false;
1694}
1695
1672/** 1696/**
1673 * drm_mode_parse_command_line_for_connector - parse command line modeline for connector 1697 * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
1674 * @mode_option: optional per connector mode option 1698 * @mode_option: optional per connector mode option
@@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	 * bunch of things:
 	 * - We need to make sure that the first character (which
 	 *   would be our resolution in X) is a digit.
-	 * - However, if the X resolution is missing, then we end up
-	 *   with something like x<yres>, with our first character
-	 *   being an alpha-numerical character, which would be
-	 *   considered a named mode.
+	 * - If not, then it's either a named mode or a force on/off.
+	 *   To distinguish between the two, we need to run the
+	 *   extra parsing function, and if not, then we consider it
+	 *   a named mode.
 	 *
 	 * If this isn't enough, we should add more heuristics here,
 	 * and matching unit-tests.
 	 */
-	if (!isdigit(name[0]) && name[0] != 'x')
+	if (!isdigit(name[0]) && name[0] != 'x') {
+		unsigned int namelen = strlen(name);
+
+		/*
+		 * Only the force on/off options can be in that case,
+		 * and they all take a single character.
+		 */
+		if (namelen == 1) {
+			ret = drm_mode_parse_cmdline_extra(name, namelen, true,
+							   connector, mode);
+			if (!ret)
+				return true;
+		}
+
 		named_mode = true;
+	}
 
 	/* Try to locate the bpp and refresh specifiers, if any */
 	bpp_ptr = strchr(name, '-');
@@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	if (named_mode) {
 		if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
 			return false;
+
+		if (!drm_named_mode_is_in_whitelist(name, mode_end))
+			return false;
+
 		strscpy(mode->name, name, mode_end + 1);
 	} else {
 		ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
@@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	    extra_ptr != options_ptr) {
 		int len = strlen(name) - (extra_ptr - name);
 
-		ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
+		ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
 						   connector, mode);
 		if (ret)
 			return false;
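
With the hunks above, a bare single-character option such as "e" or "d" parses as a force flag, and any other non-digit-led string must match the NTSC/PAL whitelist before it is accepted as a named mode. One subtlety: strncmp() is bounded by the length of the user-supplied name, so the check behaves as a prefix match on the whitelist entry. A self-contained userspace sketch of that behaviour (illustrative only, not driver code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* size comes from the user string, as in the hunk above */
		printf("%d\n", !strncmp("NTSC", "NTSC", 4)); /* 1: exact match   */
		printf("%d\n", !strncmp("NT",   "NTSC", 2)); /* 1: prefix passes */
		printf("%d\n", !strncmp("PALX", "PAL",  4)); /* 0: rejected      */
		return 0;
	}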
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..1cb1fa74cfbc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
 	else if (intel_crtc_has_dp_encoder(pipe_config))
 		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
 						    &pipe_config->dp_m_n);
-	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
-		dotclock = pipe_config->port_clock * 2 / 3;
+	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
+		dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
 	else
 		dotclock = pipe_config->port_clock;
 
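
Worked through, the rewritten branch generalizes the old 12 bpc special case: for deep-color HDMI the port clock scales with bits per pixel, so the pixel clock is recovered as port_clock * 24 / pipe_bpp. At pipe_bpp == 36 (12 bpc) that is 24/36 = 2/3, exactly the factor the removed line hard-coded; at pipe_bpp == 30 (10 bpc) it yields 24/30 = 4/5, a case the old code did not handle at all and silently reported as port_clock.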
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..18e4cba76720 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -539,7 +539,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
-	drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+	/*
+	 * Reuse the prop from the SST connector because we're
+	 * not allowed to create new props after device registration.
+	 */
+	connector->max_bpc_property =
+		intel_dp->attached_connector->base.max_bpc_property;
+	if (connector->max_bpc_property)
+		drm_connector_attach_max_bpc_property(connector, 6, 12);
 
 	return connector;
 
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..f413904a3e96 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
 	pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
 		DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
 	DRM_INFO("PPS2 = 0x%08x\n", pps_val);
-	if (encoder->type == INTEL_OUTPUT_EDP) {
+	if (cpu_transcoder == TRANSCODER_EDP) {
 		I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
 		/*
 		 * If 2 VDSC instances are needed, configure PPS for second
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..bac1ee94f63f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 
 	pci_set_master(pdev);
 
+	/*
+	 * We don't have a max segment size, so set it to the max so sg's
+	 * debugging layer doesn't complain
+	 */
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
 	/* overlay on gen2 is broken and can't address above 1G */
 	if (IS_GEN(dev_priv, 2)) {
 		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..724627afdedc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
 static void vgt_deballoon_space(struct i915_ggtt *ggtt,
 				struct drm_mm_node *node)
 {
+	if (!drm_mm_node_allocated(node))
+		return;
+
 	DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
 			 node->start,
 			 node->start + node->size,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..f11979879e7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -829,7 +829,7 @@ struct intel_crtc_state {
 
 	/*
 	 * Frequence the dpll for the port should run at. Differs from the
-	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+	 * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
 	 * already multiplied by pixel_multiplier.
 	 */
 	int port_clock;
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index e9f9e9fb9b17..6381652a8829 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	if (panel) {
+	if (panel)
 		bridge = devm_drm_panel_bridge_add(dev, panel,
-						   DRM_MODE_CONNECTOR_Unknown);
-	}
+						   DRM_MODE_CONNECTOR_DPI);
 
 	priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
 					      &priv->dma_hwdesc_phys,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..945bc20f1d33 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,6 +17,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
 
 #include "mtk_drm_crtc.h"
 #include "mtk_drm_ddp.h"
@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	struct mtk_drm_private *private = drm->dev_private;
 	struct platform_device *pdev;
 	struct device_node *np;
+	struct device *dma_dev;
 	int ret;
 
 	if (!iommu_present(&platform_bus_type))
@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 		goto err_component_unbind;
 	}
 
-	private->dma_dev = &pdev->dev;
+	dma_dev = &pdev->dev;
+	private->dma_dev = dma_dev;
+
+	/*
+	 * Configure the DMA segment size to make sure we get contiguous IOVA
+	 * when importing PRIME buffers.
+	 */
+	if (!dma_dev->dma_parms) {
+		private->dma_parms_allocated = true;
+		dma_dev->dma_parms =
+			devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
+				     GFP_KERNEL);
+	}
+	if (!dma_dev->dma_parms) {
+		ret = -ENOMEM;
+		goto err_component_unbind;
+	}
+
+	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dma_dev, "Failed to set DMA segment size\n");
+		goto err_unset_dma_parms;
+	}
 
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	drm->irq_enabled = true;
 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)
-		goto err_component_unbind;
+		goto err_unset_dma_parms;
 
 	drm_kms_helper_poll_init(drm);
 	drm_mode_config_reset(drm);
 
 	return 0;
 
+err_unset_dma_parms:
+	if (private->dma_parms_allocated)
+		dma_dev->dma_parms = NULL;
 err_component_unbind:
 	component_unbind_all(drm->dev, drm);
 err_config_cleanup:
@@ -302,9 +329,14 @@ err_config_cleanup:
 
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
+	struct mtk_drm_private *private = drm->dev_private;
+
 	drm_kms_helper_poll_fini(drm);
 	drm_atomic_helper_shutdown(drm);
 
+	if (private->dma_parms_allocated)
+		private->dma_dev->dma_parms = NULL;
+
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
 }
@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
 	.compat_ioctl = drm_compat_ioctl,
 };
 
+/*
+ * We need to override this because the device used to import the memory is
+ * not dev->dev, as drm_gem_prime_import() expects.
+ */
+struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
+						struct dma_buf *dma_buf)
+{
+	struct mtk_drm_private *private = dev->dev_private;
+
+	return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
+}
+
 static struct drm_driver mtk_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
-	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_import = mtk_drm_gem_prime_import,
 	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
 	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
 	.gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
 		comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
 		if (!comp) {
 			ret = -ENOMEM;
+			of_node_put(node);
 			goto err_node;
 		}
 
 		ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
-		if (ret)
+		if (ret) {
+			of_node_put(node);
 			goto err_node;
+		}
 
 		private->ddp_comp[comp_id] = comp;
 	}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..e03fea12ff59 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -51,6 +51,8 @@ struct mtk_drm_private {
 	} commit;
 
 	struct drm_atomic_state *suspend_state;
+
+	bool dma_parms_allocated;
 };
 
 extern struct platform_driver mtk_ddp_driver;
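
Both the i915 and mediatek changes above lean on the same DMA API detail: dma_set_max_seg_size() stores the limit in dev->dma_parms, so a device whose bus code never allocated that structure has to provide storage first (hence the dma_parms_allocated flag to undo it on teardown). A hedged sketch of the pattern, with lifetime handling simplified:

	/* assumed shape, not the exact driver code */
	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	/* each scatterlist entry may now span up to 4 GiB - 1 */
	return dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));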
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		u8 *ptr = msg->buf;
 
 		while (remaining) {
-			u8 cnt = (remaining > 16) ? 16 : remaining;
-			u8 cmd;
+			u8 cnt, retries, cmd;
 
 			if (msg->flags & I2C_M_RD)
 				cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 			if (mcnt || remaining > 16)
 				cmd |= 4; /* MOT */
 
-			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
-			if (ret < 0) {
-				nvkm_i2c_aux_release(aux);
-				return ret;
+			for (retries = 0, cnt = 0;
+			     retries < 32 && !cnt;
+			     retries++) {
+				cnt = min_t(u8, remaining, 16);
+				ret = aux->func->xfer(aux, true, cmd,
+						      msg->addr, ptr, &cnt);
+				if (ret < 0)
+					goto out;
+			}
+			if (!cnt) {
+				AUX_TRACE(aux, "no data after 32 retries");
+				ret = -EIO;
+				goto out;
 			}
 
 			ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		msg++;
 	}
 
+	ret = num;
+out:
 	nvkm_i2c_aux_release(aux);
-	return num;
+	return ret;
 }
 
 static u32
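
The nouveau change replaces a single AUX transfer with a bounded retry: the hardware may legitimately report a zero-byte completion, and the old loop could then advance by nothing and make no progress. The general shape, as a self-contained sketch with hypothetical names:

	#include <errno.h>

	/* xfer() may shrink *cnt to 0 to signal "try again" */
	static int xfer_with_retry(int (*xfer)(void *buf, int *cnt), void *buf)
	{
		int retries, cnt = 0, ret;

		for (retries = 0; retries < 32 && !cnt; retries++) {
			cnt = 16;               /* request up to 16 bytes */
			ret = xfer(buf, &cnt);  /* callee reports actual count */
			if (ret < 0)
				return ret;
		}
		return cnt ? cnt : -EIO;        /* still nothing after 32 tries */
	}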
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index 84a2f243ed9b..4695f1c8e33f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index de0f882f0f7b..14b41de44ebc 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,6 +4,7 @@
  * Author: Archit Taneja <archit@ti.com>
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
 {
 	struct device_node *remote_node;
 
-	remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+	remote_node = of_graph_get_remote_node(out->dev->of_node,
+					       ffs(out->of_ports) - 1, 0);
 	if (!remote_node) {
 		dev_dbg(out->dev, "failed to find video sink\n");
 		return 0;
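
of_graph_get_remote_node() takes a port index, and ffs() returns the 1-based position of the lowest set bit, so ffs(out->of_ports) - 1 selects the first port the output actually declares instead of hard-coding port 0. Worked example: an encoder whose only port is number 2 has of_ports == BIT(2) == 0x4, ffs(0x4) == 3, and the call now resolves port 2 as intended.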
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..1bad0a2cc5c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
 	if (omapdss_is_initialized() == false)
 		return -EPROBE_DEFER;
 
-	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
 		return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..952201c6d821 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
 static struct drm_driver qxl_driver;
 static struct pci_driver qxl_pci_driver;
 
+static bool is_vga(struct pci_dev *pdev)
+{
+	return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
+}
+
 static int
 qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		goto disable_pci;
 
+	if (is_vga(pdev)) {
+		ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
+		if (ret) {
+			DRM_ERROR("can't get legacy vga ioports\n");
+			goto disable_pci;
+		}
+	}
+
 	ret = qxl_device_init(qdev, &qxl_driver, pdev);
 	if (ret)
-		goto disable_pci;
+		goto put_vga;
 
 	ret = qxl_modeset_init(qdev);
 	if (ret)
@@ -105,6 +118,9 @@ modeset_cleanup:
 	qxl_modeset_fini(qdev);
 unload:
 	qxl_device_fini(qdev);
+put_vga:
+	if (is_vga(pdev))
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 disable_pci:
 	pci_disable_device(pdev);
 free_dev:
@@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev)
 
 	qxl_modeset_fini(qdev);
 	qxl_device_fini(qdev);
+	if (is_vga(pdev))
+		vga_put(pdev, VGA_RSRC_LEGACY_IO);
 
 	dev->dev_private = NULL;
 	kfree(qdev);
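
The is_vga() helper compares against PCI_CLASS_DISPLAY_VGA << 8 because pci_dev->class packs the 24-bit (base class, subclass, prog-if) triple, while PCI_CLASS_DISPLAY_VGA is the 16-bit base/subclass value 0x0300; the shift aligns it, and the equality additionally demands a zero programming interface:

	/* 0x0300 << 8 == 0x030000: VGA-compatible controller, prog-if 0 */
	return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;

Taking VGA_RSRC_LEGACY_IO only for such devices keeps qxl from claiming legacy VGA ports on devices that do not decode them.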
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..082d02c84024 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
 
 	/* Locate the companion LVDS encoder for dual-link operation, if any. */
 	companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
-	if (!companion) {
-		dev_err(dev, "Companion LVDS encoder not found\n");
-		return -ENXIO;
-	}
+	if (!companion)
+		return 0;
 
 	/*
 	 * Sanity check: the companion encoder must have the same compatible
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index b45824ec7c8f..6d61a0eb5d64 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -9,6 +9,13 @@
 
 #define cmdline_test(test)		selftest(test, test)
 
+cmdline_test(drm_cmdline_test_force_d_only)
+cmdline_test(drm_cmdline_test_force_D_only_dvi)
+cmdline_test(drm_cmdline_test_force_D_only_hdmi)
+cmdline_test(drm_cmdline_test_force_D_only_not_digital)
+cmdline_test(drm_cmdline_test_force_e_only)
+cmdline_test(drm_cmdline_test_margin_only)
+cmdline_test(drm_cmdline_test_interlace_only)
 cmdline_test(drm_cmdline_test_res)
 cmdline_test(drm_cmdline_test_res_missing_x)
 cmdline_test(drm_cmdline_test_res_missing_y)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 14c96edb13df..013de9d27c35 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -17,6 +17,136 @@
 
 static const struct drm_connector no_connector = {};
 
+static int drm_cmdline_test_force_e_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static const struct drm_connector connector_hdmi = {
+	.connector_type	= DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+							   &connector_hdmi,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+	return 0;
+}
+
+static const struct drm_connector connector_dvi = {
+	.connector_type	= DRM_MODE_CONNECTOR_DVII,
+};
+
+static int drm_cmdline_test_force_D_only_dvi(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+							   &connector_dvi,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+	return 0;
+}
+
+static int drm_cmdline_test_force_d_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_OFF);
+
+	return 0;
+}
+
+static int drm_cmdline_test_margin_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(drm_mode_parse_command_line_for_connector("m",
+							  &no_connector,
+							  &mode));
+
+	return 0;
+}
+
+static int drm_cmdline_test_interlace_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(drm_mode_parse_command_line_for_connector("i",
+							  &no_connector,
+							  &mode));
+
+	return 0;
+}
+
 static int drm_cmdline_test_res(void *ignored)
 {
 	struct drm_cmdline_mode mode = { };
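
Read alongside the drm_modes.c change, these selftests pin down the new behaviour of bare single-character options in the video= kernel parameter: a trailing force flag still works without a mode, e.g.

	video=HDMI-A-1:e	# force the connector on
	video=HDMI-A-1:d	# force it off
	video=HDMI-A-1:D	# force on; digital for DVI-I/HDMI-B connectors

while freestanding "m" or "i", which only make sense attached to a resolution, are now rejected instead of being misread as named modes.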
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..df0cc8f46d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
 		/* R and B components are only 5 bits deep */
 		val |= SUN4I_TCON0_FRM_CTL_MODE_R;
 		val |= SUN4I_TCON0_FRM_CTL_MODE_B;
+		/* Fall through */
 	case MEDIA_BUS_FMT_RGB666_1X18:
 	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
 		/* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..b889ad3e86e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
 			ret = sun6i_dsi_dcs_read(dsi, msg);
 			break;
 		}
+		/* Else, fall through */
 
 	default:
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..09b526518f5a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 		.interruptible = false,
 		.no_wait_gpu = false
 	};
+	size_t max_segment;
 
 	/* wtf swapping */
 	if (bo->pages)
@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 	if (!bo->pages)
 		goto out;
 
-	ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
-					nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	max_segment = virtio_max_dma_size(qdev->vdev);
+	max_segment &= PAGE_MASK;
+	if (max_segment > SCATTERLIST_MAX_SEGMENT)
+		max_segment = SCATTERLIST_MAX_SEGMENT;
+	ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
+					  nr_pages << PAGE_SHIFT,
+					  max_segment, GFP_KERNEL);
 	if (ret)
 		goto out;
 	return 0;
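
__sg_alloc_table_from_pages() is the variant of sg_alloc_table_from_pages() that takes an explicit segment-size cap, so no coalesced scatterlist entry exceeds what the transport can address. The hunk rounds the device limit down to a page multiple and caps it at the scatterlist maximum; as a sketch, the same clamp can be written in one line:

	max_segment = min_t(size_t,
			    virtio_max_dma_size(qdev->vdev) & PAGE_MASK,
			    SCATTERLIST_MAX_SEGMENT);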
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 59e9d05ab928..0af048d1a815 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		     !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 	if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
 		kfree(reply);
-
+		reply = NULL;
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
 			/* A checkpoint occurred. Retry. */
 			continue;
@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
 	if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 		kfree(reply);
-
+		reply = NULL;
 		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
 			/* A checkpoint occurred. Retry. */
 			continue;
@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		break;
 	}
 
-	if (retries == RETRIES) {
-		kfree(reply);
+	if (!reply)
 		return -EINVAL;
-	}
 
 	*msg_len = reply_len;
 	*msg = reply;
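
Setting reply = NULL immediately after each kfree() lets the single "if (!reply)" after the loop stand in for counting exhausted retries, and removes any window for a double free or use of the stale pointer. The idiom, reduced to a sketch with hypothetical helpers:

	void *reply = NULL;

	for (retries = 0; retries < RETRIES; retries++) {
		reply = recv_reply(channel);
		if (!reply_ok(reply)) {
			kfree(reply);
			reply = NULL;	/* forget the freed pointer */
			continue;
		}
		break;
	}
	if (!reply)
		return -EINVAL;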
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 2310c96ccf4a..db1b55df0d13 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
 
 	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
 
-	cp2112_gpio_direction_input(gc, d->hwirq);
-
 	if (!dev->gpio_poll) {
 		dev->gpio_poll = true;
 		schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
 		return PTR_ERR(dev->desc[pin]);
 	}
 
+	ret = cp2112_gpio_direction_input(&dev->gc, pin);
+	if (ret < 0) {
+		dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
+		goto err_desc;
+	}
+
 	ret = gpiochip_lock_as_irq(&dev->gc, pin);
 	if (ret) {
 		dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 21268c9fa71a..0179f7ed77e5 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = {
 
 	{ L27MHZ_DEVICE(HID_ANY_ID) },
 
-	{ /* Logitech G203/Prodigy Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
-	{ /* Logitech G302 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
-	{ /* Logitech G303 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
-	{ /* Logitech G400 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
 	{ /* Logitech G403 Wireless Gaming Mouse over USB */
 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
-	{ /* Logitech G403 Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
-	{ /* Logitech G403 Hero Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
-	{ /* Logitech G502 Proteus Core Gaming Mouse */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
-	{ /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
-	{ /* Logitech G502 Hero Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
-	{ /* Logitech G700 Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
-	{ /* Logitech G700s Gaming Mouse over USB */
-	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
 	{ /* Logitech G703 Gaming Mouse over USB */
 	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
 	{ /* Logitech G703 Hero Gaming Mouse over USB */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1065692f90e2..5792a104000a 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,6 +24,7 @@
 #define ICL_MOBILE_DEVICE_ID	0x34FC
 #define SPT_H_DEVICE_ID		0xA135
 #define CML_LP_DEVICE_ID	0x02FC
+#define EHL_Ax_DEVICE_ID	0x4BB3
 
 #define	REVISION_ID_CHT_A0	0x6
 #define	REVISION_ID_CHT_Ax_SI	0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aa80b4d3b740..279567baca3d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 7a8ddc999a8e..1713235d28cb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
 			y >>= 1;
 			distance >>= 1;
 		}
+		if (features->type == INTUOSHT2)
+			distance = features->distance_max - distance;
 		input_report_abs(input, ABS_X, x);
 		input_report_abs(input, ABS_Y, y);
 		input_report_abs(input, ABS_DISTANCE, distance);
@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
 	input_report_key(input, BTN_BASE2, (data[11] & 0x02));
 
 	if (data[12] & 0x80)
-		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
 	else
 		input_report_abs(input, ABS_WHEEL, 0);
 
@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 	}
 	if (wacom->tool[0]) {
 		input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
-		if (wacom->features.type == INTUOSP2_BT) {
+		if (wacom->features.type == INTUOSP2_BT ||
+		    wacom->features.type == INTUOSP2S_BT) {
 			input_report_abs(pen_input, ABS_DISTANCE,
 					 range ? frame[13] : wacom->features.distance_max);
 		} else {
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5f9505a087f6..23f358cb7f49 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -26,7 +26,7 @@
 
 static unsigned long virt_to_hvpfn(void *addr)
 {
-	unsigned long paddr;
+	phys_addr_t paddr;
 
 	if (is_vmalloc_addr(addr))
 		paddr = page_to_phys(vmalloc_to_page(addr)) +
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 362e70e9d145..fb16a622e8ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -146,8 +146,6 @@ struct hv_context {
 	 */
 	u64 guestid;
 
-	void *tsc_page;
-
 	struct hv_per_cpu_context __percpu *cpu_context;
 
 	/*
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c0378c3de9a4..91dfeba62485 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -165,6 +165,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		.driver_data = (kernel_ulong_t)0,
 	},
 	{
+		/* Lewisburg PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa226),
+		.driver_data = (kernel_ulong_t)0,
+	},
+	{
 		/* Gemini Lake */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -199,6 +204,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
 		.driver_data = (kernel_ulong_t)&intel_th_2x,
 	},
+	{
+		/* Tiger Lake PCH */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa0a6),
+		.driver_data = (kernel_ulong_t)&intel_th_2x,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e55b902560de..181e7ff1ec4f 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1276,7 +1276,6 @@ int stm_source_register_device(struct device *parent,
 
 err:
 	put_device(&src->dev);
-	kfree(src);
 
 	return err;
 }
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index d7fd76baec92..19ef2b0c682a 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -790,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
 
 static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
 {
-	u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	u32 val;
+
+	/* We do not support the SMBUS Quick command */
+	val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
 
 	if (adap->algo->reg_slave)
 		val |= I2C_FUNC_SLAVE;
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index e7f9305b2dd9..f5f001738df5 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 
 	dev->disable_int(dev);
 	dev->disable(dev);
+	synchronize_irq(dev->irq);
 	dev->slave = NULL;
 	pm_runtime_put(dev->dev);
 
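
The ordering in this hunk is the point: interrupts are disabled in the device first, then synchronize_irq() waits for any handler already running on another CPU to complete, and only then is dev->slave cleared, so the ISR can never dereference a NULL slave mid-flight. Schematically (a sketch of the sequence, with comments added):

	dev->disable_int(dev);		/* no new IRQs from the device   */
	dev->disable(dev);
	synchronize_irq(dev->irq);	/* drain handlers already running */
	dev->slave = NULL;		/* now safe to drop the reference */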
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f2956936c3f2..2e08b4722dc4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
 	int i;
 
 	status = acpi_get_object_info(obj_handle, &info);
-	if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID))
+	if (ACPI_FAILURE(status))
 		return AE_OK;
 
+	if (!(info->valid & ACPI_VALID_HID))
+		goto smo88xx_not_found;
+
 	hid = info->hardware_id.string;
 	if (!hid)
-		return AE_OK;
+		goto smo88xx_not_found;
 
 	i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
 	if (i < 0)
-		return AE_OK;
+		goto smo88xx_not_found;
+
+	kfree(info);
 
 	*((bool *)return_value) = true;
 	return AE_CTRL_TERMINATE;
+
+smo88xx_not_found:
+	kfree(info);
+	return AE_OK;
 }
 
 static bool is_dell_system_with_lis3lv02d(void)
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 252edb433fdf..29eae1bf4f86 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
 	.max_num_msgs = 255,
 };
 
+static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
+	.flags = I2C_AQ_NO_ZERO_LEN,
+};
+
 static const struct mtk_i2c_compatible mt2712_compat = {
 	.regs = mt_i2c_regs_v1,
 	.pmic_i2c = 0,
@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
 };
 
 static const struct mtk_i2c_compatible mt8183_compat = {
+	.quirks = &mt8183_i2c_quirks,
 	.regs = mt_i2c_regs_v2,
 	.pmic_i2c = 0,
 	.dcm = 0,
@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
 
 static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
 {
-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+	if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+		return I2C_FUNC_I2C |
+			(I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+	else
+		return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm mtk_i2c_algorithm = {
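
I2C_FUNC_SMBUS_EMUL bundles several I2C_FUNC_SMBUS_* capability bits, so a controller that cannot issue a zero-length transfer (which is all SMBus Quick is) advertises the bundle minus that one bit, the same construction the bcm-iproc hunk earlier uses:

	/* everything in the emulated-SMBus bundle except Quick */
	u32 funcs = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);

User-space probes can then fall back to a read-byte transaction rather than Quick on these buses.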
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c46c4bddc7ca..cba325eb852f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -91,7 +91,7 @@
 #define SB800_PIIX4_PORT_IDX_MASK	0x06
 #define SB800_PIIX4_PORT_IDX_SHIFT	1
 
-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
+/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
 #define SB800_PIIX4_PORT_IDX_KERNCZ		0x02
 #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ	0x18
 #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ	3
@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
 	/* Find which register is used for port selection */
 	if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
 	    PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
-		switch (PIIX4_dev->device) {
-		case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+		if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
+		    (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+		     PIIX4_dev->revision >= 0x1F)) {
 			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
 			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
-			break;
-		case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
-		default:
+		} else {
 			piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
 			piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
 			piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
-			break;
 		}
 	} else {
 		if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f26ed495d384..9c440fa6a3dd 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
  */
 void i2c_unregister_device(struct i2c_client *client)
 {
-	if (!client)
+	if (IS_ERR_OR_NULL(client))
 		return;
 
 	if (client->dev.of_node) {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 19f1730a4f24..a68d0ccf67a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
 	if (ret)
 		goto err;
 
-	cma_configfs_init();
+	ret = cma_configfs_init();
+	if (ret)
+		goto err_ib;
 
 	return 0;
 
+err_ib:
+	ib_unregister_client(&cma_client);
 err:
 	unregister_netdevice_notifier(&cma_nb);
 	ib_sa_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index b79890739a2c..af8c85d18e62 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
 	struct auto_mode_param *param = &counter->mode.param;
 	bool match = true;
 
-	if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
+	if (!rdma_is_visible_in_pid_ns(&qp->res))
 		return false;
 
-	/* Ensure that counter belong to right PID */
-	if (!rdma_is_kernel_res(&counter->res) &&
-	    !rdma_is_kernel_res(&qp->res) &&
-	    (task_pid_vnr(counter->res.task) != current->pid))
+	/* Ensure that counter belongs to the right PID */
+	if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
 		return false;
 
 	if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
 	return qp;
 
 err:
-	rdma_restrack_put(&qp->res);
+	rdma_restrack_put(res);
 	return NULL;
 }
 
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 87d40d1ecdde..020c26976558 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
 	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
 		if (!names[i])
 			continue;
-		curr = rdma_restrack_count(device, i,
-					   task_active_pid_ns(current));
+		curr = rdma_restrack_count(device, i);
 		ret = fill_res_info_entry(msg, names[i], curr);
 		if (ret)
 			goto err;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
  * rdma_restrack_count() - the current usage of specific object
  * @dev:  IB device
  * @type: actual type of object to operate
- * @ns:   PID namespace
  */
-int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
-			struct pid_namespace *ns)
+int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
 {
 	struct rdma_restrack_root *rt = &dev->res[type];
 	struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
 
 	xa_lock(&rt->xa);
 	xas_for_each(&xas, e, U32_MAX) {
-		if (ns == &init_pid_ns ||
-		    (!rdma_is_kernel_res(e) &&
-		     ns == task_active_pid_ns(e->task)))
-			cnt++;
+		if (!rdma_is_visible_in_pid_ns(e))
+			continue;
+		cnt++;
 	}
 	xa_unlock(&rt->xa);
 	return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
 	 */
 	if (rdma_is_kernel_res(res))
 		return task_active_pid_ns(current) == &init_pid_ns;
-	return task_active_pid_ns(current) == task_active_pid_ns(res->task);
+
+	/* PID 0 means that resource is not found in current namespace */
+	return task_pid_vnr(res->task);
 }
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..56553668256f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	int i;
-	int n;
+	int i, n = 0;
 	struct scatterlist *sg;
 
-	if (umem->is_odp)
-		return ib_umem_num_pages(umem);
-
-	n = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
 		n += sg_dma_len(sg) >> PAGE_SHIFT;
 
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 		spin_unlock_irqrestore(&cmdq->lock, flags);
 		return -EBUSY;
 	}
+
+	size = req->cmd_size;
+	/* change the cmd_size to the number of 16byte cmdq unit.
+	 * req->cmd_size is modified here
+	 */
+	bnxt_qplib_set_cmd_slots(req);
+
 	memset(resp, 0, sizeof(*resp));
 	crsqe->resp = (struct creq_qp_event *)resp;
 	crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 
 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
-	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
 	do {								\
 		memset(&(req), 0, sizeof((req)));			\
 		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
-		(req).cmd_size = (sizeof((req)) +			\
-				  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
-				 BNXT_QPLIB_CMDQE_UNITS;		\
+		(req).cmd_size = sizeof((req));				\
 		(req).flags = cpu_to_le16(cmd_flags);			\
 	} while (0)
 
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
95 BNXT_QPLIB_CMDQE_UNITS); 93 BNXT_QPLIB_CMDQE_UNITS);
96} 94}
97 95
 96/* Set the cmd_size to the number of CMDQE units, rounded up */
97static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
98{
99 req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
100 BNXT_QPLIB_CMDQE_UNITS;
101}
102
98#define MAX_CMDQ_IDX(depth) ((depth) - 1) 103#define MAX_CMDQ_IDX(depth) ((depth) - 1)
99 104
100static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) 105static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
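
bnxt_qplib_set_cmd_slots() is a plain round-up division: the macro above now leaves req->cmd_size in bytes, __send_message() records that byte count first (size = req->cmd_size), and only then converts the field to 16-byte command-queue slots. A self-contained sketch of the rounding (the 16-byte unit size is taken from the comment in the __send_message() hunk):

	#include <stdio.h>

	#define CMDQE_UNITS 16U	/* assumed slot size, per the comment above */

	static unsigned int cmd_slots(unsigned int cmd_size_bytes)
	{
		/* round up: 1..16 bytes -> 1 slot, 17..32 -> 2, ... */
		return (cmd_size_bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
	}

	int main(void)
	{
		printf("%u %u %u\n", cmd_slots(16), cmd_slots(17), cmd_slots(40));
		/* prints "1 2 3" */
		return 0;
	}
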
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
141 if (!data) 141 if (!data)
142 return -ENOMEM; 142 return -ENOMEM;
143 copy = min(len, datalen - 1); 143 copy = min(len, datalen - 1);
144 if (copy_from_user(data, buf, copy)) 144 if (copy_from_user(data, buf, copy)) {
145 return -EFAULT; 145 ret = -EFAULT;
146 goto free_data;
147 }
146 148
147 ret = debugfs_file_get(file->f_path.dentry); 149 ret = debugfs_file_get(file->f_path.dentry);
148 if (unlikely(ret)) 150 if (unlikely(ret))
149 return ret; 151 goto free_data;
150 ptr = data; 152 ptr = data;
151 token = ptr; 153 token = ptr;
152 for (ptr = data; *ptr; ptr = end + 1, token = ptr) { 154 for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
195 ret = len; 197 ret = len;
196 198
197 debugfs_file_put(file->f_path.dentry); 199 debugfs_file_put(file->f_path.dentry);
200free_data:
198 kfree(data); 201 kfree(data);
199 return ret; 202 return ret;
200} 203}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
214 return -ENOMEM; 217 return -ENOMEM;
215 ret = debugfs_file_get(file->f_path.dentry); 218 ret = debugfs_file_get(file->f_path.dentry);
216 if (unlikely(ret)) 219 if (unlikely(ret))
217 return ret; 220 goto free_data;
218 bit = find_first_bit(fault->opcodes, bitsize); 221 bit = find_first_bit(fault->opcodes, bitsize);
219 while (bit < bitsize) { 222 while (bit < bitsize) {
220 zero = find_next_zero_bit(fault->opcodes, bitsize, bit); 223 zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
232 data[size - 1] = '\n'; 235 data[size - 1] = '\n';
233 data[size] = '\0'; 236 data[size] = '\0';
234 ret = simple_read_from_buffer(buf, len, pos, data, size); 237 ret = simple_read_from_buffer(buf, len, pos, data, size);
238free_data:
235 kfree(data); 239 kfree(data);
236 return ret; 240 return ret;
237} 241}
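
Both hunks replace early returns after the kzalloc with a free_data label, so the buffer is released on the copy_from_user() and debugfs_file_get() failure paths as well. A userspace sketch of the same unwind pattern (names illustrative):

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	static int parse_opcodes(const char *src, size_t len)
	{
		int ret;
		char *data = calloc(1, len + 1);

		if (!data)
			return -ENOMEM;
		if (!src) {		/* stand-in for a failed copy_from_user() */
			ret = -EFAULT;
			goto free_data;
		}
		memcpy(data, src, len);
		/* ... tokenize data ... */
		ret = (int)len;
	free_data:
		free(data);	/* reached on success and on both error paths */
		return ret;
	}

	int main(void)
	{
		return parse_opcodes("a,b", 3) < 0;	/* exits 0 on success */
	}
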
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 996fc298207e..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2574 hfi1_kern_clear_hw_flow(priv->rcd, qp); 2574 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2575} 2575}
2576 2576
2577static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, 2577static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
2578 struct hfi1_packet *packet, u8 rcv_type,
2579 u8 opcode)
2580{ 2578{
2581 struct rvt_qp *qp = packet->qp; 2579 struct rvt_qp *qp = packet->qp;
2582 struct hfi1_qp_priv *qpriv = qp->priv;
2583 u32 ipsn;
2584 struct ib_other_headers *ohdr = packet->ohdr;
2585 struct rvt_ack_entry *e;
2586 struct tid_rdma_request *req;
2587 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2588 u32 i;
2589 2580
2590 if (rcv_type >= RHF_RCV_TYPE_IB) 2581 if (rcv_type >= RHF_RCV_TYPE_IB)
2591 goto done; 2582 goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2602 if (rcv_type == RHF_RCV_TYPE_EAGER) { 2593 if (rcv_type == RHF_RCV_TYPE_EAGER) {
2603 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); 2594 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2604 hfi1_schedule_send(qp); 2595 hfi1_schedule_send(qp);
2605 goto done_unlock;
2606 }
2607
2608 /*
2609 * For TID READ response, error out QP after freeing the tid
2610 * resources.
2611 */
2612 if (opcode == TID_OP(READ_RESP)) {
2613 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2614 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2615 cmp_psn(ipsn, qp->s_psn) < 0) {
2616 hfi1_kern_read_tid_flow_free(qp);
2617 spin_unlock(&qp->s_lock);
2618 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2619 goto done;
2620 }
2621 goto done_unlock;
2622 }
2623
2624 /*
2625 * Error out the qp for TID RDMA WRITE
2626 */
2627 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2628 for (i = 0; i < rvt_max_atomic(rdi); i++) {
2629 e = &qp->s_ack_queue[i];
2630 if (e->opcode == TID_OP(WRITE_REQ)) {
2631 req = ack_to_tid_req(e);
2632 hfi1_kern_exp_rcv_clear_all(req);
2633 }
2634 } 2596 }
2635 spin_unlock(&qp->s_lock);
2636 rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2637 goto done;
2638 2597
2639done_unlock: 2598 /* Since no payload is delivered, just drop the packet */
2640 spin_unlock(&qp->s_lock); 2599 spin_unlock(&qp->s_lock);
2641done: 2600done:
2642 return true; 2601 return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2687 u32 fpsn; 2646 u32 fpsn;
2688 2647
2689 lockdep_assert_held(&qp->r_lock); 2648 lockdep_assert_held(&qp->r_lock);
2649 spin_lock(&qp->s_lock);
2690 /* If the psn is out of valid range, drop the packet */ 2650 /* If the psn is out of valid range, drop the packet */
2691 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || 2651 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2692 cmp_psn(ibpsn, qp->s_psn) > 0) 2652 cmp_psn(ibpsn, qp->s_psn) > 0)
2693 return ret; 2653 goto s_unlock;
2694 2654
2695 spin_lock(&qp->s_lock);
2696 /* 2655 /*
2697 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2656 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2698 * requests and implicitly NAK RDMA read and atomic requests issued 2657 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2740 2699
2741 wqe = do_rc_completion(qp, wqe, ibp); 2700 wqe = do_rc_completion(qp, wqe, ibp);
2742 if (qp->s_acked == qp->s_tail) 2701 if (qp->s_acked == qp->s_tail)
2743 break; 2702 goto s_unlock;
2744 } 2703 }
2745 2704
2705 if (qp->s_acked == qp->s_tail)
2706 goto s_unlock;
2707
2746 /* Handle the eflags for the request */ 2708 /* Handle the eflags for the request */
2747 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) 2709 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2748 goto s_unlock; 2710 goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2922 if (lnh == HFI1_LRH_GRH) 2884 if (lnh == HFI1_LRH_GRH)
2923 goto r_unlock; 2885 goto r_unlock;
2924 2886
2925 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) 2887 if (tid_rdma_tid_err(packet, rcv_type))
2926 goto r_unlock; 2888 goto r_unlock;
2927 } 2889 }
2928 2890
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2942 */ 2904 */
2943 spin_lock(&qp->s_lock); 2905 spin_lock(&qp->s_lock);
2944 qpriv = qp->priv; 2906 qpriv = qp->priv;
2907 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
2908 qpriv->r_tid_tail == qpriv->r_tid_head)
2909 goto unlock;
2945 e = &qp->s_ack_queue[qpriv->r_tid_tail]; 2910 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2911 if (e->opcode != TID_OP(WRITE_REQ))
2912 goto unlock;
2946 req = ack_to_tid_req(e); 2913 req = ack_to_tid_req(e);
2914 if (req->comp_seg == req->cur_seg)
2915 goto unlock;
2947 flow = &req->flows[req->clear_tail]; 2916 flow = &req->flows[req->clear_tail];
2948 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); 2917 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2949 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); 2918 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4509 struct rvt_swqe *wqe; 4478 struct rvt_swqe *wqe;
4510 struct tid_rdma_request *req; 4479 struct tid_rdma_request *req;
4511 struct tid_rdma_flow *flow; 4480 struct tid_rdma_flow *flow;
4512 u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; 4481 u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
4513 unsigned long flags; 4482 unsigned long flags;
4514 u16 fidx; 4483 u16 fidx;
4515 4484
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4538 ack_kpsn--; 4507 ack_kpsn--;
4539 } 4508 }
4540 4509
4510 if (unlikely(qp->s_acked == qp->s_tail))
4511 goto ack_op_err;
4512
4541 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 4513 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4542 4514
4543 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 4515 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4550 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); 4522 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4551 4523
4552 /* Drop stale ACK/NAK */ 4524 /* Drop stale ACK/NAK */
4553 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0) 4525 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4526 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4554 goto ack_op_err; 4527 goto ack_op_err;
4555 4528
4556 while (cmp_psn(ack_kpsn, 4529 while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
4712 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 4685 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4713 IB_AETH_CREDIT_MASK) { 4686 IB_AETH_CREDIT_MASK) {
4714 case 0: /* PSN sequence error */ 4687 case 0: /* PSN sequence error */
4688 if (!req->flows)
4689 break;
4715 flow = &req->flows[req->acked_tail]; 4690 flow = &req->flows[req->acked_tail];
4691 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4692 if (cmp_psn(psn, flpsn) > 0)
4693 break;
4716 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, 4694 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4717 flow); 4695 flow);
4718 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); 4696 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
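
The new guards above (the s_acked == s_tail check before rvt_get_swqe_ptr(), the extended stale-ACK test, and the flpsn bound in the sequence-error case) all lean on wraparound-safe PSN ordering. A userspace analogue of such a comparison, using a 24-bit width for illustration (the driver's own cmp_psn()/full_flow_psn() widths may differ):

	#include <stdint.h>
	#include <stdio.h>

	/* sign-extend the 24-bit difference: <0 before, 0 equal, >0 after */
	static int32_t cmp_psn24(uint32_t a, uint32_t b)
	{
		return (int32_t)((a - b) << 8) >> 8;
	}

	int main(void)
	{
		/* 0x000001 follows 0xfffffe despite the numeric wrap */
		printf("%d\n", cmp_psn24(0x000001, 0xfffffe) > 0);	/* prints 1 */
		return 0;
	}
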
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
1677 tx_buf_size, DMA_TO_DEVICE); 1677 tx_buf_size, DMA_TO_DEVICE);
1678 kfree(tun_qp->tx_ring[i].buf.addr); 1678 kfree(tun_qp->tx_ring[i].buf.addr);
1679 } 1679 }
1680 kfree(tun_qp->tx_ring);
1681 tun_qp->tx_ring = NULL;
1682 i = MLX4_NUM_TUNNEL_BUFS; 1680 i = MLX4_NUM_TUNNEL_BUFS;
1683err: 1681err:
1684 while (i > 0) { 1682 while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
1687 rx_buf_size, DMA_FROM_DEVICE); 1685 rx_buf_size, DMA_FROM_DEVICE);
1688 kfree(tun_qp->ring[i].addr); 1686 kfree(tun_qp->ring[i].addr);
1689 } 1687 }
1688 kfree(tun_qp->tx_ring);
1689 tun_qp->tx_ring = NULL;
1690 kfree(tun_qp->ring); 1690 kfree(tun_qp->ring);
1691 tun_qp->ring = NULL; 1691 tun_qp->ring = NULL;
1692 return -ENOMEM; 1692 return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e12a4404096b..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1024 1024
1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1026 if (MLX5_CAP_GEN(mdev, pg)) 1026 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1028 props->odp_caps = dev->odp_caps; 1028 props->odp_caps = dev->odp_caps;
1029 } 1029 }
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN; 6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6140 } 6140 }
6141 6141
6142 mlx5_ib_internal_fill_odp_caps(dev);
6143
6142 err = mlx5_ib_init_multiport_master(dev); 6144 err = mlx5_ib_init_multiport_master(dev);
6143 if (err) 6145 if (err)
6144 return err; 6146 return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6563 6565
6564static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6566static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6565{ 6567{
6566 mlx5_ib_internal_fill_odp_caps(dev);
6567
6568 return mlx5_ib_odp_init_one(dev); 6568 return mlx5_ib_odp_init_one(dev);
6569} 6569}
6570 6570
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
57 int entry; 57 int entry;
58 58
59 if (umem->is_odp) { 59 if (umem->is_odp) {
60 unsigned int page_shift = to_ib_umem_odp(umem)->page_shift; 60 struct ib_umem_odp *odp = to_ib_umem_odp(umem);
61 unsigned int page_shift = odp->page_shift;
61 62
62 *ncont = ib_umem_page_count(umem); 63 *ncont = ib_umem_odp_num_pages(odp);
63 *count = *ncont << (page_shift - PAGE_SHIFT); 64 *count = *ncont << (page_shift - PAGE_SHIFT);
64 *shift = page_shift; 65 *shift = page_shift;
65 if (order) 66 if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f6a53455bf8b..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1475 bool dyn_bfreg); 1475 bool dyn_bfreg);
1476 1476
1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); 1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
1478
1479static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
1480 bool do_modify_atomic)
1481{
1482 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1483 return false;
1484
1485 if (do_modify_atomic &&
1486 MLX5_CAP_GEN(dev->mdev, atomic) &&
1487 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
1488 return false;
1489
1490 return true;
1491}
1478#endif /* MLX5_IB_H */ 1492#endif /* MLX5_IB_H */
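
The helper's decision table is worth spelling out: UMR is ruled out entirely when entity-size modification is disabled, and additionally for atomic-modifying operations when the device supports atomics but forbids modifying them via UMR. A userspace mock of the same logic (the struct and function names are illustrative; the capability names mirror the hunk above):

	#include <stdbool.h>
	#include <stdio.h>

	struct caps {
		bool entity_size_disabled;
		bool atomic;
		bool modify_atomic_disabled;
	};

	static bool can_use_umr(const struct caps *c, bool do_modify_atomic)
	{
		if (c->entity_size_disabled)
			return false;
		if (do_modify_atomic && c->atomic && c->modify_atomic_disabled)
			return false;
		return true;
	}

	int main(void)
	{
		struct caps c = { false, true, true };

		printf("%d %d\n", can_use_umr(&c, true), can_use_umr(&c, false));
		/* prints "0 1": the atomic-modify path is blocked, the plain one is not */
		return 0;
	}
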
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b74fad08412f..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1293 if (err < 0) 1293 if (err < 0)
1294 return ERR_PTR(err); 1294 return ERR_PTR(err);
1295 1295
1296 use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && 1296 use_umr = mlx5_ib_can_use_umr(dev, true);
1297 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
1298 !MLX5_CAP_GEN(dev->mdev, atomic));
1299 1297
1300 if (order <= mr_cache_max_order(dev) && use_umr) { 1298 if (order <= mr_cache_max_order(dev) && use_umr) {
1301 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1299 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1448 goto err; 1446 goto err;
1449 } 1447 }
1450 1448
1451 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1449 if (!mlx5_ib_can_use_umr(dev, true) ||
1450 (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
1452 /* 1451 /*
1453 * UMR can't be used - MKey needs to be replaced. 1452 * UMR can't be used - MKey needs to be replaced.
1454 */ 1453 */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 1d257d1b3b0d..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
301 301
302 memset(caps, 0, sizeof(*caps)); 302 memset(caps, 0, sizeof(*caps));
303 303
304 if (!MLX5_CAP_GEN(dev->mdev, pg)) 304 if (!MLX5_CAP_GEN(dev->mdev, pg) ||
305 !mlx5_ib_can_use_umr(dev, true))
305 return; 306 return;
306 307
307 caps->general_caps = IB_ODP_SUPPORT; 308 caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
355 356
356 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && 357 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
357 MLX5_CAP_GEN(dev->mdev, null_mkey) && 358 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
358 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 359 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
360 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
359 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT; 361 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
360 362
361 return; 363 return;
@@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1622{ 1624{
1623 int ret = 0; 1625 int ret = 0;
1624 1626
1625 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) 1627 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1626 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1628 return ret;
1629
1630 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1627 1631
1628 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { 1632 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1629 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); 1633 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1633 } 1637 }
1634 } 1638 }
1635 1639
1636 if (!MLX5_CAP_GEN(dev->mdev, pg))
1637 return ret;
1638
1639 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq); 1640 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1640 1641
1641 return ret; 1642 return ret;
@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1643 1644
1644void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1645void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1645{ 1646{
1646 if (!MLX5_CAP_GEN(dev->mdev, pg)) 1647 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1647 return; 1648 return;
1648 1649
1649 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq); 1650 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 379328b2598f..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
4162 MLX5_IB_UMR_OCTOWORD; 4162 MLX5_IB_UMR_OCTOWORD;
4163} 4163}
4164 4164
4165static __be64 frwr_mkey_mask(void) 4165static __be64 frwr_mkey_mask(bool atomic)
4166{ 4166{
4167 u64 result; 4167 u64 result;
4168 4168
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
4175 MLX5_MKEY_MASK_LW | 4175 MLX5_MKEY_MASK_LW |
4176 MLX5_MKEY_MASK_RR | 4176 MLX5_MKEY_MASK_RR |
4177 MLX5_MKEY_MASK_RW | 4177 MLX5_MKEY_MASK_RW |
4178 MLX5_MKEY_MASK_A |
4179 MLX5_MKEY_MASK_SMALL_FENCE | 4178 MLX5_MKEY_MASK_SMALL_FENCE |
4180 MLX5_MKEY_MASK_FREE; 4179 MLX5_MKEY_MASK_FREE;
4181 4180
4181 if (atomic)
4182 result |= MLX5_MKEY_MASK_A;
4183
4182 return cpu_to_be64(result); 4184 return cpu_to_be64(result);
4183} 4185}
4184 4186
@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
4204} 4206}
4205 4207
4206static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 4208static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4207 struct mlx5_ib_mr *mr, u8 flags) 4209 struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4208{ 4210{
4209 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4211 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4210 4212
@@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4212 4214
4213 umr->flags = flags; 4215 umr->flags = flags;
4214 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4216 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4215 umr->mkey_mask = frwr_mkey_mask(); 4217 umr->mkey_mask = frwr_mkey_mask(atomic);
4216} 4218}
4217 4219
4218static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 4220static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4811{ 4813{
4812 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 4814 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4813 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 4815 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4816 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4814 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4817 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4815 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 4818 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4819 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4816 u8 flags = 0; 4820 u8 flags = 0;
4817 4821
4822 if (!mlx5_ib_can_use_umr(dev, atomic)) {
4823 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4824 "Fast update of %s for MR is disabled\n",
4825 (MLX5_CAP_GEN(dev->mdev,
4826 umr_modify_entity_size_disabled)) ?
4827 "entity size" :
4828 "atomic access");
4829 return -EINVAL;
4830 }
4831
4818 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 4832 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4819 mlx5_ib_warn(to_mdev(qp->ibqp.device), 4833 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4820 "Invalid IB_SEND_INLINE send flag\n"); 4834 "Invalid IB_SEND_INLINE send flag\n");
@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4826 if (umr_inline) 4840 if (umr_inline)
4827 flags |= MLX5_UMR_INLINE; 4841 flags |= MLX5_UMR_INLINE;
4828 4842
4829 set_reg_umr_seg(*seg, mr, flags); 4843 set_reg_umr_seg(*seg, mr, flags, atomic);
4830 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4844 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4831 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4845 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4832 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4846 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
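
frwr_mkey_mask() now takes an atomic flag and only requests the atomic-permission bit when the work request actually asked for IB_ACCESS_REMOTE_ATOMIC (see set_reg_wr() above), pairing with the new mlx5_ib_can_use_umr() gate. The construction pattern, with illustrative bit values rather than the real mlx5 definitions:

	#include <stdint.h>
	#include <stdio.h>

	#define MKEY_MASK_RW	(1ULL << 0)
	#define MKEY_MASK_FREE	(1ULL << 1)
	#define MKEY_MASK_A	(1ULL << 2)	/* atomic permission */

	static uint64_t frwr_mask(int atomic)
	{
		uint64_t m = MKEY_MASK_RW | MKEY_MASK_FREE;

		if (atomic)	/* request atomic rights only when needed */
			m |= MKEY_MASK_A;
		return m;
	}

	int main(void)
	{
		printf("%#llx %#llx\n",
		       (unsigned long long)frwr_mask(0),
		       (unsigned long long)frwr_mask(1));	/* prints "0x3 0x7" */
		return 0;
	}
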
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 77b1aabf6ff3..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
138}; 138};
139 139
140struct siw_pble { 140struct siw_pble {
141 u64 addr; /* Address of assigned user buffer */ 141 dma_addr_t addr; /* Address of assigned buffer */
142 u64 size; /* Size of this entry */ 142 unsigned int size; /* Size of this entry */
143 u64 pbl_off; /* Total offset from start of PBL */ 143 unsigned long pbl_off; /* Total offset from start of PBL */
144}; 144};
145 145
146struct siw_pbl { 146struct siw_pbl {
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) 734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
735 735
736#define siw_dbg_cep(cep, fmt, ...) \ 736#define siw_dbg_cep(cep, fmt, ...) \
737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ 737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
738 cep, __func__, ##__VA_ARGS__) 738 cep, __func__, ##__VA_ARGS__)
739 739
740void siw_cq_flush(struct siw_cq *cq); 740void siw_cq_flush(struct siw_cq *cq);
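
Two themes recur through the siw hunks that follow: u64 fields that actually carry addresses are converted to and from pointers via uintptr_t (so 32-bit builds neither truncate nor warn), and debug formats move from %p to %pK, which honours the kptr_restrict sysctl instead of exposing raw kernel addresses. A userspace illustration of the pointer round-trip:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int x = 42;
		uint64_t laddr = (uintptr_t)&x;		/* store a pointer in a u64 field */
		int *p = (int *)(uintptr_t)laddr;	/* and recover it without truncation */

		printf("%d\n", *p);	/* prints "42" */
		return 0;
	}
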
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 9ce8a1b925d2..8c1931a57f4a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
355 getname_local(cep->sock, &event.local_addr); 355 getname_local(cep->sock, &event.local_addr);
356 getname_peer(cep->sock, &event.remote_addr); 356 getname_peer(cep->sock, &event.remote_addr);
357 } 357 }
358 siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", 358 siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
359 cep->qp ? qp_id(cep->qp) : -1, id, reason, status); 359 cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
360 360
361 return id->event_handler(id, &event); 361 return id->event_handler(id, &event);
362} 362}
@@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
947 siw_cep_get(new_cep); 947 siw_cep_get(new_cep);
948 new_s->sk->sk_user_data = new_cep; 948 new_s->sk->sk_user_data = new_cep;
949 949
950 siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
951
952 if (siw_tcp_nagle == false) { 950 if (siw_tcp_nagle == false) {
953 int val = 1; 951 int val = 1;
954 952
@@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
1011 cep = work->cep; 1009 cep = work->cep;
1012 1010
1013 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", 1011 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
1014 cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); 1012 cep->qp ? qp_id(cep->qp) : UINT_MAX,
1013 work->type, cep->state);
1015 1014
1016 siw_cep_set_inuse(cep); 1015 siw_cep_set_inuse(cep);
1017 1016
@@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
1145 } 1144 }
1146 if (release_cep) { 1145 if (release_cep) {
1147 siw_dbg_cep(cep, 1146 siw_dbg_cep(cep,
1148 "release: timer=%s, QP[%u], id 0x%p\n", 1147 "release: timer=%s, QP[%u]\n",
1149 cep->mpa_timer ? "y" : "n", 1148 cep->mpa_timer ? "y" : "n",
1150 cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); 1149 cep->qp ? qp_id(cep->qp) : UINT_MAX);
1151 1150
1152 siw_cancel_mpatimer(cep); 1151 siw_cancel_mpatimer(cep);
1153 1152
@@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
1211 else 1210 else
1212 delay = MPAREP_TIMEOUT; 1211 delay = MPAREP_TIMEOUT;
1213 } 1212 }
1214 siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", 1213 siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
1215 cep->qp ? qp_id(cep->qp) : -1, type, work, delay); 1214 cep->qp ? qp_id(cep->qp) : -1, type, delay);
1216 1215
1217 queue_delayed_work(siw_cm_wq, &work->work, delay); 1216 queue_delayed_work(siw_cm_wq, &work->work, delay);
1218 1217
@@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1376 } 1375 }
1377 if (v4) 1376 if (v4)
1378 siw_dbg_qp(qp, 1377 siw_dbg_qp(qp,
1379 "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", 1378 "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
1380 id, pd_len, 1379 pd_len,
1381 &((struct sockaddr_in *)(laddr))->sin_addr, 1380 &((struct sockaddr_in *)(laddr))->sin_addr,
1382 ntohs(((struct sockaddr_in *)(laddr))->sin_port), 1381 ntohs(((struct sockaddr_in *)(laddr))->sin_port),
1383 &((struct sockaddr_in *)(raddr))->sin_addr, 1382 &((struct sockaddr_in *)(raddr))->sin_addr,
1384 ntohs(((struct sockaddr_in *)(raddr))->sin_port)); 1383 ntohs(((struct sockaddr_in *)(raddr))->sin_port));
1385 else 1384 else
1386 siw_dbg_qp(qp, 1385 siw_dbg_qp(qp,
1387 "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", 1386 "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
1388 id, pd_len, 1387 pd_len,
1389 &((struct sockaddr_in6 *)(laddr))->sin6_addr, 1388 &((struct sockaddr_in6 *)(laddr))->sin6_addr,
1390 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), 1389 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
1391 &((struct sockaddr_in6 *)(raddr))->sin6_addr, 1390 &((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1508 if (rv >= 0) { 1507 if (rv >= 0) {
1509 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); 1508 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
1510 if (!rv) { 1509 if (!rv) {
1511 siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, 1510 siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
1512 qp_id(qp));
1513 siw_cep_set_free(cep); 1511 siw_cep_set_free(cep);
1514 return 0; 1512 return 0;
1515 } 1513 }
1516 } 1514 }
1517error: 1515error:
1518 siw_dbg_qp(qp, "failed: %d\n", rv); 1516 siw_dbg(id->device, "failed: %d\n", rv);
1519 1517
1520 if (cep) { 1518 if (cep) {
1521 siw_socket_disassoc(s); 1519 siw_socket_disassoc(s);
@@ -1540,7 +1538,8 @@ error:
1540 } else if (s) { 1538 } else if (s) {
1541 sock_release(s); 1539 sock_release(s);
1542 } 1540 }
1543 siw_qp_put(qp); 1541 if (qp)
1542 siw_qp_put(qp);
1544 1543
1545 return rv; 1544 return rv;
1546} 1545}
@@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1580 siw_cancel_mpatimer(cep); 1579 siw_cancel_mpatimer(cep);
1581 1580
1582 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1581 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1583 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1582 siw_dbg_cep(cep, "out of state\n");
1584 1583
1585 siw_cep_set_free(cep); 1584 siw_cep_set_free(cep);
1586 siw_cep_put(cep); 1585 siw_cep_put(cep);
@@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1601 up_write(&qp->state_lock); 1600 up_write(&qp->state_lock);
1602 goto error; 1601 goto error;
1603 } 1602 }
1604 siw_dbg_cep(cep, "id 0x%p\n", id); 1603 siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
1605 1604
1606 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { 1605 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
1607 siw_dbg_cep(cep, "peer allows GSO on TX\n"); 1606 siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1611 params->ird > sdev->attrs.max_ird) { 1610 params->ird > sdev->attrs.max_ird) {
1612 siw_dbg_cep( 1611 siw_dbg_cep(
1613 cep, 1612 cep,
1614 "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", 1613 "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
1615 id, qp_id(qp), params->ord, sdev->attrs.max_ord, 1614 qp_id(qp), params->ord, sdev->attrs.max_ord,
1616 params->ird, sdev->attrs.max_ird); 1615 params->ird, sdev->attrs.max_ird);
1617 rv = -EINVAL; 1616 rv = -EINVAL;
1618 up_write(&qp->state_lock); 1617 up_write(&qp->state_lock);
@@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1624 if (params->private_data_len > max_priv_data) { 1623 if (params->private_data_len > max_priv_data) {
1625 siw_dbg_cep( 1624 siw_dbg_cep(
1626 cep, 1625 cep,
1627 "id 0x%p, [QP %u]: private data length: %d (max %d)\n", 1626 "[QP %u]: private data length: %d (max %d)\n",
1628 id, qp_id(qp), params->private_data_len, max_priv_data); 1627 qp_id(qp), params->private_data_len, max_priv_data);
1629 rv = -EINVAL; 1628 rv = -EINVAL;
1630 up_write(&qp->state_lock); 1629 up_write(&qp->state_lock);
1631 goto error; 1630 goto error;
@@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1679 qp_attrs.flags = SIW_MPA_CRC; 1678 qp_attrs.flags = SIW_MPA_CRC;
1680 qp_attrs.state = SIW_QP_STATE_RTS; 1679 qp_attrs.state = SIW_QP_STATE_RTS;
1681 1680
1682 siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); 1681 siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
1683 1682
1684 /* Associate QP with CEP */ 1683 /* Associate QP with CEP */
1685 siw_cep_get(cep); 1684 siw_cep_get(cep);
@@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1700 if (rv) 1699 if (rv)
1701 goto error; 1700 goto error;
1702 1701
1703 siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", 1702 siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
1704 id, qp_id(qp), params->private_data_len); 1703 qp_id(qp), params->private_data_len);
1705 1704
1706 rv = siw_send_mpareqrep(cep, params->private_data, 1705 rv = siw_send_mpareqrep(cep, params->private_data,
1707 params->private_data_len); 1706 params->private_data_len);
@@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
1759 siw_cancel_mpatimer(cep); 1758 siw_cancel_mpatimer(cep);
1760 1759
1761 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1760 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1762 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1761 siw_dbg_cep(cep, "out of state\n");
1763 1762
1764 siw_cep_set_free(cep); 1763 siw_cep_set_free(cep);
1765 siw_cep_put(cep); /* put last reference */ 1764 siw_cep_put(cep); /* put last reference */
1766 1765
1767 return -ECONNRESET; 1766 return -ECONNRESET;
1768 } 1767 }
1769 siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, 1768 siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
1770 pd_len); 1769 pd_len);
1771 1770
1772 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { 1771 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1804 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, 1803 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
1805 sizeof(s_val)); 1804 sizeof(s_val));
1806 if (rv) { 1805 if (rv) {
1807 siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); 1806 siw_dbg(id->device, "setsockopt error: %d\n", rv);
1808 goto error; 1807 goto error;
1809 } 1808 }
1810 rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 1809 rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
1811 sizeof(struct sockaddr_in) : 1810 sizeof(struct sockaddr_in) :
1812 sizeof(struct sockaddr_in6)); 1811 sizeof(struct sockaddr_in6));
1813 if (rv) { 1812 if (rv) {
1814 siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); 1813 siw_dbg(id->device, "socket bind error: %d\n", rv);
1815 goto error; 1814 goto error;
1816 } 1815 }
1817 cep = siw_cep_alloc(sdev); 1816 cep = siw_cep_alloc(sdev);
@@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1824 rv = siw_cm_alloc_work(cep, backlog); 1823 rv = siw_cm_alloc_work(cep, backlog);
1825 if (rv) { 1824 if (rv) {
1826 siw_dbg(id->device, 1825 siw_dbg(id->device,
1827 "id 0x%p: alloc_work error %d, backlog %d\n", id, 1826 "alloc_work error %d, backlog %d\n",
1828 rv, backlog); 1827 rv, backlog);
1829 goto error; 1828 goto error;
1830 } 1829 }
1831 rv = s->ops->listen(s, backlog); 1830 rv = s->ops->listen(s, backlog);
1832 if (rv) { 1831 if (rv) {
1833 siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); 1832 siw_dbg(id->device, "listen error %d\n", rv);
1834 goto error; 1833 goto error;
1835 } 1834 }
1836 cep->cm_id = id; 1835 cep->cm_id = id;
@@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
1914 1913
1915 list_del(p); 1914 list_del(p);
1916 1915
1917 siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, 1916 siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
1918 cep->state);
1919 1917
1920 siw_cep_set_inuse(cep); 1918 siw_cep_set_inuse(cep);
1921 1919
@@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1952 struct net_device *dev = to_siw_dev(id->device)->netdev; 1950 struct net_device *dev = to_siw_dev(id->device)->netdev;
1953 int rv = 0, listeners = 0; 1951 int rv = 0, listeners = 0;
1954 1952
1955 siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); 1953 siw_dbg(id->device, "backlog %d\n", backlog);
1956 1954
1957 /* 1955 /*
1958 * For each attached address of the interface, create a 1956 * For each attached address of the interface, create a
@@ -1964,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1964 struct sockaddr_in s_laddr, *s_raddr; 1962 struct sockaddr_in s_laddr, *s_raddr;
1965 const struct in_ifaddr *ifa; 1963 const struct in_ifaddr *ifa;
1966 1964
1965 if (!in_dev) {
1966 rv = -ENODEV;
1967 goto out;
1968 }
1967 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); 1969 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
1968 s_raddr = (struct sockaddr_in *)&id->remote_addr; 1970 s_raddr = (struct sockaddr_in *)&id->remote_addr;
1969 1971
1970 siw_dbg(id->device, 1972 siw_dbg(id->device,
1971 "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", 1973 "laddr %pI4:%d, raddr %pI4:%d\n",
1972 id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), 1974 &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
1973 &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); 1975 &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
1974 1976
1975 rtnl_lock(); 1977 rtnl_lock();
@@ -1993,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1993 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), 1995 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
1994 *s_raddr = &to_sockaddr_in6(id->remote_addr); 1996 *s_raddr = &to_sockaddr_in6(id->remote_addr);
1995 1997
1998 if (!in6_dev) {
1999 rv = -ENODEV;
2000 goto out;
2001 }
1996 siw_dbg(id->device, 2002 siw_dbg(id->device,
1997 "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", 2003 "laddr %pI6:%d, raddr %pI6:%d\n",
1998 id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), 2004 &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
1999 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); 2005 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
2000 2006
2001 read_lock_bh(&in6_dev->lock); 2007 rtnl_lock();
2002 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { 2008 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
2003 struct sockaddr_in6 bind_addr; 2009 if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
2004 2010 continue;
2005 if (ipv6_addr_any(&s_laddr->sin6_addr) || 2011 if (ipv6_addr_any(&s_laddr->sin6_addr) ||
2006 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { 2012 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
2007 bind_addr.sin6_family = AF_INET6; 2013 struct sockaddr_in6 bind_addr = {
2008 bind_addr.sin6_port = s_laddr->sin6_port; 2014 .sin6_family = AF_INET6,
2009 bind_addr.sin6_flowinfo = 0; 2015 .sin6_port = s_laddr->sin6_port,
2010 bind_addr.sin6_addr = ifp->addr; 2016 .sin6_flowinfo = 0,
2011 bind_addr.sin6_scope_id = dev->ifindex; 2017 .sin6_addr = ifp->addr,
2018 .sin6_scope_id = dev->ifindex };
2012 2019
2013 rv = siw_listen_address(id, backlog, 2020 rv = siw_listen_address(id, backlog,
2014 (struct sockaddr *)&bind_addr, 2021 (struct sockaddr *)&bind_addr,
@@ -2017,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
2017 listeners++; 2024 listeners++;
2018 } 2025 }
2019 } 2026 }
2020 read_unlock_bh(&in6_dev->lock); 2027 rtnl_unlock();
2021
2022 in6_dev_put(in6_dev); 2028 in6_dev_put(in6_dev);
2023 } else { 2029 } else {
2024 return -EAFNOSUPPORT; 2030 rv = -EAFNOSUPPORT;
2025 } 2031 }
2032out:
2026 if (listeners) 2033 if (listeners)
2027 rv = 0; 2034 rv = 0;
2028 else if (!rv) 2035 else if (!rv)
2029 rv = -EINVAL; 2036 rv = -EINVAL;
2030 2037
2031 siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); 2038 siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
2032 2039
2033 return rv; 2040 return rv;
2034} 2041}
2035 2042
2036int siw_destroy_listen(struct iw_cm_id *id) 2043int siw_destroy_listen(struct iw_cm_id *id)
2037{ 2044{
2038 siw_dbg(id->device, "id 0x%p\n", id);
2039
2040 if (!id->provider_data) { 2045 if (!id->provider_data) {
2041 siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); 2046 siw_dbg(id->device, "no cep(s)\n");
2042 return 0; 2047 return 0;
2043 } 2048 }
2044 siw_drop_listeners(id); 2049 siw_drop_listeners(id);
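
Besides dropping cm_id pointers from the log lines, the listen path gains -ENODEV checks for a missing in_dev/in6_dev and switches bind_addr to a designated initializer, which zero-fills any member left unnamed. A self-contained analogue of that initializer (function name and port illustrative):

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdint.h>

	static struct sockaddr_in6 make_bind_addr(in_port_t port,
						  struct in6_addr addr,
						  uint32_t scope_id)
	{
		struct sockaddr_in6 bind_addr = {
			.sin6_family = AF_INET6,
			.sin6_port = port,	/* already network byte order */
			.sin6_addr = addr,
			.sin6_scope_id = scope_id,
		};	/* sin6_flowinfo is implicitly zeroed */

		return bind_addr;
	}

	int main(void)
	{
		struct sockaddr_in6 a = make_bind_addr(htons(4791),
						       in6addr_loopback, 0);

		return a.sin6_family == AF_INET6 ? 0 : 1;
	}
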
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
71 wc->wc_flags = IB_WC_WITH_INVALIDATE; 71 wc->wc_flags = IB_WC_WITH_INVALIDATE;
72 } 72 }
73 wc->qp = cqe->base_qp; 73 wc->qp = cqe->base_qp;
74 siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", 74 siw_dbg_cq(cq,
75 "idx %u, type %d, flags %2x, id 0x%pK\n",
75 cq->cq_get % cq->num_cqe, cqe->opcode, 76 cq->cq_get % cq->num_cqe, cqe->opcode,
76 cqe->flags, (void *)cqe->id); 77 cqe->flags, (void *)(uintptr_t)cqe->id);
77 } 78 }
78 WRITE_ONCE(cqe->flags, 0); 79 WRITE_ONCE(cqe->flags, 0);
79 cq->cq_get++; 80 cq->cq_get++;
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
197 */ 197 */
198 if (addr < mem->va || addr + len > mem->va + mem->len) { 198 if (addr < mem->va || addr + len > mem->va + mem->len) {
199 siw_dbg_pd(pd, "MEM interval len %d\n", len); 199 siw_dbg_pd(pd, "MEM interval len %d\n", len);
200 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", 200 siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
201 (unsigned long long)addr, 201 (void *)(uintptr_t)addr,
202 (unsigned long long)(addr + len)); 202 (void *)(uintptr_t)(addr + len));
203 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", 203 siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
204 (unsigned long long)mem->va, 204 (void *)(uintptr_t)mem->va,
205 (unsigned long long)(mem->va + mem->len), 205 (void *)(uintptr_t)(mem->va + mem->len),
206 mem->stag); 206 mem->stag);
207 207
208 return -E_BASE_BOUNDS; 208 return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
330 * Optionally, provides remaining len within current element, and 330 * Optionally, provides remaining len within current element, and
331 * current PBL index for later resume at same element. 331 * current PBL index for later resume at same element.
332 */ 332 */
333u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) 333dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
334{ 334{
335 int i = idx ? *idx : 0; 335 int i = idx ? *idx : 0;
336 336
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); 9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
10void siw_umem_release(struct siw_umem *umem, bool dirty); 10void siw_umem_release(struct siw_umem *umem, bool dirty);
11struct siw_pbl *siw_pbl_alloc(u32 num_buf); 11struct siw_pbl *siw_pbl_alloc(u32 num_buf);
12u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); 12dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); 13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); 14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
15int siw_invalidate_stag(struct ib_pd *pd, u32 stag); 15int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 0990307c5d2c..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -949,7 +949,7 @@ skip_irq:
949 rv = -EINVAL; 949 rv = -EINVAL;
950 goto out; 950 goto out;
951 } 951 }
952 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 952 wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
953 wqe->sqe.sge[0].lkey = 0; 953 wqe->sqe.sge[0].lkey = 0;
954 wqe->sqe.num_sge = 1; 954 wqe->sqe.num_sge = 1;
955 } 955 }
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
38 38
39 p = siw_get_upage(umem, dest_addr); 39 p = siw_get_upage(umem, dest_addr);
40 if (unlikely(!p)) { 40 if (unlikely(!p)) {
41 pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", 41 pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
42 __func__, qp_id(rx_qp(srx)), 42 __func__, qp_id(rx_qp(srx)),
43 (void *)dest_addr, (void *)umem->fp_addr); 43 (void *)(uintptr_t)dest_addr,
44 (void *)(uintptr_t)umem->fp_addr);
44 /* siw internal error */ 45 /* siw internal error */
45 srx->skb_copied += copied; 46 srx->skb_copied += copied;
46 srx->skb_new -= copied; 47 srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
50 pg_off = dest_addr & ~PAGE_MASK; 51 pg_off = dest_addr & ~PAGE_MASK;
51 bytes = min(len, (int)PAGE_SIZE - pg_off); 52 bytes = min(len, (int)PAGE_SIZE - pg_off);
52 53
53 siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); 54 siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
54 55
55 dest = kmap_atomic(p); 56 dest = kmap_atomic(p);
56 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, 57 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
104{ 105{
105 int rv; 106 int rv;
106 107
107 siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); 108 siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
108 109
109 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); 110 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
110 if (unlikely(rv)) { 111 if (unlikely(rv)) {
111 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", 112 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
112 qp_id(rx_qp(srx)), __func__, len, kva, rv); 113 qp_id(rx_qp(srx)), __func__, len, kva, rv);
113 114
114 return rv; 115 return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
132 133
133 while (len) { 134 while (len) {
134 int bytes; 135 int bytes;
135 u64 buf_addr = 136 dma_addr_t buf_addr =
136 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); 137 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
137 if (!buf_addr) 138 if (!buf_addr)
138 break; 139 break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
485 mem_p = *mem; 486 mem_p = *mem;
486 if (mem_p->mem_obj == NULL) 487 if (mem_p->mem_obj == NULL)
487 rv = siw_rx_kva(srx, 488 rv = siw_rx_kva(srx,
488 (void *)(sge->laddr + frx->sge_off), 489 (void *)(uintptr_t)(sge->laddr + frx->sge_off),
489 sge_bytes); 490 sge_bytes);
490 else if (!mem_p->is_pbl) 491 else if (!mem_p->is_pbl)
491 rv = siw_rx_umem(srx, mem_p->umem, 492 rv = siw_rx_umem(srx, mem_p->umem,
492 sge->laddr + frx->sge_off, sge_bytes); 493 sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
598 599
599 if (mem->mem_obj == NULL) 600 if (mem->mem_obj == NULL)
600 rv = siw_rx_kva(srx, 601 rv = siw_rx_kva(srx,
601 (void *)(srx->ddp_to + srx->fpdu_part_rcvd), 602 (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
602 bytes); 603 bytes);
603 else if (!mem->is_pbl) 604 else if (!mem->is_pbl)
604 rv = siw_rx_umem(srx, mem->umem, 605 rv = siw_rx_umem(srx, mem->umem,
605 srx->ddp_to + srx->fpdu_part_rcvd, bytes); 606 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
841 bytes = min(srx->fpdu_part_rem, srx->skb_new); 842 bytes = min(srx->fpdu_part_rem, srx->skb_new);
842 843
843 if (mem_p->mem_obj == NULL) 844 if (mem_p->mem_obj == NULL)
844 rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), 845 rv = siw_rx_kva(srx,
845 bytes); 846 (void *)(uintptr_t)(sge->laddr + wqe->processed),
847 bytes);
846 else if (!mem_p->is_pbl) 848 else if (!mem_p->is_pbl)
847 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, 849 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
848 bytes); 850 bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..438a2917a47c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
26{ 26{
27 struct siw_pbl *pbl = mem->pbl; 27 struct siw_pbl *pbl = mem->pbl;
28 u64 offset = addr - mem->va; 28 u64 offset = addr - mem->va;
29 u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); 29 dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
30 30
31 if (paddr) 31 if (paddr)
32 return virt_to_page(paddr); 32 return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
37/* 37/*
38 * Copy short payload at provided destination payload address 38 * Copy short payload at provided destination payload address
39 */ 39 */
40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) 40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
41{ 41{
42 struct siw_wqe *wqe = &c_tx->wqe_active; 42 struct siw_wqe *wqe = &c_tx->wqe_active;
43 struct siw_sge *sge = &wqe->sqe.sge[0]; 43 struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
50 return 0; 50 return 0;
51 51
52 if (tx_flags(wqe) & SIW_WQE_INLINE) { 52 if (tx_flags(wqe) & SIW_WQE_INLINE) {
53 memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); 53 memcpy(paddr, &wqe->sqe.sge[1], bytes);
54 } else { 54 } else {
55 struct siw_mem *mem = wqe->mem[0]; 55 struct siw_mem *mem = wqe->mem[0];
56 56
57 if (!mem->mem_obj) { 57 if (!mem->mem_obj) {
58 /* Kernel client using kva */ 58 /* Kernel client using kva */
59 memcpy((void *)paddr, (void *)sge->laddr, bytes); 59 memcpy(paddr,
60 (const void *)(uintptr_t)sge->laddr, bytes);
60 } else if (c_tx->in_syscall) { 61 } else if (c_tx->in_syscall) {
61 if (copy_from_user((void *)paddr, 62 if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
62 (const void __user *)sge->laddr,
63 bytes)) 63 bytes))
64 return -EFAULT; 64 return -EFAULT;
65 } else { 65 } else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
79 buffer = kmap_atomic(p); 79 buffer = kmap_atomic(p);
80 80
81 if (likely(PAGE_SIZE - off >= bytes)) { 81 if (likely(PAGE_SIZE - off >= bytes)) {
82 memcpy((void *)paddr, buffer + off, bytes); 82 memcpy(paddr, buffer + off, bytes);
83 kunmap_atomic(buffer); 83 kunmap_atomic(buffer);
84 } else { 84 } else {
85 unsigned long part = bytes - (PAGE_SIZE - off); 85 unsigned long part = bytes - (PAGE_SIZE - off);
86 86
87 memcpy((void *)paddr, buffer + off, part); 87 memcpy(paddr, buffer + off, part);
88 kunmap_atomic(buffer); 88 kunmap_atomic(buffer);
89 89
90 if (!mem->is_pbl) 90 if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
98 return -EFAULT; 98 return -EFAULT;
99 99
100 buffer = kmap_atomic(p); 100 buffer = kmap_atomic(p);
101 memcpy((void *)(paddr + part), buffer, 101 memcpy(paddr + part, buffer,
102 bytes - part); 102 bytes - part);
103 kunmap_atomic(buffer); 103 kunmap_atomic(buffer);
104 } 104 }
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
166 c_tx->ctrl_len = sizeof(struct iwarp_send); 166 c_tx->ctrl_len = sizeof(struct iwarp_send);
167 167
168 crc = (char *)&c_tx->pkt.send_pkt.crc; 168 crc = (char *)&c_tx->pkt.send_pkt.crc;
169 data = siw_try_1seg(c_tx, (u64)crc); 169 data = siw_try_1seg(c_tx, crc);
170 break; 170 break;
171 171
172 case SIW_OP_SEND_REMOTE_INV: 172 case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv); 189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
190 190
191 crc = (char *)&c_tx->pkt.send_pkt.crc; 191 crc = (char *)&c_tx->pkt.send_pkt.crc;
192 data = siw_try_1seg(c_tx, (u64)crc); 192 data = siw_try_1seg(c_tx, crc);
193 break; 193 break;
194 194
195 case SIW_OP_WRITE: 195 case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); 201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
202 202
203 crc = (char *)&c_tx->pkt.write_pkt.crc; 203 crc = (char *)&c_tx->pkt.write_pkt.crc;
204 data = siw_try_1seg(c_tx, (u64)crc); 204 data = siw_try_1seg(c_tx, crc);
205 break; 205 break;
206 206
207 case SIW_OP_READ_RESPONSE: 207 case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); 216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
217 217
218 crc = (char *)&c_tx->pkt.write_pkt.crc; 218 crc = (char *)&c_tx->pkt.write_pkt.crc;
219 data = siw_try_1seg(c_tx, (u64)crc); 219 data = siw_try_1seg(c_tx, crc);
220 break; 220 break;
221 221
222 default: 222 default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
398 398
399#define MAX_TRAILER (MPA_CRC_SIZE + 4) 399#define MAX_TRAILER (MPA_CRC_SIZE + 4)
400 400
401static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) 401static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
402{ 402{
403 if (hdr_len) { 403 while (kmap_mask) {
404 ++pages; 404 if (kmap_mask & BIT(0))
405 --num_maps; 405 kunmap(*pp);
406 } 406 pp++;
407 while (num_maps-- > 0) { 407 kmap_mask >>= 1;
408 kunmap(*pages);
409 pages++;
410 } 408 }
411} 409}
412 410
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
437 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, 435 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
438 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, 436 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
439 pbl_idx = c_tx->pbl_idx; 437 pbl_idx = c_tx->pbl_idx;
438 unsigned long kmap_mask = 0L;
440 439
441 if (c_tx->state == SIW_SEND_HDR) { 440 if (c_tx->state == SIW_SEND_HDR) {
442 if (c_tx->use_sendpage) { 441 if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
463 462
464 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { 463 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
465 mem = wqe->mem[sge_idx]; 464 mem = wqe->mem[sge_idx];
466 if (!mem->mem_obj) 465 is_kva = mem->mem_obj == NULL ? 1 : 0;
467 is_kva = 1;
468 } else { 466 } else {
469 is_kva = 1; 467 is_kva = 1;
470 } 468 }
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
473 * tx from kernel virtual address: either inline data 471 * tx from kernel virtual address: either inline data
474 * or memory region with assigned kernel buffer 472 * or memory region with assigned kernel buffer
475 */ 473 */
476 iov[seg].iov_base = (void *)(sge->laddr + sge_off); 474 iov[seg].iov_base =
475 (void *)(uintptr_t)(sge->laddr + sge_off);
477 iov[seg].iov_len = sge_len; 476 iov[seg].iov_len = sge_len;
478 477
479 if (do_crc) 478 if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
500 p = siw_get_upage(mem->umem, 499 p = siw_get_upage(mem->umem,
501 sge->laddr + sge_off); 500 sge->laddr + sge_off);
502 if (unlikely(!p)) { 501 if (unlikely(!p)) {
503 if (hdr_len) 502 siw_unmap_pages(page_array, kmap_mask);
504 seg--;
505 if (!c_tx->use_sendpage && seg) {
506 siw_unmap_pages(page_array,
507 hdr_len, seg);
508 }
509 wqe->processed -= c_tx->bytes_unsent; 503 wqe->processed -= c_tx->bytes_unsent;
510 rv = -EFAULT; 504 rv = -EFAULT;
511 goto done_crc; 505 goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
515 if (!c_tx->use_sendpage) { 509 if (!c_tx->use_sendpage) {
516 iov[seg].iov_base = kmap(p) + fp_off; 510 iov[seg].iov_base = kmap(p) + fp_off;
517 iov[seg].iov_len = plen; 511 iov[seg].iov_len = plen;
512
513 /* Remember for later kunmap() */
514 kmap_mask |= BIT(seg);
515
518 if (do_crc) 516 if (do_crc)
519 crypto_shash_update( 517 crypto_shash_update(
520 c_tx->mpa_crc_hd, 518 c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
526 page_address(p) + fp_off, 524 page_address(p) + fp_off,
527 plen); 525 plen);
528 } else { 526 } else {
529 u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); 527 u64 va = sge->laddr + sge_off;
530 528
531 page_array[seg] = virt_to_page(pa); 529 page_array[seg] = virt_to_page(va & PAGE_MASK);
532 if (do_crc) 530 if (do_crc)
533 crypto_shash_update( 531 crypto_shash_update(
534 c_tx->mpa_crc_hd, 532 c_tx->mpa_crc_hd,
535 (void *)(sge->laddr + sge_off), 533 (void *)(uintptr_t)va,
536 plen); 534 plen);
537 } 535 }
538 536
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
543 541
544 if (++seg > (int)MAX_ARRAY) { 542 if (++seg > (int)MAX_ARRAY) {
545 siw_dbg_qp(tx_qp(c_tx), "too many fragments\n"); 543 siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
546 if (!is_kva && !c_tx->use_sendpage) { 544 siw_unmap_pages(page_array, kmap_mask);
547 siw_unmap_pages(page_array, hdr_len,
548 seg - 1);
549 }
550 wqe->processed -= c_tx->bytes_unsent; 545 wqe->processed -= c_tx->bytes_unsent;
551 rv = -EMSGSIZE; 546 rv = -EMSGSIZE;
552 goto done_crc; 547 goto done_crc;
@@ -597,8 +592,7 @@ sge_done:
597 } else { 592 } else {
598 rv = kernel_sendmsg(s, &msg, iov, seg + 1, 593 rv = kernel_sendmsg(s, &msg, iov, seg + 1,
599 hdr_len + data_len + trl_len); 594 hdr_len + data_len + trl_len);
600 if (!is_kva) 595 siw_unmap_pages(page_array, kmap_mask);
601 siw_unmap_pages(page_array, hdr_len, seg);
602 } 596 }
603 if (rv < (int)hdr_len) { 597 if (rv < (int)hdr_len) {
604 /* Not even complete hdr pushed or negative rv */ 598 /* Not even complete hdr pushed or negative rv */
@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
829 rv = -EINVAL; 823 rv = -EINVAL;
830 goto tx_error; 824 goto tx_error;
831 } 825 }
832 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 826 wqe->sqe.sge[0].laddr =
827 (u64)(uintptr_t)&wqe->sqe.sge[1];
833 } 828 }
834 } 829 }
835 wqe->wr_status = SIW_WR_INPROGRESS; 830 wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
924 919
925static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) 920static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
926{ 921{
927 struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; 922 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
928 struct siw_device *sdev = to_siw_dev(pd->device); 923 struct siw_device *sdev = to_siw_dev(pd->device);
929 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 924 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
930 int rv = 0; 925 int rv = 0;
@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
954 mem->stag = sqe->rkey; 949 mem->stag = sqe->rkey;
955 mem->perms = sqe->access; 950 mem->perms = sqe->access;
956 951
957 siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", 952 siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
958 mem->va, base_mr->iova);
959 mem->va = base_mr->iova; 953 mem->va = base_mr->iova;
960 mem->stag_valid = 1; 954 mem->stag_valid = 1;
961out: 955out:
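
A recurring detail in the hunks above: sge->laddr and the other WQE address fields are u64, and casting a u64 straight to a pointer truncates silently and warns on 32-bit builds. Routing the cast through uintptr_t keeps both directions well defined. The round trip, as a minimal standalone sketch:

    #include <stdint.h>

    /* Sketch: stash a pointer in a fixed-width 64-bit field and
     * recover it portably; the uintptr_t hop makes the width change
     * explicit and avoids int-to-pointer-cast warnings on 32-bit.
     */
    static uint64_t stash(void *p)
    {
            return (uintptr_t)p;
    }

    static void *recover(uint64_t laddr)
    {
            return (void *)(uintptr_t)laddr;
    }
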
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index e7f3a2379d9d..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
424 */ 424 */
425 qp->srq = to_siw_srq(attrs->srq); 425 qp->srq = to_siw_srq(attrs->srq);
426 qp->attrs.rq_size = 0; 426 qp->attrs.rq_size = 0;
427 siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", 427 siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
428 qp->qp_num, qp->srq);
429 } else if (num_rqe) { 428 } else if (num_rqe) {
430 if (qp->kernel_verbs) 429 if (qp->kernel_verbs)
431 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); 430 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
610 base_ucontext); 609 base_ucontext);
611 struct siw_qp_attrs qp_attrs; 610 struct siw_qp_attrs qp_attrs;
612 611
613 siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); 612 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
614 613
615 /* 614 /*
616 * Mark QP as in process of destruction to prevent from 615 * Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
662 void *kbuf = &sqe->sge[1]; 661 void *kbuf = &sqe->sge[1];
663 int num_sge = core_wr->num_sge, bytes = 0; 662 int num_sge = core_wr->num_sge, bytes = 0;
664 663
665 sqe->sge[0].laddr = (u64)kbuf; 664 sqe->sge[0].laddr = (uintptr_t)kbuf;
666 sqe->sge[0].lkey = 0; 665 sqe->sge[0].lkey = 0;
667 666
668 while (num_sge--) { 667 while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
825 break; 824 break;
826 825
827 case IB_WR_REG_MR: 826 case IB_WR_REG_MR:
828 sqe->base_mr = (uint64_t)reg_wr(wr)->mr; 827 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
829 sqe->rkey = reg_wr(wr)->key; 828 sqe->rkey = reg_wr(wr)->key;
830 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; 829 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
831 sqe->opcode = SIW_OP_REG_MR; 830 sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
842 rv = -EINVAL; 841 rv = -EINVAL;
843 break; 842 break;
844 } 843 }
845 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", 844 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
846 sqe->opcode, sqe->flags, (void *)sqe->id); 845 sqe->opcode, sqe->flags,
846 (void *)(uintptr_t)sqe->id);
847 847
848 if (unlikely(rv < 0)) 848 if (unlikely(rv < 0))
849 break; 849 break;
@@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); 1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1206 int rv; 1206 int rv;
1207 1207
1208 siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", 1208 siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1209 (unsigned long long)start, (unsigned long long)rnic_va, 1209 (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
1210 (unsigned long long)len); 1210 (unsigned long long)len);
1211 1211
1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1363 struct siw_mem *mem = mr->mem; 1363 struct siw_mem *mem = mr->mem;
1364 struct siw_pbl *pbl = mem->pbl; 1364 struct siw_pbl *pbl = mem->pbl;
1365 struct siw_pble *pble; 1365 struct siw_pble *pble;
1366 u64 pbl_size; 1366 unsigned long pbl_size;
1367 int i, rv; 1367 int i, rv;
1368 1368
1369 if (!pbl) { 1369 if (!pbl) {
@@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1402 pbl_size += sg_dma_len(slp); 1402 pbl_size += sg_dma_len(slp);
1403 } 1403 }
1404 siw_dbg_mem(mem, 1404 siw_dbg_mem(mem,
1405 "sge[%d], size %llu, addr 0x%016llx, total %llu\n", 1405 "sge[%d], size %u, addr 0x%p, total %lu\n",
1406 i, pble->size, pble->addr, pbl_size); 1406 i, pble->size, (void *)(uintptr_t)pble->addr,
1407 pbl_size);
1407 } 1408 }
1408 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); 1409 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1409 if (rv > 0) { 1410 if (rv > 0) {
1410 mem->len = base_mr->length; 1411 mem->len = base_mr->length;
1411 mem->va = base_mr->iova; 1412 mem->va = base_mr->iova;
1412 siw_dbg_mem(mem, 1413 siw_dbg_mem(mem,
1413 "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", 1414 "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1414 mem->len, mem->va, num_sle, pbl->num_buf); 1415 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1416 pbl->num_buf);
1415 } 1417 }
1416 return rv; 1418 return rv;
1417} 1419}
@@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
1529 } 1531 }
1530 spin_lock_init(&srq->lock); 1532 spin_lock_init(&srq->lock);
1531 1533
1532 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); 1534 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1533 1535
1534 return 0; 1536 return 0;
1535 1537
@@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1650 1652
1651 if (unlikely(!srq->kernel_verbs)) { 1653 if (unlikely(!srq->kernel_verbs)) {
1652 siw_dbg_pd(base_srq->pd, 1654 siw_dbg_pd(base_srq->pd,
1653 "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", 1655 "[SRQ]: no kernel post_recv for mapped srq\n");
1654 srq);
1655 rv = -EINVAL; 1656 rv = -EINVAL;
1656 goto out; 1657 goto out;
1657 } 1658 }
@@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1673 } 1674 }
1674 if (unlikely(wr->num_sge > srq->max_sge)) { 1675 if (unlikely(wr->num_sge > srq->max_sge)) {
1675 siw_dbg_pd(base_srq->pd, 1676 siw_dbg_pd(base_srq->pd,
1676 "[SRQ 0x%p]: too many sge's: %d\n", srq, 1677 "[SRQ]: too many sge's: %d\n", wr->num_sge);
1677 wr->num_sge);
1678 rv = -EINVAL; 1678 rv = -EINVAL;
1679 break; 1679 break;
1680 } 1680 }
@@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1693 spin_unlock_irqrestore(&srq->lock, flags); 1693 spin_unlock_irqrestore(&srq->lock, flags);
1694out: 1694out:
1695 if (unlikely(rv < 0)) { 1695 if (unlikely(rv < 0)) {
1696 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); 1696 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1697 *bad_wr = wr; 1697 *bad_wr = wr;
1698 } 1698 }
1699 return rv; 1699 return rv;
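
The siw_verbs.c hunks above scrub raw kernel addresses from debug output: pointers are either dropped from the format string or printed with %pK, which is censored according to the kptr_restrict sysctl rather than leaking the address. The preferred form, as a kernel-style sketch (function name is illustrative):

    #include <linux/printk.h>
    #include <linux/types.h>

    /* Sketch: %pK honours kptr_restrict; plain %p on recent kernels
     * prints a hashed value. Neither exposes the raw address.
     */
    static void show_wr_id(u64 id)
    {
            pr_debug("wr_id 0x%pK\n", (void *)(uintptr_t)id);
    }
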
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 88ae7c2ac3c8..e486a8a74c40 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
237 237
238static void hv_kbd_on_channel_callback(void *context) 238static void hv_kbd_on_channel_callback(void *context)
239{ 239{
240 struct vmpacket_descriptor *desc;
240 struct hv_device *hv_dev = context; 241 struct hv_device *hv_dev = context;
241 void *buffer;
242 int bufferlen = 0x100; /* Start with sensible size */
243 u32 bytes_recvd; 242 u32 bytes_recvd;
244 u64 req_id; 243 u64 req_id;
245 int error;
246 244
247 buffer = kmalloc(bufferlen, GFP_ATOMIC); 245 foreach_vmbus_pkt(desc, hv_dev->channel) {
248 if (!buffer) 246 bytes_recvd = desc->len8 * 8;
249 return; 247 req_id = desc->trans_id;
250
251 while (1) {
252 error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
253 &bytes_recvd, &req_id);
254 switch (error) {
255 case 0:
256 if (bytes_recvd == 0) {
257 kfree(buffer);
258 return;
259 }
260
261 hv_kbd_handle_received_packet(hv_dev, buffer,
262 bytes_recvd, req_id);
263 break;
264 248
265 case -ENOBUFS: 249 hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
266 kfree(buffer); 250 req_id);
267 /* Handle large packet */
268 bufferlen = bytes_recvd;
269 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
270 if (!buffer)
271 return;
272 break;
273 }
274 } 251 }
275} 252}
276 253
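
The rewritten callback above drops the -ENOBUFS retry loop with its guessed buffer size: foreach_vmbus_pkt() walks the received descriptors in place on the channel's ring buffer. VMBus stores packet lengths in 8-byte units, hence bytes_recvd = desc->len8 * 8. That conversion as a standalone sketch (helper name is illustrative):

    #include <linux/types.h>

    /* Sketch: len8 counts 8-byte quanta, so len8 == 0x20 covers
     * 0x20 * 8 == 256 bytes.
     */
    static inline u32 vmbus_desc_bytes(u16 len8)
    {
            return (u32)len8 * 8;
    }
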
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e15cdcd8cb3c..a4ddeade8ac4 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -182,6 +182,7 @@ config INTEL_IOMMU
182 select IOMMU_IOVA 182 select IOMMU_IOVA
183 select NEED_DMA_MAP_STATE 183 select NEED_DMA_MAP_STATE
184 select DMAR_TABLE 184 select DMAR_TABLE
185 select SWIOTLB
185 help 186 help
186 DMA remapping (DMAR) devices support enables independent address 187 DMA remapping (DMAR) devices support enables independent address
187 translations for Direct Memory Access (DMA) from devices. 188 translations for Direct Memory Access (DMA) from devices.
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index a2729aadd300..4f405f926e73 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,13 +10,14 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
10obj-$(CONFIG_IOMMU_IOVA) += iova.o 10obj-$(CONFIG_IOMMU_IOVA) += iova.o
11obj-$(CONFIG_OF_IOMMU) += of_iommu.o 11obj-$(CONFIG_OF_IOMMU) += of_iommu.o
12obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o 12obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
13obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o 13obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
14obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o 14obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
15obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o 15obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
16obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o 16obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
17obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o 17obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
18obj-$(CONFIG_DMAR_TABLE) += dmar.o 18obj-$(CONFIG_DMAR_TABLE) += dmar.o
19obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o 19obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
20obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o
20obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o 21obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
21obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o 22obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
22obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o 23obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 29eeea914660..1ed3b98324ba 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -436,7 +436,7 @@ static int iommu_init_device(struct device *dev)
436 * invalid address), we ignore the capability for the device so 436 * invalid address), we ignore the capability for the device so
437 * it'll be forced to go into translation mode. 437 * it'll be forced to go into translation mode.
438 */ 438 */
439 if ((iommu_pass_through || !amd_iommu_force_isolation) && 439 if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
440 dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) { 440 dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
441 struct amd_iommu *iommu; 441 struct amd_iommu *iommu;
442 442
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
1143 iommu_completion_wait(iommu); 1143 iommu_completion_wait(iommu);
1144} 1144}
1145 1145
1146static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
1147{
1148 struct iommu_cmd cmd;
1149
1150 build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
1151 dom_id, 1);
1152 iommu_queue_command(iommu, &cmd);
1153
1154 iommu_completion_wait(iommu);
1155}
1156
1146static void amd_iommu_flush_all(struct amd_iommu *iommu) 1157static void amd_iommu_flush_all(struct amd_iommu *iommu)
1147{ 1158{
1148 struct iommu_cmd cmd; 1159 struct iommu_cmd cmd;
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
1424 * another level increases the size of the address space by 9 bits to a size up 1435 * another level increases the size of the address space by 9 bits to a size up
1425 * to 64 bits. 1436 * to 64 bits.
1426 */ 1437 */
1427static bool increase_address_space(struct protection_domain *domain, 1438static void increase_address_space(struct protection_domain *domain,
1428 gfp_t gfp) 1439 gfp_t gfp)
1429{ 1440{
1441 unsigned long flags;
1430 u64 *pte; 1442 u64 *pte;
1431 1443
1432 if (domain->mode == PAGE_MODE_6_LEVEL) 1444 spin_lock_irqsave(&domain->lock, flags);
1445
1446 if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
1433 /* address space already 64 bit large */ 1447 /* address space already 64 bit large */
1434 return false; 1448 goto out;
1435 1449
1436 pte = (void *)get_zeroed_page(gfp); 1450 pte = (void *)get_zeroed_page(gfp);
1437 if (!pte) 1451 if (!pte)
1438 return false; 1452 goto out;
1439 1453
1440 *pte = PM_LEVEL_PDE(domain->mode, 1454 *pte = PM_LEVEL_PDE(domain->mode,
1441 iommu_virt_to_phys(domain->pt_root)); 1455 iommu_virt_to_phys(domain->pt_root));
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
1443 domain->mode += 1; 1457 domain->mode += 1;
1444 domain->updated = true; 1458 domain->updated = true;
1445 1459
1446 return true; 1460out:
1461 spin_unlock_irqrestore(&domain->lock, flags);
1462
1463 return;
1447} 1464}
1448 1465
1449static u64 *alloc_pte(struct protection_domain *domain, 1466static u64 *alloc_pte(struct protection_domain *domain,
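
increase_address_space() now returns void, takes domain->lock itself, and rechecks the level limit only once the lock is held (the WARN_ON_ONCE case), so two concurrent page-table growers cannot both observe the old mode and bump it. The check-under-lock shape, as a standalone sketch:

    #include <linux/spinlock.h>

    /* Sketch: recheck the growth condition only while holding the
     * lock, so racing callers serialize and the level is raised at
     * most once per observation.
     */
    static void grow_level_once(spinlock_t *lock, int *level, int max)
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            if (*level < max)
                    *level += 1;
            spin_unlock_irqrestore(lock, flags);
    }
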
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
1873{ 1890{
1874 u64 pte_root = 0; 1891 u64 pte_root = 0;
1875 u64 flags = 0; 1892 u64 flags = 0;
1893 u32 old_domid;
1876 1894
1877 if (domain->mode != PAGE_MODE_NONE) 1895 if (domain->mode != PAGE_MODE_NONE)
1878 pte_root = iommu_virt_to_phys(domain->pt_root); 1896 pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
1922 flags &= ~DEV_DOMID_MASK; 1940 flags &= ~DEV_DOMID_MASK;
1923 flags |= domain->id; 1941 flags |= domain->id;
1924 1942
1943 old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
1925 amd_iommu_dev_table[devid].data[1] = flags; 1944 amd_iommu_dev_table[devid].data[1] = flags;
1926 amd_iommu_dev_table[devid].data[0] = pte_root; 1945 amd_iommu_dev_table[devid].data[0] = pte_root;
1946
1947 /*
1948 * A kdump kernel might be replacing a domain ID that was copied from
1949 * the previous kernel--if so, it needs to flush the translation cache
1950 * entries for the old domain ID that is being overwritten
1951 */
1952 if (old_domid) {
1953 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1954
1955 amd_iommu_flush_tlb_domid(iommu, old_domid);
1956 }
1927} 1957}
1928 1958
1929static void clear_dte_entry(u16 devid) 1959static void clear_dte_entry(u16 devid)
@@ -2226,7 +2256,7 @@ static int amd_iommu_add_device(struct device *dev)
2226 2256
2227 BUG_ON(!dev_data); 2257 BUG_ON(!dev_data);
2228 2258
2229 if (iommu_pass_through || dev_data->iommu_v2) 2259 if (dev_data->iommu_v2)
2230 iommu_request_dm_for_dev(dev); 2260 iommu_request_dm_for_dev(dev);
2231 2261
2232 /* Domains are initialized for this device - have a look what we ended up with */ 2262 /* Domains are initialized for this device - have a look what we ended up with */
@@ -2547,7 +2577,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
2547 2577
2548 bus_addr = address + s->dma_address + (j << PAGE_SHIFT); 2578 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
2549 phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT); 2579 phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
2550 ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC); 2580 ret = iommu_map_page(domain, bus_addr, phys_addr,
2581 PAGE_SIZE, prot,
2582 GFP_ATOMIC | __GFP_NOWARN);
2551 if (ret) 2583 if (ret)
2552 goto out_unmap; 2584 goto out_unmap;
2553 2585
@@ -2805,7 +2837,7 @@ int __init amd_iommu_init_api(void)
2805 2837
2806int __init amd_iommu_init_dma_ops(void) 2838int __init amd_iommu_init_dma_ops(void)
2807{ 2839{
2808 swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; 2840 swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
2809 iommu_detected = 1; 2841 iommu_detected = 1;
2810 2842
2811 if (amd_iommu_unmap_flush) 2843 if (amd_iommu_unmap_flush)
@@ -4314,13 +4346,62 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
4314 .deactivate = irq_remapping_deactivate, 4346 .deactivate = irq_remapping_deactivate,
4315}; 4347};
4316 4348
4349int amd_iommu_activate_guest_mode(void *data)
4350{
4351 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
4352 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
4353
4354 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
4355 !entry || entry->lo.fields_vapic.guest_mode)
4356 return 0;
4357
4358 entry->lo.val = 0;
4359 entry->hi.val = 0;
4360
4361 entry->lo.fields_vapic.guest_mode = 1;
4362 entry->lo.fields_vapic.ga_log_intr = 1;
4363 entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
4364 entry->hi.fields.vector = ir_data->ga_vector;
4365 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
4366
4367 return modify_irte_ga(ir_data->irq_2_irte.devid,
4368 ir_data->irq_2_irte.index, entry, NULL);
4369}
4370EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
4371
4372int amd_iommu_deactivate_guest_mode(void *data)
4373{
4374 struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
4375 struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
4376 struct irq_cfg *cfg = ir_data->cfg;
4377
4378 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
4379 !entry || !entry->lo.fields_vapic.guest_mode)
4380 return 0;
4381
4382 entry->lo.val = 0;
4383 entry->hi.val = 0;
4384
4385 entry->lo.fields_remap.dm = apic->irq_dest_mode;
4386 entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
4387 entry->hi.fields.vector = cfg->vector;
4388 entry->lo.fields_remap.destination =
4389 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
4390 entry->hi.fields.destination =
4391 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
4392
4393 return modify_irte_ga(ir_data->irq_2_irte.devid,
4394 ir_data->irq_2_irte.index, entry, NULL);
4395}
4396EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
4397
4317static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) 4398static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
4318{ 4399{
4400 int ret;
4319 struct amd_iommu *iommu; 4401 struct amd_iommu *iommu;
4320 struct amd_iommu_pi_data *pi_data = vcpu_info; 4402 struct amd_iommu_pi_data *pi_data = vcpu_info;
4321 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data; 4403 struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
4322 struct amd_ir_data *ir_data = data->chip_data; 4404 struct amd_ir_data *ir_data = data->chip_data;
4323 struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
4324 struct irq_2_irte *irte_info = &ir_data->irq_2_irte; 4405 struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
4325 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid); 4406 struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
4326 4407
@@ -4331,6 +4412,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
4331 if (!dev_data || !dev_data->use_vapic) 4412 if (!dev_data || !dev_data->use_vapic)
4332 return 0; 4413 return 0;
4333 4414
4415 ir_data->cfg = irqd_cfg(data);
4334 pi_data->ir_data = ir_data; 4416 pi_data->ir_data = ir_data;
4335 4417
4336 /* Note: 4418 /* Note:
@@ -4349,37 +4431,24 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
4349 4431
4350 pi_data->prev_ga_tag = ir_data->cached_ga_tag; 4432 pi_data->prev_ga_tag = ir_data->cached_ga_tag;
4351 if (pi_data->is_guest_mode) { 4433 if (pi_data->is_guest_mode) {
4352 /* Setting */ 4434 ir_data->ga_root_ptr = (pi_data->base >> 12);
4353 irte->hi.fields.ga_root_ptr = (pi_data->base >> 12); 4435 ir_data->ga_vector = vcpu_pi_info->vector;
4354 irte->hi.fields.vector = vcpu_pi_info->vector; 4436 ir_data->ga_tag = pi_data->ga_tag;
4355 irte->lo.fields_vapic.ga_log_intr = 1; 4437 ret = amd_iommu_activate_guest_mode(ir_data);
4356 irte->lo.fields_vapic.guest_mode = 1; 4438 if (!ret)
4357 irte->lo.fields_vapic.ga_tag = pi_data->ga_tag; 4439 ir_data->cached_ga_tag = pi_data->ga_tag;
4358
4359 ir_data->cached_ga_tag = pi_data->ga_tag;
4360 } else { 4440 } else {
4361 /* Un-Setting */ 4441 ret = amd_iommu_deactivate_guest_mode(ir_data);
4362 struct irq_cfg *cfg = irqd_cfg(data);
4363
4364 irte->hi.val = 0;
4365 irte->lo.val = 0;
4366 irte->hi.fields.vector = cfg->vector;
4367 irte->lo.fields_remap.guest_mode = 0;
4368 irte->lo.fields_remap.destination =
4369 APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
4370 irte->hi.fields.destination =
4371 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
4372 irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
4373 irte->lo.fields_remap.dm = apic->irq_dest_mode;
4374 4442
4375 /* 4443 /*
4376 * This communicates the ga_tag back to the caller 4444 * This communicates the ga_tag back to the caller
4377 * so that it can do all the necessary clean up. 4445 * so that it can do all the necessary clean up.
4378 */ 4446 */
4379 ir_data->cached_ga_tag = 0; 4447 if (!ret)
4448 ir_data->cached_ga_tag = 0;
4380 } 4449 }
4381 4450
4382 return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data); 4451 return ret;
4383} 4452}
4384 4453
4385 4454
diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
new file mode 100644
index 000000000000..12d540d9b59b
--- /dev/null
+++ b/drivers/iommu/amd_iommu.h
@@ -0,0 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2
3#ifndef AMD_IOMMU_H
4#define AMD_IOMMU_H
5
6int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
7
8#ifdef CONFIG_DMI
9void amd_iommu_apply_ivrs_quirks(void);
10#else
11static inline void amd_iommu_apply_ivrs_quirks(void) { }
12#endif
13
14#endif
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 4413aa67000e..568c52317757 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -32,6 +32,7 @@
32#include <asm/irq_remapping.h> 32#include <asm/irq_remapping.h>
33 33
34#include <linux/crash_dump.h> 34#include <linux/crash_dump.h>
35#include "amd_iommu.h"
35#include "amd_iommu_proto.h" 36#include "amd_iommu_proto.h"
36#include "amd_iommu_types.h" 37#include "amd_iommu_types.h"
37#include "irq_remapping.h" 38#include "irq_remapping.h"
@@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1002 set_iommu_for_device(iommu, devid); 1003 set_iommu_for_device(iommu, devid);
1003} 1004}
1004 1005
1005static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) 1006int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1006{ 1007{
1007 struct devid_map *entry; 1008 struct devid_map *entry;
1008 struct list_head *list; 1009 struct list_head *list;
@@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1153 if (ret) 1154 if (ret)
1154 return ret; 1155 return ret;
1155 1156
1157 amd_iommu_apply_ivrs_quirks();
1158
1156 /* 1159 /*
1157 * First save the recommended feature enable bits from ACPI 1160 * First save the recommended feature enable bits from ACPI
1158 */ 1161 */
diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
new file mode 100644
index 000000000000..c235f79b7a20
--- /dev/null
+++ b/drivers/iommu/amd_iommu_quirks.c
@@ -0,0 +1,92 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2
3/*
4 * Quirks for AMD IOMMU
5 *
6 * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
7 */
8
9#ifdef CONFIG_DMI
10#include <linux/dmi.h>
11
12#include "amd_iommu.h"
13
14#define IVHD_SPECIAL_IOAPIC 1
15
16struct ivrs_quirk_entry {
17 u8 id;
18 u16 devid;
19};
20
21enum {
22 DELL_INSPIRON_7375 = 0,
23 DELL_LATITUDE_5495,
24 LENOVO_IDEAPAD_330S_15ARR,
25};
26
27static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
28 /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
29 [DELL_INSPIRON_7375] = {
30 { .id = 4, .devid = 0xa0 },
31 { .id = 5, .devid = 0x2 },
32 {}
33 },
34 /* ivrs_ioapic[4]=00:14.0 */
35 [DELL_LATITUDE_5495] = {
36 { .id = 4, .devid = 0xa0 },
37 {}
38 },
39 /* ivrs_ioapic[32]=00:14.0 */
40 [LENOVO_IDEAPAD_330S_15ARR] = {
41 { .id = 32, .devid = 0xa0 },
42 {}
43 },
44 {}
45};
46
47static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
48{
49 const struct ivrs_quirk_entry *i;
50
51 for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
52 add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
53
54 return 0;
55}
56
57static const struct dmi_system_id ivrs_quirks[] __initconst = {
58 {
59 .callback = ivrs_ioapic_quirk_cb,
60 .ident = "Dell Inspiron 7375",
61 .matches = {
62 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
63 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
64 },
65 .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
66 },
67 {
68 .callback = ivrs_ioapic_quirk_cb,
69 .ident = "Dell Latitude 5495",
70 .matches = {
71 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
72 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
73 },
74 .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
75 },
76 {
77 .callback = ivrs_ioapic_quirk_cb,
78 .ident = "Lenovo ideapad 330S-15ARR",
79 .matches = {
80 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
81 DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
82 },
83 .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
84 },
85 {}
86};
87
88void __init amd_iommu_apply_ivrs_quirks(void)
89{
90 dmi_check_system(ivrs_quirks);
91}
92#endif
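
The quirk tables above hard-code devid 0xa0 for the IOAPIC behind 00:14.0. An IVRS device ID packs bus, slot and function the same way as a PCI requester ID, bus << 8 | slot << 3 | fn, so 00:14.0 encodes to (0x14 << 3) | 0 = 0xa0. The arithmetic with the stock PCI helpers (wrapper name is illustrative):

    #include <linux/pci.h>

    /* Sketch: PCI_DEVID(0, PCI_DEVFN(0x14, 0)) == 0x00a0, matching
     * the .devid values in ivrs_ioapic_quirks[] for device 00:14.0.
     */
    static inline u16 ivrs_devid(u8 bus, u8 slot, u8 fn)
    {
            return PCI_DEVID(bus, PCI_DEVFN(slot, fn));
    }
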
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 64edd5a9694c..9ac229e92b07 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -873,6 +873,15 @@ struct amd_ir_data {
873 struct msi_msg msi_entry; 873 struct msi_msg msi_entry;
874 void *entry; /* Pointer to union irte or struct irte_ga */ 874 void *entry; /* Pointer to union irte or struct irte_ga */
875 void *ref; /* Pointer to the actual irte */ 875 void *ref; /* Pointer to the actual irte */
876
877 /**
878 * Store information for activate/de-activate
879 * Guest virtual APIC mode during runtime.
880 */
881 struct irq_cfg *cfg;
882 int ga_vector;
883 int ga_root_ptr;
884 int ga_tag;
876}; 885};
877 886
878struct amd_irte_ops { 887struct amd_irte_ops {
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 315e0087c19f..8f412af84247 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -303,13 +303,15 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
303 u64 size, struct device *dev) 303 u64 size, struct device *dev)
304{ 304{
305 struct iommu_dma_cookie *cookie = domain->iova_cookie; 305 struct iommu_dma_cookie *cookie = domain->iova_cookie;
306 struct iova_domain *iovad = &cookie->iovad;
307 unsigned long order, base_pfn; 306 unsigned long order, base_pfn;
307 struct iova_domain *iovad;
308 int attr; 308 int attr;
309 309
310 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) 310 if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
311 return -EINVAL; 311 return -EINVAL;
312 312
313 iovad = &cookie->iovad;
314
313 /* Use the smallest supported page size for IOVA granularity */ 315 /* Use the smallest supported page size for IOVA granularity */
314 order = __ffs(domain->pgsize_bitmap); 316 order = __ffs(domain->pgsize_bitmap);
315 base_pfn = max_t(unsigned long, 1, base >> order); 317 base_pfn = max_t(unsigned long, 1, base >> order);
@@ -970,11 +972,14 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
970{ 972{
971 bool coherent = dev_is_dma_coherent(dev); 973 bool coherent = dev_is_dma_coherent(dev);
972 size_t alloc_size = PAGE_ALIGN(size); 974 size_t alloc_size = PAGE_ALIGN(size);
975 int node = dev_to_node(dev);
973 struct page *page = NULL; 976 struct page *page = NULL;
974 void *cpu_addr; 977 void *cpu_addr;
975 978
976 page = dma_alloc_contiguous(dev, alloc_size, gfp); 979 page = dma_alloc_contiguous(dev, alloc_size, gfp);
977 if (!page) 980 if (!page)
981 page = alloc_pages_node(node, gfp, get_order(alloc_size));
982 if (!page)
978 return NULL; 983 return NULL;
979 984
980 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 985 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 5d0754ed5fa0..eecd6a421667 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1519,6 +1519,64 @@ static const char *dma_remap_fault_reasons[] =
1519 "PCE for translation request specifies blocking", 1519 "PCE for translation request specifies blocking",
1520}; 1520};
1521 1521
1522static const char * const dma_remap_sm_fault_reasons[] = {
1523 "SM: Invalid Root Table Address",
1524 "SM: TTM 0 for request with PASID",
1525 "SM: TTM 0 for page group request",
1526 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */
1527 "SM: Error attempting to access Root Entry",
1528 "SM: Present bit in Root Entry is clear",
1529 "SM: Non-zero reserved field set in Root Entry",
1530 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */
1531 "SM: Error attempting to access Context Entry",
1532 "SM: Present bit in Context Entry is clear",
1533 "SM: Non-zero reserved field set in the Context Entry",
1534 "SM: Invalid Context Entry",
1535 "SM: DTE field in Context Entry is clear",
1536 "SM: PASID Enable field in Context Entry is clear",
1537 "SM: PASID is larger than the max in Context Entry",
1538 "SM: PRE field in Context-Entry is clear",
1539 "SM: RID_PASID field error in Context-Entry",
1540 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */
1541 "SM: Error attempting to access the PASID Directory Entry",
1542 "SM: Present bit in Directory Entry is clear",
1543 "SM: Non-zero reserved field set in PASID Directory Entry",
1544 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */
1545 "SM: Error attempting to access PASID Table Entry",
1546 "SM: Present bit in PASID Table Entry is clear",
1547 "SM: Non-zero reserved field set in PASID Table Entry",
1548 "SM: Invalid Scalable-Mode PASID Table Entry",
1549 "SM: ERE field is clear in PASID Table Entry",
1550 "SM: SRE field is clear in PASID Table Entry",
1551 "Unknown", "Unknown",/* 0x5E-0x5F */
1552 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */
1553 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */
1554 "SM: Error attempting to access first-level paging entry",
1555 "SM: Present bit in first-level paging entry is clear",
1556 "SM: Non-zero reserved field set in first-level paging entry",
1557 "SM: Error attempting to access FL-PML4 entry",
1558 "SM: First-level entry address beyond MGAW in Nested translation",
1559 "SM: Read permission error in FL-PML4 entry in Nested translation",
1560 "SM: Read permission error in first-level paging entry in Nested translation",
1561 "SM: Write permission error in first-level paging entry in Nested translation",
1562 "SM: Error attempting to access second-level paging entry",
1563 "SM: Read/Write permission error in second-level paging entry",
1564 "SM: Non-zero reserved field set in second-level paging entry",
1565 "SM: Invalid second-level page table pointer",
1566 "SM: A/D bit update needed in second-level entry when set up in no snoop",
1567 "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */
1568 "SM: Address in first-level translation is not canonical",
1569 "SM: U/S set 0 for first-level translation with user privilege",
1570 "SM: No execute permission for request with PASID and ER=1",
1571 "SM: Address beyond the DMA hardware max",
1572 "SM: Second-level entry address beyond the max",
1573 "SM: No write permission for Write/AtomicOp request",
1574 "SM: No read permission for Read/AtomicOp request",
1575 "SM: Invalid address-interrupt address",
1576 "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */
1577 "SM: A/D bit update needed in first-level entry when set up in no snoop",
1578};
1579
1522static const char *irq_remap_fault_reasons[] = 1580static const char *irq_remap_fault_reasons[] =
1523{ 1581{
1524 "Detected reserved fields in the decoded interrupt-remapped request", 1582 "Detected reserved fields in the decoded interrupt-remapped request",
@@ -1536,6 +1594,10 @@ static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1536 ARRAY_SIZE(irq_remap_fault_reasons))) { 1594 ARRAY_SIZE(irq_remap_fault_reasons))) {
1537 *fault_type = INTR_REMAP; 1595 *fault_type = INTR_REMAP;
1538 return irq_remap_fault_reasons[fault_reason - 0x20]; 1596 return irq_remap_fault_reasons[fault_reason - 0x20];
1597 } else if (fault_reason >= 0x30 && (fault_reason - 0x30 <
1598 ARRAY_SIZE(dma_remap_sm_fault_reasons))) {
1599 *fault_type = DMA_REMAP;
1600 return dma_remap_sm_fault_reasons[fault_reason - 0x30];
1539 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { 1601 } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1540 *fault_type = DMA_REMAP; 1602 *fault_type = DMA_REMAP;
1541 return dma_remap_fault_reasons[fault_reason]; 1603 return dma_remap_fault_reasons[fault_reason];
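
With the table added above, dmar_get_fault_reason() dispatches on three ranges: interrupt-remapping reasons from 0x20, scalable-mode reasons from 0x30, and the legacy DMA-remap table below that. A reported reason of 0x50, for example, lands at index 0x50 - 0x30 = 32, the "SM: Error attempting to access the PASID Directory Entry" string. The dispatch, sketched standalone with the tables passed as parameters:

    #include <stddef.h>

    /* Sketch: mirrors the order of the range checks in
     * dmar_get_fault_reason(); unmatched reasons fall through.
     */
    static const char *reason_str(unsigned int r,
                                  const char *const *legacy, size_t nl,
                                  const char *const *irq, size_t ni,
                                  const char *const *sm, size_t ns)
    {
            if (r >= 0x20 && r - 0x20 < ni)
                    return irq[r - 0x20];
            if (r >= 0x30 && r - 0x30 < ns)
                    return sm[r - 0x30];
            if (r < nl)
                    return legacy[r];
            return "Unknown";
    }
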
@@ -1611,7 +1673,8 @@ void dmar_msi_read(int irq, struct msi_msg *msg)
1611} 1673}
1612 1674
1613static int dmar_fault_do_one(struct intel_iommu *iommu, int type, 1675static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1614 u8 fault_reason, u16 source_id, unsigned long long addr) 1676 u8 fault_reason, int pasid, u16 source_id,
1677 unsigned long long addr)
1615{ 1678{
1616 const char *reason; 1679 const char *reason;
1617 int fault_type; 1680 int fault_type;
@@ -1624,10 +1687,11 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1624 PCI_FUNC(source_id & 0xFF), addr >> 48, 1687 PCI_FUNC(source_id & 0xFF), addr >> 48,
1625 fault_reason, reason); 1688 fault_reason, reason);
1626 else 1689 else
1627 pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n", 1690 pr_err("[%s] Request device [%02x:%02x.%d] PASID %x fault addr %llx [fault reason %02d] %s\n",
1628 type ? "DMA Read" : "DMA Write", 1691 type ? "DMA Read" : "DMA Write",
1629 source_id >> 8, PCI_SLOT(source_id & 0xFF), 1692 source_id >> 8, PCI_SLOT(source_id & 0xFF),
1630 PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); 1693 PCI_FUNC(source_id & 0xFF), pasid, addr,
1694 fault_reason, reason);
1631 return 0; 1695 return 0;
1632} 1696}
1633 1697
@@ -1659,8 +1723,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
1659 u8 fault_reason; 1723 u8 fault_reason;
1660 u16 source_id; 1724 u16 source_id;
1661 u64 guest_addr; 1725 u64 guest_addr;
1662 int type; 1726 int type, pasid;
1663 u32 data; 1727 u32 data;
1728 bool pasid_present;
1664 1729
1665 /* highest 32 bits */ 1730 /* highest 32 bits */
1666 data = readl(iommu->reg + reg + 1731 data = readl(iommu->reg + reg +
@@ -1672,10 +1737,12 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
1672 fault_reason = dma_frcd_fault_reason(data); 1737 fault_reason = dma_frcd_fault_reason(data);
1673 type = dma_frcd_type(data); 1738 type = dma_frcd_type(data);
1674 1739
1740 pasid = dma_frcd_pasid_value(data);
1675 data = readl(iommu->reg + reg + 1741 data = readl(iommu->reg + reg +
1676 fault_index * PRIMARY_FAULT_REG_LEN + 8); 1742 fault_index * PRIMARY_FAULT_REG_LEN + 8);
1677 source_id = dma_frcd_source_id(data); 1743 source_id = dma_frcd_source_id(data);
1678 1744
1745 pasid_present = dma_frcd_pasid_present(data);
1679 guest_addr = dmar_readq(iommu->reg + reg + 1746 guest_addr = dmar_readq(iommu->reg + reg +
1680 fault_index * PRIMARY_FAULT_REG_LEN); 1747 fault_index * PRIMARY_FAULT_REG_LEN);
1681 guest_addr = dma_frcd_page_addr(guest_addr); 1748 guest_addr = dma_frcd_page_addr(guest_addr);
@@ -1688,7 +1755,9 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
1688 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1755 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1689 1756
1690 if (!ratelimited) 1757 if (!ratelimited)
1758 /* Using pasid -1 if pasid is not present */
1691 dmar_fault_do_one(iommu, type, fault_reason, 1759 dmar_fault_do_one(iommu, type, fault_reason,
1760 pasid_present ? pasid : -1,
1692 source_id, guest_addr); 1761 source_id, guest_addr);
1693 1762
1694 fault_index++; 1763 fault_index++;
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index cf5af34cb681..9c94e16fb127 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -566,7 +566,7 @@ static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
566 566
567static const struct iommu_ops exynos_iommu_ops; 567static const struct iommu_ops exynos_iommu_ops;
568 568
569static int __init exynos_sysmmu_probe(struct platform_device *pdev) 569static int exynos_sysmmu_probe(struct platform_device *pdev)
570{ 570{
571 int irq, ret; 571 int irq, ret;
572 struct device *dev = &pdev->dev; 572 struct device *dev = &pdev->dev;
@@ -583,10 +583,8 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
583 return PTR_ERR(data->sfrbase); 583 return PTR_ERR(data->sfrbase);
584 584
585 irq = platform_get_irq(pdev, 0); 585 irq = platform_get_irq(pdev, 0);
586 if (irq <= 0) { 586 if (irq <= 0)
587 dev_err(dev, "Unable to find IRQ resource\n");
588 return irq; 587 return irq;
589 }
590 588
591 ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, 589 ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
592 dev_name(dev), data); 590 dev_name(dev), data);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b7454ca4a87c..87de0b975672 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,9 +41,11 @@
41#include <linux/dma-direct.h> 41#include <linux/dma-direct.h>
42#include <linux/crash_dump.h> 42#include <linux/crash_dump.h>
43#include <linux/numa.h> 43#include <linux/numa.h>
44#include <linux/swiotlb.h>
44#include <asm/irq_remapping.h> 45#include <asm/irq_remapping.h>
45#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
46#include <asm/iommu.h> 47#include <asm/iommu.h>
48#include <trace/events/intel_iommu.h>
47 49
48#include "irq_remapping.h" 50#include "irq_remapping.h"
49#include "intel-pasid.h" 51#include "intel-pasid.h"
@@ -339,11 +341,15 @@ static void domain_exit(struct dmar_domain *domain);
339static void domain_remove_dev_info(struct dmar_domain *domain); 341static void domain_remove_dev_info(struct dmar_domain *domain);
340static void dmar_remove_one_dev_info(struct device *dev); 342static void dmar_remove_one_dev_info(struct device *dev);
341static void __dmar_remove_one_dev_info(struct device_domain_info *info); 343static void __dmar_remove_one_dev_info(struct device_domain_info *info);
344static void domain_context_clear(struct intel_iommu *iommu,
345 struct device *dev);
342static int domain_detach_iommu(struct dmar_domain *domain, 346static int domain_detach_iommu(struct dmar_domain *domain,
343 struct intel_iommu *iommu); 347 struct intel_iommu *iommu);
344static bool device_is_rmrr_locked(struct device *dev); 348static bool device_is_rmrr_locked(struct device *dev);
345static int intel_iommu_attach_device(struct iommu_domain *domain, 349static int intel_iommu_attach_device(struct iommu_domain *domain,
346 struct device *dev); 350 struct device *dev);
351static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
352 dma_addr_t iova);
347 353
348#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON 354#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
349int dmar_disabled = 0; 355int dmar_disabled = 0;
@@ -360,6 +366,7 @@ static int dmar_forcedac;
360static int intel_iommu_strict; 366static int intel_iommu_strict;
361static int intel_iommu_superpage = 1; 367static int intel_iommu_superpage = 1;
362static int iommu_identity_mapping; 368static int iommu_identity_mapping;
369static int intel_no_bounce;
363 370
364#define IDENTMAP_ALL 1 371#define IDENTMAP_ALL 1
365#define IDENTMAP_GFX 2 372#define IDENTMAP_GFX 2
@@ -373,6 +380,9 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
373static DEFINE_SPINLOCK(device_domain_lock); 380static DEFINE_SPINLOCK(device_domain_lock);
374static LIST_HEAD(device_domain_list); 381static LIST_HEAD(device_domain_list);
375 382
383#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \
384 to_pci_dev(d)->untrusted)
385
376/* 386/*
377 * Iterate over elements in device_domain_list and call the specified 387 * Iterate over elements in device_domain_list and call the specified
378 * callback @fn against each element. 388 * callback @fn against each element.
@@ -455,6 +465,9 @@ static int __init intel_iommu_setup(char *str)
455 printk(KERN_INFO 465 printk(KERN_INFO
456 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); 466 "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
457 intel_iommu_tboot_noforce = 1; 467 intel_iommu_tboot_noforce = 1;
468 } else if (!strncmp(str, "nobounce", 8)) {
469 pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
470 intel_no_bounce = 1;
458 } 471 }
459 472
460 str += strcspn(str, ","); 473 str += strcspn(str, ",");
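
Since intel_iommu_setup() advances through the option string with strcspn(str, ","), the new token combines with the existing ones on the kernel command line, for example:

    intel_iommu=on,nobounce

As the pr_info warns, disabling the bounce pages trades away the sub-page DMA isolation that untrusted (for example Thunderbolt-attached) devices otherwise get.
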
@@ -2105,9 +2118,26 @@ out_unlock:
2105 return ret; 2118 return ret;
2106} 2119}
2107 2120
2121struct domain_context_mapping_data {
2122 struct dmar_domain *domain;
2123 struct intel_iommu *iommu;
2124 struct pasid_table *table;
2125};
2126
2127static int domain_context_mapping_cb(struct pci_dev *pdev,
2128 u16 alias, void *opaque)
2129{
2130 struct domain_context_mapping_data *data = opaque;
2131
2132 return domain_context_mapping_one(data->domain, data->iommu,
2133 data->table, PCI_BUS_NUM(alias),
2134 alias & 0xff);
2135}
2136
2108static int 2137static int
2109domain_context_mapping(struct dmar_domain *domain, struct device *dev) 2138domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2110{ 2139{
2140 struct domain_context_mapping_data data;
2111 struct pasid_table *table; 2141 struct pasid_table *table;
2112 struct intel_iommu *iommu; 2142 struct intel_iommu *iommu;
2113 u8 bus, devfn; 2143 u8 bus, devfn;
@@ -2117,7 +2147,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2117 return -ENODEV; 2147 return -ENODEV;
2118 2148
2119 table = intel_pasid_get_table(dev); 2149 table = intel_pasid_get_table(dev);
2120 return domain_context_mapping_one(domain, iommu, table, bus, devfn); 2150
2151 if (!dev_is_pci(dev))
2152 return domain_context_mapping_one(domain, iommu, table,
2153 bus, devfn);
2154
2155 data.domain = domain;
2156 data.iommu = iommu;
2157 data.table = table;
2158
2159 return pci_for_each_dma_alias(to_pci_dev(dev),
2160 &domain_context_mapping_cb, &data);
2121} 2161}
2122 2162
2123static int domain_context_mapped_cb(struct pci_dev *pdev, 2163static int domain_context_mapped_cb(struct pci_dev *pdev,
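
domain_context_mapping() now programs a context entry for every DMA alias of the device rather than only its own (bus, devfn); a device behind a PCIe-to-PCI bridge, for instance, issues requests with the bridge's requester ID. The callback-plus-cookie shape used here, sketched standalone (names are illustrative):

    #include <linux/pci.h>

    /* Sketch: pci_for_each_dma_alias() calls back once per alias;
     * the u16 requester ID splits into bus (high byte) and devfn
     * (low byte). A non-zero return stops the walk.
     */
    static int visit_alias(struct pci_dev *pdev, u16 alias, void *opaque)
    {
            u8 bus = PCI_BUS_NUM(alias);
            u8 devfn = alias & 0xff;

            dev_dbg(&pdev->dev, "alias %02x:%02x.%d\n",
                    bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            return 0;
    }
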
@@ -3267,7 +3307,7 @@ static int __init init_dmars(void)
3267 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); 3307 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3268 } 3308 }
3269 3309
3270 if (iommu_pass_through) 3310 if (iommu_default_passthrough())
3271 iommu_identity_mapping |= IDENTMAP_ALL; 3311 iommu_identity_mapping |= IDENTMAP_ALL;
3272 3312
3273#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA 3313#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@ -3505,6 +3545,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3505 3545
3506 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; 3546 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3507 start_paddr += paddr & ~PAGE_MASK; 3547 start_paddr += paddr & ~PAGE_MASK;
3548
3549 trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
3550
3508 return start_paddr; 3551 return start_paddr;
3509 3552
3510error: 3553error:
@@ -3560,10 +3603,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3560 if (dev_is_pci(dev)) 3603 if (dev_is_pci(dev))
3561 pdev = to_pci_dev(dev); 3604 pdev = to_pci_dev(dev);
3562 3605
3563 dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
3564
3565 freelist = domain_unmap(domain, start_pfn, last_pfn); 3606 freelist = domain_unmap(domain, start_pfn, last_pfn);
3566
3567 if (intel_iommu_strict || (pdev && pdev->untrusted) || 3607 if (intel_iommu_strict || (pdev && pdev->untrusted) ||
3568 !has_iova_flush_queue(&domain->iovad)) { 3608 !has_iova_flush_queue(&domain->iovad)) {
3569 iommu_flush_iotlb_psi(iommu, domain, start_pfn, 3609 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
@@ -3579,6 +3619,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3579 * cpu used up by the iotlb flush operation... 3619 * cpu used up by the iotlb flush operation...
3580 */ 3620 */
3581 } 3621 }
3622
3623 trace_unmap_single(dev, dev_addr, size);
3582} 3624}
3583 3625
3584static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, 3626static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
@@ -3669,6 +3711,8 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3669 } 3711 }
3670 3712
3671 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); 3713 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3714
3715 trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3672} 3716}
3673 3717
3674static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, 3718static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
@@ -3725,6 +3769,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
3725 return 0; 3769 return 0;
3726 } 3770 }
3727 3771
3772 trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
3773 sg_phys(sglist), size << VTD_PAGE_SHIFT);
3774
3728 return nelems; 3775 return nelems;
3729} 3776}
3730 3777
@@ -3740,6 +3787,252 @@ static const struct dma_map_ops intel_dma_ops = {
3740 .dma_supported = dma_direct_supported, 3787 .dma_supported = dma_direct_supported,
3741}; 3788};
3742 3789
3790static void
3791bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
3792 enum dma_data_direction dir, enum dma_sync_target target)
3793{
3794 struct dmar_domain *domain;
3795 phys_addr_t tlb_addr;
3796
3797 domain = find_domain(dev);
3798 if (WARN_ON(!domain))
3799 return;
3800
3801 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
3802 if (is_swiotlb_buffer(tlb_addr))
3803 swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
3804}
3805
3806static dma_addr_t
3807bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
3808 enum dma_data_direction dir, unsigned long attrs,
3809 u64 dma_mask)
3810{
3811 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3812 struct dmar_domain *domain;
3813 struct intel_iommu *iommu;
3814 unsigned long iova_pfn;
3815 unsigned long nrpages;
3816 phys_addr_t tlb_addr;
3817 int prot = 0;
3818 int ret;
3819
3820 domain = find_domain(dev);
3821 if (WARN_ON(dir == DMA_NONE || !domain))
3822 return DMA_MAPPING_ERROR;
3823
3824 iommu = domain_get_iommu(domain);
3825 if (WARN_ON(!iommu))
3826 return DMA_MAPPING_ERROR;
3827
3828 nrpages = aligned_nrpages(0, size);
3829 iova_pfn = intel_alloc_iova(dev, domain,
3830 dma_to_mm_pfn(nrpages), dma_mask);
3831 if (!iova_pfn)
3832 return DMA_MAPPING_ERROR;
3833
3834 /*
3835 * Check if DMAR supports zero-length reads on write only
3836 * mappings..
3837 */
3838 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3839 !cap_zlr(iommu->cap))
3840 prot |= DMA_PTE_READ;
3841 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3842 prot |= DMA_PTE_WRITE;
3843
3844 /*
3845 * If both the physical buffer start address and size are
3846 * page aligned, we don't need to use a bounce page.
3847 */
3848 if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
3849 tlb_addr = swiotlb_tbl_map_single(dev,
3850 __phys_to_dma(dev, io_tlb_start),
3851 paddr, size, aligned_size, dir, attrs);
3852 if (tlb_addr == DMA_MAPPING_ERROR) {
3853 goto swiotlb_error;
3854 } else {
3855 /* Cleanup the padding area. */
3856 void *padding_start = phys_to_virt(tlb_addr);
3857 size_t padding_size = aligned_size;
3858
3859 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
3860 (dir == DMA_TO_DEVICE ||
3861 dir == DMA_BIDIRECTIONAL)) {
3862 padding_start += size;
3863 padding_size -= size;
3864 }
3865
3866 memset(padding_start, 0, padding_size);
3867 }
3868 } else {
3869 tlb_addr = paddr;
3870 }
3871
3872 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3873 tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
3874 if (ret)
3875 goto mapping_error;
3876
3877 trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
3878
3879 return (phys_addr_t)iova_pfn << PAGE_SHIFT;
3880
3881mapping_error:
3882 if (is_swiotlb_buffer(tlb_addr))
3883 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3884 aligned_size, dir, attrs);
3885swiotlb_error:
3886 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3887 dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
3888 size, (unsigned long long)paddr, dir);
3889
3890 return DMA_MAPPING_ERROR;
3891}
3892
3893static void
3894bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
3895 enum dma_data_direction dir, unsigned long attrs)
3896{
3897 size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
3898 struct dmar_domain *domain;
3899 phys_addr_t tlb_addr;
3900
3901 domain = find_domain(dev);
3902 if (WARN_ON(!domain))
3903 return;
3904
3905 tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
3906 if (WARN_ON(!tlb_addr))
3907 return;
3908
3909 intel_unmap(dev, dev_addr, size);
3910 if (is_swiotlb_buffer(tlb_addr))
3911 swiotlb_tbl_unmap_single(dev, tlb_addr, size,
3912 aligned_size, dir, attrs);
3913
3914 trace_bounce_unmap_single(dev, dev_addr, size);
3915}
3916
3917static dma_addr_t
3918bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
3919 size_t size, enum dma_data_direction dir, unsigned long attrs)
3920{
3921 return bounce_map_single(dev, page_to_phys(page) + offset,
3922 size, dir, attrs, *dev->dma_mask);
3923}
3924
3925static dma_addr_t
3926bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
3927 enum dma_data_direction dir, unsigned long attrs)
3928{
3929 return bounce_map_single(dev, phys_addr, size,
3930 dir, attrs, *dev->dma_mask);
3931}
3932
3933static void
3934bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
3935 enum dma_data_direction dir, unsigned long attrs)
3936{
3937 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
3938}
3939
3940static void
3941bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
3942 enum dma_data_direction dir, unsigned long attrs)
3943{
3944 bounce_unmap_single(dev, dev_addr, size, dir, attrs);
3945}
3946
3947static void
3948bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3949 enum dma_data_direction dir, unsigned long attrs)
3950{
3951 struct scatterlist *sg;
3952 int i;
3953
3954 for_each_sg(sglist, sg, nelems, i)
3955 bounce_unmap_page(dev, sg->dma_address,
3956 sg_dma_len(sg), dir, attrs);
3957}
3958
3959static int
3960bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3961 enum dma_data_direction dir, unsigned long attrs)
3962{
3963 int i;
3964 struct scatterlist *sg;
3965
3966 for_each_sg(sglist, sg, nelems, i) {
3967 sg->dma_address = bounce_map_page(dev, sg_page(sg),
3968 sg->offset, sg->length,
3969 dir, attrs);
3970 if (sg->dma_address == DMA_MAPPING_ERROR)
3971 goto out_unmap;
3972 sg_dma_len(sg) = sg->length;
3973 }
3974
3975 return nelems;
3976
3977out_unmap:
3978 bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
3979 return 0;
3980}
3981
3982static void
3983bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
3984 size_t size, enum dma_data_direction dir)
3985{
3986 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
3987}
3988
3989static void
3990bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
3991 size_t size, enum dma_data_direction dir)
3992{
3993 bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
3994}
3995
3996static void
3997bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
3998 int nelems, enum dma_data_direction dir)
3999{
4000 struct scatterlist *sg;
4001 int i;
4002
4003 for_each_sg(sglist, sg, nelems, i)
4004 bounce_sync_single(dev, sg_dma_address(sg),
4005 sg_dma_len(sg), dir, SYNC_FOR_CPU);
4006}
4007
4008static void
4009bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
4010 int nelems, enum dma_data_direction dir)
4011{
4012 struct scatterlist *sg;
4013 int i;
4014
4015 for_each_sg(sglist, sg, nelems, i)
4016 bounce_sync_single(dev, sg_dma_address(sg),
4017 sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
4018}
4019
4020static const struct dma_map_ops bounce_dma_ops = {
4021 .alloc = intel_alloc_coherent,
4022 .free = intel_free_coherent,
4023 .map_sg = bounce_map_sg,
4024 .unmap_sg = bounce_unmap_sg,
4025 .map_page = bounce_map_page,
4026 .unmap_page = bounce_unmap_page,
4027 .sync_single_for_cpu = bounce_sync_single_for_cpu,
4028 .sync_single_for_device = bounce_sync_single_for_device,
4029 .sync_sg_for_cpu = bounce_sync_sg_for_cpu,
4030 .sync_sg_for_device = bounce_sync_sg_for_device,
4031 .map_resource = bounce_map_resource,
4032 .unmap_resource = bounce_unmap_resource,
4033 .dma_supported = dma_direct_supported,
4034};
4035
3743static inline int iommu_domain_cache_init(void) 4036static inline int iommu_domain_cache_init(void)
3744{ 4037{
3745 int ret = 0; 4038 int ret = 0;
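
One detail of bounce_map_single() above is worth spelling out: a buffer is bounced only when it is not page aligned at both ends, and IS_ALIGNED(paddr | size, VTD_PAGE_SIZE) tests the start and the length in one expression, since OR-ing the two values preserves any low-order bit set by either. A standalone sketch with worked values:

    #include <stdbool.h>
    #include <stdint.h>

    #define VTD_PAGE_SIZE 4096u

    /* Sketch: one alignment test for both fields.
     * needs_bounce(0x12340800, 0x200)  == true  (sub-page buffer)
     * needs_bounce(0x12340000, 0x2000) == false (maps in place)
     */
    static bool needs_bounce(uint64_t paddr, uint64_t size)
    {
            return (paddr | size) & (VTD_PAGE_SIZE - 1);
    }
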
@@ -4540,22 +4833,20 @@ const struct attribute_group *intel_iommu_groups[] = {
4540 NULL, 4833 NULL,
4541}; 4834};
4542 4835
4543static int __init platform_optin_force_iommu(void) 4836static inline bool has_untrusted_dev(void)
4544{ 4837{
4545 struct pci_dev *pdev = NULL; 4838 struct pci_dev *pdev = NULL;
4546 bool has_untrusted_dev = false;
4547 4839
4548 if (!dmar_platform_optin() || no_platform_optin) 4840 for_each_pci_dev(pdev)
4549 return 0; 4841 if (pdev->untrusted)
4842 return true;
4550 4843
4551 for_each_pci_dev(pdev) { 4844 return false;
4552 if (pdev->untrusted) { 4845}
4553 has_untrusted_dev = true;
4554 break;
4555 }
4556 }
4557 4846
4558 if (!has_untrusted_dev) 4847static int __init platform_optin_force_iommu(void)
4848{
4849 if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
4559 return 0; 4850 return 0;
4560 4851
4561 if (no_iommu || dmar_disabled) 4852 if (no_iommu || dmar_disabled)
@@ -4569,9 +4860,6 @@ static int __init platform_optin_force_iommu(void)
4569 iommu_identity_mapping |= IDENTMAP_ALL; 4860 iommu_identity_mapping |= IDENTMAP_ALL;
4570 4861
4571 dmar_disabled = 0; 4862 dmar_disabled = 0;
4572#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4573 swiotlb = 0;
4574#endif
4575 no_iommu = 0; 4863 no_iommu = 0;
4576 4864
4577 return 1; 4865 return 1;
@@ -4711,7 +4999,14 @@ int __init intel_iommu_init(void)
4711 up_write(&dmar_global_lock); 4999 up_write(&dmar_global_lock);
4712 5000
4713#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB) 5001#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
4714 swiotlb = 0; 5002 /*
5003 * If the system has no untrusted devices, or the user has chosen
5004 * to disable the bounce page mechanism, we don't need swiotlb.
5005 * Mark this so that the pre-allocated bounce pages can be
5006 * released later.
5007 */
5008 if (!has_untrusted_dev() || intel_no_bounce)
5009 swiotlb = 0;
4715#endif 5010#endif
4716 dma_ops = &intel_dma_ops; 5011 dma_ops = &intel_dma_ops;
4717 5012
@@ -4759,6 +5054,28 @@ out_free_dmar:
4759 return ret; 5054 return ret;
4760} 5055}
4761 5056
5057static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
5058{
5059 struct intel_iommu *iommu = opaque;
5060
5061 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
5062 return 0;
5063}
5064
5065/*
5066 * NB - intel-iommu lacks any sort of reference counting for the users of
5067 * dependent devices. If multiple endpoints have intersecting dependent
5068 * devices, unbinding the driver from any one of them will possibly leave
5069 * the others unable to operate.
5070 */
5071static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
5072{
5073 if (!iommu || !dev || !dev_is_pci(dev))
5074 return;
5075
5076 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
5077}
5078
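domain_context_clear() above relies on the pci_for_each_dma_alias() callback walk,
which visits every requester ID the device may present (its own RID plus any bridge
aliases). A hedged sketch of the same callback pattern; dump_alias_cb is a
hypothetical helper:

#include <linux/pci.h>

static int dump_alias_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
        pci_info(pdev, "DMA alias %02x:%02x.%d\n", PCI_BUS_NUM(alias),
                 PCI_SLOT(alias & 0xff), PCI_FUNC(alias & 0xff));
        return 0;       /* nonzero would stop the walk early */
}

/* usage: pci_for_each_dma_alias(pdev, dump_alias_cb, NULL); */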
4762static void __dmar_remove_one_dev_info(struct device_domain_info *info) 5079static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4763{ 5080{
4764 struct dmar_domain *domain; 5081 struct dmar_domain *domain;
@@ -4779,7 +5096,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4779 PASID_RID2PASID); 5096 PASID_RID2PASID);
4780 5097
4781 iommu_disable_dev_iotlb(info); 5098 iommu_disable_dev_iotlb(info);
4782 domain_context_clear_one(iommu, info->bus, info->devfn); 5099 domain_context_clear(iommu, info->dev);
4783 intel_pasid_free_table(info->dev); 5100 intel_pasid_free_table(info->dev);
4784 } 5101 }
4785 5102
@@ -5310,6 +5627,11 @@ static int intel_iommu_add_device(struct device *dev)
5310 } 5627 }
5311 } 5628 }
5312 5629
5630 if (device_needs_bounce(dev)) {
5631 dev_info(dev, "Using Intel IOMMU bounce page dma_ops\n");
5632 set_dma_ops(dev, &bounce_dma_ops);
5633 }
5634
5313 return 0; 5635 return 0;
5314} 5636}
5315 5637
@@ -5327,6 +5649,9 @@ static void intel_iommu_remove_device(struct device *dev)
5327 iommu_group_remove_device(dev); 5649 iommu_group_remove_device(dev);
5328 5650
5329 iommu_device_unlink(&iommu->iommu, dev); 5651 iommu_device_unlink(&iommu->iommu, dev);
5652
5653 if (device_needs_bounce(dev))
5654 set_dma_ops(dev, NULL);
5330} 5655}
5331 5656
5332static void intel_iommu_get_resv_regions(struct device *device, 5657static void intel_iommu_get_resv_regions(struct device *device,
@@ -5640,20 +5965,46 @@ const struct iommu_ops intel_iommu_ops = {
5640 .pgsize_bitmap = INTEL_IOMMU_PGSIZES, 5965 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
5641}; 5966};
5642 5967
5643static void quirk_iommu_g4x_gfx(struct pci_dev *dev) 5968static void quirk_iommu_igfx(struct pci_dev *dev)
5644{ 5969{
5645 /* G4x/GM45 integrated gfx dmar support is totally busted. */
5646 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n"); 5970 pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
5647 dmar_map_gfx = 0; 5971 dmar_map_gfx = 0;
5648} 5972}
5649 5973
5650DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx); 5974/* G4x/GM45 integrated gfx dmar support is totally busted. */
5651DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx); 5975DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
5652DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx); 5976DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
5653DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx); 5977DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
5654DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx); 5978DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
5655DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx); 5979DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
5656DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx); 5980DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
5981DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
5982
5983/* Broadwell igfx malfunctions with dmar */
5984DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
5985DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
5986DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
5987DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
5988DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
5989DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
5990DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
5991DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
5992DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
5993DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
5994DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
5995DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
5996DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
5997DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
5998DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
5999DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
6000DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
6001DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
6002DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
6003DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
6004DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
6005DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
6006DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
6007DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
5657 6008
5658static void quirk_iommu_rwbf(struct pci_dev *dev) 6009static void quirk_iommu_rwbf(struct pci_dev *dev)
5659{ 6010{
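Taken together, the intel-iommu.c changes above swap DMA ops per device:
intel_iommu_add_device() installs bounce_dma_ops for untrusted devices and
intel_iommu_remove_device() restores the default. A hedged sketch of that swap
(set_dma_ops() is the generic dma-mapping helper; "needs_bounce" stands in for
the driver's device_needs_bounce() predicate):

#include <linux/dma-mapping.h>

static void install_bounce_ops(struct device *dev, bool needs_bounce,
                               const struct dma_map_ops *bounce_ops)
{
        /* NULL falls back to the bus-default (IOMMU/direct) dma_map_ops. */
        set_dma_ops(dev, needs_bounce ? bounce_ops : NULL);
}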
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 780de0caafe8..9b159132405d 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
100} 100}
101 101
102static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev, 102static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
103 unsigned long address, unsigned long pages, int ih, int gl) 103 unsigned long address, unsigned long pages, int ih)
104{ 104{
105 struct qi_desc desc; 105 struct qi_desc desc;
106 106
107 if (pages == -1) { 107 /*
108 /* For global kernel pages we have to flush them in *all* PASIDs 108 * Do PASID-granularity IOTLB invalidation if the page-selective capability is
109 * because that's the only option the hardware gives us. Despite 109 * not available.
110 * the fact that they are actually only accessible through one. */ 110 */
111 if (gl) 111 if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
112 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | 112 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
113 QI_EIOTLB_DID(sdev->did) | 113 QI_EIOTLB_DID(sdev->did) |
114 QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | 114 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
115 QI_EIOTLB_TYPE; 115 QI_EIOTLB_TYPE;
116 else
117 desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
118 QI_EIOTLB_DID(sdev->did) |
119 QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
120 QI_EIOTLB_TYPE;
121 desc.qw1 = 0; 116 desc.qw1 = 0;
122 } else { 117 } else {
123 int mask = ilog2(__roundup_pow_of_two(pages)); 118 int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
127 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | 122 QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
128 QI_EIOTLB_TYPE; 123 QI_EIOTLB_TYPE;
129 desc.qw1 = QI_EIOTLB_ADDR(address) | 124 desc.qw1 = QI_EIOTLB_ADDR(address) |
130 QI_EIOTLB_GL(gl) |
131 QI_EIOTLB_IH(ih) | 125 QI_EIOTLB_IH(ih) |
132 QI_EIOTLB_AM(mask); 126 QI_EIOTLB_AM(mask);
133 } 127 }
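For the page-selective branch above, the QI_EIOTLB_AM field encodes the invalidation
span as a power of two: mask = ilog2(__roundup_pow_of_two(pages)). A hedged
arithmetic sketch (qi_am_for_pages is a hypothetical helper):

#include <linux/log2.h>

static unsigned int qi_am_for_pages(unsigned long pages)
{
        /* e.g. pages = 300 rounds up to 512, so AM = 9 (a 512-page span) */
        return ilog2(__roundup_pow_of_two(pages));
}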
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
162} 156}
163 157
164static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, 158static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
165 unsigned long pages, int ih, int gl) 159 unsigned long pages, int ih)
166{ 160{
167 struct intel_svm_dev *sdev; 161 struct intel_svm_dev *sdev;
168 162
169 rcu_read_lock(); 163 rcu_read_lock();
170 list_for_each_entry_rcu(sdev, &svm->devs, list) 164 list_for_each_entry_rcu(sdev, &svm->devs, list)
171 intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); 165 intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
172 rcu_read_unlock(); 166 rcu_read_unlock();
173} 167}
174 168
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
180 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 174 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
181 175
182 intel_flush_svm_range(svm, start, 176 intel_flush_svm_range(svm, start,
183 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0); 177 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
184} 178}
185 179
186static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) 180static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
203 rcu_read_lock(); 197 rcu_read_lock();
204 list_for_each_entry_rcu(sdev, &svm->devs, list) { 198 list_for_each_entry_rcu(sdev, &svm->devs, list) {
205 intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid); 199 intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
206 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); 200 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
207 } 201 }
208 rcu_read_unlock(); 202 rcu_read_unlock();
209 203
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
425 * large and has to be physically contiguous. So it's 419 * large and has to be physically contiguous. So it's
426 * hard to be as defensive as we might like. */ 420 * hard to be as defensive as we might like. */
427 intel_pasid_tear_down_entry(iommu, dev, svm->pasid); 421 intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
428 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); 422 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
429 kfree_rcu(sdev, rcu); 423 kfree_rcu(sdev, rcu);
430 424
431 if (list_empty(&svm->devs)) { 425 if (list_empty(&svm->devs)) {
diff --git a/drivers/iommu/intel-trace.c b/drivers/iommu/intel-trace.c
new file mode 100644
index 000000000000..bfb6a6e37a88
--- /dev/null
+++ b/drivers/iommu/intel-trace.c
@@ -0,0 +1,14 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Intel IOMMU trace support
4 *
5 * Copyright (C) 2019 Intel Corporation
6 *
7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
8 */
9
10#include <linux/string.h>
11#include <linux/types.h>
12
13#define CREATE_TRACE_POINTS
14#include <trace/events/intel_iommu.h>
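intel-trace.c uses the standard tracepoint idiom: exactly one compilation unit
defines CREATE_TRACE_POINTS before including the event header. The real
trace/events/intel_iommu.h is not part of this diff, so the event below is a purely
hypothetical sketch of what such a header looks like:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>

TRACE_EVENT(bounce_map_example,         /* hypothetical event name */
        TP_PROTO(unsigned long dev_addr, size_t size),
        TP_ARGS(dev_addr, size),
        TP_STRUCT__entry(
                __field(unsigned long, dev_addr)
                __field(size_t, size)
        ),
        TP_fast_assign(
                __entry->dev_addr = dev_addr;
                __entry->size = size;
        ),
        TP_printk("dev_addr=%#lx size=%zu", __entry->dev_addr, __entry->size)
);

#endif /* _TRACE_INTEL_IOMMU_H */

#include <trace/define_trace.h>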
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 4786ca061e31..81e43c1df7ec 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -376,13 +376,13 @@ static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
376{ 376{
377 struct set_msi_sid_data *data = opaque; 377 struct set_msi_sid_data *data = opaque;
378 378
379 if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
380 data->busmatch_count++;
381
379 data->pdev = pdev; 382 data->pdev = pdev;
380 data->alias = alias; 383 data->alias = alias;
381 data->count++; 384 data->count++;
382 385
383 if (PCI_BUS_NUM(alias) == pdev->bus->number)
384 data->busmatch_count++;
385
386 return 0; 386 return 0;
387} 387}
388 388
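The reordering above changes what busmatch_count means: the first alias always
counts, and each later alias counts only when it shares a bus with the previously
recorded alias. A hedged standalone sketch of that accounting (count_busmatch is a
hypothetical helper):

#include <linux/pci.h>

static int count_busmatch(const u16 *aliases, int n)
{
        int i, count = 0, busmatch_count = 0;
        u16 last_alias = 0;

        for (i = 0; i < n; i++) {
                if (count == 0 ||
                    PCI_BUS_NUM(aliases[i]) == PCI_BUS_NUM(last_alias))
                        busmatch_count++;
                last_alias = aliases[i];
                count++;
        }
        return busmatch_count;
}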
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 18e7d212c7de..4cb394937700 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -112,7 +112,9 @@
112#define ARM_V7S_TEX_MASK 0x7 112#define ARM_V7S_TEX_MASK 0x7
113#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT) 113#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
114 114
115#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */ 115/* MediaTek extends two PTE bits to carry PA bit32/bit33 */
116#define ARM_V7S_ATTR_MTK_PA_BIT32 BIT(9)
117#define ARM_V7S_ATTR_MTK_PA_BIT33 BIT(4)
116 118
117/* *well, except for TEX on level 2 large pages, of course :( */ 119/* *well, except for TEX on level 2 large pages, of course :( */
118#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6 120#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
@@ -169,18 +171,62 @@ struct arm_v7s_io_pgtable {
169 spinlock_t split_lock; 171 spinlock_t split_lock;
170}; 172};
171 173
174static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);
175
172static dma_addr_t __arm_v7s_dma_addr(void *pages) 176static dma_addr_t __arm_v7s_dma_addr(void *pages)
173{ 177{
174 return (dma_addr_t)virt_to_phys(pages); 178 return (dma_addr_t)virt_to_phys(pages);
175} 179}
176 180
177static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) 181static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
182{
183 return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
184 (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
185}
186
187static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
188 struct io_pgtable_cfg *cfg)
189{
190 arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);
191
192 if (!arm_v7s_is_mtk_enabled(cfg))
193 return pte;
194
195 if (paddr & BIT_ULL(32))
196 pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
197 if (paddr & BIT_ULL(33))
198 pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
199 return pte;
200}
201
202static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
203 struct io_pgtable_cfg *cfg)
178{ 204{
205 arm_v7s_iopte mask;
206 phys_addr_t paddr;
207
179 if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) 208 if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
180 pte &= ARM_V7S_TABLE_MASK; 209 mask = ARM_V7S_TABLE_MASK;
210 else if (arm_v7s_pte_is_cont(pte, lvl))
211 mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
181 else 212 else
182 pte &= ARM_V7S_LVL_MASK(lvl); 213 mask = ARM_V7S_LVL_MASK(lvl);
183 return phys_to_virt(pte); 214
215 paddr = pte & mask;
216 if (!arm_v7s_is_mtk_enabled(cfg))
217 return paddr;
218
219 if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
220 paddr |= BIT_ULL(32);
221 if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
222 paddr |= BIT_ULL(33);
223 return paddr;
224}
225
226static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
227 struct arm_v7s_io_pgtable *data)
228{
229 return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
184} 230}
185 231
186static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, 232static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
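The MediaTek extension above steals two software-usable PTE bits to carry PA bits
32/33, since a v7s descriptor only holds a 32-bit output address. A hedged sketch
mirroring the MTK path of paddr_to_iopte(), with lvl_mask standing in for
ARM_V7S_LVL_MASK(lvl):

#include <linux/bits.h>
#include <linux/types.h>

static u32 pack_mtk_pa(u64 paddr, u32 lvl_mask)
{
        u32 pte = paddr & lvl_mask;

        if (paddr & BIT_ULL(32))
                pte |= BIT(9);  /* ARM_V7S_ATTR_MTK_PA_BIT32 */
        if (paddr & BIT_ULL(33))
                pte |= BIT(4);  /* ARM_V7S_ATTR_MTK_PA_BIT33 */
        return pte;
}

For example, paddr 0x1_4000_0000 with a section mask of 0xfff00000 packs to
0x40000200, i.e. 0x40000000 | BIT(9).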
@@ -295,9 +341,6 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
295 if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) 341 if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
296 pte |= ARM_V7S_ATTR_NS_SECTION; 342 pte |= ARM_V7S_ATTR_NS_SECTION;
297 343
298 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
299 pte |= ARM_V7S_ATTR_MTK_4GB;
300
301 return pte; 344 return pte;
302} 345}
303 346
@@ -397,7 +440,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
397 if (num_entries > 1) 440 if (num_entries > 1)
398 pte = arm_v7s_pte_to_cont(pte, lvl); 441 pte = arm_v7s_pte_to_cont(pte, lvl);
399 442
400 pte |= paddr & ARM_V7S_LVL_MASK(lvl); 443 pte |= paddr_to_iopte(paddr, lvl, cfg);
401 444
402 __arm_v7s_set_pte(ptep, pte, num_entries, cfg); 445 __arm_v7s_set_pte(ptep, pte, num_entries, cfg);
403 return 0; 446 return 0;
@@ -463,7 +506,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
463 } 506 }
464 507
465 if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { 508 if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
466 cptep = iopte_deref(pte, lvl); 509 cptep = iopte_deref(pte, lvl, data);
467 } else if (pte) { 510 } else if (pte) {
468 /* We require an unmap first */ 511 /* We require an unmap first */
469 WARN_ON(!selftest_running); 512 WARN_ON(!selftest_running);
@@ -485,7 +528,8 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
485 if (!(prot & (IOMMU_READ | IOMMU_WRITE))) 528 if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
486 return 0; 529 return 0;
487 530
488 if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr))) 531 if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
532 paddr >= (1ULL << data->iop.cfg.oas)))
489 return -ERANGE; 533 return -ERANGE;
490 534
491 ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); 535 ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
@@ -512,7 +556,8 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop)
512 arm_v7s_iopte pte = data->pgd[i]; 556 arm_v7s_iopte pte = data->pgd[i];
513 557
514 if (ARM_V7S_PTE_IS_TABLE(pte, 1)) 558 if (ARM_V7S_PTE_IS_TABLE(pte, 1))
515 __arm_v7s_free_table(iopte_deref(pte, 1), 2, data); 559 __arm_v7s_free_table(iopte_deref(pte, 1, data),
560 2, data);
516 } 561 }
517 __arm_v7s_free_table(data->pgd, 1, data); 562 __arm_v7s_free_table(data->pgd, 1, data);
518 kmem_cache_destroy(data->l2_tables); 563 kmem_cache_destroy(data->l2_tables);
@@ -582,7 +627,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
582 if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) 627 if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
583 return 0; 628 return 0;
584 629
585 tablep = iopte_deref(pte, 1); 630 tablep = iopte_deref(pte, 1, data);
586 return __arm_v7s_unmap(data, gather, iova, size, 2, tablep); 631 return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
587 } 632 }
588 633
@@ -640,7 +685,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
640 /* Also flush any partial walks */ 685 /* Also flush any partial walks */
641 io_pgtable_tlb_flush_walk(iop, iova, blk_size, 686 io_pgtable_tlb_flush_walk(iop, iova, blk_size,
642 ARM_V7S_BLOCK_SIZE(lvl + 1)); 687 ARM_V7S_BLOCK_SIZE(lvl + 1));
643 ptep = iopte_deref(pte[i], lvl); 688 ptep = iopte_deref(pte[i], lvl, data);
644 __arm_v7s_free_table(ptep, lvl + 1, data); 689 __arm_v7s_free_table(ptep, lvl + 1, data);
645 } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) { 690 } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
646 /* 691 /*
@@ -665,7 +710,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
665 } 710 }
666 711
667 /* Keep on walkin' */ 712 /* Keep on walkin' */
668 ptep = iopte_deref(pte[0], lvl); 713 ptep = iopte_deref(pte[0], lvl, data);
669 return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep); 714 return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
670} 715}
671 716
@@ -691,7 +736,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
691 do { 736 do {
692 ptep += ARM_V7S_LVL_IDX(iova, ++lvl); 737 ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
693 pte = READ_ONCE(*ptep); 738 pte = READ_ONCE(*ptep);
694 ptep = iopte_deref(pte, lvl); 739 ptep = iopte_deref(pte, lvl, data);
695 } while (ARM_V7S_PTE_IS_TABLE(pte, lvl)); 740 } while (ARM_V7S_PTE_IS_TABLE(pte, lvl));
696 741
697 if (!ARM_V7S_PTE_IS_VALID(pte)) 742 if (!ARM_V7S_PTE_IS_VALID(pte))
@@ -700,7 +745,7 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
700 mask = ARM_V7S_LVL_MASK(lvl); 745 mask = ARM_V7S_LVL_MASK(lvl);
701 if (arm_v7s_pte_is_cont(pte, lvl)) 746 if (arm_v7s_pte_is_cont(pte, lvl))
702 mask *= ARM_V7S_CONT_PAGES; 747 mask *= ARM_V7S_CONT_PAGES;
703 return (pte & mask) | (iova & ~mask); 748 return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
704} 749}
705 750
706static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, 751static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
@@ -708,18 +753,21 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
708{ 753{
709 struct arm_v7s_io_pgtable *data; 754 struct arm_v7s_io_pgtable *data;
710 755
711 if (cfg->ias > ARM_V7S_ADDR_BITS || cfg->oas > ARM_V7S_ADDR_BITS) 756 if (cfg->ias > ARM_V7S_ADDR_BITS)
757 return NULL;
758
759 if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
712 return NULL; 760 return NULL;
713 761
714 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | 762 if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
715 IO_PGTABLE_QUIRK_NO_PERMS | 763 IO_PGTABLE_QUIRK_NO_PERMS |
716 IO_PGTABLE_QUIRK_TLBI_ON_MAP | 764 IO_PGTABLE_QUIRK_TLBI_ON_MAP |
717 IO_PGTABLE_QUIRK_ARM_MTK_4GB | 765 IO_PGTABLE_QUIRK_ARM_MTK_EXT |
718 IO_PGTABLE_QUIRK_NON_STRICT)) 766 IO_PGTABLE_QUIRK_NON_STRICT))
719 return NULL; 767 return NULL;
720 768
721 /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */ 769 /* If ARM_MTK_EXT is enabled, the NO_PERMS is also expected. */
722 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB && 770 if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
723 !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS)) 771 !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
724 return NULL; 772 return NULL;
725 773
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 70bfbcc09248..d658c7c6a2ab 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -26,12 +26,10 @@
26 26
27static struct kset *iommu_group_kset; 27static struct kset *iommu_group_kset;
28static DEFINE_IDA(iommu_group_ida); 28static DEFINE_IDA(iommu_group_ida);
29#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH 29
30static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY; 30static unsigned int iommu_def_domain_type __read_mostly;
31#else
32static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
33#endif
34static bool iommu_dma_strict __read_mostly = true; 31static bool iommu_dma_strict __read_mostly = true;
32static u32 iommu_cmd_line __read_mostly;
35 33
36struct iommu_group { 34struct iommu_group {
37 struct kobject kobj; 35 struct kobject kobj;
@@ -68,6 +66,18 @@ static const char * const iommu_group_resv_type_string[] = {
68 [IOMMU_RESV_SW_MSI] = "msi", 66 [IOMMU_RESV_SW_MSI] = "msi",
69}; 67};
70 68
69#define IOMMU_CMD_LINE_DMA_API BIT(0)
70
71static void iommu_set_cmd_line_dma_api(void)
72{
73 iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
74}
75
76static bool iommu_cmd_line_dma_api(void)
77{
78 return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
79}
80
71#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ 81#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
72struct iommu_group_attribute iommu_group_attr_##_name = \ 82struct iommu_group_attribute iommu_group_attr_##_name = \
73 __ATTR(_name, _mode, _show, _store) 83 __ATTR(_name, _mode, _show, _store)
@@ -80,12 +90,55 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
80static LIST_HEAD(iommu_device_list); 90static LIST_HEAD(iommu_device_list);
81static DEFINE_SPINLOCK(iommu_device_lock); 91static DEFINE_SPINLOCK(iommu_device_lock);
82 92
93/*
94 * Use a function instead of an array here because the domain-type is a
95 * bit-field, so an array would waste memory.
96 */
97static const char *iommu_domain_type_str(unsigned int t)
98{
99 switch (t) {
100 case IOMMU_DOMAIN_BLOCKED:
101 return "Blocked";
102 case IOMMU_DOMAIN_IDENTITY:
103 return "Passthrough";
104 case IOMMU_DOMAIN_UNMANAGED:
105 return "Unmanaged";
106 case IOMMU_DOMAIN_DMA:
107 return "Translated";
108 default:
109 return "Unknown";
110 }
111}
112
113static int __init iommu_subsys_init(void)
114{
115 bool cmd_line = iommu_cmd_line_dma_api();
116
117 if (!cmd_line) {
118 if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
119 iommu_set_default_passthrough(false);
120 else
121 iommu_set_default_translated(false);
122
123 if (iommu_default_passthrough() && mem_encrypt_active()) {
124 pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
125 iommu_set_default_translated(false);
126 }
127 }
128
129 pr_info("Default domain type: %s %s\n",
130 iommu_domain_type_str(iommu_def_domain_type),
131 cmd_line ? "(set via kernel command line)" : "");
132
133 return 0;
134}
135subsys_initcall(iommu_subsys_init);
136
83int iommu_device_register(struct iommu_device *iommu) 137int iommu_device_register(struct iommu_device *iommu)
84{ 138{
85 spin_lock(&iommu_device_lock); 139 spin_lock(&iommu_device_lock);
86 list_add_tail(&iommu->list, &iommu_device_list); 140 list_add_tail(&iommu->list, &iommu_device_list);
87 spin_unlock(&iommu_device_lock); 141 spin_unlock(&iommu_device_lock);
88
89 return 0; 142 return 0;
90} 143}
91 144
@@ -165,7 +218,11 @@ static int __init iommu_set_def_domain_type(char *str)
165 if (ret) 218 if (ret)
166 return ret; 219 return ret;
167 220
168 iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA; 221 if (pt)
222 iommu_set_default_passthrough(true);
223 else
224 iommu_set_default_translated(true);
225
169 return 0; 226 return 0;
170} 227}
171early_param("iommu.passthrough", iommu_set_def_domain_type); 228early_param("iommu.passthrough", iommu_set_def_domain_type);
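For reference, the early_param() hook above is what parses the boot parameter; a
usage example (values per the parser above, not part of this diff):

    iommu.passthrough=1    selects the identity (passthrough) default domain
    iommu.passthrough=0    selects the DMA-translated default domain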
@@ -229,60 +286,58 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
229 * @new: new region to insert 286 * @new: new region to insert
230 * @regions: list of regions 287 * @regions: list of regions
231 * 288 *
232 * The new element is sorted by address with respect to the other 289 * Elements are sorted by start address and overlapping segments
233 * regions of the same type. In case it overlaps with another 290 * of the same type are merged.
234 * region of the same type, regions are merged. In case it
235 * overlaps with another region of different type, regions are
236 * not merged.
237 */ 291 */
238static int iommu_insert_resv_region(struct iommu_resv_region *new, 292int iommu_insert_resv_region(struct iommu_resv_region *new,
239 struct list_head *regions) 293 struct list_head *regions)
240{ 294{
241 struct iommu_resv_region *region; 295 struct iommu_resv_region *iter, *tmp, *nr, *top;
242 phys_addr_t start = new->start; 296 LIST_HEAD(stack);
243 phys_addr_t end = new->start + new->length - 1; 297
244 struct list_head *pos = regions->next; 298 nr = iommu_alloc_resv_region(new->start, new->length,
245 299 new->prot, new->type);
246 while (pos != regions) { 300 if (!nr)
247 struct iommu_resv_region *entry = 301 return -ENOMEM;
248 list_entry(pos, struct iommu_resv_region, list); 302
249 phys_addr_t a = entry->start; 303 /* First add the new element based on start address sorting */
250 phys_addr_t b = entry->start + entry->length - 1; 304 list_for_each_entry(iter, regions, list) {
251 int type = entry->type; 305 if (nr->start < iter->start ||
252 306 (nr->start == iter->start && nr->type <= iter->type))
253 if (end < a) { 307 break;
254 goto insert; 308 }
255 } else if (start > b) { 309 list_add_tail(&nr->list, &iter->list);
256 pos = pos->next; 310
257 } else if ((start >= a) && (end <= b)) { 311 /* Merge overlapping segments of type nr->type in @regions, if any */
258 if (new->type == type) 312 list_for_each_entry_safe(iter, tmp, regions, list) {
259 return 0; 313 phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
260 else 314
261 pos = pos->next; 315 /* no merge needed on elements of different types than @nr */
316 if (iter->type != nr->type) {
317 list_move_tail(&iter->list, &stack);
318 continue;
319 }
320
321 /* look for the last stack element of same type as @iter */
322 list_for_each_entry_reverse(top, &stack, list)
323 if (top->type == iter->type)
324 goto check_overlap;
325
326 list_move_tail(&iter->list, &stack);
327 continue;
328
329check_overlap:
330 top_end = top->start + top->length - 1;
331
332 if (iter->start > top_end + 1) {
333 list_move_tail(&iter->list, &stack);
262 } else { 334 } else {
263 if (new->type == type) { 335 top->length = max(top_end, iter_end) - top->start + 1;
264 phys_addr_t new_start = min(a, start); 336 list_del(&iter->list);
265 phys_addr_t new_end = max(b, end); 337 kfree(iter);
266 int ret;
267
268 list_del(&entry->list);
269 entry->start = new_start;
270 entry->length = new_end - new_start + 1;
271 ret = iommu_insert_resv_region(entry, regions);
272 kfree(entry);
273 return ret;
274 } else {
275 pos = pos->next;
276 }
277 } 338 }
278 } 339 }
279insert: 340 list_splice(&stack, regions);
280 region = iommu_alloc_resv_region(new->start, new->length,
281 new->prot, new->type);
282 if (!region)
283 return -ENOMEM;
284
285 list_add_tail(&region->list, pos);
286 return 0; 341 return 0;
287} 342}
288 343
@@ -2145,7 +2200,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
2145 2200
2146 mutex_lock(&group->mutex); 2201 mutex_lock(&group->mutex);
2147 2202
2148 /* Check if the default domain is already direct mapped */
2149 ret = 0; 2203 ret = 0;
2150 if (group->default_domain && group->default_domain->type == type) 2204 if (group->default_domain && group->default_domain->type == type)
2151 goto out; 2205 goto out;
@@ -2155,7 +2209,6 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
2155 if (iommu_group_device_count(group) != 1) 2209 if (iommu_group_device_count(group) != 1)
2156 goto out; 2210 goto out;
2157 2211
2158 /* Allocate a direct mapped domain */
2159 ret = -ENOMEM; 2212 ret = -ENOMEM;
2160 domain = __iommu_domain_alloc(dev->bus, type); 2213 domain = __iommu_domain_alloc(dev->bus, type);
2161 if (!domain) 2214 if (!domain)
@@ -2170,7 +2223,7 @@ request_default_domain_for_dev(struct device *dev, unsigned long type)
2170 2223
2171 iommu_group_create_direct_mappings(group, dev); 2224 iommu_group_create_direct_mappings(group, dev);
2172 2225
2173 /* Make the direct mapped domain the default for this group */ 2226 /* Make the domain the default for this group */
2174 if (group->default_domain) 2227 if (group->default_domain)
2175 iommu_domain_free(group->default_domain); 2228 iommu_domain_free(group->default_domain);
2176 group->default_domain = domain; 2229 group->default_domain = domain;
@@ -2198,6 +2251,28 @@ int iommu_request_dma_domain_for_dev(struct device *dev)
2198 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA); 2251 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2199} 2252}
2200 2253
2254void iommu_set_default_passthrough(bool cmd_line)
2255{
2256 if (cmd_line)
2257 iommu_set_cmd_line_dma_api();
2258
2259 iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2260}
2261
2262void iommu_set_default_translated(bool cmd_line)
2263{
2264 if (cmd_line)
2265 iommu_set_cmd_line_dma_api();
2266
2267 iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2268}
2269
2270bool iommu_default_passthrough(void)
2271{
2272 return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2273}
2274EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2275
2201const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 2276const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2202{ 2277{
2203 const struct iommu_ops *ops = NULL; 2278 const struct iommu_ops *ops = NULL;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 3e1a8a675572..41c605b0058f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -577,7 +577,9 @@ void queue_iova(struct iova_domain *iovad,
577 577
578 spin_unlock_irqrestore(&fq->lock, flags); 578 spin_unlock_irqrestore(&fq->lock, flags);
579 579
580 if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0) 580 /* Avoid false sharing as much as possible. */
581 if (!atomic_read(&iovad->fq_timer_on) &&
582 !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
581 mod_timer(&iovad->fq_timer, 583 mod_timer(&iovad->fq_timer,
582 jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT)); 584 jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
583} 585}
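The extra atomic_read() above is the classic test-and-test-and-set pattern: the
cheap read filters the common "timer already armed" case so the shared cacheline is
not written by a cmpxchg on every queue_iova() call. A hedged minimal sketch of the
pattern:

#include <linux/atomic.h>

static bool try_arm_once(atomic_t *armed)
{
        if (atomic_read(armed))                 /* cheap shared read */
                return false;
        /* only one caller wins the 0 -> 1 transition */
        return atomic_cmpxchg(armed, 0, 1) == 0;
}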
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 76a8ec343d53..9da8309f7170 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -49,6 +49,7 @@ struct ipmmu_features {
49 bool setup_imbuscr; 49 bool setup_imbuscr;
50 bool twobit_imttbcr_sl0; 50 bool twobit_imttbcr_sl0;
51 bool reserved_context; 51 bool reserved_context;
52 bool cache_snoop;
52}; 53};
53 54
54struct ipmmu_vmsa_device { 55struct ipmmu_vmsa_device {
@@ -115,45 +116,44 @@ static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
115#define IMTTBCR 0x0008 116#define IMTTBCR 0x0008
116#define IMTTBCR_EAE (1 << 31) 117#define IMTTBCR_EAE (1 << 31)
117#define IMTTBCR_PMB (1 << 30) 118#define IMTTBCR_PMB (1 << 30)
118#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) 119#define IMTTBCR_SH1_NON_SHAREABLE (0 << 28) /* R-Car Gen2 only */
119#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) 120#define IMTTBCR_SH1_OUTER_SHAREABLE (2 << 28) /* R-Car Gen2 only */
120#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) 121#define IMTTBCR_SH1_INNER_SHAREABLE (3 << 28) /* R-Car Gen2 only */
121#define IMTTBCR_SH1_MASK (3 << 28) 122#define IMTTBCR_SH1_MASK (3 << 28) /* R-Car Gen2 only */
122#define IMTTBCR_ORGN1_NC (0 << 26) 123#define IMTTBCR_ORGN1_NC (0 << 26) /* R-Car Gen2 only */
123#define IMTTBCR_ORGN1_WB_WA (1 << 26) 124#define IMTTBCR_ORGN1_WB_WA (1 << 26) /* R-Car Gen2 only */
124#define IMTTBCR_ORGN1_WT (2 << 26) 125#define IMTTBCR_ORGN1_WT (2 << 26) /* R-Car Gen2 only */
125#define IMTTBCR_ORGN1_WB (3 << 26) 126#define IMTTBCR_ORGN1_WB (3 << 26) /* R-Car Gen2 only */
126#define IMTTBCR_ORGN1_MASK (3 << 26) 127#define IMTTBCR_ORGN1_MASK (3 << 26) /* R-Car Gen2 only */
127#define IMTTBCR_IRGN1_NC (0 << 24) 128#define IMTTBCR_IRGN1_NC (0 << 24) /* R-Car Gen2 only */
128#define IMTTBCR_IRGN1_WB_WA (1 << 24) 129#define IMTTBCR_IRGN1_WB_WA (1 << 24) /* R-Car Gen2 only */
129#define IMTTBCR_IRGN1_WT (2 << 24) 130#define IMTTBCR_IRGN1_WT (2 << 24) /* R-Car Gen2 only */
130#define IMTTBCR_IRGN1_WB (3 << 24) 131#define IMTTBCR_IRGN1_WB (3 << 24) /* R-Car Gen2 only */
131#define IMTTBCR_IRGN1_MASK (3 << 24) 132#define IMTTBCR_IRGN1_MASK (3 << 24) /* R-Car Gen2 only */
132#define IMTTBCR_TSZ1_MASK (7 << 16) 133#define IMTTBCR_TSZ1_MASK (7 << 16)
133#define IMTTBCR_TSZ1_SHIFT 16 134#define IMTTBCR_TSZ1_SHIFT 16
134#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) 135#define IMTTBCR_SH0_NON_SHAREABLE (0 << 12) /* R-Car Gen2 only */
135#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) 136#define IMTTBCR_SH0_OUTER_SHAREABLE (2 << 12) /* R-Car Gen2 only */
136#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) 137#define IMTTBCR_SH0_INNER_SHAREABLE (3 << 12) /* R-Car Gen2 only */
137#define IMTTBCR_SH0_MASK (3 << 12) 138#define IMTTBCR_SH0_MASK (3 << 12) /* R-Car Gen2 only */
138#define IMTTBCR_ORGN0_NC (0 << 10) 139#define IMTTBCR_ORGN0_NC (0 << 10) /* R-Car Gen2 only */
139#define IMTTBCR_ORGN0_WB_WA (1 << 10) 140#define IMTTBCR_ORGN0_WB_WA (1 << 10) /* R-Car Gen2 only */
140#define IMTTBCR_ORGN0_WT (2 << 10) 141#define IMTTBCR_ORGN0_WT (2 << 10) /* R-Car Gen2 only */
141#define IMTTBCR_ORGN0_WB (3 << 10) 142#define IMTTBCR_ORGN0_WB (3 << 10) /* R-Car Gen2 only */
142#define IMTTBCR_ORGN0_MASK (3 << 10) 143#define IMTTBCR_ORGN0_MASK (3 << 10) /* R-Car Gen2 only */
143#define IMTTBCR_IRGN0_NC (0 << 8) 144#define IMTTBCR_IRGN0_NC (0 << 8) /* R-Car Gen2 only */
144#define IMTTBCR_IRGN0_WB_WA (1 << 8) 145#define IMTTBCR_IRGN0_WB_WA (1 << 8) /* R-Car Gen2 only */
145#define IMTTBCR_IRGN0_WT (2 << 8) 146#define IMTTBCR_IRGN0_WT (2 << 8) /* R-Car Gen2 only */
146#define IMTTBCR_IRGN0_WB (3 << 8) 147#define IMTTBCR_IRGN0_WB (3 << 8) /* R-Car Gen2 only */
147#define IMTTBCR_IRGN0_MASK (3 << 8) 148#define IMTTBCR_IRGN0_MASK (3 << 8) /* R-Car Gen2 only */
149#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6) /* R-Car Gen3 only */
150#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6) /* R-Car Gen3 only */
151#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6) /* R-Car Gen3 only */
148#define IMTTBCR_SL0_LVL_2 (0 << 4) 152#define IMTTBCR_SL0_LVL_2 (0 << 4)
149#define IMTTBCR_SL0_LVL_1 (1 << 4) 153#define IMTTBCR_SL0_LVL_1 (1 << 4)
150#define IMTTBCR_TSZ0_MASK (7 << 0) 154#define IMTTBCR_TSZ0_MASK (7 << 0)
151#define IMTTBCR_TSZ0_SHIFT 0 155#define IMTTBCR_TSZ0_SHIFT 0
152 156
153#define IMTTBCR_SL0_TWOBIT_LVL_3 (0 << 6)
154#define IMTTBCR_SL0_TWOBIT_LVL_2 (1 << 6)
155#define IMTTBCR_SL0_TWOBIT_LVL_1 (2 << 6)
156
157#define IMBUSCR 0x000c 157#define IMBUSCR 0x000c
158#define IMBUSCR_DVM (1 << 2) 158#define IMBUSCR_DVM (1 << 2)
159#define IMBUSCR_BUSSEL_SYS (0 << 0) 159#define IMBUSCR_BUSSEL_SYS (0 << 0)
@@ -422,17 +422,19 @@ static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
422 422
423 /* 423 /*
424 * TTBCR 424 * TTBCR
425 * We use long descriptors with inner-shareable WBWA tables and allocate 425 * We use long descriptors and allocate the whole 32-bit VA space to
426 * the whole 32-bit VA space to TTBR0. 426 * TTBR0.
427 */ 427 */
428 if (domain->mmu->features->twobit_imttbcr_sl0) 428 if (domain->mmu->features->twobit_imttbcr_sl0)
429 tmp = IMTTBCR_SL0_TWOBIT_LVL_1; 429 tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
430 else 430 else
431 tmp = IMTTBCR_SL0_LVL_1; 431 tmp = IMTTBCR_SL0_LVL_1;
432 432
433 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | 433 if (domain->mmu->features->cache_snoop)
434 IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA | 434 tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
435 IMTTBCR_IRGN0_WB_WA | tmp); 435 IMTTBCR_IRGN0_WB_WA;
436
437 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
436 438
437 /* MAIR0 */ 439 /* MAIR0 */
438 ipmmu_ctx_write_root(domain, IMMAIR0, 440 ipmmu_ctx_write_root(domain, IMMAIR0,
@@ -994,6 +996,7 @@ static const struct ipmmu_features ipmmu_features_default = {
994 .setup_imbuscr = true, 996 .setup_imbuscr = true,
995 .twobit_imttbcr_sl0 = false, 997 .twobit_imttbcr_sl0 = false,
996 .reserved_context = false, 998 .reserved_context = false,
999 .cache_snoop = true,
997}; 1000};
998 1001
999static const struct ipmmu_features ipmmu_features_rcar_gen3 = { 1002static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
@@ -1004,6 +1007,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
1004 .setup_imbuscr = false, 1007 .setup_imbuscr = false,
1005 .twobit_imttbcr_sl0 = true, 1008 .twobit_imttbcr_sl0 = true,
1006 .reserved_context = true, 1009 .reserved_context = true,
1010 .cache_snoop = false,
1007}; 1011};
1008 1012
1009static const struct of_device_id ipmmu_of_ids[] = { 1013static const struct of_device_id ipmmu_of_ids[] = {
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 4c0be5b75c28..be99d408cf35 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -766,7 +766,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
766 766
767 iommu->irq = platform_get_irq(pdev, 0); 767 iommu->irq = platform_get_irq(pdev, 0);
768 if (iommu->irq < 0) { 768 if (iommu->irq < 0) {
769 dev_err(iommu->dev, "could not get iommu irq\n");
770 ret = -ENODEV; 769 ret = -ENODEV;
771 goto fail; 770 goto fail;
772 } 771 }
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 0827d51936fa..67a483c1a935 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -28,6 +28,7 @@
28#include "mtk_iommu.h" 28#include "mtk_iommu.h"
29 29
30#define REG_MMU_PT_BASE_ADDR 0x000 30#define REG_MMU_PT_BASE_ADDR 0x000
31#define MMU_PT_ADDR_MASK GENMASK(31, 7)
31 32
32#define REG_MMU_INVALIDATE 0x020 33#define REG_MMU_INVALIDATE 0x020
33#define F_ALL_INVLD 0x2 34#define F_ALL_INVLD 0x2
@@ -44,12 +45,9 @@
44#define REG_MMU_DCM_DIS 0x050 45#define REG_MMU_DCM_DIS 0x050
45 46
46#define REG_MMU_CTRL_REG 0x110 47#define REG_MMU_CTRL_REG 0x110
48#define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4)
47#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4) 49#define F_MMU_PREFETCH_RT_REPLACE_MOD BIT(4)
48#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \ 50#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173 (2 << 5)
49 ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
50/* It's named by F_MMU_TF_PROT_SEL in mt2712. */
51#define F_MMU_TF_PROTECT_SEL(prot, data) \
52 (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))
53 51
54#define REG_MMU_IVRP_PADDR 0x114 52#define REG_MMU_IVRP_PADDR 0x114
55 53
@@ -66,26 +64,32 @@
66#define F_INT_CLR_BIT BIT(12) 64#define F_INT_CLR_BIT BIT(12)
67 65
68#define REG_MMU_INT_MAIN_CONTROL 0x124 66#define REG_MMU_INT_MAIN_CONTROL 0x124
69#define F_INT_TRANSLATION_FAULT BIT(0) 67 /* mmu0 | mmu1 */
70#define F_INT_MAIN_MULTI_HIT_FAULT BIT(1) 68#define F_INT_TRANSLATION_FAULT (BIT(0) | BIT(7))
71#define F_INT_INVALID_PA_FAULT BIT(2) 69#define F_INT_MAIN_MULTI_HIT_FAULT (BIT(1) | BIT(8))
72#define F_INT_ENTRY_REPLACEMENT_FAULT BIT(3) 70#define F_INT_INVALID_PA_FAULT (BIT(2) | BIT(9))
73#define F_INT_TLB_MISS_FAULT BIT(4) 71#define F_INT_ENTRY_REPLACEMENT_FAULT (BIT(3) | BIT(10))
74#define F_INT_MISS_TRANSACTION_FIFO_FAULT BIT(5) 72#define F_INT_TLB_MISS_FAULT (BIT(4) | BIT(11))
75#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT BIT(6) 73#define F_INT_MISS_TRANSACTION_FIFO_FAULT (BIT(5) | BIT(12))
74#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT (BIT(6) | BIT(13))
76 75
77#define REG_MMU_CPE_DONE 0x12C 76#define REG_MMU_CPE_DONE 0x12C
78 77
79#define REG_MMU_FAULT_ST1 0x134 78#define REG_MMU_FAULT_ST1 0x134
79#define F_REG_MMU0_FAULT_MASK GENMASK(6, 0)
80#define F_REG_MMU1_FAULT_MASK GENMASK(13, 7)
80 81
81#define REG_MMU_FAULT_VA 0x13c 82#define REG_MMU0_FAULT_VA 0x13c
82#define F_MMU_FAULT_VA_WRITE_BIT BIT(1) 83#define F_MMU_FAULT_VA_WRITE_BIT BIT(1)
83#define F_MMU_FAULT_VA_LAYER_BIT BIT(0) 84#define F_MMU_FAULT_VA_LAYER_BIT BIT(0)
84 85
85#define REG_MMU_INVLD_PA 0x140 86#define REG_MMU0_INVLD_PA 0x140
86#define REG_MMU_INT_ID 0x150 87#define REG_MMU1_FAULT_VA 0x144
87#define F_MMU0_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) 88#define REG_MMU1_INVLD_PA 0x148
88#define F_MMU0_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) 89#define REG_MMU0_INT_ID 0x150
90#define REG_MMU1_INT_ID 0x154
91#define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7)
92#define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f)
89 93
90#define MTK_PROTECT_PA_ALIGN 128 94#define MTK_PROTECT_PA_ALIGN 128
91 95
@@ -107,6 +111,30 @@ struct mtk_iommu_domain {
107 111
108static const struct iommu_ops mtk_iommu_ops; 112static const struct iommu_ops mtk_iommu_ops;
109 113
114/*
115 * In M4U 4GB mode, the physical address is remapped as below:
116 *
117 * CPU Physical address:
118 * ====================
119 *
120 * 0 1G 2G 3G 4G 5G
121 * |---A---|---B---|---C---|---D---|---E---|
122 * +--I/O--+------------Memory-------------+
123 *
124 * IOMMU output physical address:
125 * =============================
126 *
127 * 4G 5G 6G 7G 8G
128 * |---E---|---B---|---C---|---D---|
129 * +------------Memory-------------+
130 *
131 * Region 'A' (I/O) can NOT be mapped by the M4U. For Regions 'B'/'C'/'D',
132 * bit32 of the CPU physical address must always be set, while for Region
133 * 'E' the CPU physical address is kept as is.
134 * Additionally, the IOMMU consumers always use the CPU physical address.
135 */
136#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
137
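A hedged worked example of the remap described above, assuming 4GB mode is active:
mtk_iommu_map() ORs in bit32 on the way down, and mtk_iommu_iova_to_phys() clears
it again for any output PA at or above MTK_IOMMU_4GB_MODE_REMAP_BASE (both helpers
below are illustrative):

static phys_addr_t mtk_cpu_pa_to_iommu_pa(phys_addr_t pa)
{
        return pa | BIT_ULL(32);        /* e.g. 0x4000_0000 -> 0x1_4000_0000 */
}

static phys_addr_t mtk_iommu_pa_to_cpu_pa(phys_addr_t pa)
{
        if (pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
                pa &= ~BIT_ULL(32);     /* e.g. 0x1_4000_0000 -> 0x4000_0000 */
        return pa;
}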
110static LIST_HEAD(m4ulist); /* List all the M4U HWs */ 138static LIST_HEAD(m4ulist); /* List all the M4U HWs */
111 139
112#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list) 140#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
@@ -226,13 +254,21 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
226 254
227 /* Read error info from registers */ 255 /* Read error info from registers */
228 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1); 256 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
229 fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA); 257 if (int_state & F_REG_MMU0_FAULT_MASK) {
258 regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
259 fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
260 fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
261 } else {
262 regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
263 fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
264 fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
265 }
230 layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; 266 layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
231 write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; 267 write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
232 fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA); 268 fault_larb = F_MMU_INT_ID_LARB_ID(regval);
233 regval = readl_relaxed(data->base + REG_MMU_INT_ID); 269 fault_port = F_MMU_INT_ID_PORT_ID(regval);
234 fault_larb = F_MMU0_INT_ID_LARB_ID(regval); 270
235 fault_port = F_MMU0_INT_ID_PORT_ID(regval); 271 fault_larb = data->plat_data->larbid_remap[fault_larb];
236 272
237 if (report_iommu_fault(&dom->domain, data->dev, fault_iova, 273 if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
238 write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { 274 write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
@@ -264,7 +300,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
264 for (i = 0; i < fwspec->num_ids; ++i) { 300 for (i = 0; i < fwspec->num_ids; ++i) {
265 larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); 301 larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
266 portid = MTK_M4U_TO_PORT(fwspec->ids[i]); 302 portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
267 larb_mmu = &data->smi_imu.larb_imu[larbid]; 303 larb_mmu = &data->larb_imu[larbid];
268 304
269 dev_dbg(dev, "%s iommu port: %d\n", 305 dev_dbg(dev, "%s iommu port: %d\n",
270 enable ? "enable" : "disable", portid); 306 enable ? "enable" : "disable", portid);
@@ -285,17 +321,15 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
285 dom->cfg = (struct io_pgtable_cfg) { 321 dom->cfg = (struct io_pgtable_cfg) {
286 .quirks = IO_PGTABLE_QUIRK_ARM_NS | 322 .quirks = IO_PGTABLE_QUIRK_ARM_NS |
287 IO_PGTABLE_QUIRK_NO_PERMS | 323 IO_PGTABLE_QUIRK_NO_PERMS |
288 IO_PGTABLE_QUIRK_TLBI_ON_MAP, 324 IO_PGTABLE_QUIRK_TLBI_ON_MAP |
325 IO_PGTABLE_QUIRK_ARM_MTK_EXT,
289 .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, 326 .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
290 .ias = 32, 327 .ias = 32,
291 .oas = 32, 328 .oas = 34,
292 .tlb = &mtk_iommu_flush_ops, 329 .tlb = &mtk_iommu_flush_ops,
293 .iommu_dev = data->dev, 330 .iommu_dev = data->dev,
294 }; 331 };
295 332
296 if (data->enable_4GB)
297 dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
298
299 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data); 333 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
300 if (!dom->iop) { 334 if (!dom->iop) {
301 dev_err(data->dev, "Failed to alloc io pgtable\n"); 335 dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -358,7 +392,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
358 /* Update the pgtable base address register of the M4U HW */ 392 /* Update the pgtable base address register of the M4U HW */
359 if (!data->m4u_dom) { 393 if (!data->m4u_dom) {
360 data->m4u_dom = dom; 394 data->m4u_dom = dom;
361 writel(dom->cfg.arm_v7s_cfg.ttbr[0], 395 writel(dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
362 data->base + REG_MMU_PT_BASE_ADDR); 396 data->base + REG_MMU_PT_BASE_ADDR);
363 } 397 }
364 398
@@ -381,12 +415,16 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
381 phys_addr_t paddr, size_t size, int prot) 415 phys_addr_t paddr, size_t size, int prot)
382{ 416{
383 struct mtk_iommu_domain *dom = to_mtk_domain(domain); 417 struct mtk_iommu_domain *dom = to_mtk_domain(domain);
418 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
384 unsigned long flags; 419 unsigned long flags;
385 int ret; 420 int ret;
386 421
422 /* The "4GB mode" M4U physically can not use the lower remap of Dram. */
423 if (data->enable_4GB)
424 paddr |= BIT_ULL(32);
425
387 spin_lock_irqsave(&dom->pgtlock, flags); 426 spin_lock_irqsave(&dom->pgtlock, flags);
388 ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32), 427 ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
389 size, prot);
390 spin_unlock_irqrestore(&dom->pgtlock, flags); 428 spin_unlock_irqrestore(&dom->pgtlock, flags);
391 429
392 return ret; 430 return ret;
@@ -430,8 +468,8 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
430 pa = dom->iop->iova_to_phys(dom->iop, iova); 468 pa = dom->iop->iova_to_phys(dom->iop, iova);
431 spin_unlock_irqrestore(&dom->pgtlock, flags); 469 spin_unlock_irqrestore(&dom->pgtlock, flags);
432 470
433 if (data->enable_4GB) 471 if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
434 pa |= BIT_ULL(32); 472 pa &= ~BIT_ULL(32);
435 473
436 return pa; 474 return pa;
437} 475}
@@ -540,9 +578,11 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
540 return ret; 578 return ret;
541 } 579 }
542 580
543 regval = F_MMU_TF_PROTECT_SEL(2, data); 581 if (data->plat_data->m4u_plat == M4U_MT8173)
544 if (data->m4u_plat == M4U_MT8173) 582 regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
545 regval |= F_MMU_PREFETCH_RT_REPLACE_MOD; 583 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
584 else
585 regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
546 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); 586 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
547 587
548 regval = F_L2_MULIT_HIT_EN | 588 regval = F_L2_MULIT_HIT_EN |
@@ -562,14 +602,14 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
562 F_INT_PRETETCH_TRANSATION_FIFO_FAULT; 602 F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
563 writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); 603 writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
564 604
565 if (data->m4u_plat == M4U_MT8173) 605 if (data->plat_data->m4u_plat == M4U_MT8173)
566 regval = (data->protect_base >> 1) | (data->enable_4GB << 31); 606 regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
567 else 607 else
568 regval = lower_32_bits(data->protect_base) | 608 regval = lower_32_bits(data->protect_base) |
569 upper_32_bits(data->protect_base); 609 upper_32_bits(data->protect_base);
570 writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); 610 writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
571 611
572 if (data->enable_4GB && data->m4u_plat != M4U_MT8173) { 612 if (data->enable_4GB && data->plat_data->has_vld_pa_rng) {
573 /* 613 /*
574 * If 4GB mode is enabled, the valid PA range is from 614 * If 4GB mode is enabled, the valid PA range is from
575 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30]. 615 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bit[32:30].
@@ -579,8 +619,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
579 } 619 }
580 writel_relaxed(0, data->base + REG_MMU_DCM_DIS); 620 writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
581 621
582 /* It's MISC control register whose default value is ok except mt8173.*/ 622 if (data->plat_data->reset_axi)
583 if (data->m4u_plat == M4U_MT8173)
584 writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); 623 writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);
585 624
586 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, 625 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
@@ -613,7 +652,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
613 if (!data) 652 if (!data)
614 return -ENOMEM; 653 return -ENOMEM;
615 data->dev = dev; 654 data->dev = dev;
616 data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev); 655 data->plat_data = of_device_get_match_data(dev);
617 656
618 /* Protect memory. HW accesses this region on a translation fault. */ 657 /* Protect memory. HW accesses this region on a translation fault. */
619 protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL); 658 protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
@@ -623,6 +662,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
623 662
624 /* Whether the current DRAM is over 4GB */ 663 /* Whether the current DRAM is over 4GB */
625 data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT)); 664 data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
665 if (!data->plat_data->has_4gb_mode)
666 data->enable_4GB = false;
626 667
627 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 668 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
628 data->base = devm_ioremap_resource(dev, res); 669 data->base = devm_ioremap_resource(dev, res);
@@ -634,15 +675,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
634 if (data->irq < 0) 675 if (data->irq < 0)
635 return data->irq; 676 return data->irq;
636 677
637 data->bclk = devm_clk_get(dev, "bclk"); 678 if (data->plat_data->has_bclk) {
638 if (IS_ERR(data->bclk)) 679 data->bclk = devm_clk_get(dev, "bclk");
639 return PTR_ERR(data->bclk); 680 if (IS_ERR(data->bclk))
681 return PTR_ERR(data->bclk);
682 }
640 683
641 larb_nr = of_count_phandle_with_args(dev->of_node, 684 larb_nr = of_count_phandle_with_args(dev->of_node,
642 "mediatek,larbs", NULL); 685 "mediatek,larbs", NULL);
643 if (larb_nr < 0) 686 if (larb_nr < 0)
644 return larb_nr; 687 return larb_nr;
645 data->smi_imu.larb_nr = larb_nr;
646 688
647 for (i = 0; i < larb_nr; i++) { 689 for (i = 0; i < larb_nr; i++) {
648 struct device_node *larbnode; 690 struct device_node *larbnode;
@@ -667,7 +709,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
667 of_node_put(larbnode); 709 of_node_put(larbnode);
668 return -EPROBE_DEFER; 710 return -EPROBE_DEFER;
669 } 711 }
670 data->smi_imu.larb_imu[id].dev = &plarbdev->dev; 712 data->larb_imu[id].dev = &plarbdev->dev;
671 713
672 component_match_add_release(dev, &match, release_of, 714 component_match_add_release(dev, &match, release_of,
673 compare_of, larbnode); 715 compare_of, larbnode);
@@ -728,6 +770,7 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev)
728 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); 770 reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
729 reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); 771 reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
730 reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); 772 reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
773 reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
731 clk_disable_unprepare(data->bclk); 774 clk_disable_unprepare(data->bclk);
732 return 0; 775 return 0;
733} 776}
@@ -736,6 +779,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
736{ 779{
737 struct mtk_iommu_data *data = dev_get_drvdata(dev); 780 struct mtk_iommu_data *data = dev_get_drvdata(dev);
738 struct mtk_iommu_suspend_reg *reg = &data->reg; 781 struct mtk_iommu_suspend_reg *reg = &data->reg;
782 struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
739 void __iomem *base = data->base; 783 void __iomem *base = data->base;
740 int ret; 784 int ret;
741 785
@@ -751,8 +795,9 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
751 writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); 795 writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
752 writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); 796 writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
753 writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); 797 writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
754 if (data->m4u_dom) 798 writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
755 writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0], 799 if (m4u_dom)
800 writel(m4u_dom->cfg.arm_v7s_cfg.ttbr[0] & MMU_PT_ADDR_MASK,
756 base + REG_MMU_PT_BASE_ADDR); 801 base + REG_MMU_PT_BASE_ADDR);
757 return 0; 802 return 0;
758} 803}
@@ -761,9 +806,32 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = {
761 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) 806 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
762}; 807};
763 808
809static const struct mtk_iommu_plat_data mt2712_data = {
810 .m4u_plat = M4U_MT2712,
811 .has_4gb_mode = true,
812 .has_bclk = true,
813 .has_vld_pa_rng = true,
814 .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
815};
816
817static const struct mtk_iommu_plat_data mt8173_data = {
818 .m4u_plat = M4U_MT8173,
819 .has_4gb_mode = true,
820 .has_bclk = true,
821 .reset_axi = true,
822 .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */
823};
824
825static const struct mtk_iommu_plat_data mt8183_data = {
826 .m4u_plat = M4U_MT8183,
827 .reset_axi = true,
828 .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1},
829};
830
764static const struct of_device_id mtk_iommu_of_ids[] = { 831static const struct of_device_id mtk_iommu_of_ids[] = {
765 { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712}, 832 { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
766 { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173}, 833 { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
834 { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
767 {} 835 {}
768}; 836};
769 837
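
The mtk_iommu hunks above convert the of_device_id .data field from a bare enum into a pointer to a per-SoC mtk_iommu_plat_data, so quirks such as bclk presence, 4 GB mode and larb ID remapping travel with the match entry. A hedged sketch of how a probe path typically picks this up via of_device_get_match_data(); the function body here is illustrative, not the driver's actual code:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative probe fragment, not the driver's actual code. */
static int example_probe(struct platform_device *pdev)
{
	const struct mtk_iommu_plat_data *plat_data;

	/* Returns the .data pointer of the matched of_device_id entry. */
	plat_data = of_device_get_match_data(&pdev->dev);
	if (!plat_data)
		return -EINVAL;

	if (plat_data->has_bclk) {
		/* only claim "bclk" on SoCs that actually have one */
	}

	return 0;
}
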
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 59337323db58..fc0f16eabacd 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -24,12 +24,25 @@ struct mtk_iommu_suspend_reg {
24 u32 int_control0; 24 u32 int_control0;
25 u32 int_main_control; 25 u32 int_main_control;
26 u32 ivrp_paddr; 26 u32 ivrp_paddr;
27 u32 vld_pa_rng;
27}; 28};
28 29
29enum mtk_iommu_plat { 30enum mtk_iommu_plat {
30 M4U_MT2701, 31 M4U_MT2701,
31 M4U_MT2712, 32 M4U_MT2712,
32 M4U_MT8173, 33 M4U_MT8173,
34 M4U_MT8183,
35};
36
37struct mtk_iommu_plat_data {
38 enum mtk_iommu_plat m4u_plat;
39 bool has_4gb_mode;
40
 41 /* HW will use the EMI clock if there is no "bclk". */
42 bool has_bclk;
43 bool has_vld_pa_rng;
44 bool reset_axi;
45 unsigned char larbid_remap[MTK_LARB_NR_MAX];
33}; 46};
34 47
35struct mtk_iommu_domain; 48struct mtk_iommu_domain;
@@ -43,14 +56,14 @@ struct mtk_iommu_data {
43 struct mtk_iommu_suspend_reg reg; 56 struct mtk_iommu_suspend_reg reg;
44 struct mtk_iommu_domain *m4u_dom; 57 struct mtk_iommu_domain *m4u_dom;
45 struct iommu_group *m4u_group; 58 struct iommu_group *m4u_group;
46 struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
47 bool enable_4GB; 59 bool enable_4GB;
48 bool tlb_flush_active; 60 bool tlb_flush_active;
49 61
50 struct iommu_device iommu; 62 struct iommu_device iommu;
51 enum mtk_iommu_plat m4u_plat; 63 const struct mtk_iommu_plat_data *plat_data;
52 64
53 struct list_head list; 65 struct list_head list;
66 struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
54}; 67};
55 68
56static inline int compare_of(struct device *dev, void *data) 69static inline int compare_of(struct device *dev, void *data)
@@ -67,14 +80,14 @@ static inline int mtk_iommu_bind(struct device *dev)
67{ 80{
68 struct mtk_iommu_data *data = dev_get_drvdata(dev); 81 struct mtk_iommu_data *data = dev_get_drvdata(dev);
69 82
70 return component_bind_all(dev, &data->smi_imu); 83 return component_bind_all(dev, &data->larb_imu);
71} 84}
72 85
73static inline void mtk_iommu_unbind(struct device *dev) 86static inline void mtk_iommu_unbind(struct device *dev)
74{ 87{
75 struct mtk_iommu_data *data = dev_get_drvdata(dev); 88 struct mtk_iommu_data *data = dev_get_drvdata(dev);
76 89
77 component_unbind_all(dev, &data->smi_imu); 90 component_unbind_all(dev, &data->larb_imu);
78} 91}
79 92
80#endif 93#endif
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 7b92ddd5d9fd..210b1c7c0bda 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -206,7 +206,7 @@ static void mtk_iommu_config(struct mtk_iommu_data *data,
206 for (i = 0; i < fwspec->num_ids; ++i) { 206 for (i = 0; i < fwspec->num_ids; ++i) {
207 larbid = mt2701_m4u_to_larb(fwspec->ids[i]); 207 larbid = mt2701_m4u_to_larb(fwspec->ids[i]);
208 portid = mt2701_m4u_to_port(fwspec->ids[i]); 208 portid = mt2701_m4u_to_port(fwspec->ids[i]);
209 larb_mmu = &data->smi_imu.larb_imu[larbid]; 209 larb_mmu = &data->larb_imu[larbid];
210 210
211 dev_dbg(dev, "%s iommu port: %d\n", 211 dev_dbg(dev, "%s iommu port: %d\n",
212 enable ? "enable" : "disable", portid); 212 enable ? "enable" : "disable", portid);
@@ -611,14 +611,12 @@ static int mtk_iommu_probe(struct platform_device *pdev)
611 } 611 }
612 } 612 }
613 613
614 data->smi_imu.larb_imu[larb_nr].dev = &plarbdev->dev; 614 data->larb_imu[larb_nr].dev = &plarbdev->dev;
615 component_match_add_release(dev, &match, release_of, 615 component_match_add_release(dev, &match, release_of,
616 compare_of, larb_spec.np); 616 compare_of, larb_spec.np);
617 larb_nr++; 617 larb_nr++;
618 } 618 }
619 619
620 data->smi_imu.larb_nr = larb_nr;
621
622 platform_set_drvdata(pdev, data); 620 platform_set_drvdata(pdev, data);
623 621
624 ret = mtk_iommu_hw_init(data); 622 ret = mtk_iommu_hw_init(data);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8039bc5ee425..09c6e1c680db 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -35,6 +35,15 @@
35 35
36static const struct iommu_ops omap_iommu_ops; 36static const struct iommu_ops omap_iommu_ops;
37 37
38struct orphan_dev {
39 struct device *dev;
40 struct list_head node;
41};
42
43static LIST_HEAD(orphan_dev_list);
44
45static DEFINE_SPINLOCK(orphan_lock);
46
38#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev)) 47#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
39 48
40/* bitmap of the page sizes currently supported */ 49/* bitmap of the page sizes currently supported */
@@ -53,6 +62,8 @@ static const struct iommu_ops omap_iommu_ops;
53static struct platform_driver omap_iommu_driver; 62static struct platform_driver omap_iommu_driver;
54static struct kmem_cache *iopte_cachep; 63static struct kmem_cache *iopte_cachep;
55 64
65static int _omap_iommu_add_device(struct device *dev);
66
56/** 67/**
57 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain 68 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
58 * @dom: generic iommu domain handle 69 * @dom: generic iommu domain handle
@@ -65,6 +76,9 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
65/** 76/**
66 * omap_iommu_save_ctx - Save registers for pm off-mode support 77 * omap_iommu_save_ctx - Save registers for pm off-mode support
67 * @dev: client device 78 * @dev: client device
79 *
 80 * This should be treated as a deprecated API. It is preserved only
 81 * to maintain existing functionality for the OMAP3 ISP driver.
68 **/ 82 **/
69void omap_iommu_save_ctx(struct device *dev) 83void omap_iommu_save_ctx(struct device *dev)
70{ 84{
@@ -92,6 +106,9 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
92/** 106/**
93 * omap_iommu_restore_ctx - Restore registers for pm off-mode support 107 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
94 * @dev: client device 108 * @dev: client device
109 *
 110 * This should be treated as a deprecated API. It is preserved only
 111 * to maintain existing functionality for the OMAP3 ISP driver.
95 **/ 112 **/
96void omap_iommu_restore_ctx(struct device *dev) 113void omap_iommu_restore_ctx(struct device *dev)
97{ 114{
@@ -186,36 +203,18 @@ static void omap2_iommu_disable(struct omap_iommu *obj)
186 203
187static int iommu_enable(struct omap_iommu *obj) 204static int iommu_enable(struct omap_iommu *obj)
188{ 205{
189 int err; 206 int ret;
190 struct platform_device *pdev = to_platform_device(obj->dev);
191 struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
192
193 if (pdata && pdata->deassert_reset) {
194 err = pdata->deassert_reset(pdev, pdata->reset_name);
195 if (err) {
196 dev_err(obj->dev, "deassert_reset failed: %d\n", err);
197 return err;
198 }
199 }
200
201 pm_runtime_get_sync(obj->dev);
202 207
203 err = omap2_iommu_enable(obj); 208 ret = pm_runtime_get_sync(obj->dev);
209 if (ret < 0)
210 pm_runtime_put_noidle(obj->dev);
204 211
205 return err; 212 return ret < 0 ? ret : 0;
206} 213}
207 214
208static void iommu_disable(struct omap_iommu *obj) 215static void iommu_disable(struct omap_iommu *obj)
209{ 216{
210 struct platform_device *pdev = to_platform_device(obj->dev);
211 struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
212
213 omap2_iommu_disable(obj);
214
215 pm_runtime_put_sync(obj->dev); 217 pm_runtime_put_sync(obj->dev);
216
217 if (pdata && pdata->assert_reset)
218 pdata->assert_reset(pdev, pdata->reset_name);
219} 218}
220 219
221/* 220/*
@@ -901,15 +900,219 @@ static void omap_iommu_detach(struct omap_iommu *obj)
901 900
902 dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE, 901 dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
903 DMA_TO_DEVICE); 902 DMA_TO_DEVICE);
904 iommu_disable(obj);
905 obj->pd_dma = 0; 903 obj->pd_dma = 0;
906 obj->iopgd = NULL; 904 obj->iopgd = NULL;
905 iommu_disable(obj);
907 906
908 spin_unlock(&obj->iommu_lock); 907 spin_unlock(&obj->iommu_lock);
909 908
910 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name); 909 dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
911} 910}
912 911
912static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
913{
914 struct iotlb_lock lock;
915 struct cr_regs cr;
916 struct cr_regs *tmp;
917 int i;
918
919 /* check if there are any locked tlbs to save */
920 iotlb_lock_get(obj, &lock);
921 obj->num_cr_ctx = lock.base;
922 if (!obj->num_cr_ctx)
923 return;
924
925 tmp = obj->cr_ctx;
926 for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
 927 *tmp++ = cr;
928}
929
930static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
931{
932 struct iotlb_lock l;
933 struct cr_regs *tmp;
934 int i;
935
936 /* no locked tlbs to restore */
937 if (!obj->num_cr_ctx)
938 return;
939
940 l.base = 0;
941 tmp = obj->cr_ctx;
942 for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
943 l.vict = i;
944 iotlb_lock_set(obj, &l);
945 iotlb_load_cr(obj, tmp);
946 }
947 l.base = obj->num_cr_ctx;
948 l.vict = i;
949 iotlb_lock_set(obj, &l);
950}
951
952/**
953 * omap_iommu_domain_deactivate - deactivate attached iommu devices
954 * @domain: iommu domain attached to the target iommu device
955 *
956 * This API allows the client devices of IOMMU devices to suspend
 957 * the IOMMUs they control at runtime, once they are idled and
 958 * have suspended all activity. System Suspend will leverage the PM
959 * driver late callbacks.
960 **/
961int omap_iommu_domain_deactivate(struct iommu_domain *domain)
962{
963 struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
964 struct omap_iommu_device *iommu;
965 struct omap_iommu *oiommu;
966 int i;
967
968 if (!omap_domain->dev)
969 return 0;
970
971 iommu = omap_domain->iommus;
972 iommu += (omap_domain->num_iommus - 1);
973 for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
974 oiommu = iommu->iommu_dev;
975 pm_runtime_put_sync(oiommu->dev);
976 }
977
978 return 0;
979}
980EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
981
982/**
983 * omap_iommu_domain_activate - activate attached iommu devices
984 * @domain: iommu domain attached to the target iommu device
985 *
986 * This API allows the client devices of IOMMU devices to resume the
987 * IOMMUs they control at runtime, before they can resume operations.
988 * System Resume will leverage the PM driver late callbacks.
989 **/
990int omap_iommu_domain_activate(struct iommu_domain *domain)
991{
992 struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
993 struct omap_iommu_device *iommu;
994 struct omap_iommu *oiommu;
995 int i;
996
997 if (!omap_domain->dev)
998 return 0;
999
1000 iommu = omap_domain->iommus;
1001 for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
1002 oiommu = iommu->iommu_dev;
1003 pm_runtime_get_sync(oiommu->dev);
1004 }
1005
1006 return 0;
1007}
1008EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
1009
1010/**
1011 * omap_iommu_runtime_suspend - disable an iommu device
1012 * @dev: iommu device
1013 *
1014 * This function performs all that is necessary to disable an
1015 * IOMMU device, either during final detachment from a client
1016 * device, or during system/runtime suspend of the device. This
1017 * includes programming all the appropriate IOMMU registers, and
1018 * managing the associated omap_hwmod's state and the device's
1019 * reset line. This function also saves the context of any
1020 * locked TLBs if suspending.
1021 **/
1022static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
1023{
1024 struct platform_device *pdev = to_platform_device(dev);
1025 struct iommu_platform_data *pdata = dev_get_platdata(dev);
1026 struct omap_iommu *obj = to_iommu(dev);
1027 int ret;
1028
1029 /* save the TLBs only during suspend, and not for power down */
1030 if (obj->domain && obj->iopgd)
1031 omap_iommu_save_tlb_entries(obj);
1032
1033 omap2_iommu_disable(obj);
1034
1035 if (pdata && pdata->device_idle)
1036 pdata->device_idle(pdev);
1037
1038 if (pdata && pdata->assert_reset)
1039 pdata->assert_reset(pdev, pdata->reset_name);
1040
1041 if (pdata && pdata->set_pwrdm_constraint) {
1042 ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
1043 if (ret) {
1044 dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
1045 ret);
1046 }
1047 }
1048
1049 return 0;
1050}
1051
1052/**
1053 * omap_iommu_runtime_resume - enable an iommu device
1054 * @dev: iommu device
1055 *
1056 * This function performs all that is necessary to enable an
1057 * IOMMU device, either during initial attachment to a client
1058 * device, or during system/runtime resume of the device. This
1059 * includes programming all the appropriate IOMMU registers, and
1060 * managing the associated omap_hwmod's state and the device's
1061 * reset line. The function also restores any locked TLBs if
1062 * resuming after a suspend.
1063 **/
1064static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
1065{
1066 struct platform_device *pdev = to_platform_device(dev);
1067 struct iommu_platform_data *pdata = dev_get_platdata(dev);
1068 struct omap_iommu *obj = to_iommu(dev);
1069 int ret = 0;
1070
1071 if (pdata && pdata->set_pwrdm_constraint) {
1072 ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
1073 if (ret) {
1074 dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
1075 ret);
1076 }
1077 }
1078
1079 if (pdata && pdata->deassert_reset) {
1080 ret = pdata->deassert_reset(pdev, pdata->reset_name);
1081 if (ret) {
1082 dev_err(dev, "deassert_reset failed: %d\n", ret);
1083 return ret;
1084 }
1085 }
1086
1087 if (pdata && pdata->device_enable)
1088 pdata->device_enable(pdev);
1089
1090 /* restore the TLBs only during resume, and not for power up */
1091 if (obj->domain)
1092 omap_iommu_restore_tlb_entries(obj);
1093
1094 ret = omap2_iommu_enable(obj);
1095
1096 return ret;
1097}
1098
1099/**
1100 * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
1101 * @dev: iommu device
1102 *
1103 * This function performs the necessary checks to determine if the IOMMU
1104 * device needs suspending or not. The function checks if the runtime_pm
1105 * status of the device is suspended, and returns 1 in that case. This
1106 * results in the PM core to skip invoking any of the Sleep PM callbacks
1107 * (suspend, suspend_late, resume, resume_early etc).
1108 */
1109static int omap_iommu_prepare(struct device *dev)
1110{
1111 if (pm_runtime_status_suspended(dev))
1112 return 1;
1113 return 0;
1114}
1115
913static bool omap_iommu_can_register(struct platform_device *pdev) 1116static bool omap_iommu_can_register(struct platform_device *pdev)
914{ 1117{
915 struct device_node *np = pdev->dev.of_node; 1118 struct device_node *np = pdev->dev.of_node;
@@ -974,6 +1177,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
974 struct omap_iommu *obj; 1177 struct omap_iommu *obj;
975 struct resource *res; 1178 struct resource *res;
976 struct device_node *of = pdev->dev.of_node; 1179 struct device_node *of = pdev->dev.of_node;
1180 struct orphan_dev *orphan_dev, *tmp;
977 1181
978 if (!of) { 1182 if (!of) {
979 pr_err("%s: only DT-based devices are supported\n", __func__); 1183 pr_err("%s: only DT-based devices are supported\n", __func__);
@@ -984,6 +1188,15 @@ static int omap_iommu_probe(struct platform_device *pdev)
984 if (!obj) 1188 if (!obj)
985 return -ENOMEM; 1189 return -ENOMEM;
986 1190
1191 /*
1192 * self-manage the ordering dependencies between omap_device_enable/idle
1193 * and omap_device_assert/deassert_hardreset API
1194 */
1195 if (pdev->dev.pm_domain) {
1196 dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
1197 pdev->dev.pm_domain = NULL;
1198 }
1199
987 obj->name = dev_name(&pdev->dev); 1200 obj->name = dev_name(&pdev->dev);
988 obj->nr_tlb_entries = 32; 1201 obj->nr_tlb_entries = 32;
989 err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries); 1202 err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
@@ -996,6 +1209,11 @@ static int omap_iommu_probe(struct platform_device *pdev)
996 1209
997 obj->dev = &pdev->dev; 1210 obj->dev = &pdev->dev;
998 obj->ctx = (void *)obj + sizeof(*obj); 1211 obj->ctx = (void *)obj + sizeof(*obj);
1212 obj->cr_ctx = devm_kzalloc(&pdev->dev,
1213 sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
1214 GFP_KERNEL);
1215 if (!obj->cr_ctx)
1216 return -ENOMEM;
999 1217
1000 spin_lock_init(&obj->iommu_lock); 1218 spin_lock_init(&obj->iommu_lock);
1001 spin_lock_init(&obj->page_table_lock); 1219 spin_lock_init(&obj->page_table_lock);
@@ -1036,13 +1254,20 @@ static int omap_iommu_probe(struct platform_device *pdev)
1036 goto out_sysfs; 1254 goto out_sysfs;
1037 } 1255 }
1038 1256
1039 pm_runtime_irq_safe(obj->dev);
1040 pm_runtime_enable(obj->dev); 1257 pm_runtime_enable(obj->dev);
1041 1258
1042 omap_iommu_debugfs_add(obj); 1259 omap_iommu_debugfs_add(obj);
1043 1260
1044 dev_info(&pdev->dev, "%s registered\n", obj->name); 1261 dev_info(&pdev->dev, "%s registered\n", obj->name);
1045 1262
1263 list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
1264 err = _omap_iommu_add_device(orphan_dev->dev);
1265 if (!err) {
1266 list_del(&orphan_dev->node);
1267 kfree(orphan_dev);
1268 }
1269 }
1270
1046 return 0; 1271 return 0;
1047 1272
1048out_sysfs: 1273out_sysfs:
@@ -1072,6 +1297,14 @@ static int omap_iommu_remove(struct platform_device *pdev)
1072 return 0; 1297 return 0;
1073} 1298}
1074 1299
1300static const struct dev_pm_ops omap_iommu_pm_ops = {
1301 .prepare = omap_iommu_prepare,
1302 SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1303 pm_runtime_force_resume)
1304 SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
1305 omap_iommu_runtime_resume, NULL)
1306};
1307
1075static const struct of_device_id omap_iommu_of_match[] = { 1308static const struct of_device_id omap_iommu_of_match[] = {
1076 { .compatible = "ti,omap2-iommu" }, 1309 { .compatible = "ti,omap2-iommu" },
1077 { .compatible = "ti,omap4-iommu" }, 1310 { .compatible = "ti,omap4-iommu" },
@@ -1085,6 +1318,7 @@ static struct platform_driver omap_iommu_driver = {
1085 .remove = omap_iommu_remove, 1318 .remove = omap_iommu_remove,
1086 .driver = { 1319 .driver = {
1087 .name = "omap-iommu", 1320 .name = "omap-iommu",
1321 .pm = &omap_iommu_pm_ops,
1088 .of_match_table = of_match_ptr(omap_iommu_of_match), 1322 .of_match_table = of_match_ptr(omap_iommu_of_match),
1089 }, 1323 },
1090}; 1324};
@@ -1423,7 +1657,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
1423 return ret; 1657 return ret;
1424} 1658}
1425 1659
1426static int omap_iommu_add_device(struct device *dev) 1660static int _omap_iommu_add_device(struct device *dev)
1427{ 1661{
1428 struct omap_iommu_arch_data *arch_data, *tmp; 1662 struct omap_iommu_arch_data *arch_data, *tmp;
1429 struct omap_iommu *oiommu; 1663 struct omap_iommu *oiommu;
@@ -1432,6 +1666,8 @@ static int omap_iommu_add_device(struct device *dev)
1432 struct platform_device *pdev; 1666 struct platform_device *pdev;
1433 int num_iommus, i; 1667 int num_iommus, i;
1434 int ret; 1668 int ret;
1669 struct orphan_dev *orphan_dev;
1670 unsigned long flags;
1435 1671
1436 /* 1672 /*
1437 * Allocate the archdata iommu structure for DT-based devices. 1673 * Allocate the archdata iommu structure for DT-based devices.
@@ -1463,10 +1699,26 @@ static int omap_iommu_add_device(struct device *dev)
1463 } 1699 }
1464 1700
1465 pdev = of_find_device_by_node(np); 1701 pdev = of_find_device_by_node(np);
1466 if (WARN_ON(!pdev)) { 1702 if (!pdev) {
1467 of_node_put(np); 1703 of_node_put(np);
1468 kfree(arch_data); 1704 kfree(arch_data);
1469 return -EINVAL; 1705 spin_lock_irqsave(&orphan_lock, flags);
1706 list_for_each_entry(orphan_dev, &orphan_dev_list,
1707 node) {
1708 if (orphan_dev->dev == dev)
1709 break;
1710 }
1711 spin_unlock_irqrestore(&orphan_lock, flags);
1712
1713 if (orphan_dev && orphan_dev->dev == dev)
1714 return -EPROBE_DEFER;
1715
1716 orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
1717 orphan_dev->dev = dev;
1718 spin_lock_irqsave(&orphan_lock, flags);
1719 list_add(&orphan_dev->node, &orphan_dev_list);
1720 spin_unlock_irqrestore(&orphan_lock, flags);
1721 return -EPROBE_DEFER;
1470 } 1722 }
1471 1723
1472 oiommu = platform_get_drvdata(pdev); 1724 oiommu = platform_get_drvdata(pdev);
@@ -1477,6 +1729,7 @@ static int omap_iommu_add_device(struct device *dev)
1477 } 1729 }
1478 1730
1479 tmp->iommu_dev = oiommu; 1731 tmp->iommu_dev = oiommu;
1732 tmp->dev = &pdev->dev;
1480 1733
1481 of_node_put(np); 1734 of_node_put(np);
1482 } 1735 }
@@ -1511,6 +1764,17 @@ static int omap_iommu_add_device(struct device *dev)
1511 return 0; 1764 return 0;
1512} 1765}
1513 1766
1767static int omap_iommu_add_device(struct device *dev)
1768{
1769 int ret;
1770
1771 ret = _omap_iommu_add_device(dev);
1772 if (ret == -EPROBE_DEFER)
1773 return 0;
1774
1775 return ret;
1776}
1777
1514static void omap_iommu_remove_device(struct device *dev) 1778static void omap_iommu_remove_device(struct device *dev)
1515{ 1779{
1516 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1780 struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1554,7 +1818,7 @@ static const struct iommu_ops omap_iommu_ops = {
1554static int __init omap_iommu_init(void) 1818static int __init omap_iommu_init(void)
1555{ 1819{
1556 struct kmem_cache *p; 1820 struct kmem_cache *p;
1557 const unsigned long flags = SLAB_HWCACHE_ALIGN; 1821 const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
 1558 size_t align = 1 << 10; /* L2 pagetable alignment */ 1822 size_t align = 1 << 10; /* L2 pagetable alignment */
1559 struct device_node *np; 1823 struct device_node *np;
1560 int ret; 1824 int ret;
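
The iommu_enable() rewrite above relies on a subtlety of pm_runtime_get_sync(): the device usage count is incremented even when the call fails, so the error path must drop that reference with pm_runtime_put_noidle() before propagating the error. A hedged sketch of the idiom; the wrapper name is illustrative:

#include <linux/pm_runtime.h>

/* Illustrative wrapper, not the driver's actual code. */
static int example_runtime_get(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the usage count was bumped even on failure; undo it */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
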
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 09968a02d291..18ee713ede78 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -73,16 +73,22 @@ struct omap_iommu {
73 73
 74 void *ctx; /* iommu context: registers saved area */ 74 void *ctx; /* iommu context: registers saved area */
75 75
76 struct cr_regs *cr_ctx;
77 u32 num_cr_ctx;
78
76 int has_bus_err_back; 79 int has_bus_err_back;
77 u32 id; 80 u32 id;
78 81
79 struct iommu_device iommu; 82 struct iommu_device iommu;
80 struct iommu_group *group; 83 struct iommu_group *group;
84
85 u8 pwrst;
81}; 86};
82 87
83/** 88/**
84 * struct omap_iommu_arch_data - omap iommu private data 89 * struct omap_iommu_arch_data - omap iommu private data
85 * @iommu_dev: handle of the iommu device 90 * @iommu_dev: handle of the OMAP iommu device
91 * @dev: handle of the iommu device
86 * 92 *
87 * This is an omap iommu private data object, which binds an iommu user 93 * This is an omap iommu private data object, which binds an iommu user
88 * to its iommu device. This object should be placed at the iommu user's 94 * to its iommu device. This object should be placed at the iommu user's
@@ -91,6 +97,7 @@ struct omap_iommu {
91 */ 97 */
92struct omap_iommu_arch_data { 98struct omap_iommu_arch_data {
93 struct omap_iommu *iommu_dev; 99 struct omap_iommu *iommu_dev;
100 struct device *dev;
94}; 101};
95 102
96struct cr_regs { 103struct cr_regs {
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index fd33cf5981d7..c31e7bc4ccbe 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -725,10 +725,8 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
725 return PTR_ERR(ctx->base); 725 return PTR_ERR(ctx->base);
726 726
727 irq = platform_get_irq(pdev, 0); 727 irq = platform_get_irq(pdev, 0);
728 if (irq < 0) { 728 if (irq < 0)
729 dev_err(dev, "failed to get irq\n");
730 return -ENODEV; 729 return -ENODEV;
731 }
732 730
733 /* clear IRQs before registering fault handler, just in case the 731 /* clear IRQs before registering fault handler, just in case the
734 * boot-loader left us a surprise: 732 * boot-loader left us a surprise:
@@ -804,7 +802,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
804 struct qcom_iommu_dev *qcom_iommu; 802 struct qcom_iommu_dev *qcom_iommu;
805 struct device *dev = &pdev->dev; 803 struct device *dev = &pdev->dev;
806 struct resource *res; 804 struct resource *res;
807 int ret, sz, max_asid = 0; 805 int ret, max_asid = 0;
808 806
809 /* find the max asid (which is 1:1 to ctx bank idx), so we know how 807 /* find the max asid (which is 1:1 to ctx bank idx), so we know how
810 * many child ctx devices we have: 808 * many child ctx devices we have:
@@ -812,9 +810,8 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
812 for_each_child_of_node(dev->of_node, child) 810 for_each_child_of_node(dev->of_node, child)
813 max_asid = max(max_asid, get_asid(child)); 811 max_asid = max(max_asid, get_asid(child));
814 812
815 sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0])); 813 qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
816 814 GFP_KERNEL);
817 qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL);
818 if (!qcom_iommu) 815 if (!qcom_iommu)
819 return -ENOMEM; 816 return -ENOMEM;
820 qcom_iommu->num_ctxs = max_asid; 817 qcom_iommu->num_ctxs = max_asid;
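
The qcom_iommu hunk replaces open-coded trailing-array size arithmetic with struct_size() from <linux/overflow.h>, which evaluates to sizeof(*p) plus n elements of the flexible array and saturates rather than wraps on overflow. A hedged sketch with an illustrative structure:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Illustrative type; only the struct_size() usage matters here. */
struct example_dev {
	unsigned int num_ctxs;
	struct example_ctx *ctxs[];	/* flexible array member */
};

static struct example_dev *example_alloc(unsigned int n)
{
	struct example_dev *d;

	/* sizeof(*d) + n * sizeof(d->ctxs[0]), saturating on overflow */
	d = kzalloc(struct_size(d, ctxs, n), GFP_KERNEL);
	if (!d)
		return NULL;

	d->num_ctxs = n;
	return d;
}
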
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6b5acc92ca2..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1599 unsigned long freed; 1599 unsigned long freed;
1600 1600
1601 c = container_of(shrink, struct dm_bufio_client, shrinker); 1601 c = container_of(shrink, struct dm_bufio_client, shrinker);
1602 if (!dm_bufio_trylock(c)) 1602 if (sc->gfp_mask & __GFP_FS)
1603 dm_bufio_lock(c);
1604 else if (!dm_bufio_trylock(c))
1603 return SHRINK_STOP; 1605 return SHRINK_STOP;
1604 1606
1605 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); 1607 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
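
The dm-bufio hunk keys the shrinker's locking strategy off sc->gfp_mask: a reclaim context that allows __GFP_FS may sleep on the client lock, while any other context must settle for a trylock and abort with SHRINK_STOP. A hedged sketch of the pattern; the client type here is illustrative, dm-bufio wraps its lock differently:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

/* Illustrative client; dm-bufio wraps its lock differently. */
struct example_client {
	struct mutex lock;
	struct shrinker shrinker;
};

static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct example_client *c =
		container_of(shrink, struct example_client, shrinker);

	if (sc->gfp_mask & __GFP_FS)
		mutex_lock(&c->lock);		/* safe to sleep here */
	else if (!mutex_trylock(&c->lock))
		return SHRINK_STOP;		/* avoid reclaim deadlock */

	/* ... scan and free buffers here ... */

	mutex_unlock(&c->lock);
	return 0;
}
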
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 845f376a72d9..8288887b7f94 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
25 unsigned long long badblock_count; 25 unsigned long long badblock_count;
26 spinlock_t dust_lock; 26 spinlock_t dust_lock;
27 unsigned int blksz; 27 unsigned int blksz;
28 int sect_per_block_shift;
28 unsigned int sect_per_block; 29 unsigned int sect_per_block;
29 sector_t start; 30 sector_t start;
30 bool fail_read_on_bb:1; 31 bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79 unsigned long flags; 80 unsigned long flags;
80 81
81 spin_lock_irqsave(&dd->dust_lock, flags); 82 spin_lock_irqsave(&dd->dust_lock, flags);
82 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 83 bblock = dust_rb_search(&dd->badblocklist, block);
83 84
84 if (bblock == NULL) { 85 if (bblock == NULL) {
85 if (!dd->quiet_mode) { 86 if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
113 } 114 }
114 115
115 spin_lock_irqsave(&dd->dust_lock, flags); 116 spin_lock_irqsave(&dd->dust_lock, flags);
116 bblock->bb = block * dd->sect_per_block; 117 bblock->bb = block;
117 if (!dust_rb_insert(&dd->badblocklist, bblock)) { 118 if (!dust_rb_insert(&dd->badblocklist, bblock)) {
118 if (!dd->quiet_mode) { 119 if (!dd->quiet_mode) {
119 DMERR("%s: block %llu already in badblocklist", 120 DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
138 unsigned long flags; 139 unsigned long flags;
139 140
140 spin_lock_irqsave(&dd->dust_lock, flags); 141 spin_lock_irqsave(&dd->dust_lock, flags);
141 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 142 bblock = dust_rb_search(&dd->badblocklist, block);
142 if (bblock != NULL) 143 if (bblock != NULL)
143 DMINFO("%s: block %llu found in badblocklist", __func__, block); 144 DMINFO("%s: block %llu found in badblocklist", __func__, block);
144 else 145 else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
165 int ret = DM_MAPIO_REMAPPED; 166 int ret = DM_MAPIO_REMAPPED;
166 167
167 if (fail_read_on_bb) { 168 if (fail_read_on_bb) {
169 thisblock >>= dd->sect_per_block_shift;
168 spin_lock_irqsave(&dd->dust_lock, flags); 170 spin_lock_irqsave(&dd->dust_lock, flags);
169 ret = __dust_map_read(dd, thisblock); 171 ret = __dust_map_read(dd, thisblock);
170 spin_unlock_irqrestore(&dd->dust_lock, flags); 172 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
195 unsigned long flags; 197 unsigned long flags;
196 198
197 if (fail_read_on_bb) { 199 if (fail_read_on_bb) {
200 thisblock >>= dd->sect_per_block_shift;
198 spin_lock_irqsave(&dd->dust_lock, flags); 201 spin_lock_irqsave(&dd->dust_lock, flags);
199 __dust_map_write(dd, thisblock); 202 __dust_map_write(dd, thisblock);
200 spin_unlock_irqrestore(&dd->dust_lock, flags); 203 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
331 dd->blksz = blksz; 334 dd->blksz = blksz;
332 dd->start = tmp; 335 dd->start = tmp;
333 336
337 dd->sect_per_block_shift = __ffs(sect_per_block);
338
334 /* 339 /*
335 * Whether to fail a read on a "bad" block. 340 * Whether to fail a read on a "bad" block.
336 * Defaults to false; enabled later by message. 341 * Defaults to false; enabled later by message.
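
Since dm-dust constrains sect_per_block to a power of two, __ffs() yields its base-2 logarithm and the per-I/O sector-to-block conversion becomes a shift; bad-block keys are now stored in block units rather than pre-multiplied sectors. A hedged helper showing the arithmetic; the function name is illustrative:

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustrative helper; valid only for power-of-two sect_per_block. */
static sector_t example_sector_to_block(sector_t sector,
					unsigned long sect_per_block)
{
	/* __ffs(8) == 3, so this divides by 8 with a single shift */
	return sector >> __ffs(sect_per_block);
}
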
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b1b0de402dfc..9118ab85cb3a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1943,7 +1943,22 @@ offload_to_thread:
1943 queue_work(ic->wait_wq, &dio->work); 1943 queue_work(ic->wait_wq, &dio->work);
1944 return; 1944 return;
1945 } 1945 }
1946 if (journal_read_pos != NOT_FOUND)
1947 dio->range.n_sectors = ic->sectors_per_block;
1946 wait_and_add_new_range(ic, &dio->range); 1948 wait_and_add_new_range(ic, &dio->range);
1949 /*
1950 * wait_and_add_new_range drops the spinlock, so the journal
1951 * may have been changed arbitrarily. We need to recheck.
1952 * To simplify the code, we restrict I/O size to just one block.
1953 */
1954 if (journal_read_pos != NOT_FOUND) {
1955 sector_t next_sector;
1956 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1957 if (unlikely(new_pos != journal_read_pos)) {
1958 remove_range_unlocked(ic, &dio->range);
1959 goto retry;
1960 }
1961 }
1947 } 1962 }
1948 spin_unlock_irq(&ic->endio_wait.lock); 1963 spin_unlock_irq(&ic->endio_wait.lock);
1949 1964
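
The dm-integrity hunk is an instance of the classic recheck-after-blocking pattern: wait_and_add_new_range() can drop ic->endio_wait.lock, so the journal position cached before the wait must be looked up again afterwards and the operation retried if it moved. A hypothetical userspace sketch of the same shape; all names here are stand-ins for the driver's helpers:

#include <pthread.h>

/*
 * Hypothetical sketch: journal_pos stands in for the find_journal_node()
 * result, the condvar for the range wait that drops the lock.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int journal_pos;
static int range_busy;

static int claim_range(void)
{
	int pos;

	pthread_mutex_lock(&lock);
retry:
	pos = journal_pos;			/* lookup under the lock */
	while (range_busy)
		pthread_cond_wait(&cond, &lock);	/* drops the lock */
	if (journal_pos != pos)
		goto retry;			/* state moved: revalidate */
	range_busy = 1;
	pthread_mutex_unlock(&lock);
	return pos;
}
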
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index df2011de7be2..1bbe4a34ef4c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job)
566 * no point in continuing. 566 * no point in continuing.
567 */ 567 */
568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && 568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
569 job->master_job->write_err) 569 job->master_job->write_err) {
570 job->write_err = job->master_job->write_err;
570 return -EIO; 571 return -EIO;
572 }
571 573
572 io_job_start(job->kc->throttle); 574 io_job_start(job->kc->throttle);
573 575
@@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
619 else 621 else
620 job->read_err = 1; 622 job->read_err = 1;
621 push(&kc->complete_jobs, job); 623 push(&kc->complete_jobs, job);
624 wake(kc);
622 break; 625 break;
623 } 626 }
624 627
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8a60a4a070ac..1f933dd197cd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3194 */ 3194 */
3195 r = rs_prepare_reshape(rs); 3195 r = rs_prepare_reshape(rs);
3196 if (r) 3196 if (r)
3197 return r; 3197 goto bad;
3198 3198
3199 /* Reshaping ain't recovery, so disable recovery */ 3199 /* Reshaping ain't recovery, so disable recovery */
3200 rs_setup_recovery(rs, MaxSector); 3200 rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7b6c3ee9e755..8820931ec7d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t)
1342} 1342}
1343EXPORT_SYMBOL(dm_table_event); 1343EXPORT_SYMBOL(dm_table_event);
1344 1344
1345sector_t dm_table_get_size(struct dm_table *t) 1345inline sector_t dm_table_get_size(struct dm_table *t)
1346{ 1346{
1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1348} 1348}
@@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1367 unsigned int l, n = 0, k = 0; 1367 unsigned int l, n = 0, k = 0;
1368 sector_t *node; 1368 sector_t *node;
1369 1369
1370 if (unlikely(sector >= dm_table_get_size(t)))
1371 return &t->targets[t->num_targets];
1372
1370 for (l = 0; l < t->depth; l++) { 1373 for (l = 0; l < t->depth; l++) {
1371 n = get_child(n, k); 1374 n = get_child(n, k);
1372 node = get_node(t, l, n); 1375 node = get_node(t, l, n);
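
The dm-table hunk makes dm_table_find_target() return &t->targets[t->num_targets], a one-past-the-end sentinel, for out-of-range sectors, so callers can test validity with a pointer compare instead of a separate error code; forming (but never dereferencing) that address is well-defined C. A hedged userspace sketch of the convention:

#include <stdio.h>

struct target { unsigned long begin, len; };

#define NUM_TARGETS 3
static struct target targets[NUM_TARGETS] = {
	{ 0, 100 }, { 100, 100 }, { 200, 100 },
};

/* One-past-the-end address may be formed and compared, never read. */
static struct target *find_target(unsigned long sector)
{
	unsigned long i;

	if (sector >= 300)
		return &targets[NUM_TARGETS];	/* sentinel: out of range */
	for (i = 0; i < NUM_TARGETS; i++)
		if (sector < targets[i].begin + targets[i].len)
			return &targets[i];
	return &targets[NUM_TARGETS];
}

int main(void)
{
	struct target *t = find_target(450);

	printf("valid: %d\n", t != &targets[NUM_TARGETS]);	/* valid: 0 */
	return 0;
}
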
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -34,7 +35,7 @@
34 * (1) Super block (1 block) 35 * (1) Super block (1 block)
35 * (2) Chunk mapping table (nr_map_blocks) 36 * (2) Chunk mapping table (nr_map_blocks)
36 * (3) Bitmap blocks (nr_bitmap_blocks) 37 * (3) Bitmap blocks (nr_bitmap_blocks)
37 * All metadata blocks are stored in conventional zones, starting from the 38 * All metadata blocks are stored in conventional zones, starting from
38 * the first conventional zone found on disk. 39 * the first conventional zone found on disk.
39 */ 40 */
40struct dmz_super { 41struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
233 * Lock/unlock metadata access. This is a "read" lock on a semaphore 234 * Lock/unlock metadata access. This is a "read" lock on a semaphore
234 * that prevents metadata flush from running while metadata are being 235 * that prevents metadata flush from running while metadata are being
235 * modified. The actual metadata write mutual exclusion is achieved with 236 * modified. The actual metadata write mutual exclusion is achieved with
236 * the map lock and zone styate management (active and reclaim state are 237 * the map lock and zone state management (active and reclaim state are
237 * mutually exclusive). 238 * mutually exclusive).
238 */ 239 */
239void dmz_lock_metadata(struct dmz_metadata *zmd) 240void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
402 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; 403 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
403 struct bio *bio; 404 struct bio *bio;
404 405
406 if (dmz_bdev_is_dying(zmd->dev))
407 return ERR_PTR(-EIO);
408
405 /* Get a new block and a BIO to read it */ 409 /* Get a new block and a BIO to read it */
406 mblk = dmz_alloc_mblock(zmd, mblk_no); 410 mblk = dmz_alloc_mblock(zmd, mblk_no);
407 if (!mblk) 411 if (!mblk)
408 return NULL; 412 return ERR_PTR(-ENOMEM);
409 413
410 bio = bio_alloc(GFP_NOIO, 1); 414 bio = bio_alloc(GFP_NOIO, 1);
411 if (!bio) { 415 if (!bio) {
412 dmz_free_mblock(zmd, mblk); 416 dmz_free_mblock(zmd, mblk);
413 return NULL; 417 return ERR_PTR(-ENOMEM);
414 } 418 }
415 419
416 spin_lock(&zmd->mblk_lock); 420 spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
541 if (!mblk) { 545 if (!mblk) {
542 /* Cache miss: read the block from disk */ 546 /* Cache miss: read the block from disk */
543 mblk = dmz_get_mblock_slow(zmd, mblk_no); 547 mblk = dmz_get_mblock_slow(zmd, mblk_no);
544 if (!mblk) 548 if (IS_ERR(mblk))
545 return ERR_PTR(-ENOMEM); 549 return mblk;
546 } 550 }
547 551
548 /* Wait for on-going read I/O and check for error */ 552 /* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
570/* 574/*
571 * Issue a metadata block write BIO. 575 * Issue a metadata block write BIO.
572 */ 576 */
573static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, 577static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
574 unsigned int set) 578 unsigned int set)
575{ 579{
576 sector_t block = zmd->sb[set].block + mblk->no; 580 sector_t block = zmd->sb[set].block + mblk->no;
577 struct bio *bio; 581 struct bio *bio;
578 582
583 if (dmz_bdev_is_dying(zmd->dev))
584 return -EIO;
585
579 bio = bio_alloc(GFP_NOIO, 1); 586 bio = bio_alloc(GFP_NOIO, 1);
580 if (!bio) { 587 if (!bio) {
581 set_bit(DMZ_META_ERROR, &mblk->state); 588 set_bit(DMZ_META_ERROR, &mblk->state);
582 return; 589 return -ENOMEM;
583 } 590 }
584 591
585 set_bit(DMZ_META_WRITING, &mblk->state); 592 set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
591 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); 598 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
592 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); 599 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
593 submit_bio(bio); 600 submit_bio(bio);
601
602 return 0;
594} 603}
595 604
596/* 605/*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
602 struct bio *bio; 611 struct bio *bio;
603 int ret; 612 int ret;
604 613
614 if (dmz_bdev_is_dying(zmd->dev))
615 return -EIO;
616
605 bio = bio_alloc(GFP_NOIO, 1); 617 bio = bio_alloc(GFP_NOIO, 1);
606 if (!bio) 618 if (!bio)
607 return -ENOMEM; 619 return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
659{ 671{
660 struct dmz_mblock *mblk; 672 struct dmz_mblock *mblk;
661 struct blk_plug plug; 673 struct blk_plug plug;
662 int ret = 0; 674 int ret = 0, nr_mblks_submitted = 0;
663 675
664 /* Issue writes */ 676 /* Issue writes */
665 blk_start_plug(&plug); 677 blk_start_plug(&plug);
666 list_for_each_entry(mblk, write_list, link) 678 list_for_each_entry(mblk, write_list, link) {
667 dmz_write_mblock(zmd, mblk, set); 679 ret = dmz_write_mblock(zmd, mblk, set);
680 if (ret)
681 break;
682 nr_mblks_submitted++;
683 }
668 blk_finish_plug(&plug); 684 blk_finish_plug(&plug);
669 685
670 /* Wait for completion */ 686 /* Wait for completion */
671 list_for_each_entry(mblk, write_list, link) { 687 list_for_each_entry(mblk, write_list, link) {
688 if (!nr_mblks_submitted)
689 break;
672 wait_on_bit_io(&mblk->state, DMZ_META_WRITING, 690 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
673 TASK_UNINTERRUPTIBLE); 691 TASK_UNINTERRUPTIBLE);
674 if (test_bit(DMZ_META_ERROR, &mblk->state)) { 692 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
675 clear_bit(DMZ_META_ERROR, &mblk->state); 693 clear_bit(DMZ_META_ERROR, &mblk->state);
676 ret = -EIO; 694 ret = -EIO;
677 } 695 }
696 nr_mblks_submitted--;
678 } 697 }
679 698
680 /* Flush drive cache (this will also sync data) */ 699 /* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
736 */ 755 */
737 dmz_lock_flush(zmd); 756 dmz_lock_flush(zmd);
738 757
758 if (dmz_bdev_is_dying(zmd->dev)) {
759 ret = -EIO;
760 goto out;
761 }
762
739 /* Get dirty blocks */ 763 /* Get dirty blocks */
740 spin_lock(&zmd->mblk_lock); 764 spin_lock(&zmd->mblk_lock);
741 list_splice_init(&zmd->mblk_dirty_list, &write_list); 765 list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1542 struct dm_zone *zone; 1566 struct dm_zone *zone;
1543 1567
1544 if (list_empty(&zmd->map_rnd_list)) 1568 if (list_empty(&zmd->map_rnd_list))
1545 return NULL; 1569 return ERR_PTR(-EBUSY);
1546 1570
1547 list_for_each_entry(zone, &zmd->map_rnd_list, link) { 1571 list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1548 if (dmz_is_buf(zone)) 1572 if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1553 return dzone; 1577 return dzone;
1554 } 1578 }
1555 1579
1556 return NULL; 1580 return ERR_PTR(-EBUSY);
1557} 1581}
1558 1582
1559/* 1583/*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1564 struct dm_zone *zone; 1588 struct dm_zone *zone;
1565 1589
1566 if (list_empty(&zmd->map_seq_list)) 1590 if (list_empty(&zmd->map_seq_list))
1567 return NULL; 1591 return ERR_PTR(-EBUSY);
1568 1592
1569 list_for_each_entry(zone, &zmd->map_seq_list, link) { 1593 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1570 if (!zone->bzone) 1594 if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1573 return zone; 1597 return zone;
1574 } 1598 }
1575 1599
1576 return NULL; 1600 return ERR_PTR(-EBUSY);
1577} 1601}
1578 1602
1579/* 1603/*
@@ -1628,9 +1652,13 @@ again:
1628 if (op != REQ_OP_WRITE) 1652 if (op != REQ_OP_WRITE)
1629 goto out; 1653 goto out;
1630 1654
1631 /* Alloate a random zone */ 1655 /* Allocate a random zone */
1632 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1656 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1633 if (!dzone) { 1657 if (!dzone) {
1658 if (dmz_bdev_is_dying(zmd->dev)) {
1659 dzone = ERR_PTR(-EIO);
1660 goto out;
1661 }
1634 dmz_wait_for_free_zones(zmd); 1662 dmz_wait_for_free_zones(zmd);
1635 goto again; 1663 goto again;
1636 } 1664 }
@@ -1725,9 +1753,13 @@ again:
1725 if (bzone) 1753 if (bzone)
1726 goto out; 1754 goto out;
1727 1755
1728 /* Alloate a random zone */ 1756 /* Allocate a random zone */
1729 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1757 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1730 if (!bzone) { 1758 if (!bzone) {
1759 if (dmz_bdev_is_dying(zmd->dev)) {
1760 bzone = ERR_PTR(-EIO);
1761 goto out;
1762 }
1731 dmz_wait_for_free_zones(zmd); 1763 dmz_wait_for_free_zones(zmd);
1732 goto again; 1764 goto again;
1733 } 1765 }
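
The dm-zoned metadata paths switch from returning NULL to the kernel's ERR_PTR convention so callers can distinguish a dying backing device (-EIO) from allocation failure (-ENOMEM) through the single pointer return. A hedged userspace re-creation of the encoding; the real macros live in <linux/err.h>:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace re-creation of <linux/err.h>: the errno rides in the pointer. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *block = ERR_PTR(-EIO);	/* e.g. the backing device is dying */

	if (IS_ERR(block))
		printf("lookup failed: %ld\n", PTR_ERR(block));	/* -EIO */
	return 0;
}
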
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95eb075..d240d7ca8a8a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -37,7 +38,7 @@ enum {
37/* 38/*
38 * Number of seconds of target BIO inactivity to consider the target idle. 39 * Number of seconds of target BIO inactivity to consider the target idle.
39 */ 40 */
40#define DMZ_IDLE_PERIOD (10UL * HZ) 41#define DMZ_IDLE_PERIOD (10UL * HZ)
41 42
42/* 43/*
43 * Percentage of unmapped (free) random zones below which reclaim starts 44 * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
134 set_bit(DM_KCOPYD_WRITE_SEQ, &flags); 135 set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
135 136
136 while (block < end_block) { 137 while (block < end_block) {
138 if (dev->flags & DMZ_BDEV_DYING)
139 return -EIO;
140
137 /* Get a valid region from the source zone */ 141 /* Get a valid region from the source zone */
138 ret = dmz_first_valid_block(zmd, src_zone, &block); 142 ret = dmz_first_valid_block(zmd, src_zone, &block);
139 if (ret <= 0) 143 if (ret <= 0)
@@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
215 219
216 dmz_unlock_flush(zmd); 220 dmz_unlock_flush(zmd);
217 221
218 return 0; 222 return ret;
219} 223}
220 224
221/* 225/*
@@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
259 263
260 dmz_unlock_flush(zmd); 264 dmz_unlock_flush(zmd);
261 265
262 return 0; 266 return ret;
263} 267}
264 268
265/* 269/*
@@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
312 316
313 dmz_unlock_flush(zmd); 317 dmz_unlock_flush(zmd);
314 318
315 return 0; 319 return ret;
316} 320}
317 321
318/* 322/*
@@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
334/* 338/*
335 * Find a candidate zone for reclaim and process it. 339 * Find a candidate zone for reclaim and process it.
336 */ 340 */
337static void dmz_reclaim(struct dmz_reclaim *zrc) 341static int dmz_do_reclaim(struct dmz_reclaim *zrc)
338{ 342{
339 struct dmz_metadata *zmd = zrc->metadata; 343 struct dmz_metadata *zmd = zrc->metadata;
340 struct dm_zone *dzone; 344 struct dm_zone *dzone;
@@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
344 348
345 /* Get a data zone */ 349 /* Get a data zone */
346 dzone = dmz_get_zone_for_reclaim(zmd); 350 dzone = dmz_get_zone_for_reclaim(zmd);
347 if (!dzone) 351 if (IS_ERR(dzone))
348 return; 352 return PTR_ERR(dzone);
349 353
350 start = jiffies; 354 start = jiffies;
351 355
@@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
391out: 395out:
392 if (ret) { 396 if (ret) {
393 dmz_unlock_zone_reclaim(dzone); 397 dmz_unlock_zone_reclaim(dzone);
394 return; 398 return ret;
395 } 399 }
396 400
397 (void) dmz_flush_metadata(zrc->metadata); 401 ret = dmz_flush_metadata(zrc->metadata);
402 if (ret) {
403 dmz_dev_debug(zrc->dev,
404 "Metadata flush for zone %u failed, err %d\n",
405 dmz_id(zmd, rzone), ret);
406 return ret;
407 }
398 408
399 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", 409 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
400 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); 410 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
411 return 0;
401} 412}
402 413
403/* 414/*
@@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
427 return false; 438 return false;
428 439
429 /* 440 /*
430 * If the percentage of unmappped random zones is low, 441 * If the percentage of unmapped random zones is low,
431 * reclaim even if the target is busy. 442 * reclaim even if the target is busy.
432 */ 443 */
433 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; 444 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
@@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work)
442 struct dmz_metadata *zmd = zrc->metadata; 453 struct dmz_metadata *zmd = zrc->metadata;
443 unsigned int nr_rnd, nr_unmap_rnd; 454 unsigned int nr_rnd, nr_unmap_rnd;
444 unsigned int p_unmap_rnd; 455 unsigned int p_unmap_rnd;
456 int ret;
457
458 if (dmz_bdev_is_dying(zrc->dev))
459 return;
445 460
446 if (!dmz_should_reclaim(zrc)) { 461 if (!dmz_should_reclaim(zrc)) {
447 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); 462 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work)
471 (dmz_target_idle(zrc) ? "Idle" : "Busy"), 486 (dmz_target_idle(zrc) ? "Idle" : "Busy"),
472 p_unmap_rnd, nr_unmap_rnd, nr_rnd); 487 p_unmap_rnd, nr_unmap_rnd, nr_rnd);
473 488
474 dmz_reclaim(zrc); 489 ret = dmz_do_reclaim(zrc);
490 if (ret) {
491 dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
492 if (ret == -EIO)
493 /*
494 * LLD might be performing some error handling sequence
 495 * on the underlying device. To avoid interfering, do not
496 * attempt to schedule the next reclaim run immediately.
497 */
498 return;
499 }
475 500
476 dmz_schedule_reclaim(zrc); 501 dmz_schedule_reclaim(zrc);
477} 502}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
133 134
134 refcount_inc(&bioctx->ref); 135 refcount_inc(&bioctx->ref);
135 generic_make_request(clone); 136 generic_make_request(clone);
137 if (clone->bi_status == BLK_STS_IOERR)
138 return -EIO;
136 139
137 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) 140 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
138 zone->wp_block += nr_blocks; 141 zone->wp_block += nr_blocks;
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
277 280
278 /* Get the buffer zone. One will be allocated if needed */ 281 /* Get the buffer zone. One will be allocated if needed */
279 bzone = dmz_get_chunk_buffer(zmd, zone); 282 bzone = dmz_get_chunk_buffer(zmd, zone);
280 if (!bzone) 283 if (IS_ERR(bzone))
281 return -ENOSPC; 284 return PTR_ERR(bzone);
282 285
283 if (dmz_is_readonly(bzone)) 286 if (dmz_is_readonly(bzone))
284 return -EROFS; 287 return -EROFS;
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
389 392
390 dmz_lock_metadata(zmd); 393 dmz_lock_metadata(zmd);
391 394
395 if (dmz->dev->flags & DMZ_BDEV_DYING) {
396 ret = -EIO;
397 goto out;
398 }
399
392 /* 400 /*
393 * Get the data zone mapping the chunk. There may be no 401 * Get the data zone mapping the chunk. There may be no
394 * mapping for read and discard. If a mapping is obtained, 402 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
493 501
494 /* Flush dirty metadata blocks */ 502 /* Flush dirty metadata blocks */
495 ret = dmz_flush_metadata(dmz->metadata); 503 ret = dmz_flush_metadata(dmz->metadata);
504 if (ret)
505 dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
496 506
497 /* Process queued flush requests */ 507 /* Process queued flush requests */
498 while (1) { 508 while (1) {
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
513 * Get a chunk work and start it to process a new BIO. 523 * Get a chunk work and start it to process a new BIO.
514 * If the BIO chunk has no work yet, create one. 524 * If the BIO chunk has no work yet, create one.
515 */ 525 */
516static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) 526static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
517{ 527{
518 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); 528 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
519 struct dm_chunk_work *cw; 529 struct dm_chunk_work *cw;
530 int ret = 0;
520 531
521 mutex_lock(&dmz->chunk_lock); 532 mutex_lock(&dmz->chunk_lock);
522 533
523 /* Get the BIO chunk work. If one is not active yet, create one */ 534 /* Get the BIO chunk work. If one is not active yet, create one */
524 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); 535 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
525 if (!cw) { 536 if (!cw) {
526 int ret;
527 537
528 /* Create a new chunk work */ 538 /* Create a new chunk work */
529 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); 539 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
530 if (!cw) 540 if (unlikely(!cw)) {
541 ret = -ENOMEM;
531 goto out; 542 goto out;
543 }
532 544
533 INIT_WORK(&cw->work, dmz_chunk_work); 545 INIT_WORK(&cw->work, dmz_chunk_work);
534 refcount_set(&cw->refcount, 0); 546 refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
539 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); 551 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
540 if (unlikely(ret)) { 552 if (unlikely(ret)) {
541 kfree(cw); 553 kfree(cw);
542 cw = NULL;
543 goto out; 554 goto out;
544 } 555 }
545 } 556 }
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
547 bio_list_add(&cw->bio_list, bio); 558 bio_list_add(&cw->bio_list, bio);
548 dmz_get_chunk_work(cw); 559 dmz_get_chunk_work(cw);
549 560
561 dmz_reclaim_bio_acc(dmz->reclaim);
550 if (queue_work(dmz->chunk_wq, &cw->work)) 562 if (queue_work(dmz->chunk_wq, &cw->work))
551 dmz_get_chunk_work(cw); 563 dmz_get_chunk_work(cw);
552out: 564out:
553 mutex_unlock(&dmz->chunk_lock); 565 mutex_unlock(&dmz->chunk_lock);
566 return ret;
567}
568
569/*
570 * Check the backing device availability. If it's on the way out,
571 * start failing I/O. Reclaim and metadata components also call this
572 * function to cleanly abort operation in the event of such failure.
573 */
574bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
575{
576 struct gendisk *disk;
577
578 if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
579 disk = dmz_dev->bdev->bd_disk;
580 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
581 dmz_dev_warn(dmz_dev, "Backing device queue dying");
582 dmz_dev->flags |= DMZ_BDEV_DYING;
583 } else if (disk->fops->check_events) {
584 if (disk->fops->check_events(disk, 0) &
585 DISK_EVENT_MEDIA_CHANGE) {
586 dmz_dev_warn(dmz_dev, "Backing device offline");
587 dmz_dev->flags |= DMZ_BDEV_DYING;
588 }
589 }
590 }
591
592 return dmz_dev->flags & DMZ_BDEV_DYING;
554} 593}
555 594
556/* 595/*
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
564 sector_t sector = bio->bi_iter.bi_sector; 603 sector_t sector = bio->bi_iter.bi_sector;
565 unsigned int nr_sectors = bio_sectors(bio); 604 unsigned int nr_sectors = bio_sectors(bio);
566 sector_t chunk_sector; 605 sector_t chunk_sector;
606 int ret;
607
608 if (dmz_bdev_is_dying(dmz->dev))
609 return DM_MAPIO_KILL;
567 610
568 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", 611 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
569 bio_op(bio), (unsigned long long)sector, nr_sectors, 612 bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
601 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); 644 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
602 645
603 /* Now ready to handle this BIO */ 646 /* Now ready to handle this BIO */
604 dmz_reclaim_bio_acc(dmz->reclaim); 647 ret = dmz_queue_chunk_work(dmz, bio);
605 dmz_queue_chunk_work(dmz, bio); 648 if (ret) {
649 dmz_dev_debug(dmz->dev,
650 "BIO op %d, can't process chunk %llu, err %i\n",
651 bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
652 ret);
653 return DM_MAPIO_REQUEUE;
654 }
606 655
607 return DM_MAPIO_SUBMITTED; 656 return DM_MAPIO_SUBMITTED;
608} 657}
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
855{ 904{
856 struct dmz_target *dmz = ti->private; 905 struct dmz_target *dmz = ti->private;
857 906
907 if (dmz_bdev_is_dying(dmz->dev))
908 return -ENODEV;
909
858 *bdev = dmz->dev->bdev; 910 *bdev = dmz->dev->bdev;
859 911
860 return 0; 912 return 0;
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08..d8e70b0ade35 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -56,6 +57,8 @@ struct dmz_dev {
56 57
57 unsigned int nr_zones; 58 unsigned int nr_zones;
58 59
60 unsigned int flags;
61
59 sector_t zone_nr_sectors; 62 sector_t zone_nr_sectors;
60 unsigned int zone_nr_sectors_shift; 63 unsigned int zone_nr_sectors_shift;
61 64
@@ -67,6 +70,9 @@ struct dmz_dev {
67 (dev)->zone_nr_sectors_shift) 70 (dev)->zone_nr_sectors_shift)
68#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) 71#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
69 72
73/* Device flags. */
74#define DMZ_BDEV_DYING (1 << 0)
75
70/* 76/*
71 * Zone descriptor. 77 * Zone descriptor.
72 */ 78 */
@@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
245void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); 251void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
246void dmz_schedule_reclaim(struct dmz_reclaim *zrc); 252void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
247 253
254/*
255 * Functions defined in dm-zoned-target.c
256 */
257bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
258
248#endif /* DM_ZONED_H */ 259#endif /* DM_ZONED_H */
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b319757b1e..8aae0624a297 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
628 628
629 new_parent = shadow_current(s); 629 new_parent = shadow_current(s);
630 630
631 pn = dm_block_data(new_parent);
632 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
633 sizeof(__le64) : s->info->value_type.size;
634
635 /* create & init the left block */
631 r = new_block(s->info, &left); 636 r = new_block(s->info, &left);
632 if (r < 0) 637 if (r < 0)
633 return r; 638 return r;
634 639
640 ln = dm_block_data(left);
641 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
642
643 ln->header.flags = pn->header.flags;
644 ln->header.nr_entries = cpu_to_le32(nr_left);
645 ln->header.max_entries = pn->header.max_entries;
646 ln->header.value_size = pn->header.value_size;
647 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
648 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
649
650 /* create & init the right block */
635 r = new_block(s->info, &right); 651 r = new_block(s->info, &right);
636 if (r < 0) { 652 if (r < 0) {
637 unlock_block(s->info, left); 653 unlock_block(s->info, left);
638 return r; 654 return r;
639 } 655 }
640 656
641 pn = dm_block_data(new_parent);
642 ln = dm_block_data(left);
643 rn = dm_block_data(right); 657 rn = dm_block_data(right);
644
645 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
646 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; 658 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
647 659
648 ln->header.flags = pn->header.flags;
649 ln->header.nr_entries = cpu_to_le32(nr_left);
650 ln->header.max_entries = pn->header.max_entries;
651 ln->header.value_size = pn->header.value_size;
652
653 rn->header.flags = pn->header.flags; 660 rn->header.flags = pn->header.flags;
654 rn->header.nr_entries = cpu_to_le32(nr_right); 661 rn->header.nr_entries = cpu_to_le32(nr_right);
655 rn->header.max_entries = pn->header.max_entries; 662 rn->header.max_entries = pn->header.max_entries;
656 rn->header.value_size = pn->header.value_size; 663 rn->header.value_size = pn->header.value_size;
657
658 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
659 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); 664 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
660
661 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
662 sizeof(__le64) : s->info->value_type.size;
663 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
664 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), 665 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
665 nr_right * size); 666 nr_right * size);
666 667
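
The dm-btree hunk does not change what is copied, only when: the left block is now fully populated before the second new_block() call, and one visible effect is that the error path after a failed right-block allocation only has to unlock an already-consistent left child. A stripped-down sketch of that ordering discipline, with plain malloc() in place of the block manager and illustrative names throughout:

        #include <stdlib.h>
        #include <string.h>

        struct node {
                int nr;
                int keys[16];
        };

        static int split(const struct node *parent,
                         struct node **lp, struct node **rp)
        {
                int nr_left = parent->nr / 2;
                struct node *left, *right;

                left = malloc(sizeof(*left));
                if (!left)
                        return -1;

                /* Finish the first child completely before the next
                 * allocation is even attempted. */
                left->nr = nr_left;
                memcpy(left->keys, parent->keys,
                       nr_left * sizeof(left->keys[0]));

                right = malloc(sizeof(*right));
                if (!right) {
                        free(left);     /* the only unwind step needed */
                        return -1;
                }
                right->nr = parent->nr - nr_left;
                memcpy(right->keys, parent->keys + nr_left,
                       right->nr * sizeof(right->keys[0]));

                *lp = left;
                *rp = right;
                return 0;
        }
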
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec449243966..25328582cc48 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
249 } 249 }
250 250
251 if (smm->recursion_count == 1) 251 if (smm->recursion_count == 1)
252 apply_bops(smm); 252 r = apply_bops(smm);
253 253
254 smm->recursion_count--; 254 smm->recursion_count--;
255 255
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 42ab43affbff..439d7d886873 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -41,17 +41,40 @@
41#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4)) 41#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
42#define F_MMU_EN BIT(0) 42#define F_MMU_EN BIT(0)
43 43
44/* SMI COMMON */
45#define SMI_BUS_SEL 0x220
46#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
 47/* All are MMU0 by default. Only specialize mmu1 here. */
48#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
49
50enum mtk_smi_gen {
51 MTK_SMI_GEN1,
52 MTK_SMI_GEN2
53};
54
55struct mtk_smi_common_plat {
56 enum mtk_smi_gen gen;
57 bool has_gals;
58 u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
59};
60
44struct mtk_smi_larb_gen { 61struct mtk_smi_larb_gen {
45 bool need_larbid;
46 int port_in_larb[MTK_LARB_NR_MAX + 1]; 62 int port_in_larb[MTK_LARB_NR_MAX + 1];
47 void (*config_port)(struct device *); 63 void (*config_port)(struct device *);
64 unsigned int larb_direct_to_common_mask;
65 bool has_gals;
48}; 66};
49 67
50struct mtk_smi { 68struct mtk_smi {
51 struct device *dev; 69 struct device *dev;
52 struct clk *clk_apb, *clk_smi; 70 struct clk *clk_apb, *clk_smi;
71 struct clk *clk_gals0, *clk_gals1;
53 struct clk *clk_async; /*only needed by mt2701*/ 72 struct clk *clk_async; /*only needed by mt2701*/
54 void __iomem *smi_ao_base; 73 union {
74 void __iomem *smi_ao_base; /* only for gen1 */
75 void __iomem *base; /* only for gen2 */
76 };
77 const struct mtk_smi_common_plat *plat;
55}; 78};
56 79
57struct mtk_smi_larb { /* larb: local arbiter */ 80struct mtk_smi_larb { /* larb: local arbiter */
@@ -63,82 +86,56 @@ struct mtk_smi_larb { /* larb: local arbiter */
63 u32 *mmu; 86 u32 *mmu;
64}; 87};
65 88
66enum mtk_smi_gen { 89static int mtk_smi_clk_enable(const struct mtk_smi *smi)
67 MTK_SMI_GEN1,
68 MTK_SMI_GEN2
69};
70
71static int mtk_smi_enable(const struct mtk_smi *smi)
72{ 90{
73 int ret; 91 int ret;
74 92
75 ret = pm_runtime_get_sync(smi->dev);
76 if (ret < 0)
77 return ret;
78
79 ret = clk_prepare_enable(smi->clk_apb); 93 ret = clk_prepare_enable(smi->clk_apb);
80 if (ret) 94 if (ret)
81 goto err_put_pm; 95 return ret;
82 96
83 ret = clk_prepare_enable(smi->clk_smi); 97 ret = clk_prepare_enable(smi->clk_smi);
84 if (ret) 98 if (ret)
85 goto err_disable_apb; 99 goto err_disable_apb;
86 100
101 ret = clk_prepare_enable(smi->clk_gals0);
102 if (ret)
103 goto err_disable_smi;
104
105 ret = clk_prepare_enable(smi->clk_gals1);
106 if (ret)
107 goto err_disable_gals0;
108
87 return 0; 109 return 0;
88 110
111err_disable_gals0:
112 clk_disable_unprepare(smi->clk_gals0);
113err_disable_smi:
114 clk_disable_unprepare(smi->clk_smi);
89err_disable_apb: 115err_disable_apb:
90 clk_disable_unprepare(smi->clk_apb); 116 clk_disable_unprepare(smi->clk_apb);
91err_put_pm:
92 pm_runtime_put_sync(smi->dev);
93 return ret; 117 return ret;
94} 118}
95 119
96static void mtk_smi_disable(const struct mtk_smi *smi) 120static void mtk_smi_clk_disable(const struct mtk_smi *smi)
97{ 121{
122 clk_disable_unprepare(smi->clk_gals1);
123 clk_disable_unprepare(smi->clk_gals0);
98 clk_disable_unprepare(smi->clk_smi); 124 clk_disable_unprepare(smi->clk_smi);
99 clk_disable_unprepare(smi->clk_apb); 125 clk_disable_unprepare(smi->clk_apb);
100 pm_runtime_put_sync(smi->dev);
101} 126}
102 127
103int mtk_smi_larb_get(struct device *larbdev) 128int mtk_smi_larb_get(struct device *larbdev)
104{ 129{
105 struct mtk_smi_larb *larb = dev_get_drvdata(larbdev); 130 int ret = pm_runtime_get_sync(larbdev);
106 const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
107 struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
108 int ret;
109
110 /* Enable the smi-common's power and clocks */
111 ret = mtk_smi_enable(common);
112 if (ret)
113 return ret;
114 131
115 /* Enable the larb's power and clocks */ 132 return (ret < 0) ? ret : 0;
116 ret = mtk_smi_enable(&larb->smi);
117 if (ret) {
118 mtk_smi_disable(common);
119 return ret;
120 }
121
122 /* Configure the iommu info for this larb */
123 larb_gen->config_port(larbdev);
124
125 return 0;
126} 133}
127EXPORT_SYMBOL_GPL(mtk_smi_larb_get); 134EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
128 135
129void mtk_smi_larb_put(struct device *larbdev) 136void mtk_smi_larb_put(struct device *larbdev)
130{ 137{
131 struct mtk_smi_larb *larb = dev_get_drvdata(larbdev); 138 pm_runtime_put_sync(larbdev);
132 struct mtk_smi *common = dev_get_drvdata(larb->smi_common_dev);
133
134 /*
135 * Don't de-configure the iommu info for this larb since there may be
136 * several modules in this larb.
137 * The iommu info will be reset after power off.
138 */
139
140 mtk_smi_disable(&larb->smi);
141 mtk_smi_disable(common);
142} 139}
143EXPORT_SYMBOL_GPL(mtk_smi_larb_put); 140EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
144 141
@@ -146,39 +143,26 @@ static int
146mtk_smi_larb_bind(struct device *dev, struct device *master, void *data) 143mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
147{ 144{
148 struct mtk_smi_larb *larb = dev_get_drvdata(dev); 145 struct mtk_smi_larb *larb = dev_get_drvdata(dev);
149 struct mtk_smi_iommu *smi_iommu = data; 146 struct mtk_smi_larb_iommu *larb_mmu = data;
150 unsigned int i; 147 unsigned int i;
151 148
152 if (larb->larb_gen->need_larbid) { 149 for (i = 0; i < MTK_LARB_NR_MAX; i++) {
153 larb->mmu = &smi_iommu->larb_imu[larb->larbid].mmu; 150 if (dev == larb_mmu[i].dev) {
154 return 0; 151 larb->larbid = i;
155 } 152 larb->mmu = &larb_mmu[i].mmu;
156
157 /*
158 * If there is no larbid property, Loop to find the corresponding
159 * iommu information.
160 */
161 for (i = 0; i < smi_iommu->larb_nr; i++) {
162 if (dev == smi_iommu->larb_imu[i].dev) {
163 /* The 'mmu' may be updated in iommu-attach/detach. */
164 larb->mmu = &smi_iommu->larb_imu[i].mmu;
165 return 0; 153 return 0;
166 } 154 }
167 } 155 }
168 return -ENODEV; 156 return -ENODEV;
169} 157}
170 158
171static void mtk_smi_larb_config_port_mt2712(struct device *dev) 159static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
172{ 160{
173 struct mtk_smi_larb *larb = dev_get_drvdata(dev); 161 struct mtk_smi_larb *larb = dev_get_drvdata(dev);
174 u32 reg; 162 u32 reg;
175 int i; 163 int i;
176 164
177 /* 165 if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
178 * larb 8/9 is the bdpsys larb, the iommu_en is enabled defaultly.
179 * Don't need to set it again.
180 */
181 if (larb->larbid == 8 || larb->larbid == 9)
182 return; 166 return;
183 167
184 for_each_set_bit(i, (unsigned long *)larb->mmu, 32) { 168 for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
@@ -243,7 +227,6 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
243}; 227};
244 228
245static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = { 229static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
246 .need_larbid = true,
247 .port_in_larb = { 230 .port_in_larb = {
248 LARB0_PORT_OFFSET, LARB1_PORT_OFFSET, 231 LARB0_PORT_OFFSET, LARB1_PORT_OFFSET,
249 LARB2_PORT_OFFSET, LARB3_PORT_OFFSET 232 LARB2_PORT_OFFSET, LARB3_PORT_OFFSET
@@ -252,8 +235,15 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
252}; 235};
253 236
254static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = { 237static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = {
255 .need_larbid = true, 238 .config_port = mtk_smi_larb_config_port_gen2_general,
256 .config_port = mtk_smi_larb_config_port_mt2712, 239 .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */
240};
241
242static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
243 .has_gals = true,
244 .config_port = mtk_smi_larb_config_port_gen2_general,
245 .larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
246 /* IPU0 | IPU1 | CCU */
257}; 247};
258 248
259static const struct of_device_id mtk_smi_larb_of_ids[] = { 249static const struct of_device_id mtk_smi_larb_of_ids[] = {
@@ -269,6 +259,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
269 .compatible = "mediatek,mt2712-smi-larb", 259 .compatible = "mediatek,mt2712-smi-larb",
270 .data = &mtk_smi_larb_mt2712 260 .data = &mtk_smi_larb_mt2712
271 }, 261 },
262 {
263 .compatible = "mediatek,mt8183-smi-larb",
264 .data = &mtk_smi_larb_mt8183
265 },
272 {} 266 {}
273}; 267};
274 268
@@ -279,7 +273,6 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
279 struct device *dev = &pdev->dev; 273 struct device *dev = &pdev->dev;
280 struct device_node *smi_node; 274 struct device_node *smi_node;
281 struct platform_device *smi_pdev; 275 struct platform_device *smi_pdev;
282 int err;
283 276
284 larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL); 277 larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
285 if (!larb) 278 if (!larb)
@@ -298,16 +291,16 @@ static int mtk_smi_larb_probe(struct platform_device *pdev)
298 larb->smi.clk_smi = devm_clk_get(dev, "smi"); 291 larb->smi.clk_smi = devm_clk_get(dev, "smi");
299 if (IS_ERR(larb->smi.clk_smi)) 292 if (IS_ERR(larb->smi.clk_smi))
300 return PTR_ERR(larb->smi.clk_smi); 293 return PTR_ERR(larb->smi.clk_smi);
301 larb->smi.dev = dev;
302 294
303 if (larb->larb_gen->need_larbid) { 295 if (larb->larb_gen->has_gals) {
304 err = of_property_read_u32(dev->of_node, "mediatek,larb-id", 296 /* The larb may not have gals even if the SoC supports them. */
305 &larb->larbid); 297 larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
306 if (err) { 298 if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
307 dev_err(dev, "missing larbid property\n"); 299 larb->smi.clk_gals0 = NULL;
308 return err; 300 else if (IS_ERR(larb->smi.clk_gals0))
309 } 301 return PTR_ERR(larb->smi.clk_gals0);
310 } 302 }
303 larb->smi.dev = dev;
311 304
312 smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0); 305 smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
313 if (!smi_node) 306 if (!smi_node)
@@ -336,27 +329,86 @@ static int mtk_smi_larb_remove(struct platform_device *pdev)
336 return 0; 329 return 0;
337} 330}
338 331
332static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
333{
334 struct mtk_smi_larb *larb = dev_get_drvdata(dev);
335 const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
336 int ret;
337
338 /* Power on smi-common. */
339 ret = pm_runtime_get_sync(larb->smi_common_dev);
340 if (ret < 0) {
341 dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
342 return ret;
343 }
344
345 ret = mtk_smi_clk_enable(&larb->smi);
346 if (ret < 0) {
347 dev_err(dev, "Failed to enable clock(%d).\n", ret);
348 pm_runtime_put_sync(larb->smi_common_dev);
349 return ret;
350 }
351
352 /* Configure the basic setting for this larb */
353 larb_gen->config_port(dev);
354
355 return 0;
356}
357
358static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
359{
360 struct mtk_smi_larb *larb = dev_get_drvdata(dev);
361
362 mtk_smi_clk_disable(&larb->smi);
363 pm_runtime_put_sync(larb->smi_common_dev);
364 return 0;
365}
366
367static const struct dev_pm_ops smi_larb_pm_ops = {
368 SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
369};
370
339static struct platform_driver mtk_smi_larb_driver = { 371static struct platform_driver mtk_smi_larb_driver = {
340 .probe = mtk_smi_larb_probe, 372 .probe = mtk_smi_larb_probe,
341 .remove = mtk_smi_larb_remove, 373 .remove = mtk_smi_larb_remove,
342 .driver = { 374 .driver = {
343 .name = "mtk-smi-larb", 375 .name = "mtk-smi-larb",
344 .of_match_table = mtk_smi_larb_of_ids, 376 .of_match_table = mtk_smi_larb_of_ids,
377 .pm = &smi_larb_pm_ops,
345 } 378 }
346}; 379};
347 380
381static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
382 .gen = MTK_SMI_GEN1,
383};
384
385static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
386 .gen = MTK_SMI_GEN2,
387};
388
389static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
390 .gen = MTK_SMI_GEN2,
391 .has_gals = true,
392 .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
393 F_MMU1_LARB(7),
394};
395
348static const struct of_device_id mtk_smi_common_of_ids[] = { 396static const struct of_device_id mtk_smi_common_of_ids[] = {
349 { 397 {
350 .compatible = "mediatek,mt8173-smi-common", 398 .compatible = "mediatek,mt8173-smi-common",
351 .data = (void *)MTK_SMI_GEN2 399 .data = &mtk_smi_common_gen2,
352 }, 400 },
353 { 401 {
354 .compatible = "mediatek,mt2701-smi-common", 402 .compatible = "mediatek,mt2701-smi-common",
355 .data = (void *)MTK_SMI_GEN1 403 .data = &mtk_smi_common_gen1,
356 }, 404 },
357 { 405 {
358 .compatible = "mediatek,mt2712-smi-common", 406 .compatible = "mediatek,mt2712-smi-common",
359 .data = (void *)MTK_SMI_GEN2 407 .data = &mtk_smi_common_gen2,
408 },
409 {
410 .compatible = "mediatek,mt8183-smi-common",
411 .data = &mtk_smi_common_mt8183,
360 }, 412 },
361 {} 413 {}
362}; 414};
@@ -366,13 +418,13 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
366 struct device *dev = &pdev->dev; 418 struct device *dev = &pdev->dev;
367 struct mtk_smi *common; 419 struct mtk_smi *common;
368 struct resource *res; 420 struct resource *res;
369 enum mtk_smi_gen smi_gen;
370 int ret; 421 int ret;
371 422
372 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); 423 common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
373 if (!common) 424 if (!common)
374 return -ENOMEM; 425 return -ENOMEM;
375 common->dev = dev; 426 common->dev = dev;
427 common->plat = of_device_get_match_data(dev);
376 428
377 common->clk_apb = devm_clk_get(dev, "apb"); 429 common->clk_apb = devm_clk_get(dev, "apb");
378 if (IS_ERR(common->clk_apb)) 430 if (IS_ERR(common->clk_apb))
@@ -382,14 +434,23 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
382 if (IS_ERR(common->clk_smi)) 434 if (IS_ERR(common->clk_smi))
383 return PTR_ERR(common->clk_smi); 435 return PTR_ERR(common->clk_smi);
384 436
437 if (common->plat->has_gals) {
438 common->clk_gals0 = devm_clk_get(dev, "gals0");
439 if (IS_ERR(common->clk_gals0))
440 return PTR_ERR(common->clk_gals0);
441
442 common->clk_gals1 = devm_clk_get(dev, "gals1");
443 if (IS_ERR(common->clk_gals1))
444 return PTR_ERR(common->clk_gals1);
445 }
446
385 /* 447 /*
386 * for mtk smi gen 1, we need to get the ao (always-on) base to config 448 * for mtk smi gen 1, we need to get the ao (always-on) base to config
387 * the m4u port, and we need to enable the async clock to bring the smi 449 * the m4u port, and we need to enable the async clock to bring the smi
388 * clock into the emi clock domain; for mtk smi gen2, there is no smi ao 450 * clock into the emi clock domain; for mtk smi gen2, there is no smi ao
389 * base. 451 * base.
390 */ 452 */
391 smi_gen = (enum mtk_smi_gen)of_device_get_match_data(dev); 453 if (common->plat->gen == MTK_SMI_GEN1) {
392 if (smi_gen == MTK_SMI_GEN1) {
393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 454 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394 common->smi_ao_base = devm_ioremap_resource(dev, res); 455 common->smi_ao_base = devm_ioremap_resource(dev, res);
395 if (IS_ERR(common->smi_ao_base)) 456 if (IS_ERR(common->smi_ao_base))
@@ -402,6 +463,11 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
402 ret = clk_prepare_enable(common->clk_async); 463 ret = clk_prepare_enable(common->clk_async);
403 if (ret) 464 if (ret)
404 return ret; 465 return ret;
466 } else {
467 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
468 common->base = devm_ioremap_resource(dev, res);
469 if (IS_ERR(common->base))
470 return PTR_ERR(common->base);
405 } 471 }
406 pm_runtime_enable(dev); 472 pm_runtime_enable(dev);
407 platform_set_drvdata(pdev, common); 473 platform_set_drvdata(pdev, common);
@@ -414,12 +480,42 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
414 return 0; 480 return 0;
415} 481}
416 482
483static int __maybe_unused mtk_smi_common_resume(struct device *dev)
484{
485 struct mtk_smi *common = dev_get_drvdata(dev);
486 u32 bus_sel = common->plat->bus_sel;
487 int ret;
488
489 ret = mtk_smi_clk_enable(common);
490 if (ret) {
491 dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
492 return ret;
493 }
494
495 if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
496 writel(bus_sel, common->base + SMI_BUS_SEL);
497 return 0;
498}
499
500static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
501{
502 struct mtk_smi *common = dev_get_drvdata(dev);
503
504 mtk_smi_clk_disable(common);
505 return 0;
506}
507
508static const struct dev_pm_ops smi_common_pm_ops = {
509 SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
510};
511
417static struct platform_driver mtk_smi_common_driver = { 512static struct platform_driver mtk_smi_common_driver = {
418 .probe = mtk_smi_common_probe, 513 .probe = mtk_smi_common_probe,
419 .remove = mtk_smi_common_remove, 514 .remove = mtk_smi_common_remove,
420 .driver = { 515 .driver = {
421 .name = "mtk-smi-common", 516 .name = "mtk-smi-common",
422 .of_match_table = mtk_smi_common_of_ids, 517 .of_match_table = mtk_smi_common_of_ids,
518 .pm = &smi_common_pm_ops,
423 } 519 }
424}; 520};
425 521
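
The bulk of the mtk-smi rework replaces the hand-rolled mtk_smi_enable()/mtk_smi_disable() pairs with runtime-PM callbacks, so a pm_runtime_get_sync() on a larb transparently powers up and reprograms both the larb and the smi-common behind it. The skeleton of that pattern, reduced to its parts (my_* names are placeholders, error handling trimmed):

        #include <linux/platform_device.h>
        #include <linux/pm_runtime.h>

        static int my_runtime_resume(struct device *dev)
        {
                /* Enable clocks, restore register state lost at power-off. */
                return 0;
        }

        static int my_runtime_suspend(struct device *dev)
        {
                /* Disable clocks; state is reprogrammed on the next resume. */
                return 0;
        }

        static const struct dev_pm_ops my_pm_ops = {
                SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
        };

        /* The consumer-facing get/put then collapse to refcounted PM calls,
         * exactly the shape mtk_smi_larb_get()/put() take above. */
        static int my_get(struct device *dev)
        {
                int ret = pm_runtime_get_sync(dev);

                return (ret < 0) ? ret : 0;
        }

        static void my_put(struct device *dev)
        {
                pm_runtime_put_sync(dev);
        }
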
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 601cefb5c9d8..050478cabc95 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
729 return 0; 729 return 0;
730} 730}
731 731
732static int rk8xx_suspend(struct device *dev) 732static int __maybe_unused rk8xx_suspend(struct device *dev)
733{ 733{
734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
735 int ret = 0; 735 int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
749 return ret; 749 return ret;
750} 750}
751 751
752static int rk8xx_resume(struct device *dev) 752static int __maybe_unused rk8xx_resume(struct device *dev)
753{ 753{
754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
755 int ret = 0; 755 int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
768 768
769 return ret; 769 return ret;
770} 770}
771SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); 771static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
772 772
773static struct i2c_driver rk808_i2c_driver = { 773static struct i2c_driver rk808_i2c_driver = {
774 .driver = { 774 .driver = {
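
Two independent fixes sit in the rk808 hunk: the dev_pm_ops variable becomes static (nothing outside this file references it), and the callbacks gain __maybe_unused. The latter matters because SIMPLE_DEV_PM_OPS() expands via SET_SYSTEM_SLEEP_PM_OPS(), which references neither function when CONFIG_PM_SLEEP=n, so -Wunused-function would otherwise fire on both. Minimal shape (foo_* names are placeholders):

        static int __maybe_unused foo_suspend(struct device *dev)
        {
                return 0;
        }

        static int __maybe_unused foo_resume(struct device *dev)
        {
                return 0;
        }

        /* static: only this file's driver struct takes its address. */
        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
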
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 1606658b9b7e..24245ccdba72 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -22,7 +22,7 @@ struct lkdtm_list {
22 * recurse past the end of THREAD_SIZE by default. 22 * recurse past the end of THREAD_SIZE by default.
23 */ 23 */
24#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0) 24#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
25#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2) 25#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
26#else 26#else
27#define REC_STACK_SIZE (THREAD_SIZE / 8) 27#define REC_STACK_SIZE (THREAD_SIZE / 8)
28#endif 28#endif
@@ -91,7 +91,7 @@ void lkdtm_LOOP(void)
91 91
92void lkdtm_EXHAUST_STACK(void) 92void lkdtm_EXHAUST_STACK(void)
93{ 93{
94 pr_info("Calling function with %d frame size to depth %d ...\n", 94 pr_info("Calling function with %lu frame size to depth %d ...\n",
95 REC_STACK_SIZE, recur_count); 95 REC_STACK_SIZE, recur_count);
96 recursive_loop(recur_count); 96 recursive_loop(recur_count);
97 pr_info("FAIL: survived without exhausting stack?!\n"); 97 pr_info("FAIL: survived without exhausting stack?!\n");
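
The lkdtm fix works because of what _AC() expands to. Paraphrasing include/uapi/linux/const.h: in assembly the constant is left bare (suffixes would be a syntax error there), while in C the suffix is token-pasted on:

        #ifdef __ASSEMBLY__
        #define _AC(X, Y)   X
        #else
        #define __AC(X, Y)  (X##Y)          /* _AC(2048, UL) -> (2048UL) */
        #define _AC(X, Y)   __AC(X, Y)
        #endif

So with CONFIG_FRAME_WARN=2048, REC_STACK_SIZE becomes (2048UL) / 2, an unsigned long that matches the %lu replacing the old %d in the pr_info() (the THREAD_SIZE fallback is typically an unsigned long expression as well).
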
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 6c0173772162..77f7dff7098d 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,8 @@
81 81
82#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ 82#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
83 83
84#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
85
84#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */ 86#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
85#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */ 87#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
86 88
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 57cb68f5cc64..541538eff8b1 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
98 98
99 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, 99 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
100 100
101 {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
102
101 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)}, 103 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
102 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)}, 104 {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
103 105
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 8840299420e0..5e6be1527571 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -691,7 +691,6 @@ static int vmballoon_alloc_page_list(struct vmballoon *b,
691 } 691 }
692 692
693 if (page) { 693 if (page) {
694 vmballoon_mark_page_offline(page, ctl->page_size);
695 /* Success. Add the page to the list and continue. */ 694 /* Success. Add the page to the list and continue. */
696 list_add(&page->lru, &ctl->pages); 695 list_add(&page->lru, &ctl->pages);
697 continue; 696 continue;
@@ -930,7 +929,6 @@ static void vmballoon_release_page_list(struct list_head *page_list,
930 929
931 list_for_each_entry_safe(page, tmp, page_list, lru) { 930 list_for_each_entry_safe(page, tmp, page_list, lru) {
932 list_del(&page->lru); 931 list_del(&page->lru);
933 vmballoon_mark_page_online(page, page_size);
934 __free_pages(page, vmballoon_page_order(page_size)); 932 __free_pages(page, vmballoon_page_order(page_size));
935 } 933 }
936 934
@@ -1005,6 +1003,7 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
1005 enum vmballoon_page_size_type page_size) 1003 enum vmballoon_page_size_type page_size)
1006{ 1004{
1007 unsigned long flags; 1005 unsigned long flags;
1006 struct page *page;
1008 1007
1009 if (page_size == VMW_BALLOON_4K_PAGE) { 1008 if (page_size == VMW_BALLOON_4K_PAGE) {
1010 balloon_page_list_enqueue(&b->b_dev_info, pages); 1009 balloon_page_list_enqueue(&b->b_dev_info, pages);
@@ -1014,6 +1013,11 @@ static void vmballoon_enqueue_page_list(struct vmballoon *b,
1014 * for the balloon compaction mechanism. 1013 * for the balloon compaction mechanism.
1015 */ 1014 */
1016 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1015 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1016
1017 list_for_each_entry(page, pages, lru) {
1018 vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1019 }
1020
1017 list_splice_init(pages, &b->huge_pages); 1021 list_splice_init(pages, &b->huge_pages);
1018 __count_vm_events(BALLOON_INFLATE, *n_pages * 1022 __count_vm_events(BALLOON_INFLATE, *n_pages *
1019 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE)); 1023 vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
@@ -1056,6 +1060,8 @@ static void vmballoon_dequeue_page_list(struct vmballoon *b,
1056 /* 2MB pages */ 1060 /* 2MB pages */
1057 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags); 1061 spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1058 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) { 1062 list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1063 vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1064
1059 list_move(&page->lru, pages); 1065 list_move(&page->lru, pages);
1060 if (++i == n_req_pages) 1066 if (++i == n_req_pages)
1061 break; 1067 break;
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index bad89b6e0802..345addd9306d 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -310,7 +310,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
310 310
311 entry = container_of(resource, struct dbell_entry, resource); 311 entry = container_of(resource, struct dbell_entry, resource);
312 if (entry->run_delayed) { 312 if (entry->run_delayed) {
313 schedule_work(&entry->work); 313 if (!schedule_work(&entry->work))
314 vmci_resource_put(resource);
314 } else { 315 } else {
315 entry->notify_cb(entry->client_data); 316 entry->notify_cb(entry->client_data);
316 vmci_resource_put(resource); 317 vmci_resource_put(resource);
@@ -361,7 +362,8 @@ static void dbell_fire_entries(u32 notify_idx)
361 atomic_read(&dbell->active) == 1) { 362 atomic_read(&dbell->active) == 1) {
362 if (dbell->run_delayed) { 363 if (dbell->run_delayed) {
363 vmci_resource_get(&dbell->resource); 364 vmci_resource_get(&dbell->resource);
364 schedule_work(&dbell->work); 365 if (!schedule_work(&dbell->work))
366 vmci_resource_put(&dbell->resource);
365 } else { 367 } else {
366 dbell->notify_cb(dbell->client_data); 368 dbell->notify_cb(dbell->client_data);
367 } 369 }
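
Both vmci_doorbell hunks close the same leak: schedule_work() returns true only if it actually queued the work, and false if the work item was already pending. In the already-pending case the single queued execution will do one vmci_resource_put(), so the reference just taken on its behalf must be returned on the spot. Reduced to the idiom (types from the driver, body illustrative):

        static void dbell_fire(struct dbell_entry *dbell)
        {
                /* Take a reference for the execution about to be queued... */
                vmci_resource_get(&dbell->resource);

                /* ...and hand it back if no new execution was queued,
                 * since the already-pending work will only put once. */
                if (!schedule_work(&dbell->work))
                        vmci_resource_put(&dbell->resource);
        }
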
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 74e4364bc9fb..09113b9ad679 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
564 if (index == EXT_CSD_SANITIZE_START) 564 if (index == EXT_CSD_SANITIZE_START)
565 cmd.sanitize_busy = true; 565 cmd.sanitize_busy = true;
566 566
567 err = mmc_wait_for_cmd(host, &cmd, 0); 567 err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
568 if (err) 568 if (err)
569 goto out; 569 goto out;
570 570
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d681e8aaca83..fe914ff5f5d6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
1292 goto err; 1292 goto err;
1293 } 1293 }
1294 1294
1295 /*
1296 * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
1297 * these bits as invalid, in particular reserved bit 7.
1298 */
1299 ocr &= ~0x7FFF;
1300
1295 rocr = mmc_select_voltage(host, ocr); 1301 rocr = mmc_select_voltage(host, ocr);
1296 1302
1297 /* 1303 /*
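
The new mask in mmc_attach_sd() clears OCR bits 0..14 (~0x7FFF), i.e. everything below the 2.7-3.6 V voltage-window bits that start at bit 15, which also covers the reserved bit 7 that some cards wrongly set. A worked example of the masking (the sample OCR value is made up):

        #include <stdio.h>

        int main(void)
        {
                unsigned int ocr = 0x00ff80ffu;     /* hypothetical card reply */

                ocr &= ~0x7fffu;                    /* drop bits 0..14 */
                printf("0x%08x\n", ocr);            /* prints 0x00ff8000 */
                return 0;
        }
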
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 163d1cf4367e..44139fceac24 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; 369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
370 host->mmc_host_ops.hs400_enhanced_strobe = 370 host->mmc_host_ops.hs400_enhanced_strobe =
371 sdhci_cdns_hs400_enhanced_strobe; 371 sdhci_cdns_hs400_enhanced_strobe;
372 sdhci_enable_v4_mode(host);
372 373
373 sdhci_get_of_property(pdev); 374 sdhci_get_of_property(pdev);
374 375
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d4e7e8b7be77..e7d1920729fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50); 357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
358 pm_runtime_use_autosuspend(&pdev->dev); 358 pm_runtime_use_autosuspend(&pdev->dev);
359 359
360 /* HS200 is broken at the moment */
361 host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
362
360 ret = sdhci_add_host(host); 363 ret = sdhci_add_host(host);
361 if (ret) 364 if (ret)
362 goto pm_runtime_disable; 365 goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 83a4767ca680..d07b9793380f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
217 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); 217 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
218 u32 div, val, mask; 218 u32 div, val, mask;
219 219
220 div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); 220 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
221 221
222 clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8); 222 div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
223 sdhci_enable_clk(host, clk); 223 div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
224 sdhci_enable_clk(host, div);
224 225
225 /* enable auto gate sdhc_enable_auto_gate */ 226 /* enable auto gate sdhc_enable_auto_gate */
226 val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); 227 val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
@@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
373 return 1 << 31; 374 return 1 << 31;
374} 375}
375 376
377static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
378{
379 return 0;
380}
381
376static struct sdhci_ops sdhci_sprd_ops = { 382static struct sdhci_ops sdhci_sprd_ops = {
377 .read_l = sdhci_sprd_readl, 383 .read_l = sdhci_sprd_readl,
378 .write_l = sdhci_sprd_writel, 384 .write_l = sdhci_sprd_writel,
@@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
385 .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, 391 .set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
386 .hw_reset = sdhci_sprd_hw_reset, 392 .hw_reset = sdhci_sprd_hw_reset,
387 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, 393 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
394 .get_ro = sdhci_sprd_get_ro,
388}; 395};
389 396
390static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) 397static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
501} 508}
502 509
503static const struct sdhci_pltfm_data sdhci_sprd_pdata = { 510static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
504 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 511 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
512 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
513 SDHCI_QUIRK_MISSING_CAPS,
505 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | 514 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
506 SDHCI_QUIRK2_USE_32BIT_BLK_CNT, 515 SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
516 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
507 .ops = &sdhci_sprd_ops, 517 .ops = &sdhci_sprd_ops,
508}; 518};
509 519
@@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
605 615
606 sdhci_enable_v4_mode(host); 616 sdhci_enable_v4_mode(host);
607 617
618 /*
619 * Supply the existing CAPS, but clear the UHS-I modes. This
620 * will allow these modes to be specified only by device
621 * tree properties through mmc_of_parse().
622 */
623 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
624 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
625 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
626 SDHCI_SUPPORT_DDR50);
627
608 ret = sdhci_setup_host(host); 628 ret = sdhci_setup_host(host);
609 if (ret) 629 if (ret)
610 goto pm_runtime_disable; 630 goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f4d4761cf20a..02d8f524bb9e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
258 } 258 }
259} 259}
260 260
261static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
262{
263 /*
264 * Write-enable shall be assumed if GPIO is missing in a board's
265 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
266 * Tegra.
267 */
268 return mmc_gpio_get_ro(host->mmc);
269}
270
261static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) 271static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
262{ 272{
263 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 273 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1224}; 1234};
1225 1235
1226static const struct sdhci_ops tegra_sdhci_ops = { 1236static const struct sdhci_ops tegra_sdhci_ops = {
1237 .get_ro = tegra_sdhci_get_ro,
1227 .read_w = tegra_sdhci_readw, 1238 .read_w = tegra_sdhci_readw,
1228 .write_l = tegra_sdhci_writel, 1239 .write_l = tegra_sdhci_writel,
1229 .set_clock = tegra_sdhci_set_clock, 1240 .set_clock = tegra_sdhci_set_clock,
@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1279}; 1290};
1280 1291
1281static const struct sdhci_ops tegra114_sdhci_ops = { 1292static const struct sdhci_ops tegra114_sdhci_ops = {
1293 .get_ro = tegra_sdhci_get_ro,
1282 .read_w = tegra_sdhci_readw, 1294 .read_w = tegra_sdhci_readw,
1283 .write_w = tegra_sdhci_writew, 1295 .write_w = tegra_sdhci_writew,
1284 .write_l = tegra_sdhci_writel, 1296 .write_l = tegra_sdhci_writel,
@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1332}; 1344};
1333 1345
1334static const struct sdhci_ops tegra210_sdhci_ops = { 1346static const struct sdhci_ops tegra210_sdhci_ops = {
1347 .get_ro = tegra_sdhci_get_ro,
1335 .read_w = tegra_sdhci_readw, 1348 .read_w = tegra_sdhci_readw,
1336 .write_w = tegra210_sdhci_writew, 1349 .write_w = tegra210_sdhci_writew,
1337 .write_l = tegra_sdhci_writel, 1350 .write_l = tegra_sdhci_writel,
@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1366}; 1379};
1367 1380
1368static const struct sdhci_ops tegra186_sdhci_ops = { 1381static const struct sdhci_ops tegra186_sdhci_ops = {
1382 .get_ro = tegra_sdhci_get_ro,
1369 .read_w = tegra_sdhci_readw, 1383 .read_w = tegra_sdhci_readw,
1370 .write_l = tegra_sdhci_writel, 1384 .write_l = tegra_sdhci_writel,
1371 .set_clock = tegra_sdhci_set_clock, 1385 .set_clock = tegra_sdhci_set_clock,
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index b4e3caf7d799..a4d8968d133d 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -1,5 +1,6 @@
1menuconfig MTD_HYPERBUS 1menuconfig MTD_HYPERBUS
2 tristate "HyperBus support" 2 tristate "HyperBus support"
3 depends on HAS_IOMEM
3 select MTD_CFI 4 select MTD_CFI
4 select MTD_MAP_BANK_WIDTH_2 5 select MTD_MAP_BANK_WIDTH_2
5 select MTD_CFI_AMDSTD 6 select MTD_CFI_AMDSTD
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 895510d40ce4..47602af4ee34 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
81 default: 81 default:
82 printk(KERN_WARNING "SA1100 flash: unknown base address " 82 printk(KERN_WARNING "SA1100 flash: unknown base address "
83 "0x%08lx, assuming CS0\n", phys); 83 "0x%08lx, assuming CS0\n", phys);
84 /* Fall through */
84 85
85 case SA1100_CS0_PHYS: 86 case SA1100_CS0_PHYS:
86 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; 87 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
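
The sa1100 change is warning hygiene: GCC's -Wimplicit-fallthrough accepts a comment matching its fall-through regex as an annotation, so the default case may deliberately continue into SA1100_CS0_PHYS without a diagnostic. A self-contained sketch of the construct (names and handlers are placeholders; current kernels would spell the annotation as the fallthrough; pseudo-keyword instead):

        #include <stdio.h>

        enum { CS0_PHYS = 0 };

        static void handle(unsigned long phys)
        {
                switch (phys) {
                default:
                        fprintf(stderr,
                                "unknown base 0x%08lx, assuming CS0\n", phys);
                        /* Fall through */
                case CS0_PHYS:
                        puts("configured as CS0");  /* shared with default */
                        break;
                }
        }
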
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02fd7822c14a..931d9d935686 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond)
1126done: 1126done:
1127 bond_dev->vlan_features = vlan_features; 1127 bond_dev->vlan_features = vlan_features;
1128 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | 1128 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1129 NETIF_F_HW_VLAN_CTAG_TX |
1130 NETIF_F_HW_VLAN_STAG_TX |
1129 NETIF_F_GSO_UDP_L4; 1131 NETIF_F_GSO_UDP_L4;
1130 bond_dev->mpls_features = mpls_features; 1132 bond_dev->mpls_features = mpls_features;
1131 bond_dev->gso_max_segs = gso_max_segs; 1133 bond_dev->gso_max_segs = gso_max_segs;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3811fdbda13e..28c963a21dac 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
478 unsigned long *supported, 478 unsigned long *supported,
479 struct phylink_link_state *state) 479 struct phylink_link_state *state)
480{ 480{
481 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
481 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 482 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
482 483
483 if (!phy_interface_mode_is_rgmii(state->interface) && 484 if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
487 state->interface != PHY_INTERFACE_MODE_INTERNAL && 488 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
488 state->interface != PHY_INTERFACE_MODE_MOCA) { 489 state->interface != PHY_INTERFACE_MODE_MOCA) {
489 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 490 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
490 dev_err(ds->dev, 491 if (port != core_readl(priv, CORE_IMP0_PRT_ID))
491 "Unsupported interface: %d\n", state->interface); 492 dev_err(ds->dev,
493 "Unsupported interface: %d for port %d\n",
494 state->interface, port);
492 return; 495 return;
493 } 496 }
494 497
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
526 u32 id_mode_dis = 0, port_mode; 529 u32 id_mode_dis = 0, port_mode;
527 u32 reg, offset; 530 u32 reg, offset;
528 531
532 if (port == core_readl(priv, CORE_IMP0_PRT_ID))
533 return;
534
529 if (priv->type == BCM7445_DEVICE_ID) 535 if (priv->type == BCM7445_DEVICE_ID)
530 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 536 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
531 else 537 else
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 5a9e27b337a8..098b01e4ed1a 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -81,6 +81,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
81 { .compatible = "microchip,ksz9897" }, 81 { .compatible = "microchip,ksz9897" },
82 { .compatible = "microchip,ksz9893" }, 82 { .compatible = "microchip,ksz9893" },
83 { .compatible = "microchip,ksz9563" }, 83 { .compatible = "microchip,ksz9563" },
84 { .compatible = "microchip,ksz8563" },
84 {}, 85 {},
85}; 86};
86MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); 87MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index ee7096d8af07..72ec250b9540 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -128,6 +128,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
128 128
129#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ 129#define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \
130 { \ 130 { \
131 .name = #width, \
131 .val_bits = (width), \ 132 .val_bits = (width), \
132 .reg_stride = (width) / 8, \ 133 .reg_stride = (width) / 8, \
133 .reg_bits = (regbits) + (regalign), \ 134 .reg_bits = (regbits) + (regalign), \
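
The one-line ksz_common.h change relies on the preprocessor's stringize operator: inside KSZ_REGMAP_ENTRY, #width turns the macro argument into a string literal, giving the 8-, 16- and 32-bit regmaps distinct .name values so sibling maps on one device can be told apart (e.g. in debugfs). A standalone demonstration:

        #include <stdio.h>

        struct ent {
                const char *name;
                int bits;
        };

        #define ENTRY(width) { .name = #width, .bits = (width) }

        int main(void)
        {
                struct ent table[] = { ENTRY(8), ENTRY(16), ENTRY(32) };

                for (int i = 0; i < 3; i++)
                        printf("%s -> %d bits\n", table[i].name, table[i].bits);
                return 0;
        }
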
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index d073baffc20b..df976b259e43 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1223,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1223{ 1223{
1224 struct sja1105_private *priv = ds->priv; 1224 struct sja1105_private *priv = ds->priv;
1225 struct device *dev = ds->dev; 1225 struct device *dev = ds->dev;
1226 u16 rx_vid, tx_vid;
1227 int i; 1226 int i;
1228 1227
1229 rx_vid = dsa_8021q_rx_vid(ds, port);
1230 tx_vid = dsa_8021q_tx_vid(ds, port);
1231
1232 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1228 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1233 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1229 struct sja1105_l2_lookup_entry l2_lookup = {0};
1234 u8 macaddr[ETH_ALEN]; 1230 u8 macaddr[ETH_ALEN];
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index b41f23679a08..7ce9c69e9c44 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void)
469 469
470 ret = xgbe_platform_init(); 470 ret = xgbe_platform_init();
471 if (ret) 471 if (ret)
472 return ret; 472 goto err_platform_init;
473 473
474 ret = xgbe_pci_init(); 474 ret = xgbe_pci_init();
475 if (ret) 475 if (ret)
476 return ret; 476 goto err_pci_init;
477 477
478 return 0; 478 return 0;
479
480err_pci_init:
481 xgbe_platform_exit();
482err_platform_init:
483 unregister_netdevice_notifier(&xgbe_netdev_notifier);
484 return ret;
479} 485}
480 486
481static void __exit xgbe_mod_exit(void) 487static void __exit xgbe_mod_exit(void)
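
Before this fix, xgbe_mod_init() returned straight out of a mid-sequence failure, leaking the already-registered notifier (and, on PCI failure, the platform registration). The replacement is the standard reverse-order goto unwind; its general shape, with stub step/undo functions standing in for the register/init calls:

        static int step_a(void) { return 0; }  /* register_netdevice_notifier() */
        static int step_b(void) { return 0; }  /* xgbe_platform_init() */
        static int step_c(void) { return 0; }  /* xgbe_pci_init() */
        static void undo_b(void) { }           /* xgbe_platform_exit() */
        static void undo_a(void) { }           /* unregister_...notifier() */

        static int mod_init(void)
        {
                int ret;

                ret = step_a();
                if (ret)
                        return ret;

                ret = step_b();
                if (ret)
                        goto err_a;

                ret = step_c();
                if (ret)
                        goto err_b;

                return 0;

        err_b:
                undo_b();
        err_a:
                undo_a();
                return ret;
        }

Each successful step gains a matching cleanup label, in reverse order, so a late failure undoes everything done so far and nothing more.
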
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 440690b18734..aee827f07c16 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
431 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) 431 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
432 break; 432 break;
433 } 433 }
434 if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { 434 if (rule && rule->type == aq_rx_filter_vlan &&
435 be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
435 struct ethtool_rxnfc cmd; 436 struct ethtool_rxnfc cmd;
436 437
437 cmd.fs.location = rule->aq_fsp.location; 438 cmd.fs.location = rule->aq_fsp.location;
@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
843 return err; 844 return err;
844 845
845 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { 846 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
846 if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { 847 if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
847 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, 848 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
848 !(aq_nic->packet_filter & IFF_PROMISC)); 849 !(aq_nic->packet_filter & IFF_PROMISC));
849 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; 850 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 100722ad5c2d..b4a0fb281e69 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev)
61 if (err < 0) 61 if (err < 0)
62 goto err_exit; 62 goto err_exit;
63 63
64 err = aq_filters_vlans_update(aq_nic);
65 if (err < 0)
66 goto err_exit;
67
64 err = aq_nic_start(aq_nic); 68 err = aq_nic_start(aq_nic);
65 if (err < 0) 69 if (err < 0)
66 goto err_exit; 70 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index e1392766e21e..8f66e7817811 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -393,7 +393,7 @@ int aq_nic_start(struct aq_nic_s *self)
393 self->aq_nic_cfg.link_irq_vec); 393 self->aq_nic_cfg.link_irq_vec);
394 err = request_threaded_irq(irqvec, NULL, 394 err = request_threaded_irq(irqvec, NULL,
395 aq_linkstate_threaded_isr, 395 aq_linkstate_threaded_isr,
396 IRQF_SHARED, 396 IRQF_SHARED | IRQF_ONESHOT,
397 self->ndev->name, self); 397 self->ndev->name, self);
398 if (err < 0) 398 if (err < 0)
399 goto err_exit; 399 goto err_exit;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 715685aa48c3..28892b8acd0e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
86 } 86 }
87 } 87 }
88 88
89err_exit:
89 if (!was_tx_cleaned) 90 if (!was_tx_cleaned)
90 work_done = budget; 91 work_done = budget;
91 92
@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
95 1U << self->aq_ring_param.vec_idx); 96 1U << self->aq_ring_param.vec_idx);
96 } 97 }
97 } 98 }
98err_exit: 99
99 return work_done; 100 return work_done;
100} 101}
101 102
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e47ea92e2ae3..d10b421ed1f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3057 /* if VF, indicate to PF this function is going down (PF will delete sp 3057 /* if VF, indicate to PF this function is going down (PF will delete sp
3058 * elements and clear initializations) 3058 * elements and clear initializations)
3059 */ 3059 */
3060 if (IS_VF(bp)) 3060 if (IS_VF(bp)) {
3061 bnx2x_clear_vlan_info(bp);
3061 bnx2x_vfpf_close_vf(bp); 3062 bnx2x_vfpf_close_vf(bp);
3062 else if (unload_mode != UNLOAD_RECOVERY) 3063 } else if (unload_mode != UNLOAD_RECOVERY) {
3063 /* if this is a normal/close unload, we need to clean up the chip */ 3064 /* if this is a normal/close unload, we need to clean up the chip */
3064 bnx2x_chip_cleanup(bp, unload_mode, keep_link); 3065 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3065 else { 3066 } else {
3066 /* Send the UNLOAD_REQUEST to the MCP */ 3067 /* Send the UNLOAD_REQUEST to the MCP */
3067 bnx2x_send_unload_req(bp, unload_mode); 3068 bnx2x_send_unload_req(bp, unload_mode);
3068 3069
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c2f6e44e9a3f..8b08cb18e363 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
425void bnx2x_disable_close_the_gate(struct bnx2x *bp); 425void bnx2x_disable_close_the_gate(struct bnx2x *bp);
426int bnx2x_init_hw_func_cnic(struct bnx2x *bp); 426int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
427 427
428void bnx2x_clear_vlan_info(struct bnx2x *bp);
429
428/** 430/**
429 * bnx2x_sp_event - handle ramrods completion. 431 * bnx2x_sp_event - handle ramrods completion.
430 * 432 *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2cc14db8f0ec..192ff8d5da32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8482 return rc; 8482 return rc;
8483} 8483}
8484 8484
8485void bnx2x_clear_vlan_info(struct bnx2x *bp)
8486{
8487 struct bnx2x_vlan_entry *vlan;
8488
8489 /* Mark that hw forgot all entries */
8490 list_for_each_entry(vlan, &bp->vlan_reg, link)
8491 vlan->hw = false;
8492
8493 bp->vlan_cnt = 0;
8494}
8495
8485static int bnx2x_del_all_vlans(struct bnx2x *bp) 8496static int bnx2x_del_all_vlans(struct bnx2x *bp)
8486{ 8497{
8487 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; 8498 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8488 unsigned long ramrod_flags = 0, vlan_flags = 0; 8499 unsigned long ramrod_flags = 0, vlan_flags = 0;
8489 struct bnx2x_vlan_entry *vlan;
8490 int rc; 8500 int rc;
8491 8501
8492 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8502 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
8495 if (rc) 8505 if (rc)
8496 return rc; 8506 return rc;
8497 8507
8498 /* Mark that hw forgot all entries */ 8508 bnx2x_clear_vlan_info(bp);
8499 list_for_each_entry(vlan, &bp->vlan_reg, link)
8500 vlan->hw = false;
8501 bp->vlan_cnt = 0;
8502 8509
8503 return 0; 8510 return 0;
8504} 8511}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7070349915bc..8dce4069472b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2021,9 +2021,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2021 if (bnapi->events & BNXT_RX_EVENT) { 2021 if (bnapi->events & BNXT_RX_EVENT) {
2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2023 2023
2024 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2025 if (bnapi->events & BNXT_AGG_EVENT) 2024 if (bnapi->events & BNXT_AGG_EVENT)
2026 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2025 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2026 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2027 } 2027 }
2028 bnapi->events = 0; 2028 bnapi->events = 0;
2029} 2029}
@@ -5064,6 +5064,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5064 5064
5065static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5065static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5066{ 5066{
5067 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5067 int i, rc = 0; 5068 int i, rc = 0;
5068 u32 type; 5069 u32 type;
5069 5070
@@ -5139,7 +5140,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5139 if (rc) 5140 if (rc)
5140 goto err_out; 5141 goto err_out;
5141 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5142 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5142 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5143 /* If we have agg rings, post agg buffers first. */
5144 if (!agg_rings)
5145 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5143 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5146 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5144 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5147 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5145 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5148 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -5158,7 +5161,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5158 } 5161 }
5159 } 5162 }
5160 5163
5161 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5164 if (agg_rings) {
5162 type = HWRM_RING_ALLOC_AGG; 5165 type = HWRM_RING_ALLOC_AGG;
5163 for (i = 0; i < bp->rx_nr_rings; i++) { 5166 for (i = 0; i < bp->rx_nr_rings; i++) {
5164 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5167 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -5174,6 +5177,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5174 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5177 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5175 ring->fw_ring_id); 5178 ring->fw_ring_id);
5176 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5179 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5180 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5177 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5181 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5178 } 5182 }
5179 } 5183 }
@@ -7016,19 +7020,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7016 bnxt_hwrm_vnic_set_rss(bp, i, false); 7020 bnxt_hwrm_vnic_set_rss(bp, i, false);
7017} 7021}
7018 7022
7019static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7023static void bnxt_clear_vnic(struct bnxt *bp)
7020 bool irq_re_init)
7021{ 7024{
7022 if (bp->vnic_info) { 7025 if (!bp->vnic_info)
7023 bnxt_hwrm_clear_vnic_filter(bp); 7026 return;
7027
7028 bnxt_hwrm_clear_vnic_filter(bp);
7029 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7024 /* clear all RSS settings before freeing the vnic ctx */ 7030 /* clear all RSS settings before freeing the vnic ctx */
7025 bnxt_hwrm_clear_vnic_rss(bp); 7031 bnxt_hwrm_clear_vnic_rss(bp);
7026 bnxt_hwrm_vnic_ctx_free(bp); 7032 bnxt_hwrm_vnic_ctx_free(bp);
7027 /* before free the vnic, undo the vnic tpa settings */
7028 if (bp->flags & BNXT_FLAG_TPA)
7029 bnxt_set_tpa(bp, false);
7030 bnxt_hwrm_vnic_free(bp);
7031 } 7033 }
7034 /* before freeing the vnic, undo the vnic tpa settings */
7035 if (bp->flags & BNXT_FLAG_TPA)
7036 bnxt_set_tpa(bp, false);
7037 bnxt_hwrm_vnic_free(bp);
7038 if (bp->flags & BNXT_FLAG_CHIP_P5)
7039 bnxt_hwrm_vnic_ctx_free(bp);
7040}
7041
7042static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7043 bool irq_re_init)
7044{
7045 bnxt_clear_vnic(bp);
7032 bnxt_hwrm_ring_free(bp, close_path); 7046 bnxt_hwrm_ring_free(bp, close_path);
7033 bnxt_hwrm_ring_grp_free(bp); 7047 bnxt_hwrm_ring_grp_free(bp);
7034 if (irq_re_init) { 7048 if (irq_re_init) {
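Note: the bnxt hunks above split vnic teardown into a helper whose ordering depends on the chip generation (on P5-class chips the vnic context is freed after the vnic, on older chips before). A minimal standalone sketch of that shape, with hypothetical names rather than the driver's actual code:

#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool has_vnic;
	bool is_p5;
	bool tpa_enabled;
};

/* Order-sensitive teardown: pre-P5 parts free the context before the
 * vnic, P5 parts free it after, mirroring the bnxt_clear_vnic() split. */
static void clear_vnic(struct dev_state *d)
{
	if (!d->has_vnic)
		return;
	puts("clear filters");
	if (!d->is_p5)
		puts("clear RSS, free vnic ctx");
	if (d->tpa_enabled)
		puts("undo TPA");
	puts("free vnic");
	if (d->is_p5)
		puts("free vnic ctx");
	d->has_vnic = false;
}

int main(void)
{
	struct dev_state d = { .has_vnic = true, .is_p5 = true, .tpa_enabled = true };
	clear_vnic(&d);
	return 0;
}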
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 549c90d3e465..c05d663212b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 	if (idx)
 		req->dimensions = cpu_to_le16(1);
 
-	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE))
+	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
 		memcpy(data_addr, buf, bytesize);
-
-	rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+		rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
+	} else {
+		rc = hwrm_send_message_silent(bp, msg, msg_len,
+					      HWRM_CMD_TIMEOUT);
+	}
 	if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
 		memcpy(buf, data_addr, bytesize);
 
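Note: the devlink hunk sends NVM reads with the quiet firmware-message variant because a GET of an unset variable is an expected failure. An illustrative sketch of that "quiet for expected failures, loud for real errors" split, with made-up names:

#include <stdbool.h>
#include <stdio.h>

enum req { REQ_GET, REQ_SET };

static int send_msg(enum req r, bool silent)
{
	/* Pretend the device rejects reads of unset variables. */
	int rc = (r == REQ_GET) ? -1 : 0;

	if (rc && !silent)
		fprintf(stderr, "fw request failed: %d\n", rc);
	return rc;
}

/* GETs may legitimately fail, so send them silently; SETs still log. */
static int nvm_req(enum req r)
{
	return (r == REQ_SET) ? send_msg(r, false) : send_msg(r, true);
}

int main(void)
{
	printf("set: %d, get: %d\n", nvm_req(REQ_SET), nvm_req(REQ_GET));
	return 0;
}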
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index c7ee63d69679..8445a0cce849 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
 	mutex_lock(&bp->hwrm_cmd_lock);
 	hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
 				      INSTALL_PACKAGE_TIMEOUT);
-	if (hwrm_err)
-		goto flash_pkg_exit;
-
-	if (resp->error_code) {
+	if (hwrm_err) {
 		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
 
-		if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+		if (resp->error_code && error_code ==
+		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
 			install.flags |= cpu_to_le16(
 			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
 			hwrm_err = _hwrm_send_message(bp, &install,
 						      sizeof(install),
 						      INSTALL_PACKAGE_TIMEOUT);
-			if (hwrm_err)
-				goto flash_pkg_exit;
 		}
+		if (hwrm_err)
+			goto flash_pkg_exit;
 	}
 
 	if (resp->result) {
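Note: the ethtool hunk restructures the flash path so a fragmentation error triggers one retry with a defrag flag, and every failure funnels through a single exit check. A compilable sketch of that retry-then-check shape (hypothetical flag values, not the driver's constants):

#include <stdio.h>

#define ERR_FRAG 1

static int install_pkg(int flags, int *cmd_err)
{
	if (!(flags & 0x1)) {	/* first attempt needs defragmentation */
		*cmd_err = ERR_FRAG;
		return -1;
	}
	*cmd_err = 0;
	return 0;
}

int main(void)
{
	int cmd_err, flags = 0;
	int rc = install_pkg(flags, &cmd_err);

	/* On a frag error, retry once with the defrag flag, then let one
	 * shared error check decide the outcome for either attempt. */
	if (rc && cmd_err == ERR_FRAG) {
		flags |= 0x1;
		rc = install_pkg(flags, &cmd_err);
	}
	if (rc) {
		fprintf(stderr, "flash failed\n");
		return 1;
	}
	puts("flash ok");
	return 0;
}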
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 6fe4a7174271..dd621f6bd127 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
 static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				 u16 src_fid)
 {
-	flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+	flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
 }
 
 static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
 		goto free_node;
 
 	bnxt_tc_set_src_fid(bp, flow, src_fid);
-
-	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
-		bnxt_tc_set_flow_dir(bp, flow, src_fid);
+	bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
 
 	if (!bnxt_tc_can_offload(bp, flow)) {
 		rc = -EOPNOTSUPP;
@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
 	 * 2. 15th bit of flow_handle must specify the flow
 	 *    direction (TX/RX).
 	 */
-	if (flow_node->flow.dir == BNXT_DIR_RX)
+	if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
 		handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
 			 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
 	else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index ffec57d1a5ec..4f05305052f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
 	__be16 inner_vlan_tci;
 	__be16 ether_type;
 	u8 num_vlans;
+	u8 dir;
+#define BNXT_DIR_RX	1
+#define BNXT_DIR_TX	0
 };
 
 struct bnxt_tc_l3_key {
@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
 
 	/* flow applicable to pkts ingressing on this fid */
 	u16 src_fid;
-	u8 dir;
-#define BNXT_DIR_RX	1
-#define BNXT_DIR_TX	0
 	struct bnxt_tc_l2_key l2_key;
 	struct bnxt_tc_l2_key l2_mask;
 	struct bnxt_tc_l3_key l3_key;
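Note: the bnxt_tc hunks move the flow direction into the l2 key, so the direction participates in flow matching instead of living beside it. An illustrative standalone sketch of why that matters (field names are made up):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Once the direction is part of the key that flow lookup compares, two
 * otherwise-identical flows in opposite directions match as distinct. */
struct l2_key {
	unsigned char dmac[6];
	unsigned char smac[6];
	unsigned char dir;	/* BNXT_DIR_TX = 0, BNXT_DIR_RX = 1 */
};

static bool keys_equal(const struct l2_key *a, const struct l2_key *b)
{
	return !memcmp(a->dmac, b->dmac, 6) &&
	       !memcmp(a->smac, b->smac, 6) &&
	       a->dir == b->dir;
}

int main(void)
{
	struct l2_key rx = { .dir = 1 }, tx = { .dir = 0 };

	printf("distinct flows: %s\n", keys_equal(&rx, &tx) ? "no" : "yes");
	return 0;
}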
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index d3a0b614dbfa..b22196880d6d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1124,6 +1124,7 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
 	.set_coalesce = bcmgenet_set_coalesce,
 	.get_link_ksettings = bcmgenet_get_link_ksettings,
 	.set_link_ksettings = bcmgenet_set_link_ksettings,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 
 /* Power down the unimac, based on mode. */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5ca17e62dc3e..35b59b5edf0f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4154,7 +4154,7 @@ static const struct of_device_id macb_dt_ids[] = {
 	{ .compatible = "cdns,emac", .data = &emac_config },
 	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
 	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
-	{ .compatible = "sifive,fu540-macb", .data = &fu540_c000_config },
+	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 73632b843749..b821c9e1604c 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -10,7 +10,7 @@
 
 #include "cavium_ptp.h"
 
-#define DRV_NAME "Cavium PTP Driver"
+#define DRV_NAME "cavium_ptp"
 
 #define PCI_DEVICE_ID_CAVIUM_PTP	0xA00C
 #define PCI_DEVICE_ID_CAVIUM_RST	0xA00E
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 032224178b64..6dd65f9b347c 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct,
 	}
 
 	oct->num_iqs++;
-	if (oct->fn_list.enable_io_queues(oct))
+	if (oct->fn_list.enable_io_queues(oct)) {
+		octeon_delete_instr_queue(oct, iq_no);
 		return 1;
+	}
 
 	return 0;
 }
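Note: the liquidio fix undoes the partially-built queue when enabling it fails, so the counter and queue array stay consistent. A small sketch of that "roll back on failure" pattern, with stand-in names:

#include <stdio.h>

static int enable_io_queues(int q) { return q == 3 ? -1 : 0; }

/* If enabling fails, tear down what setup already created — the
 * octeon_delete_instr_queue() analogue here is the decrement. */
static int setup_queue(int *num_iqs, int q)
{
	(*num_iqs)++;
	if (enable_io_queues(q)) {
		(*num_iqs)--;
		return -1;
	}
	return 0;
}

int main(void)
{
	int num_iqs = 0;

	setup_queue(&num_iqs, 3);
	printf("queues after failed setup: %d\n", num_iqs);
	return 0;
}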
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 02959035ed3f..d692251ee252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
 		return -ENOMEM;
 
 	err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
-	if (err)
+	if (err) {
+		kvfree(t);
 		return err;
+	}
 
 	bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
 	kvfree(t);
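Note: the cxgb4 hunk plugs a leak where the parse-error path returned without freeing the scratch bitmap. A minimal sketch of the single-exit cleanup shape that avoids this class of bug (illustrative names only):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Every exit after the allocation must free the scratch buffer,
 * including the parse-error path. */
static int write_blocked_fl(const char *buf, size_t len)
{
	int err = 0;
	char *t = malloc(len + 1);

	if (!t)
		return -ENOMEM;
	if (len == 0) {		/* stand-in for bitmap_parse_user() failing */
		err = -EINVAL;
		goto out;	/* free on the error path too */
	}
	memcpy(t, buf, len);
out:
	free(t);
	return err;
}

int main(void)
{
	return write_blocked_fl("", 0) ? 0 : 1;
}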
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 133acca0bf31..092da2d90026 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -167,7 +167,7 @@ struct nps_enet_priv {
 };
 
 /**
- * nps_reg_set - Sets ENET register with provided value.
+ * nps_enet_reg_set - Sets ENET register with provided value.
  * @priv: Pointer to EZchip ENET private data structure.
  * @reg: Register offset from base address.
  * @value: Value to set in register.
@@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
 }
 
 /**
- * nps_reg_get - Gets value of specified ENET register.
+ * nps_enet_reg_get - Gets value of specified ENET register.
  * @priv: Pointer to EZchip ENET private data structure.
  * @reg: Register offset from base address.
  *
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 2fd2586e42bf..bc594892507a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -82,7 +82,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
 	n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
 	if (n != 1) {
 		err = -EPERM;
-		goto err_irq;
+		goto err_irq_vectors;
 	}
 
 	ptp_qoriq->irq = pci_irq_vector(pdev, 0);
@@ -107,6 +107,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
 err_no_clock:
 	free_irq(ptp_qoriq->irq, ptp_qoriq);
 err_irq:
+	pci_free_irq_vectors(pdev);
+err_irq_vectors:
 	iounmap(base);
 err_ioremap:
 	kfree(ptp_qoriq);
@@ -125,6 +127,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev)
 
 	enetc_phc_index = -1;
 	ptp_qoriq_free(ptp_qoriq);
+	pci_free_irq_vectors(pdev);
 	kfree(ptp_qoriq);
 
 	pci_release_mem_regions(pdev);
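Note: the enetc PTP fix adds the missing pci_free_irq_vectors() to both the probe error path and remove(), keeping the unwind labels mirroring the acquisition order in reverse. A toy sketch of that label structure (simplified, not the driver's actual labels):

#include <stdio.h>

static int probe(int fail_at)
{
	if (fail_at == 1)
		goto err_ioremap;	/* nothing acquired yet */
	if (fail_at == 2)
		goto err_irq_vectors;	/* vectors allocated, irq not yet */
	if (fail_at == 3)
		goto err_irq;		/* everything acquired */

	return 0;
err_irq:
	puts("free_irq");
err_irq_vectors:
	puts("pci_free_irq_vectors");
err_ioremap:
	puts("iounmap");
	return -1;
}

int main(void)
{
	probe(2);	/* unwinds vectors and the mapping, not the irq */
	return 0;
}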
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 497298752381..aca95f64bde8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
 				u64_stats_fetch_begin(&priv->tx[ring].statss);
 			s->tx_packets += priv->tx[ring].pkt_done;
 			s->tx_bytes += priv->tx[ring].bytes_done;
-		} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+		} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
 					       start));
 	}
 }
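Note: the gve bug retried the TX snapshot against the RX ring's sequence counter; begin and retry must use the same counter or the loop validates nothing. A standalone sketch of the seqcount-style snapshot idiom (simplified, single-threaded stand-in for the u64_stats API):

#include <stdio.h>

struct ring {
	unsigned int seq;		/* bumped around writer updates */
	unsigned long long packets;
};

/* The begin/retry pair must read the *same* ring's sequence counter,
 * which is exactly what the fix restores. */
static unsigned long long snapshot(const struct ring *r)
{
	unsigned int start;
	unsigned long long v;

	do {
		start = r->seq;		/* u64_stats_fetch_begin() analogue */
		v = r->packets;
	} while (r->seq != start);	/* u64_stats_fetch_retry() analogue */
	return v;
}

int main(void)
{
	struct ring tx = { .seq = 0, .packets = 42 };

	printf("tx packets: %llu\n", snapshot(&tx));
	return 0;
}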
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index d654c234aaf7..c5be4ebd8437 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	struct net_device *netdev;
 	struct ibmveth_adapter *adapter;
 	unsigned char *mac_addr_p;
-	unsigned int *mcastFilterSize_p;
+	__be32 *mcastFilterSize_p;
 	long ret;
 	unsigned long ret_attr;
 
@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 		return -EINVAL;
 	}
 
-	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
-						VETH_MCAST_FILTER_SIZE, NULL);
+	mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
+							VETH_MCAST_FILTER_SIZE,
+							NULL);
 	if (!mcastFilterSize_p) {
 		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
 			"attribute\n");
@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
-	adapter->mcastFilterSize = *mcastFilterSize_p;
+	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
 	adapter->pool_config = 0;
 
 	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
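Note: device-tree properties are stored big-endian, so the ibmveth fix types the pointer as __be32 and converts once with be32_to_cpu() at the point of use. A portable illustration of the same conversion outside the kernel:

#include <stdint.h>
#include <stdio.h>

/* Big-endian 32-bit property to host order, byte by byte. */
static uint32_t be32_to_host(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* 256 (0x00000100) as it would appear in the device tree */
	uint8_t prop[4] = { 0x00, 0x00, 0x01, 0x00 };

	printf("mcast filter size: %u\n", be32_to_host(prop));
	return 0;
}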
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3da680073265..fa4bb940665c 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], 1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1569 (u64)tx_buff->indir_dma, 1569 (u64)tx_buff->indir_dma,
1570 (u64)num_entries); 1570 (u64)num_entries);
1571 dma_unmap_single(dev, tx_buff->indir_dma,
1572 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1571 } else { 1573 } else {
1572 tx_buff->num_entries = num_entries; 1574 tx_buff->num_entries = num_entries;
1573 lpar_rc = send_subcrq(adapter, handle_array[queue_num], 1575 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -1981,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work)
1981 1983
1982 rwi = get_next_rwi(adapter); 1984 rwi = get_next_rwi(adapter);
1983 while (rwi) { 1985 while (rwi) {
1986 if (adapter->state == VNIC_REMOVING ||
1987 adapter->state == VNIC_REMOVED)
1988 goto out;
1989
1984 if (adapter->force_reset_recovery) { 1990 if (adapter->force_reset_recovery) {
1985 adapter->force_reset_recovery = false; 1991 adapter->force_reset_recovery = false;
1986 rc = do_hard_reset(adapter, rwi, reset_state); 1992 rc = do_hard_reset(adapter, rwi, reset_state);
@@ -2005,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work)
2005 netdev_dbg(adapter->netdev, "Reset failed\n"); 2011 netdev_dbg(adapter->netdev, "Reset failed\n");
2006 free_all_rwi(adapter); 2012 free_all_rwi(adapter);
2007 } 2013 }
2008 2014out:
2009 adapter->resetting = false; 2015 adapter->resetting = false;
2010 if (we_lock_rtnl) 2016 if (we_lock_rtnl)
2011 rtnl_unlock(); 2017 rtnl_unlock();
@@ -2788,7 +2794,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2788 union sub_crq *next; 2794 union sub_crq *next;
2789 int index; 2795 int index;
2790 int i, j; 2796 int i, j;
2791 u8 *first;
2792 2797
2793restart_loop: 2798restart_loop:
2794 while (pending_scrq(adapter, scrq)) { 2799 while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2823,6 @@ restart_loop:
2818 2823
2819 txbuff->data_dma[j] = 0; 2824 txbuff->data_dma[j] = 0;
2820 } 2825 }
2821 /* if sub_crq was sent indirectly */
2822 first = &txbuff->indir_arr[0].generic.first;
2823 if (*first == IBMVNIC_CRQ_CMD) {
2824 dma_unmap_single(dev, txbuff->indir_dma,
2825 sizeof(txbuff->indir_arr),
2826 DMA_TO_DEVICE);
2827 *first = 0;
2828 }
2829 2826
2830 if (txbuff->last_frag) { 2827 if (txbuff->last_frag) {
2831 dev_kfree_skb_any(txbuff->skb); 2828 dev_kfree_skb_any(txbuff->skb);
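Note: the ibmvnic change unmaps the indirect descriptor buffer right after submission rather than in the completion handler; the hypervisor call consumes the buffer during the call, so deferring the unmap only added a path that could be missed. A toy sketch of "unmap at the point of use" (stand-in types, assuming the buffer is not referenced after the send returns):

#include <stdbool.h>
#include <stdio.h>

struct tx_buff { bool mapped; };

static void dma_unmap(struct tx_buff *b) { b->mapped = false; }

/* The descriptor memory is only read while the send call executes, so
 * it can be unmapped immediately instead of in the completion path. */
static int xmit_indirect(struct tx_buff *b)
{
	int rc = 0;		/* send_subcrq_indirect() analogue */

	dma_unmap(b);		/* unmap at the point of use */
	return rc;
}

int main(void)
{
	struct tx_buff b = { .mapped = true };

	xmit_indirect(&b);
	printf("still mapped after xmit: %s\n", b.mapped ? "yes" : "no");
	return 0;
}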
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbaf712d6529..7882148abb43 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7897,11 +7897,8 @@ static void ixgbe_service_task(struct work_struct *work)
 		return;
 	}
 	if (ixgbe_check_fw_error(adapter)) {
-		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-			rtnl_lock();
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			unregister_netdev(adapter->netdev);
-			rtnl_unlock();
-		}
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a01c75ede871..e0363870f3a5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4931,6 +4931,13 @@ static const struct dmi_system_id msi_blacklist[] = {
4931 DMI_MATCH(DMI_BOARD_NAME, "P6T"), 4931 DMI_MATCH(DMI_BOARD_NAME, "P6T"),
4932 }, 4932 },
4933 }, 4933 },
4934 {
4935 .ident = "ASUS P6X",
4936 .matches = {
4937 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
4938 DMI_MATCH(DMI_BOARD_NAME, "P6X"),
4939 },
4940 },
4934 {} 4941 {}
4935}; 4942};
4936 4943
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6c01314e87b0..db3552f2d087 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1187 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); 1187 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1188 if (err) { 1188 if (err) {
1189 en_err(priv, "Failed to allocate RSS indirection QP\n"); 1189 en_err(priv, "Failed to allocate RSS indirection QP\n");
1190 goto rss_err; 1190 goto qp_alloc_err;
1191 } 1191 }
1192 1192
1193 rss_map->indir_qp->event = mlx4_en_sqp_event; 1193 rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1241,6 +1241,7 @@ indir_err:
1241 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); 1241 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
1242 mlx4_qp_remove(mdev->dev, rss_map->indir_qp); 1242 mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1243 mlx4_qp_free(mdev->dev, rss_map->indir_qp); 1243 mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1244qp_alloc_err:
1244 kfree(rss_map->indir_qp); 1245 kfree(rss_map->indir_qp);
1245 rss_map->indir_qp = NULL; 1246 rss_map->indir_qp = NULL;
1246rss_err: 1247rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ce1be2a84231..65bec19a438f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -184,8 +184,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_eth_seg  eth;
-	struct mlx5_wqe_data_seg data[0];
+	union {
+		struct {
+			struct mlx5_wqe_eth_seg  eth;
+			struct mlx5_wqe_data_seg data[0];
+		};
+		u8 tls_progress_params_ctx[0];
+	};
 };
 
 struct mlx5e_rx_wqe_ll {
@@ -1100,6 +1105,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
 u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
 int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
 			      struct ethtool_ts_info *info);
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+			       struct ethtool_flash *flash);
 void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
 				  struct ethtool_pauseparam *pauseparam);
 int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index f3d98748b211..c7f86453c638 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 	u8 state;
 	int err;
 
-	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-		return 0;
-
 	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
 	if (err) {
 		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
 			   sq->sqn, err);
-		return err;
+		goto out;
 	}
 
-	if (state != MLX5_SQC_STATE_ERR) {
-		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
-		return -EINVAL;
-	}
+	if (state != MLX5_SQC_STATE_ERR)
+		goto out;
 
 	mlx5e_tx_disable_queue(sq->txq);
 
 	err = mlx5e_wait_for_sq_flush(sq);
 	if (err)
-		return err;
+		goto out;
 
 	/* At this point, no new packets will arrive from the stack as TXQ is
 	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
 
 	err = mlx5e_sq_to_ready(sq, state);
 	if (err)
-		return err;
+		goto out;
 
 	mlx5e_reset_txqsq_cc_pc(sq);
 	sq->stats->recover++;
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	mlx5e_activate_txqsq(sq);
 
 	return 0;
+out:
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+	return err;
}
 
 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index aaffa6f68dc0..7f78c004d12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -143,7 +143,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 {
 	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
 	/* TX queue is created active. */
+
+	spin_lock(&c->xskicosq_lock);
 	mlx5e_trigger_irq(&c->xskicosq);
+	spin_unlock(&c->xskicosq_lock);
 }
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 407da83474ef..b7298f9ee3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,12 +11,14 @@
 #include "accel/tls.h"
 
 #define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
-	(sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params))
+	(offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
+	 MLX5_ST_SZ_BYTES(tls_static_params))
 #define MLX5E_KTLS_STATIC_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
 
 #define MLX5E_KTLS_PROGRESS_WQE_SZ \
-	(sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params))
+	(offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
+	 MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
 	(DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
 #define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 3766545ce259..7833ddef0427 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | 69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
70 STATIC_PARAMS_DS_CNT); 70 STATIC_PARAMS_DS_CNT);
71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
72 cseg->imm = cpu_to_be32(priv_tx->tisn); 72 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
73 73
74 ucseg->flags = MLX5_UMR_INLINE; 74 ucseg->flags = MLX5_UMR_INLINE;
75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); 75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
80static void 80static void
81fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) 81fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
82{ 82{
83 MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn); 83 MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
84 MLX5_SET(tls_progress_params, ctx, record_tracker_state, 84 MLX5_SET(tls_progress_params, ctx, record_tracker_state,
85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); 85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
86 MLX5_SET(tls_progress_params, ctx, auth_state, 86 MLX5_SET(tls_progress_params, ctx, auth_state,
@@ -104,18 +104,20 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
104 PROGRESS_PARAMS_DS_CNT); 104 PROGRESS_PARAMS_DS_CNT);
105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
106 106
107 fill_progress_params_ctx(wqe->data, priv_tx); 107 fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
108} 108}
109 109
110static void tx_fill_wi(struct mlx5e_txqsq *sq, 110static void tx_fill_wi(struct mlx5e_txqsq *sq,
111 u16 pi, u8 num_wqebbs, 111 u16 pi, u8 num_wqebbs,
112 skb_frag_t *resync_dump_frag) 112 skb_frag_t *resync_dump_frag,
113 u32 num_bytes)
113{ 114{
114 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; 115 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
115 116
116 wi->skb = NULL; 117 wi->skb = NULL;
117 wi->num_wqebbs = num_wqebbs; 118 wi->num_wqebbs = num_wqebbs;
118 wi->resync_dump_frag = resync_dump_frag; 119 wi->resync_dump_frag = resync_dump_frag;
120 wi->num_bytes = num_bytes;
119} 121}
120 122
121void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) 123void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
143 145
144 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); 146 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
145 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); 147 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
146 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); 148 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
147 sq->pc += MLX5E_KTLS_STATIC_WQEBBS; 149 sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
148} 150}
149 151
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
157 159
158 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); 160 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
159 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); 161 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
160 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); 162 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
161 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; 163 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
162} 164}
163 165
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
248 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); 250 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
249} 251}
250 252
253struct mlx5e_dump_wqe {
254 struct mlx5_wqe_ctrl_seg ctrl;
255 struct mlx5_wqe_data_seg data;
256};
257
251static int 258static int
252tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, 259tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
253 skb_frag_t *frag, u32 tisn, bool first) 260 skb_frag_t *frag, u32 tisn, bool first)
254{ 261{
255 struct mlx5_wqe_ctrl_seg *cseg; 262 struct mlx5_wqe_ctrl_seg *cseg;
256 struct mlx5_wqe_eth_seg *eseg;
257 struct mlx5_wqe_data_seg *dseg; 263 struct mlx5_wqe_data_seg *dseg;
258 struct mlx5e_tx_wqe *wqe; 264 struct mlx5e_dump_wqe *wqe;
259 dma_addr_t dma_addr = 0; 265 dma_addr_t dma_addr = 0;
260 u16 ds_cnt, ds_cnt_inl;
261 u8 num_wqebbs; 266 u8 num_wqebbs;
262 u16 pi, ihs; 267 u16 ds_cnt;
263 int fsz; 268 int fsz;
264 269 u16 pi;
265 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
266 ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
267 ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
268 ds_cnt += ds_cnt_inl;
269 ds_cnt += 1; /* one frag */
270 270
271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); 271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
272 272
273 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
273 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 274 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
274 275
275 cseg = &wqe->ctrl; 276 cseg = &wqe->ctrl;
276 eseg = &wqe->eth; 277 dseg = &wqe->data;
277 dseg = wqe->data;
278 278
279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); 279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); 280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
281 cseg->imm = cpu_to_be32(tisn); 281 cseg->tisn = cpu_to_be32(tisn << 8);
282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
283 283
284 eseg->inline_hdr.sz = cpu_to_be16(ihs);
285 memcpy(eseg->inline_hdr.start, skb->data, ihs);
286 dseg += ds_cnt_inl;
287
288 fsz = skb_frag_size(frag); 284 fsz = skb_frag_size(frag);
289 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, 285 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
290 DMA_TO_DEVICE); 286 DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
296 dseg->byte_count = cpu_to_be32(fsz); 292 dseg->byte_count = cpu_to_be32(fsz);
297 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); 293 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
298 294
299 tx_fill_wi(sq, pi, num_wqebbs, frag); 295 tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
300 sq->pc += num_wqebbs; 296 sq->pc += num_wqebbs;
301 297
302 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, 298 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
323 struct mlx5_wq_cyc *wq = &sq->wq; 319 struct mlx5_wq_cyc *wq = &sq->wq;
324 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 320 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
325 321
326 tx_fill_wi(sq, pi, 1, NULL); 322 tx_fill_wi(sq, pi, 1, NULL, 0);
327 323
328 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); 324 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
329} 325}
@@ -434,7 +430,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
434 priv_tx->expected_seq = seq + datalen; 430 priv_tx->expected_seq = seq + datalen;
435 431
436 cseg = &(*wqe)->ctrl; 432 cseg = &(*wqe)->ctrl;
437 cseg->imm = cpu_to_be32(priv_tx->tisn); 433 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
438 434
439 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 435 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
440 stats->tls_encrypted_bytes += datalen; 436 stats->tls_encrypted_bytes += datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 8657e0f26995..2c75b2752f58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
437 return &arfs_t->rules_hash[bucket_idx]; 437 return &arfs_t->rules_hash[bucket_idx];
438} 438}
439 439
440static u8 arfs_get_ip_proto(const struct sk_buff *skb)
441{
442 return (skb->protocol == htons(ETH_P_IP)) ?
443 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
444}
445
446static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, 440static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
447 u8 ip_proto, __be16 etype) 441 u8 ip_proto, __be16 etype)
448{ 442{
@@ -602,31 +596,9 @@ out:
602 arfs_may_expire_flow(priv); 596 arfs_may_expire_flow(priv);
603} 597}
604 598
605/* return L4 destination port from ip4/6 packets */
606static __be16 arfs_get_dst_port(const struct sk_buff *skb)
607{
608 char *transport_header;
609
610 transport_header = skb_transport_header(skb);
611 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
612 return ((struct tcphdr *)transport_header)->dest;
613 return ((struct udphdr *)transport_header)->dest;
614}
615
616/* return L4 source port from ip4/6 packets */
617static __be16 arfs_get_src_port(const struct sk_buff *skb)
618{
619 char *transport_header;
620
621 transport_header = skb_transport_header(skb);
622 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
623 return ((struct tcphdr *)transport_header)->source;
624 return ((struct udphdr *)transport_header)->source;
625}
626
627static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, 599static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
628 struct arfs_table *arfs_t, 600 struct arfs_table *arfs_t,
629 const struct sk_buff *skb, 601 const struct flow_keys *fk,
630 u16 rxq, u32 flow_id) 602 u16 rxq, u32 flow_id)
631{ 603{
632 struct arfs_rule *rule; 604 struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
641 INIT_WORK(&rule->arfs_work, arfs_handle_work); 613 INIT_WORK(&rule->arfs_work, arfs_handle_work);
642 614
643 tuple = &rule->tuple; 615 tuple = &rule->tuple;
644 tuple->etype = skb->protocol; 616 tuple->etype = fk->basic.n_proto;
617 tuple->ip_proto = fk->basic.ip_proto;
645 if (tuple->etype == htons(ETH_P_IP)) { 618 if (tuple->etype == htons(ETH_P_IP)) {
646 tuple->src_ipv4 = ip_hdr(skb)->saddr; 619 tuple->src_ipv4 = fk->addrs.v4addrs.src;
647 tuple->dst_ipv4 = ip_hdr(skb)->daddr; 620 tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
648 } else { 621 } else {
649 memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 622 memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
650 sizeof(struct in6_addr)); 623 sizeof(struct in6_addr));
651 memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 624 memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr)); 625 sizeof(struct in6_addr));
653 } 626 }
654 tuple->ip_proto = arfs_get_ip_proto(skb); 627 tuple->src_port = fk->ports.src;
655 tuple->src_port = arfs_get_src_port(skb); 628 tuple->dst_port = fk->ports.dst;
656 tuple->dst_port = arfs_get_dst_port(skb);
657 629
658 rule->flow_id = flow_id; 630 rule->flow_id = flow_id;
659 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; 631 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
664 return rule; 636 return rule;
665} 637}
666 638
667static bool arfs_cmp_ips(struct arfs_tuple *tuple, 639static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
668 const struct sk_buff *skb)
669{ 640{
670 if (tuple->etype == htons(ETH_P_IP) && 641 if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
671 tuple->src_ipv4 == ip_hdr(skb)->saddr && 642 return false;
672 tuple->dst_ipv4 == ip_hdr(skb)->daddr) 643 if (tuple->etype != fk->basic.n_proto)
673 return true; 644 return false;
674 if (tuple->etype == htons(ETH_P_IPV6) && 645 if (tuple->etype == htons(ETH_P_IP))
675 (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 646 return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
676 sizeof(struct in6_addr))) && 647 tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
677 (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 648 if (tuple->etype == htons(ETH_P_IPV6))
678 sizeof(struct in6_addr)))) 649 return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
679 return true; 650 sizeof(struct in6_addr)) &&
651 !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr));
680 return false; 653 return false;
681} 654}
682 655
683static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, 656static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
684 const struct sk_buff *skb) 657 const struct flow_keys *fk)
685{ 658{
686 struct arfs_rule *arfs_rule; 659 struct arfs_rule *arfs_rule;
687 struct hlist_head *head; 660 struct hlist_head *head;
688 __be16 src_port = arfs_get_src_port(skb);
689 __be16 dst_port = arfs_get_dst_port(skb);
690 661
691 head = arfs_hash_bucket(arfs_t, src_port, dst_port); 662 head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
692 hlist_for_each_entry(arfs_rule, head, hlist) { 663 hlist_for_each_entry(arfs_rule, head, hlist) {
693 if (arfs_rule->tuple.src_port == src_port && 664 if (arfs_cmp(&arfs_rule->tuple, fk))
694 arfs_rule->tuple.dst_port == dst_port &&
695 arfs_cmp_ips(&arfs_rule->tuple, skb)) {
696 return arfs_rule; 665 return arfs_rule;
697 }
698 } 666 }
699 667
700 return NULL; 668 return NULL;
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
707 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; 675 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
708 struct arfs_table *arfs_t; 676 struct arfs_table *arfs_t;
709 struct arfs_rule *arfs_rule; 677 struct arfs_rule *arfs_rule;
678 struct flow_keys fk;
679
680 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
681 return -EPROTONOSUPPORT;
710 682
711 if (skb->protocol != htons(ETH_P_IP) && 683 if (fk.basic.n_proto != htons(ETH_P_IP) &&
712 skb->protocol != htons(ETH_P_IPV6)) 684 fk.basic.n_proto != htons(ETH_P_IPV6))
713 return -EPROTONOSUPPORT; 685 return -EPROTONOSUPPORT;
714 686
715 if (skb->encapsulation) 687 if (skb->encapsulation)
716 return -EPROTONOSUPPORT; 688 return -EPROTONOSUPPORT;
717 689
718 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); 690 arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
719 if (!arfs_t) 691 if (!arfs_t)
720 return -EPROTONOSUPPORT; 692 return -EPROTONOSUPPORT;
721 693
722 spin_lock_bh(&arfs->arfs_lock); 694 spin_lock_bh(&arfs->arfs_lock);
723 arfs_rule = arfs_find_rule(arfs_t, skb); 695 arfs_rule = arfs_find_rule(arfs_t, &fk);
724 if (arfs_rule) { 696 if (arfs_rule) {
725 if (arfs_rule->rxq == rxq_index) { 697 if (arfs_rule->rxq == rxq_index) {
726 spin_unlock_bh(&arfs->arfs_lock); 698 spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
728 } 700 }
729 arfs_rule->rxq = rxq_index; 701 arfs_rule->rxq = rxq_index;
730 } else { 702 } else {
731 arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, 703 arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
732 rxq_index, flow_id);
733 if (!arfs_rule) { 704 if (!arfs_rule) {
734 spin_unlock_bh(&arfs->arfs_lock); 705 spin_unlock_bh(&arfs->arfs_lock);
735 return -ENOMEM; 706 return -ENOMEM;
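Note: the aRFS rework dissects the skb once into a struct flow_keys and then matches and allocates rules from those keys, instead of re-parsing raw headers at every step. A standalone illustration of that "parse once, compare tuples against parsed keys" shape, with illustrative types rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct keys  { uint16_t n_proto; uint8_t ip_proto; uint16_t sport, dport; };
struct tuple { uint16_t etype;   uint8_t ip_proto; uint16_t sport, dport; };

/* Compare a stored rule tuple against the dissected keys; no header
 * parsing happens here, mirroring arfs_cmp() after the rework. */
static bool tuple_matches(const struct tuple *t, const struct keys *k)
{
	if (t->sport != k->sport || t->dport != k->dport)
		return false;
	if (t->etype != k->n_proto)
		return false;
	return t->ip_proto == k->ip_proto;
}

int main(void)
{
	struct keys  k = { 0x0800, 6, 12345, 443 };	/* IPv4/TCP */
	struct tuple t = { 0x0800, 6, 12345, 443 };

	printf("match: %s\n", tuple_matches(&t, &k) ? "yes" : "no");
	return 0;
}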
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 03bed714bac3..20e628c907e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1081,6 +1081,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
+	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+	    autoneg != AUTONEG_ENABLE) {
+		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+			   __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
 	link_modes = link_modes & eproto.cap;
 	if (!link_modes) {
 		netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
@@ -1338,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EOPNOTSUPP;
+
 	if (pauseparam->autoneg)
 		return -EINVAL;
 
@@ -1679,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
 	return 0;
 }
 
+int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
+			       struct ethtool_flash *flash)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct net_device *dev = priv->netdev;
+	const struct firmware *fw;
+	int err;
+
+	if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
+		return -EOPNOTSUPP;
+
+	err = request_firmware_direct(&fw, flash->data, &dev->dev);
+	if (err)
+		return err;
+
+	dev_hold(dev);
+	rtnl_unlock();
+
+	err = mlx5_firmware_flash(mdev, fw, NULL);
+	release_firmware(fw);
+
+	rtnl_lock();
+	dev_put(dev);
+	return err;
+}
+
+static int mlx5e_flash_device(struct net_device *dev,
+			      struct ethtool_flash *flash)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	return mlx5e_ethtool_flash_device(priv, flash);
+}
+
 static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
 				     bool is_rx_cq)
 {
@@ -1961,6 +2006,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
 	.set_wol	   = mlx5e_set_wol,
 	.get_module_info   = mlx5e_get_module_info,
 	.get_module_eeprom = mlx5e_get_module_eeprom,
+	.flash_device      = mlx5e_flash_device,
 	.get_priv_flags    = mlx5e_get_priv_flags,
 	.set_priv_flags    = mlx5e_set_priv_flags,
 	.self_test         = mlx5e_self_test,
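Note: mlx5e_ethtool_flash_device() runs under RTNL (ethtool callbacks hold it), so it pins the netdev with dev_hold(), drops the lock for the slow firmware flash, and reacquires it before returning. A toy sketch of that lock-juggling shape (print statements stand in for the real locking and flash calls):

#include <stdio.h>

static void lock(void)   { puts("rtnl_lock"); }
static void unlock(void) { puts("rtnl_unlock"); }

static int flash_device(void)
{
	int err;

	puts("dev_hold");	/* keep the netdev from going away */
	unlock();		/* don't stall unrelated RTNL users */

	err = 0;		/* mlx5_firmware_flash() analogue */

	lock();			/* caller expects RTNL held on return */
	puts("dev_put");
	return err;
}

int main(void)
{
	lock();			/* the ethtool core takes RTNL */
	flash_device();
	unlock();
	return 0;
}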
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6c712c5be4d8..9d5f6e56188f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1321,7 +1321,6 @@ err_free_txqsq:
 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
 {
 	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
-	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	netdev_tx_reset_queue(sq->txq);
 	netif_tx_start_queue(sq->txq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7ecfc53cf5f6..00b2d4a86159 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 				     struct mlx5_flow_spec *spec,
 				     struct flow_cls_offload *f,
 				     struct net_device *filter_dev,
-				     u8 *match_level, u8 *tunnel_match_level)
+				     u8 *inner_match_level, u8 *outer_match_level)
 {
 	struct netlink_ext_ack *extack = f->common.extack;
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
+	u8 *match_level;
 
-	*match_level = MLX5_MATCH_NONE;
+	match_level = outer_match_level;
 
 	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	}
 
 	if (mlx5e_get_tc_tun(filter_dev)) {
-		if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+		if (parse_tunnel_attr(priv, spec, f, filter_dev,
+				      outer_match_level))
 			return -EOPNOTSUPP;
 
-		/* In decap flow, header pointers should point to the inner
+		/* At this point, header pointers should point to the inner
 		 * headers, outer header were already set by parse_tunnel_attr
 		 */
+		match_level = inner_match_level;
 		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
 						       spec);
 		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 			    struct flow_cls_offload *f,
 			    struct net_device *filter_dev)
 {
+	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
 	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
 	struct mlx5_eswitch_rep *rep;
 	int err;
 
-	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
+	inner_match_level = MLX5_MATCH_NONE;
+	outer_match_level = MLX5_MATCH_NONE;
+
+	err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
+				 &outer_match_level);
+	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
+				 outer_match_level : inner_match_level;
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
 		rep = rpriv->rep;
 		if (rep->vport != MLX5_VPORT_UPLINK &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
-		     esw->offloads.inline_mode < match_level)) {
+		     esw->offloads.inline_mode < non_tunnel_match_level)) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Flow is not offloaded due to min inline setting");
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    match_level, esw->offloads.inline_mode);
+				    non_tunnel_match_level, esw->offloads.inline_mode);
 			return -EOPNOTSUPP;
 		}
 	}
 
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		flow->esw_attr->match_level = match_level;
-		flow->esw_attr->tunnel_match_level = tunnel_match_level;
+		flow->esw_attr->inner_match_level = inner_match_level;
+		flow->esw_attr->outer_match_level = outer_match_level;
 	} else {
-		flow->nic_attr->match_level = match_level;
+		flow->nic_attr->match_level = non_tunnel_match_level;
 	}
 
 	return err;
@@ -3158,7 +3167,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
 
 	esw_attr->parse_attr = parse_attr;
 	esw_attr->chain = f->common.chain_index;
-	esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+	esw_attr->prio = f->common.prio;
 
 	esw_attr->in_rep = in_rep;
 	esw_attr->in_mdev = in_mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a38e8a3c7c9a..04685dbb280c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
377 struct mlx5_termtbl_handle *termtbl; 377 struct mlx5_termtbl_handle *termtbl;
378 } dests[MLX5_MAX_FLOW_FWD_VPORTS]; 378 } dests[MLX5_MAX_FLOW_FWD_VPORTS];
379 u32 mod_hdr_id; 379 u32 mod_hdr_id;
380 u8 match_level; 380 u8 inner_match_level;
381 u8 tunnel_match_level; 381 u8 outer_match_level;
382 struct mlx5_fc *counter; 382 struct mlx5_fc *counter;
383 u32 chain; 383 u32 chain;
384 u16 prio; 384 u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 089ae4d48a82..0323fd078271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
-		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-		if (attr->match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
-	} else if (attr->match_level != MLX5_MATCH_NONE) {
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-	}
+	if (attr->inner_match_level != MLX5_MATCH_NONE)
+		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
-	if (attr->match_level != MLX5_MATCH_NONE)
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
 	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9314777d99e3..d685122d9ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
590 data_size = crdump_size - offset; 590 data_size = crdump_size - offset;
591 else 591 else
592 data_size = MLX5_CR_DUMP_CHUNK_SIZE; 592 data_size = MLX5_CR_DUMP_CHUNK_SIZE;
593 err = devlink_fmsg_binary_put(fmsg, cr_data, data_size); 593 err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
594 data_size);
594 if (err) 595 if (err)
595 goto free_data; 596 goto free_data;
596 } 597 }
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
700 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 701 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
701 goto out; 702 goto out;
702 703
704 fatal_error = check_fatal_sensors(dev);
705
706 if (fatal_error && !health->fatal_error) {
707 mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
708 dev->priv.health.fatal_error = fatal_error;
709 print_health_info(dev);
710 mlx5_trigger_health_work(dev);
711 goto out;
712 }
713
703 count = ioread32be(health->health_counter); 714 count = ioread32be(health->health_counter);
704 if (count == health->prev) 715 if (count == health->prev)
705 ++health->miss_counter; 716 ++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
 	if (health->synd && health->synd != prev_synd)
 		queue_work(health->wq, &health->report_work);
 
-	fatal_error = check_fatal_sensors(dev);
-
-	if (fatal_error && !health->fatal_error) {
-		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
-		dev->priv.health.fatal_error = fatal_error;
-		print_health_info(dev);
-		mlx5_trigger_health_work(dev);
-	}
-
 out:
 	mod_timer(&health->timer, get_next_poll_jiffies());
 }
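Two fixes land in health.c above: the crdump loop used to pass the buffer start on every iteration, so each chunk re-sent the first MLX5_CR_DUMP_CHUNK_SIZE bytes, and the fatal-sensor check now runs before the health counter is read so a fatal state short-circuits the poll. A minimal sketch of the corrected chunking pattern (the function and parameter names here are illustrative, not from the driver):

static int dump_in_chunks(struct devlink_fmsg *fmsg, const void *data,
			  u32 size, u32 chunk)
{
	u32 offset, data_size;
	int err;

	for (offset = 0; offset < size; offset += chunk) {
		/* the last chunk may be short */
		data_size = min(chunk, size - offset);
		/* advance the source pointer; without "+ offset" every
		 * iteration would emit the first chunk again
		 */
		err = devlink_fmsg_binary_put(fmsg,
					      (const char *)data + offset,
					      data_size);
		if (err)
			return err;
	}
	return 0;
}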
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ebd81f6b556e..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
 	return mlx5e_ethtool_get_ts_info(priv, info);
 }
 
+static int mlx5i_flash_device(struct net_device *netdev,
+			      struct ethtool_flash *flash)
+{
+	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+	return mlx5e_ethtool_flash_device(priv, flash);
+}
+
 enum mlx5_ptys_width {
 	MLX5_PTYS_WIDTH_1X = 1 << 0,
 	MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
 	.get_ethtool_stats = mlx5i_get_ethtool_stats,
 	.get_ringparam     = mlx5i_get_ringparam,
 	.set_ringparam     = mlx5i_set_ringparam,
+	.flash_device      = mlx5i_flash_device,
 	.get_channels      = mlx5i_get_channels,
 	.set_channels      = mlx5i_set_channels,
 	.get_coalesce      = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea9ee88491e5..ea1d4d26ece0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
 	case 128:
 		general_obj_key_size =
 			MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
+		key_p += sz_bytes;
 		break;
 	case 256:
 		general_obj_key_size =
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index e8ac90564dbe..84a87d059333 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -471,7 +471,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
 void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
 				 unsigned int priority)
 {
-	rulei->priority = priority >> 16;
+	rulei->priority = priority;
 }
 
 void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 63b07edd9d81..38bb1cfe4e8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -29,7 +29,7 @@
 
 struct mlxsw_sp_ptp_state {
 	struct mlxsw_sp *mlxsw_sp;
-	struct rhashtable unmatched_ht;
+	struct rhltable unmatched_ht;
 	spinlock_t unmatched_lock; /* protects the HT */
 	struct delayed_work ht_gc_dw;
 	u32 gc_cycle;
@@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key {
 
 struct mlxsw_sp1_ptp_unmatched {
 	struct mlxsw_sp1_ptp_key key;
-	struct rhash_head ht_node;
+	struct rhlist_head ht_node;
 	struct rcu_head rcu;
 	struct sk_buff *skb;
 	u64 timestamp;
@@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
 /* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
  * error.
  */
-static struct mlxsw_sp1_ptp_unmatched *
+static int
 mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
 			     struct mlxsw_sp1_ptp_key key,
 			     struct sk_buff *skb,
@@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
 	int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
 	struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
 	struct mlxsw_sp1_ptp_unmatched *unmatched;
-	struct mlxsw_sp1_ptp_unmatched *conflict;
+	int err;
 
 	unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
 	if (!unmatched)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	unmatched->key = key;
 	unmatched->skb = skb;
 	unmatched->timestamp = timestamp;
 	unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
 
-	conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht,
-						     &unmatched->ht_node,
-						     mlxsw_sp1_ptp_unmatched_ht_params);
-	if (conflict)
+	err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
+			      mlxsw_sp1_ptp_unmatched_ht_params);
+	if (err)
 		kfree(unmatched);
 
-	return conflict;
+	return err;
 }
 
 static struct mlxsw_sp1_ptp_unmatched *
 mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
-			       struct mlxsw_sp1_ptp_key key)
+			       struct mlxsw_sp1_ptp_key key, int *p_length)
 {
-	return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
-				 mlxsw_sp1_ptp_unmatched_ht_params);
+	struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
+	struct rhlist_head *tmp, *list;
+	int length = 0;
+
+	list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+			       mlxsw_sp1_ptp_unmatched_ht_params);
+	rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
+		last = unmatched;
+		length++;
+	}
+
+	*p_length = length;
+	return last;
 }
 
 static int
 mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
 			       struct mlxsw_sp1_ptp_unmatched *unmatched)
 {
-	return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht,
+	return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
 			       &unmatched->ht_node,
 			       mlxsw_sp1_ptp_unmatched_ht_params);
 }
 
 /* This function is called in the following scenarios:
@@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
 			     struct mlxsw_sp1_ptp_key key,
 			     struct sk_buff *skb, u64 timestamp)
 {
-	struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict;
+	struct mlxsw_sp1_ptp_unmatched *unmatched;
+	int length;
 	int err;
 
 	rcu_read_lock();
 
-	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
-
 	spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
 
-	if (unmatched) {
-		/* There was an unmatched entry when we looked, but it may have
-		 * been removed before we took the lock.
-		 */
-		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
-		if (err)
-			unmatched = NULL;
-	}
-
-	if (!unmatched) {
-		/* We have no unmatched entry, but one may have been added after
-		 * we looked, but before we took the lock.
-		 */
-		unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
-							 skb, timestamp);
-		if (IS_ERR(unmatched)) {
-			if (skb)
-				mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
-							    key.local_port,
-							    key.ingress, NULL);
-			unmatched = NULL;
-		} else if (unmatched) {
-			/* Save just told us, under lock, that the entry is
-			 * there, so this has to work.
-			 */
-			err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
-							     unmatched);
-			WARN_ON_ONCE(err);
-		}
-	}
-
-	/* If unmatched is non-NULL here, it comes either from the lookup, or
-	 * from the save attempt above. In either case the entry was removed
-	 * from the hash table. If unmatched is NULL, a new unmatched entry was
-	 * added to the hash table, and there was no conflict.
-	 */
-
+	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
 	if (skb && unmatched && unmatched->timestamp) {
 		unmatched->skb = skb;
 	} else if (timestamp && unmatched && unmatched->skb) {
 		unmatched->timestamp = timestamp;
-	} else if (unmatched) {
-		/* unmatched holds an older entry of the same type: either an
-		 * skb if we are handling skb, or a timestamp if we are handling
-		 * timestamp. We can't match that up, so save what we have.
+	} else {
+		/* Either there is no entry to match, or one that is there is
+		 * incompatible.
 		 */
-		conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
-							skb, timestamp);
-		if (IS_ERR(conflict)) {
-			if (skb)
-				mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
-							    key.local_port,
-							    key.ingress, NULL);
-		} else {
-			/* Above, we removed an object with this key from the
-			 * hash table, under lock, so conflict can not be a
-			 * valid pointer.
-			 */
-			WARN_ON_ONCE(conflict);
-		}
+		if (length < 100)
+			err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+							   skb, timestamp);
+		else
+			err = -E2BIG;
+		if (err && skb)
+			mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
+						    key.local_port,
+						    key.ingress, NULL);
+		unmatched = NULL;
+	}
+
+	if (unmatched) {
+		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
+		WARN_ON_ONCE(err);
 	}
 
 	spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
@@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
 	local_bh_disable();
 
 	spin_lock(&ptp_state->unmatched_lock);
-	err = rhashtable_remove_fast(&ptp_state->unmatched_ht,
-				     &unmatched->ht_node,
-				     mlxsw_sp1_ptp_unmatched_ht_params);
+	err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
+			      mlxsw_sp1_ptp_unmatched_ht_params);
 	spin_unlock(&ptp_state->unmatched_lock);
 
 	if (err)
@@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
 	ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
 	gc_cycle = ptp_state->gc_cycle++;
 
-	rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter);
+	rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
 	rhashtable_walk_start(&iter);
 	while ((obj = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(obj))
@@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
 
 	spin_lock_init(&ptp_state->unmatched_lock);
 
-	err = rhashtable_init(&ptp_state->unmatched_ht,
+	err = rhltable_init(&ptp_state->unmatched_ht,
 			      &mlxsw_sp1_ptp_unmatched_ht_params);
 	if (err)
 		goto err_hashtable_init;
 
@@ -891,7 +863,7 @@ err_fifo_clr:
 err_mtptpt1_set:
 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
 err_mtptpt_set:
-	rhashtable_destroy(&ptp_state->unmatched_ht);
+	rhltable_destroy(&ptp_state->unmatched_ht);
 err_hashtable_init:
 	kfree(ptp_state);
 	return ERR_PTR(err);
@@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
 	mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-	rhashtable_free_and_destroy(&ptp_state->unmatched_ht,
+	rhltable_free_and_destroy(&ptp_state->unmatched_ht,
 				    &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
 	kfree(ptp_state);
 }
 
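The spectrum_ptp.c conversion above swaps an rhashtable, which rejects duplicate keys, for an rhltable, which chains entries sharing a key; a lookup then returns the whole chain. A condensed sketch of that API under an assumed item type (the struct and function names are illustrative):

#include <linux/rhashtable.h>

struct item {
	u32 key;
	struct rhlist_head ht_node;	/* chain node, not rhash_head */
};

static const struct rhashtable_params item_ht_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct item, key),
	.head_offset = offsetof(struct item, ht_node),
};

/* Count every entry filed under @key: rhltable_lookup() hands back the
 * duplicate chain and rhl_for_each_entry_rcu() walks it.
 */
static int item_count(struct rhltable *ht, u32 key)
{
	struct rhlist_head *tmp, *list;
	struct item *it;
	int n = 0;

	rcu_read_lock();
	list = rhltable_lookup(ht, &key, item_ht_params);
	rhl_for_each_entry_rcu(it, tmp, list, ht_node)
		n++;
	rcu_read_unlock();
	return n;
}

rhltable_insert() never reports a key conflict, which is why the driver above caps the chain length at 100 itself before saving another unmatched piece.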
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 39aca1ab4687..86fc6e6b46dd 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
 		break;
 	case OCELOT_ACL_ACTION_TRAP:
 		VCAP_ACT_SET(PORT_MASK, 0x0);
-		VCAP_ACT_SET(MASK_MODE, 0x0);
+		VCAP_ACT_SET(MASK_MODE, 0x1);
 		VCAP_ACT_SET(POLICE_ENA, 0x0);
 		VCAP_ACT_SET(POLICE_IDX, 0x0);
 		VCAP_ACT_SET(CPU_QU_NUM, 0x0);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 59487d446a09..b894bc0c9c16 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -13,12 +13,6 @@ struct ocelot_port_block {
 	struct ocelot_port *port;
 };
 
-static u16 get_prio(u32 prio)
-{
-	/* prio starts from 0x1000 while the ids starts from 0 */
-	return prio >> 16;
-}
-
 static int ocelot_flower_parse_action(struct flow_cls_offload *f,
 				      struct ocelot_ace_rule *rule)
 {
@@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
 	}
 
 finished_key_parsing:
-	ocelot_rule->prio = get_prio(f->common.prio);
+	ocelot_rule->prio = f->common.prio;
 	ocelot_rule->id = f->cookie;
 	return ocelot_flower_parse_action(f, ocelot_rule);
 }
@@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
 	struct ocelot_ace_rule rule;
 	int ret;
 
-	rule.prio = get_prio(f->common.prio);
+	rule.prio = f->common.prio;
 	rule.port = port_block->port;
 	rule.id = f->cookie;
 
@@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
 	struct ocelot_ace_rule rule;
 	int ret;
 
-	rule.prio = get_prio(f->common.prio);
+	rule.prio = f->common.prio;
 	rule.port = port_block->port;
 	rule.id = f->cookie;
 	ret = ocelot_ace_rule_stats_update(&rule);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index d8b7fba96d58..337b0cbfd153 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * setup (if available). */
 	status = myri10ge_request_irq(mgp);
 	if (status != 0)
-		goto abort_with_firmware;
+		goto abort_with_slices;
 	myri10ge_free_irq(mgp);
 
 	/* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 4054b70d7719..5afcb3c4c2ef 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -1163,7 +1163,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 	     bool clr_gpr, lmem_step step)
 {
 	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
-	bool first = true, last;
+	bool first = true, narrow_ld, last;
 	bool needs_inc = false;
 	swreg stack_off_reg;
 	u8 prev_gpr = 255;
@@ -1209,13 +1209,22 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 		needs_inc = true;
 	}
+
+	narrow_ld = clr_gpr && size < 8;
+
 	if (lm3) {
+		unsigned int nop_cnt;
+
 		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
-		/* For size < 4 one slot will be filled by zeroing of upper. */
-		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
+		/* For size < 4 one slot will be filled by zeroing of upper,
+		 * but be careful, that zeroing could be eliminated by zext
+		 * optimization.
+		 */
+		nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3;
+		wrp_nops(nfp_prog, nop_cnt);
 	}
 
-	if (clr_gpr && size < 8)
+	if (narrow_ld)
 		wrp_zext(nfp_prog, meta, gpr);
 
 	while (size) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index e209f150c5f2..457bdc60f3ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1409,13 +1409,21 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 	struct nfp_flower_priv *priv = app->priv;
 	struct flow_block_cb *block_cb;
 
-	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
-	    !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
-	      nfp_flower_internal_port_can_offload(app, netdev)))
+	if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	     !nfp_flower_internal_port_can_offload(app, netdev)) ||
+	    (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+	     nfp_flower_internal_port_can_offload(app, netdev)))
 		return -EOPNOTSUPP;
 
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+		if (cb_priv &&
+		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+					  cb_priv,
+					  &nfp_block_cb_list))
+			return -EBUSY;
+
 		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
 		if (!cb_priv)
 			return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 86e968cd5ffd..124a43dc136a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 
-	if (flow->common.prio != (1 << 16)) {
+	if (flow->common.prio != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a7a80f4b722a..f0ee982eb1b5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -328,13 +328,13 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 
 	flow.daddr = *(__be32 *)n->primary_key;
 
-	/* Only concerned with route changes for representors. */
-	if (!nfp_netdev_is_nfp_repr(n->dev))
-		return NOTIFY_DONE;
-
 	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
 	app = app_priv->app;
 
+	if (!nfp_netdev_is_nfp_repr(n->dev) &&
+	    !nfp_flower_internal_port_can_offload(app, n->dev))
+		return NOTIFY_DONE;
+
 	/* Only concerned with changes to routes already added to NFP. */
 	if (!nfp_tun_has_route(app, flow.daddr))
 		return NOTIFY_DONE;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 829dd60ab937..1efff7f68ef6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 					      &drv_version);
 		if (rc) {
 			DP_NOTICE(cdev, "Failed sending drv version command\n");
-			return rc;
+			goto err4;
 		}
 	}
 
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
 	return 0;
 
+err4:
+	qed_ll2_dealloc_if(cdev);
 err3:
 	qed_hw_stop(cdev);
 err2:
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index e1dd6ea60d67..bae0074ab9aa 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 	skb = napi_alloc_skb(&tp->napi, pkt_size);
 	if (skb)
 		skb_copy_to_linear_data(skb, data, pkt_size);
+	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
 
 	return skb;
 }
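The one-line r8169 fix above pairs with an earlier dma_sync_single_for_cpu(): once the CPU has copied a received frame out of a streaming DMA buffer, the buffer must be handed back to the device before the NIC may DMA into it again. A sketch of the full rx-copybreak pattern (the wrapper name is illustrative):

static struct sk_buff *rx_copy(struct napi_struct *napi, struct device *d,
			       dma_addr_t addr, void *data, int pkt_size)
{
	struct sk_buff *skb;

	/* make the device's writes visible to the CPU */
	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
	skb = napi_alloc_skb(napi, pkt_size);
	if (skb)
		skb_copy_to_linear_data(skb, data, pkt_size);
	/* return ownership to the NIC before the descriptor is reused */
	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
	return skb;
}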
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ef8f08931fe8..6cacd5e893ac 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
 		kfree(ts_skb);
 		if (tag == tfa_tag) {
 			skb_tstamp_tx(skb, &shhwtstamps);
+			dev_consume_skb_any(skb);
 			break;
+		} else {
+			dev_kfree_skb_any(skb);
 		}
 	}
 	ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			}
 			goto unmap;
 		}
-		ts_skb->skb = skb;
+		ts_skb->skb = skb_get(skb);
 		ts_skb->tag = priv->ts_skb_tag++;
 		priv->ts_skb_tag &= 0x3ff;
 		list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
 	/* Clear the timestamp list */
 	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
 		list_del(&ts_skb->list);
+		kfree_skb(ts_skb->skb);
 		kfree(ts_skb);
 	}
 
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 7a5e6c5abb57..276c7cae7cee 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev)
 		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
 		       "aborting.\n");
 		err = -ENODEV;
-		goto err_out_free_page;
+		goto err_out_free_attrs;
 	}
 
 	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
 
 	return 0;
 
-err_out_free_page:
-	free_page((unsigned long) sp->srings);
+err_out_free_attrs:
+	dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
+		       sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
 err_out_free_dev:
 	free_netdev(dev);
 
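The sgiseeq fix matters because sp->srings comes from dma_alloc_attrs(), and a DMA allocation must be released by the allocator that produced it; free_page() on that pointer corrupts the DMA bookkeeping. A sketch of the matching pair (the wrapper names are illustrative):

static void *ring_alloc(struct platform_device *pdev, size_t size,
			dma_addr_t *dma)
{
	/* must later be released with dma_free_attrs() and the same
	 * attrs; free_page()/kfree() are not interchangeable with it
	 */
	return dma_alloc_attrs(&pdev->dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_NON_CONSISTENT);
}

static void ring_free(struct platform_device *pdev, void *ring, size_t size,
		      dma_addr_t dma)
{
	dma_free_attrs(&pdev->dev, size, ring, dma, DMA_ATTR_NON_CONSISTENT);
}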
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 4644b2aeeba1..e2e469c37a4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1194,10 +1194,8 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
 	int ret;
 	struct device *dev = &bsp_priv->pdev->dev;
 
-	if (!ldo) {
-		dev_err(dev, "no regulator found\n");
-		return -1;
-	}
+	if (!ldo)
+		return 0;
 
 	if (enable) {
 		ret = regulator_enable(ldo);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 37c0bc699cd9..6c305b6ecad0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv,
 	struct stmmac_tc_entry *entry, *frag = NULL;
 	struct tc_u32_sel *sel = cls->knode.sel;
 	u32 off, data, mask, real_off, rem;
-	u32 prio = cls->common.prio;
+	u32 prio = cls->common.prio << 16;
 	int ret;
 
 	/* Only 1 match per entry */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 32a89744972d..a46b8b2e44e1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (!cpsw)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, cpsw);
 	cpsw->dev = dev;
 
 	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_cpts;
 	}
 
-	platform_set_drvdata(pdev, cpsw);
 	priv = netdev_priv(ndev);
 	priv->cpsw = cpsw;
 	priv->ndev = ndev;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 8479a440527b..12466a72cefc 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
 			pci_unmap_single(lp->pci_dev,
 					 lp->rx_skbs[cur_bd].skb_dma,
 					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
 				memmove(skb->data, skb->data - NET_IP_ALIGN,
 					pkt_len);
 			data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 78a7de3fb622..c62f474b6d08 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
 static void tsi108_stat_carry(struct net_device *dev)
 {
 	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
 	u32 carry1, carry2;
 
-	spin_lock_irq(&data->misclock);
+	spin_lock_irqsave(&data->misclock, flags);
 
 	carry1 = TSI_READ(TSI108_STAT_CARRY1);
 	carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
 			      TSI108_STAT_TXPAUSEDROP_CARRY,
 			      &data->tx_pause_drop);
 
-	spin_unlock_irq(&data->misclock);
+	spin_unlock_irqrestore(&data->misclock, flags);
 }
 
 /* Read a stat counter atomically with respect to carries.
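tsi108_stat_carry() is also reached from the interrupt handler, where spin_unlock_irq() would unconditionally re-enable interrupts on exit; the irqsave/irqrestore pair preserves whatever IRQ state the caller had. A minimal sketch of the rule (the type and names are illustrative):

struct foo_priv {
	spinlock_t lock;
	u64 counter;
};

static void foo_update(struct foo_priv *p)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);	/* safe in any context */
	p->counter++;
	spin_unlock_irqrestore(&p->lock, flags); /* IRQ state preserved */
}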
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3544e1991579..e8fce6d715ef 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
 				struct rtnl_link_stats64 *t)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+	struct netvsc_device *nvdev;
 	struct netvsc_vf_pcpu_stats vf_tot;
 	int i;
 
+	rcu_read_lock();
+
+	nvdev = rcu_dereference(ndev_ctx->nvdev);
 	if (!nvdev)
-		return;
+		goto out;
 
 	netdev_stats_to_stats64(t, &net->stats);
 
@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
 		t->rx_packets += packets;
 		t->multicast += multicast;
 	}
+out:
+	rcu_read_unlock();
 }
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
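ndo_get_stats64 can run without RTNL held, so the netvsc change above wraps the nvdev access in an RCU read-side section instead of relying on rcu_dereference_rtnl(). The general pattern, with illustrative types standing in for the driver's own:

static void get_stats(struct net_device *net, struct rtnl_link_stats64 *t)
{
	struct ctx *c = netdev_priv(net);	/* hypothetical priv type */
	struct dev_state *s;

	rcu_read_lock();
	s = rcu_dereference(c->state);	/* pointer valid only inside */
	if (s)
		fill_stats(s, t);	/* hypothetical helper */
	rcu_read_unlock();
}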
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b41696e16bdc..c20e7ef18bc9 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 		err = hwsim_subscribe_all_others(phy);
 		if (err < 0) {
 			mutex_unlock(&hwsim_phys_lock);
-			goto err_reg;
+			goto err_subscribe;
 		}
 	}
 	list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
 	return idx;
 
+err_subscribe:
+	ieee802154_unregister_hw(phy->hw);
 err_reg:
 	kfree(pib);
 err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
 	return 0;
 
 platform_drv:
-	genl_unregister_family(&hwsim_genl_family);
-platform_dev:
 	platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+	genl_unregister_family(&hwsim_genl_family);
 	return rc;
 }
 
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index c5c417a3c0ce..bcc40a236624 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -73,46 +73,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
 	debugfs_remove_recursive(nsim_dev_port->ddir);
 }
 
+static struct net *nsim_devlink_net(struct devlink *devlink)
+{
+	return &init_net;
+}
+
 static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV4_FIB, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
 }
 
 static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV4_FIB_RULES, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
 }
 
 static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV6_FIB, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
 }
 
 static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV6_FIB_RULES, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
 }
 
 static int nsim_dev_resources_register(struct devlink *devlink)
 {
-	struct nsim_dev *nsim_dev = devlink_priv(devlink);
 	struct devlink_resource_size_params params = {
 		.size_max = (u64)-1,
 		.size_granularity = 1,
 		.unit = DEVLINK_RESOURCE_UNIT_ENTRY
 	};
+	struct net *net = nsim_devlink_net(devlink);
 	int err;
 	u64 n;
 
@@ -126,8 +127,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		goto out;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV4_FIB, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
 	err = devlink_resource_register(devlink, "fib", n,
 					NSIM_RESOURCE_IPV4_FIB,
 					NSIM_RESOURCE_IPV4, &params);
@@ -136,8 +136,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		return err;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV4_FIB_RULES, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
 	err = devlink_resource_register(devlink, "fib-rules", n,
 					NSIM_RESOURCE_IPV4_FIB_RULES,
 					NSIM_RESOURCE_IPV4, &params);
@@ -156,8 +155,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		goto out;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV6_FIB, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
 	err = devlink_resource_register(devlink, "fib", n,
 					NSIM_RESOURCE_IPV6_FIB,
 					NSIM_RESOURCE_IPV6, &params);
@@ -166,8 +164,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		return err;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV6_FIB_RULES, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
 	err = devlink_resource_register(devlink, "fib-rules", n,
 					NSIM_RESOURCE_IPV6_FIB_RULES,
 					NSIM_RESOURCE_IPV6, &params);
@@ -179,19 +176,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV4_FIB,
 					  nsim_dev_ipv4_fib_resource_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV4_FIB_RULES,
 					  nsim_dev_ipv4_fib_rules_res_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV6_FIB,
 					  nsim_dev_ipv6_fib_resource_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV6_FIB_RULES,
 					  nsim_dev_ipv6_fib_rules_res_occ_get,
-					  nsim_dev);
+					  net);
 out:
 	return err;
 }
@@ -199,11 +196,11 @@ out:
 static int nsim_dev_reload(struct devlink *devlink,
 			   struct netlink_ext_ack *extack)
 {
-	struct nsim_dev *nsim_dev = devlink_priv(devlink);
 	enum nsim_resource_id res_ids[] = {
 		NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
 		NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
 	};
+	struct net *net = nsim_devlink_net(devlink);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
@@ -212,8 +209,7 @@ static int nsim_dev_reload(struct devlink *devlink,
 
 		err = devlink_resource_size_get(devlink, res_ids[i], &val);
 		if (!err) {
-			err = nsim_fib_set_max(nsim_dev->fib_data,
-					       res_ids[i], val, extack);
+			err = nsim_fib_set_max(net, res_ids[i], val, extack);
 			if (err)
 				return err;
 		}
@@ -285,15 +281,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
 	mutex_init(&nsim_dev->port_list_lock);
 	nsim_dev->fw_update_status = true;
 
-	nsim_dev->fib_data = nsim_fib_create();
-	if (IS_ERR(nsim_dev->fib_data)) {
-		err = PTR_ERR(nsim_dev->fib_data);
-		goto err_devlink_free;
-	}
-
 	err = nsim_dev_resources_register(devlink);
 	if (err)
-		goto err_fib_destroy;
+		goto err_devlink_free;
 
 	err = devlink_register(devlink, &nsim_bus_dev->dev);
 	if (err)
@@ -315,8 +305,6 @@ err_dl_unregister:
 	devlink_unregister(devlink);
 err_resources_unregister:
 	devlink_resources_unregister(devlink, NULL);
-err_fib_destroy:
-	nsim_fib_destroy(nsim_dev->fib_data);
 err_devlink_free:
 	devlink_free(devlink);
 	return ERR_PTR(err);
@@ -330,7 +318,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
 	nsim_dev_debugfs_exit(nsim_dev);
 	devlink_unregister(devlink);
 	devlink_resources_unregister(devlink, NULL);
-	nsim_fib_destroy(nsim_dev->fib_data);
 	mutex_destroy(&nsim_dev->port_list_lock);
 	devlink_free(devlink);
 }
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 8c57ba747772..f61d094746c0 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,6 +18,7 @@
 #include <net/ip_fib.h>
 #include <net/ip6_fib.h>
 #include <net/fib_rules.h>
+#include <net/netns/generic.h>
 
 #include "netdevsim.h"
 
@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
 };
 
 struct nsim_fib_data {
-	struct notifier_block fib_nb;
 	struct nsim_per_fib_data ipv4;
 	struct nsim_per_fib_data ipv6;
 };
 
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, bool max)
+static unsigned int nsim_fib_net_id;
+
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
 {
+	struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
 	struct nsim_fib_entry *entry;
 
 	switch (res_id) {
@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
 	return max ? entry->max : entry->num;
 }
 
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
 		     struct netlink_ext_ack *extack)
 {
+	struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
 	struct nsim_fib_entry *entry;
 	int err = 0;
 
@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
 	return err;
 }
 
-static int nsim_fib_rule_event(struct nsim_fib_data *data,
-			       struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
 {
+	struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
 	struct netlink_ext_ack *extack = info->extack;
 	int err = 0;
 
@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
 	return err;
 }
 
-static int nsim_fib_event(struct nsim_fib_data *data,
-			  struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct fib_notifier_info *info, bool add)
 {
+	struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
 	struct netlink_ext_ack *extack = info->extack;
 	int err = 0;
 
@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
 static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
 			     void *ptr)
 {
-	struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
-						  fib_nb);
 	struct fib_notifier_info *info = ptr;
 	int err = 0;
 
 	switch (event) {
 	case FIB_EVENT_RULE_ADD: /* fall through */
 	case FIB_EVENT_RULE_DEL:
-		err = nsim_fib_rule_event(data, info,
-					  event == FIB_EVENT_RULE_ADD);
+		err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
 		break;
 
 	case FIB_EVENT_ENTRY_ADD: /* fall through */
 	case FIB_EVENT_ENTRY_DEL:
-		err = nsim_fib_event(data, info,
-				     event == FIB_EVENT_ENTRY_ADD);
+		err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
 		break;
 	}
 
@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
 /* inconsistent dump, trying again */
 static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
 {
-	struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
-						  fib_nb);
+	struct nsim_fib_data *data;
+	struct net *net;
+
+	rcu_read_lock();
+	for_each_net_rcu(net) {
+		data = net_generic(net, nsim_fib_net_id);
+
+		data->ipv4.fib.num = 0ULL;
+		data->ipv4.rules.num = 0ULL;
 
-	data->ipv4.fib.num = 0ULL;
-	data->ipv4.rules.num = 0ULL;
-	data->ipv6.fib.num = 0ULL;
-	data->ipv6.rules.num = 0ULL;
+		data->ipv6.fib.num = 0ULL;
+		data->ipv6.rules.num = 0ULL;
+	}
+	rcu_read_unlock();
 }
 
-struct nsim_fib_data *nsim_fib_create(void)
-{
-	struct nsim_fib_data *data;
-	int err;
+static struct notifier_block nsim_fib_nb = {
+	.notifier_call = nsim_fib_event_nb,
+};
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return ERR_PTR(-ENOMEM);
+/* Initialize per network namespace state */
+static int __net_init nsim_fib_netns_init(struct net *net)
+{
+	struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
 
 	data->ipv4.fib.max = (u64)-1;
 	data->ipv4.rules.max = (u64)-1;
@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
 	data->ipv6.fib.max = (u64)-1;
 	data->ipv6.rules.max = (u64)-1;
 
-	data->fib_nb.notifier_call = nsim_fib_event_nb;
-	err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
-	if (err) {
-		pr_err("Failed to register fib notifier\n");
-		goto err_out;
-	}
+	return 0;
+}
 
-	return data;
+static struct pernet_operations nsim_fib_net_ops = {
+	.init = nsim_fib_netns_init,
+	.id = &nsim_fib_net_id,
+	.size = sizeof(struct nsim_fib_data),
+};
 
-err_out:
-	kfree(data);
-	return ERR_PTR(err);
+void nsim_fib_exit(void)
+{
+	unregister_pernet_subsys(&nsim_fib_net_ops);
+	unregister_fib_notifier(&nsim_fib_nb);
 }
 
-void nsim_fib_destroy(struct nsim_fib_data *data)
+int nsim_fib_init(void)
 {
-	unregister_fib_notifier(&data->fib_nb);
-	kfree(data);
+	int err;
+
+	err = register_pernet_subsys(&nsim_fib_net_ops);
+	if (err < 0) {
+		pr_err("Failed to register pernet subsystem\n");
+		goto err_out;
+	}
+
+	err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+	if (err < 0) {
+		pr_err("Failed to register fib notifier\n");
+		goto err_out;
+	}
+
+err_out:
+	return err;
 }
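The fib.c rewrite above replaces per-device notifier state with pernet storage: register_pernet_subsys() allocates .size bytes for every network namespace, .init runs once per namespace, and net_generic(net, id) retrieves that namespace's block anywhere afterwards. A condensed sketch of the pattern (the demo names are illustrative):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static unsigned int demo_net_id;

struct demo_net {
	u64 counter;
};

static int __net_init demo_netns_init(struct net *net)
{
	struct demo_net *d = net_generic(net, demo_net_id);

	d->counter = 0;		/* storage already allocated per netns */
	return 0;
}

static struct pernet_operations demo_net_ops = {
	.init = demo_netns_init,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};

/* module init calls register_pernet_subsys(&demo_net_ops);
 * module exit calls unregister_pernet_subsys(&demo_net_ops);
 */

Note that nsim_fib_init() above returns err even on success by falling through the err_out label with err == 0; on the notifier-failure path the pernet subsystem stays registered.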
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0740940f41b1..55f57f76d01b 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -357,12 +357,18 @@ static int __init nsim_module_init(void)
 	if (err)
 		goto err_dev_exit;
 
-	err = rtnl_link_register(&nsim_link_ops);
+	err = nsim_fib_init();
 	if (err)
 		goto err_bus_exit;
 
+	err = rtnl_link_register(&nsim_link_ops);
+	if (err)
+		goto err_fib_exit;
+
 	return 0;
 
+err_fib_exit:
+	nsim_fib_exit();
 err_bus_exit:
 	nsim_bus_exit();
 err_dev_exit:
@@ -373,6 +379,7 @@ err_dev_exit:
 static void __exit nsim_module_exit(void)
 {
 	rtnl_link_unregister(&nsim_link_ops);
+	nsim_fib_exit();
 	nsim_bus_exit();
 	nsim_dev_exit();
 }
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 79c05af2a7c0..9404637d34b7 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -169,12 +169,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
 int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
 		      unsigned int port_index);
 
-struct nsim_fib_data *nsim_fib_create(void);
-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, u64 val,
+int nsim_fib_init(void);
+void nsim_fib_exit(void);
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
 		     struct netlink_ext_ack *extack);
 
 #if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 222ccd9ecfce..6ad8b1c63c34 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -257,36 +257,20 @@ static int at803x_config_init(struct phy_device *phydev)
 	 * after HW reset: RX delay enabled and TX delay disabled
 	 * after SW reset: RX delay enabled, while TX delay retains the
 	 * value before reset.
-	 *
-	 * So let's first disable the RX and TX delays in PHY and enable
-	 * them based on the mode selected (this also takes care of RGMII
-	 * mode where we expect delays to be disabled)
 	 */
-
-	ret = at803x_disable_rx_delay(phydev);
-	if (ret < 0)
-		return ret;
-	ret = at803x_disable_tx_delay(phydev);
-	if (ret < 0)
-		return ret;
-
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
-		/* If RGMII_ID or RGMII_RXID are specified enable RX delay,
-		 * otherwise keep it disabled
-		 */
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
 		ret = at803x_enable_rx_delay(phydev);
-		if (ret < 0)
-			return ret;
-	}
+	else
+		ret = at803x_disable_rx_delay(phydev);
+	if (ret < 0)
+		return ret;
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
-	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
-		/* If RGMII_ID or RGMII_TXID are specified enable TX delay,
-		 * otherwise keep it disabled
-		 */
+	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
 		ret = at803x_enable_tx_delay(phydev);
-	}
+	else
+		ret = at803x_disable_tx_delay(phydev);
 
 	return ret;
 }
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index b9d4145781ca..7935593debb1 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
 	int val, devad;
 	bool link = true;
 
+	if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+		if (val < 0)
+			return val;
+
+		/* Autoneg is being started, therefore disregard current
+		 * link status and report link as down.
+		 */
+		if (val & MDIO_AN_CTRL1_RESTART) {
+			phydev->link = 0;
+			return 0;
+		}
+	}
+
 	while (mmd_mask && link) {
 		devad = __ffs(mmd_mask);
 		mmd_mask &= ~BIT(devad);
@@ -509,6 +523,32 @@ int genphy_c45_read_status(struct phy_device *phydev)
 }
 EXPORT_SYMBOL_GPL(genphy_c45_read_status);
 
+/**
+ * genphy_c45_config_aneg - restart auto-negotiation or forced setup
+ * @phydev: target phy_device struct
+ *
+ * Description: If auto-negotiation is enabled, we configure the
+ * advertising, and then restart auto-negotiation. If it is not
+ * enabled, then we force a configuration.
+ */
+int genphy_c45_config_aneg(struct phy_device *phydev)
+{
+	bool changed = false;
+	int ret;
+
+	if (phydev->autoneg == AUTONEG_DISABLE)
+		return genphy_c45_pma_setup_forced(phydev);
+
+	ret = genphy_c45_an_config_aneg(phydev);
+	if (ret < 0)
+		return ret;
+	if (ret > 0)
+		changed = true;
+
+	return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_config_aneg);
+
 /* The gen10g_* functions are the old Clause 45 stub */
 
 int gen10g_config_aneg(struct phy_device *phydev)
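With genphy_c45_config_aneg() exported, a Clause 45 PHY driver that needs no special auto-negotiation handling can point its config_aneg hook at the helper, and phy_config_aneg() (see the phy.c hunk below) now falls back to it for C45 PHYs that lack a Clause 22 register set. A sketch of a driver wiring it in (the ID and name are placeholders, not a real device):

static struct phy_driver example_c45_driver = {
	.phy_id		= 0x12345678,	/* placeholder OUI/model */
	.phy_id_mask	= 0xffffffff,
	.name		= "Example C45 PHY",
	.config_aneg	= genphy_c45_config_aneg,
	.read_status	= genphy_c45_read_status,
};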
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef7aa738e0dc..6b0f89369b46 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -507,7 +507,7 @@ static int phy_config_aneg(struct phy_device *phydev)
507 * allowed to call genphy_config_aneg() 507 * allowed to call genphy_config_aneg()
508 */ 508 */
509 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) 509 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
510 return -EOPNOTSUPP; 510 return genphy_c45_config_aneg(phydev);
511 511
512 return genphy_config_aneg(phydev); 512 return genphy_config_aneg(phydev);
513} 513}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7ddd91df99e3..27ebc2c6c2d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1752,7 +1752,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
1752 */ 1752 */
1753int genphy_update_link(struct phy_device *phydev) 1753int genphy_update_link(struct phy_device *phydev)
1754{ 1754{
1755 int status; 1755 int status = 0, bmcr;
1756
1757 bmcr = phy_read(phydev, MII_BMCR);
1758 if (bmcr < 0)
1759 return bmcr;
1760
1761 /* Autoneg is being started, therefore disregard BMSR value and
1762 * report link as down.
1763 */
1764 if (bmcr & BMCR_ANRESTART)
1765 goto done;
1756 1766
1757 /* The link state is latched low so that momentary link 1767 /* The link state is latched low so that momentary link
1758 * drops can be detected. Do not double-read the status 1768 * drops can be detected. Do not double-read the status
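
Both PHY hunks above apply the same guard: if the self-clearing autoneg-restart bit is still set in the control register (BMCR_ANRESTART for clause 22, MDIO_AN_CTRL1_RESTART for clause 45), the latched status is stale, so the link is reported down instead. A small sketch of the clause-22 flavour, with mdio_read() standing in for the bus accessor:

#include <stdio.h>

#define MII_BMCR	0x00
#define BMCR_ANRESTART	0x0200

/* fake bus read: pretend the restart bit has not self-cleared yet */
static int mdio_read(int reg)
{
	return reg == MII_BMCR ? BMCR_ANRESTART : 0;
}

int main(void)
{
	int bmcr = mdio_read(MII_BMCR);

	if (bmcr < 0)
		return 1;		/* propagate bus errors */
	if (bmcr & BMCR_ANRESTART) {
		printf("autoneg restart pending: report link down\n");
		return 0;		/* skip the latched status read */
	}
	printf("read latched status normally\n");
	return 0;
}
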
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index abfa0da9bbd2..e8089def5a46 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
1004 1004
1005 team->dev->vlan_features = vlan_features; 1005 team->dev->vlan_features = vlan_features;
1006 team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | 1006 team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1007 NETIF_F_HW_VLAN_CTAG_TX |
1008 NETIF_F_HW_VLAN_STAG_TX |
1007 NETIF_F_GSO_UDP_L4; 1009 NETIF_F_GSO_UDP_L4;
1008 team->dev->hard_header_len = max_hard_header_len; 1010 team->dev->hard_header_len = max_hard_header_len;
1009 1011
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 5519248a791e..32b08b18e120 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
163 } 163 }
164 if (!timeout) { 164 if (!timeout) {
165 dev_err(&udev->dev, "firmware not ready in time\n"); 165 dev_err(&udev->dev, "firmware not ready in time\n");
166 return -ETIMEDOUT; 166 ret = -ETIMEDOUT;
167 goto err;
167 } 168 }
168 169
169 /* enable ethernet mode (?) */ 170 /* enable ethernet mode (?) */
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d62b6706a537..fc5895f85cee 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
113 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1), 113 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
114 usb_buf, 24); 114 usb_buf, 24);
115 if (status != 0) 115 if (status != 0)
116 return status; 116 goto out;
117 117
118 memcpy(usb_buf, init_msg_2, 12); 118 memcpy(usb_buf, init_msg_2, 12);
119 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2), 119 status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
120 usb_buf, 28); 120 usb_buf, 28);
121 if (status != 0) 121 if (status != 0)
122 return status; 122 goto out;
123 123
124 memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN); 124 memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
125 125out:
126 kfree(usb_buf); 126 kfree(usb_buf);
127 return status; 127 return status;
128} 128}
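
The kalmia change is a classic single-exit cleanup: the early returns leaked usb_buf, so every failure path now funnels through one label that frees it. A userspace sketch, with send_init() standing in for kalmia_send_init_packet():

#include <stdlib.h>

static int send_init(unsigned char *buf) { (void)buf; return 0; }

static int init_and_get_addr(void)
{
	unsigned char *buf = malloc(28);
	int status;

	if (!buf)
		return -1;

	status = send_init(buf);
	if (status != 0)
		goto out;		/* buf is freed on every path */

	status = send_init(buf);
out:
	free(buf);
	return status;
}

int main(void)
{
	return init_and_get_addr();
}
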
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3d92ea6fcc02..f033fee225a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
3792 ret = register_netdev(netdev); 3792 ret = register_netdev(netdev);
3793 if (ret != 0) { 3793 if (ret != 0) {
3794 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3794 netif_err(dev, probe, netdev, "couldn't register the device\n");
3795 goto out3; 3795 goto out4;
3796 } 3796 }
3797 3797
3798 usb_set_intfdata(intf, dev); 3798 usb_set_intfdata(intf, dev);
@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
3807 3807
3808 ret = lan78xx_phy_init(dev); 3808 ret = lan78xx_phy_init(dev);
3809 if (ret < 0) 3809 if (ret < 0)
3810 goto out4; 3810 goto out5;
3811 3811
3812 return 0; 3812 return 0;
3813 3813
3814out4: 3814out5:
3815 unregister_netdev(netdev); 3815 unregister_netdev(netdev);
3816out4:
3817 usb_free_urb(dev->urb_intr);
3816out3: 3818out3:
3817 lan78xx_unbind(dev, intf); 3819 lan78xx_unbind(dev, intf);
3818out2: 3820out2:
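
The lan78xx relabelling follows the usual rule for goto unwinding: labels run in reverse order of acquisition, so inserting usb_free_urb() between unregister_netdev() and lan78xx_unbind() means renumbering the later labels. A sketch with illustrative resource names:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	printf("acquire %s\n", what);
	return fail ? -1 : 0;
}

int main(void)
{
	if (acquire("netdev", 0))
		goto out;
	if (acquire("intr urb", 0))
		goto err_netdev;
	if (acquire("phy", 1))		/* simulate the late failure */
		goto err_urb;
	return 0;

err_urb:
	printf("free intr urb\n");	/* newest resource first */
err_netdev:
	printf("unregister netdev\n");
out:
	return 1;
}
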
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0cc03a9ff545..04137ac373b0 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
799 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), 799 ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
800 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, 800 RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
801 value, index, tmp, size, 500); 801 value, index, tmp, size, 500);
802 if (ret < 0)
803 memset(data, 0xff, size);
804 else
805 memcpy(data, tmp, size);
802 806
803 memcpy(data, tmp, size);
804 kfree(tmp); 807 kfree(tmp);
805 808
806 return ret; 809 return ret;
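
The get_registers() fix is a "poison on failed read" idiom: when the control transfer fails, the caller's buffer is filled with 0xff rather than left holding (or copied from) stale heap contents. A userspace sketch; ctrl_read() fakes the USB control transfer and is not the r8152 API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int ctrl_read(void *tmp, size_t size) { (void)tmp; (void)size; return -5; }

static int get_regs(void *data, size_t size)
{
	void *tmp = malloc(size);
	int ret;

	if (!tmp)
		return -1;

	ret = ctrl_read(tmp, size);
	if (ret < 0)
		memset(data, 0xff, size);	/* never hand back stale bytes */
	else
		memcpy(data, tmp, size);

	free(tmp);
	return ret;
}

int main(void)
{
	unsigned char reg[4];

	get_regs(reg, sizeof(reg));
	printf("%02x %02x %02x %02x\n", reg[0], reg[1], reg[2], reg[3]);
	return 0;
}
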
@@ -4018,8 +4021,7 @@ static int rtl8152_close(struct net_device *netdev)
4018#ifdef CONFIG_PM_SLEEP 4021#ifdef CONFIG_PM_SLEEP
4019 unregister_pm_notifier(&tp->pm_notifier); 4022 unregister_pm_notifier(&tp->pm_notifier);
4020#endif 4023#endif
4021 if (!test_bit(RTL8152_UNPLUG, &tp->flags)) 4024 napi_disable(&tp->napi);
4022 napi_disable(&tp->napi);
4023 clear_bit(WORK_ENABLE, &tp->flags); 4025 clear_bit(WORK_ENABLE, &tp->flags);
4024 usb_kill_urb(tp->intr_urb); 4026 usb_kill_urb(tp->intr_urb);
4025 cancel_delayed_work_sync(&tp->schedule); 4027 cancel_delayed_work_sync(&tp->schedule);
@@ -5350,7 +5352,6 @@ static int rtl8152_probe(struct usb_interface *intf,
5350 return 0; 5352 return 0;
5351 5353
5352out1: 5354out1:
5353 netif_napi_del(&tp->napi);
5354 usb_set_intfdata(intf, NULL); 5355 usb_set_intfdata(intf, NULL);
5355out: 5356out:
5356 free_netdev(netdev); 5357 free_netdev(netdev);
@@ -5365,7 +5366,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
5365 if (tp) { 5366 if (tp) {
5366 rtl_set_unplug(tp); 5367 rtl_set_unplug(tp);
5367 5368
5368 netif_napi_del(&tp->napi);
5369 unregister_netdev(tp->netdev); 5369 unregister_netdev(tp->netdev);
5370 cancel_delayed_work_sync(&tp->hw_phy_work); 5370 cancel_delayed_work_sync(&tp->hw_phy_work);
5371 tp->rtl_ops.unload(tp); 5371 tp->rtl_ops.unload(tp);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4f3de0ac8b0b..ba98e0971b84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1331 } 1331 }
1332 } 1332 }
1333 1333
1334 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { 1334 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
1335 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) 1335 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
1336 schedule_delayed_work(&vi->refill, 0); 1336 schedule_delayed_work(&vi->refill, 0);
1337 } 1337 }
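
One way to read the changed refill test: with a 1024-entry ring and a NAPI budget of 64, the old threshold (vring/2 = 512) could postpone refills far longer than a single poll can free, while min(budget, vring)/2 = 32 triggers them promptly. A worked check, with made-up numbers:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int budget = 64, vring = 1024, num_free = 100;

	printf("old test fires: %d\n", num_free > vring / 2);			/* 0 */
	printf("new test fires: %d\n", num_free > min_u(budget, vring) / 2);	/* 1 */
	return 0;
}
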
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e9fc168bb734..489cba9b284d 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
351 } 351 }
352 result = i2400m_barker_db_add(barker); 352 result = i2400m_barker_db_add(barker);
353 if (result < 0) 353 if (result < 0)
354 goto error_add; 354 goto error_parse_add;
355 } 355 }
356 kfree(options_orig); 356 kfree(options_orig);
357 } 357 }
358 return 0; 358 return 0;
359 359
360error_parse_add:
360error_parse: 361error_parse:
362 kfree(options_orig);
361error_add: 363error_add:
362 kfree(i2400m_barker_db); 364 kfree(i2400m_barker_db);
363 return result; 365 return result;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 1f500cddb3a7..55b713255b8e 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -556,6 +556,30 @@ const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
556 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, 556 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
557}; 557};
558 558
559const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = {
560 .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
561 .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
562 IWL_DEVICE_22500,
563 /*
564 * This device doesn't support receiving BlockAck with a large bitmap
565 * so we need to restrict the size of transmitted aggregation to the
566 * HT size; mac80211 would otherwise pick the HE max (256) by default.
567 */
568 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
569};
570
571const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = {
572 .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
573 .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
574 IWL_DEVICE_22500,
575 /*
576 * This device doesn't support receiving BlockAck with a large bitmap
577 * so we need to restrict the size of transmitted aggregation to the
578 * HT size; mac80211 would otherwise pick the HE max (256) by default.
579 */
580 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
581};
582
559const struct iwl_cfg iwl22000_2ax_cfg_jf = { 583const struct iwl_cfg iwl22000_2ax_cfg_jf = {
560 .name = "Intel(R) Dual Band Wireless AX 22000", 584 .name = "Intel(R) Dual Band Wireless AX 22000",
561 .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, 585 .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 1c1bf1b281cd..6c04f8223aff 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -577,6 +577,8 @@ extern const struct iwl_cfg iwl_ax1650i_cfg_quz_hr;
577extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; 577extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr;
578extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; 578extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
579extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; 579extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
580extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0;
581extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0;
580extern const struct iwl_cfg killer1650x_2ax_cfg; 582extern const struct iwl_cfg killer1650x_2ax_cfg;
581extern const struct iwl_cfg killer1650w_2ax_cfg; 583extern const struct iwl_cfg killer1650w_2ax_cfg;
582extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; 584extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index cb22d447fcb8..fe776e35b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
554 cpu_to_le32(vif->bss_conf.use_short_slot ? 554 cpu_to_le32(vif->bss_conf.use_short_slot ?
555 MAC_FLG_SHORT_SLOT : 0); 555 MAC_FLG_SHORT_SLOT : 0);
556 556
557 cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); 557 cmd->filter_flags = 0;
558 558
559 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 559 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
560 u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); 560 u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
623 /* We need the dtim_period to set the MAC as associated */ 623 /* We need the dtim_period to set the MAC as associated */
624 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && 624 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
625 !force_assoc_off) { 625 !force_assoc_off) {
626 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
627 u8 ap_sta_id = mvmvif->ap_sta_id;
626 u32 dtim_offs; 628 u32 dtim_offs;
627 629
628 /* 630 /*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
658 dtim_offs); 660 dtim_offs);
659 661
660 ctxt_sta->is_assoc = cpu_to_le32(1); 662 ctxt_sta->is_assoc = cpu_to_le32(1);
663
664 /*
665 * allow multicast data frames only as long as the station is
666 * authorized, i.e., GTK keys are already installed (if needed)
667 */
668 if (ap_sta_id < IWL_MVM_STATION_COUNT) {
669 struct ieee80211_sta *sta;
670
671 rcu_read_lock();
672
673 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
674 if (!IS_ERR_OR_NULL(sta)) {
675 struct iwl_mvm_sta *mvmsta =
676 iwl_mvm_sta_from_mac80211(sta);
677
678 if (mvmsta->sta_state ==
679 IEEE80211_STA_AUTHORIZED)
680 cmd.filter_flags |=
681 cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
682 }
683
684 rcu_read_unlock();
685 }
661 } else { 686 } else {
662 ctxt_sta->is_assoc = cpu_to_le32(0); 687 ctxt_sta->is_assoc = cpu_to_le32(0);
663 688
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
703 MAC_FILTER_IN_CONTROL_AND_MGMT | 728 MAC_FILTER_IN_CONTROL_AND_MGMT |
704 MAC_FILTER_IN_BEACON | 729 MAC_FILTER_IN_BEACON |
705 MAC_FILTER_IN_PROBE_REQUEST | 730 MAC_FILTER_IN_PROBE_REQUEST |
706 MAC_FILTER_IN_CRC32); 731 MAC_FILTER_IN_CRC32 |
732 MAC_FILTER_ACCEPT_GRP);
707 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS); 733 ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
708 734
709 /* Allocate sniffer station */ 735 /* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
727 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); 753 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
728 754
729 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON | 755 cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
730 MAC_FILTER_IN_PROBE_REQUEST); 756 MAC_FILTER_IN_PROBE_REQUEST |
757 MAC_FILTER_ACCEPT_GRP);
731 758
732 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */ 759 /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
733 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); 760 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
733 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); 760 cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1c904b5226aa..a7bc00d1296f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
3327 /* enable beacon filtering */ 3327 /* enable beacon filtering */
3328 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3328 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
3329 3329
3330 /*
3331 * Now that the station is authorized, i.e., keys were already
3332 * installed, need to indicate to the FW that
3333 * multicast data frames can be forwarded to the driver
3334 */
3335 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3336
3330 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3337 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3331 true); 3338 true);
3332 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3339 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
3333 new_state == IEEE80211_STA_ASSOC) { 3340 new_state == IEEE80211_STA_ASSOC) {
3341 /* Multicast data frames are no longer allowed */
3342 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3343
3334 /* disable beacon filtering */ 3344 /* disable beacon filtering */
3335 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3345 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3336 WARN_ON(ret && 3346 WARN_ON(ret &&
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index de711c1160d3..d9ed53b7c768 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1062,7 +1062,28 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1062 iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; 1062 iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0;
1063 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) 1063 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1064 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; 1064 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
1065 else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0)
1066 iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0;
1067 else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0)
1068 iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0;
1065 } 1069 }
1070
1071 /* same thing for QuZ... */
1072 if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
1073 if (cfg == &iwl_ax101_cfg_qu_hr)
1074 cfg = &iwl_ax101_cfg_quz_hr;
1075 else if (cfg == &iwl_ax201_cfg_qu_hr)
1076 cfg = &iwl_ax201_cfg_quz_hr;
1077 else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
1078 cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
1079 else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
1080 cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
1081 else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
1082 cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
1083 else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1084 cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
1085 }
1086
1066#endif 1087#endif
1067 1088
1068 pci_set_drvdata(pdev, iwl_trans); 1089 pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..db62c8314603 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3602,11 +3602,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3602 } 3602 }
3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && 3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3605 ((trans->cfg != &iwl_ax200_cfg_cc && 3605 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
3606 trans->cfg != &killer1650x_2ax_cfg &&
3607 trans->cfg != &killer1650w_2ax_cfg &&
3608 trans->cfg != &iwl_ax201_cfg_quz_hr) ||
3609 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
3610 u32 hw_status; 3606 u32 hw_status;
3611 3607
3612 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); 3608 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..9ef6b8fe03c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
99 u16 len = byte_cnt; 99 u16 len = byte_cnt;
100 __le16 bc_ent; 100 __le16 bc_ent;
101 101
102 if (trans_pcie->bc_table_dword) 102 if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
103 len = DIV_ROUND_UP(len, 4);
104
105 if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
106 return; 103 return;
107 104
108 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + 105 filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
117 */ 114 */
118 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; 115 num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
119 116
120 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); 117 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
121 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) 118 /* Starting from 22560, the HW expects bytes */
119 WARN_ON(trans_pcie->bc_table_dword);
120 WARN_ON(len > 0x3FFF);
121 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; 122 scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
123 else 123 } else {
124 /* Until 22560, the HW expects DW */
125 WARN_ON(!trans_pcie->bc_table_dword);
126 len = DIV_ROUND_UP(len, 4);
127 WARN_ON(len > 0xFFF);
128 bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
124 scd_bc_tbl->tfd_offset[idx] = bc_ent; 129 scd_bc_tbl->tfd_offset[idx] = bc_ent;
130 }
125} 131}
126 132
127/* 133/*
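
The tx-gen2 hunk splits the byte-count-table encoding by device family: from 22560 on, the entry stores the length in bytes (up to 0x3FFF) with the fetch-chunk count at bit 14, while older families store dwords (up to 0xFFF) with the count at bit 12. A sketch of the packing:

#include <stdint.h>
#include <stdio.h>

static uint16_t bc_ent(uint16_t len, unsigned int chunks, int gen3)
{
	if (gen3)		/* >= 22560: bytes, chunks at bit 14 */
		return (uint16_t)(len | (chunks << 14));
	/* older families: dwords (rounded up), chunks at bit 12 */
	return (uint16_t)(((len + 3) / 4) | (chunks << 12));
}

int main(void)
{
	printf("gen3:  0x%04x\n", bc_ent(1500, 1, 1));	/* 1500 bytes */
	printf("older: 0x%04x\n", bc_ent(1500, 1, 0));	/* 375 dwords */
	return 0;
}
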
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 627ed1fc7b15..645f4d15fb61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
136 .release_buffered_frames = mt76_release_buffered_frames, 136 .release_buffered_frames = mt76_release_buffered_frames,
137}; 137};
138 138
139static int mt76x0u_init_hardware(struct mt76x02_dev *dev) 139static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
140{ 140{
141 int err; 141 int err;
142 142
143 mt76x0_chip_onoff(dev, true, true); 143 mt76x0_chip_onoff(dev, true, reset);
144 144
145 if (!mt76x02_wait_for_mac(&dev->mt76)) 145 if (!mt76x02_wait_for_mac(&dev->mt76))
146 return -ETIMEDOUT; 146 return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
173 if (err < 0) 173 if (err < 0)
174 goto out_err; 174 goto out_err;
175 175
176 err = mt76x0u_init_hardware(dev); 176 err = mt76x0u_init_hardware(dev, true);
177 if (err < 0) 177 if (err < 0)
178 goto out_err; 178 goto out_err;
179 179
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
309 if (ret < 0) 309 if (ret < 0)
310 goto err; 310 goto err;
311 311
312 ret = mt76x0u_init_hardware(dev); 312 ret = mt76x0u_init_hardware(dev, false);
313 if (ret) 313 if (ret)
314 goto err; 314 goto err;
315 315
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c9b957ac5733..ecbe78b8027b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
6095 } 6095 }
6096 6096
6097 /* 6097 /*
6098 * Clear encryption initialization vectors on start, but keep them
6099 * for watchdog reset. Otherwise we will have wrong IVs and not be
6100 * able to keep connections after reset.
6101 */
6102 if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
6103 for (i = 0; i < 256; i++)
6104 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
6105
6106 /*
6098 * Clear all beacons 6107 * Clear all beacons
6099 */ 6108 */
6100 for (i = 0; i < 8; i++) 6109 for (i = 0; i < 8; i++)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 7e43690a861c..2b216edd0c7d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
658 DEVICE_STATE_ENABLED_RADIO, 658 DEVICE_STATE_ENABLED_RADIO,
659 DEVICE_STATE_SCANNING, 659 DEVICE_STATE_SCANNING,
660 DEVICE_STATE_FLUSHING, 660 DEVICE_STATE_FLUSHING,
661 DEVICE_STATE_RESET,
661 662
662 /* 663 /*
663 * Driver configuration 664 * Driver configuration
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 35414f97a978..9d158237ac67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1256 1256
1257int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) 1257int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1258{ 1258{
1259 int retval; 1259 int retval = 0;
1260 1260
1261 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { 1261 if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
1262 /* 1262 /*
1263 * This is a special case for ieee80211_restart_hw(); otherwise 1263 * This is a special case for ieee80211_restart_hw(); otherwise
1264 * mac80211 never calls start() twice in a row without stop(). 1264 * mac80211 never calls start() twice in a row without stop().
1265 */ 1265 */
1266 set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
1266 rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); 1267 rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
1267 rt2x00lib_stop(rt2x00dev); 1268 rt2x00lib_stop(rt2x00dev);
1268 } 1269 }
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1273 */ 1274 */
1274 retval = rt2x00lib_load_firmware(rt2x00dev); 1275 retval = rt2x00lib_load_firmware(rt2x00dev);
1275 if (retval) 1276 if (retval)
1276 return retval; 1277 goto out;
1277 1278
1278 /* 1279 /*
1279 * Initialize the device. 1280 * Initialize the device.
1280 */ 1281 */
1281 retval = rt2x00lib_initialize(rt2x00dev); 1282 retval = rt2x00lib_initialize(rt2x00dev);
1282 if (retval) 1283 if (retval)
1283 return retval; 1284 goto out;
1284 1285
1285 rt2x00dev->intf_ap_count = 0; 1286 rt2x00dev->intf_ap_count = 0;
1286 rt2x00dev->intf_sta_count = 0; 1287 rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1289 /* Enable the radio */ 1290 /* Enable the radio */
1290 retval = rt2x00lib_enable_radio(rt2x00dev); 1291 retval = rt2x00lib_enable_radio(rt2x00dev);
1291 if (retval) 1292 if (retval)
1292 return retval; 1293 goto out;
1293 1294
1294 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); 1295 set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
1295 1296
1296 return 0; 1297out:
1298 clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
1299 return retval;
1297} 1300}
1298 1301
1299void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) 1302void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
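
The rt2x00 change gates register init on a new DEVICE_STATE_RESET flag, set only around the ieee80211_restart_hw() path, so a watchdog restart keeps the stored encryption IVs while a cold start still wipes them; the flag is cleared on every exit from start(). A compressed sketch:

#include <stdbool.h>
#include <stdio.h>

static bool device_reset;	/* DEVICE_STATE_RESET stand-in */

static void init_registers(void)
{
	if (!device_reset)
		printf("clearing IV/EIV entries\n");
	else
		printf("keeping IVs for existing connections\n");
}

static void start(bool restarting)
{
	if (restarting)
		device_reset = true;
	init_registers();
	device_reset = false;	/* cleared on every exit path */
}

int main(void)
{
	start(false);	/* cold start */
	start(true);	/* ieee80211_restart_hw() path */
	return 0;
}
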
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1d9940d4e8c7..c9262ffeefe4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
925 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; 925 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
926 nskb = xenvif_alloc_skb(0); 926 nskb = xenvif_alloc_skb(0);
927 if (unlikely(nskb == NULL)) { 927 if (unlikely(nskb == NULL)) {
928 skb_shinfo(skb)->nr_frags = 0;
928 kfree_skb(skb); 929 kfree_skb(skb);
929 xenvif_tx_err(queue, &txreq, extra_count, idx); 930 xenvif_tx_err(queue, &txreq, extra_count, idx);
930 if (net_ratelimit()) 931 if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
940 941
941 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { 942 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
942 /* Failure in xenvif_set_skb_gso is fatal. */ 943 /* Failure in xenvif_set_skb_gso is fatal. */
944 skb_shinfo(skb)->nr_frags = 0;
943 kfree_skb(skb); 945 kfree_skb(skb);
944 kfree_skb(nskb); 946 kfree_skb(nskb);
945 break; 947 break;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3e7b11cf1aae..cb98b8fe786e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
655 resource_size_t start, size; 655 resource_size_t start, size;
656 struct nd_region *nd_region; 656 struct nd_region *nd_region;
657 unsigned long npfns, align; 657 unsigned long npfns, align;
658 u32 end_trunc;
658 struct nd_pfn_sb *pfn_sb; 659 struct nd_pfn_sb *pfn_sb;
659 phys_addr_t offset; 660 phys_addr_t offset;
660 const char *sig; 661 const char *sig;
@@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
696 size = resource_size(&nsio->res); 697 size = resource_size(&nsio->res);
697 npfns = PHYS_PFN(size - SZ_8K); 698 npfns = PHYS_PFN(size - SZ_8K);
698 align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT)); 699 align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
700 end_trunc = start + size - ALIGN_DOWN(start + size, align);
699 if (nd_pfn->mode == PFN_MODE_PMEM) { 701 if (nd_pfn->mode == PFN_MODE_PMEM) {
700 /* 702 /*
701 * The altmap should be padded out to the block size used 703 * The altmap should be padded out to the block size used
@@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
714 return -ENXIO; 716 return -ENXIO;
715 } 717 }
716 718
717 npfns = PHYS_PFN(size - offset); 719 npfns = PHYS_PFN(size - offset - end_trunc);
718 pfn_sb->mode = cpu_to_le32(nd_pfn->mode); 720 pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
719 pfn_sb->dataoff = cpu_to_le64(offset); 721 pfn_sb->dataoff = cpu_to_le64(offset);
720 pfn_sb->npfns = cpu_to_le64(npfns); 722 pfn_sb->npfns = cpu_to_le64(npfns);
@@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
723 memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16); 725 memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
724 pfn_sb->version_major = cpu_to_le16(1); 726 pfn_sb->version_major = cpu_to_le16(1);
725 pfn_sb->version_minor = cpu_to_le16(3); 727 pfn_sb->version_minor = cpu_to_le16(3);
728 pfn_sb->end_trunc = cpu_to_le32(end_trunc);
726 pfn_sb->align = cpu_to_le32(nd_pfn->align); 729 pfn_sb->align = cpu_to_le32(nd_pfn->align);
727 checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); 730 checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
728 pfn_sb->checksum = cpu_to_le64(checksum); 731 pfn_sb->checksum = cpu_to_le64(checksum);
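
The pfn_devs hunk trims the namespace so npfns only counts pages below the last alignment boundary: end_trunc is the distance from ALIGN_DOWN(start + size, align) to the real end, and it is subtracted before computing npfns. A worked example with illustrative start/size values and a 2 MiB alignment:

#include <stdio.h>

#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long start = 0x100000000ULL;	/* 4 GiB */
	unsigned long long size  = 0x40301000ULL;
	unsigned long long align = 1ULL << 21;		/* 2 MiB */
	unsigned long long end_trunc =
		start + size - ALIGN_DOWN(start + size, align);

	printf("end_trunc = 0x%llx bytes\n", end_trunc);	/* 0x101000 */
	return 0;
}
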
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c258a1ce4b28..d3d6b7bd6903 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2257,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
2257 .vid = 0x1179, 2257 .vid = 0x1179,
2258 .mn = "THNSF5256GPUK TOSHIBA", 2258 .mn = "THNSF5256GPUK TOSHIBA",
2259 .quirks = NVME_QUIRK_NO_APST, 2259 .quirks = NVME_QUIRK_NO_APST,
2260 },
2261 {
2262 /*
2263 * This LiteON CL1-3D*-Q11 firmware version has a race
2264 * condition associated with actions related to suspend to idle.
2265 * LiteON has resolved the problem in future firmware.
2266 */
2267 .vid = 0x14a4,
2268 .fr = "22301111",
2269 .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
2260 } 2270 }
2261}; 2271};
2262 2272
@@ -2597,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2597 goto out_free; 2607 goto out_free;
2598 } 2608 }
2599 2609
2610 if (!(ctrl->ops->flags & NVME_F_FABRICS))
2611 ctrl->cntlid = le16_to_cpu(id->cntlid);
2612
2600 if (!ctrl->identified) { 2613 if (!ctrl->identified) {
2601 int i; 2614 int i;
2602 2615
@@ -2697,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2697 goto out_free; 2710 goto out_free;
2698 } 2711 }
2699 } else { 2712 } else {
2700 ctrl->cntlid = le16_to_cpu(id->cntlid);
2701 ctrl->hmpre = le32_to_cpu(id->hmpre); 2713 ctrl->hmpre = le32_to_cpu(id->hmpre);
2702 ctrl->hmmin = le32_to_cpu(id->hmmin); 2714 ctrl->hmmin = le32_to_cpu(id->hmmin);
2703 ctrl->hmminds = le32_to_cpu(id->hmminds); 2715 ctrl->hmminds = le32_to_cpu(id->hmminds);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 888d4543894e..af831d3d15d0 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -428,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
428 srcu_read_unlock(&head->srcu, srcu_idx); 428 srcu_read_unlock(&head->srcu, srcu_idx);
429 } 429 }
430 430
431 synchronize_srcu(&ns->head->srcu);
431 kblockd_schedule_work(&ns->head->requeue_work); 432 kblockd_schedule_work(&ns->head->requeue_work);
432} 433}
433 434
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 778b3a0b6adb..2d678fb968c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,11 @@ enum nvme_quirks {
92 * Broken Write Zeroes. 92 * Broken Write Zeroes.
93 */ 93 */
94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9), 94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
95
96 /*
97 * Force simple suspend/resume path.
98 */
99 NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
95}; 100};
96 101
97/* 102/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6bd9b1033965..732d5b63ec05 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2876,7 +2876,8 @@ static int nvme_suspend(struct device *dev)
2876 * state (which may not be possible if the link is up). 2876 * state (which may not be possible if the link is up).
2877 */ 2877 */
2878 if (pm_suspend_via_firmware() || !ctrl->npss || 2878 if (pm_suspend_via_firmware() || !ctrl->npss ||
2879 !pcie_aspm_enabled(pdev)) { 2879 !pcie_aspm_enabled(pdev) ||
2880 (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
2880 nvme_dev_disable(ndev, true); 2881 nvme_dev_disable(ndev, true);
2881 return 0; 2882 return 0;
2882 } 2883 }
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 208aacf39329..44c4ae1abd00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5256 */ 5256 */
5257 if (ioread32(map + 0x2240c) & 0x2) { 5257 if (ioread32(map + 0x2240c) & 0x2) {
5258 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); 5258 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5259 ret = pci_reset_function(pdev); 5259 ret = pci_reset_bus(pdev);
5260 if (ret < 0) 5260 if (ret < 0)
5261 pci_err(pdev, "Failed to reset GPU: %d\n", ret); 5261 pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5262 } 5262 }
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e504d255d5ce..430731cdf827 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
707 */ 707 */
708static int __maybe_unused cros_ec_ishtp_suspend(struct device *device) 708static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
709{ 709{
710 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 710 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
711 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); 711 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
712 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); 712 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
713 713
@@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
722 */ 722 */
723static int __maybe_unused cros_ec_ishtp_resume(struct device *device) 723static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
724{ 724{
725 struct ishtp_cl_device *cl_device = dev_get_drvdata(device); 725 struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
726 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device); 726 struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
727 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl); 727 struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
728 728
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de448de802..86d88aec94a1 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
742 USB_CH_IP_CUR_LVL_1P5; 742 USB_CH_IP_CUR_LVL_1P5;
743 break; 743 break;
744 } 744 }
745 /* Else, fall through */
745 case USB_STAT_HM_IDGND: 746 case USB_STAT_HM_IDGND:
746 dev_err(di->dev, "USB Type - Charging not allowed\n"); 747 dev_err(di->dev, "USB Type - Charging not allowed\n");
747 di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05; 748 di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c7ee07ce3615..28db887d38ed 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -629,6 +629,7 @@ struct qeth_seqno {
629struct qeth_reply { 629struct qeth_reply {
630 struct list_head list; 630 struct list_head list;
631 struct completion received; 631 struct completion received;
632 spinlock_t lock;
632 int (*callback)(struct qeth_card *, struct qeth_reply *, 633 int (*callback)(struct qeth_card *, struct qeth_reply *,
633 unsigned long); 634 unsigned long);
634 u32 seqno; 635 u32 seqno;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d0caeebc802..6502b148541e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
544 if (reply) { 544 if (reply) {
545 refcount_set(&reply->refcnt, 1); 545 refcount_set(&reply->refcnt, 1);
546 init_completion(&reply->received); 546 init_completion(&reply->received);
547 spin_lock_init(&reply->lock);
547 } 548 }
548 return reply; 549 return reply;
549} 550}
@@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
799 800
800 if (!reply->callback) { 801 if (!reply->callback) {
801 rc = 0; 802 rc = 0;
803 goto no_callback;
804 }
805
806 spin_lock_irqsave(&reply->lock, flags);
807 if (reply->rc) {
808 /* Bail out when the requestor has already left: */
809 rc = reply->rc;
802 } else { 810 } else {
803 if (cmd) { 811 if (cmd) {
804 reply->offset = (u16)((char *)cmd - (char *)iob->data); 812 reply->offset = (u16)((char *)cmd - (char *)iob->data);
@@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
807 rc = reply->callback(card, reply, (unsigned long)iob); 815 rc = reply->callback(card, reply, (unsigned long)iob);
808 } 816 }
809 } 817 }
818 spin_unlock_irqrestore(&reply->lock, flags);
810 819
820no_callback:
811 if (rc <= 0) 821 if (rc <= 0)
812 qeth_notify_reply(reply, rc); 822 qeth_notify_reply(reply, rc);
813 qeth_put_reply(reply); 823 qeth_put_reply(reply);
@@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card,
1749 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1759 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1750 1760
1751 qeth_dequeue_reply(card, reply); 1761 qeth_dequeue_reply(card, reply);
1762
1763 if (reply_cb) {
1764 /* Wait until the callback for a late reply has completed: */
1765 spin_lock_irq(&reply->lock);
1766 if (rc)
1767 /* Zap any callback that's still pending: */
1768 reply->rc = rc;
1769 spin_unlock_irq(&reply->lock);
1770 }
1771
1752 if (!rc) 1772 if (!rc)
1753 rc = reply->rc; 1773 rc = reply->rc;
1754 qeth_put_reply(reply); 1774 qeth_put_reply(reply);
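
The qeth hunks close a race between a late reply callback and a requestor that has already timed out: the requestor records its error in reply->rc under reply->lock, and the callback path re-checks rc under the same lock and bails out instead of touching state the requestor has abandoned. A compressed pthread sketch of the handshake (serialized here for clarity; in the driver the two sides race):

#include <pthread.h>
#include <stdio.h>

struct reply {
	pthread_mutex_t lock;
	int rc;			/* nonzero: requestor already gave up */
};

static void callback_side(struct reply *r)
{
	pthread_mutex_lock(&r->lock);
	if (r->rc)		/* requestor left: drop the late reply */
		printf("late reply dropped (rc=%d)\n", r->rc);
	else
		printf("callback runs\n");
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct reply r = { PTHREAD_MUTEX_INITIALIZER, 0 };

	callback_side(&r);	/* in time: callback runs */
	pthread_mutex_lock(&r.lock);
	r.rc = -110;		/* requestor timed out (-ETIME) */
	pthread_mutex_unlock(&r.lock);
	callback_side(&r);	/* late: dropped */
	return 0;
}
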
@@ -4354,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4354 get_user(req_len, &ureq->hdr.req_len)) 4374 get_user(req_len, &ureq->hdr.req_len))
4355 return -EFAULT; 4375 return -EFAULT;
4356 4376
4377 /* Sanitize user input, to avoid overflows in iob size calculation: */
4378 if (req_len > QETH_BUFSIZE)
4379 return -EINVAL;
4380
4357 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); 4381 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4358 if (!iob) 4382 if (!iob)
4359 return -ENOMEM; 4383 return -ENOMEM;
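
The SNMP hunk is plain input sanitization: reject the user-controlled req_len before it feeds the iob size arithmetic, where an oversized value could overflow. A sketch with stand-in sizes:

#include <stdint.h>
#include <stdio.h>

#define BUFSIZE	4096u	/* stand-in for QETH_BUFSIZE */

static int snmp_cmd(uint32_t req_len)
{
	if (req_len > BUFSIZE)		/* reject before any size math */
		return -22;		/* -EINVAL */
	/* safe: header + req_len can no longer overflow the iob size */
	return 0;
}

int main(void)
{
	printf("%d %d\n", snmp_cmd(128), snmp_cmd(0xffffffffu));
	return 0;
}
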
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index abcad097ff2f..f47b4b281b14 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work)
459 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); 459 pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
460 /* Fall through */ 460 /* Fall through */
461#endif 461#endif
462 /* Fall through - only for the #else condition above. */
462 default: 463 default:
463 error = -ENXIO; 464 error = -ENXIO;
464 pr_err("unhandled device %d\n", dev->dev_type); 465 pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..bade2e025ecf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -824,6 +824,7 @@ struct lpfc_hba {
824 uint32_t cfg_cq_poll_threshold; 824 uint32_t cfg_cq_poll_threshold;
825 uint32_t cfg_cq_max_proc_limit; 825 uint32_t cfg_cq_max_proc_limit;
826 uint32_t cfg_fcp_cpu_map; 826 uint32_t cfg_fcp_cpu_map;
827 uint32_t cfg_fcp_mq_threshold;
827 uint32_t cfg_hdw_queue; 828 uint32_t cfg_hdw_queue;
828 uint32_t cfg_irq_chann; 829 uint32_t cfg_irq_chann;
829 uint32_t cfg_suppress_rsp; 830 uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..d65558619ab0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5709 "Embed NVME Command in WQE"); 5709 "Embed NVME Command in WQE");
5710 5710
5711/* 5711/*
5712 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5713 * the driver will advertise it supports to the SCSI layer.
5714 *
5715 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5716 * 1,256 = Manually specify the maximum nr_hw_queues value to be set.
5717 *
5718 * Value range is [0,256]. Default value is 8.
5719 */
5720LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5721 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5722 "Set the number of SCSI Queues advertised");
5723
5724/*
5712 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5725 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5713 * will advertise it supports to the NVME and SCSI layers. This also 5726 * will advertise it supports to the NVME and SCSI layers. This also
5714 * will map to the number of CQ/WQ pairs the driver will create. 5727 * will map to the number of CQ/WQ pairs the driver will create.
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
6030 &dev_attr_lpfc_cq_poll_threshold, 6043 &dev_attr_lpfc_cq_poll_threshold,
6031 &dev_attr_lpfc_cq_max_proc_limit, 6044 &dev_attr_lpfc_cq_max_proc_limit,
6032 &dev_attr_lpfc_fcp_cpu_map, 6045 &dev_attr_lpfc_fcp_cpu_map,
6046 &dev_attr_lpfc_fcp_mq_threshold,
6033 &dev_attr_lpfc_hdw_queue, 6047 &dev_attr_lpfc_hdw_queue,
6034 &dev_attr_lpfc_irq_chann, 6048 &dev_attr_lpfc_irq_chann,
6035 &dev_attr_lpfc_suppress_rsp, 6049 &dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
7112 /* Initialize first burst. Target vs Initiator are different. */ 7126 /* Initialize first burst. Target vs Initiator are different. */
7113 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 7127 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7114 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); 7128 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7129 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7115 lpfc_hdw_queue_init(phba, lpfc_hdw_queue); 7130 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7116 lpfc_irq_chann_init(phba, lpfc_irq_chann); 7131 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7117 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); 7132 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a7549ae32542..1ac98becb5ba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4309 shost->max_cmd_len = 16; 4309 shost->max_cmd_len = 16;
4310 4310
4311 if (phba->sli_rev == LPFC_SLI_REV4) { 4311 if (phba->sli_rev == LPFC_SLI_REV4) {
4312 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) 4312 if (!phba->cfg_fcp_mq_threshold ||
4313 shost->nr_hw_queues = phba->cfg_hdw_queue; 4313 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4314 else 4314 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4315 shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; 4315
4316 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4317 phba->cfg_fcp_mq_threshold);
4316 4318
4317 shost->dma_boundary = 4319 shost->dma_boundary =
4318 phba->sli4_hba.pc_sli4_params.sge_supp_len-1; 4320 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
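
The lpfc_create_port() hunk first clamps the new fcp_mq_threshold parameter to the hardware queue count (treating 0 as "use all"), then advertises min(2 * NUMA nodes, threshold) queues to the SCSI layer. A sketch with made-up counts:

#include <stdio.h>

static int min_i(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int hdw_queue = 16, mq_threshold = 8, nodes = 2;

	/* 0 or out-of-range threshold falls back to the HW queue count */
	if (!mq_threshold || mq_threshold > hdw_queue)
		mq_threshold = hdw_queue;

	printf("nr_hw_queues = %d\n", min_i(2 * nodes, mq_threshold));	/* 4 */
	return 0;
}
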
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..a81ef0293696 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
44#define LPFC_HBA_HDWQ_MAX 128 44#define LPFC_HBA_HDWQ_MAX 128
45#define LPFC_HBA_HDWQ_DEF 0 45#define LPFC_HBA_HDWQ_DEF 0
46 46
47/* FCP MQ queue count limiting */
48#define LPFC_FCP_MQ_THRESHOLD_MIN 0
49#define LPFC_FCP_MQ_THRESHOLD_MAX 256
50#define LPFC_FCP_MQ_THRESHOLD_DEF 8
51
47/* Common buffer size to accommodate SCSI and NVME IO buffers */ 52/* Common buffer size to accommodate SCSI and NVME IO buffers */
48#define LPFC_COMMON_IO_BUF_SZ 768 53#define LPFC_COMMON_IO_BUF_SZ 768
49 54
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8d560c562e9c..6b7b390b2e52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
2956 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, 2956 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
2957 vha->gnl.ldma); 2957 vha->gnl.ldma);
2958 2958
2959 vha->gnl.l = NULL;
2960
2959 vfree(vha->scan.l); 2961 vfree(vha->scan.l);
2960 2962
2961 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { 2963 if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2e58cff9d200..98e60a34afd9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3440,6 +3440,12 @@ skip_dpc:
3440 return 0; 3440 return 0;
3441 3441
3442probe_failed: 3442probe_failed:
3443 if (base_vha->gnl.l) {
3444 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3445 base_vha->gnl.l, base_vha->gnl.ldma);
3446 base_vha->gnl.l = NULL;
3447 }
3448
3443 if (base_vha->timer_active) 3449 if (base_vha->timer_active)
3444 qla2x00_stop_timer(base_vha); 3450 qla2x00_stop_timer(base_vha);
3445 base_vha->flags.online = 0; 3451 base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
3673 if (!atomic_read(&pdev->enable_cnt)) { 3679 if (!atomic_read(&pdev->enable_cnt)) {
3674 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, 3680 dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
3675 base_vha->gnl.l, base_vha->gnl.ldma); 3681 base_vha->gnl.l, base_vha->gnl.ldma);
3676 3682 base_vha->gnl.l = NULL;
3677 scsi_host_put(base_vha->host); 3683 scsi_host_put(base_vha->host);
3678 kfree(ha); 3684 kfree(ha);
3679 pci_set_drvdata(pdev, NULL); 3685 pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
3713 dma_free_coherent(&ha->pdev->dev, 3719 dma_free_coherent(&ha->pdev->dev,
3714 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); 3720 base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
3715 3721
3722 base_vha->gnl.l = NULL;
3723
3716 vfree(base_vha->scan.l); 3724 vfree(base_vha->scan.l);
3717 3725
3718 if (IS_QLAFX00(ha)) 3726 if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
4816 "Alloc failed for scan database.\n"); 4824 "Alloc failed for scan database.\n");
4817 dma_free_coherent(&ha->pdev->dev, vha->gnl.size, 4825 dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
4818 vha->gnl.l, vha->gnl.ldma); 4826 vha->gnl.l, vha->gnl.ldma);
4827 vha->gnl.l = NULL;
4819 scsi_remove_host(vha->host); 4828 scsi_remove_host(vha->host);
4820 return NULL; 4829 return NULL;
4821 } 4830 }
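
Every qla2xxx hunk above pairs dma_free_coherent() with vha->gnl.l = NULL, the standard guard against a double free when teardown is reachable from several paths. A minimal sketch:

#include <stdio.h>
#include <stdlib.h>

struct host {
	void *gnl;
};

static void free_gnl(struct host *h)
{
	if (h->gnl) {		/* second call becomes a harmless no-op */
		free(h->gnl);
		h->gnl = NULL;
	}
}

int main(void)
{
	struct host h = { malloc(64) };

	free_gnl(&h);		/* probe failure path */
	free_gnl(&h);		/* remove path runs later: no double free */
	printf("gnl=%p\n", h.gnl);
	return 0;
}
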
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e274053109d0..029da74bb2f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
7062static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 7062static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
7063 struct ufs_vreg *vreg) 7063 struct ufs_vreg *vreg)
7064{ 7064{
7065 if (!vreg)
7066 return 0;
7067
7065 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 7068 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
7066} 7069}
7067 7070
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
index de2e62c3310a..e3eb19b85fa4 100644
--- a/drivers/soc/ixp4xx/Kconfig
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -1,4 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2if ARCH_IXP4XX || COMPILE_TEST
3
2menu "IXP4xx SoC drivers" 4menu "IXP4xx SoC drivers"
3 5
4config IXP4XX_QMGR 6config IXP4XX_QMGR
@@ -15,3 +17,5 @@ config IXP4XX_NPE
15 and is automatically selected by Ethernet and HSS drivers. 17 and is automatically selected by Ethernet and HSS drivers.
16 18
17endmenu 19endmenu
20
21endif
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index d5cf953b4337..7d622ea1274e 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
630 struct geni_wrapper *wrapper = se->wrapper; 630 struct geni_wrapper *wrapper = se->wrapper;
631 u32 val; 631 u32 val;
632 632
633 if (!wrapper)
634 return -EINVAL;
635
633 *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE); 636 *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
634 if (dma_mapping_error(wrapper->dev, *iova)) 637 if (dma_mapping_error(wrapper->dev, *iova))
635 return -EIO; 638 return -EIO;
@@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
663 struct geni_wrapper *wrapper = se->wrapper; 666 struct geni_wrapper *wrapper = se->wrapper;
664 u32 val; 667 u32 val;
665 668
669 if (!wrapper)
670 return -EINVAL;
671
666 *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE); 672 *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
667 if (dma_mapping_error(wrapper->dev, *iova)) 673 if (dma_mapping_error(wrapper->dev, *iova))
668 return -EIO; 674 return -EIO;
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index bb77c220b6f8..ccc6d53fe788 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void)
141} 141}
142 142
143#ifdef CONFIG_SUSPEND 143#ifdef CONFIG_SUSPEND
144struct wkup_m3_wakeup_src rtc_wake_src(void) 144static struct wkup_m3_wakeup_src rtc_wake_src(void)
145{ 145{
146 u32 i; 146 u32 i;
147 147
@@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void)
157 return rtc_ext_wakeup; 157 return rtc_ext_wakeup;
158} 158}
159 159
160int am33xx_rtc_only_idle(unsigned long wfi_flags) 160static int am33xx_rtc_only_idle(unsigned long wfi_flags)
161{ 161{
162 omap_rtc_power_off_program(&omap_rtc->dev); 162 omap_rtc_power_off_program(&omap_rtc->dev);
163 am33xx_do_wfi_sram(wfi_flags); 163 am33xx_do_wfi_sram(wfi_flags);
@@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state)
252 if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) { 252 if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
253 nvmem = devm_nvmem_device_get(&omap_rtc->dev, 253 nvmem = devm_nvmem_device_get(&omap_rtc->dev,
254 "omap_rtc_scratch0"); 254 "omap_rtc_scratch0");
255 if (nvmem) 255 if (!IS_ERR(nvmem))
256 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, 256 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
257 (void *)&rtc_magic_val); 257 (void *)&rtc_magic_val);
258 rtc_only_idle = 1; 258 rtc_only_idle = 1;
@@ -278,9 +278,12 @@ static void am33xx_pm_end(void)
278 struct nvmem_device *nvmem; 278 struct nvmem_device *nvmem;
279 279
280 nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0"); 280 nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
281 if (IS_ERR(nvmem))
282 return;
283
281 m3_ipc->ops->finish_low_power(m3_ipc); 284 m3_ipc->ops->finish_low_power(m3_ipc);
282 if (rtc_only_idle) { 285 if (rtc_only_idle) {
283 if (retrigger_irq) 286 if (retrigger_irq) {
284 /* 287 /*
285 * 32 bits of Interrupt Set-Pending correspond to 32 288 * 32 bits of Interrupt Set-Pending correspond to 32
286 * 32 interrupts. Compute the bit offset of the 289 * 32 interrupts. Compute the bit offset of the
@@ -291,8 +294,10 @@ static void am33xx_pm_end(void)
291 writel_relaxed(1 << (retrigger_irq & 31), 294 writel_relaxed(1 << (retrigger_irq & 31),
292 gic_dist_base + GIC_INT_SET_PENDING_BASE 295 gic_dist_base + GIC_INT_SET_PENDING_BASE
293 + retrigger_irq / 32 * 4); 296 + retrigger_irq / 32 * 4);
294 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4, 297 }
295 (void *)&val); 298
299 nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
300 (void *)&val);
296 } 301 }
297 302
298 rtc_only_idle = 0; 303 rtc_only_idle = 0;
@@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void)
415 420
416 nvmem = devm_nvmem_device_get(&omap_rtc->dev, 421 nvmem = devm_nvmem_device_get(&omap_rtc->dev,
417 "omap_rtc_scratch0"); 422 "omap_rtc_scratch0");
418 if (nvmem) { 423 if (!IS_ERR(nvmem)) {
419 nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 424 nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
420 4, (void *)&rtc_magic_val); 425 4, (void *)&rtc_magic_val);
421 if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC) 426 if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
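
All three pm33xx fixes hinge on the same convention: devm_nvmem_device_get() reports failure with ERR_PTR(), never NULL, so "if (nvmem)" was always true. A userspace model of why the NULL test cannot fire but IS_ERR() does:

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(e)	((void *)(long)(e))
#define IS_ERR(p)	((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static void *get_device(int fail)
{
	static int dev;

	return fail ? ERR_PTR(-19) : &dev;	/* -ENODEV on failure */
}

int main(void)
{
	void *nvmem = get_device(1);

	printf("NULL check fires: %d\n", nvmem == NULL);	/* 0: the bug */
	printf("IS_ERR fires:     %d\n", IS_ERR(nvmem));	/* 1: the fix */
	return 0;
}
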
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 04eda111920e..661bb9358364 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1132 struct se_cmd *se_cmd = cmd->se_cmd; 1132 struct se_cmd *se_cmd = cmd->se_cmd;
1133 struct tcmu_dev *udev = cmd->tcmu_dev; 1133 struct tcmu_dev *udev = cmd->tcmu_dev;
1134 bool read_len_valid = false; 1134 bool read_len_valid = false;
1135 uint32_t read_len = se_cmd->data_length; 1135 uint32_t read_len;
1136 1136
1137 /* 1137 /*
1138 * cmd has been completed already from timeout, just reclaim 1138 * cmd has been completed already from timeout, just reclaim
1139 * data area space and free cmd 1139 * data area space and free cmd
1140 */ 1140 */
1141 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1141 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1142 WARN_ON_ONCE(se_cmd);
1142 goto out; 1143 goto out;
1144 }
1143 1145
1144 list_del_init(&cmd->queue_entry); 1146 list_del_init(&cmd->queue_entry);
1145 1147
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1152 goto done; 1154 goto done;
1153 } 1155 }
1154 1156
1157 read_len = se_cmd->data_length;
1155 if (se_cmd->data_direction == DMA_FROM_DEVICE && 1158 if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1156 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { 1159 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1157 read_len_valid = true; 1160 read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1307 */ 1310 */
1308 scsi_status = SAM_STAT_CHECK_CONDITION; 1311 scsi_status = SAM_STAT_CHECK_CONDITION;
1309 list_del_init(&cmd->queue_entry); 1312 list_del_init(&cmd->queue_entry);
1313 cmd->se_cmd = NULL;
1310 } else { 1314 } else {
1311 list_del_init(&cmd->queue_entry); 1315 list_del_init(&cmd->queue_entry);
1312 idr_remove(&udev->commands, id); 1316 idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2022 2026
2023 idr_remove(&udev->commands, i); 2027 idr_remove(&udev->commands, i);
2024 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2028 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2029 WARN_ON(!cmd->se_cmd);
2025 list_del_init(&cmd->queue_entry); 2030 list_del_init(&cmd->queue_entry);
2026 if (err_level == 1) { 2031 if (err_level == 1) {
2027 /* 2032 /*
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 6a5ee8e6da10..67ad40b0a05b 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
709 struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget); 709 struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
710 unsigned long flags; 710 unsigned long flags;
711 711
712 spin_lock_irqsave(&ci->lock, flags);
713 ci->gadget.speed = USB_SPEED_UNKNOWN;
714 ci->remote_wakeup = 0;
715 ci->suspended = 0;
716 spin_unlock_irqrestore(&ci->lock, flags);
717
718 /* flush all endpoints */ 712 /* flush all endpoints */
719 gadget_for_each_ep(ep, gadget) { 713 gadget_for_each_ep(ep, gadget) {
720 usb_ep_fifo_flush(ep); 714 usb_ep_fifo_flush(ep);
@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
732 ci->status = NULL; 726 ci->status = NULL;
733 } 727 }
734 728
729 spin_lock_irqsave(&ci->lock, flags);
730 ci->gadget.speed = USB_SPEED_UNKNOWN;
731 ci->remote_wakeup = 0;
732 ci->suspended = 0;
733 spin_unlock_irqrestore(&ci->lock, flags);
734
735 return 0; 735 return 0;
736} 736}
737 737
@@ -1303,6 +1303,10 @@ static int ep_disable(struct usb_ep *ep)
1303 return -EBUSY; 1303 return -EBUSY;
1304 1304
1305 spin_lock_irqsave(hwep->lock, flags); 1305 spin_lock_irqsave(hwep->lock, flags);
1306 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1307 spin_unlock_irqrestore(hwep->lock, flags);
1308 return 0;
1309 }
1306 1310
1307 /* only internal SW should disable ctrl endpts */ 1311 /* only internal SW should disable ctrl endpts */
1308 1312
@@ -1392,6 +1396,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1392 return -EINVAL; 1396 return -EINVAL;
1393 1397
1394 spin_lock_irqsave(hwep->lock, flags); 1398 spin_lock_irqsave(hwep->lock, flags);
1399 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1400 spin_unlock_irqrestore(hwep->lock, flags);
1401 return 0;
1402 }
1395 retval = _ep_queue(ep, req, gfp_flags); 1403 retval = _ep_queue(ep, req, gfp_flags);
1396 spin_unlock_irqrestore(hwep->lock, flags); 1404 spin_unlock_irqrestore(hwep->lock, flags);
1397 return retval; 1405 return retval;
@@ -1415,8 +1423,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1415 return -EINVAL; 1423 return -EINVAL;
1416 1424
1417 spin_lock_irqsave(hwep->lock, flags); 1425 spin_lock_irqsave(hwep->lock, flags);
1418 1426 if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1419 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); 1427 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1420 1428
1421 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { 1429 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1422 dma_pool_free(hwep->td_pool, node->ptr, node->dma); 1430 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
@@ -1487,6 +1495,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
1487 } 1495 }
1488 1496
1489 spin_lock_irqsave(hwep->lock, flags); 1497 spin_lock_irqsave(hwep->lock, flags);
1498 if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1499 spin_unlock_irqrestore(hwep->lock, flags);
1500 return;
1501 }
1490 1502
1491 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); 1503 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1492 1504
@@ -1559,6 +1571,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
1559 int ret = 0; 1571 int ret = 0;
1560 1572
1561 spin_lock_irqsave(&ci->lock, flags); 1573 spin_lock_irqsave(&ci->lock, flags);
1574 if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1575 spin_unlock_irqrestore(&ci->lock, flags);
1576 return 0;
1577 }
1562 if (!ci->remote_wakeup) { 1578 if (!ci->remote_wakeup) {
1563 ret = -EOPNOTSUPP; 1579 ret = -EOPNOTSUPP;
1564 goto out; 1580 goto out;
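
The chipidea hunks all enforce one rule: once the gadget has been torn down its speed is USB_SPEED_UNKNOWN, and every endpoint operation plus ci_udc_wakeup() must notice that under the lock and return without touching the hardware (ep_dequeue still releases the request but skips the hardware flush). Note the reorder in _gadget_stop_activity(): the speed is reset to UNKNOWN only after the endpoints have been flushed, otherwise the new guard would turn the flush itself into a no-op. Sketch of the guard shape:

    /* Bail out of endpoint ops once the controller is disconnected. */
    spin_lock_irqsave(hwep->lock, flags);
    if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
        spin_unlock_irqrestore(hwep->lock, flags);
        return 0;       /* nothing left to do for a dead gadget */
    }
    /* ... hardware access is only safe while a speed is negotiated ... */
    spin_unlock_irqrestore(hwep->lock, flags);
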
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index a7824a51f86d..70afb2ca1eab 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -587,10 +587,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
587{ 587{
588 struct wdm_device *desc = file->private_data; 588 struct wdm_device *desc = file->private_data;
589 589
590 wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); 590 wait_event(desc->wait,
591 /*
592 * needs both flags. We cannot do with one
593 * because resetting it would cause a race
594 * with write() yet we need to signal
595 * a disconnect
596 */
597 !test_bit(WDM_IN_USE, &desc->flags) ||
598 test_bit(WDM_DISCONNECTING, &desc->flags));
591 599
592 /* cannot dereference desc->intf if WDM_DISCONNECTING */ 600 /* cannot dereference desc->intf if WDM_DISCONNECTING */
593 if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) 601 if (test_bit(WDM_DISCONNECTING, &desc->flags))
602 return -ENODEV;
603 if (desc->werr < 0)
594 dev_err(&desc->intf->dev, "Error in flush path: %d\n", 604 dev_err(&desc->intf->dev, "Error in flush path: %d\n",
595 desc->werr); 605 desc->werr);
596 606
@@ -974,8 +984,6 @@ static void wdm_disconnect(struct usb_interface *intf)
974 spin_lock_irqsave(&desc->iuspin, flags); 984 spin_lock_irqsave(&desc->iuspin, flags);
975 set_bit(WDM_DISCONNECTING, &desc->flags); 985 set_bit(WDM_DISCONNECTING, &desc->flags);
976 set_bit(WDM_READ, &desc->flags); 986 set_bit(WDM_READ, &desc->flags);
977 /* to terminate pending flushes */
978 clear_bit(WDM_IN_USE, &desc->flags);
979 spin_unlock_irqrestore(&desc->iuspin, flags); 987 spin_unlock_irqrestore(&desc->iuspin, flags);
980 wake_up_all(&desc->wait); 988 wake_up_all(&desc->wait);
981 mutex_lock(&desc->rlock); 989 mutex_lock(&desc->rlock);
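
The cdc-wdm change replaces a racy wake-up with a race-free wait condition: disconnect used to clear WDM_IN_USE just to kick pending flushes, which could collide with a concurrent write() setting the same bit; now wdm_flush() itself wakes on either completion or disconnect, and checks WDM_DISCONNECTING before dereferencing desc->intf. The essential shape:

    /* Wake on completion *or* disconnect; test disconnect first, since
     * desc->intf must not be dereferenced once the device is gone. */
    wait_event(desc->wait,
               !test_bit(WDM_IN_USE, &desc->flags) ||
               test_bit(WDM_DISCONNECTING, &desc->flags));
    if (test_bit(WDM_DISCONNECTING, &desc->flags))
        return -ENODEV;
    if (desc->werr < 0)
        dev_err(&desc->intf->dev, "Error in flush path: %d\n", desc->werr);
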
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 4942122b2346..36858ddd8d9b 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -2362,8 +2362,11 @@ static int usbtmc_probe(struct usb_interface *intf,
2362 goto err_put; 2362 goto err_put;
2363 } 2363 }
2364 2364
2365 retcode = -EINVAL;
2365 data->bulk_in = bulk_in->bEndpointAddress; 2366 data->bulk_in = bulk_in->bEndpointAddress;
2366 data->wMaxPacketSize = usb_endpoint_maxp(bulk_in); 2367 data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
2368 if (!data->wMaxPacketSize)
2369 goto err_put;
2367 dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in); 2370 dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
2368 2371
2369 data->bulk_out = bulk_out->bEndpointAddress; 2372 data->bulk_out = bulk_out->bEndpointAddress;
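
usbtmc_probe() now rejects a bulk-in endpoint whose wMaxPacketSize is zero with -EINVAL. A descriptor under the device's control should never be trusted to be nonzero; later transfer-sizing code plausibly uses the value as a step size or divisor, so letting zero through invites a crash. The guard in isolation (direct return instead of the driver's goto-based unwind):

    data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
    if (!data->wMaxPacketSize)
        return -EINVAL;     /* zero maxp would poison later arithmetic */
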
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 03432467b05f..7537681355f6 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -216,17 +216,18 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
216 /* EHCI, OHCI */ 216 /* EHCI, OHCI */
217 hcd->rsrc_start = pci_resource_start(dev, 0); 217 hcd->rsrc_start = pci_resource_start(dev, 0);
218 hcd->rsrc_len = pci_resource_len(dev, 0); 218 hcd->rsrc_len = pci_resource_len(dev, 0);
219 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, 219 if (!devm_request_mem_region(&dev->dev, hcd->rsrc_start,
220 driver->description)) { 220 hcd->rsrc_len, driver->description)) {
221 dev_dbg(&dev->dev, "controller already in use\n"); 221 dev_dbg(&dev->dev, "controller already in use\n");
222 retval = -EBUSY; 222 retval = -EBUSY;
223 goto put_hcd; 223 goto put_hcd;
224 } 224 }
225 hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); 225 hcd->regs = devm_ioremap_nocache(&dev->dev, hcd->rsrc_start,
226 hcd->rsrc_len);
226 if (hcd->regs == NULL) { 227 if (hcd->regs == NULL) {
227 dev_dbg(&dev->dev, "error mapping memory\n"); 228 dev_dbg(&dev->dev, "error mapping memory\n");
228 retval = -EFAULT; 229 retval = -EFAULT;
229 goto release_mem_region; 230 goto put_hcd;
230 } 231 }
231 232
232 } else { 233 } else {
@@ -240,8 +241,8 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
240 241
241 hcd->rsrc_start = pci_resource_start(dev, region); 242 hcd->rsrc_start = pci_resource_start(dev, region);
242 hcd->rsrc_len = pci_resource_len(dev, region); 243 hcd->rsrc_len = pci_resource_len(dev, region);
243 if (request_region(hcd->rsrc_start, hcd->rsrc_len, 244 if (devm_request_region(&dev->dev, hcd->rsrc_start,
244 driver->description)) 245 hcd->rsrc_len, driver->description))
245 break; 246 break;
246 } 247 }
247 if (region == PCI_ROM_RESOURCE) { 248 if (region == PCI_ROM_RESOURCE) {
@@ -275,20 +276,13 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
275 } 276 }
276 277
277 if (retval != 0) 278 if (retval != 0)
278 goto unmap_registers; 279 goto put_hcd;
279 device_wakeup_enable(hcd->self.controller); 280 device_wakeup_enable(hcd->self.controller);
280 281
281 if (pci_dev_run_wake(dev)) 282 if (pci_dev_run_wake(dev))
282 pm_runtime_put_noidle(&dev->dev); 283 pm_runtime_put_noidle(&dev->dev);
283 return retval; 284 return retval;
284 285
285unmap_registers:
286 if (driver->flags & HCD_MEMORY) {
287 iounmap(hcd->regs);
288release_mem_region:
289 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
290 } else
291 release_region(hcd->rsrc_start, hcd->rsrc_len);
292put_hcd: 286put_hcd:
293 usb_put_hcd(hcd); 287 usb_put_hcd(hcd);
294disable_pci: 288disable_pci:
@@ -347,14 +341,6 @@ void usb_hcd_pci_remove(struct pci_dev *dev)
347 dev_set_drvdata(&dev->dev, NULL); 341 dev_set_drvdata(&dev->dev, NULL);
348 up_read(&companions_rwsem); 342 up_read(&companions_rwsem);
349 } 343 }
350
351 if (hcd->driver->flags & HCD_MEMORY) {
352 iounmap(hcd->regs);
353 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
354 } else {
355 release_region(hcd->rsrc_start, hcd->rsrc_len);
356 }
357
358 usb_put_hcd(hcd); 344 usb_put_hcd(hcd);
359 pci_disable_device(dev); 345 pci_disable_device(dev);
360} 346}
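
The hcd-pci conversion moves the MMIO claim to device-managed helpers: devm_request_mem_region()/devm_ioremap_nocache() (and devm_request_region() for port I/O) tie the resources to &dev->dev, so they are released automatically when probe fails or the device goes away. That is why the error ladder collapses to a plain `goto put_hcd` and usb_hcd_pci_remove() loses its iounmap()/release_*region() block entirely. The managed idiom, sketched with hypothetical pdev/start/len names (devm_ioremap_nocache() was later absorbed into devm_ioremap() in newer kernels):

    /* Devres-managed MMIO claim: no explicit unwind path required. */
    if (!devm_request_mem_region(&pdev->dev, start, len, "my-hcd"))
        return -EBUSY;      /* region already claimed elsewhere */
    regs = devm_ioremap_nocache(&pdev->dev, start, len);
    if (!regs)
        return -EFAULT;     /* unmapped automatically with the device */
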
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 5f1b14f3e5a0..bb6af6b5ac97 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -2265,7 +2265,7 @@ static void udc_handle_ep0_setup(struct lpc32xx_udc *udc)
2265 default: 2265 default:
2266 break; 2266 break;
2267 } 2267 }
2268 2268 break;
2269 2269
2270 case USB_REQ_SET_ADDRESS: 2270 case USB_REQ_SET_ADDRESS:
2271 if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) { 2271 if (reqtype == (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
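
The lpc32xx change adds a single missing `break;`: the preceding outer-switch case ended with a nested switch, so after the inner `default: break;` control ran straight into the USB_REQ_SET_ADDRESS case below. A generic sketch of the trap, with placeholder names (req, sub, REQ_A, REQ_B, handle_b):

    switch (req) {
    case REQ_A:
        switch (sub) {
        default:
            break;      /* terminates the inner switch only */
        }
        break;          /* the fix: without this we fall into REQ_B */
    case REQ_B:
        handle_b();
        break;
    }
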
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b457fdaff297..1fe3deec35cf 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -419,8 +419,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
419 * other cases where the next software may expect clean state from the 419 * other cases where the next software may expect clean state from the
420 * "firmware". this is bus-neutral, unlike shutdown() methods. 420 * "firmware". this is bus-neutral, unlike shutdown() methods.
421 */ 421 */
422static void 422static void _ohci_shutdown(struct usb_hcd *hcd)
423ohci_shutdown (struct usb_hcd *hcd)
424{ 423{
425 struct ohci_hcd *ohci; 424 struct ohci_hcd *ohci;
426 425
@@ -436,6 +435,16 @@ ohci_shutdown (struct usb_hcd *hcd)
436 ohci->rh_state = OHCI_RH_HALTED; 435 ohci->rh_state = OHCI_RH_HALTED;
437} 436}
438 437
438static void ohci_shutdown(struct usb_hcd *hcd)
439{
440 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
441 unsigned long flags;
442
443 spin_lock_irqsave(&ohci->lock, flags);
444 _ohci_shutdown(hcd);
445 spin_unlock_irqrestore(&ohci->lock, flags);
446}
447
439/*-------------------------------------------------------------------------* 448/*-------------------------------------------------------------------------*
440 * HC functions 449 * HC functions
441 *-------------------------------------------------------------------------*/ 450 *-------------------------------------------------------------------------*/
@@ -760,7 +769,7 @@ static void io_watchdog_func(struct timer_list *t)
760 died: 769 died:
761 usb_hc_died(ohci_to_hcd(ohci)); 770 usb_hc_died(ohci_to_hcd(ohci));
762 ohci_dump(ohci); 771 ohci_dump(ohci);
763 ohci_shutdown(ohci_to_hcd(ohci)); 772 _ohci_shutdown(ohci_to_hcd(ohci));
764 goto done; 773 goto done;
765 } else { 774 } else {
766 /* No write back because the done queue was empty */ 775 /* No write back because the done queue was empty */
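
The OHCI fix splits shutdown in two: _ohci_shutdown() does the actual controller reset and assumes the caller holds ohci->lock, while the ohci_shutdown() entry point used by the HCD core takes the lock around it. io_watchdog_func() already runs under ohci->lock, so it now calls the bare helper; calling the locking wrapper from there would self-deadlock. The pattern, with hypothetical my_dev/halt_hardware names:

    /* Lock-free core for callers that already hold the lock... */
    static void _dev_shutdown(struct my_dev *d)
    {
        halt_hardware(d);       /* caller must hold d->lock */
    }

    /* ...and a locking wrapper for everyone else. */
    static void dev_shutdown(struct my_dev *d)
    {
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        _dev_shutdown(d);
        spin_unlock_irqrestore(&d->lock, flags);
    }
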
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 8616c52849c6..2b0ccd150209 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -104,7 +104,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
104 return of_device_is_compatible(node, "renesas,xhci-r8a7790") || 104 return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
105 of_device_is_compatible(node, "renesas,xhci-r8a7791") || 105 of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
106 of_device_is_compatible(node, "renesas,xhci-r8a7793") || 106 of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
107 of_device_is_compatible(node, "renensas,rcar-gen2-xhci"); 107 of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
108} 108}
109 109
110static int xhci_rcar_is_gen3(struct device *dev) 110static int xhci_rcar_is_gen3(struct device *dev)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index dafc65911fc0..2ff7c911fbd0 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1194,6 +1194,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
1194 1194
1195 tegra_xusb_config(tegra, regs); 1195 tegra_xusb_config(tegra, regs);
1196 1196
1197 /*
1198 * The XUSB Falcon microcontroller can only address 40 bits, so set
1199 * the DMA mask accordingly.
1200 */
1201 err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40));
1202 if (err < 0) {
1203 dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1204 goto put_rpm;
1205 }
1206
1197 err = tegra_xusb_load_firmware(tegra); 1207 err = tegra_xusb_load_firmware(tegra);
1198 if (err < 0) { 1208 if (err < 0) {
1199 dev_err(&pdev->dev, "failed to load firmware: %d\n", err); 1209 dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
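
The Tegra hunk is self-describing: the XUSB Falcon microcontroller decodes only 40 address bits, so the probe constrains the streaming and coherent DMA masks before any firmware buffer is allocated; without the cap, a buffer placed above 2^40 would presumably be silently truncated by the device. For reference, the combined helper is roughly shorthand for setting both masks:

    /* Roughly what dma_set_mask_and_coherent() does internally. */
    err = dma_set_mask(dev, DMA_BIT_MASK(40));
    if (!err)
        dma_set_coherent_mask(dev, DMA_BIT_MASK(40));
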
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index cc794e25a0b6..1d9ce9cbc831 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -38,7 +38,7 @@ MODULE_LICENSE("GPL");
38 38
39static int auto_delink_en = 1; 39static int auto_delink_en = 1;
40module_param(auto_delink_en, int, S_IRUGO | S_IWUSR); 40module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
41MODULE_PARM_DESC(auto_delink_en, "enable auto delink"); 41MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
42 42
43#ifdef CONFIG_REALTEK_AUTOPM 43#ifdef CONFIG_REALTEK_AUTOPM
44static int ss_en = 1; 44static int ss_en = 1;
@@ -996,12 +996,15 @@ static int init_realtek_cr(struct us_data *us)
996 goto INIT_FAIL; 996 goto INIT_FAIL;
997 } 997 }
998 998
999 if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) || 999 if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
1000 CHECK_FW_VER(chip, 0x5901)) 1000 CHECK_PID(chip, 0x0159)) {
1001 SET_AUTO_DELINK(chip); 1001 if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1002 if (STATUS_LEN(chip) == 16) { 1002 CHECK_FW_VER(chip, 0x5901))
1003 if (SUPPORT_AUTO_DELINK(chip))
1004 SET_AUTO_DELINK(chip); 1003 SET_AUTO_DELINK(chip);
1004 if (STATUS_LEN(chip) == 16) {
1005 if (SUPPORT_AUTO_DELINK(chip))
1006 SET_AUTO_DELINK(chip);
1007 }
1005 } 1008 }
1006#ifdef CONFIG_REALTEK_AUTOPM 1009#ifdef CONFIG_REALTEK_AUTOPM
1007 if (ss_en) 1010 if (ss_en)
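
init_realtek_cr() used to enable auto-delink from the firmware version alone; the hunk nests those checks inside an allow-list of known product IDs, so the feature can no longer be switched on for unrecognized Realtek readers, and the module-parameter description now states what 0 and 1 actually select. The gating, condensed (fw_checks() is a placeholder for the unchanged version/status tests):

    /* Only devices on the known-PID allow-list may auto-delink. */
    if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
        CHECK_PID(chip, 0x0159)) {
        if (fw_checks(chip))
            SET_AUTO_DELINK(chip);
    }
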
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ea0d27a94afe..1cd9b6305b06 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2100,7 +2100,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
2100 US_FL_IGNORE_RESIDUE ), 2100 US_FL_IGNORE_RESIDUE ),
2101 2101
2102/* Reported by Michael Büsch <m@bues.ch> */ 2102/* Reported by Michael Büsch <m@bues.ch> */
2103UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, 2103UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
2104 "JMicron", 2104 "JMicron",
2105 "USB to ATA/ATAPI Bridge", 2105 "USB to ATA/ATAPI Bridge",
2106 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2106 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 15abe1d9958f..bcfdb55fd198 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1446,7 +1446,7 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
1446 else if ((pdo_min_voltage(pdo[i]) == 1446 else if ((pdo_min_voltage(pdo[i]) ==
1447 pdo_min_voltage(pdo[i - 1])) && 1447 pdo_min_voltage(pdo[i - 1])) &&
1448 (pdo_max_voltage(pdo[i]) == 1448 (pdo_max_voltage(pdo[i]) ==
1449 pdo_min_voltage(pdo[i - 1]))) 1449 pdo_max_voltage(pdo[i - 1])))
1450 return PDO_ERR_DUPE_PDO; 1450 return PDO_ERR_DUPE_PDO;
1451 break; 1451 break;
1452 /* 1452 /*
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 9e90e969af55..7804869c6a31 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -22,6 +22,12 @@
22 * Using this limit prevents one virtqueue from starving others. */ 22 * Using this limit prevents one virtqueue from starving others. */
23#define VHOST_TEST_WEIGHT 0x80000 23#define VHOST_TEST_WEIGHT 0x80000
24 24
25/* Max number of packets transferred before requeueing the job.
26 * Using this limit prevents one virtqueue from starving others with
27 * pkts.
28 */
29#define VHOST_TEST_PKT_WEIGHT 256
30
25enum { 31enum {
26 VHOST_TEST_VQ = 0, 32 VHOST_TEST_VQ = 0,
27 VHOST_TEST_VQ_MAX = 1, 33 VHOST_TEST_VQ_MAX = 1,
@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
80 } 86 }
81 vhost_add_used_and_signal(&n->dev, vq, head, 0); 87 vhost_add_used_and_signal(&n->dev, vq, head, 0);
82 total_len += len; 88 total_len += len;
83 if (unlikely(total_len >= VHOST_TEST_WEIGHT)) { 89 if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
84 vhost_poll_queue(&vq->poll);
85 break; 90 break;
86 }
87 } 91 }
88 92
89 mutex_unlock(&vq->mutex); 93 mutex_unlock(&vq->mutex);
@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
115 dev = &n->dev; 119 dev = &n->dev;
116 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; 120 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
117 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; 121 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
118 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); 122 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
123 VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
119 124
120 f->private_data = n; 125 f->private_data = n;
121 126
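
vhost/test.c is moved onto the common fairness machinery: instead of open-coding a byte budget against VHOST_TEST_WEIGHT and requeueing its own poll work, the loop asks vhost_exceeds_weight(), and vhost_dev_init() now also takes the iov limit plus packet and byte weights so the core can do the requeueing. Sketch of the loop shape (this driver passes 0 for the packet count; a real net device would count packets too):

    for (;;) {
        /* ... consume one buffer, accumulating total_len ... */
        if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
            break;      /* the core has already requeued vq->poll */
    }
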
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 0536f8526359..5dc174ac8cac 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
203int vhost_poll_start(struct vhost_poll *poll, struct file *file) 203int vhost_poll_start(struct vhost_poll *poll, struct file *file)
204{ 204{
205 __poll_t mask; 205 __poll_t mask;
206 int ret = 0;
207 206
208 if (poll->wqh) 207 if (poll->wqh)
209 return 0; 208 return 0;
@@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
213 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); 212 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214 if (mask & EPOLLERR) { 213 if (mask & EPOLLERR) {
215 vhost_poll_stop(poll); 214 vhost_poll_stop(poll);
216 ret = -EINVAL; 215 return -EINVAL;
217 } 216 }
218 217
219 return ret; 218 return 0;
220} 219}
221EXPORT_SYMBOL_GPL(vhost_poll_start); 220EXPORT_SYMBOL_GPL(vhost_poll_start);
222 221
@@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
298 __vhost_vq_meta_reset(d->vqs[i]); 297 __vhost_vq_meta_reset(d->vqs[i]);
299} 298}
300 299
301#if VHOST_ARCH_CAN_ACCEL_UACCESS
302static void vhost_map_unprefetch(struct vhost_map *map)
303{
304 kfree(map->pages);
305 map->pages = NULL;
306 map->npages = 0;
307 map->addr = NULL;
308}
309
310static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
311{
312 struct vhost_map *map[VHOST_NUM_ADDRS];
313 int i;
314
315 spin_lock(&vq->mmu_lock);
316 for (i = 0; i < VHOST_NUM_ADDRS; i++) {
317 map[i] = rcu_dereference_protected(vq->maps[i],
318 lockdep_is_held(&vq->mmu_lock));
319 if (map[i])
320 rcu_assign_pointer(vq->maps[i], NULL);
321 }
322 spin_unlock(&vq->mmu_lock);
323
324 synchronize_rcu();
325
326 for (i = 0; i < VHOST_NUM_ADDRS; i++)
327 if (map[i])
328 vhost_map_unprefetch(map[i]);
329
330}
331
332static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
333{
334 int i;
335
336 vhost_uninit_vq_maps(vq);
337 for (i = 0; i < VHOST_NUM_ADDRS; i++)
338 vq->uaddrs[i].size = 0;
339}
340
341static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
342 unsigned long start,
343 unsigned long end)
344{
345 if (unlikely(!uaddr->size))
346 return false;
347
348 return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
349}
350
351static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
352 int index,
353 unsigned long start,
354 unsigned long end)
355{
356 struct vhost_uaddr *uaddr = &vq->uaddrs[index];
357 struct vhost_map *map;
358 int i;
359
360 if (!vhost_map_range_overlap(uaddr, start, end))
361 return;
362
363 spin_lock(&vq->mmu_lock);
364 ++vq->invalidate_count;
365
366 map = rcu_dereference_protected(vq->maps[index],
367 lockdep_is_held(&vq->mmu_lock));
368 if (map) {
369 if (uaddr->write) {
370 for (i = 0; i < map->npages; i++)
371 set_page_dirty(map->pages[i]);
372 }
373 rcu_assign_pointer(vq->maps[index], NULL);
374 }
375 spin_unlock(&vq->mmu_lock);
376
377 if (map) {
378 synchronize_rcu();
379 vhost_map_unprefetch(map);
380 }
381}
382
383static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
384 int index,
385 unsigned long start,
386 unsigned long end)
387{
388 if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
389 return;
390
391 spin_lock(&vq->mmu_lock);
392 --vq->invalidate_count;
393 spin_unlock(&vq->mmu_lock);
394}
395
396static int vhost_invalidate_range_start(struct mmu_notifier *mn,
397 const struct mmu_notifier_range *range)
398{
399 struct vhost_dev *dev = container_of(mn, struct vhost_dev,
400 mmu_notifier);
401 int i, j;
402
403 if (!mmu_notifier_range_blockable(range))
404 return -EAGAIN;
405
406 for (i = 0; i < dev->nvqs; i++) {
407 struct vhost_virtqueue *vq = dev->vqs[i];
408
409 for (j = 0; j < VHOST_NUM_ADDRS; j++)
410 vhost_invalidate_vq_start(vq, j,
411 range->start,
412 range->end);
413 }
414
415 return 0;
416}
417
418static void vhost_invalidate_range_end(struct mmu_notifier *mn,
419 const struct mmu_notifier_range *range)
420{
421 struct vhost_dev *dev = container_of(mn, struct vhost_dev,
422 mmu_notifier);
423 int i, j;
424
425 for (i = 0; i < dev->nvqs; i++) {
426 struct vhost_virtqueue *vq = dev->vqs[i];
427
428 for (j = 0; j < VHOST_NUM_ADDRS; j++)
429 vhost_invalidate_vq_end(vq, j,
430 range->start,
431 range->end);
432 }
433}
434
435static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
436 .invalidate_range_start = vhost_invalidate_range_start,
437 .invalidate_range_end = vhost_invalidate_range_end,
438};
439
440static void vhost_init_maps(struct vhost_dev *dev)
441{
442 struct vhost_virtqueue *vq;
443 int i, j;
444
445 dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
446
447 for (i = 0; i < dev->nvqs; ++i) {
448 vq = dev->vqs[i];
449 for (j = 0; j < VHOST_NUM_ADDRS; j++)
450 RCU_INIT_POINTER(vq->maps[j], NULL);
451 }
452}
453#endif
454
455static void vhost_vq_reset(struct vhost_dev *dev, 300static void vhost_vq_reset(struct vhost_dev *dev,
456 struct vhost_virtqueue *vq) 301 struct vhost_virtqueue *vq)
457{ 302{
@@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
480 vq->busyloop_timeout = 0; 325 vq->busyloop_timeout = 0;
481 vq->umem = NULL; 326 vq->umem = NULL;
482 vq->iotlb = NULL; 327 vq->iotlb = NULL;
483 vq->invalidate_count = 0;
484 __vhost_vq_meta_reset(vq); 328 __vhost_vq_meta_reset(vq);
485#if VHOST_ARCH_CAN_ACCEL_UACCESS
486 vhost_reset_vq_maps(vq);
487#endif
488} 329}
489 330
490static int vhost_worker(void *data) 331static int vhost_worker(void *data)
@@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
634 INIT_LIST_HEAD(&dev->read_list); 475 INIT_LIST_HEAD(&dev->read_list);
635 INIT_LIST_HEAD(&dev->pending_list); 476 INIT_LIST_HEAD(&dev->pending_list);
636 spin_lock_init(&dev->iotlb_lock); 477 spin_lock_init(&dev->iotlb_lock);
637#if VHOST_ARCH_CAN_ACCEL_UACCESS 478
638 vhost_init_maps(dev);
639#endif
640 479
641 for (i = 0; i < dev->nvqs; ++i) { 480 for (i = 0; i < dev->nvqs; ++i) {
642 vq = dev->vqs[i]; 481 vq = dev->vqs[i];
@@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
645 vq->heads = NULL; 484 vq->heads = NULL;
646 vq->dev = dev; 485 vq->dev = dev;
647 mutex_init(&vq->mutex); 486 mutex_init(&vq->mutex);
648 spin_lock_init(&vq->mmu_lock);
649 vhost_vq_reset(dev, vq); 487 vhost_vq_reset(dev, vq);
650 if (vq->handle_kick) 488 if (vq->handle_kick)
651 vhost_poll_init(&vq->poll, vq->handle_kick, 489 vhost_poll_init(&vq->poll, vq->handle_kick,
@@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
725 if (err) 563 if (err)
726 goto err_cgroup; 564 goto err_cgroup;
727 565
728#if VHOST_ARCH_CAN_ACCEL_UACCESS
729 err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
730 if (err)
731 goto err_mmu_notifier;
732#endif
733
734 return 0; 566 return 0;
735
736#if VHOST_ARCH_CAN_ACCEL_UACCESS
737err_mmu_notifier:
738 vhost_dev_free_iovecs(dev);
739#endif
740err_cgroup: 567err_cgroup:
741 kthread_stop(worker); 568 kthread_stop(worker);
742 dev->worker = NULL; 569 dev->worker = NULL;
@@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
827 spin_unlock(&dev->iotlb_lock); 654 spin_unlock(&dev->iotlb_lock);
828} 655}
829 656
830#if VHOST_ARCH_CAN_ACCEL_UACCESS
831static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
832 int index, unsigned long uaddr,
833 size_t size, bool write)
834{
835 struct vhost_uaddr *addr = &vq->uaddrs[index];
836
837 addr->uaddr = uaddr;
838 addr->size = size;
839 addr->write = write;
840}
841
842static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
843{
844 vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
845 (unsigned long)vq->desc,
846 vhost_get_desc_size(vq, vq->num),
847 false);
848 vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
849 (unsigned long)vq->avail,
850 vhost_get_avail_size(vq, vq->num),
851 false);
852 vhost_setup_uaddr(vq, VHOST_ADDR_USED,
853 (unsigned long)vq->used,
854 vhost_get_used_size(vq, vq->num),
855 true);
856}
857
858static int vhost_map_prefetch(struct vhost_virtqueue *vq,
859 int index)
860{
861 struct vhost_map *map;
862 struct vhost_uaddr *uaddr = &vq->uaddrs[index];
863 struct page **pages;
864 int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
865 int npinned;
866 void *vaddr, *v;
867 int err;
868 int i;
869
870 spin_lock(&vq->mmu_lock);
871
872 err = -EFAULT;
873 if (vq->invalidate_count)
874 goto err;
875
876 err = -ENOMEM;
877 map = kmalloc(sizeof(*map), GFP_ATOMIC);
878 if (!map)
879 goto err;
880
881 pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
882 if (!pages)
883 goto err_pages;
884
885 err = EFAULT;
886 npinned = __get_user_pages_fast(uaddr->uaddr, npages,
887 uaddr->write, pages);
888 if (npinned > 0)
889 release_pages(pages, npinned);
890 if (npinned != npages)
891 goto err_gup;
892
893 for (i = 0; i < npinned; i++)
894 if (PageHighMem(pages[i]))
895 goto err_gup;
896
897 vaddr = v = page_address(pages[0]);
898
899 /* For simplicity, fallback to userspace address if VA is not
900 * contigious.
901 */
902 for (i = 1; i < npinned; i++) {
903 v += PAGE_SIZE;
904 if (v != page_address(pages[i]))
905 goto err_gup;
906 }
907
908 map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
909 map->npages = npages;
910 map->pages = pages;
911
912 rcu_assign_pointer(vq->maps[index], map);
913 /* No need for a synchronize_rcu(). This function should be
914 * called by dev->worker so we are serialized with all
915 * readers.
916 */
917 spin_unlock(&vq->mmu_lock);
918
919 return 0;
920
921err_gup:
922 kfree(pages);
923err_pages:
924 kfree(map);
925err:
926 spin_unlock(&vq->mmu_lock);
927 return err;
928}
929#endif
930
931void vhost_dev_cleanup(struct vhost_dev *dev) 657void vhost_dev_cleanup(struct vhost_dev *dev)
932{ 658{
933 int i; 659 int i;
@@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
957 kthread_stop(dev->worker); 683 kthread_stop(dev->worker);
958 dev->worker = NULL; 684 dev->worker = NULL;
959 } 685 }
960 if (dev->mm) { 686 if (dev->mm)
961#if VHOST_ARCH_CAN_ACCEL_UACCESS
962 mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
963#endif
964 mmput(dev->mm); 687 mmput(dev->mm);
965 }
966#if VHOST_ARCH_CAN_ACCEL_UACCESS
967 for (i = 0; i < dev->nvqs; i++)
968 vhost_uninit_vq_maps(dev->vqs[i]);
969#endif
970 dev->mm = NULL; 688 dev->mm = NULL;
971} 689}
972EXPORT_SYMBOL_GPL(vhost_dev_cleanup); 690EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
1195 913
1196static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) 914static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
1197{ 915{
1198#if VHOST_ARCH_CAN_ACCEL_UACCESS
1199 struct vhost_map *map;
1200 struct vring_used *used;
1201
1202 if (!vq->iotlb) {
1203 rcu_read_lock();
1204
1205 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1206 if (likely(map)) {
1207 used = map->addr;
1208 *((__virtio16 *)&used->ring[vq->num]) =
1209 cpu_to_vhost16(vq, vq->avail_idx);
1210 rcu_read_unlock();
1211 return 0;
1212 }
1213
1214 rcu_read_unlock();
1215 }
1216#endif
1217
1218 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), 916 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1219 vhost_avail_event(vq)); 917 vhost_avail_event(vq));
1220} 918}
@@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
1223 struct vring_used_elem *head, int idx, 921 struct vring_used_elem *head, int idx,
1224 int count) 922 int count)
1225{ 923{
1226#if VHOST_ARCH_CAN_ACCEL_UACCESS
1227 struct vhost_map *map;
1228 struct vring_used *used;
1229 size_t size;
1230
1231 if (!vq->iotlb) {
1232 rcu_read_lock();
1233
1234 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1235 if (likely(map)) {
1236 used = map->addr;
1237 size = count * sizeof(*head);
1238 memcpy(used->ring + idx, head, size);
1239 rcu_read_unlock();
1240 return 0;
1241 }
1242
1243 rcu_read_unlock();
1244 }
1245#endif
1246
1247 return vhost_copy_to_user(vq, vq->used->ring + idx, head, 924 return vhost_copy_to_user(vq, vq->used->ring + idx, head,
1248 count * sizeof(*head)); 925 count * sizeof(*head));
1249} 926}
@@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
1251static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) 928static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
1252 929
1253{ 930{
1254#if VHOST_ARCH_CAN_ACCEL_UACCESS
1255 struct vhost_map *map;
1256 struct vring_used *used;
1257
1258 if (!vq->iotlb) {
1259 rcu_read_lock();
1260
1261 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1262 if (likely(map)) {
1263 used = map->addr;
1264 used->flags = cpu_to_vhost16(vq, vq->used_flags);
1265 rcu_read_unlock();
1266 return 0;
1267 }
1268
1269 rcu_read_unlock();
1270 }
1271#endif
1272
1273 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), 931 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1274 &vq->used->flags); 932 &vq->used->flags);
1275} 933}
@@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
1277static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) 935static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
1278 936
1279{ 937{
1280#if VHOST_ARCH_CAN_ACCEL_UACCESS
1281 struct vhost_map *map;
1282 struct vring_used *used;
1283
1284 if (!vq->iotlb) {
1285 rcu_read_lock();
1286
1287 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1288 if (likely(map)) {
1289 used = map->addr;
1290 used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
1291 rcu_read_unlock();
1292 return 0;
1293 }
1294
1295 rcu_read_unlock();
1296 }
1297#endif
1298
1299 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), 938 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
1300 &vq->used->idx); 939 &vq->used->idx);
1301} 940}
@@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1341static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, 980static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1342 __virtio16 *idx) 981 __virtio16 *idx)
1343{ 982{
1344#if VHOST_ARCH_CAN_ACCEL_UACCESS
1345 struct vhost_map *map;
1346 struct vring_avail *avail;
1347
1348 if (!vq->iotlb) {
1349 rcu_read_lock();
1350
1351 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1352 if (likely(map)) {
1353 avail = map->addr;
1354 *idx = avail->idx;
1355 rcu_read_unlock();
1356 return 0;
1357 }
1358
1359 rcu_read_unlock();
1360 }
1361#endif
1362
1363 return vhost_get_avail(vq, *idx, &vq->avail->idx); 983 return vhost_get_avail(vq, *idx, &vq->avail->idx);
1364} 984}
1365 985
1366static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, 986static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1367 __virtio16 *head, int idx) 987 __virtio16 *head, int idx)
1368{ 988{
1369#if VHOST_ARCH_CAN_ACCEL_UACCESS
1370 struct vhost_map *map;
1371 struct vring_avail *avail;
1372
1373 if (!vq->iotlb) {
1374 rcu_read_lock();
1375
1376 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1377 if (likely(map)) {
1378 avail = map->addr;
1379 *head = avail->ring[idx & (vq->num - 1)];
1380 rcu_read_unlock();
1381 return 0;
1382 }
1383
1384 rcu_read_unlock();
1385 }
1386#endif
1387
1388 return vhost_get_avail(vq, *head, 989 return vhost_get_avail(vq, *head,
1389 &vq->avail->ring[idx & (vq->num - 1)]); 990 &vq->avail->ring[idx & (vq->num - 1)]);
1390} 991}
@@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1392static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, 993static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1393 __virtio16 *flags) 994 __virtio16 *flags)
1394{ 995{
1395#if VHOST_ARCH_CAN_ACCEL_UACCESS
1396 struct vhost_map *map;
1397 struct vring_avail *avail;
1398
1399 if (!vq->iotlb) {
1400 rcu_read_lock();
1401
1402 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1403 if (likely(map)) {
1404 avail = map->addr;
1405 *flags = avail->flags;
1406 rcu_read_unlock();
1407 return 0;
1408 }
1409
1410 rcu_read_unlock();
1411 }
1412#endif
1413
1414 return vhost_get_avail(vq, *flags, &vq->avail->flags); 996 return vhost_get_avail(vq, *flags, &vq->avail->flags);
1415} 997}
1416 998
1417static inline int vhost_get_used_event(struct vhost_virtqueue *vq, 999static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1418 __virtio16 *event) 1000 __virtio16 *event)
1419{ 1001{
1420#if VHOST_ARCH_CAN_ACCEL_UACCESS
1421 struct vhost_map *map;
1422 struct vring_avail *avail;
1423
1424 if (!vq->iotlb) {
1425 rcu_read_lock();
1426 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1427 if (likely(map)) {
1428 avail = map->addr;
1429 *event = (__virtio16)avail->ring[vq->num];
1430 rcu_read_unlock();
1431 return 0;
1432 }
1433 rcu_read_unlock();
1434 }
1435#endif
1436
1437 return vhost_get_avail(vq, *event, vhost_used_event(vq)); 1002 return vhost_get_avail(vq, *event, vhost_used_event(vq));
1438} 1003}
1439 1004
1440static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, 1005static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1441 __virtio16 *idx) 1006 __virtio16 *idx)
1442{ 1007{
1443#if VHOST_ARCH_CAN_ACCEL_UACCESS
1444 struct vhost_map *map;
1445 struct vring_used *used;
1446
1447 if (!vq->iotlb) {
1448 rcu_read_lock();
1449
1450 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1451 if (likely(map)) {
1452 used = map->addr;
1453 *idx = used->idx;
1454 rcu_read_unlock();
1455 return 0;
1456 }
1457
1458 rcu_read_unlock();
1459 }
1460#endif
1461
1462 return vhost_get_used(vq, *idx, &vq->used->idx); 1008 return vhost_get_used(vq, *idx, &vq->used->idx);
1463} 1009}
1464 1010
1465static inline int vhost_get_desc(struct vhost_virtqueue *vq, 1011static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1466 struct vring_desc *desc, int idx) 1012 struct vring_desc *desc, int idx)
1467{ 1013{
1468#if VHOST_ARCH_CAN_ACCEL_UACCESS
1469 struct vhost_map *map;
1470 struct vring_desc *d;
1471
1472 if (!vq->iotlb) {
1473 rcu_read_lock();
1474
1475 map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
1476 if (likely(map)) {
1477 d = map->addr;
1478 *desc = *(d + idx);
1479 rcu_read_unlock();
1480 return 0;
1481 }
1482
1483 rcu_read_unlock();
1484 }
1485#endif
1486
1487 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); 1014 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1488} 1015}
1489 1016
@@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1824 return true; 1351 return true;
1825} 1352}
1826 1353
1827#if VHOST_ARCH_CAN_ACCEL_UACCESS
1828static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
1829{
1830 struct vhost_map __rcu *map;
1831 int i;
1832
1833 for (i = 0; i < VHOST_NUM_ADDRS; i++) {
1834 rcu_read_lock();
1835 map = rcu_dereference(vq->maps[i]);
1836 rcu_read_unlock();
1837 if (unlikely(!map))
1838 vhost_map_prefetch(vq, i);
1839 }
1840}
1841#endif
1842
1843int vq_meta_prefetch(struct vhost_virtqueue *vq) 1354int vq_meta_prefetch(struct vhost_virtqueue *vq)
1844{ 1355{
1845 unsigned int num = vq->num; 1356 unsigned int num = vq->num;
1846 1357
1847 if (!vq->iotlb) { 1358 if (!vq->iotlb)
1848#if VHOST_ARCH_CAN_ACCEL_UACCESS
1849 vhost_vq_map_prefetch(vq);
1850#endif
1851 return 1; 1359 return 1;
1852 }
1853 1360
1854 return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, 1361 return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1855 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && 1362 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
@@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
2060 1567
2061 mutex_lock(&vq->mutex); 1568 mutex_lock(&vq->mutex);
2062 1569
2063#if VHOST_ARCH_CAN_ACCEL_UACCESS
2064 /* Unregister MMU notifer to allow invalidation callback
2065 * can access vq->uaddrs[] without holding a lock.
2066 */
2067 if (d->mm)
2068 mmu_notifier_unregister(&d->mmu_notifier, d->mm);
2069
2070 vhost_uninit_vq_maps(vq);
2071#endif
2072
2073 switch (ioctl) { 1570 switch (ioctl) {
2074 case VHOST_SET_VRING_NUM: 1571 case VHOST_SET_VRING_NUM:
2075 r = vhost_vring_set_num(d, vq, argp); 1572 r = vhost_vring_set_num(d, vq, argp);
@@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
2081 BUG(); 1578 BUG();
2082 } 1579 }
2083 1580
2084#if VHOST_ARCH_CAN_ACCEL_UACCESS
2085 vhost_setup_vq_uaddr(vq);
2086
2087 if (d->mm)
2088 mmu_notifier_register(&d->mmu_notifier, d->mm);
2089#endif
2090
2091 mutex_unlock(&vq->mutex); 1581 mutex_unlock(&vq->mutex);
2092 1582
2093 return r; 1583 return r;
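
Almost all of the vhost.c delta is the revert of the metadata-acceleration experiment: the MMU-notifier registration, the pinned vhost_map page arrays, and every `#if VHOST_ARCH_CAN_ACCEL_UACCESS` fast path in the ring accessors are deleted (the matching struct fields leave vhost.h just below), reportedly because the invalidation scheme could not be made safe against concurrent ring access. Each accessor shrinks back to the plain uaccess fallback it always contained, e.g.:

    /* Post-revert accessor: one guarded userspace read, no cached map. */
    static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
                                          __virtio16 *idx)
    {
        return vhost_get_avail(vq, *idx, &vq->avail->idx);
    }

The small vhost_poll_start() hunk is an unrelated tidy-up: returning directly instead of threading a ret variable through.
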
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42a8c2a13ab1..e9ed2722b633 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,9 +12,6 @@
12#include <linux/virtio_config.h> 12#include <linux/virtio_config.h>
13#include <linux/virtio_ring.h> 13#include <linux/virtio_ring.h>
14#include <linux/atomic.h> 14#include <linux/atomic.h>
15#include <linux/pagemap.h>
16#include <linux/mmu_notifier.h>
17#include <asm/cacheflush.h>
18 15
19struct vhost_work; 16struct vhost_work;
20typedef void (*vhost_work_fn_t)(struct vhost_work *work); 17typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
83 VHOST_NUM_ADDRS = 3, 80 VHOST_NUM_ADDRS = 3,
84}; 81};
85 82
86struct vhost_map {
87 int npages;
88 void *addr;
89 struct page **pages;
90};
91
92struct vhost_uaddr {
93 unsigned long uaddr;
94 size_t size;
95 bool write;
96};
97
98#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
99#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
100#else
101#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
102#endif
103
104/* The virtqueue structure describes a queue attached to a device. */ 83/* The virtqueue structure describes a queue attached to a device. */
105struct vhost_virtqueue { 84struct vhost_virtqueue {
106 struct vhost_dev *dev; 85 struct vhost_dev *dev;
@@ -111,22 +90,7 @@ struct vhost_virtqueue {
111 struct vring_desc __user *desc; 90 struct vring_desc __user *desc;
112 struct vring_avail __user *avail; 91 struct vring_avail __user *avail;
113 struct vring_used __user *used; 92 struct vring_used __user *used;
114
115#if VHOST_ARCH_CAN_ACCEL_UACCESS
116 /* Read by memory accessors, modified by meta data
117 * prefetching, MMU notifier and vring ioctl().
118 * Synchonrized through mmu_lock (writers) and RCU (writers
119 * and readers).
120 */
121 struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
122 /* Read by MMU notifier, modified by vring ioctl(),
123 * synchronized through MMU notifier
124 * registering/unregistering.
125 */
126 struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
127#endif
128 const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; 93 const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
129
130 struct file *kick; 94 struct file *kick;
131 struct eventfd_ctx *call_ctx; 95 struct eventfd_ctx *call_ctx;
132 struct eventfd_ctx *error_ctx; 96 struct eventfd_ctx *error_ctx;
@@ -181,8 +145,6 @@ struct vhost_virtqueue {
181 bool user_be; 145 bool user_be;
182#endif 146#endif
183 u32 busyloop_timeout; 147 u32 busyloop_timeout;
184 spinlock_t mmu_lock;
185 int invalidate_count;
186}; 148};
187 149
188struct vhost_msg_node { 150struct vhost_msg_node {
@@ -196,9 +158,6 @@ struct vhost_msg_node {
196 158
197struct vhost_dev { 159struct vhost_dev {
198 struct mm_struct *mm; 160 struct mm_struct *mm;
199#ifdef CONFIG_MMU_NOTIFIER
200 struct mmu_notifier mmu_notifier;
201#endif
202 struct mutex mutex; 161 struct mutex mutex;
203 struct vhost_virtqueue **vqs; 162 struct vhost_virtqueue **vqs;
204 int nvqs; 163 int nvqs;
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 92f23e3bc27a..7cacae5a8797 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt)
858 case 'M': 858 case 'M':
859 case 'm': 859 case 'm':
860 size *= 1024; 860 size *= 1024;
861 /* Fall through */
861 case 'K': 862 case 'K':
862 case 'k': 863 case 'k':
863 size *= 1024; 864 size *= 1024;
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 4eacfb1ce1ac..eb729d704836 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
168 soft_margin = new_margin; 168 soft_margin = new_margin;
169 reload = soft_margin * (mem_fclk_21285 / 256); 169 reload = soft_margin * (mem_fclk_21285 / 256);
170 watchdog_ping(); 170 watchdog_ping();
171 /* Fall */ 171 /* Fall through */
172 case WDIOC_GETTIMEOUT: 172 case WDIOC_GETTIMEOUT:
173 ret = put_user(soft_margin, int_arg); 173 ret = put_user(soft_margin, int_arg);
174 break; 174 break;
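
The acornfb and wdt285 one-liners feed -Wimplicit-fallthrough: the compiler only suppresses the warning when a deliberately falling-through case carries a comment its regex recognizes, so the bare 'M' case gains an annotation and wdt285's `/* Fall */` is spelled out to `/* Fall through */`. (Later kernels replace these comments with the `fallthrough;` pseudo-keyword.) The annotated shape:

    switch (unit) {
    case 'M':
        size *= 1024;
        /* Fall through */
    case 'K':
        size *= 1024;
        break;
    }
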
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ae1df496bf38..adcabd9473eb 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -386,8 +386,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
386 */ 386 */
387 trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force); 387 trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
388 388
389 map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir, 389 map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
390 attrs); 390 size, size, dir, attrs);
391 if (map == (phys_addr_t)DMA_MAPPING_ERROR) 391 if (map == (phys_addr_t)DMA_MAPPING_ERROR)
392 return DMA_MAPPING_ERROR; 392 return DMA_MAPPING_ERROR;
393 393
@@ -397,7 +397,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
397 * Ensure that the address returned is DMA'ble 397 * Ensure that the address returned is DMA'ble
398 */ 398 */
399 if (unlikely(!dma_capable(dev, dev_addr, size))) { 399 if (unlikely(!dma_capable(dev, dev_addr, size))) {
400 swiotlb_tbl_unmap_single(dev, map, size, dir, 400 swiotlb_tbl_unmap_single(dev, map, size, size, dir,
401 attrs | DMA_ATTR_SKIP_CPU_SYNC); 401 attrs | DMA_ATTR_SKIP_CPU_SYNC);
402 return DMA_MAPPING_ERROR; 402 return DMA_MAPPING_ERROR;
403 } 403 }
@@ -433,7 +433,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
433 433
434 /* NOTE: We use dev_addr here, not paddr! */ 434 /* NOTE: We use dev_addr here, not paddr! */
435 if (is_xen_swiotlb_buffer(dev_addr)) 435 if (is_xen_swiotlb_buffer(dev_addr))
436 swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs); 436 swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
437} 437}
438 438
439static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 439static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
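
The xen-swiotlb churn tracks an API change from this cycle: swiotlb_tbl_map_single() and swiotlb_tbl_unmap_single() were split into separate mapping-size and allocation-size parameters, apparently so a caller can bounce into a slot padded beyond the mapping (as the untrusted-device work wants). Xen has no use for padding, hence `size, size`; map and unmap must agree on the alloc size or the bounce slots are accounted wrongly:

    map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
                                 size, size, dir, attrs); /* map, alloc */
    /* ... DMA through the bounce buffer ... */
    swiotlb_tbl_unmap_single(dev, map, size, size, dir, attrs);
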
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
74 cell = rcu_dereference_raw(net->ws_cell); 74 cell = rcu_dereference_raw(net->ws_cell);
75 if (cell) { 75 if (cell) {
76 afs_get_cell(cell); 76 afs_get_cell(cell);
77 ret = 0;
77 break; 78 break;
78 } 79 }
79 ret = -EDESTADDRREQ; 80 ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
108 109
109 done_seqretry(&net->cells_lock, seq); 110 done_seqretry(&net->cells_lock, seq);
110 111
112 if (ret != 0 && cell)
113 afs_put_cell(net, cell);
114
111 return ret == 0 ? cell : ERR_PTR(ret); 115 return ret == 0 ? cell : ERR_PTR(ret);
112} 116}
113 117
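
afs_lookup_cell_rcu() could exit its seqlock retry loop with `cell` and `ret` disagreeing: the ws_cell fast path took a reference but could leave ret holding an error from an earlier pass, so the caller got an ERR_PTR while the cell reference leaked. The fix forces ret = 0 at the point the reference is taken and, symmetrically, drops any held reference on the error exit:

    /* On any error return, give back the reference the loop took. */
    if (ret != 0 && cell)
        afs_put_cell(net, cell);
    return ret == 0 ? cell : ERR_PTR(ret);
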
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 81207dc3c997..139b4e3cc946 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -959,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
959 inode ? AFS_FS_I(inode) : NULL); 959 inode ? AFS_FS_I(inode) : NULL);
960 } else { 960 } else {
961 trace_afs_lookup(dvnode, &dentry->d_name, 961 trace_afs_lookup(dvnode, &dentry->d_name,
962 inode ? AFS_FS_I(inode) : NULL); 962 IS_ERR_OR_NULL(inode) ? NULL
963 : AFS_FS_I(inode));
963 } 964 }
964 return d; 965 return d;
965} 966}
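
In afs_lookup(), `inode` can legitimately be an ERR_PTR at the tracing site, and AFS_FS_I() on an error pointer computes a nonsense container address for the tracepoint to chew on. IS_ERR_OR_NULL() screens both failure encodings in one test:

    trace_afs_lookup(dvnode, &dentry->d_name,
                     IS_ERR_OR_NULL(inode) ? NULL : AFS_FS_I(inode));
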
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 2575503170fc..ca2452806ebf 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -2171,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
2171 key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); 2171 key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
2172 2172
2173 size = round_up(acl->size, 4); 2173 size = round_up(acl->size, 4);
2174 call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus, 2174 call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
2175 sizeof(__be32) * 2 + 2175 sizeof(__be32) * 2 +
2176 sizeof(struct yfs_xdr_YFSFid) + 2176 sizeof(struct yfs_xdr_YFSFid) +
2177 sizeof(__be32) + size, 2177 sizeof(__be32) + size,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..b3c8b886bf64 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ get_more_pages:
913 if (page_offset(page) >= ceph_wbc.i_size) { 913 if (page_offset(page) >= ceph_wbc.i_size) {
914 dout("%p page eof %llu\n", 914 dout("%p page eof %llu\n",
915 page, ceph_wbc.i_size); 915 page, ceph_wbc.i_size);
916 if (ceph_wbc.size_stable || 916 if ((ceph_wbc.size_stable ||
917 page_offset(page) >= i_size_read(inode)) 917 page_offset(page) >= i_size_read(inode)) &&
918 clear_page_dirty_for_io(page))
918 mapping->a_ops->invalidatepage(page, 919 mapping->a_ops->invalidatepage(page,
919 0, PAGE_SIZE); 920 0, PAGE_SIZE);
920 unlock_page(page); 921 unlock_page(page);
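
The ceph writeback hunk makes past-EOF invalidation conditional on clear_page_dirty_for_io() succeeding: invalidating a page that is still flagged dirty skews the dirty-page accounting and can throw away data a racing writer just added, so if the dirty bit cannot be cleared the page is simply unlocked and left for a later pass. The resulting shape (ceph_wbc.size_stable shortened to size_stable):

    if ((size_stable || page_offset(page) >= i_size_read(inode)) &&
        clear_page_dirty_for_io(page))
        mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
    unlock_page(page);      /* unlocked either way */
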
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..ce0f5658720a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1301{ 1301{
1302 struct ceph_inode_info *ci = cap->ci; 1302 struct ceph_inode_info *ci = cap->ci;
1303 struct inode *inode = &ci->vfs_inode; 1303 struct inode *inode = &ci->vfs_inode;
1304 struct ceph_buffer *old_blob = NULL;
1304 struct cap_msg_args arg; 1305 struct cap_msg_args arg;
1305 int held, revoking; 1306 int held, revoking;
1306 int wake = 0; 1307 int wake = 0;
@@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1365 ci->i_requested_max_size = arg.max_size; 1366 ci->i_requested_max_size = arg.max_size;
1366 1367
1367 if (flushing & CEPH_CAP_XATTR_EXCL) { 1368 if (flushing & CEPH_CAP_XATTR_EXCL) {
1368 __ceph_build_xattrs_blob(ci); 1369 old_blob = __ceph_build_xattrs_blob(ci);
1369 arg.xattr_version = ci->i_xattrs.version; 1370 arg.xattr_version = ci->i_xattrs.version;
1370 arg.xattr_buf = ci->i_xattrs.blob; 1371 arg.xattr_buf = ci->i_xattrs.blob;
1371 } else { 1372 } else {
@@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1409 1410
1410 spin_unlock(&ci->i_ceph_lock); 1411 spin_unlock(&ci->i_ceph_lock);
1411 1412
1413 ceph_buffer_put(old_blob);
1414
1412 ret = send_cap_msg(&arg); 1415 ret = send_cap_msg(&arg);
1413 if (ret < 0) { 1416 if (ret < 0) {
1414 dout("error sending cap msg, must requeue %p\n", inode); 1417 dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..18500edefc56 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
736 int issued, new_issued, info_caps; 736 int issued, new_issued, info_caps;
737 struct timespec64 mtime, atime, ctime; 737 struct timespec64 mtime, atime, ctime;
738 struct ceph_buffer *xattr_blob = NULL; 738 struct ceph_buffer *xattr_blob = NULL;
739 struct ceph_buffer *old_blob = NULL;
739 struct ceph_string *pool_ns = NULL; 740 struct ceph_string *pool_ns = NULL;
740 struct ceph_cap *new_cap = NULL; 741 struct ceph_cap *new_cap = NULL;
741 int err = 0; 742 int err = 0;
@@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
881 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && 882 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
882 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 883 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
883 if (ci->i_xattrs.blob) 884 if (ci->i_xattrs.blob)
884 ceph_buffer_put(ci->i_xattrs.blob); 885 old_blob = ci->i_xattrs.blob;
885 ci->i_xattrs.blob = xattr_blob; 886 ci->i_xattrs.blob = xattr_blob;
886 if (xattr_blob) 887 if (xattr_blob)
887 memcpy(ci->i_xattrs.blob->vec.iov_base, 888 memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
1022out: 1023out:
1023 if (new_cap) 1024 if (new_cap)
1024 ceph_put_cap(mdsc, new_cap); 1025 ceph_put_cap(mdsc, new_cap);
1025 if (xattr_blob) 1026 ceph_buffer_put(old_blob);
1026 ceph_buffer_put(xattr_blob); 1027 ceph_buffer_put(xattr_blob);
1027 ceph_put_string(pool_ns); 1028 ceph_put_string(pool_ns);
1028 return err; 1029 return err;
1029} 1030}
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..5083e238ad15 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
111 req->r_wait_for_completion = ceph_lock_wait_for_completion; 111 req->r_wait_for_completion = ceph_lock_wait_for_completion;
112 112
113 err = ceph_mdsc_do_request(mdsc, inode, req); 113 err = ceph_mdsc_do_request(mdsc, inode, req);
114 114 if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
115 if (operation == CEPH_MDS_OP_GETFILELOCK) {
116 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid); 115 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
117 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) 116 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
118 fl->fl_type = F_RDLCK; 117 fl->fl_type = F_RDLCK;
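
ceph_lock_message() parsed the GETFILELOCK reply even when ceph_mdsc_do_request() had failed, in which case r_reply_info.filelock_reply may never have been filled in - a NULL dereference waiting to happen. Guarding on success first is the whole fix:

    err = ceph_mdsc_do_request(mdsc, inode, req);
    if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
        /* ... decode req->r_reply_info.filelock_reply ... */
    }
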
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
465 struct inode *inode = &ci->vfs_inode; 465 struct inode *inode = &ci->vfs_inode;
466 struct ceph_cap_snap *capsnap; 466 struct ceph_cap_snap *capsnap;
467 struct ceph_snap_context *old_snapc, *new_snapc; 467 struct ceph_snap_context *old_snapc, *new_snapc;
468 struct ceph_buffer *old_blob = NULL;
468 int used, dirty; 469 int used, dirty;
469 470
470 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); 471 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
541 capsnap->gid = inode->i_gid; 542 capsnap->gid = inode->i_gid;
542 543
543 if (dirty & CEPH_CAP_XATTR_EXCL) { 544 if (dirty & CEPH_CAP_XATTR_EXCL) {
544 __ceph_build_xattrs_blob(ci); 545 old_blob = __ceph_build_xattrs_blob(ci);
545 capsnap->xattr_blob = 546 capsnap->xattr_blob =
546 ceph_buffer_get(ci->i_xattrs.blob); 547 ceph_buffer_get(ci->i_xattrs.blob);
547 capsnap->xattr_version = ci->i_xattrs.version; 548 capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
584 } 585 }
585 spin_unlock(&ci->i_ceph_lock); 586 spin_unlock(&ci->i_ceph_lock);
586 587
588 ceph_buffer_put(old_blob);
587 kfree(capsnap); 589 kfree(capsnap);
588 ceph_put_snap_context(old_snapc); 590 ceph_put_snap_context(old_snapc);
589} 591}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..6b9f1ee7de85 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
926int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); 926int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
927ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); 927ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
928extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); 928extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
929extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci); 929extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
930extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); 930extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
931extern const struct xattr_handler *ceph_xattr_handlers[]; 931extern const struct xattr_handler *ceph_xattr_handlers[];
932 932
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..939eab7aa219 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
754 754
755/* 755/*
756 * If there are dirty xattrs, reencode xattrs into the prealloc_blob 756 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
757 * and swap into place. 757 * and swap into place. It returns the old i_xattrs.blob (or NULL) so
758 * that it can be freed by the caller as the i_ceph_lock is likely to be
759 * held.
758 */ 760 */
759void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) 761struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
760{ 762{
761 struct rb_node *p; 763 struct rb_node *p;
762 struct ceph_inode_xattr *xattr = NULL; 764 struct ceph_inode_xattr *xattr = NULL;
765 struct ceph_buffer *old_blob = NULL;
763 void *dest; 766 void *dest;
764 767
765 dout("__build_xattrs_blob %p\n", &ci->vfs_inode); 768 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
790 dest - ci->i_xattrs.prealloc_blob->vec.iov_base; 793 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
791 794
792 if (ci->i_xattrs.blob) 795 if (ci->i_xattrs.blob)
793 ceph_buffer_put(ci->i_xattrs.blob); 796 old_blob = ci->i_xattrs.blob;
794 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; 797 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
795 ci->i_xattrs.prealloc_blob = NULL; 798 ci->i_xattrs.prealloc_blob = NULL;
796 ci->i_xattrs.dirty = false; 799 ci->i_xattrs.dirty = false;
797 ci->i_xattrs.version++; 800 ci->i_xattrs.version++;
798 } 801 }
802
803 return old_blob;
799} 804}
800 805
801static inline int __get_request_mask(struct inode *in) { 806static inline int __get_request_mask(struct inode *in) {
@@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1036 struct ceph_inode_info *ci = ceph_inode(inode); 1041 struct ceph_inode_info *ci = ceph_inode(inode);
1037 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1042 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1038 struct ceph_cap_flush *prealloc_cf = NULL; 1043 struct ceph_cap_flush *prealloc_cf = NULL;
1044 struct ceph_buffer *old_blob = NULL;
1039 int issued; 1045 int issued;
1040 int err; 1046 int err;
1041 int dirty = 0; 1047 int dirty = 0;
@@ -1109,13 +1115,15 @@ retry:
1109 struct ceph_buffer *blob; 1115 struct ceph_buffer *blob;
1110 1116
1111 spin_unlock(&ci->i_ceph_lock); 1117 spin_unlock(&ci->i_ceph_lock);
1112 dout(" preaallocating new blob size=%d\n", required_blob_size); 1118 ceph_buffer_put(old_blob); /* Shouldn't be required */
1119 dout(" pre-allocating new blob size=%d\n", required_blob_size);
1113 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 1120 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1114 if (!blob) 1121 if (!blob)
1115 goto do_sync_unlocked; 1122 goto do_sync_unlocked;
1116 spin_lock(&ci->i_ceph_lock); 1123 spin_lock(&ci->i_ceph_lock);
1124 /* prealloc_blob can't be released while holding i_ceph_lock */
1117 if (ci->i_xattrs.prealloc_blob) 1125 if (ci->i_xattrs.prealloc_blob)
1118 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 1126 old_blob = ci->i_xattrs.prealloc_blob;
1119 ci->i_xattrs.prealloc_blob = blob; 1127 ci->i_xattrs.prealloc_blob = blob;
1120 goto retry; 1128 goto retry;
1121 } 1129 }
@@ -1131,6 +1139,7 @@ retry:
1131 } 1139 }
1132 1140
1133 spin_unlock(&ci->i_ceph_lock); 1141 spin_unlock(&ci->i_ceph_lock);
1142 ceph_buffer_put(old_blob);
1134 if (lock_snap_rwsem) 1143 if (lock_snap_rwsem)
1135 up_read(&mdsc->snap_rwsem); 1144 up_read(&mdsc->snap_rwsem);
1136 if (dirty) 1145 if (dirty)
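
The ceph hunks above all follow the same deferred-release pattern: ceph_buffer_put() can drop the last reference and free the buffer, which must not happen under i_ceph_lock, so the swap steals the old pointer while locked and the caller releases it after unlocking. A minimal sketch of that shape under hypothetical names (demo_obj, demo_buf and demo_buf_put() stand in for the ceph types):

	/*
	 * Swap in a new refcounted buffer under a spinlock; hand the old
	 * one back so the final put can run without the lock held.
	 */
	static struct demo_buf *demo_swap_buf(struct demo_obj *obj,
					      struct demo_buf *new_buf)
	{
		struct demo_buf *old;

		spin_lock(&obj->lock);
		old = obj->buf;			/* may be NULL */
		obj->buf = new_buf;
		spin_unlock(&obj->lock);

		return old;	/* caller: demo_buf_put(old); */
	}

This is why __ceph_build_xattrs_blob() now returns the old blob instead of putting it in place.
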
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4b21a90015a9..99caf77df4a2 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
152extern const struct export_operations cifs_export_ops; 152extern const struct export_operations cifs_export_ops;
153#endif /* CONFIG_CIFS_NFSD_EXPORT */ 153#endif /* CONFIG_CIFS_NFSD_EXPORT */
154 154
155#define CIFS_VERSION "2.21" 155#define CIFS_VERSION "2.22"
156#endif /* _CIFSFS_H */ 156#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e23234207fc2..592a6cea2b79 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
579 unsigned int *len, unsigned int *offset); 579 unsigned int *len, unsigned int *offset);
580 580
581void extract_unc_hostname(const char *unc, const char **h, size_t *len); 581void extract_unc_hostname(const char *unc, const char **h, size_t *len);
582int copy_path_name(char *dst, const char *src);
582 583
583#ifdef CONFIG_CIFS_DFS_UPCALL 584#ifdef CONFIG_CIFS_DFS_UPCALL
584static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, 585static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e2f95965065d..3907653e63c7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -942,10 +942,8 @@ PsxDelete:
942 PATH_MAX, nls_codepage, remap); 942 PATH_MAX, nls_codepage, remap);
943 name_len++; /* trailing null */ 943 name_len++; /* trailing null */
944 name_len *= 2; 944 name_len *= 2;
945 } else { /* BB add path length overrun check */ 945 } else {
946 name_len = strnlen(fileName, PATH_MAX); 946 name_len = copy_path_name(pSMB->FileName, fileName);
947 name_len++; /* trailing null */
948 strncpy(pSMB->FileName, fileName, name_len);
949 } 947 }
950 948
951 params = 6 + name_len; 949 params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
1015 remap); 1013 remap);
1016 name_len++; /* trailing null */ 1014 name_len++; /* trailing null */
1017 name_len *= 2; 1015 name_len *= 2;
1018 } else { /* BB improve check for buffer overruns BB */ 1016 } else {
1019 name_len = strnlen(name, PATH_MAX); 1017 name_len = copy_path_name(pSMB->fileName, name);
1020 name_len++; /* trailing null */
1021 strncpy(pSMB->fileName, name, name_len);
1022 } 1018 }
1023 pSMB->SearchAttributes = 1019 pSMB->SearchAttributes =
1024 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM); 1020 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
1062 remap); 1058 remap);
1063 name_len++; /* trailing null */ 1059 name_len++; /* trailing null */
1064 name_len *= 2; 1060 name_len *= 2;
1065 } else { /* BB improve check for buffer overruns BB */ 1061 } else {
1066 name_len = strnlen(name, PATH_MAX); 1062 name_len = copy_path_name(pSMB->DirName, name);
1067 name_len++; /* trailing null */
1068 strncpy(pSMB->DirName, name, name_len);
1069 } 1063 }
1070 1064
1071 pSMB->BufferFormat = 0x04; 1065 pSMB->BufferFormat = 0x04;
@@ -1107,10 +1101,8 @@ MkDirRetry:
1107 remap); 1101 remap);
1108 name_len++; /* trailing null */ 1102 name_len++; /* trailing null */
1109 name_len *= 2; 1103 name_len *= 2;
1110 } else { /* BB improve check for buffer overruns BB */ 1104 } else {
1111 name_len = strnlen(name, PATH_MAX); 1105 name_len = copy_path_name(pSMB->DirName, name);
1112 name_len++; /* trailing null */
1113 strncpy(pSMB->DirName, name, name_len);
1114 } 1106 }
1115 1107
1116 pSMB->BufferFormat = 0x04; 1108 pSMB->BufferFormat = 0x04;
@@ -1157,10 +1149,8 @@ PsxCreat:
1157 PATH_MAX, nls_codepage, remap); 1149 PATH_MAX, nls_codepage, remap);
1158 name_len++; /* trailing null */ 1150 name_len++; /* trailing null */
1159 name_len *= 2; 1151 name_len *= 2;
1160 } else { /* BB improve the check for buffer overruns BB */ 1152 } else {
1161 name_len = strnlen(name, PATH_MAX); 1153 name_len = copy_path_name(pSMB->FileName, name);
1162 name_len++; /* trailing null */
1163 strncpy(pSMB->FileName, name, name_len);
1164 } 1154 }
1165 1155
1166 params = 6 + name_len; 1156 params = 6 + name_len;
@@ -1324,11 +1314,9 @@ OldOpenRetry:
1324 fileName, PATH_MAX, nls_codepage, remap); 1314 fileName, PATH_MAX, nls_codepage, remap);
1325 name_len++; /* trailing null */ 1315 name_len++; /* trailing null */
1326 name_len *= 2; 1316 name_len *= 2;
1327 } else { /* BB improve check for buffer overruns BB */ 1317 } else {
1328 count = 0; /* no pad */ 1318 count = 0; /* no pad */
1329 name_len = strnlen(fileName, PATH_MAX); 1319 name_len = copy_path_name(pSMB->fileName, fileName);
1330 name_len++; /* trailing null */
1331 strncpy(pSMB->fileName, fileName, name_len);
1332 } 1320 }
1333 if (*pOplock & REQ_OPLOCK) 1321 if (*pOplock & REQ_OPLOCK)
1334 pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK); 1322 pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1442,11 +1430,8 @@ openRetry:
1442 /* BB improve check for buffer overruns BB */ 1430 /* BB improve check for buffer overruns BB */
1443 /* no pad */ 1431 /* no pad */
1444 count = 0; 1432 count = 0;
1445 name_len = strnlen(path, PATH_MAX); 1433 name_len = copy_path_name(req->fileName, path);
1446 /* trailing null */
1447 name_len++;
1448 req->NameLength = cpu_to_le16(name_len); 1434 req->NameLength = cpu_to_le16(name_len);
1449 strncpy(req->fileName, path, name_len);
1450 } 1435 }
1451 1436
1452 if (*oplock & REQ_OPLOCK) 1437 if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2797,10 @@ renameRetry:
2812 remap); 2797 remap);
2813 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2798 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2814 name_len2 *= 2; /* convert to bytes */ 2799 name_len2 *= 2; /* convert to bytes */
2815 } else { /* BB improve the check for buffer overruns BB */ 2800 } else {
2816 name_len = strnlen(from_name, PATH_MAX); 2801 name_len = copy_path_name(pSMB->OldFileName, from_name);
2817 name_len++; /* trailing null */ 2802 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
2818 strncpy(pSMB->OldFileName, from_name, name_len);
2819 name_len2 = strnlen(to_name, PATH_MAX);
2820 name_len2++; /* trailing null */
2821 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 2803 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
2822 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
2823 name_len2++; /* trailing null */
2824 name_len2++; /* signature byte */ 2804 name_len2++; /* signature byte */
2825 } 2805 }
2826 2806
@@ -2962,15 +2942,10 @@ copyRetry:
2962 toName, PATH_MAX, nls_codepage, remap); 2942 toName, PATH_MAX, nls_codepage, remap);
2963 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2943 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2964 name_len2 *= 2; /* convert to bytes */ 2944 name_len2 *= 2; /* convert to bytes */
2965 } else { /* BB improve the check for buffer overruns BB */ 2945 } else {
2966 name_len = strnlen(fromName, PATH_MAX); 2946 name_len = copy_path_name(pSMB->OldFileName, fromName);
2967 name_len++; /* trailing null */
2968 strncpy(pSMB->OldFileName, fromName, name_len);
2969 name_len2 = strnlen(toName, PATH_MAX);
2970 name_len2++; /* trailing null */
2971 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 2947 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
2972 strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); 2948 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
2973 name_len2++; /* trailing null */
2974 name_len2++; /* signature byte */ 2949 name_len2++; /* signature byte */
2975 } 2950 }
2976 2951
@@ -3021,10 +2996,8 @@ createSymLinkRetry:
3021 name_len++; /* trailing null */ 2996 name_len++; /* trailing null */
3022 name_len *= 2; 2997 name_len *= 2;
3023 2998
3024 } else { /* BB improve the check for buffer overruns BB */ 2999 } else {
3025 name_len = strnlen(fromName, PATH_MAX); 3000 name_len = copy_path_name(pSMB->FileName, fromName);
3026 name_len++; /* trailing null */
3027 strncpy(pSMB->FileName, fromName, name_len);
3028 } 3001 }
3029 params = 6 + name_len; 3002 params = 6 + name_len;
3030 pSMB->MaxSetupCount = 0; 3003 pSMB->MaxSetupCount = 0;
@@ -3044,10 +3017,8 @@ createSymLinkRetry:
3044 PATH_MAX, nls_codepage, remap); 3017 PATH_MAX, nls_codepage, remap);
3045 name_len_target++; /* trailing null */ 3018 name_len_target++; /* trailing null */
3046 name_len_target *= 2; 3019 name_len_target *= 2;
3047 } else { /* BB improve the check for buffer overruns BB */ 3020 } else {
3048 name_len_target = strnlen(toName, PATH_MAX); 3021 name_len_target = copy_path_name(data_offset, toName);
3049 name_len_target++; /* trailing null */
3050 strncpy(data_offset, toName, name_len_target);
3051 } 3022 }
3052 3023
3053 pSMB->MaxParameterCount = cpu_to_le16(2); 3024 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3080,8 @@ createHardLinkRetry:
3109 name_len++; /* trailing null */ 3080 name_len++; /* trailing null */
3110 name_len *= 2; 3081 name_len *= 2;
3111 3082
3112 } else { /* BB improve the check for buffer overruns BB */ 3083 } else {
3113 name_len = strnlen(toName, PATH_MAX); 3084 name_len = copy_path_name(pSMB->FileName, toName);
3114 name_len++; /* trailing null */
3115 strncpy(pSMB->FileName, toName, name_len);
3116 } 3085 }
3117 params = 6 + name_len; 3086 params = 6 + name_len;
3118 pSMB->MaxSetupCount = 0; 3087 pSMB->MaxSetupCount = 0;
@@ -3131,10 +3100,8 @@ createHardLinkRetry:
3131 PATH_MAX, nls_codepage, remap); 3100 PATH_MAX, nls_codepage, remap);
3132 name_len_target++; /* trailing null */ 3101 name_len_target++; /* trailing null */
3133 name_len_target *= 2; 3102 name_len_target *= 2;
3134 } else { /* BB improve the check for buffer overruns BB */ 3103 } else {
3135 name_len_target = strnlen(fromName, PATH_MAX); 3104 name_len_target = copy_path_name(data_offset, fromName);
3136 name_len_target++; /* trailing null */
3137 strncpy(data_offset, fromName, name_len_target);
3138 } 3105 }
3139 3106
3140 pSMB->MaxParameterCount = cpu_to_le16(2); 3107 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3180,10 @@ winCreateHardLinkRetry:
3213 remap); 3180 remap);
3214 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 3181 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
3215 name_len2 *= 2; /* convert to bytes */ 3182 name_len2 *= 2; /* convert to bytes */
3216 } else { /* BB improve the check for buffer overruns BB */ 3183 } else {
3217 name_len = strnlen(from_name, PATH_MAX); 3184 name_len = copy_path_name(pSMB->OldFileName, from_name);
3218 name_len++; /* trailing null */
3219 strncpy(pSMB->OldFileName, from_name, name_len);
3220 name_len2 = strnlen(to_name, PATH_MAX);
3221 name_len2++; /* trailing null */
3222 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 3185 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
3223 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2); 3186 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
3224 name_len2++; /* trailing null */
3225 name_len2++; /* signature byte */ 3187 name_len2++; /* signature byte */
3226 } 3188 }
3227 3189
@@ -3271,10 +3233,8 @@ querySymLinkRetry:
3271 remap); 3233 remap);
3272 name_len++; /* trailing null */ 3234 name_len++; /* trailing null */
3273 name_len *= 2; 3235 name_len *= 2;
3274 } else { /* BB improve the check for buffer overruns BB */ 3236 } else {
3275 name_len = strnlen(searchName, PATH_MAX); 3237 name_len = copy_path_name(pSMB->FileName, searchName);
3276 name_len++; /* trailing null */
3277 strncpy(pSMB->FileName, searchName, name_len);
3278 } 3238 }
3279 3239
3280 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3240 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3691,10 +3651,8 @@ queryAclRetry:
3691 name_len *= 2; 3651 name_len *= 2;
3692 pSMB->FileName[name_len] = 0; 3652 pSMB->FileName[name_len] = 0;
3693 pSMB->FileName[name_len+1] = 0; 3653 pSMB->FileName[name_len+1] = 0;
3694 } else { /* BB improve the check for buffer overruns BB */ 3654 } else {
3695 name_len = strnlen(searchName, PATH_MAX); 3655 name_len = copy_path_name(pSMB->FileName, searchName);
3696 name_len++; /* trailing null */
3697 strncpy(pSMB->FileName, searchName, name_len);
3698 } 3656 }
3699 3657
3700 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3658 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3776,10 +3734,8 @@ setAclRetry:
3776 PATH_MAX, nls_codepage, remap); 3734 PATH_MAX, nls_codepage, remap);
3777 name_len++; /* trailing null */ 3735 name_len++; /* trailing null */
3778 name_len *= 2; 3736 name_len *= 2;
3779 } else { /* BB improve the check for buffer overruns BB */ 3737 } else {
3780 name_len = strnlen(fileName, PATH_MAX); 3738 name_len = copy_path_name(pSMB->FileName, fileName);
3781 name_len++; /* trailing null */
3782 strncpy(pSMB->FileName, fileName, name_len);
3783 } 3739 }
3784 params = 6 + name_len; 3740 params = 6 + name_len;
3785 pSMB->MaxParameterCount = cpu_to_le16(2); 3741 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4140,7 @@ QInfRetry:
4184 name_len++; /* trailing null */ 4140 name_len++; /* trailing null */
4185 name_len *= 2; 4141 name_len *= 2;
4186 } else { 4142 } else {
4187 name_len = strnlen(search_name, PATH_MAX); 4143 name_len = copy_path_name(pSMB->FileName, search_name);
4188 name_len++; /* trailing null */
4189 strncpy(pSMB->FileName, search_name, name_len);
4190 } 4144 }
4191 pSMB->BufferFormat = 0x04; 4145 pSMB->BufferFormat = 0x04;
4192 name_len++; /* account for buffer type byte */ 4146 name_len++; /* account for buffer type byte */
@@ -4321,10 +4275,8 @@ QPathInfoRetry:
4321 PATH_MAX, nls_codepage, remap); 4275 PATH_MAX, nls_codepage, remap);
4322 name_len++; /* trailing null */ 4276 name_len++; /* trailing null */
4323 name_len *= 2; 4277 name_len *= 2;
4324 } else { /* BB improve the check for buffer overruns BB */ 4278 } else {
4325 name_len = strnlen(search_name, PATH_MAX); 4279 name_len = copy_path_name(pSMB->FileName, search_name);
4326 name_len++; /* trailing null */
4327 strncpy(pSMB->FileName, search_name, name_len);
4328 } 4280 }
4329 4281
4330 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4282 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4442,8 @@ UnixQPathInfoRetry:
4490 PATH_MAX, nls_codepage, remap); 4442 PATH_MAX, nls_codepage, remap);
4491 name_len++; /* trailing null */ 4443 name_len++; /* trailing null */
4492 name_len *= 2; 4444 name_len *= 2;
4493 } else { /* BB improve the check for buffer overruns BB */ 4445 } else {
4494 name_len = strnlen(searchName, PATH_MAX); 4446 name_len = copy_path_name(pSMB->FileName, searchName);
4495 name_len++; /* trailing null */
4496 strncpy(pSMB->FileName, searchName, name_len);
4497 } 4447 }
4498 4448
4499 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4449 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4543,16 @@ findFirstRetry:
4593 pSMB->FileName[name_len+1] = 0; 4543 pSMB->FileName[name_len+1] = 0;
4594 name_len += 2; 4544 name_len += 2;
4595 } 4545 }
4596 } else { /* BB add check for overrun of SMB buf BB */ 4546 } else {
4597 name_len = strnlen(searchName, PATH_MAX); 4547 name_len = copy_path_name(pSMB->FileName, searchName);
4598/* BB fix here and in unicode clause above ie
4599 if (name_len > buffersize-header)
4600 free buffer exit; BB */
4601 strncpy(pSMB->FileName, searchName, name_len);
4602 if (msearch) { 4548 if (msearch) {
4603 pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb); 4549 if (WARN_ON_ONCE(name_len > PATH_MAX-2))
4604 pSMB->FileName[name_len+1] = '*'; 4550 name_len = PATH_MAX-2;
4605 pSMB->FileName[name_len+2] = 0; 4551 /* overwrite nul byte */
4606 name_len += 3; 4552 pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
4553 pSMB->FileName[name_len] = '*';
4554 pSMB->FileName[name_len+1] = 0;
4555 name_len += 2;
4607 } 4556 }
4608 } 4557 }
4609 4558
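
The rewritten msearch branch relies on copy_path_name() counting the trailing nul: index name_len-1 holds that nul, so it is overwritten with the directory separator, then a wildcard and a fresh terminator are appended, growing the counted length by two rather than the old three. A worked example on a hypothetical buf[], with '\\' standing in for CIFS_DIR_SEP(cifs_sb):

	char buf[PATH_MAX];
	int name_len = copy_path_name(buf, "dir");	/* buf = "dir\0", name_len = 4 */

	buf[name_len - 1] = '\\';	/* overwrite nul:  "dir\"        */
	buf[name_len]     = '*';	/*                 "dir\*"       */
	buf[name_len + 1] = 0;		/*                 "dir\*" + nul */
	name_len += 2;			/* 6 bytes, terminator included  */
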
@@ -4898,10 +4847,8 @@ GetInodeNumberRetry:
4898 remap); 4847 remap);
4899 name_len++; /* trailing null */ 4848 name_len++; /* trailing null */
4900 name_len *= 2; 4849 name_len *= 2;
4901 } else { /* BB improve the check for buffer overruns BB */ 4850 } else {
4902 name_len = strnlen(search_name, PATH_MAX); 4851 name_len = copy_path_name(pSMB->FileName, search_name);
4903 name_len++; /* trailing null */
4904 strncpy(pSMB->FileName, search_name, name_len);
4905 } 4852 }
4906 4853
4907 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 4854 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -5008,9 +4955,7 @@ getDFSRetry:
5008 name_len++; /* trailing null */ 4955 name_len++; /* trailing null */
5009 name_len *= 2; 4956 name_len *= 2;
5010 } else { /* BB improve the check for buffer overruns BB */ 4957 } else { /* BB improve the check for buffer overruns BB */
5011 name_len = strnlen(search_name, PATH_MAX); 4958 name_len = copy_path_name(pSMB->RequestFileName, search_name);
5012 name_len++; /* trailing null */
5013 strncpy(pSMB->RequestFileName, search_name, name_len);
5014 } 4959 }
5015 4960
5016 if (ses->server->sign) 4961 if (ses->server->sign)
@@ -5663,10 +5608,8 @@ SetEOFRetry:
5663 PATH_MAX, cifs_sb->local_nls, remap); 5608 PATH_MAX, cifs_sb->local_nls, remap);
5664 name_len++; /* trailing null */ 5609 name_len++; /* trailing null */
5665 name_len *= 2; 5610 name_len *= 2;
5666 } else { /* BB improve the check for buffer overruns BB */ 5611 } else {
5667 name_len = strnlen(file_name, PATH_MAX); 5612 name_len = copy_path_name(pSMB->FileName, file_name);
5668 name_len++; /* trailing null */
5669 strncpy(pSMB->FileName, file_name, name_len);
5670 } 5613 }
5671 params = 6 + name_len; 5614 params = 6 + name_len;
5672 data_count = sizeof(struct file_end_of_file_info); 5615 data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5902,8 @@ SetTimesRetry:
5959 PATH_MAX, nls_codepage, remap); 5902 PATH_MAX, nls_codepage, remap);
5960 name_len++; /* trailing null */ 5903 name_len++; /* trailing null */
5961 name_len *= 2; 5904 name_len *= 2;
5962 } else { /* BB improve the check for buffer overruns BB */ 5905 } else {
5963 name_len = strnlen(fileName, PATH_MAX); 5906 name_len = copy_path_name(pSMB->FileName, fileName);
5964 name_len++; /* trailing null */
5965 strncpy(pSMB->FileName, fileName, name_len);
5966 } 5907 }
5967 5908
5968 params = 6 + name_len; 5909 params = 6 + name_len;
@@ -6040,10 +5981,8 @@ SetAttrLgcyRetry:
6040 PATH_MAX, nls_codepage); 5981 PATH_MAX, nls_codepage);
6041 name_len++; /* trailing null */ 5982 name_len++; /* trailing null */
6042 name_len *= 2; 5983 name_len *= 2;
6043 } else { /* BB improve the check for buffer overruns BB */ 5984 } else {
6044 name_len = strnlen(fileName, PATH_MAX); 5985 name_len = copy_path_name(pSMB->fileName, fileName);
6045 name_len++; /* trailing null */
6046 strncpy(pSMB->fileName, fileName, name_len);
6047 } 5986 }
6048 pSMB->attr = cpu_to_le16(dos_attrs); 5987 pSMB->attr = cpu_to_le16(dos_attrs);
6049 pSMB->BufferFormat = 0x04; 5988 pSMB->BufferFormat = 0x04;
@@ -6203,10 +6142,8 @@ setPermsRetry:
6203 PATH_MAX, nls_codepage, remap); 6142 PATH_MAX, nls_codepage, remap);
6204 name_len++; /* trailing null */ 6143 name_len++; /* trailing null */
6205 name_len *= 2; 6144 name_len *= 2;
6206 } else { /* BB improve the check for buffer overruns BB */ 6145 } else {
6207 name_len = strnlen(file_name, PATH_MAX); 6146 name_len = copy_path_name(pSMB->FileName, file_name);
6208 name_len++; /* trailing null */
6209 strncpy(pSMB->FileName, file_name, name_len);
6210 } 6147 }
6211 6148
6212 params = 6 + name_len; 6149 params = 6 + name_len;
@@ -6298,10 +6235,8 @@ QAllEAsRetry:
6298 PATH_MAX, nls_codepage, remap); 6235 PATH_MAX, nls_codepage, remap);
6299 list_len++; /* trailing null */ 6236 list_len++; /* trailing null */
6300 list_len *= 2; 6237 list_len *= 2;
6301 } else { /* BB improve the check for buffer overruns BB */ 6238 } else {
6302 list_len = strnlen(searchName, PATH_MAX); 6239 list_len = copy_path_name(pSMB->FileName, searchName);
6303 list_len++; /* trailing null */
6304 strncpy(pSMB->FileName, searchName, list_len);
6305 } 6240 }
6306 6241
6307 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; 6242 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6415,8 @@ SetEARetry:
6480 PATH_MAX, nls_codepage, remap); 6415 PATH_MAX, nls_codepage, remap);
6481 name_len++; /* trailing null */ 6416 name_len++; /* trailing null */
6482 name_len *= 2; 6417 name_len *= 2;
6483 } else { /* BB improve the check for buffer overruns BB */ 6418 } else {
6484 name_len = strnlen(fileName, PATH_MAX); 6419 name_len = copy_path_name(pSMB->FileName, fileName);
6485 name_len++; /* trailing null */
6486 strncpy(pSMB->FileName, fileName, name_len);
6487 } 6420 }
6488 6421
6489 params = 6 + name_len; 6422 params = 6 + name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a15a6e738eb5..5299effa6f7d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,7 +1113,7 @@ cifs_demultiplex_thread(void *p)
1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv); 1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1114 1114
1115 set_freezable(); 1115 set_freezable();
1116 allow_signal(SIGKILL); 1116 allow_kernel_signal(SIGKILL);
1117 while (server->tcpStatus != CifsExiting) { 1117 while (server->tcpStatus != CifsExiting) {
1118 if (try_to_freeze()) 1118 if (try_to_freeze())
1119 continue; 1119 continue;
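
The allow_signal() to allow_kernel_signal() switch narrows who may terminate the demultiplex thread: allow_kernel_signal() only unblocks SIGKILL when it is sent from inside the kernel, so userspace can no longer kill cifsd directly while kernel teardown paths still can. A hedged sketch of the intended kthread shape (demo_thread and the sleep interval are illustrative, not cifs code):

	#include <linux/kthread.h>
	#include <linux/signal.h>
	#include <linux/sched/signal.h>

	static int demo_thread(void *unused)
	{
		allow_kernel_signal(SIGKILL);	/* kernel-sent SIGKILL only */
		while (!kthread_should_stop()) {
			if (signal_pending(current))
				break;		/* the kernel asked us to exit */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}
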
@@ -2981,6 +2981,7 @@ static int
2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) 2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2982{ 2982{
2983 int rc = 0; 2983 int rc = 0;
2984 int is_domain = 0;
2984 const char *delim, *payload; 2985 const char *delim, *payload;
2985 char *desc; 2986 char *desc;
2986 ssize_t len; 2987 ssize_t len;
@@ -3028,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3028 rc = PTR_ERR(key); 3029 rc = PTR_ERR(key);
3029 goto out_err; 3030 goto out_err;
3030 } 3031 }
3032 is_domain = 1;
3031 } 3033 }
3032 3034
3033 down_read(&key->sem); 3035 down_read(&key->sem);
@@ -3085,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3085 goto out_key_put; 3087 goto out_key_put;
3086 } 3088 }
3087 3089
3090 /*
3091 * If we have a domain key then we must set the domainName in the
 3092 * request.
3093 */
3094 if (is_domain && ses->domainName) {
3095 vol->domainname = kstrndup(ses->domainName,
3096 strlen(ses->domainName),
3097 GFP_KERNEL);
3098 if (!vol->domainname) {
3099 cifs_dbg(FYI, "Unable to allocate %zd bytes for "
3100 "domain\n", len);
3101 rc = -ENOMEM;
3102 kfree(vol->username);
3103 vol->username = NULL;
3104 kzfree(vol->password);
3105 vol->password = NULL;
3106 goto out_key_put;
3107 }
3108 }
3109
3088out_key_put: 3110out_key_put:
3089 up_read(&key->sem); 3111 up_read(&key->sem);
3090 key_put(key); 3112 key_put(key);
@@ -4209,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
4209 strlen(vol->prepath) + 1 : 0; 4231 strlen(vol->prepath) + 1 : 0;
4210 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); 4232 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
4211 4233
4234 if (unc_len > MAX_TREE_SIZE)
4235 return ERR_PTR(-EINVAL);
4236
4212 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); 4237 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
4213 if (full_path == NULL) 4238 if (full_path == NULL)
4214 return ERR_PTR(-ENOMEM); 4239 return ERR_PTR(-ENOMEM);
4215 4240
4216 strncpy(full_path, vol->UNC, unc_len); 4241 memcpy(full_path, vol->UNC, unc_len);
4217 pos = full_path + unc_len; 4242 pos = full_path + unc_len;
4218 4243
4219 if (pplen) { 4244 if (pplen) {
4220 *pos = CIFS_DIR_SEP(cifs_sb); 4245 *pos = CIFS_DIR_SEP(cifs_sb);
4221 strncpy(pos + 1, vol->prepath, pplen); 4246 memcpy(pos + 1, vol->prepath, pplen);
4222 pos += pplen; 4247 pos += pplen;
4223 } 4248 }
4224 4249
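
Replacing strncpy() with memcpy() in build_unc_path_to_root() is safe only because of the new length check: strnlen(vol->UNC, MAX_TREE_SIZE + 1) returning more than MAX_TREE_SIZE means no terminator was found within bounds, and the mount is rejected instead of silently truncated. The same validate-then-memcpy shape in isolation (demo_copy_unc() and its limit parameter are hypothetical):

	static char *demo_copy_unc(const char *unc, size_t limit)
	{
		size_t len = strnlen(unc, limit + 1);
		char *out;

		if (len > limit)		/* no nul within limit bytes */
			return ERR_PTR(-EINVAL);

		out = kmalloc(len + 1, GFP_KERNEL);
		if (!out)
			return ERR_PTR(-ENOMEM);

		memcpy(out, unc, len);		/* exact, already-validated length */
		out[len] = '\0';
		return out;
	}
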
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f26a48dd2e39..be424e81e3ad 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
69 return full_path; 69 return full_path;
70 70
71 if (dfsplen) 71 if (dfsplen)
72 strncpy(full_path, tcon->treeName, dfsplen); 72 memcpy(full_path, tcon->treeName, dfsplen);
73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb); 73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
74 strncpy(full_path + dfsplen + 1, vol->prepath, pplen); 74 memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); 75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
76 full_path[dfsplen + pplen] = 0; /* add trailing null */
77 return full_path; 76 return full_path;
78} 77}
79 78
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f383877a6511..5ad83bdb9bea 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1011 *h = unc; 1011 *h = unc;
1012 *len = end - unc; 1012 *len = end - unc;
1013} 1013}
1014
1015/**
1016 * copy_path_name - copy src path to dst, possibly truncating
1017 *
1018 * returns number of bytes written (including trailing nul)
1019 */
1020int copy_path_name(char *dst, const char *src)
1021{
1022 int name_len;
1023
1024 /*
1025 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1026 * will truncate and strlen(dst) will be PATH_MAX-1
1027 */
1028 name_len = strscpy(dst, src, PATH_MAX);
1029 if (WARN_ON_ONCE(name_len < 0))
1030 name_len = PATH_MAX-1;
1031
1032 /* we count the trailing nul */
1033 name_len++;
1034 return name_len;
1035}
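
strscpy() returns the number of characters copied excluding the nul, or -E2BIG if src did not fit; copy_path_name() folds both cases into "bytes written including the nul", which is exactly what the SMB name_len accounting in cifssmb.c wants. An illustrative call, with dst sized to the PATH_MAX the helper assumes:

	char dst[PATH_MAX];
	int name_len;

	name_len = copy_path_name(dst, "\\server\\share\\file.txt");
	/* dst = "\server\share\file.txt", name_len = 23 (22 chars + nul) */
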
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dcd49ad60c83..4c764ff7edd2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
159 const struct nls_table *nls_cp) 159 const struct nls_table *nls_cp)
160{ 160{
161 char *bcc_ptr = *pbcc_area; 161 char *bcc_ptr = *pbcc_area;
162 int len;
162 163
163 /* copy user */ 164 /* copy user */
164 /* BB what about null user mounts - check that we do this BB */ 165 /* BB what about null user mounts - check that we do this BB */
165 /* copy user */ 166 /* copy user */
166 if (ses->user_name != NULL) { 167 if (ses->user_name != NULL) {
167 strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); 168 len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
168 bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); 169 if (WARN_ON_ONCE(len < 0))
170 len = CIFS_MAX_USERNAME_LEN - 1;
171 bcc_ptr += len;
169 } 172 }
170 /* else null user mount */ 173 /* else null user mount */
171 *bcc_ptr = 0; 174 *bcc_ptr = 0;
@@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
173 176
174 /* copy domain */ 177 /* copy domain */
175 if (ses->domainName != NULL) { 178 if (ses->domainName != NULL) {
176 strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 179 len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
177 bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 180 if (WARN_ON_ONCE(len < 0))
181 len = CIFS_MAX_DOMAINNAME_LEN - 1;
182 bcc_ptr += len;
178 } /* else we will send a null domain name 183 } /* else we will send a null domain name
179 so the server will default to its own domain */ 184 so the server will default to its own domain */
180 *bcc_ptr = 0; 185 *bcc_ptr = 0;
@@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
242 247
243 kfree(ses->serverOS); 248 kfree(ses->serverOS);
244 249
245 ses->serverOS = kzalloc(len + 1, GFP_KERNEL); 250 ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
246 if (ses->serverOS) { 251 if (ses->serverOS) {
247 strncpy(ses->serverOS, bcc_ptr, len); 252 memcpy(ses->serverOS, bcc_ptr, len);
253 ses->serverOS[len] = 0;
248 if (strncmp(ses->serverOS, "OS/2", 4) == 0) 254 if (strncmp(ses->serverOS, "OS/2", 4) == 0)
249 cifs_dbg(FYI, "OS/2 server\n"); 255 cifs_dbg(FYI, "OS/2 server\n");
250 } 256 }
@@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
258 264
259 kfree(ses->serverNOS); 265 kfree(ses->serverNOS);
260 266
261 ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); 267 ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
262 if (ses->serverNOS) 268 if (ses->serverNOS) {
263 strncpy(ses->serverNOS, bcc_ptr, len); 269 memcpy(ses->serverNOS, bcc_ptr, len);
270 ses->serverNOS[len] = 0;
271 }
264 272
265 bcc_ptr += len + 1; 273 bcc_ptr += len + 1;
266 bleft -= len + 1; 274 bleft -= len + 1;
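
Switching kzalloc() to kmalloc() in decode_ascii_ssetup() works because every byte of the allocation is now written explicitly: len bytes via memcpy() plus a hand-placed terminator, so pre-zeroing was pure overhead. The shape in isolation (a hypothetical helper, GFP_KERNEL assumed):

	static char *copy_counted_string(const char *src, size_t len)
	{
		char *s = kmalloc(len + 1, GFP_KERNEL);	/* no kzalloc needed */

		if (s) {
			memcpy(s, src, len);	/* src need not be nul-terminated */
			s[len] = 0;		/* we terminate it ourselves */
		}
		return s;
	}
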
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index f752d83a9c44..520f1813e789 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -20,6 +20,15 @@
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22 22
23struct configfs_fragment {
24 atomic_t frag_count;
25 struct rw_semaphore frag_sem;
26 bool frag_dead;
27};
28
29void put_fragment(struct configfs_fragment *);
30struct configfs_fragment *get_fragment(struct configfs_fragment *);
31
23struct configfs_dirent { 32struct configfs_dirent {
24 atomic_t s_count; 33 atomic_t s_count;
25 int s_dependent_count; 34 int s_dependent_count;
@@ -34,6 +43,7 @@ struct configfs_dirent {
34#ifdef CONFIG_LOCKDEP 43#ifdef CONFIG_LOCKDEP
35 int s_depth; 44 int s_depth;
36#endif 45#endif
46 struct configfs_fragment *s_frag;
37}; 47};
38 48
39#define CONFIGFS_ROOT 0x0001 49#define CONFIGFS_ROOT 0x0001
@@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
61extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 71extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
62extern int configfs_create_bin_file(struct config_item *, 72extern int configfs_create_bin_file(struct config_item *,
63 const struct configfs_bin_attribute *); 73 const struct configfs_bin_attribute *);
64extern int configfs_make_dirent(struct configfs_dirent *, 74extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
65 struct dentry *, void *, umode_t, int); 75 void *, umode_t, int, struct configfs_fragment *);
66extern int configfs_dirent_is_ready(struct configfs_dirent *); 76extern int configfs_dirent_is_ready(struct configfs_dirent *);
67 77
68extern void configfs_hash_and_remove(struct dentry * dir, const char * name); 78extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
137{ 147{
138 if (!(sd->s_type & CONFIGFS_ROOT)) { 148 if (!(sd->s_type & CONFIGFS_ROOT)) {
139 kfree(sd->s_iattr); 149 kfree(sd->s_iattr);
150 put_fragment(sd->s_frag);
140 kmem_cache_free(configfs_dir_cachep, sd); 151 kmem_cache_free(configfs_dir_cachep, sd);
141 } 152 }
142} 153}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 92112915de8e..79fc25aaa8cd 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
151 151
152#endif /* CONFIG_LOCKDEP */ 152#endif /* CONFIG_LOCKDEP */
153 153
154static struct configfs_fragment *new_fragment(void)
155{
156 struct configfs_fragment *p;
157
158 p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
159 if (p) {
160 atomic_set(&p->frag_count, 1);
161 init_rwsem(&p->frag_sem);
162 p->frag_dead = false;
163 }
164 return p;
165}
166
167void put_fragment(struct configfs_fragment *frag)
168{
169 if (frag && atomic_dec_and_test(&frag->frag_count))
170 kfree(frag);
171}
172
173struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
174{
175 if (likely(frag))
176 atomic_inc(&frag->frag_count);
177 return frag;
178}
179
154/* 180/*
155 * Allocates a new configfs_dirent and links it to the parent configfs_dirent 181 * Allocates a new configfs_dirent and links it to the parent configfs_dirent
156 */ 182 */
157static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd, 183static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
158 void *element, int type) 184 void *element, int type,
185 struct configfs_fragment *frag)
159{ 186{
160 struct configfs_dirent * sd; 187 struct configfs_dirent * sd;
161 188
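
A configfs_fragment is a small refcounted kill switch shared by every dirent created in one mkdir/register operation; put_fragment() frees it only when the last dirent drops its reference. A sketch of the creation-side lifecycle, mirroring configfs_mkdir() below (demo_attach_subtree() is hypothetical):

	struct configfs_fragment *frag;
	int ret;

	frag = new_fragment();		/* refcount starts at 1 */
	if (!frag)
		return -ENOMEM;
	/* every dirent created below takes its own get_fragment(frag) */
	ret = demo_attach_subtree(parent, item, frag);
	put_fragment(frag);		/* drop the creation reference */
	return ret;
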
@@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
175 kmem_cache_free(configfs_dir_cachep, sd); 202 kmem_cache_free(configfs_dir_cachep, sd);
176 return ERR_PTR(-ENOENT); 203 return ERR_PTR(-ENOENT);
177 } 204 }
205 sd->s_frag = get_fragment(frag);
178 list_add(&sd->s_sibling, &parent_sd->s_children); 206 list_add(&sd->s_sibling, &parent_sd->s_children);
179 spin_unlock(&configfs_dirent_lock); 207 spin_unlock(&configfs_dirent_lock);
180 208
@@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
209 237
210int configfs_make_dirent(struct configfs_dirent * parent_sd, 238int configfs_make_dirent(struct configfs_dirent * parent_sd,
211 struct dentry * dentry, void * element, 239 struct dentry * dentry, void * element,
212 umode_t mode, int type) 240 umode_t mode, int type, struct configfs_fragment *frag)
213{ 241{
214 struct configfs_dirent * sd; 242 struct configfs_dirent * sd;
215 243
216 sd = configfs_new_dirent(parent_sd, element, type); 244 sd = configfs_new_dirent(parent_sd, element, type, frag);
217 if (IS_ERR(sd)) 245 if (IS_ERR(sd))
218 return PTR_ERR(sd); 246 return PTR_ERR(sd);
219 247
@@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode)
260 * until it is validated by configfs_dir_set_ready() 288 * until it is validated by configfs_dir_set_ready()
261 */ 289 */
262 290
263static int configfs_create_dir(struct config_item *item, struct dentry *dentry) 291static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
292 struct configfs_fragment *frag)
264{ 293{
265 int error; 294 int error;
266 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; 295 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
@@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
273 return error; 302 return error;
274 303
275 error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, 304 error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
276 CONFIGFS_DIR | CONFIGFS_USET_CREATING); 305 CONFIGFS_DIR | CONFIGFS_USET_CREATING,
306 frag);
277 if (unlikely(error)) 307 if (unlikely(error))
278 return error; 308 return error;
279 309
@@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl,
338{ 368{
339 int err = 0; 369 int err = 0;
340 umode_t mode = S_IFLNK | S_IRWXUGO; 370 umode_t mode = S_IFLNK | S_IRWXUGO;
371 struct configfs_dirent *p = parent->d_fsdata;
341 372
342 err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode, 373 err = configfs_make_dirent(p, dentry, sl, mode,
343 CONFIGFS_ITEM_LINK); 374 CONFIGFS_ITEM_LINK, p->s_frag);
344 if (!err) { 375 if (!err) {
345 err = configfs_create(dentry, mode, init_symlink); 376 err = configfs_create(dentry, mode, init_symlink);
346 if (err) { 377 if (err) {
@@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item)
599 630
600static int configfs_attach_group(struct config_item *parent_item, 631static int configfs_attach_group(struct config_item *parent_item,
601 struct config_item *item, 632 struct config_item *item,
602 struct dentry *dentry); 633 struct dentry *dentry,
634 struct configfs_fragment *frag);
603static void configfs_detach_group(struct config_item *item); 635static void configfs_detach_group(struct config_item *item);
604 636
605static void detach_groups(struct config_group *group) 637static void detach_groups(struct config_group *group)
@@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group)
647 * try using vfs_mkdir. Just a thought. 679 * try using vfs_mkdir. Just a thought.
648 */ 680 */
649static int create_default_group(struct config_group *parent_group, 681static int create_default_group(struct config_group *parent_group,
650 struct config_group *group) 682 struct config_group *group,
683 struct configfs_fragment *frag)
651{ 684{
652 int ret; 685 int ret;
653 struct configfs_dirent *sd; 686 struct configfs_dirent *sd;
@@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group,
663 d_add(child, NULL); 696 d_add(child, NULL);
664 697
665 ret = configfs_attach_group(&parent_group->cg_item, 698 ret = configfs_attach_group(&parent_group->cg_item,
666 &group->cg_item, child); 699 &group->cg_item, child, frag);
667 if (!ret) { 700 if (!ret) {
668 sd = child->d_fsdata; 701 sd = child->d_fsdata;
669 sd->s_type |= CONFIGFS_USET_DEFAULT; 702 sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group,
677 return ret; 710 return ret;
678} 711}
679 712
680static int populate_groups(struct config_group *group) 713static int populate_groups(struct config_group *group,
714 struct configfs_fragment *frag)
681{ 715{
682 struct config_group *new_group; 716 struct config_group *new_group;
683 int ret = 0; 717 int ret = 0;
684 718
685 list_for_each_entry(new_group, &group->default_groups, group_entry) { 719 list_for_each_entry(new_group, &group->default_groups, group_entry) {
686 ret = create_default_group(group, new_group); 720 ret = create_default_group(group, new_group, frag);
687 if (ret) { 721 if (ret) {
688 detach_groups(group); 722 detach_groups(group);
689 break; 723 break;
@@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
797 */ 831 */
798static int configfs_attach_item(struct config_item *parent_item, 832static int configfs_attach_item(struct config_item *parent_item,
799 struct config_item *item, 833 struct config_item *item,
800 struct dentry *dentry) 834 struct dentry *dentry,
835 struct configfs_fragment *frag)
801{ 836{
802 int ret; 837 int ret;
803 838
804 ret = configfs_create_dir(item, dentry); 839 ret = configfs_create_dir(item, dentry, frag);
805 if (!ret) { 840 if (!ret) {
806 ret = populate_attrs(item); 841 ret = populate_attrs(item);
807 if (ret) { 842 if (ret) {
@@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item)
831 866
832static int configfs_attach_group(struct config_item *parent_item, 867static int configfs_attach_group(struct config_item *parent_item,
833 struct config_item *item, 868 struct config_item *item,
834 struct dentry *dentry) 869 struct dentry *dentry,
870 struct configfs_fragment *frag)
835{ 871{
836 int ret; 872 int ret;
837 struct configfs_dirent *sd; 873 struct configfs_dirent *sd;
838 874
839 ret = configfs_attach_item(parent_item, item, dentry); 875 ret = configfs_attach_item(parent_item, item, dentry, frag);
840 if (!ret) { 876 if (!ret) {
841 sd = dentry->d_fsdata; 877 sd = dentry->d_fsdata;
842 sd->s_type |= CONFIGFS_USET_DIR; 878 sd->s_type |= CONFIGFS_USET_DIR;
@@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item,
852 */ 888 */
853 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); 889 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
854 configfs_adjust_dir_dirent_depth_before_populate(sd); 890 configfs_adjust_dir_dirent_depth_before_populate(sd);
855 ret = populate_groups(to_config_group(item)); 891 ret = populate_groups(to_config_group(item), frag);
856 if (ret) { 892 if (ret) {
857 configfs_detach_item(item); 893 configfs_detach_item(item);
858 d_inode(dentry)->i_flags |= S_DEAD; 894 d_inode(dentry)->i_flags |= S_DEAD;
@@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
1247 struct configfs_dirent *sd; 1283 struct configfs_dirent *sd;
1248 const struct config_item_type *type; 1284 const struct config_item_type *type;
1249 struct module *subsys_owner = NULL, *new_item_owner = NULL; 1285 struct module *subsys_owner = NULL, *new_item_owner = NULL;
1286 struct configfs_fragment *frag;
1250 char *name; 1287 char *name;
1251 1288
1252 sd = dentry->d_parent->d_fsdata; 1289 sd = dentry->d_parent->d_fsdata;
@@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
1265 goto out; 1302 goto out;
1266 } 1303 }
1267 1304
1305 frag = new_fragment();
1306 if (!frag) {
1307 ret = -ENOMEM;
1308 goto out;
1309 }
1310
1268 /* Get a working ref for the duration of this function */ 1311 /* Get a working ref for the duration of this function */
1269 parent_item = configfs_get_config_item(dentry->d_parent); 1312 parent_item = configfs_get_config_item(dentry->d_parent);
1270 type = parent_item->ci_type; 1313 type = parent_item->ci_type;
@@ -1367,9 +1410,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
1367 spin_unlock(&configfs_dirent_lock); 1410 spin_unlock(&configfs_dirent_lock);
1368 1411
1369 if (group) 1412 if (group)
1370 ret = configfs_attach_group(parent_item, item, dentry); 1413 ret = configfs_attach_group(parent_item, item, dentry, frag);
1371 else 1414 else
1372 ret = configfs_attach_item(parent_item, item, dentry); 1415 ret = configfs_attach_item(parent_item, item, dentry, frag);
1373 1416
1374 spin_lock(&configfs_dirent_lock); 1417 spin_lock(&configfs_dirent_lock);
1375 sd->s_type &= ~CONFIGFS_USET_IN_MKDIR; 1418 sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1406,6 +1449,7 @@ out_put:
1406 * reference. 1449 * reference.
1407 */ 1450 */
1408 config_item_put(parent_item); 1451 config_item_put(parent_item);
1452 put_fragment(frag);
1409 1453
1410out: 1454out:
1411 return ret; 1455 return ret;
@@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
1417 struct config_item *item; 1461 struct config_item *item;
1418 struct configfs_subsystem *subsys; 1462 struct configfs_subsystem *subsys;
1419 struct configfs_dirent *sd; 1463 struct configfs_dirent *sd;
1464 struct configfs_fragment *frag;
1420 struct module *subsys_owner = NULL, *dead_item_owner = NULL; 1465 struct module *subsys_owner = NULL, *dead_item_owner = NULL;
1421 int ret; 1466 int ret;
1422 1467
@@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
1474 } 1519 }
1475 } while (ret == -EAGAIN); 1520 } while (ret == -EAGAIN);
1476 1521
1522 frag = sd->s_frag;
1523 if (down_write_killable(&frag->frag_sem)) {
1524 spin_lock(&configfs_dirent_lock);
1525 configfs_detach_rollback(dentry);
1526 spin_unlock(&configfs_dirent_lock);
1527 return -EINTR;
1528 }
1529 frag->frag_dead = true;
1530 up_write(&frag->frag_sem);
1531
1477 /* Get a working ref for the duration of this function */ 1532 /* Get a working ref for the duration of this function */
1478 item = configfs_get_config_item(dentry); 1533 item = configfs_get_config_item(dentry);
1479 1534
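
Marking the fragment dead takes frag_sem for writing, so this cannot complete while any attribute callback still holds the read side; once it does complete, new callers observe frag_dead and fail with -ENOENT. Using down_write_killable() means a signalled rmdir rolls the detach back instead of blocking forever. The handshake in miniature (demo_frag mirrors configfs_fragment):

	static int demo_kill_fragment(struct demo_frag *frag)
	{
		if (down_write_killable(&frag->sem))
			return -EINTR;	/* caller rolls the operation back */
		frag->dead = true;	/* new readers now see a dead frag */
		up_write(&frag->sem);
		return 0;		/* in-flight readers have drained */
	}
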
@@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
1574 */ 1629 */
1575 err = -ENOENT; 1630 err = -ENOENT;
1576 if (configfs_dirent_is_ready(parent_sd)) { 1631 if (configfs_dirent_is_ready(parent_sd)) {
1577 file->private_data = configfs_new_dirent(parent_sd, NULL, 0); 1632 file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
1578 if (IS_ERR(file->private_data)) 1633 if (IS_ERR(file->private_data))
1579 err = PTR_ERR(file->private_data); 1634 err = PTR_ERR(file->private_data);
1580 else 1635 else
@@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group,
1732{ 1787{
1733 struct configfs_subsystem *subsys = parent_group->cg_subsys; 1788 struct configfs_subsystem *subsys = parent_group->cg_subsys;
1734 struct dentry *parent; 1789 struct dentry *parent;
1790 struct configfs_fragment *frag;
1735 int ret; 1791 int ret;
1736 1792
1793 frag = new_fragment();
1794 if (!frag)
1795 return -ENOMEM;
1796
1737 mutex_lock(&subsys->su_mutex); 1797 mutex_lock(&subsys->su_mutex);
1738 link_group(parent_group, group); 1798 link_group(parent_group, group);
1739 mutex_unlock(&subsys->su_mutex); 1799 mutex_unlock(&subsys->su_mutex);
@@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group,
1741 parent = parent_group->cg_item.ci_dentry; 1801 parent = parent_group->cg_item.ci_dentry;
1742 1802
1743 inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); 1803 inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
1744 ret = create_default_group(parent_group, group); 1804 ret = create_default_group(parent_group, group, frag);
1745 if (ret) 1805 if (ret)
1746 goto err_out; 1806 goto err_out;
1747 1807
@@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group,
1749 configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); 1809 configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
1750 spin_unlock(&configfs_dirent_lock); 1810 spin_unlock(&configfs_dirent_lock);
1751 inode_unlock(d_inode(parent)); 1811 inode_unlock(d_inode(parent));
1812 put_fragment(frag);
1752 return 0; 1813 return 0;
1753err_out: 1814err_out:
1754 inode_unlock(d_inode(parent)); 1815 inode_unlock(d_inode(parent));
1755 mutex_lock(&subsys->su_mutex); 1816 mutex_lock(&subsys->su_mutex);
1756 unlink_group(group); 1817 unlink_group(group);
1757 mutex_unlock(&subsys->su_mutex); 1818 mutex_unlock(&subsys->su_mutex);
1819 put_fragment(frag);
1758 return ret; 1820 return ret;
1759} 1821}
1760EXPORT_SYMBOL(configfs_register_group); 1822EXPORT_SYMBOL(configfs_register_group);
@@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group)
1770 struct configfs_subsystem *subsys = group->cg_subsys; 1832 struct configfs_subsystem *subsys = group->cg_subsys;
1771 struct dentry *dentry = group->cg_item.ci_dentry; 1833 struct dentry *dentry = group->cg_item.ci_dentry;
1772 struct dentry *parent = group->cg_item.ci_parent->ci_dentry; 1834 struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
1835 struct configfs_dirent *sd = dentry->d_fsdata;
1836 struct configfs_fragment *frag = sd->s_frag;
1773 1837
1774 mutex_lock(&subsys->su_mutex); 1838 down_write(&frag->frag_sem);
1775 if (!group->cg_item.ci_parent->ci_group) { 1839 frag->frag_dead = true;
1776 /* 1840 up_write(&frag->frag_sem);
1777 * The parent has already been unlinked and detached
1778 * due to a rmdir.
1779 */
1780 goto unlink_group;
1781 }
1782 mutex_unlock(&subsys->su_mutex);
1783 1841
1784 inode_lock_nested(d_inode(parent), I_MUTEX_PARENT); 1842 inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
1785 spin_lock(&configfs_dirent_lock); 1843 spin_lock(&configfs_dirent_lock);
@@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group)
1796 dput(dentry); 1854 dput(dentry);
1797 1855
1798 mutex_lock(&subsys->su_mutex); 1856 mutex_lock(&subsys->su_mutex);
1799unlink_group:
1800 unlink_group(group); 1857 unlink_group(group);
1801 mutex_unlock(&subsys->su_mutex); 1858 mutex_unlock(&subsys->su_mutex);
1802} 1859}
@@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
1853 struct dentry *dentry; 1910 struct dentry *dentry;
1854 struct dentry *root; 1911 struct dentry *root;
1855 struct configfs_dirent *sd; 1912 struct configfs_dirent *sd;
1913 struct configfs_fragment *frag;
1914
1915 frag = new_fragment();
1916 if (!frag)
1917 return -ENOMEM;
1856 1918
1857 root = configfs_pin_fs(); 1919 root = configfs_pin_fs();
1858 if (IS_ERR(root)) 1920 if (IS_ERR(root)) {
1921 put_fragment(frag);
1859 return PTR_ERR(root); 1922 return PTR_ERR(root);
1923 }
1860 1924
1861 if (!group->cg_item.ci_name) 1925 if (!group->cg_item.ci_name)
1862 group->cg_item.ci_name = group->cg_item.ci_namebuf; 1926 group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
1872 d_add(dentry, NULL); 1936 d_add(dentry, NULL);
1873 1937
1874 err = configfs_attach_group(sd->s_element, &group->cg_item, 1938 err = configfs_attach_group(sd->s_element, &group->cg_item,
1875 dentry); 1939 dentry, frag);
1876 if (err) { 1940 if (err) {
1877 BUG_ON(d_inode(dentry)); 1941 BUG_ON(d_inode(dentry));
1878 d_drop(dentry); 1942 d_drop(dentry);
@@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
1890 unlink_group(group); 1954 unlink_group(group);
1891 configfs_release_fs(); 1955 configfs_release_fs();
1892 } 1956 }
1957 put_fragment(frag);
1893 1958
1894 return err; 1959 return err;
1895} 1960}
@@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
1899 struct config_group *group = &subsys->su_group; 1964 struct config_group *group = &subsys->su_group;
1900 struct dentry *dentry = group->cg_item.ci_dentry; 1965 struct dentry *dentry = group->cg_item.ci_dentry;
1901 struct dentry *root = dentry->d_sb->s_root; 1966 struct dentry *root = dentry->d_sb->s_root;
1967 struct configfs_dirent *sd = dentry->d_fsdata;
1968 struct configfs_fragment *frag = sd->s_frag;
1902 1969
1903 if (dentry->d_parent != root) { 1970 if (dentry->d_parent != root) {
1904 pr_err("Tried to unregister non-subsystem!\n"); 1971 pr_err("Tried to unregister non-subsystem!\n");
1905 return; 1972 return;
1906 } 1973 }
1907 1974
1975 down_write(&frag->frag_sem);
1976 frag->frag_dead = true;
1977 up_write(&frag->frag_sem);
1978
1908 inode_lock_nested(d_inode(root), 1979 inode_lock_nested(d_inode(root),
1909 I_MUTEX_PARENT); 1980 I_MUTEX_PARENT);
1910 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); 1981 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 61e4db4390a1..fb65b706cc0d 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -39,40 +39,44 @@ struct configfs_buffer {
39 bool write_in_progress; 39 bool write_in_progress;
40 char *bin_buffer; 40 char *bin_buffer;
41 int bin_buffer_size; 41 int bin_buffer_size;
42 int cb_max_size;
43 struct config_item *item;
44 struct module *owner;
45 union {
46 struct configfs_attribute *attr;
47 struct configfs_bin_attribute *bin_attr;
48 };
42}; 49};
43 50
51static inline struct configfs_fragment *to_frag(struct file *file)
52{
53 struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
44 54
45/** 55 return sd->s_frag;
46 * fill_read_buffer - allocate and fill buffer from item. 56}
47 * @dentry: dentry pointer. 57
48 * @buffer: data buffer for file. 58static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
49 *
50 * Allocate @buffer->page, if it hasn't been already, then call the
51 * config_item's show() method to fill the buffer with this attribute's
52 * data.
53 * This is called only once, on the file's first read.
54 */
55static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
56{ 59{
57 struct configfs_attribute * attr = to_attr(dentry); 60 struct configfs_fragment *frag = to_frag(file);
58 struct config_item * item = to_item(dentry->d_parent); 61 ssize_t count = -ENOENT;
59 int ret = 0;
60 ssize_t count;
61 62
62 if (!buffer->page) 63 if (!buffer->page)
63 buffer->page = (char *) get_zeroed_page(GFP_KERNEL); 64 buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
64 if (!buffer->page) 65 if (!buffer->page)
65 return -ENOMEM; 66 return -ENOMEM;
66 67
67 count = attr->show(item, buffer->page); 68 down_read(&frag->frag_sem);
68 69 if (!frag->frag_dead)
69 BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE); 70 count = buffer->attr->show(buffer->item, buffer->page);
70 if (count >= 0) { 71 up_read(&frag->frag_sem);
71 buffer->needs_read_fill = 0; 72
72 buffer->count = count; 73 if (count < 0)
73 } else 74 return count;
74 ret = count; 75 if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
75 return ret; 76 return -EIO;
77 buffer->needs_read_fill = 0;
78 buffer->count = count;
79 return 0;
76} 80}
77 81
78/** 82/**
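
The rewritten fill_read_buffer() is the read-side half of the fragment handshake: the show() callback runs only with frag_sem held shared and frag_dead still false, so teardown, which sets frag_dead under the write side, can never race with a callback touching a freed item. Reader-side sketch with the same hypothetical demo names:

	static ssize_t demo_guarded_show(struct demo_frag *frag,
					 struct demo_item *item, char *page)
	{
		ssize_t ret = -ENOENT;	/* object already torn down */

		down_read(&frag->sem);
		if (!frag->dead)	/* safe to dereference *item */
			ret = item->show(item, page);
		up_read(&frag->sem);
		return ret;
	}
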
@@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
97static ssize_t 101static ssize_t
98configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) 102configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
99{ 103{
100 struct configfs_buffer * buffer = file->private_data; 104 struct configfs_buffer *buffer = file->private_data;
101 ssize_t retval = 0; 105 ssize_t retval = 0;
102 106
103 mutex_lock(&buffer->mutex); 107 mutex_lock(&buffer->mutex);
104 if (buffer->needs_read_fill) { 108 if (buffer->needs_read_fill) {
105 if ((retval = fill_read_buffer(file->f_path.dentry,buffer))) 109 retval = fill_read_buffer(file, buffer);
110 if (retval)
106 goto out; 111 goto out;
107 } 112 }
108 pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n", 113 pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -138,10 +143,8 @@ static ssize_t
138configfs_read_bin_file(struct file *file, char __user *buf, 143configfs_read_bin_file(struct file *file, char __user *buf,
139 size_t count, loff_t *ppos) 144 size_t count, loff_t *ppos)
140{ 145{
146 struct configfs_fragment *frag = to_frag(file);
141 struct configfs_buffer *buffer = file->private_data; 147 struct configfs_buffer *buffer = file->private_data;
142 struct dentry *dentry = file->f_path.dentry;
143 struct config_item *item = to_item(dentry->d_parent);
144 struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
145 ssize_t retval = 0; 148 ssize_t retval = 0;
146 ssize_t len = min_t(size_t, count, PAGE_SIZE); 149 ssize_t len = min_t(size_t, count, PAGE_SIZE);
147 150
@@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf,
156 159
157 if (buffer->needs_read_fill) { 160 if (buffer->needs_read_fill) {
158 /* perform first read with buf == NULL to get extent */ 161 /* perform first read with buf == NULL to get extent */
159 len = bin_attr->read(item, NULL, 0); 162 down_read(&frag->frag_sem);
163 if (!frag->frag_dead)
164 len = buffer->bin_attr->read(buffer->item, NULL, 0);
165 else
166 len = -ENOENT;
167 up_read(&frag->frag_sem);
160 if (len <= 0) { 168 if (len <= 0) {
161 retval = len; 169 retval = len;
162 goto out; 170 goto out;
163 } 171 }
164 172
165 /* do not exceed the maximum value */ 173 /* do not exceed the maximum value */
166 if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) { 174 if (buffer->cb_max_size && len > buffer->cb_max_size) {
167 retval = -EFBIG; 175 retval = -EFBIG;
168 goto out; 176 goto out;
169 } 177 }
@@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
176 buffer->bin_buffer_size = len; 184 buffer->bin_buffer_size = len;
177 185
178 /* perform second read to fill buffer */ 186 /* perform second read to fill buffer */
179 len = bin_attr->read(item, buffer->bin_buffer, len); 187 down_read(&frag->frag_sem);
188 if (!frag->frag_dead)
189 len = buffer->bin_attr->read(buffer->item,
190 buffer->bin_buffer, len);
191 else
192 len = -ENOENT;
193 up_read(&frag->frag_sem);
180 if (len < 0) { 194 if (len < 0) {
181 retval = len; 195 retval = len;
182 vfree(buffer->bin_buffer); 196 vfree(buffer->bin_buffer);
@@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
 	return error ? -EFAULT : count;
 }
 
-
-/**
- *	flush_write_buffer - push buffer to config_item.
- *	@dentry:	dentry to the attribute
- *	@buffer:	data buffer for file.
- *	@count:		number of bytes
- *
- *	Get the correct pointers for the config_item and the attribute we're
- *	dealing with, then call the store() method for the attribute,
- *	passing the buffer that we acquired in fill_write_buffer().
- */
-
 static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
 {
-	struct configfs_attribute * attr = to_attr(dentry);
-	struct config_item * item = to_item(dentry->d_parent);
+	struct configfs_fragment *frag = to_frag(file);
+	int res = -ENOENT;
 
-	return attr->store(item, buffer->page, count);
+	down_read(&frag->frag_sem);
+	if (!frag->frag_dead)
+		res = buffer->attr->store(buffer->item, buffer->page, count);
+	up_read(&frag->frag_sem);
+	return res;
 }
 
 
@@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
 static ssize_t
 configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-	struct configfs_buffer * buffer = file->private_data;
+	struct configfs_buffer *buffer = file->private_data;
 	ssize_t len;
 
 	mutex_lock(&buffer->mutex);
 	len = fill_write_buffer(buffer, buf, count);
 	if (len > 0)
-		len = flush_write_buffer(file->f_path.dentry, buffer, len);
+		len = flush_write_buffer(file, buffer, len);
 	if (len > 0)
 		*ppos += len;
 	mutex_unlock(&buffer->mutex);
@@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 		size_t count, loff_t *ppos)
 {
 	struct configfs_buffer *buffer = file->private_data;
-	struct dentry *dentry = file->f_path.dentry;
-	struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
 	void *tbuf = NULL;
 	ssize_t len;
 
@@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 	/* buffer grows? */
 	if (*ppos + count > buffer->bin_buffer_size) {
 
-		if (bin_attr->cb_max_size &&
-		    *ppos + count > bin_attr->cb_max_size) {
+		if (buffer->cb_max_size &&
+		    *ppos + count > buffer->cb_max_size) {
 			len = -EFBIG;
 			goto out;
 		}
@@ -349,31 +353,51 @@ out:
 	return len;
 }
 
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
 {
-	struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
-	struct configfs_attribute * attr = to_attr(file->f_path.dentry);
-	struct configfs_bin_attribute *bin_attr = NULL;
-	struct configfs_buffer * buffer;
-	struct configfs_item_operations * ops = NULL;
-	int error = 0;
+	struct dentry *dentry = file->f_path.dentry;
+	struct configfs_fragment *frag = to_frag(file);
+	struct configfs_attribute *attr;
+	struct configfs_buffer *buffer;
+	int error;
 
-	if (!item || !attr)
-		goto Einval;
+	error = -ENOMEM;
+	buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+	if (!buffer)
+		goto out;
 
-	if (type & CONFIGFS_ITEM_BIN_ATTR)
-		bin_attr = to_bin_attr(file->f_path.dentry);
+	error = -ENOENT;
+	down_read(&frag->frag_sem);
+	if (unlikely(frag->frag_dead))
+		goto out_free_buffer;
 
-	/* Grab the module reference for this attribute if we have one */
-	if (!try_module_get(attr->ca_owner)) {
-		error = -ENODEV;
-		goto Done;
+	error = -EINVAL;
+	buffer->item = to_item(dentry->d_parent);
+	if (!buffer->item)
+		goto out_free_buffer;
+
+	attr = to_attr(dentry);
+	if (!attr)
+		goto out_put_item;
+
+	if (type & CONFIGFS_ITEM_BIN_ATTR) {
+		buffer->bin_attr = to_bin_attr(dentry);
+		buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+	} else {
+		buffer->attr = attr;
 	}
 
-	if (item->ci_type)
-		ops = item->ci_type->ct_item_ops;
-	else
-		goto Eaccess;
+	buffer->owner = attr->ca_owner;
+	/* Grab the module reference for this attribute if we have one */
+	error = -ENODEV;
+	if (!try_module_get(buffer->owner))
+		goto out_put_item;
+
+	error = -EACCES;
+	if (!buffer->item->ci_type)
+		goto out_put_module;
+
+	buffer->ops = buffer->item->ci_type->ct_item_ops;
 
 	/* File needs write support.
 	 * The inode's perms must say it's ok,
@@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
 	 */
 	if (file->f_mode & FMODE_WRITE) {
 		if (!(inode->i_mode & S_IWUGO))
-			goto Eaccess;
-
+			goto out_put_module;
 		if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
-			goto Eaccess;
-
-		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
-			goto Eaccess;
+			goto out_put_module;
+		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+			goto out_put_module;
 	}
 
 	/* File needs read support.
@@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
 
 	if (file->f_mode & FMODE_READ) {
 		if (!(inode->i_mode & S_IRUGO))
-			goto Eaccess;
-
+			goto out_put_module;
 		if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
-			goto Eaccess;
-
-		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
-			goto Eaccess;
+			goto out_put_module;
+		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+			goto out_put_module;
 	}
 
-	/* No error? Great, allocate a buffer for the file, and store it
-	 * it in file->private_data for easy access.
-	 */
-	buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
-	if (!buffer) {
-		error = -ENOMEM;
-		goto Enomem;
-	}
 	mutex_init(&buffer->mutex);
 	buffer->needs_read_fill = 1;
 	buffer->read_in_progress = false;
 	buffer->write_in_progress = false;
-	buffer->ops = ops;
 	file->private_data = buffer;
-	goto Done;
+	up_read(&frag->frag_sem);
+	return 0;
 
- Einval:
-	error = -EINVAL;
-	goto Done;
- Eaccess:
-	error = -EACCES;
- Enomem:
-	module_put(attr->ca_owner);
- Done:
-	if (error && item)
-		config_item_put(item);
+out_put_module:
+	module_put(buffer->owner);
+out_put_item:
+	config_item_put(buffer->item);
+out_free_buffer:
+	up_read(&frag->frag_sem);
+	kfree(buffer);
+out:
 	return error;
 }
 
 static int configfs_release(struct inode *inode, struct file *filp)
 {
-	struct config_item * item = to_item(filp->f_path.dentry->d_parent);
-	struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
-	struct module * owner = attr->ca_owner;
-	struct configfs_buffer * buffer = filp->private_data;
-
-	if (item)
-		config_item_put(item);
-	/* After this point, attr should not be accessed. */
-	module_put(owner);
-
-	if (buffer) {
-		if (buffer->page)
-			free_page((unsigned long)buffer->page);
-		mutex_destroy(&buffer->mutex);
-		kfree(buffer);
-	}
+	struct configfs_buffer *buffer = filp->private_data;
+
+	module_put(buffer->owner);
+	if (buffer->page)
+		free_page((unsigned long)buffer->page);
+	mutex_destroy(&buffer->mutex);
+	kfree(buffer);
 	return 0;
 }
 
 static int configfs_open_file(struct inode *inode, struct file *filp)
 {
-	return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+	return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
 }
 
 static int configfs_open_bin_file(struct inode *inode, struct file *filp)
 {
-	return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+	return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
 }
 
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
 {
-	struct configfs_buffer *buffer = filp->private_data;
-	struct dentry *dentry = filp->f_path.dentry;
-	struct config_item *item = to_item(dentry->d_parent);
-	struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
-	ssize_t len = 0;
-	int ret;
+	struct configfs_buffer *buffer = file->private_data;
 
 	buffer->read_in_progress = false;
 
 	if (buffer->write_in_progress) {
+		struct configfs_fragment *frag = to_frag(file);
 		buffer->write_in_progress = false;
 
-		len = bin_attr->write(item, buffer->bin_buffer,
-				buffer->bin_buffer_size);
-
+		down_read(&frag->frag_sem);
+		if (!frag->frag_dead) {
+			/* result of ->release() is ignored */
+			buffer->bin_attr->write(buffer->item,
+					buffer->bin_buffer,
+					buffer->bin_buffer_size);
+		}
+		up_read(&frag->frag_sem);
 		/* vfree on NULL is safe */
 		vfree(buffer->bin_buffer);
 		buffer->bin_buffer = NULL;
@@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
 		buffer->needs_read_fill = 1;
 	}
 
-	ret = configfs_release(inode, filp);
-	if (len < 0)
-		return len;
-	return ret;
+	configfs_release(inode, file);
+	return 0;
 }
 
 
@@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
 
 	inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
 	error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
-				     CONFIGFS_ITEM_ATTR);
+				     CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
 	inode_unlock(d_inode(dir));
 
 	return error;
@@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item,
 
 	inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
 	error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
-				     CONFIGFS_ITEM_BIN_ATTR);
+				     CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
 	inode_unlock(dir->d_inode);
 
 	return error;
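
The common thread in the configfs changes above: every attribute callback is now bracketed by a read-side fragment lock plus a liveness check, while teardown marks the fragment dead under the write side, so a callback can never run against an item that rmdir already killed. Below is a minimal userspace sketch of that pattern using a pthread rwlock in place of the kernel rwsem; struct fragment, guarded_store() and kill_fragment() are illustrative stand-ins, not configfs APIs.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct configfs_fragment: a rwsem plus a dead flag. */
struct fragment {
	pthread_rwlock_t frag_sem;
	bool frag_dead;
};

/* Reader side: mirrors the new flush_write_buffer()/read paths. */
static int guarded_store(struct fragment *frag,
			 int (*store)(const char *, size_t),
			 const char *page, size_t count)
{
	int res = -ENOENT;

	pthread_rwlock_rdlock(&frag->frag_sem);
	if (!frag->frag_dead)
		res = store(page, count);	/* item is still alive here */
	pthread_rwlock_unlock(&frag->frag_sem);
	return res;
}

/* Teardown side: once this returns, no new callback can start. */
static void kill_fragment(struct fragment *frag)
{
	pthread_rwlock_wrlock(&frag->frag_sem);
	frag->frag_dead = true;
	pthread_rwlock_unlock(&frag->frag_sem);
}

static int demo_store(const char *page, size_t count)
{
	printf("stored %zu bytes: %.*s\n", count, (int)count, page);
	return (int)count;
}

int main(void)
{
	struct fragment frag = {
		.frag_sem = PTHREAD_RWLOCK_INITIALIZER,
		.frag_dead = false,
	};

	guarded_store(&frag, demo_store, "hello", 5);	/* runs the callback */
	kill_fragment(&frag);
	guarded_store(&frag, demo_store, "late", 4);	/* returns -ENOENT */
	return 0;
}

The same shape covers open: __configfs_open_file() caches the item and attribute pointers in the buffer while the fragment is known to be alive, so the later I/O paths never have to walk the dentry again.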
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 24bbe3cb7ad4..cfb48bd088e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
679 io_free_req(req); 679 io_free_req(req);
680} 680}
681 681
682static unsigned io_cqring_events(struct io_cq_ring *ring)
683{
684 /* See comment at the top of this file */
685 smp_rmb();
686 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
687}
688
682/* 689/*
683 * Find and free completed poll iocbs 690 * Find and free completed poll iocbs
684 */ 691 */
@@ -771,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
771static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, 778static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
772 long min) 779 long min)
773{ 780{
774 while (!list_empty(&ctx->poll_list)) { 781 while (!list_empty(&ctx->poll_list) && !need_resched()) {
775 int ret; 782 int ret;
776 783
777 ret = io_do_iopoll(ctx, nr_events, min); 784 ret = io_do_iopoll(ctx, nr_events, min);
@@ -798,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
798 unsigned int nr_events = 0; 805 unsigned int nr_events = 0;
799 806
800 io_iopoll_getevents(ctx, &nr_events, 1); 807 io_iopoll_getevents(ctx, &nr_events, 1);
808
809 /*
810 * Ensure we allow local-to-the-cpu processing to take place,
811 * in this case we need to ensure that we reap all events.
812 */
813 cond_resched();
801 } 814 }
802 mutex_unlock(&ctx->uring_lock); 815 mutex_unlock(&ctx->uring_lock);
803} 816}
@@ -805,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
805static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, 818static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
806 long min) 819 long min)
807{ 820{
808 int ret = 0; 821 int iters, ret = 0;
809 822
823 /*
824 * We disallow the app entering submit/complete with polling, but we
825 * still need to lock the ring to prevent racing with polled issue
826 * that got punted to a workqueue.
827 */
828 mutex_lock(&ctx->uring_lock);
829
830 iters = 0;
810 do { 831 do {
811 int tmin = 0; 832 int tmin = 0;
812 833
834 /*
835 * Don't enter poll loop if we already have events pending.
836 * If we do, we can potentially be spinning for commands that
837 * already triggered a CQE (eg in error).
838 */
839 if (io_cqring_events(ctx->cq_ring))
840 break;
841
842 /*
843 * If a submit got punted to a workqueue, we can have the
844 * application entering polling for a command before it gets
845 * issued. That app will hold the uring_lock for the duration
846 * of the poll right here, so we need to take a breather every
847 * now and then to ensure that the issue has a chance to add
848 * the poll to the issued list. Otherwise we can spin here
849 * forever, while the workqueue is stuck trying to acquire the
850 * very same mutex.
851 */
852 if (!(++iters & 7)) {
853 mutex_unlock(&ctx->uring_lock);
854 mutex_lock(&ctx->uring_lock);
855 }
856
813 if (*nr_events < min) 857 if (*nr_events < min)
814 tmin = min - *nr_events; 858 tmin = min - *nr_events;
815 859
@@ -819,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
819 ret = 0; 863 ret = 0;
820 } while (min && !*nr_events && !need_resched()); 864 } while (min && !*nr_events && !need_resched());
821 865
866 mutex_unlock(&ctx->uring_lock);
822 return ret; 867 return ret;
823} 868}
824 869
@@ -2280,15 +2325,7 @@ static int io_sq_thread(void *data)
2280 unsigned nr_events = 0; 2325 unsigned nr_events = 0;
2281 2326
2282 if (ctx->flags & IORING_SETUP_IOPOLL) { 2327 if (ctx->flags & IORING_SETUP_IOPOLL) {
2283 /*
2284 * We disallow the app entering submit/complete
2285 * with polling, but we still need to lock the
2286 * ring to prevent racing with polled issue
2287 * that got punted to a workqueue.
2288 */
2289 mutex_lock(&ctx->uring_lock);
2290 io_iopoll_check(ctx, &nr_events, 0); 2328 io_iopoll_check(ctx, &nr_events, 0);
2291 mutex_unlock(&ctx->uring_lock);
2292 } else { 2329 } else {
2293 /* 2330 /*
2294 * Normal IO, just pretend everything completed. 2331 * Normal IO, just pretend everything completed.
@@ -2433,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2433 return submit; 2470 return submit;
2434} 2471}
2435 2472
2436static unsigned io_cqring_events(struct io_cq_ring *ring)
2437{
2438 /* See comment at the top of this file */
2439 smp_rmb();
2440 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
2441}
2442
2443/* 2473/*
2444 * Wait until events become available, if we don't already have some. The 2474 * Wait until events become available, if we don't already have some. The
2445 * application must reap them itself, as they reside on the shared cq ring. 2475 * application must reap them itself, as they reside on the shared cq ring.
@@ -3190,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3190 min_complete = min(min_complete, ctx->cq_entries); 3220 min_complete = min(min_complete, ctx->cq_entries);
3191 3221
3192 if (ctx->flags & IORING_SETUP_IOPOLL) { 3222 if (ctx->flags & IORING_SETUP_IOPOLL) {
3193 mutex_lock(&ctx->uring_lock);
3194 ret = io_iopoll_check(ctx, &nr_events, min_complete); 3223 ret = io_iopoll_check(ctx, &nr_events, min_complete);
3195 mutex_unlock(&ctx->uring_lock);
3196 } else { 3224 } else {
3197 ret = io_cqring_wait(ctx, min_complete, sig, sigsz); 3225 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3198 } 3226 }
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..0adfd8840110 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode)) 1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
1488 nfs_file_set_open_context(file, ctx); 1488 nfs_file_set_open_context(file, ctx);
1489 else 1489 else
1490 err = -ESTALE; 1490 err = -EOPENSTALE;
1491out: 1491out:
1492 return err; 1492 return err;
1493} 1493}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..222d7115db71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
401 unsigned long bytes = 0; 401 unsigned long bytes = 0;
402 struct nfs_direct_req *dreq = hdr->dreq; 402 struct nfs_direct_req *dreq = hdr->dreq;
403 403
404 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
405 goto out_put;
406
407 spin_lock(&dreq->lock); 404 spin_lock(&dreq->lock);
408 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) 405 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
409 dreq->error = hdr->error; 406 dreq->error = hdr->error;
410 else 407
408 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
409 spin_unlock(&dreq->lock);
410 goto out_put;
411 }
412
413 if (hdr->good_bytes != 0)
411 nfs_direct_good_bytes(dreq, hdr); 414 nfs_direct_good_bytes(dreq, hdr);
412 415
416 if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
417 dreq->error = 0;
418
413 spin_unlock(&dreq->lock); 419 spin_unlock(&dreq->lock);
414 420
415 while (!list_empty(&hdr->pages)) { 421 while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
782 bool request_commit = false; 788 bool request_commit = false;
783 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 789 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
784 790
785 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
786 goto out_put;
787
788 nfs_init_cinfo_from_dreq(&cinfo, dreq); 791 nfs_init_cinfo_from_dreq(&cinfo, dreq);
789 792
790 spin_lock(&dreq->lock); 793 spin_lock(&dreq->lock);
791 794
792 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) 795 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
793 dreq->error = hdr->error; 796 dreq->error = hdr->error;
794 if (dreq->error == 0) { 797
798 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
799 spin_unlock(&dreq->lock);
800 goto out_put;
801 }
802
803 if (hdr->good_bytes != 0) {
795 nfs_direct_good_bytes(dreq, hdr); 804 nfs_direct_good_bytes(dreq, hdr);
796 if (nfs_write_need_commit(hdr)) { 805 if (nfs_write_need_commit(hdr)) {
797 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 806 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
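
Both nfs/direct.c completion paths get the same reordering: take the request lock first, record any error, and only then test NFS_IOHDR_REDO, so a header that is about to be resent can neither race with the byte accounting nor lose an error. A compressed model of the new control flow, with stand-in types and helpers rather than the real NFS structures:

#include <pthread.h>
#include <stdbool.h>

struct pgio_hdr { bool error_flag, redo_flag; int error; long good_bytes; };
struct direct_req { pthread_mutex_t lock; int error; long bytes; };

static void account_good_bytes(struct direct_req *dreq, struct pgio_hdr *hdr)
{
	dreq->bytes += hdr->good_bytes;
}

static void put_hdr(struct pgio_hdr *hdr) { (void)hdr; }

static void read_completion(struct direct_req *dreq, struct pgio_hdr *hdr)
{
	pthread_mutex_lock(&dreq->lock);
	if (hdr->error_flag)
		dreq->error = hdr->error;
	if (hdr->redo_flag) {
		/* Will be resent: drop the lock and account nothing. */
		pthread_mutex_unlock(&dreq->lock);
		goto out_put;
	}
	if (hdr->good_bytes != 0)
		account_good_bytes(dreq, hdr);
	pthread_mutex_unlock(&dreq->lock);
out_put:
	put_hdr(hdr);
}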
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/nfs_fs.h> 10#include <linux/nfs_fs.h>
11#include <linux/nfs_mount.h>
11#include <linux/nfs_page.h> 12#include <linux/nfs_page.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/sched/mm.h> 14#include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
928 pgm = &pgio->pg_mirrors[0]; 929 pgm = &pgio->pg_mirrors[0];
929 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 930 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
930 931
931 pgio->pg_maxretrans = io_maxretrans; 932 if (NFS_SERVER(pgio->pg_inode)->flags &
933 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
934 pgio->pg_maxretrans = io_maxretrans;
932 return; 935 return;
933out_nolseg: 936out_nolseg:
934 if (pgio->pg_error < 0) 937 if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
940 pgio->pg_lseg); 943 pgio->pg_lseg);
941 pnfs_put_lseg(pgio->pg_lseg); 944 pnfs_put_lseg(pgio->pg_lseg);
942 pgio->pg_lseg = NULL; 945 pgio->pg_lseg = NULL;
946 pgio->pg_maxretrans = 0;
943 nfs_pageio_reset_read_mds(pgio); 947 nfs_pageio_reset_read_mds(pgio);
944} 948}
945 949
@@ -1000,7 +1004,9 @@ retry:
1000 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; 1004 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
1001 } 1005 }
1002 1006
1003 pgio->pg_maxretrans = io_maxretrans; 1007 if (NFS_SERVER(pgio->pg_inode)->flags &
1008 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1009 pgio->pg_maxretrans = io_maxretrans;
1004 return; 1010 return;
1005 1011
1006out_mds: 1012out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
1010 pgio->pg_lseg); 1016 pgio->pg_lseg);
1011 pnfs_put_lseg(pgio->pg_lseg); 1017 pnfs_put_lseg(pgio->pg_lseg);
1012 pgio->pg_lseg = NULL; 1018 pgio->pg_lseg = NULL;
1019 pgio->pg_maxretrans = 0;
1013 nfs_pageio_reset_write_mds(pgio); 1020 nfs_pageio_reset_write_mds(pgio);
1014} 1021}
1015 1022
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1148 break; 1155 break;
1149 case -NFS4ERR_RETRY_UNCACHED_REP: 1156 case -NFS4ERR_RETRY_UNCACHED_REP:
1150 break; 1157 break;
1151 case -EAGAIN:
1152 return -NFS4ERR_RESET_TO_PNFS;
1153 /* Invalidate Layout errors */ 1158 /* Invalidate Layout errors */
1154 case -NFS4ERR_PNFS_NO_LAYOUT: 1159 case -NFS4ERR_PNFS_NO_LAYOUT:
1155 case -ESTALE: /* mapped NFS4ERR_STALE */ 1160 case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1210 case -EBADHANDLE: 1215 case -EBADHANDLE:
1211 case -ELOOP: 1216 case -ELOOP:
1212 case -ENOSPC: 1217 case -ENOSPC:
1213 case -EAGAIN:
1214 break; 1218 break;
1215 case -EJUKEBOX: 1219 case -EJUKEBOX:
1216 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1220 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1445 ff_layout_read_prepare_common(task, hdr); 1449 ff_layout_read_prepare_common(task, hdr);
1446} 1450}
1447 1451
1448static void
1449ff_layout_io_prepare_transmit(struct rpc_task *task,
1450 void *data)
1451{
1452 struct nfs_pgio_header *hdr = data;
1453
1454 if (!pnfs_is_valid_lseg(hdr->lseg))
1455 rpc_exit(task, -EAGAIN);
1456}
1457
1458static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1452static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1459{ 1453{
1460 struct nfs_pgio_header *hdr = data; 1454 struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
1740 1734
1741static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { 1735static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1742 .rpc_call_prepare = ff_layout_read_prepare_v3, 1736 .rpc_call_prepare = ff_layout_read_prepare_v3,
1743 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1744 .rpc_call_done = ff_layout_read_call_done, 1737 .rpc_call_done = ff_layout_read_call_done,
1745 .rpc_count_stats = ff_layout_read_count_stats, 1738 .rpc_count_stats = ff_layout_read_count_stats,
1746 .rpc_release = ff_layout_read_release, 1739 .rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1748 1741
1749static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { 1742static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1750 .rpc_call_prepare = ff_layout_read_prepare_v4, 1743 .rpc_call_prepare = ff_layout_read_prepare_v4,
1751 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1752 .rpc_call_done = ff_layout_read_call_done, 1744 .rpc_call_done = ff_layout_read_call_done,
1753 .rpc_count_stats = ff_layout_read_count_stats, 1745 .rpc_count_stats = ff_layout_read_count_stats,
1754 .rpc_release = ff_layout_read_release, 1746 .rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1756 1748
1757static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { 1749static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1758 .rpc_call_prepare = ff_layout_write_prepare_v3, 1750 .rpc_call_prepare = ff_layout_write_prepare_v3,
1759 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1760 .rpc_call_done = ff_layout_write_call_done, 1751 .rpc_call_done = ff_layout_write_call_done,
1761 .rpc_count_stats = ff_layout_write_count_stats, 1752 .rpc_count_stats = ff_layout_write_count_stats,
1762 .rpc_release = ff_layout_write_release, 1753 .rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1764 1755
1765static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { 1756static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1766 .rpc_call_prepare = ff_layout_write_prepare_v4, 1757 .rpc_call_prepare = ff_layout_write_prepare_v4,
1767 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1768 .rpc_call_done = ff_layout_write_call_done, 1758 .rpc_call_done = ff_layout_write_call_done,
1769 .rpc_count_stats = ff_layout_write_count_stats, 1759 .rpc_count_stats = ff_layout_write_count_stats,
1770 .rpc_release = ff_layout_write_release, 1760 .rpc_release = ff_layout_write_release,
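
The flexfilelayout hunks encode one policy in two places: a retry cap only makes sense on soft mounts, and it must be cleared again when I/O falls back to the MDS so the cap cannot leak into a path it was never meant for. A standalone sketch of that pairing; the flag values and struct are illustrative, not the kernel's:

#include <stdbool.h>

#define MOUNT_SOFT	0x1	/* stand-ins for NFS_MOUNT_SOFT(ERR) */
#define MOUNT_SOFTERR	0x2

struct pgio_desc { unsigned mount_flags; int maxretrans; };

static void pnfs_setup_io(struct pgio_desc *pg, int io_maxretrans)
{
	/* Hard mounts must retry forever: cap retries only when soft. */
	if (pg->mount_flags & (MOUNT_SOFT | MOUNT_SOFTERR))
		pg->maxretrans = io_maxretrans;
}

static void fall_back_to_mds(struct pgio_desc *pg)
{
	/* The cap was pNFS policy; reset it before rerouting to the MDS. */
	pg->maxretrans = 0;
}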
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..2a03bfeec10a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,22 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1404 return 0; 1404 return 0;
1405 1405
1406 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
1407 /* Only a mounted-on-fileid? Just exit */
1408 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1409 return 0;
1406 /* Has the inode gone and changed behind our back? */ 1410 /* Has the inode gone and changed behind our back? */
1407 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) 1411 } else if (nfsi->fileid != fattr->fileid) {
1412 /* Is this perhaps the mounted-on fileid? */
1413 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1414 nfsi->fileid == fattr->mounted_on_fileid)
1415 return 0;
1408 return -ESTALE; 1416 return -ESTALE;
1417 }
1409 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1418 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
1410 return -ESTALE; 1419 return -ESTALE;
1411 1420
1421
1412 if (!nfs_file_has_buffered_writers(nfsi)) { 1422 if (!nfs_file_has_buffered_writers(nfsi)) {
1413 /* Verify a few of the more important attributes */ 1423 /* Verify a few of the more important attributes */
1414 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr)) 1424 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1778,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
1768EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); 1778EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
1769 1779
1770 1780
1771static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
1772 struct nfs_fattr *fattr)
1773{
1774 bool ret1 = true, ret2 = true;
1775
1776 if (fattr->valid & NFS_ATTR_FATTR_FILEID)
1777 ret1 = (nfsi->fileid == fattr->fileid);
1778 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1779 ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
1780 return ret1 || ret2;
1781}
1782
1783/* 1781/*
1784 * Many nfs protocol calls return the new file attributes after 1782 * Many nfs protocol calls return the new file attributes after
1785 * an operation. Here we update the inode to reflect the state 1783 * an operation. Here we update the inode to reflect the state
@@ -1810,7 +1808,16 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1810 nfs_display_fhandle_hash(NFS_FH(inode)), 1808 nfs_display_fhandle_hash(NFS_FH(inode)),
1811 atomic_read(&inode->i_count), fattr->valid); 1809 atomic_read(&inode->i_count), fattr->valid);
1812 1810
1813 if (!nfs_fileid_valid(nfsi, fattr)) { 1811 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
1812 /* Only a mounted-on-fileid? Just exit */
1813 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1814 return 0;
1815 /* Has the inode gone and changed behind our back? */
1816 } else if (nfsi->fileid != fattr->fileid) {
1817 /* Is this perhaps the mounted-on fileid? */
1818 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1819 nfsi->fileid == fattr->mounted_on_fileid)
1820 return 0;
1814 printk(KERN_ERR "NFS: server %s error: fileid changed\n" 1821 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
1815 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n", 1822 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
1816 NFS_SERVER(inode)->nfs_client->cl_hostname, 1823 NFS_SERVER(inode)->nfs_client->cl_hostname,
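
The same fileid test now appears verbatim in nfs_check_inode_attributes() and nfs_update_inode(), replacing the looser nfs_fileid_valid() helper that accepted a match on either fileid. Its decision table, reduced to a standalone predicate (names simplified; the real callers also return early on the mounted-on case rather than continuing with further attribute checks):

#include <errno.h>
#include <stdint.h>

#define FATTR_FILEID		0x1	/* stand-ins for NFS_ATTR_FATTR_* */
#define FATTR_MOUNTED_ON_FILEID	0x2

struct fattr { unsigned valid; uint64_t fileid, mounted_on_fileid; };

/* Returns 0 when the attributes are usable for this inode, -ESTALE
 * when the fileid really changed behind our back. */
static int check_fileid(uint64_t inode_fileid, const struct fattr *fattr)
{
	if (!(fattr->valid & FATTR_FILEID)) {
		/* Only a mounted-on fileid? Nothing to verify. */
		return 0;
	}
	if (inode_fileid == fattr->fileid)
		return 0;
	/* A mismatch is fine if we were handed the mounted-on fileid. */
	if ((fattr->valid & FATTR_MOUNTED_ON_FILEID) &&
	    inode_fileid == fattr->mounted_on_fileid)
		return 0;
	return -ESTALE;
}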
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..e64f810223be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
775 } 775 }
776} 776}
777 777
778static inline bool nfs_error_is_fatal_on_server(int err)
779{
780 switch (err) {
781 case 0:
782 case -ERESTARTSYS:
783 case -EINTR:
784 return false;
785 }
786 return nfs_error_is_fatal(err);
787}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
73 if (IS_ERR(inode)) { 73 if (IS_ERR(inode)) {
74 err = PTR_ERR(inode); 74 err = PTR_ERR(inode);
75 switch (err) { 75 switch (err) {
76 case -EPERM:
77 case -EACCES:
78 case -EDQUOT:
79 case -ENOSPC:
80 case -EROFS:
81 goto out_put_ctx;
82 default: 76 default:
77 goto out_put_ctx;
78 case -ENOENT:
79 case -ESTALE:
80 case -EISDIR:
81 case -ENOTDIR:
82 case -ELOOP:
83 goto out_drop; 83 goto out_drop;
84 } 84 }
85 } 85 }
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
590 } 590 }
591 591
592 hdr->res.fattr = &hdr->fattr; 592 hdr->res.fattr = &hdr->fattr;
593 hdr->res.count = count; 593 hdr->res.count = 0;
594 hdr->res.eof = 0; 594 hdr->res.eof = 0;
595 hdr->res.verf = &hdr->verf; 595 hdr->res.verf = &hdr->verf;
596 nfs_fattr_init(&hdr->fattr); 596 nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, 1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1252 struct nfs_pgio_header *hdr) 1252 struct nfs_pgio_header *hdr)
1253{ 1253{
1254 LIST_HEAD(failed); 1254 LIST_HEAD(pages);
1255 1255
1256 desc->pg_io_completion = hdr->io_completion; 1256 desc->pg_io_completion = hdr->io_completion;
1257 desc->pg_dreq = hdr->dreq; 1257 desc->pg_dreq = hdr->dreq;
1258 while (!list_empty(&hdr->pages)) { 1258 list_splice_init(&hdr->pages, &pages);
1259 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 1259 while (!list_empty(&pages)) {
1260 struct nfs_page *req = nfs_list_entry(pages.next);
1260 1261
1261 if (!nfs_pageio_add_request(desc, req)) 1262 if (!nfs_pageio_add_request(desc, req))
1262 nfs_list_move_request(req, &failed); 1263 break;
1263 } 1264 }
1264 nfs_pageio_complete(desc); 1265 nfs_pageio_complete(desc);
1265 if (!list_empty(&failed)) { 1266 if (!list_empty(&pages)) {
1266 list_move(&failed, &hdr->pages); 1267 int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1267 return desc->pg_error < 0 ? desc->pg_error : -EIO; 1268 hdr->completion_ops->error_cleanup(&pages, err);
1269 nfs_set_pgio_error(hdr, err, hdr->io_start);
1270 return err;
1268 } 1271 }
1269 return 0; 1272 return 0;
1270} 1273}
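
nfs_pageio_resend() now splices the whole page list off the header up front, stops at the first request that fails to queue, and routes everything still pending through the completion ops' error cleanup instead of silently re-attaching a "failed" list. A generic model of that splice-and-drain shape, with a singly linked list standing in for the kernel's list_head:

#include <errno.h>
#include <stddef.h>

struct page_req { struct page_req *next; };

static int resend(struct page_req **hdr_pages,
		  int (*add_request)(struct page_req *),
		  void (*error_cleanup)(struct page_req *, int),
		  int pg_error)
{
	/* Splice: take ownership of the whole list before touching it. */
	struct page_req *pages = *hdr_pages;

	*hdr_pages = NULL;
	while (pages) {
		struct page_req *req = pages;

		if (!add_request(req))
			break;	/* req and everything after it stays queued */
		pages = req->next;
	}
	if (pages) {
		int err = pg_error < 0 ? pg_error : -EIO;

		error_cleanup(pages, err);	/* drain the leftovers */
		return err;
	}
	return 0;
}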
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
627 /* Add this address as an alias */ 627 /* Add this address as an alias */
628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
629 rpc_clnt_test_and_add_xprt, NULL); 629 rpc_clnt_test_and_add_xprt, NULL);
630 } else 630 continue;
631 clp = get_v3_ds_connect(mds_srv, 631 }
632 (struct sockaddr *)&da->da_addr, 632 clp = get_v3_ds_connect(mds_srv,
633 da->da_addrlen, IPPROTO_TCP, 633 (struct sockaddr *)&da->da_addr,
634 timeo, retrans); 634 da->da_addrlen, IPPROTO_TCP,
635 timeo, retrans);
636 if (IS_ERR(clp))
637 continue;
638 clp->cl_rpcclient->cl_softerr = 0;
639 clp->cl_rpcclient->cl_softrtry = 0;
635 } 640 }
636 641
637 if (IS_ERR(clp)) { 642 if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
594 /* Emulate the eof flag, which isn't normally needed in NFSv2 594 /* Emulate the eof flag, which isn't normally needed in NFSv2
595 * as it is guaranteed to always return the file attributes 595 * as it is guaranteed to always return the file attributes
596 */ 596 */
597 if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) 597 if ((hdr->res.count == 0 && hdr->args.count > 0) ||
598 hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
598 hdr->res.eof = 1; 599 hdr->res.eof = 1;
599 } 600 }
600 return 0; 601 return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
615 616
616static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 617static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
617{ 618{
618 if (task->tk_status >= 0) 619 if (task->tk_status >= 0) {
620 hdr->res.count = hdr->args.count;
619 nfs_writeback_update_inode(hdr); 621 nfs_writeback_update_inode(hdr);
622 }
620 return 0; 623 return 0;
621} 624}
622 625
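
The nfs/proc.c read fix handles a server that returns zero bytes for a non-empty request: NFSv2 has no eof bit on the wire, so a short-to-zero reply must be treated as end of file even when the offset arithmetic against the returned attributes says otherwise. The emulation as a pure predicate; argument names are descriptive, not the kernel's:

#include <stdbool.h>
#include <stdint.h>

static bool nfsv2_read_eof(uint64_t offset, uint32_t res_count,
			   uint32_t args_count, uint64_t file_size)
{
	/* Asked for bytes, got none: the server is at end of file. */
	if (res_count == 0 && args_count > 0)
		return true;
	/* Otherwise fall back to the attribute-based size check. */
	return offset + res_count >= file_size;
}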
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
91} 91}
92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
93 93
94static void nfs_readpage_release(struct nfs_page *req) 94static void nfs_readpage_release(struct nfs_page *req, int error)
95{ 95{
96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); 96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
97 struct page *page = req->wb_page;
97 98
98 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, 99 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
99 (unsigned long long)NFS_FILEID(inode), req->wb_bytes, 100 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
100 (long long)req_offset(req)); 101 (long long)req_offset(req));
101 102
103 if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
104 SetPageError(page);
102 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { 105 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
103 if (PageUptodate(req->wb_page)) 106 struct address_space *mapping = page_file_mapping(page);
104 nfs_readpage_to_fscache(inode, req->wb_page, 0);
105 107
106 unlock_page(req->wb_page); 108 if (PageUptodate(page))
109 nfs_readpage_to_fscache(inode, page, 0);
110 else if (!PageError(page) && !PagePrivate(page))
111 generic_error_remove_page(mapping, page);
112 unlock_page(page);
107 } 113 }
108 nfs_release_request(req); 114 nfs_release_request(req);
109} 115}
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
131 &nfs_async_read_completion_ops); 137 &nfs_async_read_completion_ops);
132 if (!nfs_pageio_add_request(&pgio, new)) { 138 if (!nfs_pageio_add_request(&pgio, new)) {
133 nfs_list_remove_request(new); 139 nfs_list_remove_request(new);
134 nfs_readpage_release(new); 140 nfs_readpage_release(new, pgio.pg_error);
135 } 141 }
136 nfs_pageio_complete(&pgio); 142 nfs_pageio_complete(&pgio);
137 143
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
153static void nfs_read_completion(struct nfs_pgio_header *hdr) 159static void nfs_read_completion(struct nfs_pgio_header *hdr)
154{ 160{
155 unsigned long bytes = 0; 161 unsigned long bytes = 0;
162 int error;
156 163
157 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 164 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
158 goto out; 165 goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
179 zero_user_segment(page, start, end); 186 zero_user_segment(page, start, end);
180 } 187 }
181 } 188 }
189 error = 0;
182 bytes += req->wb_bytes; 190 bytes += req->wb_bytes;
183 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { 191 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
184 if (bytes <= hdr->good_bytes) 192 if (bytes <= hdr->good_bytes)
185 nfs_page_group_set_uptodate(req); 193 nfs_page_group_set_uptodate(req);
194 else {
195 error = hdr->error;
196 xchg(&nfs_req_openctx(req)->error, error);
197 }
186 } else 198 } else
187 nfs_page_group_set_uptodate(req); 199 nfs_page_group_set_uptodate(req);
188 nfs_list_remove_request(req); 200 nfs_list_remove_request(req);
189 nfs_readpage_release(req); 201 nfs_readpage_release(req, error);
190 } 202 }
191out: 203out:
192 hdr->release(hdr); 204 hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
213 while (!list_empty(head)) { 225 while (!list_empty(head)) {
214 req = nfs_list_entry(head->next); 226 req = nfs_list_entry(head->next);
215 nfs_list_remove_request(req); 227 nfs_list_remove_request(req);
216 nfs_readpage_release(req); 228 nfs_readpage_release(req, error);
217 } 229 }
218} 230}
219 231
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
337 goto out; 349 goto out;
338 } 350 }
339 351
352 xchg(&ctx->error, 0);
340 error = nfs_readpage_async(ctx, inode, page); 353 error = nfs_readpage_async(ctx, inode, page);
341 354 if (!error) {
355 error = wait_on_page_locked_killable(page);
356 if (!PageUptodate(page) && !error)
357 error = xchg(&ctx->error, 0);
358 }
342out: 359out:
343 put_nfs_open_context(ctx); 360 put_nfs_open_context(ctx);
344 return error; 361 return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
372 zero_user_segment(page, len, PAGE_SIZE); 389 zero_user_segment(page, len, PAGE_SIZE);
373 if (!nfs_pageio_add_request(desc->pgio, new)) { 390 if (!nfs_pageio_add_request(desc->pgio, new)) {
374 nfs_list_remove_request(new); 391 nfs_list_remove_request(new);
375 nfs_readpage_release(new);
376 error = desc->pgio->pg_error; 392 error = desc->pgio->pg_error;
393 nfs_readpage_release(new, error);
377 goto out; 394 goto out;
378 } 395 }
379 return 0; 396 return 0;
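
nfs_readpage() now implements a small handshake with the async completion side: clear the per-context error, fire the read, wait for the page lock to drop, and only consume a stashed error if the page never became uptodate. Sketched with C11 atomics standing in for the kernel's xchg(); the function pointers are hypothetical stand-ins for the page-cache helpers:

#include <stdatomic.h>

static int read_page_sync(atomic_int *ctx_error,
			  int (*start_async_read)(void),
			  int (*wait_page_unlocked)(void),
			  int (*page_uptodate)(void))
{
	int error;

	atomic_exchange(ctx_error, 0);	/* drop any stale error first */
	error = start_async_read();
	if (!error) {
		error = wait_page_unlocked();
		/* Success is "page uptodate"; otherwise the completion
		 * path stashed the real error in the open context. */
		if (!page_uptodate() && !error)
			error = atomic_exchange(ctx_error, 0);
	}
	return error;
}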
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..85ca49549b39 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
58static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 58static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
59static const struct nfs_rw_ops nfs_rw_write_ops; 59static const struct nfs_rw_ops nfs_rw_write_ops;
60static void nfs_inode_remove_request(struct nfs_page *req);
60static void nfs_clear_request_commit(struct nfs_page *req); 61static void nfs_clear_request_commit(struct nfs_page *req);
61static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 62static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
62 struct inode *inode); 63 struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
591 592
592static void nfs_write_error(struct nfs_page *req, int error) 593static void nfs_write_error(struct nfs_page *req, int error)
593{ 594{
595 nfs_set_pageerror(page_file_mapping(req->wb_page));
594 nfs_mapping_set_error(req->wb_page, error); 596 nfs_mapping_set_error(req->wb_page, error);
597 nfs_inode_remove_request(req);
595 nfs_end_page_writeback(req); 598 nfs_end_page_writeback(req);
596 nfs_release_request(req); 599 nfs_release_request(req);
597} 600}
598 601
599static bool
600nfs_error_is_fatal_on_server(int err)
601{
602 switch (err) {
603 case 0:
604 case -ERESTARTSYS:
605 case -EINTR:
606 return false;
607 }
608 return nfs_error_is_fatal(err);
609}
610
611/* 602/*
612 * Find an associated nfs write request, and prepare to flush it out 603 * Find an associated nfs write request, and prepare to flush it out
613 * May return an error if the user signalled nfs_wait_on_request(). 604 * May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
615static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 606static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
616 struct page *page) 607 struct page *page)
617{ 608{
618 struct address_space *mapping;
619 struct nfs_page *req; 609 struct nfs_page *req;
620 int ret = 0; 610 int ret = 0;
621 611
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
630 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 620 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
631 621
632 /* If there is a fatal error that covers this write, just exit */ 622 /* If there is a fatal error that covers this write, just exit */
633 ret = 0; 623 ret = pgio->pg_error;
634 mapping = page_file_mapping(page); 624 if (nfs_error_is_fatal_on_server(ret))
635 if (test_bit(AS_ENOSPC, &mapping->flags) ||
636 test_bit(AS_EIO, &mapping->flags))
637 goto out_launder; 625 goto out_launder;
638 626
627 ret = 0;
639 if (!nfs_pageio_add_request(pgio, req)) { 628 if (!nfs_pageio_add_request(pgio, req)) {
640 ret = pgio->pg_error; 629 ret = pgio->pg_error;
641 /* 630 /*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
647 } else 636 } else
648 ret = -EAGAIN; 637 ret = -EAGAIN;
649 nfs_redirty_request(req); 638 nfs_redirty_request(req);
639 pgio->pg_error = 0;
650 } else 640 } else
651 nfs_add_stats(page_file_mapping(page)->host, 641 nfs_add_stats(page_file_mapping(page)->host,
652 NFSIOS_WRITEPAGES, 1); 642 NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
666 ret = nfs_page_async_flush(pgio, page); 656 ret = nfs_page_async_flush(pgio, page);
667 if (ret == -EAGAIN) { 657 if (ret == -EAGAIN) {
668 redirty_page_for_writepage(wbc, page); 658 redirty_page_for_writepage(wbc, page);
669 ret = 0; 659 ret = AOP_WRITEPAGE_ACTIVATE;
670 } 660 }
671 return ret; 661 return ret;
672} 662}
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
685 nfs_pageio_init_write(&pgio, inode, 0, 675 nfs_pageio_init_write(&pgio, inode, 0,
686 false, &nfs_async_write_completion_ops); 676 false, &nfs_async_write_completion_ops);
687 err = nfs_do_writepage(page, wbc, &pgio); 677 err = nfs_do_writepage(page, wbc, &pgio);
678 pgio.pg_error = 0;
688 nfs_pageio_complete(&pgio); 679 nfs_pageio_complete(&pgio);
689 if (err < 0) 680 if (err < 0)
690 return err; 681 return err;
691 if (pgio.pg_error < 0) 682 if (nfs_error_is_fatal(pgio.pg_error))
692 return pgio.pg_error; 683 return pgio.pg_error;
693 return 0; 684 return 0;
694} 685}
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
698 int ret; 689 int ret;
699 690
700 ret = nfs_writepage_locked(page, wbc); 691 ret = nfs_writepage_locked(page, wbc);
701 unlock_page(page); 692 if (ret != AOP_WRITEPAGE_ACTIVATE)
693 unlock_page(page);
702 return ret; 694 return ret;
703} 695}
704 696
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
707 int ret; 699 int ret;
708 700
709 ret = nfs_do_writepage(page, wbc, data); 701 ret = nfs_do_writepage(page, wbc, data);
710 unlock_page(page); 702 if (ret != AOP_WRITEPAGE_ACTIVATE)
703 unlock_page(page);
711 return ret; 704 return ret;
712} 705}
713 706
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
733 &nfs_async_write_completion_ops); 726 &nfs_async_write_completion_ops);
734 pgio.pg_io_completion = ioc; 727 pgio.pg_io_completion = ioc;
735 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 728 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
729 pgio.pg_error = 0;
736 nfs_pageio_complete(&pgio); 730 nfs_pageio_complete(&pgio);
737 nfs_io_completion_put(ioc); 731 nfs_io_completion_put(ioc);
738 732
739 if (err < 0) 733 if (err < 0)
740 goto out_err; 734 goto out_err;
741 err = pgio.pg_error; 735 err = pgio.pg_error;
742 if (err < 0) 736 if (nfs_error_is_fatal(err))
743 goto out_err; 737 goto out_err;
744 return 0; 738 return 0;
745out_err: 739out_err:
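
The write.c hunks change the ->writepage contract: a redirtied page is now reported as AOP_WRITEPAGE_ACTIVATE, and that value means the callee kept the page locked, so both wrappers must skip unlock_page(). A compact model of the rule; the constant's value and the stubs are illustrative:

struct page { int locked; };

#define WRITEPAGE_ACTIVATE 0x80000	/* stand-in for AOP_WRITEPAGE_ACTIVATE */

static void unlock_page(struct page *p) { p->locked = 0; }

/* Pretend flush: returns ACTIVATE when the page must be requeued. */
static int do_writepage(struct page *p, int must_requeue)
{
	(void)p;
	return must_requeue ? WRITEPAGE_ACTIVATE : 0;
}

static int writepage(struct page *page, int must_requeue)
{
	int ret = do_writepage(page, must_requeue);

	/* On ACTIVATE the callee kept the lock; unlocking here would
	 * hand back a page the flusher still considers its own. */
	if (ret != WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}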
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
571 */ 571 */
572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) 572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
573{ 573{
574 struct nfsd_net *nn = v; 574 struct nfsd_net *nn = m->private;
575 575
576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); 576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
577 seq_printf(m, "num entries: %u\n", 577 seq_printf(m, "num entries: %u\n",
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..3cf4f6aa48d6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
1171 return inode; 1171 return inode;
1172} 1172}
1173 1173
1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
1175{ 1175{
1176 struct inode *inode; 1176 struct inode *inode;
1177 1177
1178 inode = nfsd_get_inode(dir->i_sb, mode); 1178 inode = nfsd_get_inode(dir->i_sb, mode);
1179 if (!inode) 1179 if (!inode)
1180 return -ENOMEM; 1180 return -ENOMEM;
1181 if (ncl) {
1182 inode->i_private = ncl;
1183 kref_get(&ncl->cl_ref);
1184 }
1181 d_add(dentry, inode); 1185 d_add(dentry, inode);
1182 inc_nlink(dir); 1186 inc_nlink(dir);
1183 fsnotify_mkdir(dir, dentry); 1187 fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
1194 dentry = d_alloc_name(parent, name); 1198 dentry = d_alloc_name(parent, name);
1195 if (!dentry) 1199 if (!dentry)
1196 goto out_err; 1200 goto out_err;
1197 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600); 1201 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
1198 if (ret) 1202 if (ret)
1199 goto out_err; 1203 goto out_err;
1200 if (ncl) {
1201 d_inode(dentry)->i_private = ncl;
1202 kref_get(&ncl->cl_ref);
1203 }
1204out: 1204out:
1205 inode_unlock(dir); 1205 inode_unlock(dir);
1206 return dentry; 1206 return dentry;
1207out_err: 1207out_err:
1208 dput(dentry);
1208 dentry = ERR_PTR(ret); 1209 dentry = ERR_PTR(ret);
1209 goto out; 1210 goto out;
1210} 1211}
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
1214 struct nfsdfs_client *ncl = inode->i_private; 1215 struct nfsdfs_client *ncl = inode->i_private;
1215 1216
1216 inode->i_private = NULL; 1217 inode->i_private = NULL;
1217 synchronize_rcu();
1218 kref_put(&ncl->cl_ref, ncl->cl_release); 1218 kref_put(&ncl->cl_ref, ncl->cl_release);
1219} 1219}
1220 1220
1221
1222static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode) 1221static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
1223{ 1222{
1224 struct nfsdfs_client *nc = inode->i_private; 1223 struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
1232{ 1231{
1233 struct nfsdfs_client *nc; 1232 struct nfsdfs_client *nc;
1234 1233
1235 rcu_read_lock(); 1234 inode_lock_shared(inode);
1236 nc = __get_nfsdfs_client(inode); 1235 nc = __get_nfsdfs_client(inode);
1237 rcu_read_unlock(); 1236 inode_unlock_shared(inode);
1238 return nc; 1237 return nc;
1239} 1238}
1240/* from __rpc_unlink */ 1239/* from __rpc_unlink */
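
The nfsd change swaps RCU for the inode lock when publishing and consuming i_private: readers dereference the pointer under inode_lock_shared(), so clear_ncl() no longer needs a synchronize_rcu() grace period before dropping its reference. The shape of that swap, with a pthread rwlock modeling the inode lock and stand-in types:

#include <pthread.h>
#include <stddef.h>

struct ncl { int refcount; };

struct pseudo_inode {
	pthread_rwlock_t i_rwsem;	/* models inode_lock{,_shared}() */
	struct ncl *i_private;
};

static struct ncl *get_client(struct pseudo_inode *inode)
{
	struct ncl *nc;

	pthread_rwlock_rdlock(&inode->i_rwsem);
	nc = inode->i_private;
	if (nc)
		nc->refcount++;		/* take a ref while it is pinned */
	pthread_rwlock_unlock(&inode->i_rwsem);
	return nc;
}

static struct ncl *clear_client(struct pseudo_inode *inode)
{
	struct ncl *nc;

	/* Exclusive lock: after this, no reader can see the pointer,
	 * so no grace period is needed before dropping the ref. */
	pthread_rwlock_wrlock(&inode->i_rwsem);
	nc = inode->i_private;
	inode->i_private = NULL;
	pthread_rwlock_unlock(&inode->i_rwsem);
	return nc;
}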
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; 1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
1812} 1812}
1813 1813
1814/* 1814/* Read a page's worth of file data into the page cache. */
1815 * Read a page's worth of file data into the page cache. Return the page
1816 * locked.
1817 */
1818static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) 1815static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1819{ 1816{
1820 struct page *page; 1817 struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1826 put_page(page); 1823 put_page(page);
1827 return ERR_PTR(-EIO); 1824 return ERR_PTR(-EIO);
1828 } 1825 }
1829 lock_page(page);
1830 return page; 1826 return page;
1831} 1827}
1832 1828
1833/* 1829/*
1830 * Lock two pages, ensuring that we lock in offset order if the pages are from
1831 * the same file.
1832 */
1833static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1834{
1835 /* Always lock in order of increasing index. */
1836 if (page1->index > page2->index)
1837 swap(page1, page2);
1838
1839 lock_page(page1);
1840 if (page1 != page2)
1841 lock_page(page2);
1842}
1843
1844/* Unlock two pages, being careful not to unlock the same page twice. */
1845static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1846{
1847 unlock_page(page1);
1848 if (page1 != page2)
1849 unlock_page(page2);
1850}
1851
1852/*
1834 * Compare extents of two files to see if they are the same. 1853 * Compare extents of two files to see if they are the same.
1835 * Caller must have locked both inodes to prevent write races. 1854 * Caller must have locked both inodes to prevent write races.
1836 */ 1855 */
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1867 dest_page = vfs_dedupe_get_page(dest, destoff); 1886 dest_page = vfs_dedupe_get_page(dest, destoff);
1868 if (IS_ERR(dest_page)) { 1887 if (IS_ERR(dest_page)) {
1869 error = PTR_ERR(dest_page); 1888 error = PTR_ERR(dest_page);
1870 unlock_page(src_page);
1871 put_page(src_page); 1889 put_page(src_page);
1872 goto out_error; 1890 goto out_error;
1873 } 1891 }
1892
1893 vfs_lock_two_pages(src_page, dest_page);
1894
1895 /*
1896 * Now that we've locked both pages, make sure they're still
1897 * mapped to the file data we're interested in. If not,
1898 * someone is invalidating pages on us and we lose.
1899 */
1900 if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1901 src_page->mapping != src->i_mapping ||
1902 dest_page->mapping != dest->i_mapping) {
1903 same = false;
1904 goto unlock;
1905 }
1906
1874 src_addr = kmap_atomic(src_page); 1907 src_addr = kmap_atomic(src_page);
1875 dest_addr = kmap_atomic(dest_page); 1908 dest_addr = kmap_atomic(dest_page);
1876 1909
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1882 1915
1883 kunmap_atomic(dest_addr); 1916 kunmap_atomic(dest_addr);
1884 kunmap_atomic(src_addr); 1917 kunmap_atomic(src_addr);
1885 unlock_page(dest_page); 1918unlock:
1886 unlock_page(src_page); 1919 vfs_unlock_two_pages(src_page, dest_page);
1887 put_page(dest_page); 1920 put_page(dest_page);
1888 put_page(src_page); 1921 put_page(src_page);
1889 1922
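
vfs_dedupe_get_page() used to return the page locked; locking is now deferred so both pages can be taken in a single global order, the standard cure for an ABBA deadlock between two concurrent dedupe calls touching the same pages. The rule in isolation, with pthread mutexes standing in for page locks (the kernel orders by page index; ordering by address works the same way):

#include <pthread.h>
#include <stdint.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* Global order (lowest address first) so two racing callers
	 * can never each hold one lock while waiting for the other. */
	if ((uintptr_t)a > (uintptr_t)b) {
		pthread_mutex_t *t = a;

		a = b;
		b = t;
	}
	pthread_mutex_lock(a);
	if (a != b)		/* same object: never lock it twice */
		pthread_mutex_lock(b);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

The PageUptodate/mapping recheck after the locks land matters for the same reason: the pages were looked up unlocked, so they may have been truncated or reclaimed in the window before the locks were taken.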
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
51static void shrink_liability(struct ubifs_info *c, int nr_to_write) 51static void shrink_liability(struct ubifs_info *c, int nr_to_write)
52{ 52{
53 down_read(&c->vfs_sb->s_umount); 53 down_read(&c->vfs_sb->s_umount);
54 writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); 54 writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
55 up_read(&c->vfs_sb->s_umount); 55 up_read(&c->vfs_sb->s_umount);
56} 56}
57 57
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..3b4b4114f208 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) 129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
130{ 130{
131 if (orph->del) { 131 if (orph->del) {
132 spin_unlock(&c->orphan_lock);
133 dbg_gen("deleted twice ino %lu", orph->inum); 132 dbg_gen("deleted twice ino %lu", orph->inum);
134 return; 133 return;
135 } 134 }
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
138 orph->del = 1; 137 orph->del = 1;
139 orph->dnext = c->orph_dnext; 138 orph->dnext = c->orph_dnext;
140 c->orph_dnext = orph; 139 c->orph_dnext = orph;
141 spin_unlock(&c->orphan_lock);
142 dbg_gen("delete later ino %lu", orph->inum); 140 dbg_gen("delete later ino %lu", orph->inum);
143 return; 141 return;
144 } 142 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 2c0803b0ac3a..8c1d571334bc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -609,6 +609,10 @@ static int init_constants_early(struct ubifs_info *c)
609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
610 if (c->max_bu_buf_len > c->leb_size) 610 if (c->max_bu_buf_len > c->leb_size)
611 c->max_bu_buf_len = c->leb_size; 611 c->max_bu_buf_len = c->leb_size;
612
613 /* Log is ready, preserve one LEB for commits. */
614 c->min_log_bytes = c->leb_size;
615
612 return 0; 616 return 0;
613} 617}
614 618
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..fe6d804a38dc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
880 /* len == 0 means wake all */ 880 /* len == 0 means wake all */
881 struct userfaultfd_wake_range range = { .len = 0, }; 881 struct userfaultfd_wake_range range = { .len = 0, };
882 unsigned long new_flags; 882 unsigned long new_flags;
883 bool still_valid;
883 884
884 WRITE_ONCE(ctx->released, true); 885 WRITE_ONCE(ctx->released, true);
885 886
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
895 * taking the mmap_sem for writing. 896 * taking the mmap_sem for writing.
896 */ 897 */
897 down_write(&mm->mmap_sem); 898 down_write(&mm->mmap_sem);
898 if (!mmget_still_valid(mm)) 899 still_valid = mmget_still_valid(mm);
899 goto skip_mm;
900 prev = NULL; 900 prev = NULL;
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { 901 for (vma = mm->mmap; vma; vma = vma->vm_next) {
902 cond_resched(); 902 cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
907 continue; 907 continue;
908 } 908 }
909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); 909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
910 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, 910 if (still_valid) {
911 new_flags, vma->anon_vma, 911 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
912 vma->vm_file, vma->vm_pgoff, 912 new_flags, vma->anon_vma,
913 vma_policy(vma), 913 vma->vm_file, vma->vm_pgoff,
914 NULL_VM_UFFD_CTX); 914 vma_policy(vma),
915 if (prev) 915 NULL_VM_UFFD_CTX);
916 vma = prev; 916 if (prev)
917 else 917 vma = prev;
918 prev = vma; 918 else
919 prev = vma;
920 }
919 vma->vm_flags = new_flags; 921 vma->vm_flags = new_flags;
920 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 922 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
921 } 923 }
922skip_mm:
923 up_write(&mm->mmap_sem); 924 up_write(&mm->mmap_sem);
924 mmput(mm); 925 mmput(mm);
925wakeup: 926wakeup:
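With still_valid captured once under mmap_sem, the release path now clears VM_UFFD_MISSING/VM_UFFD_WP and the per-VMA context on every iteration, and only the vma_merge() optimization is gated on the mm still being live; the old code jumped past the whole loop via skip_mm. A rough control-flow sketch under those assumptions (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct area { unsigned long flags; };

#define VM_UFFD 0x1UL

/* Cleanup must reach every area; the merge step is best-effort and is
 * only attempted while the address space is still valid. */
static void release_all(struct area *v, int n, bool still_valid)
{
    for (int i = 0; i < n; i++) {
        if (still_valid)
            printf("try to merge area %d with its neighbour\n", i);
        v[i].flags &= ~VM_UFFD;    /* unconditional, as in the fix */
    }
}

int main(void)
{
    struct area v[2] = { { VM_UFFD }, { VM_UFFD } };

    release_all(v, 2, false);
    printf("flags after release: %lu %lu\n", v[0].flags, v[1].flags);
    return 0;
}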
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..7bd7534f5051 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -547,63 +547,12 @@ xfs_file_compat_ioctl(
547 struct inode *inode = file_inode(filp); 547 struct inode *inode = file_inode(filp);
548 struct xfs_inode *ip = XFS_I(inode); 548 struct xfs_inode *ip = XFS_I(inode);
549 struct xfs_mount *mp = ip->i_mount; 549 struct xfs_mount *mp = ip->i_mount;
550 void __user *arg = (void __user *)p; 550 void __user *arg = compat_ptr(p);
551 int error; 551 int error;
552 552
553 trace_xfs_file_compat_ioctl(ip); 553 trace_xfs_file_compat_ioctl(ip);
554 554
555 switch (cmd) { 555 switch (cmd) {
556 /* No size or alignment issues on any arch */
557 case XFS_IOC_DIOINFO:
558 case XFS_IOC_FSGEOMETRY_V4:
559 case XFS_IOC_FSGEOMETRY:
560 case XFS_IOC_AG_GEOMETRY:
561 case XFS_IOC_FSGETXATTR:
562 case XFS_IOC_FSSETXATTR:
563 case XFS_IOC_FSGETXATTRA:
564 case XFS_IOC_FSSETDM:
565 case XFS_IOC_GETBMAP:
566 case XFS_IOC_GETBMAPA:
567 case XFS_IOC_GETBMAPX:
568 case XFS_IOC_FSCOUNTS:
569 case XFS_IOC_SET_RESBLKS:
570 case XFS_IOC_GET_RESBLKS:
571 case XFS_IOC_FSGROWFSLOG:
572 case XFS_IOC_GOINGDOWN:
573 case XFS_IOC_ERROR_INJECTION:
574 case XFS_IOC_ERROR_CLEARALL:
575 case FS_IOC_GETFSMAP:
576 case XFS_IOC_SCRUB_METADATA:
577 case XFS_IOC_BULKSTAT:
578 case XFS_IOC_INUMBERS:
579 return xfs_file_ioctl(filp, cmd, p);
580#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
581 /*
582 * These are handled fine if no alignment issues. To support x32
583 * which uses native 64-bit alignment we must emit these cases in
584 * addition to the ia-32 compat set below.
585 */
586 case XFS_IOC_ALLOCSP:
587 case XFS_IOC_FREESP:
588 case XFS_IOC_RESVSP:
589 case XFS_IOC_UNRESVSP:
590 case XFS_IOC_ALLOCSP64:
591 case XFS_IOC_FREESP64:
592 case XFS_IOC_RESVSP64:
593 case XFS_IOC_UNRESVSP64:
594 case XFS_IOC_FSGEOMETRY_V1:
595 case XFS_IOC_FSGROWFSDATA:
596 case XFS_IOC_FSGROWFSRT:
597 case XFS_IOC_ZERO_RANGE:
598#ifdef CONFIG_X86_X32
599 /*
600 * x32 special: this gets a different cmd number from the ia-32 compat
601 * case below; the associated data will match native 64-bit alignment.
602 */
603 case XFS_IOC_SWAPEXT:
604#endif
605 return xfs_file_ioctl(filp, cmd, p);
606#endif
607#if defined(BROKEN_X86_ALIGNMENT) 556#if defined(BROKEN_X86_ALIGNMENT)
608 case XFS_IOC_ALLOCSP_32: 557 case XFS_IOC_ALLOCSP_32:
609 case XFS_IOC_FREESP_32: 558 case XFS_IOC_FREESP_32:
@@ -705,6 +654,7 @@ xfs_file_compat_ioctl(
705 case XFS_IOC_FSSETDM_BY_HANDLE_32: 654 case XFS_IOC_FSSETDM_BY_HANDLE_32:
706 return xfs_compat_fssetdm_by_handle(filp, arg); 655 return xfs_compat_fssetdm_by_handle(filp, arg);
707 default: 656 default:
708 return -ENOIOCTLCMD; 657 /* try the native version */
658 return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
709 } 659 }
710} 660}
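Rather than whitelisting every command whose layout already matches, the compat handler now converts the argument once with compat_ptr() and lets unknown commands fall through to the native ioctl. A hedged userspace sketch of that dispatch shape (all names hypothetical):

#include <stdio.h>

enum { CMD_PLAIN = 1, CMD_NEEDS_FIXUP = 2 };

static long native_ioctl(unsigned int cmd, unsigned long arg)
{
    printf("native handler: cmd=%u arg=%lu\n", cmd, arg);
    return 0;
}

/* Translate only the commands whose 32-bit layout differs; everything
 * else is tried against the native handler instead of being listed. */
static long compat_ioctl(unsigned int cmd, unsigned long arg)
{
    switch (cmd) {
    case CMD_NEEDS_FIXUP:
        printf("thunking 32-bit layout for cmd=%u\n", cmd);
        return 0;
    default:
        return native_ioctl(cmd, arg);    /* try the native version */
    }
}

int main(void)
{
    compat_ioctl(CMD_PLAIN, 7);
    compat_ioctl(CMD_NEEDS_FIXUP, 7);
    return 0;
}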
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..fe285d123d69 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -793,6 +793,7 @@ xfs_setattr_nonsize(
793 793
794out_cancel: 794out_cancel:
795 xfs_trans_cancel(tp); 795 xfs_trans_cancel(tp);
796 xfs_iunlock(ip, XFS_ILOCK_EXCL);
796out_dqrele: 797out_dqrele:
797 xfs_qm_dqrele(udqp); 798 xfs_qm_dqrele(udqp);
798 xfs_qm_dqrele(gdqp); 799 xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..a339bd5fa260 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,7 +32,7 @@ xfs_break_leased_layouts(
32 struct xfs_inode *ip = XFS_I(inode); 32 struct xfs_inode *ip = XFS_I(inode);
33 int error; 33 int error;
34 34
35 while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { 35 while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
36 xfs_iunlock(ip, *iolock); 36 xfs_iunlock(ip, *iolock);
37 *did_unlock = true; 37 *did_unlock = true;
38 error = break_layout(inode, true); 38 error = break_layout(inode, true);
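The one-character move of the closing parenthesis matters because == binds tighter than = in C: the old condition stored the result of the comparison (0 or 1) in error instead of break_layout()'s return value. A standalone demonstration, with f() standing in for break_layout():

#include <stdio.h>

#define DEMO_EWOULDBLOCK 11

static int f(void) { return -DEMO_EWOULDBLOCK; }

int main(void)
{
    int error;

    /* Old form: '==' evaluates first, so error becomes 0 or 1. */
    error = (f() == -DEMO_EWOULDBLOCK);
    printf("buggy parenthesization: error = %d\n", error);     /* 1 */

    /* Fixed form: assign the return value, then compare it. */
    if ((error = f()) == -DEMO_EWOULDBLOCK)
        printf("fixed parenthesization: error = %d\n", error); /* -11 */

    return 0;
}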
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..edbe37b7f636 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks(
1190} 1190}
1191 1191
1192/* 1192/*
1193 * Grab the exclusive iolock for a data copy from src to dest, making 1193 * Grab the exclusive iolock for a data copy from src to dest, making sure to
1194 * sure to abide vfs locking order (lowest pointer value goes first) and 1194 * abide vfs locking order (lowest pointer value goes first) and breaking the
1195 * breaking the pnfs layout leases on dest before proceeding. The loop 1195 * layout leases before proceeding. The loop is needed because we cannot call
1196 * is needed because we cannot call the blocking break_layout() with the 1196 * the blocking break_layout() with the iolocks held, and therefore have to
1197 * src iolock held, and therefore have to back out both locks. 1197 * back out both locks.
1198 */ 1198 */
1199static int 1199static int
1200xfs_iolock_two_inodes_and_break_layout( 1200xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout(
1203{ 1203{
1204 int error; 1204 int error;
1205 1205
1206retry: 1206 if (src > dest)
1207 if (src < dest) { 1207 swap(src, dest);
1208 inode_lock_shared(src);
1209 inode_lock_nested(dest, I_MUTEX_NONDIR2);
1210 } else {
1211 /* src >= dest */
1212 inode_lock(dest);
1213 }
1214 1208
1215 error = break_layout(dest, false); 1209retry:
1216 if (error == -EWOULDBLOCK) { 1210 /* Wait to break both inodes' layouts before we start locking. */
1217 inode_unlock(dest); 1211 error = break_layout(src, true);
1218 if (src < dest) 1212 if (error)
1219 inode_unlock_shared(src); 1213 return error;
1214 if (src != dest) {
1220 error = break_layout(dest, true); 1215 error = break_layout(dest, true);
1221 if (error) 1216 if (error)
1222 return error; 1217 return error;
1223 goto retry;
1224 } 1218 }
1219
1220 /* Lock one inode and make sure nobody got in and leased it. */
1221 inode_lock(src);
1222 error = break_layout(src, false);
1225 if (error) { 1223 if (error) {
1224 inode_unlock(src);
1225 if (error == -EWOULDBLOCK)
1226 goto retry;
1227 return error;
1228 }
1229
1230 if (src == dest)
1231 return 0;
1232
1233 /* Lock the other inode and make sure nobody got in and leased it. */
1234 inode_lock_nested(dest, I_MUTEX_NONDIR2);
1235 error = break_layout(dest, false);
1236 if (error) {
1237 inode_unlock(src);
1226 inode_unlock(dest); 1238 inode_unlock(dest);
1227 if (src < dest) 1239 if (error == -EWOULDBLOCK)
1228 inode_unlock_shared(src); 1240 goto retry;
1229 return error; 1241 return error;
1230 } 1242 }
1231 if (src > dest) 1243
1232 inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
1233 return 0; 1244 return 0;
1234} 1245}
1235 1246
@@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock(
1247 1258
1248 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); 1259 xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
1249 if (!same_inode) 1260 if (!same_inode)
1250 xfs_iunlock(src, XFS_MMAPLOCK_SHARED); 1261 xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
1251 inode_unlock(inode_out); 1262 inode_unlock(inode_out);
1252 if (!same_inode) 1263 if (!same_inode)
1253 inode_unlock_shared(inode_in); 1264 inode_unlock(inode_in);
1254} 1265}
1255 1266
1256/* 1267/*
@@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep(
1325 if (same_inode) 1336 if (same_inode)
1326 xfs_ilock(src, XFS_MMAPLOCK_EXCL); 1337 xfs_ilock(src, XFS_MMAPLOCK_EXCL);
1327 else 1338 else
1328 xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest, 1339 xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
1329 XFS_MMAPLOCK_EXCL); 1340 XFS_MMAPLOCK_EXCL);
1330 1341
1331 /* Check file eligibility and prepare for block sharing. */ 1342 /* Check file eligibility and prepare for block sharing. */
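The rewrite replaces the asymmetric shared/exclusive scheme with a simpler rule: sort the two inodes by pointer with swap() so the lower address is always locked first, break the layouts up front, and retry from scratch whenever a lease sneaks in between the blocking and non-blocking break_layout() calls. The address-ordering idea in isolation, as a minimal pthread sketch (names hypothetical; the pointer comparison mirrors the kernel's swap(src, dest)):

#include <pthread.h>

struct obj {
    pthread_mutex_t lock;
};

/* Always take the lower-addressed lock first, so two callers passing
 * the same pair in opposite orders cannot deadlock on each other. */
static void lock_two(struct obj *a, struct obj *b)
{
    if (a > b) {
        struct obj *tmp = a;
        a = b;
        b = tmp;
    }
    pthread_mutex_lock(&a->lock);
    if (a != b)
        pthread_mutex_lock(&b->lock);
}

int main(void)
{
    struct obj x = { PTHREAD_MUTEX_INITIALIZER };
    struct obj y = { PTHREAD_MUTEX_INITIALIZER };

    lock_two(&x, &y);    /* takes the same order as lock_two(&y, &x) */
    pthread_mutex_unlock(&y.lock);
    pthread_mutex_unlock(&x.lock);
    return 0;
}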
diff --git a/include/dt-bindings/memory/mt8183-larb-port.h b/include/dt-bindings/memory/mt8183-larb-port.h
new file mode 100644
index 000000000000..2c579f305162
--- /dev/null
+++ b/include/dt-bindings/memory/mt8183-larb-port.h
@@ -0,0 +1,130 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018 MediaTek Inc.
4 * Author: Yong Wu <yong.wu@mediatek.com>
5 */
6#ifndef __DTS_IOMMU_PORT_MT8183_H
7#define __DTS_IOMMU_PORT_MT8183_H
8
9#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
10
11#define M4U_LARB0_ID 0
12#define M4U_LARB1_ID 1
13#define M4U_LARB2_ID 2
14#define M4U_LARB3_ID 3
15#define M4U_LARB4_ID 4
16#define M4U_LARB5_ID 5
17#define M4U_LARB6_ID 6
18#define M4U_LARB7_ID 7
19
20/* larb0 */
21#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
22#define M4U_PORT_DISP_2L_OVL0_LARB0 MTK_M4U_ID(M4U_LARB0_ID, 1)
23#define M4U_PORT_DISP_2L_OVL1_LARB0 MTK_M4U_ID(M4U_LARB0_ID, 2)
24#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3)
25#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 4)
26#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
27#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 6)
28#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7)
29#define M4U_PORT_MDP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 8)
30#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 9)
31
32/* larb1 */
33#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0)
34#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1)
35#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 2)
36#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 3)
37#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 4)
38#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 5)
39#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 6)
40
41/* larb2 VPU0 */
42#define M4U_PORT_IMG_IPUO MTK_M4U_ID(M4U_LARB2_ID, 0)
43#define M4U_PORT_IMG_IPU3O MTK_M4U_ID(M4U_LARB2_ID, 1)
44#define M4U_PORT_IMG_IPUI MTK_M4U_ID(M4U_LARB2_ID, 2)
45
46/* larb3 VPU1 */
47#define M4U_PORT_CAM_IPUO MTK_M4U_ID(M4U_LARB3_ID, 0)
48#define M4U_PORT_CAM_IPU2O MTK_M4U_ID(M4U_LARB3_ID, 1)
49#define M4U_PORT_CAM_IPU3O MTK_M4U_ID(M4U_LARB3_ID, 2)
50#define M4U_PORT_CAM_IPUI MTK_M4U_ID(M4U_LARB3_ID, 3)
51#define M4U_PORT_CAM_IPU2I MTK_M4U_ID(M4U_LARB3_ID, 4)
52
53/* larb4 */
54#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB4_ID, 0)
55#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB4_ID, 1)
56#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB4_ID, 2)
57#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB4_ID, 3)
58#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB4_ID, 4)
59#define M4U_PORT_JPGENC_RDMA MTK_M4U_ID(M4U_LARB4_ID, 5)
60#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB4_ID, 6)
61#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB4_ID, 7)
62#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB4_ID, 8)
63#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB4_ID, 9)
64#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB4_ID, 10)
65
66/* larb5 */
67#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB5_ID, 0)
68#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB5_ID, 1)
69#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB5_ID, 2)
70#define M4U_PORT_CAM_VIPI MTK_M4U_ID(M4U_LARB5_ID, 3)
71#define M4U_PORT_CAM_LCEI MTK_M4U_ID(M4U_LARB5_ID, 4)
72#define M4U_PORT_CAM_SMXI MTK_M4U_ID(M4U_LARB5_ID, 5)
73#define M4U_PORT_CAM_SMXO MTK_M4U_ID(M4U_LARB5_ID, 6)
74#define M4U_PORT_CAM_WPE0_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 7)
75#define M4U_PORT_CAM_WPE0_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 8)
76#define M4U_PORT_CAM_WPE0_WDMA MTK_M4U_ID(M4U_LARB5_ID, 9)
77#define M4U_PORT_CAM_FDVT_RP MTK_M4U_ID(M4U_LARB5_ID, 10)
78#define M4U_PORT_CAM_FDVT_WR MTK_M4U_ID(M4U_LARB5_ID, 11)
79#define M4U_PORT_CAM_FDVT_RB MTK_M4U_ID(M4U_LARB5_ID, 12)
80#define M4U_PORT_CAM_WPE1_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 13)
81#define M4U_PORT_CAM_WPE1_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 14)
82#define M4U_PORT_CAM_WPE1_WDMA MTK_M4U_ID(M4U_LARB5_ID, 15)
83#define M4U_PORT_CAM_DPE_RDMA MTK_M4U_ID(M4U_LARB5_ID, 16)
84#define M4U_PORT_CAM_DPE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 17)
85#define M4U_PORT_CAM_MFB_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 18)
86#define M4U_PORT_CAM_MFB_RDMA1 MTK_M4U_ID(M4U_LARB5_ID, 19)
87#define M4U_PORT_CAM_MFB_WDMA MTK_M4U_ID(M4U_LARB5_ID, 20)
88#define M4U_PORT_CAM_RSC_RDMA0 MTK_M4U_ID(M4U_LARB5_ID, 21)
89#define M4U_PORT_CAM_RSC_WDMA MTK_M4U_ID(M4U_LARB5_ID, 22)
90#define M4U_PORT_CAM_OWE_RDMA MTK_M4U_ID(M4U_LARB5_ID, 23)
91#define M4U_PORT_CAM_OWE_WDMA MTK_M4U_ID(M4U_LARB5_ID, 24)
92
93/* larb6 */
94#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB6_ID, 0)
95#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB6_ID, 1)
96#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB6_ID, 2)
97#define M4U_PORT_CAM_AFO MTK_M4U_ID(M4U_LARB6_ID, 3)
98#define M4U_PORT_CAM_LSCI0 MTK_M4U_ID(M4U_LARB6_ID, 4)
99#define M4U_PORT_CAM_LSCI1 MTK_M4U_ID(M4U_LARB6_ID, 5)
100#define M4U_PORT_CAM_PDO MTK_M4U_ID(M4U_LARB6_ID, 6)
101#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB6_ID, 7)
102#define M4U_PORT_CAM_LCSO MTK_M4U_ID(M4U_LARB6_ID, 8)
103#define M4U_PORT_CAM_CAM_RSSO_A MTK_M4U_ID(M4U_LARB6_ID, 9)
104#define M4U_PORT_CAM_UFEO MTK_M4U_ID(M4U_LARB6_ID, 10)
105#define M4U_PORT_CAM_SOCO MTK_M4U_ID(M4U_LARB6_ID, 11)
106#define M4U_PORT_CAM_SOC1 MTK_M4U_ID(M4U_LARB6_ID, 12)
107#define M4U_PORT_CAM_SOC2 MTK_M4U_ID(M4U_LARB6_ID, 13)
108#define M4U_PORT_CAM_CCUI MTK_M4U_ID(M4U_LARB6_ID, 14)
109#define M4U_PORT_CAM_CCUO MTK_M4U_ID(M4U_LARB6_ID, 15)
110#define M4U_PORT_CAM_RAWI_A MTK_M4U_ID(M4U_LARB6_ID, 16)
111#define M4U_PORT_CAM_CCUG MTK_M4U_ID(M4U_LARB6_ID, 17)
112#define M4U_PORT_CAM_PSO MTK_M4U_ID(M4U_LARB6_ID, 18)
113#define M4U_PORT_CAM_AFO_1 MTK_M4U_ID(M4U_LARB6_ID, 19)
114#define M4U_PORT_CAM_LSCI_2 MTK_M4U_ID(M4U_LARB6_ID, 20)
115#define M4U_PORT_CAM_PDI MTK_M4U_ID(M4U_LARB6_ID, 21)
116#define M4U_PORT_CAM_FLKO MTK_M4U_ID(M4U_LARB6_ID, 22)
117#define M4U_PORT_CAM_LMVO MTK_M4U_ID(M4U_LARB6_ID, 23)
118#define M4U_PORT_CAM_UFGO MTK_M4U_ID(M4U_LARB6_ID, 24)
119#define M4U_PORT_CAM_SPARE MTK_M4U_ID(M4U_LARB6_ID, 25)
120#define M4U_PORT_CAM_SPARE_2 MTK_M4U_ID(M4U_LARB6_ID, 26)
121#define M4U_PORT_CAM_SPARE_3 MTK_M4U_ID(M4U_LARB6_ID, 27)
122#define M4U_PORT_CAM_SPARE_4 MTK_M4U_ID(M4U_LARB6_ID, 28)
123#define M4U_PORT_CAM_SPARE_5 MTK_M4U_ID(M4U_LARB6_ID, 29)
124#define M4U_PORT_CAM_SPARE_6 MTK_M4U_ID(M4U_LARB6_ID, 30)
125
126/* CCU */
127#define M4U_PORT_CCU0 MTK_M4U_ID(M4U_LARB7_ID, 0)
128#define M4U_PORT_CCU1 MTK_M4U_ID(M4U_LARB7_ID, 1)
129
130#endif
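Every port ID in this new header is the larb index shifted into bits 5 and up, with the port index in the low five bits, so an ID can be decoded again with a shift and a mask. A worked example (the decode macros here are illustrative, not part of the header):

#include <stdio.h>

#define MTK_M4U_ID(larb, port)    (((larb) << 5) | (port))
#define DEMO_M4U_TO_LARB(id)      ((id) >> 5)
#define DEMO_M4U_TO_PORT(id)      ((id) & 0x1f)

int main(void)
{
    /* M4U_PORT_CAM_FDVT_RP = MTK_M4U_ID(5, 10) = (5 << 5) | 10 = 170 */
    unsigned int id = MTK_M4U_ID(5, 10);

    printf("id=%u larb=%u port=%u\n", id,
           DEMO_M4U_TO_LARB(id), DEMO_M4U_TO_PORT(id));
    return 0;
}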
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 4a4d00646040..21e950e4ab62 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -184,6 +184,9 @@ extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
184extern int 184extern int
185amd_iommu_update_ga(int cpu, bool is_run, void *data); 185amd_iommu_update_ga(int cpu, bool is_run, void *data);
186 186
187extern int amd_iommu_activate_guest_mode(void *data);
188extern int amd_iommu_deactivate_guest_mode(void *data);
189
187#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ 190#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
188 191
189static inline int 192static inline int
@@ -198,6 +201,15 @@ amd_iommu_update_ga(int cpu, bool is_run, void *data)
198 return 0; 201 return 0;
199} 202}
200 203
204static inline int amd_iommu_activate_guest_mode(void *data)
205{
206 return 0;
207}
208
209static inline int amd_iommu_deactivate_guest_mode(void *data)
210{
211 return 0;
212}
201#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */ 213#endif /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
202 214
203#endif /* _ASM_X86_AMD_IOMMU_H */ 215#endif /* _ASM_X86_AMD_IOMMU_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index feff3fe4467e..1b1fa1557e68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,6 +311,7 @@ enum req_flag_bits {
311 __REQ_RAHEAD, /* read ahead, can fail anytime */ 311 __REQ_RAHEAD, /* read ahead, can fail anytime */
312 __REQ_BACKGROUND, /* background IO */ 312 __REQ_BACKGROUND, /* background IO */
313 __REQ_NOWAIT, /* Don't wait if request will block */ 313 __REQ_NOWAIT, /* Don't wait if request will block */
314 __REQ_NOWAIT_INLINE, /* Return would-block error inline */
314 /* 315 /*
315 * When a shared kthread needs to issue a bio for a cgroup, doing 316 * When a shared kthread needs to issue a bio for a cgroup, doing
316 * so synchronously can lead to priority inversions as the kthread 317 * so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
345#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 346#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
346#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) 347#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
347#define REQ_NOWAIT (1ULL << __REQ_NOWAIT) 348#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
349#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
348#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) 350#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
349 351
350#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) 352#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
418 420
419typedef unsigned int blk_qc_t; 421typedef unsigned int blk_qc_t;
420#define BLK_QC_T_NONE -1U 422#define BLK_QC_T_NONE -1U
423#define BLK_QC_T_EAGAIN -2U
421#define BLK_QC_T_SHIFT 16 424#define BLK_QC_T_SHIFT 16
422#define BLK_QC_T_INTERNAL (1U << 31) 425#define BLK_QC_T_INTERNAL (1U << 31)
423 426
424static inline bool blk_qc_t_valid(blk_qc_t cookie) 427static inline bool blk_qc_t_valid(blk_qc_t cookie)
425{ 428{
426 return cookie != BLK_QC_T_NONE; 429 return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
427} 430}
428 431
429static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) 432static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
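blk_qc_t_valid() now has to reject two sentinels: BLK_QC_T_NONE (-1U) and the new BLK_QC_T_EAGAIN (-2U), which reports the would-block error inline. A compact sketch of the check (macro names prefixed to mark them as stand-ins):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_qc_t;

#define DEMO_QC_T_NONE   (-1U)
#define DEMO_QC_T_EAGAIN (-2U)

/* A cookie is pollable only if it is neither sentinel value. */
static bool qc_valid(blk_qc_t cookie)
{
    return cookie != DEMO_QC_T_NONE && cookie != DEMO_QC_T_EAGAIN;
}

int main(void)
{
    printf("%d %d %d\n", qc_valid(0x10000),
           qc_valid(DEMO_QC_T_NONE), qc_valid(DEMO_QC_T_EAGAIN));
    return 0;    /* prints: 1 0 0 */
}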
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb29b1a3..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
30 30
31static inline void ceph_buffer_put(struct ceph_buffer *b) 31static inline void ceph_buffer_put(struct ceph_buffer *b)
32{ 32{
33 kref_put(&b->kref, ceph_buffer_release); 33 if (b)
34 kref_put(&b->kref, ceph_buffer_release);
34} 35}
35 36
36extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); 37extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
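ceph_buffer_put() now tolerates a NULL buffer, in the spirit of kfree(NULL), so error paths can drop a reference unconditionally. The same pattern as a standalone sketch (hypothetical names, plain counter instead of a kref):

#include <stdio.h>
#include <stdlib.h>

struct buf { int refcount; };

static void buf_release(struct buf *b)
{
    printf("releasing buffer\n");
    free(b);
}

/* NULL-tolerant put: callers need no 'if (b)' guard of their own. */
static void buf_put(struct buf *b)
{
    if (b && --b->refcount == 0)
        buf_release(b);
}

int main(void)
{
    struct buf *b = calloc(1, sizeof(*b));

    if (!b)
        return 1;
    b->refcount = 1;
    buf_put(NULL);    /* safe no-op */
    buf_put(b);       /* drops the final reference */
    return 0;
}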
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f0fd5636fddb..5e88e7e33abe 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
24 long ______r; \ 24 long ______r; \
25 static struct ftrace_likely_data \ 25 static struct ftrace_likely_data \
26 __aligned(4) \ 26 __aligned(4) \
27 __section("_ftrace_annotated_branch") \ 27 __section(_ftrace_annotated_branch) \
28 ______f = { \ 28 ______f = { \
29 .data.func = __func__, \ 29 .data.func = __func__, \
30 .data.file = __FILE__, \ 30 .data.file = __FILE__, \
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
60#define __trace_if_value(cond) ({ \ 60#define __trace_if_value(cond) ({ \
61 static struct ftrace_branch_data \ 61 static struct ftrace_branch_data \
62 __aligned(4) \ 62 __aligned(4) \
63 __section("_ftrace_branch") \ 63 __section(_ftrace_branch) \
64 __if_trace = { \ 64 __if_trace = { \
65 .func = __func__, \ 65 .func = __func__, \
66 .file = __FILE__, \ 66 .file = __FILE__, \
@@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
118 ".popsection\n\t" 118 ".popsection\n\t"
119 119
120/* Annotate a C jump table to allow objtool to follow the code flow */ 120/* Annotate a C jump table to allow objtool to follow the code flow */
121#define __annotate_jump_table __section(".rodata..c_jump_table") 121#define __annotate_jump_table __section(.rodata..c_jump_table)
122 122
123#else 123#else
124#define annotate_reachable() 124#define annotate_reachable()
@@ -298,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr)
298 * visible to the compiler. 298 * visible to the compiler.
299 */ 299 */
300#define __ADDRESSABLE(sym) \ 300#define __ADDRESSABLE(sym) \
301 static void * __section(".discard.addressable") __used \ 301 static void * __section(.discard.addressable) __used \
302 __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; 302 __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
303 303
304/** 304/**
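At this point the kernel's __section() macro stringifies its argument, so callers hand it a bare token; passing "quoted" names, as the removed lines did, would end up double-quoted inside the attribute. A minimal standalone sketch of a stringifying section macro (GCC/Clang on ELF assumed; this is the idea, not the kernel's exact definition):

#include <stdio.h>

/* The caller passes an unquoted section name; the '#' operator turns
 * it into the string the section attribute actually needs. */
#define demo_section(S) __attribute__((__section__(#S)))

static int marker demo_section(.data.demo) = 42;

int main(void)
{
    printf("marker=%d (placed in section \".data.demo\")\n", marker);
    return 0;
}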
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index c05d4e661489..03f8e98e3bcc 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
160static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size, 160static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
161 gfp_t gfp) 161 gfp_t gfp)
162{ 162{
163 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 163 return NULL;
164 size_t align = get_order(PAGE_ALIGN(size));
165
166 return alloc_pages_node(node, gfp, align);
167} 164}
168 165
169static inline void dma_free_contiguous(struct device *dev, struct page *page, 166static inline void dma_free_contiguous(struct device *dev, struct page *page,
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 40915b461f18..f757a58191a6 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq)
241 return -EINVAL; 241 return -EINVAL;
242} 242}
243 243
244static inline int
245gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
246 unsigned int gpio_offset, unsigned int pin_offset,
247 unsigned int npins)
248{
249 WARN_ON(1);
250 return -EINVAL;
251}
252
253static inline int
254gpiochip_add_pingroup_range(struct gpio_chip *chip,
255 struct pinctrl_dev *pctldev,
256 unsigned int gpio_offset, const char *pin_group)
257{
258 WARN_ON(1);
259 return -EINVAL;
260}
261
262static inline void
263gpiochip_remove_pin_ranges(struct gpio_chip *chip)
264{
265 WARN_ON(1);
266}
267
268static inline int devm_gpio_request(struct device *dev, unsigned gpio, 244static inline int devm_gpio_request(struct device *dev, unsigned gpio,
269 const char *label) 245 const char *label)
270{ 246{
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
index ceabb01a6a7d..1ecb6b45812c 100644
--- a/include/linux/input/elan-i2c-ids.h
+++ b/include/linux/input/elan-i2c-ids.h
@@ -48,7 +48,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
48 { "ELAN0618", 0 }, 48 { "ELAN0618", 0 },
49 { "ELAN0619", 0 }, 49 { "ELAN0619", 0 },
50 { "ELAN061A", 0 }, 50 { "ELAN061A", 0 },
51 { "ELAN061B", 0 }, 51/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
52 { "ELAN061C", 0 }, 52 { "ELAN061C", 0 },
53 { "ELAN061D", 0 }, 53 { "ELAN061D", 0 },
54 { "ELAN061E", 0 }, 54 { "ELAN061E", 0 },
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f2ae8a006ff8..ed11ef594378 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -272,6 +272,8 @@
272#define dma_frcd_type(d) ((d >> 30) & 1) 272#define dma_frcd_type(d) ((d >> 30) & 1)
273#define dma_frcd_fault_reason(c) (c & 0xff) 273#define dma_frcd_fault_reason(c) (c & 0xff)
274#define dma_frcd_source_id(c) (c & 0xffff) 274#define dma_frcd_source_id(c) (c & 0xffff)
275#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
276#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
275/* low 64 bit */ 277/* low 64 bit */
276#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) 278#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
277 279
@@ -346,7 +348,6 @@ enum {
346#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) 348#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
347 349
348#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) 350#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
349#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
350#define QI_EIOTLB_IH(ih) (((u64)ih) << 6) 351#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
351#define QI_EIOTLB_AM(am) (((u64)am)) 352#define QI_EIOTLB_AM(am) (((u64)am))
352#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) 353#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
@@ -378,8 +379,6 @@ enum {
378#define QI_RESP_INVALID 0x1 379#define QI_RESP_INVALID 0x1
379#define QI_RESP_FAILURE 0xf 380#define QI_RESP_FAILURE 0xf
380 381
381#define QI_GRAN_ALL_ALL 0
382#define QI_GRAN_NONG_ALL 1
383#define QI_GRAN_NONG_PASID 2 382#define QI_GRAN_NONG_PASID 2
384#define QI_GRAN_PSI_PASID 3 383#define QI_GRAN_PSI_PASID 3
385 384
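The two new macros decode the upper word of a fault record: the PASID value occupies bits 8-27 and the PASID-present flag bit 31. A worked extraction with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

#define dma_frcd_pasid_value(c)    (((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c)  (((c) >> 31) & 1)

int main(void)
{
    /* Hypothetical upper fault-record word: PASID 0x42, present. */
    uint32_t c = (1u << 31) | (0x42u << 8);

    printf("present=%u pasid=0x%x\n",
           (unsigned int)dma_frcd_pasid_present(c),
           (unsigned int)dma_frcd_pasid_value(c));
    return 0;    /* prints: present=1 pasid=0x42 */
}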
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 6b1b8be3ebec..ec7a13405f10 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -76,10 +76,9 @@ struct io_pgtable_cfg {
76 * (unmapped) entries but the hardware might do so anyway, perform 76 * (unmapped) entries but the hardware might do so anyway, perform
77 * TLB maintenance when mapping as well as when unmapping. 77 * TLB maintenance when mapping as well as when unmapping.
78 * 78 *
79 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all 79 * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
80 * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit 80 * to support up to 34 bits PA where the bit32 and bit33 are
81 * when the SoC is in "4GB mode" and they can only access the high 81 * encoded in the bit9 and bit4 of the PTE respectively.
82 * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
83 * 82 *
84 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs 83 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
85 * on unmap, for DMA domains using the flush queue mechanism for 84 * on unmap, for DMA domains using the flush queue mechanism for
@@ -88,7 +87,7 @@ struct io_pgtable_cfg {
88 #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) 87 #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
89 #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) 88 #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
90 #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) 89 #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
91 #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) 90 #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
92 #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) 91 #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
93 unsigned long quirks; 92 unsigned long quirks;
94 unsigned long pgsize_bitmap; 93 unsigned long pgsize_bitmap;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 64ebaff33455..29bac5345563 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -436,6 +436,9 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
436extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); 436extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
437extern int iommu_request_dm_for_dev(struct device *dev); 437extern int iommu_request_dm_for_dev(struct device *dev);
438extern int iommu_request_dma_domain_for_dev(struct device *dev); 438extern int iommu_request_dma_domain_for_dev(struct device *dev);
439extern void iommu_set_default_passthrough(bool cmd_line);
440extern void iommu_set_default_translated(bool cmd_line);
441extern bool iommu_default_passthrough(void);
439extern struct iommu_resv_region * 442extern struct iommu_resv_region *
440iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, 443iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
441 enum iommu_resv_type type); 444 enum iommu_resv_type type);
@@ -736,6 +739,19 @@ static inline int iommu_request_dma_domain_for_dev(struct device *dev)
736 return -ENODEV; 739 return -ENODEV;
737} 740}
738 741
742static inline void iommu_set_default_passthrough(bool cmd_line)
743{
744}
745
746static inline void iommu_set_default_translated(bool cmd_line)
747{
748}
749
750static inline bool iommu_default_passthrough(void)
751{
752 return true;
753}
754
739static inline int iommu_attach_group(struct iommu_domain *domain, 755static inline int iommu_attach_group(struct iommu_domain *domain,
740 struct iommu_group *group) 756 struct iommu_group *group)
741{ 757{
diff --git a/include/linux/key.h b/include/linux/key.h
index 91f391cd272e..50028338a4cc 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -94,11 +94,11 @@ struct keyring_index_key {
94 union { 94 union {
95 struct { 95 struct {
96#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */ 96#ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
97 u8 desc_len; 97 u16 desc_len;
98 char desc[sizeof(long) - 1]; /* First few chars of description */ 98 char desc[sizeof(long) - 2]; /* First few chars of description */
99#else 99#else
100 char desc[sizeof(long) - 1]; /* First few chars of description */ 100 char desc[sizeof(long) - 2]; /* First few chars of description */
101 u8 desc_len; 101 u16 desc_len;
102#endif 102#endif
103 }; 103 };
104 unsigned long x; 104 unsigned long x;
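Widening desc_len from u8 to u16 costs one byte of the inline description prefix, because the length and the prefix must still pack into the single unsigned long x that keyring lookup compares in one go (presumably so description lengths above 255 no longer overflow the field). The packing on a little-endian LP64 machine, as a standalone sketch:

#include <stdio.h>

union index_key {
    struct {
        unsigned short desc_len;        /* was unsigned char */
        char desc[sizeof(long) - 2];    /* 6 prefix bytes on LP64 */
    };
    unsigned long x;                    /* compared as one word */
};

int main(void)
{
    union index_key a = { .desc_len = 3, .desc = "abc" };
    union index_key b = { .desc_len = 3, .desc = "abc" };

    printf("sizeof=%zu one-word-equal=%d\n", sizeof(a), a.x == b.x);
    return 0;    /* prints: sizeof=8 one-word-equal=1 */
}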
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index cbd9d8495690..88e1e6304a71 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
117unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, 117unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
118 resource_size_t hw_addr, resource_size_t size); 118 resource_size_t hw_addr, resource_size_t size);
119int logic_pio_register_range(struct logic_pio_hwaddr *newrange); 119int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
120void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
120resource_size_t logic_pio_to_hwaddr(unsigned long pio); 121resource_size_t logic_pio_to_hwaddr(unsigned long pio);
121unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr); 122unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
122 123
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index ce9839c8bc1a..c2f056b5766d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -446,11 +446,11 @@ enum {
446}; 446};
447 447
448enum { 448enum {
449 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20, 449 MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
450}; 450};
451 451
452enum { 452enum {
453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20, 453 MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
454}; 454};
455 455
456enum { 456enum {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ec571fd7fcf8..b8b570c30b5e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10054,9 +10054,8 @@ struct mlx5_ifc_tls_static_params_bits {
10054}; 10054};
10055 10055
10056struct mlx5_ifc_tls_progress_params_bits { 10056struct mlx5_ifc_tls_progress_params_bits {
10057 u8 valid[0x1]; 10057 u8 reserved_at_0[0x8];
10058 u8 reserved_at_1[0x7]; 10058 u8 tisn[0x18];
10059 u8 pd[0x18];
10060 10059
10061 u8 next_record_tcp_sn[0x20]; 10060 u8 next_record_tcp_sn[0x20];
10062 10061
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d77d717c620c..3f38c30d2f13 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,8 +215,9 @@ enum node_stat_item {
215 NR_INACTIVE_FILE, /* " " " " " */ 215 NR_INACTIVE_FILE, /* " " " " " */
216 NR_ACTIVE_FILE, /* " " " " " */ 216 NR_ACTIVE_FILE, /* " " " " " */
217 NR_UNEVICTABLE, /* " " " " " */ 217 NR_UNEVICTABLE, /* " " " " " */
218 NR_SLAB_RECLAIMABLE, 218 NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
219 NR_SLAB_UNRECLAIMABLE, 219 NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
220 * memcg_flush_percpu_vmstats() first. */
220 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ 221 NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
221 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ 222 NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
222 WORKINGSET_NODES, 223 WORKINGSET_NODES,
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index 7a6871ac8784..74c6f9241944 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -4,6 +4,9 @@
4 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net> 4 * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
5 */ 5 */
6 6
7#ifndef _NF_CONNTRACK_H323_TYPES_H
8#define _NF_CONNTRACK_H323_TYPES_H
9
7typedef struct TransportAddress_ipAddress { /* SEQUENCE */ 10typedef struct TransportAddress_ipAddress { /* SEQUENCE */
8 int options; /* No use */ 11 int options; /* No use */
9 unsigned int ip; 12 unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage { /* CHOICE */
931 InfoRequestResponse infoRequestResponse; 934 InfoRequestResponse infoRequestResponse;
932 }; 935 };
933} RasMessage; 936} RasMessage;
937
938#endif /* _NF_CONNTRACK_H323_TYPES_H */
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h
index 153bf25b4df3..2c32ca09df02 100644
--- a/include/linux/omap-iommu.h
+++ b/include/linux/omap-iommu.h
@@ -10,12 +10,27 @@
10#ifndef _OMAP_IOMMU_H_ 10#ifndef _OMAP_IOMMU_H_
11#define _OMAP_IOMMU_H_ 11#define _OMAP_IOMMU_H_
12 12
13struct iommu_domain;
14
13#ifdef CONFIG_OMAP_IOMMU 15#ifdef CONFIG_OMAP_IOMMU
14extern void omap_iommu_save_ctx(struct device *dev); 16extern void omap_iommu_save_ctx(struct device *dev);
15extern void omap_iommu_restore_ctx(struct device *dev); 17extern void omap_iommu_restore_ctx(struct device *dev);
18
19int omap_iommu_domain_deactivate(struct iommu_domain *domain);
20int omap_iommu_domain_activate(struct iommu_domain *domain);
16#else 21#else
17static inline void omap_iommu_save_ctx(struct device *dev) {} 22static inline void omap_iommu_save_ctx(struct device *dev) {}
18static inline void omap_iommu_restore_ctx(struct device *dev) {} 23static inline void omap_iommu_restore_ctx(struct device *dev) {}
24
25static inline int omap_iommu_domain_deactivate(struct iommu_domain *domain)
26{
27 return -ENODEV;
28}
29
30static inline int omap_iommu_domain_activate(struct iommu_domain *domain)
31{
32 return -ENODEV;
33}
19#endif 34#endif
20 35
21#endif 36#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 462b90b73f93..2fb9c8ffaf10 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1107,6 +1107,7 @@ int genphy_c45_an_disable_aneg(struct phy_device *phydev);
1107int genphy_c45_read_mdix(struct phy_device *phydev); 1107int genphy_c45_read_mdix(struct phy_device *phydev);
1108int genphy_c45_pma_read_abilities(struct phy_device *phydev); 1108int genphy_c45_pma_read_abilities(struct phy_device *phydev);
1109int genphy_c45_read_status(struct phy_device *phydev); 1109int genphy_c45_read_status(struct phy_device *phydev);
1110int genphy_c45_config_aneg(struct phy_device *phydev);
1110 1111
1111/* The gen10g_* functions are the old Clause 45 stub */ 1112/* The gen10g_* functions are the old Clause 45 stub */
1112int gen10g_config_aneg(struct phy_device *phydev); 1113int gen10g_config_aneg(struct phy_device *phydev);
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index 44d913a7580c..8474a0208b34 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -13,4 +13,8 @@ struct iommu_platform_data {
13 const char *reset_name; 13 const char *reset_name;
14 int (*assert_reset)(struct platform_device *pdev, const char *name); 14 int (*assert_reset)(struct platform_device *pdev, const char *name);
15 int (*deassert_reset)(struct platform_device *pdev, const char *name); 15 int (*deassert_reset)(struct platform_device *pdev, const char *name);
16 int (*device_enable)(struct platform_device *pdev);
17 int (*device_idle)(struct platform_device *pdev);
18 int (*set_pwrdm_constraint)(struct platform_device *pdev, bool request,
19 u8 *pwrst);
16}; 20};
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b5d99482d3fe..1a5f88316b08 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
282extern void exit_signals(struct task_struct *tsk); 282extern void exit_signals(struct task_struct *tsk);
283extern void kernel_sigaction(int, __sighandler_t); 283extern void kernel_sigaction(int, __sighandler_t);
284 284
285#define SIG_KTHREAD ((__force __sighandler_t)2)
286#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
287
285static inline void allow_signal(int sig) 288static inline void allow_signal(int sig)
286{ 289{
287 /* 290 /*
@@ -289,7 +292,17 @@ static inline void allow_signal(int sig)
289 * know it'll be handled, so that they don't get converted to 292 * know it'll be handled, so that they don't get converted to
290 * SIGKILL or just silently dropped. 293 * SIGKILL or just silently dropped.
291 */ 294 */
292 kernel_sigaction(sig, (__force __sighandler_t)2); 295 kernel_sigaction(sig, SIG_KTHREAD);
296}
297
298static inline void allow_kernel_signal(int sig)
299{
300 /*
301 * Kernel threads handle their own signals. Let the signal code
302 * know signals sent by the kernel will be handled, so that they
303 * don't get silently dropped.
304 */
305 kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
293} 306}
294 307
295static inline void disallow_signal(int sig) 308static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d8af86d995d6..ba5583522d24 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1374,6 +1374,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1374 to->l4_hash = from->l4_hash; 1374 to->l4_hash = from->l4_hash;
1375}; 1375};
1376 1376
1377static inline void skb_copy_decrypted(struct sk_buff *to,
1378 const struct sk_buff *from)
1379{
1380#ifdef CONFIG_TLS_DEVICE
1381 to->decrypted = from->decrypted;
1382#endif
1383}
1384
1377#ifdef NET_SKBUFF_DATA_USES_OFFSET 1385#ifdef NET_SKBUFF_DATA_USES_OFFSET
1378static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 1386static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1379{ 1387{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 97523818cb14..fc0bed59fc84 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -292,6 +292,9 @@ struct ucred {
292#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ 292#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
293#define MSG_EOF MSG_FIN 293#define MSG_EOF MSG_FIN
294#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ 294#define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
295#define MSG_SENDPAGE_DECRYPTED 0x100000 /* sendpage() internal : page may carry
296 * plain text and require encryption
297 */
295 298
296#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ 299#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */
297#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 300#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index baa3ecdb882f..27536b961552 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
98 98
99struct rpc_call_ops { 99struct rpc_call_ops {
100 void (*rpc_call_prepare)(struct rpc_task *, void *); 100 void (*rpc_call_prepare)(struct rpc_task *, void *);
101 void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
102 void (*rpc_call_done)(struct rpc_task *, void *); 101 void (*rpc_call_done)(struct rpc_task *, void *);
103 void (*rpc_count_stats)(struct rpc_task *, void *); 102 void (*rpc_count_stats)(struct rpc_task *, void *);
104 void (*rpc_release)(void *); 103 void (*rpc_release)(void *);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 361f62bb4a8e..cde3dc18e21a 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -46,13 +46,17 @@ enum dma_sync_target {
46 46
47extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, 47extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
48 dma_addr_t tbl_dma_addr, 48 dma_addr_t tbl_dma_addr,
49 phys_addr_t phys, size_t size, 49 phys_addr_t phys,
50 size_t mapping_size,
51 size_t alloc_size,
50 enum dma_data_direction dir, 52 enum dma_data_direction dir,
51 unsigned long attrs); 53 unsigned long attrs);
52 54
53extern void swiotlb_tbl_unmap_single(struct device *hwdev, 55extern void swiotlb_tbl_unmap_single(struct device *hwdev,
54 phys_addr_t tlb_addr, 56 phys_addr_t tlb_addr,
55 size_t size, enum dma_data_direction dir, 57 size_t mapping_size,
58 size_t alloc_size,
59 enum dma_data_direction dir,
56 unsigned long attrs); 60 unsigned long attrs);
57 61
58extern void swiotlb_tbl_sync_single(struct device *hwdev, 62extern void swiotlb_tbl_sync_single(struct device *hwdev,
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7acb953298a7..84ff2844df2a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,6 +57,7 @@ struct tk_read_base {
57 * @cs_was_changed_seq: The sequence number of clocksource change events 57 * @cs_was_changed_seq: The sequence number of clocksource change events
58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second 58 * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second
59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds 59 * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds
60 * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
60 * @cycle_interval: Number of clock cycles in one NTP interval 61 * @cycle_interval: Number of clock cycles in one NTP interval
61 * @xtime_interval: Number of clock shifted nano seconds in one NTP 62 * @xtime_interval: Number of clock shifted nano seconds in one NTP
62 * interval. 63 * interval.
@@ -84,6 +85,9 @@ struct tk_read_base {
84 * 85 *
85 * wall_to_monotonic is no longer the boot time, getboottime must be 86 * wall_to_monotonic is no longer the boot time, getboottime must be
86 * used instead. 87 * used instead.
88 *
89 * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
90 * accelerate the VDSO update for CLOCK_BOOTTIME.
87 */ 91 */
88struct timekeeper { 92struct timekeeper {
89 struct tk_read_base tkr_mono; 93 struct tk_read_base tkr_mono;
@@ -99,6 +103,7 @@ struct timekeeper {
99 u8 cs_was_changed_seq; 103 u8 cs_was_changed_seq;
100 ktime_t next_leap_ktime; 104 ktime_t next_leap_ktime;
101 u64 raw_sec; 105 u64 raw_sec;
106 struct timespec64 monotonic_to_boot;
102 107
103 /* The following members are for timekeeping internal use */ 108 /* The following members are for timekeeping internal use */
104 u64 cycle_interval; 109 u64 cycle_interval;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5150436783e8..30a8cdcfd4a4 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
548 548
549#define is_signed_type(type) (((type)(-1)) < (type)1) 549#define is_signed_type(type) (((type)(-1)) < (type)1)
550 550
551int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
551int trace_set_clr_event(const char *system, const char *event, int set); 552int trace_set_clr_event(const char *system, const char *event, int set);
552 553
553/* 554/*
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index f37d12877754..adcc6a97db61 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -308,6 +308,7 @@ do { \
308 \ 308 \
309 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 309 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
310 R##_e = X##_e; \ 310 R##_e = X##_e; \
311 /* Fall through */ \
311 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \ 312 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL): \
312 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ 313 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
313 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ 314 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
@@ -318,6 +319,7 @@ do { \
318 \ 319 \
319 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \ 320 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL): \
320 R##_e = Y##_e; \ 321 R##_e = Y##_e; \
322 /* Fall through */ \
321 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \ 323 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN): \
322 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ 324 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
323 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ 325 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
@@ -415,6 +417,7 @@ do { \
415 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \ 417 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF): \
416 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \ 418 case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO): \
417 R##_s = X##_s; \ 419 R##_s = X##_s; \
420 /* Fall through */ \
418 \ 421 \
419 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \ 422 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF): \
420 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ 423 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
@@ -428,6 +431,7 @@ do { \
428 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \ 431 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN): \
429 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \ 432 case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN): \
430 R##_s = Y##_s; \ 433 R##_s = Y##_s; \
434 /* Fall through */ \
431 \ 435 \
432 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \ 436 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF): \
433 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 437 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
@@ -493,6 +497,7 @@ do { \
493 \ 497 \
494 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \ 498 case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO): \
495 FP_SET_EXCEPTION(FP_EX_DIVZERO); \ 499 FP_SET_EXCEPTION(FP_EX_DIVZERO); \
500 /* Fall through */ \
496 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \ 501 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO): \
497 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \ 502 case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL): \
498 R##_c = FP_CLS_INF; \ 503 R##_c = FP_CLS_INF; \
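These hunks only add comments: each case label reached by falling out of the previous case gets a /* Fall through */ marker, which is what GCC's -Wimplicit-fallthrough recognizes as an intentional fallthrough. The mechanism in miniature:

#include <stdio.h>

/* Build with: gcc -Wimplicit-fallthrough demo.c
 * An unmarked fallthrough warns; the commented one is accepted. */
static const char *classify(int n)
{
    switch (n) {
    case 0:
        n += 1;
        /* Fall through */
    case 1:
        return "small";
    default:
        return "large";
    }
}

int main(void)
{
    printf("%s %s\n", classify(0), classify(5));
    return 0;    /* prints: small large */
}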
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c61a1bf4e3de..3a1a72990fce 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -15,6 +15,7 @@
15struct tcf_idrinfo { 15struct tcf_idrinfo {
16 struct mutex lock; 16 struct mutex lock;
17 struct idr action_idr; 17 struct idr action_idr;
18 struct net *net;
18}; 19};
19 20
20struct tc_action_ops; 21struct tc_action_ops;
@@ -108,7 +109,7 @@ struct tc_action_net {
108}; 109};
109 110
110static inline 111static inline
111int tc_action_net_init(struct tc_action_net *tn, 112int tc_action_net_init(struct net *net, struct tc_action_net *tn,
112 const struct tc_action_ops *ops) 113 const struct tc_action_ops *ops)
113{ 114{
114 int err = 0; 115 int err = 0;
@@ -117,6 +118,7 @@ int tc_action_net_init(struct tc_action_net *tn,
117 if (!tn->idrinfo) 118 if (!tn->idrinfo)
118 return -ENOMEM; 119 return -ENOMEM;
119 tn->ops = ops; 120 tn->ops = ops;
121 tn->idrinfo->net = net;
120 mutex_init(&tn->idrinfo->lock); 122 mutex_init(&tn->idrinfo->lock);
121 idr_init(&tn->idrinfo->action_idr); 123 idr_init(&tn->idrinfo->action_idr);
122 return err; 124 return err;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index becdad576859..3f62b347b04a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
206 unsigned int len) 206 unsigned int len)
207{ 207{
208 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len) 208 if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
209 return -EINVAL; 209 return 0;
210 210
211 return pskb_may_pull(skb, len); 211 return pskb_may_pull(skb, len);
212} 212}
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ded574b32c20..ffc95b382eb5 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -278,6 +278,7 @@ struct hci_dev {
278 __u16 conn_info_min_age; 278 __u16 conn_info_min_age;
279 __u16 conn_info_max_age; 279 __u16 conn_info_max_age;
280 __u16 auth_payload_timeout; 280 __u16 auth_payload_timeout;
281 __u8 min_enc_key_size;
281 __u8 ssp_debug_mode; 282 __u8 ssp_debug_mode;
282 __u8 hw_error_code; 283 __u8 hw_error_code;
283 __u32 clock; 284 __u32 clock;
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 010f26b31c89..bac79e817776 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -171,7 +171,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
171void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, 171void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
172 struct sk_buff *parent); 172 struct sk_buff *parent);
173void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, 173void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
174 void *reasm_data); 174 void *reasm_data, bool try_coalesce);
175struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); 175struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
176 176
177#endif 177#endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 4a9da951a794..ab40d7afdc54 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,7 +52,7 @@ struct bpf_prog;
52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) 52#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
53 53
54struct net { 54struct net {
55 refcount_t passive; /* To decided when the network 55 refcount_t passive; /* To decide when the network
56 * namespace should be freed. 56 * namespace should be freed.
57 */ 57 */
58 refcount_t count; /* To decided when the network 58 refcount_t count; /* To decided when the network
@@ -61,7 +61,6 @@ struct net {
61 spinlock_t rules_mod_lock; 61 spinlock_t rules_mod_lock;
62 62
63 u32 hash_mix; 63 u32 hash_mix;
64 atomic64_t cookie_gen;
65 64
66 struct list_head list; /* list of network namespaces */ 65 struct list_head list; /* list of network namespaces */
67 struct list_head exit_list; /* To linked to call pernet exit 66 struct list_head exit_list; /* To linked to call pernet exit
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 9b624566b82d..475d6f28ca67 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -421,8 +421,7 @@ struct nft_set {
421 unsigned char *udata; 421 unsigned char *udata;
422 /* runtime data below here */ 422 /* runtime data below here */
423 const struct nft_set_ops *ops ____cacheline_aligned; 423 const struct nft_set_ops *ops ____cacheline_aligned;
424 u16 flags:13, 424 u16 flags:14,
425 bound:1,
426 genmask:2; 425 genmask:2;
427 u8 klen; 426 u8 klen;
428 u8 dlen; 427 u8 dlen;
@@ -1348,12 +1347,15 @@ struct nft_trans_rule {
1348struct nft_trans_set { 1347struct nft_trans_set {
1349 struct nft_set *set; 1348 struct nft_set *set;
1350 u32 set_id; 1349 u32 set_id;
1350 bool bound;
1351}; 1351};
1352 1352
1353#define nft_trans_set(trans) \ 1353#define nft_trans_set(trans) \
1354 (((struct nft_trans_set *)trans->data)->set) 1354 (((struct nft_trans_set *)trans->data)->set)
1355#define nft_trans_set_id(trans) \ 1355#define nft_trans_set_id(trans) \
1356 (((struct nft_trans_set *)trans->data)->set_id) 1356 (((struct nft_trans_set *)trans->data)->set_id)
1357#define nft_trans_set_bound(trans) \
1358 (((struct nft_trans_set *)trans->data)->bound)
1357 1359
1358struct nft_trans_chain { 1360struct nft_trans_chain {
1359 bool update; 1361 bool update;
@@ -1384,12 +1386,15 @@ struct nft_trans_table {
1384struct nft_trans_elem { 1386struct nft_trans_elem {
1385 struct nft_set *set; 1387 struct nft_set *set;
1386 struct nft_set_elem elem; 1388 struct nft_set_elem elem;
1389 bool bound;
1387}; 1390};
1388 1391
1389#define nft_trans_elem_set(trans) \ 1392#define nft_trans_elem_set(trans) \
1390 (((struct nft_trans_elem *)trans->data)->set) 1393 (((struct nft_trans_elem *)trans->data)->set)
1391#define nft_trans_elem(trans) \ 1394#define nft_trans_elem(trans) \
1392 (((struct nft_trans_elem *)trans->data)->elem) 1395 (((struct nft_trans_elem *)trans->data)->elem)
1396#define nft_trans_elem_set_bound(trans) \
1397 (((struct nft_trans_elem *)trans->data)->bound)
1393 1398
1394struct nft_trans_obj { 1399struct nft_trans_obj {
1395 struct nft_object *obj; 1400 struct nft_object *obj;
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 3196663a10e3..c8b9dec376f5 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -73,4 +73,6 @@ int nft_flow_rule_offload_commit(struct net *net);
73 (__reg)->key = __key; \ 73 (__reg)->key = __key; \
74 memset(&(__reg)->mask, 0xff, (__reg)->len); 74 memset(&(__reg)->mask, 0xff, (__reg)->len);
75 75
76int nft_chain_offload_priority(struct nft_base_chain *basechain);
77
76#endif 78#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e4650e5b64a1..b140c8f1be22 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -684,9 +684,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
684 const struct nla_policy *policy, 684 const struct nla_policy *policy,
685 struct netlink_ext_ack *extack) 685 struct netlink_ext_ack *extack)
686{ 686{
687 return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen), 687 return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
688 nlmsg_attrlen(nlh, hdrlen), policy, 688 NL_VALIDATE_STRICT, extack);
689 NL_VALIDATE_STRICT, extack);
690} 689}
691 690
692/** 691/**
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 25f1f9a8419b..95f766c31c90 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
141 141
142 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 142 nh_grp = rcu_dereference_rtnl(nh->nh_grp);
143 rc = nh_grp->num_nh; 143 rc = nh_grp->num_nh;
144 } else {
145 const struct nh_info *nhi;
146
147 nhi = rcu_dereference_rtnl(nh->nh_info);
148 if (nhi->reject_nh)
149 rc = 0;
150 } 144 }
151 145
152 return rc; 146 return rc;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e429809ca90d..98be18ef1ed3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -646,7 +646,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
646{ 646{
647 cls_common->chain_index = tp->chain->index; 647 cls_common->chain_index = tp->chain->index;
648 cls_common->protocol = tp->protocol; 648 cls_common->protocol = tp->protocol;
649 cls_common->prio = tp->prio; 649 cls_common->prio = tp->prio >> 16;
650 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) 650 if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
651 cls_common->extack = extack; 651 cls_common->extack = extack;
652} 652}
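tc_cls_common_offload_init() now reports tp->prio shifted down by 16: the classifier core keeps the user-visible priority in the upper 16 bits of tp->prio, and drivers programming hardware want that major number rather than the shifted internal form. A worked example under that assumption:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical: 'tc filter add ... prio 10' is stored internally
     * with the user value in the upper 16 bits. */
    uint32_t tp_prio = 10u << 16;

    printf("internal=0x%x offloaded prio=%u\n", tp_prio, tp_prio >> 16);
    return 0;    /* prints: internal=0xa0000 offloaded prio=10 */
}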
diff --git a/include/net/psample.h b/include/net/psample.h
index 37a4df2325b2..6b578ce69cd8 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -11,6 +11,7 @@ struct psample_group {
11 u32 group_num; 11 u32 group_num;
12 u32 refcount; 12 u32 refcount;
13 u32 seq; 13 u32 seq;
14 struct rcu_head rcu;
14}; 15};
15 16
16struct psample_group *psample_group_get(struct net *net, u32 group_num); 17struct psample_group *psample_group_get(struct net *net, u32 group_num);
diff --git a/include/net/route.h b/include/net/route.h
index 630a0493f1f3..dfce19c9fa96 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
233 233
234int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 234int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
235 u32 table_id, struct fib_info *fi, 235 u32 table_id, struct fib_info *fi,
236 int *fa_index, int fa_start); 236 int *fa_index, int fa_start, unsigned int flags);
237 237
238static inline void ip_rt_put(struct rtable *rt) 238static inline void ip_rt_put(struct rtable *rt)
239{ 239{
diff --git a/include/net/sock.h b/include/net/sock.h
index 228db3998e46..2c53f1a1d905 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk)
2482 2482
2483/* Checks if this SKB belongs to an HW offloaded socket 2483/* Checks if this SKB belongs to an HW offloaded socket
2484 * and whether any SW fallbacks are required based on dev. 2484 * and whether any SW fallbacks are required based on dev.
2485 * Check decrypted mark in case skb_orphan() cleared socket.
2485 */ 2486 */
2486static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, 2487static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2487 struct net_device *dev) 2488 struct net_device *dev)
@@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
2489#ifdef CONFIG_SOCK_VALIDATE_XMIT 2490#ifdef CONFIG_SOCK_VALIDATE_XMIT
2490 struct sock *sk = skb->sk; 2491 struct sock *sk = skb->sk;
2491 2492
2492 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) 2493 if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
2493 skb = sk->sk_validate_xmit_skb(sk, dev, skb); 2494 skb = sk->sk_validate_xmit_skb(sk, dev, skb);
2495#ifdef CONFIG_TLS_DEVICE
2496 } else if (unlikely(skb->decrypted)) {
2497 pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
2498 kfree_skb(skb);
2499 skb = NULL;
2500#endif
2501 }
2494#endif 2502#endif
2495 2503
2496 return skb; 2504 return skb;
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index b0fc6b26bdf5..83df1ec6664e 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -105,8 +105,7 @@ struct rdma_restrack_entry {
105}; 105};
106 106
107int rdma_restrack_count(struct ib_device *dev, 107int rdma_restrack_count(struct ib_device *dev,
108 enum rdma_restrack_type type, 108 enum rdma_restrack_type type);
109 struct pid_namespace *ns);
110 109
111void rdma_restrack_kadd(struct rdma_restrack_entry *res); 110void rdma_restrack_kadd(struct rdma_restrack_entry *res);
112void rdma_restrack_uadd(struct rdma_restrack_entry *res); 111void rdma_restrack_uadd(struct rdma_restrack_entry *res);
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 50f49e043668..d1a93c73f006 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -46,7 +46,9 @@ struct mcip_cmd {
46#define CMD_IDU_ENABLE 0x71 46#define CMD_IDU_ENABLE 0x71
47#define CMD_IDU_DISABLE 0x72 47#define CMD_IDU_DISABLE 0x72
48#define CMD_IDU_SET_MODE 0x74 48#define CMD_IDU_SET_MODE 0x74
49#define CMD_IDU_READ_MODE 0x75
49#define CMD_IDU_SET_DEST 0x76 50#define CMD_IDU_SET_DEST 0x76
51#define CMD_IDU_ACK_CIRQ 0x79
50#define CMD_IDU_SET_MASK 0x7C 52#define CMD_IDU_SET_MASK 0x7C
51 53
52#define IDU_M_TRIG_LEVEL 0x0 54#define IDU_M_TRIG_LEVEL 0x0
@@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
119 __mcip_cmd(cmd, param); 121 __mcip_cmd(cmd, param);
120} 122}
121 123
124/*
125 * Read MCIP register
126 */
127static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
128{
129 __mcip_cmd(cmd, param);
130 return read_aux_reg(ARC_REG_MCIP_READBACK);
131}
132
122#endif 133#endif
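
__mcip_cmd_read() pairs a command write with a read of the MCIP READBACK aux register, which is exactly what the newly added CMD_IDU_READ_MODE opcode needs. A kernel-context sketch of a caller (the wrapper name is illustrative; MCIP command traffic must be serialized by the caller, as the arch code does under its own spinlock):

#include <soc/arc/mcip.h>

/* fetch the programmed trigger/distribution mode of one common IRQ */
static unsigned int idu_read_mode(unsigned int cmn_irq)
{
	return __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
}
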
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index 79b74ced9d91..5a34b87d89e3 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -20,11 +20,6 @@ struct mtk_smi_larb_iommu {
20 unsigned int mmu; 20 unsigned int mmu;
21}; 21};
22 22
23struct mtk_smi_iommu {
24 unsigned int larb_nr;
25 struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
26};
27
28/* 23/*
29 * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter. 24 * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter.
30 * It also initializes some basic settings (like iommu). 25 * It also initializes some basic settings (like iommu).
30 * It also initializes some basic settings (like iommu). 25 * It also initializes some basic settings (like iommu).
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
new file mode 100644
index 000000000000..54e61d456cdf
--- /dev/null
+++ b/include/trace/events/intel_iommu.h
@@ -0,0 +1,106 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Intel IOMMU trace support
4 *
5 * Copyright (C) 2019 Intel Corporation
6 *
7 * Author: Lu Baolu <baolu.lu@linux.intel.com>
8 */
9#ifdef CONFIG_INTEL_IOMMU
10#undef TRACE_SYSTEM
11#define TRACE_SYSTEM intel_iommu
12
13#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
14#define _TRACE_INTEL_IOMMU_H
15
16#include <linux/tracepoint.h>
17#include <linux/intel-iommu.h>
18
19DECLARE_EVENT_CLASS(dma_map,
20 TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
21 size_t size),
22
23 TP_ARGS(dev, dev_addr, phys_addr, size),
24
25 TP_STRUCT__entry(
26 __string(dev_name, dev_name(dev))
27 __field(dma_addr_t, dev_addr)
28 __field(phys_addr_t, phys_addr)
29 __field(size_t, size)
30 ),
31
32 TP_fast_assign(
33 __assign_str(dev_name, dev_name(dev));
34 __entry->dev_addr = dev_addr;
35 __entry->phys_addr = phys_addr;
36 __entry->size = size;
37 ),
38
39 TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu",
40 __get_str(dev_name),
41 (unsigned long long)__entry->dev_addr,
42 (unsigned long long)__entry->phys_addr,
43 __entry->size)
44);
45
46DEFINE_EVENT(dma_map, map_single,
47 TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
48 size_t size),
49 TP_ARGS(dev, dev_addr, phys_addr, size)
50);
51
52DEFINE_EVENT(dma_map, map_sg,
53 TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
54 size_t size),
55 TP_ARGS(dev, dev_addr, phys_addr, size)
56);
57
58DEFINE_EVENT(dma_map, bounce_map_single,
59 TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
60 size_t size),
61 TP_ARGS(dev, dev_addr, phys_addr, size)
62);
63
64DECLARE_EVENT_CLASS(dma_unmap,
65 TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
66
67 TP_ARGS(dev, dev_addr, size),
68
69 TP_STRUCT__entry(
70 __string(dev_name, dev_name(dev))
71 __field(dma_addr_t, dev_addr)
72 __field(size_t, size)
73 ),
74
75 TP_fast_assign(
76 __assign_str(dev_name, dev_name(dev));
77 __entry->dev_addr = dev_addr;
78 __entry->size = size;
79 ),
80
81 TP_printk("dev=%s dev_addr=0x%llx size=%zu",
82 __get_str(dev_name),
83 (unsigned long long)__entry->dev_addr,
84 __entry->size)
85);
86
87DEFINE_EVENT(dma_unmap, unmap_single,
88 TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
89 TP_ARGS(dev, dev_addr, size)
90);
91
92DEFINE_EVENT(dma_unmap, unmap_sg,
93 TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
94 TP_ARGS(dev, dev_addr, size)
95);
96
97DEFINE_EVENT(dma_unmap, bounce_unmap_single,
98 TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
99 TP_ARGS(dev, dev_addr, size)
100);
101
102#endif /* _TRACE_INTEL_IOMMU_H */
103
104/* This part must be outside protection */
105#include <trace/define_trace.h>
106#endif /* CONFIG_INTEL_IOMMU */
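
This header follows the usual tracepoint layout: DECLARE_EVENT_CLASS() defines the record format once, and each DEFINE_EVENT() stamps out a trace_<name>() hook sharing it, so map_single, map_sg and bounce_map_single all emit dma_map-shaped records. A kernel-context sketch of a call site, assuming the standard rule that exactly one translation unit defines CREATE_TRACE_POINTS before including the header:

#define CREATE_TRACE_POINTS	/* emit tracepoint bodies in this unit only */
#include <trace/events/intel_iommu.h>

static void report_bounce(struct device *dev, dma_addr_t dev_addr,
			  phys_addr_t phys, size_t size)
{
	/* generated by DEFINE_EVENT(dma_map, bounce_map_single, ...) */
	trace_bounce_map_single(dev, dev_addr, phys, size);
}
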
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index cc1d060cbf13..a13a62db3565 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -23,20 +23,17 @@
23#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY 23#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
24 24
25enum rxrpc_skb_trace { 25enum rxrpc_skb_trace {
26 rxrpc_skb_rx_cleaned, 26 rxrpc_skb_cleaned,
27 rxrpc_skb_rx_freed, 27 rxrpc_skb_freed,
28 rxrpc_skb_rx_got, 28 rxrpc_skb_got,
29 rxrpc_skb_rx_lost, 29 rxrpc_skb_lost,
30 rxrpc_skb_rx_purged, 30 rxrpc_skb_new,
31 rxrpc_skb_rx_received, 31 rxrpc_skb_purged,
32 rxrpc_skb_rx_rotated, 32 rxrpc_skb_received,
33 rxrpc_skb_rx_seen, 33 rxrpc_skb_rotated,
34 rxrpc_skb_tx_cleaned, 34 rxrpc_skb_seen,
35 rxrpc_skb_tx_freed, 35 rxrpc_skb_unshared,
36 rxrpc_skb_tx_got, 36 rxrpc_skb_unshared_nomem,
37 rxrpc_skb_tx_new,
38 rxrpc_skb_tx_rotated,
39 rxrpc_skb_tx_seen,
40}; 37};
41 38
42enum rxrpc_local_trace { 39enum rxrpc_local_trace {
@@ -228,20 +225,17 @@ enum rxrpc_tx_point {
228 * Declare tracing information enums and their string mappings for display. 225 * Declare tracing information enums and their string mappings for display.
229 */ 226 */
230#define rxrpc_skb_traces \ 227#define rxrpc_skb_traces \
231 EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ 228 EM(rxrpc_skb_cleaned, "CLN") \
232 EM(rxrpc_skb_rx_freed, "Rx FRE") \ 229 EM(rxrpc_skb_freed, "FRE") \
233 EM(rxrpc_skb_rx_got, "Rx GOT") \ 230 EM(rxrpc_skb_got, "GOT") \
234 EM(rxrpc_skb_rx_lost, "Rx *L*") \ 231 EM(rxrpc_skb_lost, "*L*") \
235 EM(rxrpc_skb_rx_purged, "Rx PUR") \ 232 EM(rxrpc_skb_new, "NEW") \
236 EM(rxrpc_skb_rx_received, "Rx RCV") \ 233 EM(rxrpc_skb_purged, "PUR") \
237 EM(rxrpc_skb_rx_rotated, "Rx ROT") \ 234 EM(rxrpc_skb_received, "RCV") \
238 EM(rxrpc_skb_rx_seen, "Rx SEE") \ 235 EM(rxrpc_skb_rotated, "ROT") \
239 EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ 236 EM(rxrpc_skb_seen, "SEE") \
240 EM(rxrpc_skb_tx_freed, "Tx FRE") \ 237 EM(rxrpc_skb_unshared, "UNS") \
241 EM(rxrpc_skb_tx_got, "Tx GOT") \ 238 E_(rxrpc_skb_unshared_nomem, "US0")
242 EM(rxrpc_skb_tx_new, "Tx NEW") \
243 EM(rxrpc_skb_tx_rotated, "Tx ROT") \
244 E_(rxrpc_skb_tx_seen, "Tx SEE")
245 239
246#define rxrpc_local_traces \ 240#define rxrpc_local_traces \
247 EM(rxrpc_local_got, "GOT") \ 241 EM(rxrpc_local_got, "GOT") \
@@ -498,10 +492,10 @@ rxrpc_tx_points;
498#define E_(a, b) { a, b } 492#define E_(a, b) { a, b }
499 493
500TRACE_EVENT(rxrpc_local, 494TRACE_EVENT(rxrpc_local,
501 TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op, 495 TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
502 int usage, const void *where), 496 int usage, const void *where),
503 497
504 TP_ARGS(local, op, usage, where), 498 TP_ARGS(local_debug_id, op, usage, where),
505 499
506 TP_STRUCT__entry( 500 TP_STRUCT__entry(
507 __field(unsigned int, local ) 501 __field(unsigned int, local )
@@ -511,7 +505,7 @@ TRACE_EVENT(rxrpc_local,
511 ), 505 ),
512 506
513 TP_fast_assign( 507 TP_fast_assign(
514 __entry->local = local->debug_id; 508 __entry->local = local_debug_id;
515 __entry->op = op; 509 __entry->op = op;
516 __entry->usage = usage; 510 __entry->usage = usage;
517 __entry->where = where; 511 __entry->where = where;
@@ -643,13 +637,14 @@ TRACE_EVENT(rxrpc_call,
643 637
644TRACE_EVENT(rxrpc_skb, 638TRACE_EVENT(rxrpc_skb,
645 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, 639 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
646 int usage, int mod_count, const void *where), 640 int usage, int mod_count, u8 flags, const void *where),
647 641
648 TP_ARGS(skb, op, usage, mod_count, where), 642 TP_ARGS(skb, op, usage, mod_count, flags, where),
649 643
650 TP_STRUCT__entry( 644 TP_STRUCT__entry(
651 __field(struct sk_buff *, skb ) 645 __field(struct sk_buff *, skb )
652 __field(enum rxrpc_skb_trace, op ) 646 __field(enum rxrpc_skb_trace, op )
647 __field(u8, flags )
653 __field(int, usage ) 648 __field(int, usage )
654 __field(int, mod_count ) 649 __field(int, mod_count )
655 __field(const void *, where ) 650 __field(const void *, where )
@@ -657,14 +652,16 @@ TRACE_EVENT(rxrpc_skb,
657 652
658 TP_fast_assign( 653 TP_fast_assign(
659 __entry->skb = skb; 654 __entry->skb = skb;
655 __entry->flags = flags;
660 __entry->op = op; 656 __entry->op = op;
661 __entry->usage = usage; 657 __entry->usage = usage;
662 __entry->mod_count = mod_count; 658 __entry->mod_count = mod_count;
663 __entry->where = where; 659 __entry->where = where;
664 ), 660 ),
665 661
666 TP_printk("s=%p %s u=%d m=%d p=%pSR", 662 TP_printk("s=%p %cx %s u=%d m=%d p=%pSR",
667 __entry->skb, 663 __entry->skb,
664 __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R',
668 __print_symbolic(__entry->op, rxrpc_skb_traces), 665 __print_symbolic(__entry->op, rxrpc_skb_traces),
669 __entry->usage, 666 __entry->usage,
670 __entry->mod_count, 667 __entry->mod_count,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index fa1c753dcdbc..a5aa7d3ac6a1 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * An 8-byte long non-decreasing number on success, or 0 if the 1472 * An 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index a18b719f49d4..784ba0b9690a 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -77,11 +77,6 @@
77 77
78#define JFFS2_ACL_VERSION 0x0001 78#define JFFS2_ACL_VERSION 0x0001
79 79
80// Maybe later...
81//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
82//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
83
84
85#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at 80#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
86 mount time, don't wait for it to 81 mount time, don't wait for it to
87 happen later */ 82 happen later */
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d760ee3..b5123ab8d54a 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
11 struct nf_acct *nfacct; 11 struct nf_acct *nfacct;
12}; 12};
13 13
14struct xt_nfacct_match_info_v1 {
15 char name[NFACCT_NAME_MAX];
16 struct nf_acct *nfacct __attribute__((aligned(8)));
17};
18
14#endif /* _XT_NFACCT_MATCH_H */ 19#endif /* _XT_NFACCT_MATCH_H */
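
The aligned(8) attribute on the embedded kernel pointer is what makes the v1 revision compat-safe: it pins the member to the same offset and the struct to the same size whether userspace is 32-bit or 64-bit, so no compat translation layer is needed. A standalone layout check (NFACCT_NAME_MAX is assumed to be 32 here, and the struct is a local stand-in for the uapi one):

#include <stddef.h>
#include <stdio.h>

#define NFACCT_NAME_MAX 32	/* assumed value */

struct nfacct_match_v1_model {
	char name[NFACCT_NAME_MAX];
	void *nfacct __attribute__((aligned(8)));
};

int main(void)
{
	/* prints offset=32 size=40 on 32-bit and 64-bit builds alike */
	printf("offset=%zu size=%zu\n",
	       offsetof(struct nfacct_match_v1_model, nfacct),
	       sizeof(struct nfacct_match_v1_model));
	return 0;
}
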
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index fd6b5f66e2c5..cba368e55863 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
250 __u32 rdma_mr_max; 250 __u32 rdma_mr_max;
251 __u32 rdma_mr_size; 251 __u32 rdma_mr_size;
252 __u8 tos; 252 __u8 tos;
253 __u8 sl;
253 __u32 cache_allocs; 254 __u32 cache_allocs;
254}; 255};
255 256
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
265 __u32 rdma_mr_max; 266 __u32 rdma_mr_max;
266 __u32 rdma_mr_size; 267 __u32 rdma_mr_size;
267 __u8 tos; 268 __u8 tos;
269 __u8 sl;
268 __u32 cache_allocs; 270 __u32 cache_allocs;
269}; 271};
270 272
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 8191a7db2777..66088a9e9b9e 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -890,7 +890,8 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog,
890 890
891static int bpf_jit_blind_insn(const struct bpf_insn *from, 891static int bpf_jit_blind_insn(const struct bpf_insn *from,
892 const struct bpf_insn *aux, 892 const struct bpf_insn *aux,
893 struct bpf_insn *to_buff) 893 struct bpf_insn *to_buff,
894 bool emit_zext)
894{ 895{
895 struct bpf_insn *to = to_buff; 896 struct bpf_insn *to = to_buff;
896 u32 imm_rnd = get_random_int(); 897 u32 imm_rnd = get_random_int();
@@ -1005,6 +1006,8 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
1005 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1006 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1006 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1007 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1007 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1008 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1009 if (emit_zext)
1010 *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1008 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1011 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX);
1009 break; 1012 break;
1010 1013
@@ -1088,7 +1091,8 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1088 insn[1].code == 0) 1091 insn[1].code == 0)
1089 memcpy(aux, insn, sizeof(aux)); 1092 memcpy(aux, insn, sizeof(aux));
1090 1093
1091 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); 1094 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1095 clone->aux->verifier_zext);
1092 if (!rewritten) 1096 if (!rewritten)
1093 continue; 1097 continue;
1094 1098
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d141f16f6fa..272071e9112f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1707 if (err) 1707 if (err)
1708 goto free_used_maps; 1708 goto free_used_maps;
1709 1709
1710 err = bpf_prog_new_fd(prog); 1710 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
1711 if (err < 0) { 1711 * effectively publicly exposed. However, retrieving via
1712 /* failed to allocate fd. 1712 * bpf_prog_get_fd_by_id() will take another reference,
1713 * bpf_prog_put() is needed because the above 1713 * therefore it cannot be gone underneath us.
1714 * bpf_prog_alloc_id() has published the prog 1714 *
1715 * to the userspace and the userspace may 1715 * Only for the time /after/ successful bpf_prog_new_fd()
1716 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. 1716 * and before returning to userspace, we might just hold
1717 */ 1717 * one reference and any parallel close on that fd could
1718 bpf_prog_put(prog); 1718 * rip everything out. Hence, below notifications must
1719 return err; 1719 * happen before bpf_prog_new_fd().
1720 } 1720 *
1721 1721 * Also, any failure handling from this point onwards must
1722 * be using bpf_prog_put() given the program is exposed.
1723 */
1722 bpf_prog_kallsyms_add(prog); 1724 bpf_prog_kallsyms_add(prog);
1723 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 1725 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
1726
1727 err = bpf_prog_new_fd(prog);
1728 if (err < 0)
1729 bpf_prog_put(prog);
1724 return err; 1730 return err;
1725 1731
1726free_used_maps: 1732free_used_maps:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c84d83f86141..b5c14c9d7b98 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
985 reg->smax_value = S64_MAX; 985 reg->smax_value = S64_MAX;
986 reg->umin_value = 0; 986 reg->umin_value = 0;
987 reg->umax_value = U64_MAX; 987 reg->umax_value = U64_MAX;
988
989 /* constant backtracking is enabled for root only for now */
990 reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
991} 988}
992 989
993/* Mark a register as having a completely unknown (scalar) value. */ 990/* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
1014 __mark_reg_not_init(regs + regno); 1011 __mark_reg_not_init(regs + regno);
1015 return; 1012 return;
1016 } 1013 }
1017 __mark_reg_unknown(regs + regno); 1014 regs += regno;
1015 __mark_reg_unknown(regs);
1016 /* constant backtracking is enabled for root without bpf2bpf calls */
1017 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1018 true : false;
1018} 1019}
1019 1020
1020static void __mark_reg_not_init(struct bpf_reg_state *reg) 1021static void __mark_reg_not_init(struct bpf_reg_state *reg)
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 2bd410f934b3..69cfb4345388 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
230 */ 230 */
231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) 231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
232{ 232{
233 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 233 size_t count = size >> PAGE_SHIFT;
234 size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
235 size_t align = get_order(PAGE_ALIGN(size));
236 struct page *page = NULL; 234 struct page *page = NULL;
237 struct cma *cma = NULL; 235 struct cma *cma = NULL;
238 236
@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
243 241
244 /* CMA can be used only in the context which permits sleeping */ 242 /* CMA can be used only in the context which permits sleeping */
245 if (cma && gfpflags_allow_blocking(gfp)) { 243 if (cma && gfpflags_allow_blocking(gfp)) {
244 size_t align = get_order(size);
246 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); 245 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
247 246
248 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); 247 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
249 } 248 }
250 249
251 /* Fallback allocation of normal pages */
252 if (!page)
253 page = alloc_pages_node(node, gfp, align);
254 return page; 250 return page;
255} 251}
256 252
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 795c9b095d75..8402b29c280f 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -85,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
85struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, 85struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
86 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 86 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
87{ 87{
88 size_t alloc_size = PAGE_ALIGN(size);
89 int node = dev_to_node(dev);
88 struct page *page = NULL; 90 struct page *page = NULL;
89 u64 phys_mask; 91 u64 phys_mask;
90 92
@@ -95,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
95 gfp &= ~__GFP_ZERO; 97 gfp &= ~__GFP_ZERO;
96 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, 98 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
97 &phys_mask); 99 &phys_mask);
100 page = dma_alloc_contiguous(dev, alloc_size, gfp);
101 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
102 dma_free_contiguous(dev, page, alloc_size);
103 page = NULL;
104 }
98again: 105again:
99 page = dma_alloc_contiguous(dev, size, gfp); 106 if (!page)
107 page = alloc_pages_node(node, gfp, get_order(alloc_size));
100 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { 108 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
101 dma_free_contiguous(dev, page, size); 109 dma_free_contiguous(dev, page, size);
102 page = NULL; 110 page = NULL;
@@ -297,7 +305,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
297 dma_direct_sync_single_for_cpu(dev, addr, size, dir); 305 dma_direct_sync_single_for_cpu(dev, addr, size, dir);
298 306
299 if (unlikely(is_swiotlb_buffer(phys))) 307 if (unlikely(is_swiotlb_buffer(phys)))
300 swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); 308 swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
301} 309}
302EXPORT_SYMBOL(dma_direct_unmap_page); 310EXPORT_SYMBOL(dma_direct_unmap_page);
303 311
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9de232229063..796a44f8ef5a 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -444,7 +444,9 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
444 444
445phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, 445phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
446 dma_addr_t tbl_dma_addr, 446 dma_addr_t tbl_dma_addr,
447 phys_addr_t orig_addr, size_t size, 447 phys_addr_t orig_addr,
448 size_t mapping_size,
449 size_t alloc_size,
448 enum dma_data_direction dir, 450 enum dma_data_direction dir,
449 unsigned long attrs) 451 unsigned long attrs)
450{ 452{
@@ -464,6 +466,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
464 pr_warn_once("%s is active and system is using DMA bounce buffers\n", 466 pr_warn_once("%s is active and system is using DMA bounce buffers\n",
465 sme_active() ? "SME" : "SEV"); 467 sme_active() ? "SME" : "SEV");
466 468
469 if (mapping_size > alloc_size) {
470 dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
471 mapping_size, alloc_size);
472 return (phys_addr_t)DMA_MAPPING_ERROR;
473 }
474
467 mask = dma_get_seg_boundary(hwdev); 475 mask = dma_get_seg_boundary(hwdev);
468 476
469 tbl_dma_addr &= mask; 477 tbl_dma_addr &= mask;
@@ -471,8 +479,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
471 offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 479 offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
472 480
473 /* 481 /*
474 * Carefully handle integer overflow which can occur when mask == ~0UL. 482 * Carefully handle integer overflow which can occur when mask == ~0UL.
475 */ 483 */
476 max_slots = mask + 1 484 max_slots = mask + 1
477 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT 485 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
478 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); 486 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -481,8 +489,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
481 * For mappings greater than or equal to a page, we limit the stride 489 * For mappings greater than or equal to a page, we limit the stride
482 * (and hence alignment) to a page size. 490 * (and hence alignment) to a page size.
483 */ 491 */
484 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 492 nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
485 if (size >= PAGE_SIZE) 493 if (alloc_size >= PAGE_SIZE)
486 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); 494 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
487 else 495 else
488 stride = 1; 496 stride = 1;
@@ -547,7 +555,7 @@ not_found:
547 spin_unlock_irqrestore(&io_tlb_lock, flags); 555 spin_unlock_irqrestore(&io_tlb_lock, flags);
548 if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) 556 if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
549 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", 557 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
550 size, io_tlb_nslabs, tmp_io_tlb_used); 558 alloc_size, io_tlb_nslabs, tmp_io_tlb_used);
551 return (phys_addr_t)DMA_MAPPING_ERROR; 559 return (phys_addr_t)DMA_MAPPING_ERROR;
552found: 560found:
553 io_tlb_used += nslots; 561 io_tlb_used += nslots;
@@ -562,7 +570,7 @@ found:
562 io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); 570 io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
563 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && 571 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
564 (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 572 (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
565 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE); 573 swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
566 574
567 return tlb_addr; 575 return tlb_addr;
568} 576}
@@ -571,11 +579,11 @@ found:
571 * tlb_addr is the physical address of the bounce buffer to unmap. 579 * tlb_addr is the physical address of the bounce buffer to unmap.
572 */ 580 */
573void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, 581void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
574 size_t size, enum dma_data_direction dir, 582 size_t mapping_size, size_t alloc_size,
575 unsigned long attrs) 583 enum dma_data_direction dir, unsigned long attrs)
576{ 584{
577 unsigned long flags; 585 unsigned long flags;
578 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 586 int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
579 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; 587 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
580 phys_addr_t orig_addr = io_tlb_orig_addr[index]; 588 phys_addr_t orig_addr = io_tlb_orig_addr[index];
581 589
@@ -585,7 +593,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
585 if (orig_addr != INVALID_PHYS_ADDR && 593 if (orig_addr != INVALID_PHYS_ADDR &&
586 !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && 594 !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
587 ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 595 ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
588 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); 596 swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);
589 597
590 /* 598 /*
591 * Return the buffer to the free list by setting the corresponding 599 * Return the buffer to the free list by setting the corresponding
@@ -665,14 +673,14 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
665 673
666 /* Oh well, have to allocate and map a bounce buffer. */ 674 /* Oh well, have to allocate and map a bounce buffer. */
667 *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start), 675 *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
668 *phys, size, dir, attrs); 676 *phys, size, size, dir, attrs);
669 if (*phys == (phys_addr_t)DMA_MAPPING_ERROR) 677 if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
670 return false; 678 return false;
671 679
672 /* Ensure that the address returned is DMA'ble */ 680 /* Ensure that the address returned is DMA'ble */
673 *dma_addr = __phys_to_dma(dev, *phys); 681 *dma_addr = __phys_to_dma(dev, *phys);
674 if (unlikely(!dma_capable(dev, *dma_addr, size))) { 682 if (unlikely(!dma_capable(dev, *dma_addr, size))) {
675 swiotlb_tbl_unmap_single(dev, *phys, size, dir, 683 swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
676 attrs | DMA_ATTR_SKIP_CPU_SYNC); 684 attrs | DMA_ATTR_SKIP_CPU_SYNC);
677 return false; 685 return false;
678 } 686 }
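
The mapping_size/alloc_size split lets a caller reserve more bounce slots than it actually copies: swiotlb_bounce() moves only mapping_size bytes, alloc_size drives slot accounting, and the new guard rejects mapping_size > alloc_size. A kernel-context sketch of a caller that pads its reservation to whole slots (the padding policy and wrapper are illustrative):

#include <linux/dma-direct.h>
#include <linux/swiotlb.h>

static phys_addr_t map_padded(struct device *dev, phys_addr_t phys,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	size_t alloc = ALIGN(size, 1 << IO_TLB_SHIFT);	/* padded reservation */

	return swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
				      phys, size, alloc, dir, attrs);
}
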
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9484e88dabc2..9be995fc3c5a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
295 } 295 }
296} 296}
297 297
298static void irq_sysfs_del(struct irq_desc *desc)
299{
300 /*
301 * If irq_sysfs_init() has not yet been invoked (early boot), then
302 * irq_kobj_base is NULL and the descriptor was never added.
303 * kobject_del() complains about an object with no parent, so make
304 * it conditional.
305 */
306 if (irq_kobj_base)
307 kobject_del(&desc->kobj);
308}
309
298static int __init irq_sysfs_init(void) 310static int __init irq_sysfs_init(void)
299{ 311{
300 struct irq_desc *desc; 312 struct irq_desc *desc;
@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
325}; 337};
326 338
327static void irq_sysfs_add(int irq, struct irq_desc *desc) {} 339static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
340static void irq_sysfs_del(struct irq_desc *desc) {}
328 341
329#endif /* CONFIG_SYSFS */ 342#endif /* CONFIG_SYSFS */
330 343
@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
438 * The sysfs entry must be serialized against a concurrent 451 * The sysfs entry must be serialized against a concurrent
439 * irq_sysfs_init() as well. 452 * irq_sysfs_init() as well.
440 */ 453 */
441 kobject_del(&desc->kobj); 454 irq_sysfs_del(desc);
442 delete_irq_desc(irq); 455 delete_irq_desc(irq);
443 456
444 /* 457 /*
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 95a260f9214b..136ce049c4ad 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
263{ 263{
264 char namebuf[KSYM_NAME_LEN]; 264 char namebuf[KSYM_NAME_LEN];
265 265
266 if (is_ksym_addr(addr)) 266 if (is_ksym_addr(addr)) {
267 return !!get_symbol_pos(addr, symbolsize, offset); 267 get_symbol_pos(addr, symbolsize, offset);
268 return 1;
269 }
268 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || 270 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
269 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); 271 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
270} 272}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..d9770a5393c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
470 */ 470 */
471static void do_optimize_kprobes(void) 471static void do_optimize_kprobes(void)
472{ 472{
473 lockdep_assert_held(&text_mutex);
473 /* 474 /*
474 * The optimization/unoptimization refers online_cpus via 475 * The optimization/unoptimization refers online_cpus via
475 * stop_machine() and cpu-hotplug modifies online_cpus. 476 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
487 list_empty(&optimizing_list)) 488 list_empty(&optimizing_list))
488 return; 489 return;
489 490
490 mutex_lock(&text_mutex);
491 arch_optimize_kprobes(&optimizing_list); 491 arch_optimize_kprobes(&optimizing_list);
492 mutex_unlock(&text_mutex);
493} 492}
494 493
495/* 494/*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
500{ 499{
501 struct optimized_kprobe *op, *tmp; 500 struct optimized_kprobe *op, *tmp;
502 501
502 lockdep_assert_held(&text_mutex);
503 /* See comment in do_optimize_kprobes() */ 503 /* See comment in do_optimize_kprobes() */
504 lockdep_assert_cpus_held(); 504 lockdep_assert_cpus_held();
505 505
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
507 if (list_empty(&unoptimizing_list)) 507 if (list_empty(&unoptimizing_list))
508 return; 508 return;
509 509
510 mutex_lock(&text_mutex);
511 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 510 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
512 /* Loop free_list for disarming */ 511 /* Loop free_list for disarming */
513 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 512 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
524 } else 523 } else
525 list_del_init(&op->list); 524 list_del_init(&op->list);
526 } 525 }
527 mutex_unlock(&text_mutex);
528} 526}
529 527
530/* Reclaim all kprobes on the free_list */ 528/* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
556{ 554{
557 mutex_lock(&kprobe_mutex); 555 mutex_lock(&kprobe_mutex);
558 cpus_read_lock(); 556 cpus_read_lock();
557 mutex_lock(&text_mutex);
559 /* Lock modules while optimizing kprobes */ 558 /* Lock modules while optimizing kprobes */
560 mutex_lock(&module_mutex); 559 mutex_lock(&module_mutex);
561 560
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
583 do_free_cleaned_kprobes(); 582 do_free_cleaned_kprobes();
584 583
585 mutex_unlock(&module_mutex); 584 mutex_unlock(&module_mutex);
585 mutex_unlock(&text_mutex);
586 cpus_read_unlock(); 586 cpus_read_unlock();
587 mutex_unlock(&kprobe_mutex); 587 mutex_unlock(&kprobe_mutex);
588 588
diff --git a/kernel/module.c b/kernel/module.c
index 5933395af9a0..9ee93421269c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -65,9 +65,9 @@
65/* 65/*
66 * Modules' sections will be aligned on page boundaries 66 * Modules' sections will be aligned on page boundaries
67 * to ensure complete separation of code and data, but 67 * to ensure complete separation of code and data, but
68 * only when CONFIG_STRICT_MODULE_RWX=y 68 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
69 */ 69 */
70#ifdef CONFIG_STRICT_MODULE_RWX 70#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
71# define debug_align(X) ALIGN(X, PAGE_SIZE) 71# define debug_align(X) ALIGN(X, PAGE_SIZE)
72#else 72#else
73# define debug_align(X) (X) 73# define debug_align(X) (X)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b037f195473..df9f1fe5689b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3904,7 +3904,7 @@ void __noreturn do_task_dead(void)
3904 3904
3905static inline void sched_submit_work(struct task_struct *tsk) 3905static inline void sched_submit_work(struct task_struct *tsk)
3906{ 3906{
3907 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3907 if (!tsk->state)
3908 return; 3908 return;
3909 3909
3910 /* 3910 /*
@@ -3920,6 +3920,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
3920 preempt_enable_no_resched(); 3920 preempt_enable_no_resched();
3921 } 3921 }
3922 3922
3923 if (tsk_is_pi_blocked(tsk))
3924 return;
3925
3923 /* 3926 /*
3924 * If we are going to sleep and we have plugged IO queued, 3927 * If we are going to sleep and we have plugged IO queued,
3925 * make sure to submit it to avoid deadlocks. 3928 * make sure to submit it to avoid deadlocks.
@@ -5102,37 +5105,40 @@ out_unlock:
5102 return retval; 5105 return retval;
5103} 5106}
5104 5107
5105static int sched_read_attr(struct sched_attr __user *uattr, 5108/*
5106 struct sched_attr *attr, 5109 * Copy the kernel size attribute structure (which might be larger
5107 unsigned int usize) 5110 * than what user-space knows about) to user-space.
5111 *
5112 * Note that all cases are valid: user-space buffer can be larger or
5113 * smaller than the kernel-space buffer. The usual case is that both
5114 * have the same size.
5115 */
5116static int
5117sched_attr_copy_to_user(struct sched_attr __user *uattr,
5118 struct sched_attr *kattr,
5119 unsigned int usize)
5108{ 5120{
5109 int ret; 5121 unsigned int ksize = sizeof(*kattr);
5110 5122
5111 if (!access_ok(uattr, usize)) 5123 if (!access_ok(uattr, usize))
5112 return -EFAULT; 5124 return -EFAULT;
5113 5125
5114 /* 5126 /*
5115 * If we're handed a smaller struct than we know of, 5127 * sched_getattr() ABI forwards and backwards compatibility:
5116 * ensure all the unknown bits are 0 - i.e. old 5128 *
5117 * user-space does not get incomplete information. 5129 * If usize == ksize then we just copy everything to user-space and all is good.
5130 *
5131 * If usize < ksize then we only copy as much as user-space has space for,
5132 * this keeps ABI compatibility as well. We skip the rest.
5133 *
5134 * If usize > ksize then user-space is using a newer version of the ABI,
5135 * parts of which the kernel doesn't know about. Just ignore it - tooling can
5136 * detect the kernel's knowledge of attributes from the attr->size value
5137 * which is set to ksize in this case.
5118 */ 5138 */
5119 if (usize < sizeof(*attr)) { 5139 kattr->size = min(usize, ksize);
5120 unsigned char *addr;
5121 unsigned char *end;
5122
5123 addr = (void *)attr + usize;
5124 end = (void *)attr + sizeof(*attr);
5125 5140
5126 for (; addr < end; addr++) { 5141 if (copy_to_user(uattr, kattr, kattr->size))
5127 if (*addr)
5128 return -EFBIG;
5129 }
5130
5131 attr->size = usize;
5132 }
5133
5134 ret = copy_to_user(uattr, attr, attr->size);
5135 if (ret)
5136 return -EFAULT; 5142 return -EFAULT;
5137 5143
5138 return 0; 5144 return 0;
@@ -5142,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
5142 * sys_sched_getattr - similar to sched_getparam, but with sched_attr 5148 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
5143 * @pid: the pid in question. 5149 * @pid: the pid in question.
5144 * @uattr: structure containing the extended parameters. 5150 * @uattr: structure containing the extended parameters.
5145 * @size: sizeof(attr) for fwd/bwd comp. 5151 * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
5146 * @flags: for future extension. 5152 * @flags: for future extension.
5147 */ 5153 */
5148SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, 5154SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
5149 unsigned int, size, unsigned int, flags) 5155 unsigned int, usize, unsigned int, flags)
5150{ 5156{
5151 struct sched_attr attr = { 5157 struct sched_attr kattr = { };
5152 .size = sizeof(struct sched_attr),
5153 };
5154 struct task_struct *p; 5158 struct task_struct *p;
5155 int retval; 5159 int retval;
5156 5160
5157 if (!uattr || pid < 0 || size > PAGE_SIZE || 5161 if (!uattr || pid < 0 || usize > PAGE_SIZE ||
5158 size < SCHED_ATTR_SIZE_VER0 || flags) 5162 usize < SCHED_ATTR_SIZE_VER0 || flags)
5159 return -EINVAL; 5163 return -EINVAL;
5160 5164
5161 rcu_read_lock(); 5165 rcu_read_lock();
@@ -5168,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
5168 if (retval) 5172 if (retval)
5169 goto out_unlock; 5173 goto out_unlock;
5170 5174
5171 attr.sched_policy = p->policy; 5175 kattr.sched_policy = p->policy;
5172 if (p->sched_reset_on_fork) 5176 if (p->sched_reset_on_fork)
5173 attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; 5177 kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
5174 if (task_has_dl_policy(p)) 5178 if (task_has_dl_policy(p))
5175 __getparam_dl(p, &attr); 5179 __getparam_dl(p, &kattr);
5176 else if (task_has_rt_policy(p)) 5180 else if (task_has_rt_policy(p))
5177 attr.sched_priority = p->rt_priority; 5181 kattr.sched_priority = p->rt_priority;
5178 else 5182 else
5179 attr.sched_nice = task_nice(p); 5183 kattr.sched_nice = task_nice(p);
5180 5184
5181#ifdef CONFIG_UCLAMP_TASK 5185#ifdef CONFIG_UCLAMP_TASK
5182 attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; 5186 kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
5183 attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; 5187 kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
5184#endif 5188#endif
5185 5189
5186 rcu_read_unlock(); 5190 rcu_read_unlock();
5187 5191
5188 retval = sched_read_attr(uattr, &attr, size); 5192 return sched_attr_copy_to_user(uattr, &kattr, usize);
5189 return retval;
5190 5193
5191out_unlock: 5194out_unlock:
5192 rcu_read_unlock(); 5195 rcu_read_unlock();
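
The rewrite collapses the old zero-tail scan into one rule: copy and report min(usize, ksize) bytes, so old tooling reads a truncated struct and new tooling learns the kernel's struct size from the attr.size it gets back. A standalone model of that rule (the two structs are stand-ins, not the real sched_attr layout):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct attr_v0 { uint32_t size; uint32_t policy; };			/* old tooling */
struct attr_v1 { uint32_t size; uint32_t policy; uint64_t flags; };	/* kernel view */

static void copy_out_model(void *ubuf, uint32_t usize)
{
	struct attr_v1 kattr = { .policy = 7 };

	kattr.size = usize < sizeof(kattr) ? usize : (uint32_t)sizeof(kattr);
	memcpy(ubuf, &kattr, kattr.size);	/* models copy_to_user() */
}

int main(void)
{
	struct attr_v0 old = { 0, 0 };

	copy_out_model(&old, sizeof(old));	/* smaller, v0-era buffer */
	assert(old.size == sizeof(old) && old.policy == 7);
	return 0;
}
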
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9cfeaac8bd..500f5db0de0b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
4470 if (likely(cfs_rq->runtime_remaining > 0)) 4470 if (likely(cfs_rq->runtime_remaining > 0))
4471 return; 4471 return;
4472 4472
4473 if (cfs_rq->throttled)
4474 return;
4473 /* 4475 /*
4474 * if we're unable to extend our runtime we resched so that the active 4476 * if we're unable to extend our runtime we resched so that the active
4475 * hierarchy can be throttled 4477 * hierarchy can be throttled
@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4673 if (!cfs_rq_throttled(cfs_rq)) 4675 if (!cfs_rq_throttled(cfs_rq))
4674 goto next; 4676 goto next;
4675 4677
4678 /* By the above check, this should never be true */
4679 SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
4680
4676 runtime = -cfs_rq->runtime_remaining + 1; 4681 runtime = -cfs_rq->runtime_remaining + 1;
4677 if (runtime > remaining) 4682 if (runtime > remaining)
4678 runtime = remaining; 4683 runtime = remaining;
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 23fbbcc414d5..6e52b67b420e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock 1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1132 */ 1132 */
1133 if (kworker_to_destroy) { 1133 if (kworker_to_destroy) {
1134 /*
1135 * After the RCU grace period has expired, the worker
1136 * can no longer be found through group->poll_kworker.
1137 * But it might have been already scheduled before
1138 * that - deschedule it cleanly before destroying it.
1139 */
1134 kthread_cancel_delayed_work_sync(&group->poll_work); 1140 kthread_cancel_delayed_work_sync(&group->poll_work);
1141 atomic_set(&group->poll_scheduled, 0);
1142
1135 kthread_destroy_worker(kworker_to_destroy); 1143 kthread_destroy_worker(kworker_to_destroy);
1136 } 1144 }
1137 kfree(t); 1145 kfree(t);
diff --git a/kernel/signal.c b/kernel/signal.c
index e667be6907d7..534fec266a33 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -90,6 +90,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
90 handler == SIG_DFL && !(force && sig_kernel_only(sig))) 90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true; 91 return true;
92 92
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
93 return sig_handler_ignored(handler, sig); 98 return sig_handler_ignored(handler, sig);
94} 99}
95 100
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d911c8470149..ca69290bee2a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -146,6 +146,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
147{ 147{
148 tk->offs_boot = ktime_add(tk->offs_boot, delta); 148 tk->offs_boot = ktime_add(tk->offs_boot, delta);
149 /*
150 * Timespec representation for VDSO update to avoid 64bit division
151 * on every update.
152 */
153 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
149} 154}
150 155
151/* 156/*
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 8cf3596a4ce6..4bc37ac3bb05 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata,
17 struct timekeeper *tk) 17 struct timekeeper *tk)
18{ 18{
19 struct vdso_timestamp *vdso_ts; 19 struct vdso_timestamp *vdso_ts;
20 u64 nsec; 20 u64 nsec, sec;
21 21
22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; 22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; 23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask;
@@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata,
45 } 45 }
46 vdso_ts->nsec = nsec; 46 vdso_ts->nsec = nsec;
47 47
48 /* CLOCK_MONOTONIC_RAW */ 48 /* Copy MONOTONIC time for BOOTTIME */
49 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; 49 sec = vdso_ts->sec;
50 vdso_ts->sec = tk->raw_sec; 50 /* Add the boot offset */
51 vdso_ts->nsec = tk->tkr_raw.xtime_nsec; 51 sec += tk->monotonic_to_boot.tv_sec;
52 nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
52 53
53 /* CLOCK_BOOTTIME */ 54 /* CLOCK_BOOTTIME */
54 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; 55 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
55 vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 56 vdso_ts->sec = sec;
56 nsec = tk->tkr_mono.xtime_nsec; 57
57 nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
58 ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
59 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { 58 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
60 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); 59 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
61 vdso_ts->sec++; 60 vdso_ts->sec++;
62 } 61 }
63 vdso_ts->nsec = nsec; 62 vdso_ts->nsec = nsec;
64 63
64 /* CLOCK_MONOTONIC_RAW */
65 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
66 vdso_ts->sec = tk->raw_sec;
67 vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
68
65 /* CLOCK_TAI */ 69 /* CLOCK_TAI */
66 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; 70 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
67 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; 71 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
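
Stashing offs_boot as a timespec64 (tk->monotonic_to_boot) is what lets the vDSO update derive CLOCK_BOOTTIME from the already-computed MONOTONIC values with one pre-shifted addition plus a carry loop, avoiding a 64-bit division on every timekeeping update. A standalone model of that carry loop, assuming the same shifted fixed-point convention as tkr_mono:

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static void normalize(uint64_t *sec, uint64_t *nsec_shifted, unsigned int shift)
{
	while (*nsec_shifted >= (NSEC_PER_SEC << shift)) {	/* no division */
		*nsec_shifted -= NSEC_PER_SEC << shift;
		(*sec)++;
	}
}

int main(void)
{
	uint64_t sec = 10, nsec = (uint64_t)1500000000 << 4;	/* 1.5s at shift 4 */

	normalize(&sec, &nsec, 4);
	assert(sec == 11 && nsec == (uint64_t)500000000 << 4);
	return 0;
}
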
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca34503f178..f9821a3374e9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
3095 hnd = &iter->probe_entry->hlist; 3095 hnd = &iter->probe_entry->hlist;
3096 3096
3097 hash = iter->probe->ops.func_hash->filter_hash; 3097 hash = iter->probe->ops.func_hash->filter_hash;
3098
3099 /*
3100 * A probe being registered may temporarily have an empty hash
3101 * and it's at the end of the func_probes list.
3102 */
3103 if (!hash || hash == EMPTY_HASH)
3104 return NULL;
3105
3098 size = 1 << hash->size_bits; 3106 size = 1 << hash->size_bits;
3099 3107
3100 retry: 3108 retry:
@@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4320 4328
4321 mutex_unlock(&ftrace_lock); 4329 mutex_unlock(&ftrace_lock);
4322 4330
4331 /*
4332 * Note, there's a small window here that the func_hash->filter_hash
4333 * may be NULL or empty. Need to be careful when reading the loop.
4334 */
4323 mutex_lock(&probe->ops.func_hash->regex_lock); 4335 mutex_lock(&probe->ops.func_hash->regex_lock);
4324 4336
4325 orig_hash = &probe->ops.func_hash->filter_hash; 4337 orig_hash = &probe->ops.func_hash->filter_hash;
4326 old_hash = *orig_hash; 4338 old_hash = *orig_hash;
4327 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4339 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4328 4340
4341 if (!hash) {
4342 ret = -ENOMEM;
4343 goto out;
4344 }
4345
4329 ret = ftrace_match_records(hash, glob, strlen(glob)); 4346 ret = ftrace_match_records(hash, glob, strlen(glob));
4330 4347
4331 /* Nothing found? */ 4348 /* Nothing found? */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 525a97fbbc60..563e80f9006a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1567,9 +1567,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1567 1567
1568/** 1568/**
1569 * update_max_tr_single - only copy one trace over, and reset the rest 1569 * update_max_tr_single - only copy one trace over, and reset the rest
1570 * @tr - tracer 1570 * @tr: tracer
1571 * @tsk - task with the latency 1571 * @tsk: task with the latency
1572 * @cpu - the cpu of the buffer to copy. 1572 * @cpu: the cpu of the buffer to copy.
1573 * 1573 *
1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1575 */ 1575 */
@@ -1767,7 +1767,7 @@ static void __init apply_trace_boot_options(void);
1767 1767
1768/** 1768/**
1769 * register_tracer - register a tracer with the ftrace system. 1769 * register_tracer - register a tracer with the ftrace system.
1770 * @type - the plugin for the tracer 1770 * @type: the plugin for the tracer
1771 * 1771 *
1772 * Register a new plugin tracer. 1772 * Register a new plugin tracer.
1773 */ 1773 */
@@ -2230,9 +2230,9 @@ static bool tracing_record_taskinfo_skip(int flags)
2230/** 2230/**
2231 * tracing_record_taskinfo - record the task info of a task 2231 * tracing_record_taskinfo - record the task info of a task
2232 * 2232 *
2233 * @task - task to record 2233 * @task: task to record
2234 * @flags - TRACE_RECORD_CMDLINE for recording comm 2234 * @flags: TRACE_RECORD_CMDLINE for recording comm
2235 * - TRACE_RECORD_TGID for recording tgid 2235 * TRACE_RECORD_TGID for recording tgid
2236 */ 2236 */
2237void tracing_record_taskinfo(struct task_struct *task, int flags) 2237void tracing_record_taskinfo(struct task_struct *task, int flags)
2238{ 2238{
@@ -2258,10 +2258,10 @@ void tracing_record_taskinfo(struct task_struct *task, int flags)
2258/** 2258/**
2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2260 * 2260 *
2261 * @prev - previous task during sched_switch 2261 * @prev: previous task during sched_switch
2262 * @next - next task during sched_switch 2262 * @next: next task during sched_switch
2263 * @flags - TRACE_RECORD_CMDLINE for recording comm 2263 * @flags: TRACE_RECORD_CMDLINE for recording comm
2264 * TRACE_RECORD_TGID for recording tgid 2264 * TRACE_RECORD_TGID for recording tgid
2265 */ 2265 */
2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2267 struct task_struct *next, int flags) 2267 struct task_struct *next, int flags)
@@ -3072,7 +3072,9 @@ static void trace_printk_start_stop_comm(int enabled)
3072 3072
3073/** 3073/**
3074 * trace_vbprintk - write binary msg to tracing buffer 3074 * trace_vbprintk - write binary msg to tracing buffer
3075 * 3075 * @ip: The address of the caller
3076 * @fmt: The string format to write to the buffer
3077 * @args: Arguments for @fmt
3076 */ 3078 */
3077int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3079int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3078{ 3080{
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c7506bc81b75..648930823b57 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -787,7 +787,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
787 return ret; 787 return ret;
788} 788}
789 789
790static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 790int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
791{ 791{
792 char *event = NULL, *sub = NULL, *match; 792 char *event = NULL, *sub = NULL, *match;
793 int ret; 793 int ret;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index dbef0d135075..fb6bfbc5bf86 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -895,7 +895,8 @@ void trace_probe_cleanup(struct trace_probe *tp)
895 for (i = 0; i < tp->nr_args; i++) 895 for (i = 0; i < tp->nr_args; i++)
896 traceprobe_free_probe_arg(&tp->args[i]); 896 traceprobe_free_probe_arg(&tp->args[i]);
897 897
898 kfree(call->class->system); 898 if (call->class)
899 kfree(call->class->system);
899 kfree(call->name); 900 kfree(call->name);
900 kfree(call->print_fmt); 901 kfree(call->print_fmt);
901} 902}
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 117ad0e7fbf4..70dab9ac7827 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
68{ 68{
69 size /= esize; 69 size /= esize;
70 70
71 size = roundup_pow_of_two(size); 71 if (!is_power_of_2(size))
72 size = rounddown_pow_of_two(size);
72 73
73 fifo->in = 0; 74 fifo->in = 0;
74 fifo->out = 0; 75 fifo->out = 0;
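
__kfifo_init() sizes the ring over a buffer the caller already allocated, so rounding a non-power-of-two size up (the old behaviour) yielded a fifo larger than its backing memory; rounding down trades a little slack for staying in bounds. A standalone illustration (the helper mirrors the kernel's rounddown_pow_of_two for positive inputs):

#include <assert.h>
#include <stddef.h>

static size_t rounddown_pow_of_two(size_t n)
{
	size_t p = 1;

	while (p * 2 != 0 && p * 2 <= n)	/* overflow-safe */
		p *= 2;
	return p;
}

int main(void)
{
	size_t bufsize = 100;	/* elements in the caller's buffer */

	/* old: roundup_pow_of_two(100) == 128, past the buffer's end */
	assert(rounddown_pow_of_two(bufsize) == 64);	/* fits */
	return 0;
}
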
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index feea48fd1a0d..905027574e5d 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
35 struct logic_pio_hwaddr *range; 35 struct logic_pio_hwaddr *range;
36 resource_size_t start; 36 resource_size_t start;
37 resource_size_t end; 37 resource_size_t end;
38 resource_size_t mmio_sz = 0; 38 resource_size_t mmio_end = 0;
39 resource_size_t iio_sz = MMIO_UPPER_LIMIT; 39 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
40 int ret = 0; 40 int ret = 0;
41 41
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
46 end = new_range->hw_start + new_range->size; 46 end = new_range->hw_start + new_range->size;
47 47
48 mutex_lock(&io_range_mutex); 48 mutex_lock(&io_range_mutex);
49 list_for_each_entry_rcu(range, &io_range_list, list) { 49 list_for_each_entry(range, &io_range_list, list) {
50 if (range->fwnode == new_range->fwnode) { 50 if (range->fwnode == new_range->fwnode) {
51 /* range already there */ 51 /* range already there */
52 goto end_register; 52 goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
56 /* for MMIO ranges we need to check for overlap */ 56 /* for MMIO ranges we need to check for overlap */
57 if (start >= range->hw_start + range->size || 57 if (start >= range->hw_start + range->size ||
58 end < range->hw_start) { 58 end < range->hw_start) {
59 mmio_sz += range->size; 59 mmio_end = range->io_start + range->size;
60 } else { 60 } else {
61 ret = -EFAULT; 61 ret = -EFAULT;
62 goto end_register; 62 goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
69 69
70 /* range not registered yet, check for available space */ 70 /* range not registered yet, check for available space */
71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) { 71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
72 if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) { 72 if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
73 /* if it's too big check if 64K space can be reserved */ 73 /* if it's too big check if 64K space can be reserved */
74 if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) { 74 if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
75 ret = -E2BIG; 75 ret = -E2BIG;
76 goto end_register; 76 goto end_register;
77 } 77 }
78 new_range->size = SZ_64K; 78 new_range->size = SZ_64K;
79 pr_warn("Requested IO range too big, new size set to 64K\n"); 79 pr_warn("Requested IO range too big, new size set to 64K\n");
80 } 80 }
81 new_range->io_start = mmio_sz; 81 new_range->io_start = mmio_end;
82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) { 82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) { 83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
84 ret = -E2BIG; 84 ret = -E2BIG;
@@ -99,6 +99,20 @@ end_register:
99} 99}
100 100
101/** 101/**
102 * logic_pio_unregister_range - unregister a logical PIO range for a host
103 * @range: pointer to the IO range which has been already registered.
104 *
105 * Unregister a previously-registered IO range node.
106 */
107void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
108{
109 mutex_lock(&io_range_mutex);
110 list_del_rcu(&range->list);
111 mutex_unlock(&io_range_mutex);
112 synchronize_rcu();
113}
114
115/**
102 * find_io_range_by_fwnode - find logical PIO range for given FW node 116 * find_io_range_by_fwnode - find logical PIO range for given FW node
103 * @fwnode: FW node handle associated with logical PIO range 117 * @fwnode: FW node handle associated with logical PIO range
104 * 118 *
@@ -108,26 +122,38 @@ end_register:
108 */ 122 */
109struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode) 123struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
110{ 124{
111 struct logic_pio_hwaddr *range; 125 struct logic_pio_hwaddr *range, *found_range = NULL;
112 126
127 rcu_read_lock();
113 list_for_each_entry_rcu(range, &io_range_list, list) { 128 list_for_each_entry_rcu(range, &io_range_list, list) {
114 if (range->fwnode == fwnode) 129 if (range->fwnode == fwnode) {
115 return range; 130 found_range = range;
131 break;
132 }
116 } 133 }
117 return NULL; 134 rcu_read_unlock();
135
136 return found_range;
118} 137}
119 138
120/* Return a registered range given an input PIO token */ 139/* Return a registered range given an input PIO token */
121static struct logic_pio_hwaddr *find_io_range(unsigned long pio) 140static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
122{ 141{
123 struct logic_pio_hwaddr *range; 142 struct logic_pio_hwaddr *range, *found_range = NULL;
124 143
144 rcu_read_lock();
125 list_for_each_entry_rcu(range, &io_range_list, list) { 145 list_for_each_entry_rcu(range, &io_range_list, list) {
126 if (in_range(pio, range->io_start, range->size)) 146 if (in_range(pio, range->io_start, range->size)) {
127 return range; 147 found_range = range;
148 break;
149 }
128 } 150 }
129 pr_err("PIO entry token %lx invalid\n", pio); 151 rcu_read_unlock();
130 return NULL; 152
153 if (!found_range)
154 pr_err("PIO entry token 0x%lx invalid\n", pio);
155
156 return found_range;
131} 157}
132 158
133/** 159/**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
180{ 206{
181 struct logic_pio_hwaddr *range; 207 struct logic_pio_hwaddr *range;
182 208
209 rcu_read_lock();
183 list_for_each_entry_rcu(range, &io_range_list, list) { 210 list_for_each_entry_rcu(range, &io_range_list, list) {
184 if (range->flags != LOGIC_PIO_CPU_MMIO) 211 if (range->flags != LOGIC_PIO_CPU_MMIO)
185 continue; 212 continue;
186 if (in_range(addr, range->hw_start, range->size)) 213 if (in_range(addr, range->hw_start, range->size)) {
187 return addr - range->hw_start + range->io_start; 214 unsigned long cpuaddr;
215
216 cpuaddr = addr - range->hw_start + range->io_start;
217
218 rcu_read_unlock();
219 return cpuaddr;
220 }
188 } 221 }
189 pr_err("addr %llx not registered in io_range_list\n", 222 rcu_read_unlock();
190 (unsigned long long) addr); 223
224 pr_err("addr %pa not registered in io_range_list\n", &addr);
225
191 return ~0UL; 226 return ~0UL;
192} 227}
193 228
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 798275a51887..26de020aae7b 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
124struct page *balloon_page_alloc(void) 124struct page *balloon_page_alloc(void)
125{ 125{
126 struct page *page = alloc_page(balloon_mapping_gfp_mask() | 126 struct page *page = alloc_page(balloon_mapping_gfp_mask() |
127 __GFP_NOMEMALLOC | __GFP_NORETRY); 127 __GFP_NOMEMALLOC | __GFP_NORETRY |
128 __GFP_NOWARN);
128 return page; 129 return page;
129} 130}
130EXPORT_SYMBOL_GPL(balloon_page_alloc); 131EXPORT_SYMBOL_GPL(balloon_page_alloc);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 738065f765ab..de1f15969e27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -32,6 +32,7 @@
32#include <linux/shmem_fs.h> 32#include <linux/shmem_fs.h>
33#include <linux/oom.h> 33#include <linux/oom.h>
34#include <linux/numa.h> 34#include <linux/numa.h>
35#include <linux/page_owner.h>
35 36
36#include <asm/tlb.h> 37#include <asm/tlb.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
@@ -2516,6 +2517,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2516 } 2517 }
2517 2518
2518 ClearPageCompound(head); 2519 ClearPageCompound(head);
2520
2521 split_page_owner(head, HPAGE_PMD_ORDER);
2522
2519 /* See comment in __split_huge_page_tail() */ 2523 /* See comment in __split_huge_page_tail() */
2520 if (PageAnon(head)) { 2524 if (PageAnon(head)) {
2521 /* Additional pin to swap cache */ 2525 /* Additional pin to swap cache */
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2277b82902d8..95d16a42db6b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -407,8 +407,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
407 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 407 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
408 return shadow_byte < 0 || 408 return shadow_byte < 0 ||
409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE; 409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
410 else 410
411 return tag != (u8)shadow_byte; 411 /* else CONFIG_KASAN_SW_TAGS: */
412 if ((u8)shadow_byte == KASAN_TAG_INVALID)
413 return true;
414 if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
415 return true;
416
417 return false;
412} 418}
413 419
414static bool __kasan_slab_free(struct kmem_cache *cache, void *object, 420static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
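
For the KASAN hunk above: in software-tags mode every shadow byte holds a tag,
and the rewritten check treats freed memory and mismatched tags as two separate
cases. A userspace model — the constants mirror mm/kasan/kasan.h, but this is
a sketch, not the kernel implementation:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define KASAN_TAG_KERNEL	0xFF	/* native pointers match any valid tag */
#define KASAN_TAG_INVALID	0xFE	/* shadow value for freed/invalid memory */

static bool shadow_invalid(uint8_t ptr_tag, uint8_t shadow_byte)
{
	if (shadow_byte == KASAN_TAG_INVALID)
		return true;		/* freed memory is invalid for any tag */
	if (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != shadow_byte)
		return true;		/* tag mismatch: suspected bad access */
	return false;
}

int main(void)
{
	assert(shadow_invalid(0x2a, KASAN_TAG_INVALID));	/* freed object */
	assert(shadow_invalid(0x2a, 0x17));			/* wrong tag */
	assert(!shadow_invalid(KASAN_TAG_KERNEL, 0x17));	/* wildcard pointer */
	assert(!shadow_invalid(0x17, 0x17));			/* matching tag */
	return 0;
}
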
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f5c0c517c49..9ec5e12486a7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
752 /* Update memcg */ 752 /* Update memcg */
753 __mod_memcg_state(memcg, idx, val); 753 __mod_memcg_state(memcg, idx, val);
754 754
755 /* Update lruvec */
756 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
757
755 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 758 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
756 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 759 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
757 struct mem_cgroup_per_node *pi; 760 struct mem_cgroup_per_node *pi;
758 761
759 /*
760 * Batch local counters to keep them in sync with
761 * the hierarchical ones.
762 */
763 __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
764 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 762 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
765 atomic_long_add(x, &pi->lruvec_stat[idx]); 763 atomic_long_add(x, &pi->lruvec_stat[idx]);
766 x = 0; 764 x = 0;
@@ -3260,6 +3258,72 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3260 } 3258 }
3261} 3259}
3262 3260
3261static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
3262{
3263 unsigned long stat[MEMCG_NR_STAT];
3264 struct mem_cgroup *mi;
3265 int node, cpu, i;
3266 int min_idx, max_idx;
3267
3268 if (slab_only) {
3269 min_idx = NR_SLAB_RECLAIMABLE;
3270 max_idx = NR_SLAB_UNRECLAIMABLE;
3271 } else {
3272 min_idx = 0;
3273 max_idx = MEMCG_NR_STAT;
3274 }
3275
3276 for (i = min_idx; i < max_idx; i++)
3277 stat[i] = 0;
3278
3279 for_each_online_cpu(cpu)
3280 for (i = min_idx; i < max_idx; i++)
3281 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3282
3283 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3284 for (i = min_idx; i < max_idx; i++)
3285 atomic_long_add(stat[i], &mi->vmstats[i]);
3286
3287 if (!slab_only)
3288 max_idx = NR_VM_NODE_STAT_ITEMS;
3289
3290 for_each_node(node) {
3291 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3292 struct mem_cgroup_per_node *pi;
3293
3294 for (i = min_idx; i < max_idx; i++)
3295 stat[i] = 0;
3296
3297 for_each_online_cpu(cpu)
3298 for (i = min_idx; i < max_idx; i++)
3299 stat[i] += per_cpu(
3300 pn->lruvec_stat_cpu->count[i], cpu);
3301
3302 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3303 for (i = min_idx; i < max_idx; i++)
3304 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3305 }
3306}
3307
3308static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3309{
3310 unsigned long events[NR_VM_EVENT_ITEMS];
3311 struct mem_cgroup *mi;
3312 int cpu, i;
3313
3314 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3315 events[i] = 0;
3316
3317 for_each_online_cpu(cpu)
3318 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3319 events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3320 cpu);
3321
3322 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3323 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3324 atomic_long_add(events[i], &mi->vmevents[i]);
3325}
3326
3263#ifdef CONFIG_MEMCG_KMEM 3327#ifdef CONFIG_MEMCG_KMEM
3264static int memcg_online_kmem(struct mem_cgroup *memcg) 3328static int memcg_online_kmem(struct mem_cgroup *memcg)
3265{ 3329{
@@ -3309,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
3309 if (!parent) 3373 if (!parent)
3310 parent = root_mem_cgroup; 3374 parent = root_mem_cgroup;
3311 3375
3376 /*
3377 * Deactivate and reparent kmem_caches. Then flush percpu
3378 * slab statistics to have precise values at the parent and
3379 * all ancestor levels. It's required to keep slab stats
3380 * accurate after the reparenting of kmem_caches.
3381 */
3312 memcg_deactivate_kmem_caches(memcg, parent); 3382 memcg_deactivate_kmem_caches(memcg, parent);
3383 memcg_flush_percpu_vmstats(memcg, true);
3313 3384
3314 kmemcg_id = memcg->kmemcg_id; 3385 kmemcg_id = memcg->kmemcg_id;
3315 BUG_ON(kmemcg_id < 0); 3386 BUG_ON(kmemcg_id < 0);
@@ -4682,6 +4753,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
4682{ 4753{
4683 int node; 4754 int node;
4684 4755
4756 /*
4757 * Flush percpu vmstats and vmevents to guarantee the value correctness
4758 * on parent's and all ancestor levels.
4759 */
4760 memcg_flush_percpu_vmstats(memcg, false);
4761 memcg_flush_percpu_vmevents(memcg);
4685 for_each_node(node) 4762 for_each_node(node)
4686 free_mem_cgroup_per_node_info(memcg, node); 4763 free_mem_cgroup_per_node_info(memcg, node);
4687 free_percpu(memcg->vmstats_percpu); 4764 free_percpu(memcg->vmstats_percpu);
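
Both new memcg flush helpers above follow one pattern: sum each CPU's local
counters once into a stack buffer, then add the totals at the cgroup and every
ancestor, so hierarchical statistics stay exact after the percpu data is freed.
A self-contained model of that pattern (names and sizes are illustrative):

#include <stdio.h>

#define NR_CPUS	4
#define NR_STAT	2

struct group {
	struct group *parent;
	long percpu[NR_CPUS][NR_STAT];	/* fast, unsynchronized local counts */
	long vmstats[NR_STAT];		/* hierarchical totals */
};

static void flush_percpu(struct group *g)
{
	long stat[NR_STAT] = { 0 };
	struct group *mi;
	int cpu, i;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (i = 0; i < NR_STAT; i++)
			stat[i] += g->percpu[cpu][i];

	/* Propagate to the group and every ancestor, exactly as
	 * memcg_flush_percpu_vmstats() walks parent_mem_cgroup(). */
	for (mi = g; mi; mi = mi->parent)
		for (i = 0; i < NR_STAT; i++)
			mi->vmstats[i] += stat[i];
}

int main(void)
{
	struct group parent = { 0 }, child = { .parent = &parent };

	child.percpu[0][0] = 3;
	child.percpu[2][0] = 4;
	flush_percpu(&child);
	printf("child %ld, parent %ld\n", child.vmstats[0], parent.vmstats[0]);
	return 0;	/* prints "child 7, parent 7" */
}
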
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 272c6de1bf4e..9c9194959271 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone,
2238 unsigned int order; 2238 unsigned int order;
2239 int pages_moved = 0; 2239 int pages_moved = 0;
2240 2240
2241#ifndef CONFIG_HOLES_IN_ZONE
2242 /*
2243 * page_zone is not safe to call in this context when
2244 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
2245 * anyway as we check zone boundaries in move_freepages_block().
2246 * Remove at a later date when no bug reports exist related to
2247 * grouping pages by mobility
2248 */
2249 VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
2250 pfn_valid(page_to_pfn(end_page)) &&
2251 page_zone(start_page) != page_zone(end_page));
2252#endif
2253 for (page = start_page; page <= end_page;) { 2241 for (page = start_page; page <= end_page;) {
2254 if (!pfn_valid_within(page_to_pfn(page))) { 2242 if (!pfn_valid_within(page_to_pfn(page))) {
2255 page++; 2243 page++;
2256 continue; 2244 continue;
2257 } 2245 }
2258 2246
2259 /* Make sure we are not inadvertently changing nodes */
2260 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2261
2262 if (!PageBuddy(page)) { 2247 if (!PageBuddy(page)) {
2263 /* 2248 /*
2264 * We assume that pages that could be isolated for 2249 * We assume that pages that could be isolated for
@@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone,
2273 continue; 2258 continue;
2274 } 2259 }
2275 2260
2261 /* Make sure we are not inadvertently changing nodes */
2262 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2263 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2264
2276 order = page_order(page); 2265 order = page_order(page);
2277 move_to_free_area(page, &zone->free_area[order], migratetype); 2266 move_to_free_area(page, &zone->free_area[order], migratetype);
2278 page += 1 << order; 2267 page += 1 << order;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c77d1e3761a7..a6c5d0b28321 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3220,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3220 3220
3221#ifdef CONFIG_MEMCG 3221#ifdef CONFIG_MEMCG
3222 3222
3223/* Only used by soft limit reclaim. Do not reuse for anything else. */
3223unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 3224unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3224 gfp_t gfp_mask, bool noswap, 3225 gfp_t gfp_mask, bool noswap,
3225 pg_data_t *pgdat, 3226 pg_data_t *pgdat,
@@ -3235,7 +3236,8 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3235 }; 3236 };
3236 unsigned long lru_pages; 3237 unsigned long lru_pages;
3237 3238
3238 set_task_reclaim_state(current, &sc.reclaim_state); 3239 WARN_ON_ONCE(!current->reclaim_state);
3240
3239 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 3241 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3240 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3242 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3241 3243
@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3253 3255
3254 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3256 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3255 3257
3256 set_task_reclaim_state(current, NULL);
3257 *nr_scanned = sc.nr_scanned; 3258 *nr_scanned = sc.nr_scanned;
3258 3259
3259 return sc.nr_reclaimed; 3260 return sc.nr_reclaimed;
diff --git a/mm/z3fold.c b/mm/z3fold.c
index ed19d98c9dcd..75b7962439ff 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/wait.h>
44#include <linux/zpool.h> 45#include <linux/zpool.h>
45#include <linux/magic.h> 46#include <linux/magic.h>
46 47
@@ -145,6 +146,8 @@ struct z3fold_header {
145 * @release_wq: workqueue for safe page release 146 * @release_wq: workqueue for safe page release
146 * @work: work_struct for safe page release 147 * @work: work_struct for safe page release
147 * @inode: inode for z3fold pseudo filesystem 148 * @inode: inode for z3fold pseudo filesystem
149 * @destroying: bool to stop migration once we start destruction
150 * @isolated: int to count the number of pages currently in isolation
148 * 151 *
149 * This structure is allocated at pool creation time and maintains metadata 152 * This structure is allocated at pool creation time and maintains metadata
150 * pertaining to a particular z3fold pool. 153 * pertaining to a particular z3fold pool.
@@ -163,8 +166,11 @@ struct z3fold_pool {
163 const struct zpool_ops *zpool_ops; 166 const struct zpool_ops *zpool_ops;
164 struct workqueue_struct *compact_wq; 167 struct workqueue_struct *compact_wq;
165 struct workqueue_struct *release_wq; 168 struct workqueue_struct *release_wq;
169 struct wait_queue_head isolate_wait;
166 struct work_struct work; 170 struct work_struct work;
167 struct inode *inode; 171 struct inode *inode;
172 bool destroying;
173 int isolated;
168}; 174};
169 175
170/* 176/*
@@ -769,6 +775,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
769 goto out_c; 775 goto out_c;
770 spin_lock_init(&pool->lock); 776 spin_lock_init(&pool->lock);
771 spin_lock_init(&pool->stale_lock); 777 spin_lock_init(&pool->stale_lock);
778 init_waitqueue_head(&pool->isolate_wait);
772 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); 779 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
773 if (!pool->unbuddied) 780 if (!pool->unbuddied)
774 goto out_pool; 781 goto out_pool;
@@ -808,6 +815,15 @@ out:
808 return NULL; 815 return NULL;
809} 816}
810 817
818static bool pool_isolated_are_drained(struct z3fold_pool *pool)
819{
820 bool ret;
821
822 spin_lock(&pool->lock);
823 ret = pool->isolated == 0;
824 spin_unlock(&pool->lock);
825 return ret;
826}
811/** 827/**
812 * z3fold_destroy_pool() - destroys an existing z3fold pool 828 * z3fold_destroy_pool() - destroys an existing z3fold pool
813 * @pool: the z3fold pool to be destroyed 829 * @pool: the z3fold pool to be destroyed
@@ -817,6 +833,22 @@ out:
817static void z3fold_destroy_pool(struct z3fold_pool *pool) 833static void z3fold_destroy_pool(struct z3fold_pool *pool)
818{ 834{
819 kmem_cache_destroy(pool->c_handle); 835 kmem_cache_destroy(pool->c_handle);
836 /*
837 * We set pool->destroying under lock to ensure that
838 * z3fold_page_isolate() sees any changes to destroying. This way we
839 * avoid the need for any memory barriers.
840 */
841
842 spin_lock(&pool->lock);
843 pool->destroying = true;
844 spin_unlock(&pool->lock);
845
846 /*
847 * We need to ensure that no pages are being migrated while we destroy
848 * these workqueues, as migration can queue work on either of the
849 * workqueues.
850 */
851 wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
820 852
821 /* 853 /*
822 * We need to destroy pool->compact_wq before pool->release_wq, 854 * We need to destroy pool->compact_wq before pool->release_wq,
@@ -1307,6 +1339,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1307 return atomic64_read(&pool->pages_nr); 1339 return atomic64_read(&pool->pages_nr);
1308} 1340}
1309 1341
1342/*
1343 * z3fold_dec_isolated() expects to be called while pool->lock is held.
1344 */
1345static void z3fold_dec_isolated(struct z3fold_pool *pool)
1346{
1347 assert_spin_locked(&pool->lock);
1348 VM_BUG_ON(pool->isolated <= 0);
1349 pool->isolated--;
1350
1351 /*
1352 * If we have no more isolated pages, we have to see if
1353 * z3fold_destroy_pool() is waiting for a signal.
1354 */
1355 if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
1356 wake_up_all(&pool->isolate_wait);
1357}
1358
1359static void z3fold_inc_isolated(struct z3fold_pool *pool)
1360{
1361 pool->isolated++;
1362}
1363
1310static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) 1364static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1311{ 1365{
1312 struct z3fold_header *zhdr; 1366 struct z3fold_header *zhdr;
@@ -1333,6 +1387,34 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1333 spin_lock(&pool->lock); 1387 spin_lock(&pool->lock);
1334 if (!list_empty(&page->lru)) 1388 if (!list_empty(&page->lru))
1335 list_del(&page->lru); 1389 list_del(&page->lru);
1390 /*
1391 * We need to check for destruction while holding pool->lock, as
1392 * otherwise destruction could see 0 isolated pages, and
1393 * proceed.
1394 */
1395 if (unlikely(pool->destroying)) {
1396 spin_unlock(&pool->lock);
1397 /*
1398 * If this page isn't stale, somebody else holds a
1399 * reference to it. Let's drop our refcount so that they
1400 * can call the release logic.
1401 */
1402 if (unlikely(kref_put(&zhdr->refcount,
1403 release_z3fold_page_locked))) {
1404 /*
1405 * If we get here we have kref problems, so we
1406 * should freak out.
1407 */
1408 WARN(1, "Z3fold is experiencing kref problems\n");
1409 z3fold_page_unlock(zhdr);
1410 return false;
1411 }
1412 z3fold_page_unlock(zhdr);
1413 return false;
1414 }
1415
1416
1417 z3fold_inc_isolated(pool);
1336 spin_unlock(&pool->lock); 1418 spin_unlock(&pool->lock);
1337 z3fold_page_unlock(zhdr); 1419 z3fold_page_unlock(zhdr);
1338 return true; 1420 return true;
@@ -1401,6 +1483,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
1401 1483
1402 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); 1484 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1403 1485
1486 spin_lock(&pool->lock);
1487 z3fold_dec_isolated(pool);
1488 spin_unlock(&pool->lock);
1489
1404 page_mapcount_reset(page); 1490 page_mapcount_reset(page);
1405 put_page(page); 1491 put_page(page);
1406 return 0; 1492 return 0;
@@ -1420,10 +1506,14 @@ static void z3fold_page_putback(struct page *page)
1420 INIT_LIST_HEAD(&page->lru); 1506 INIT_LIST_HEAD(&page->lru);
1421 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { 1507 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1422 atomic64_dec(&pool->pages_nr); 1508 atomic64_dec(&pool->pages_nr);
1509 spin_lock(&pool->lock);
1510 z3fold_dec_isolated(pool);
1511 spin_unlock(&pool->lock);
1423 return; 1512 return;
1424 } 1513 }
1425 spin_lock(&pool->lock); 1514 spin_lock(&pool->lock);
1426 list_add(&page->lru, &pool->lru); 1515 list_add(&page->lru, &pool->lru);
1516 z3fold_dec_isolated(pool);
1427 spin_unlock(&pool->lock); 1517 spin_unlock(&pool->lock);
1428 z3fold_page_unlock(zhdr); 1518 z3fold_page_unlock(zhdr);
1429} 1519}
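
The z3fold changes above add a small drain protocol: an isolation count and a
destroying flag, both guarded by pool->lock, plus a waitqueue so teardown can
wait for in-flight migrations. A kernel-style sketch of just that protocol
(illustrative names, not the z3fold functions themselves):

struct pool {
	spinlock_t lock;
	struct wait_queue_head isolate_wait;
	bool destroying;
	int isolated;
};

static bool try_isolate(struct pool *p)
{
	spin_lock(&p->lock);
	if (p->destroying) {		/* teardown already committed */
		spin_unlock(&p->lock);
		return false;
	}
	p->isolated++;
	spin_unlock(&p->lock);
	return true;
}

static void put_back(struct pool *p)
{
	spin_lock(&p->lock);
	if (--p->isolated == 0)
		wake_up_all(&p->isolate_wait);
	spin_unlock(&p->lock);
}

static bool drained(struct pool *p)
{
	bool ret;

	spin_lock(&p->lock);
	ret = p->isolated == 0;
	spin_unlock(&p->lock);
	return ret;
}

static void pool_destroy(struct pool *p)
{
	spin_lock(&p->lock);
	p->destroying = true;	/* same lock the isolate path checks under */
	spin_unlock(&p->lock);

	wait_event(p->isolate_wait, drained(p));
	/* now safe to destroy the workqueues migration could have used */
}

Because the flag and counter share one lock, no memory barriers are needed —
which is exactly the point of the comment added in z3fold_destroy_pool().
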
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 57fbb7ced69f..e98bb6ab4f7e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -54,6 +54,7 @@
54#include <linux/mount.h> 54#include <linux/mount.h>
55#include <linux/pseudo_fs.h> 55#include <linux/pseudo_fs.h>
56#include <linux/migrate.h> 56#include <linux/migrate.h>
57#include <linux/wait.h>
57#include <linux/pagemap.h> 58#include <linux/pagemap.h>
58#include <linux/fs.h> 59#include <linux/fs.h>
59 60
@@ -268,6 +269,10 @@ struct zs_pool {
268#ifdef CONFIG_COMPACTION 269#ifdef CONFIG_COMPACTION
269 struct inode *inode; 270 struct inode *inode;
270 struct work_struct free_work; 271 struct work_struct free_work;
272 /* A wait queue for when migration races with async_free_zspage() */
273 struct wait_queue_head migration_wait;
274 atomic_long_t isolated_pages;
275 bool destroying;
271#endif 276#endif
272}; 277};
273 278
@@ -1862,6 +1867,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
1862 zspage->isolated--; 1867 zspage->isolated--;
1863} 1868}
1864 1869
1870static void putback_zspage_deferred(struct zs_pool *pool,
1871 struct size_class *class,
1872 struct zspage *zspage)
1873{
1874 enum fullness_group fg;
1875
1876 fg = putback_zspage(class, zspage);
1877 if (fg == ZS_EMPTY)
1878 schedule_work(&pool->free_work);
1879
1880}
1881
1882static inline void zs_pool_dec_isolated(struct zs_pool *pool)
1883{
1884 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
1885 atomic_long_dec(&pool->isolated_pages);
1886 /*
1887 * There's no possibility of racing, since wait_for_isolated_drain()
1888 * checks the isolated count under &class->lock after enqueuing
1889 * on migration_wait.
1890 */
1891 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
1892 wake_up_all(&pool->migration_wait);
1893}
1894
1865static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1895static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1866 struct page *newpage, struct page *oldpage) 1896 struct page *newpage, struct page *oldpage)
1867{ 1897{
@@ -1931,6 +1961,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1931 */ 1961 */
1932 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { 1962 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1933 get_zspage_mapping(zspage, &class_idx, &fullness); 1963 get_zspage_mapping(zspage, &class_idx, &fullness);
1964 atomic_long_inc(&pool->isolated_pages);
1934 remove_zspage(class, zspage, fullness); 1965 remove_zspage(class, zspage, fullness);
1935 } 1966 }
1936 1967
@@ -2030,8 +2061,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2030 * Page migration is done so let's putback isolated zspage to 2061 * Page migration is done so let's putback isolated zspage to
2031 * the list if @page is final isolated subpage in the zspage. 2062 * the list if @page is final isolated subpage in the zspage.
2032 */ 2063 */
2033 if (!is_zspage_isolated(zspage)) 2064 if (!is_zspage_isolated(zspage)) {
2034 putback_zspage(class, zspage); 2065 /*
2066 * We cannot race with zs_destroy_pool() here because we wait
2067 * for isolation to hit zero before we start destroying.
2068 * Also, we ensure that everyone can see pool->destroying before
2069 * we start waiting.
2070 */
2071 putback_zspage_deferred(pool, class, zspage);
2072 zs_pool_dec_isolated(pool);
2073 }
2035 2074
2036 reset_page(page); 2075 reset_page(page);
2037 put_page(page); 2076 put_page(page);
@@ -2077,13 +2116,12 @@ static void zs_page_putback(struct page *page)
2077 spin_lock(&class->lock); 2116 spin_lock(&class->lock);
2078 dec_zspage_isolation(zspage); 2117 dec_zspage_isolation(zspage);
2079 if (!is_zspage_isolated(zspage)) { 2118 if (!is_zspage_isolated(zspage)) {
2080 fg = putback_zspage(class, zspage);
2081 /* 2119 /*
2082 * Due to page_lock, we cannot free zspage immediately 2120 * Due to page_lock, we cannot free zspage immediately
2083 * so let's defer. 2121 * so let's defer.
2084 */ 2122 */
2085 if (fg == ZS_EMPTY) 2123 putback_zspage_deferred(pool, class, zspage);
2086 schedule_work(&pool->free_work); 2124 zs_pool_dec_isolated(pool);
2087 } 2125 }
2088 spin_unlock(&class->lock); 2126 spin_unlock(&class->lock);
2089} 2127}
@@ -2107,8 +2145,36 @@ static int zs_register_migration(struct zs_pool *pool)
2107 return 0; 2145 return 0;
2108} 2146}
2109 2147
2148static bool pool_isolated_are_drained(struct zs_pool *pool)
2149{
2150 return atomic_long_read(&pool->isolated_pages) == 0;
2151}
2152
2153/* Function for resolving migration */
2154static void wait_for_isolated_drain(struct zs_pool *pool)
2155{
2156
2157 /*
2158 * We're in the process of destroying the pool, so there are no
2159 * active allocations. zs_page_isolate() fails for completely free
2160 * zspages, so we need only wait for the zs_pool's isolated
2161 * count to hit zero.
2162 */
2163 wait_event(pool->migration_wait,
2164 pool_isolated_are_drained(pool));
2165}
2166
2110static void zs_unregister_migration(struct zs_pool *pool) 2167static void zs_unregister_migration(struct zs_pool *pool)
2111{ 2168{
2169 pool->destroying = true;
2170 /*
2171 * We need a memory barrier here to ensure global visibility of
2172 * pool->destroying. Thus pool->isolated pages will either be 0 in which
2173 * case we don't care, or it will be > 0 and pool->destroying will
2174 * ensure that we wake up once isolation hits 0.
2175 */
2176 smp_mb();
2177 wait_for_isolated_drain(pool); /* This can block */
2112 flush_work(&pool->free_work); 2178 flush_work(&pool->free_work);
2113 iput(pool->inode); 2179 iput(pool->inode);
2114} 2180}
@@ -2346,6 +2412,10 @@ struct zs_pool *zs_create_pool(const char *name)
2346 if (!pool->name) 2412 if (!pool->name)
2347 goto err; 2413 goto err;
2348 2414
2415#ifdef CONFIG_COMPACTION
2416 init_waitqueue_head(&pool->migration_wait);
2417#endif
2418
2349 if (create_cache(pool)) 2419 if (create_cache(pool))
2350 goto err; 2420 goto err;
2351 2421
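
zsmalloc solves the same drain problem, but with an atomic counter instead of
a lock-protected one, so an explicit smp_mb() must order the destroying store
before the first read of the counter. A condensed, hedged sketch of the
pairing shown in the hunks above (field names as in the patch, the rest
elided):

struct pool {
	struct wait_queue_head migration_wait;
	atomic_long_t isolated_pages;
	bool destroying;
};

/* Teardown: publish 'destroying', then sample the counter. */
static void pool_unregister_migration(struct pool *p)
{
	p->destroying = true;
	smp_mb();	/* make 'destroying' globally visible before waiting */
	wait_event(p->migration_wait,
		   atomic_long_read(&p->isolated_pages) == 0);
}

/* Migration: drop the count, then check whether teardown is waiting. */
static void pool_dec_isolated(struct pool *p)
{
	atomic_long_dec(&p->isolated_pages);
	if (atomic_long_read(&p->isolated_pages) == 0 && p->destroying)
		wake_up_all(&p->migration_wait);
}

Either the migration side sees destroying and issues the wake-up, or teardown
reads a zero count and never sleeps; the comment in zs_pool_dec_isolated()
above argues the residual window is closed by re-checking under &class->lock.
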
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 240ed70912d6..d78938e3e008 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -277,17 +277,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
277 * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached 277 * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
278 * @buff_pos: current position in the skb 278 * @buff_pos: current position in the skb
279 * @packet_len: total length of the skb 279 * @packet_len: total length of the skb
280 * @tvlv_len: tvlv length of the previously considered OGM 280 * @ogm_packet: potential OGM in buffer
281 * 281 *
282 * Return: true if there is enough space for another OGM, false otherwise. 282 * Return: true if there is enough space for another OGM, false otherwise.
283 */ 283 */
284static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, 284static bool
285 __be16 tvlv_len) 285batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
286 const struct batadv_ogm_packet *ogm_packet)
286{ 287{
287 int next_buff_pos = 0; 288 int next_buff_pos = 0;
288 289
289 next_buff_pos += buff_pos + BATADV_OGM_HLEN; 290 /* check if there is enough space for the header */
290 next_buff_pos += ntohs(tvlv_len); 291 next_buff_pos += buff_pos + sizeof(*ogm_packet);
292 if (next_buff_pos > packet_len)
293 return false;
294
295 /* check if there is enough space for the optional TVLV */
296 next_buff_pos += ntohs(ogm_packet->tvlv_len);
291 297
292 return (next_buff_pos <= packet_len) && 298 return (next_buff_pos <= packet_len) &&
293 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); 299 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -315,7 +321,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
315 321
316 /* adjust all flags and log packets */ 322 /* adjust all flags and log packets */
317 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 323 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
318 batadv_ogm_packet->tvlv_len)) { 324 batadv_ogm_packet)) {
319 /* we might have aggregated direct link packets with an 325 /* we might have aggregated direct link packets with an
320 * ordinary base packet 326 * ordinary base packet
321 */ 327 */
@@ -1704,7 +1710,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
1704 1710
1705 /* unpack the aggregated packets and process them one by one */ 1711 /* unpack the aggregated packets and process them one by one */
1706 while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), 1712 while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
1707 ogm_packet->tvlv_len)) { 1713 ogm_packet)) {
1708 batadv_iv_ogm_process(skb, ogm_offset, if_incoming); 1714 batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
1709 1715
1710 ogm_offset += BATADV_OGM_HLEN; 1716 ogm_offset += BATADV_OGM_HLEN;
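
The batman-adv OGM fix above (and its OGM2 twin below) is the classic parser
bounds-check ordering: verify that the fixed header fits in the buffer before
dereferencing its length field. A self-contained userspace model — the header
layout here is illustrative, not the real batadv_ogm_packet:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct ogm_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t tvlv_len;	/* big-endian on the wire */
};

static bool aggr_packet_fits(int pos, int packet_len, const uint8_t *buf)
{
	struct ogm_hdr hdr;
	int next = pos + (int)sizeof(hdr);

	/* 1. Room for the fixed header? Without this check, reading
	 *    tvlv_len below would run past the end of the buffer. */
	if (next > packet_len)
		return false;

	memcpy(&hdr, buf + pos, sizeof(hdr));

	/* 2. Only now is the length field safe to trust. */
	next += ntohs(hdr.tvlv_len);
	return next <= packet_len;
}
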
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index fad95ef64e01..bc06e3cdfa84 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv,
631 * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated 631 * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated
632 * @buff_pos: current position in the skb 632 * @buff_pos: current position in the skb
633 * @packet_len: total length of the skb 633 * @packet_len: total length of the skb
634 * @tvlv_len: tvlv length of the previously considered OGM 634 * @ogm2_packet: potential OGM2 in buffer
635 * 635 *
636 * Return: true if there is enough space for another OGM, false otherwise. 636 * Return: true if there is enough space for another OGM, false otherwise.
637 */ 637 */
638static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, 638static bool
639 __be16 tvlv_len) 639batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
640 const struct batadv_ogm2_packet *ogm2_packet)
640{ 641{
641 int next_buff_pos = 0; 642 int next_buff_pos = 0;
642 643
643 next_buff_pos += buff_pos + BATADV_OGM2_HLEN; 644 /* check if there is enough space for the header */
644 next_buff_pos += ntohs(tvlv_len); 645 next_buff_pos += buff_pos + sizeof(*ogm2_packet);
646 if (next_buff_pos > packet_len)
647 return false;
648
649 /* check if there is enough space for the optional TVLV */
650 next_buff_pos += ntohs(ogm2_packet->tvlv_len);
645 651
646 return (next_buff_pos <= packet_len) && 652 return (next_buff_pos <= packet_len) &&
647 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); 653 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
818 ogm_packet = (struct batadv_ogm2_packet *)skb->data; 824 ogm_packet = (struct batadv_ogm2_packet *)skb->data;
819 825
820 while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), 826 while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
821 ogm_packet->tvlv_len)) { 827 ogm_packet)) {
822 batadv_v_ogm_process(skb, ogm_offset, if_incoming); 828 batadv_v_ogm_process(skb, ogm_offset, if_incoming);
823 829
824 ogm_offset += BATADV_OGM2_HLEN; 830 ogm_offset += BATADV_OGM2_HLEN;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 67d7f83009ae..1d5bdf3a4b65 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -2303,7 +2303,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2303 2303
2304 while (bucket_tmp < hash->size) { 2304 while (bucket_tmp < hash->size) {
2305 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2305 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2306 *bucket, &idx_tmp)) 2306 bucket_tmp, &idx_tmp))
2307 break; 2307 break;
2308 2308
2309 bucket_tmp++; 2309 bucket_tmp++;
@@ -2420,8 +2420,10 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2420 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2420 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2421 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2421 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2422 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2422 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
2423 batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS); 2423 batadv_mcast_want_rtr4_update(bat_priv, orig,
2424 batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS); 2424 BATADV_MCAST_WANT_NO_RTR4);
2425 batadv_mcast_want_rtr6_update(bat_priv, orig,
2426 BATADV_MCAST_WANT_NO_RTR6);
2425 2427
2426 spin_unlock_bh(&orig->mcast_handler_lock); 2428 spin_unlock_bh(&orig->mcast_handler_lock);
2427} 2429}
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 6f08fd122a8d..7e052d6f759b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
164{ 164{
165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype); 165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
166 166
167 return attr ? nla_get_u32(attr) : 0; 167 return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
168} 168}
169 169
170/** 170/**
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b9585e7d9d2e..04bc79359a17 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3202,6 +3202,7 @@ struct hci_dev *hci_alloc_dev(void)
3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3205 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3205 3206
3206 mutex_init(&hdev->lock); 3207 mutex_init(&hdev->lock);
3207 mutex_init(&hdev->req_lock); 3208 mutex_init(&hdev->req_lock);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index bb67f4a5479a..402e2cc54044 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -433,6 +433,35 @@ static int auto_accept_delay_set(void *data, u64 val)
433 return 0; 433 return 0;
434} 434}
435 435
436static int min_encrypt_key_size_set(void *data, u64 val)
437{
438 struct hci_dev *hdev = data;
439
440 if (val < 1 || val > 16)
441 return -EINVAL;
442
443 hci_dev_lock(hdev);
444 hdev->min_enc_key_size = val;
445 hci_dev_unlock(hdev);
446
447 return 0;
448}
449
450static int min_encrypt_key_size_get(void *data, u64 *val)
451{
452 struct hci_dev *hdev = data;
453
454 hci_dev_lock(hdev);
455 *val = hdev->min_enc_key_size;
456 hci_dev_unlock(hdev);
457
458 return 0;
459}
460
461DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
462 min_encrypt_key_size_get,
463 min_encrypt_key_size_set, "%llu\n");
464
436static int auto_accept_delay_get(void *data, u64 *val) 465static int auto_accept_delay_get(void *data, u64 *val)
437{ 466{
438 struct hci_dev *hdev = data; 467 struct hci_dev *hdev = data;
@@ -545,6 +574,8 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
545 if (lmp_ssp_capable(hdev)) { 574 if (lmp_ssp_capable(hdev)) {
546 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, 575 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
547 hdev, &ssp_debug_mode_fops); 576 hdev, &ssp_debug_mode_fops);
577 debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs,
578 hdev, &min_encrypt_key_size_fops);
548 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, 579 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
549 hdev, &auto_accept_delay_fops); 580 hdev, &auto_accept_delay_fops);
550 } 581 }
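
The new min_encrypt_key_size knob above uses the standard debugfs attribute
idiom: a get/set pair with validation in the setter, glued to a file by
DEFINE_SIMPLE_ATTRIBUTE(). The same idiom in miniature — 'mydev' and 'limit'
are illustrative stand-ins:

struct mydev {
	u64 limit;
};

static int limit_set(void *data, u64 val)
{
	struct mydev *d = data;

	if (val < 1 || val > 16)	/* reject out-of-range writes */
		return -EINVAL;
	d->limit = val;
	return 0;
}

static int limit_get(void *data, u64 *val)
{
	struct mydev *d = data;

	*val = d->limit;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(limit_fops, limit_get, limit_set, "%llu\n");

/* wired up with:
 * debugfs_create_file("limit", 0644, parent_dentry, d, &limit_fops);
 */
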
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5abd423b55fa..8d889969ae7e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
101{ 101{
102 struct sk_buff *skb; 102 struct sk_buff *skb;
103 struct sock *sk = sock->sk; 103 struct sock *sk = sock->sk;
104 int ret;
104 105
105 BT_DBG("session %p data %p size %d", session, data, size); 106 BT_DBG("session %p data %p size %d", session, data, size);
106 107
@@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
114 } 115 }
115 116
116 skb_put_u8(skb, hdr); 117 skb_put_u8(skb, hdr);
117 if (data && size > 0) 118 if (data && size > 0) {
118 skb_put_data(skb, data, size); 119 skb_put_data(skb, data, size);
120 ret = size;
121 } else {
122 ret = 0;
123 }
119 124
120 skb_queue_tail(transmit, skb); 125 skb_queue_tail(transmit, skb);
121 wake_up_interruptible(sk_sleep(sk)); 126 wake_up_interruptible(sk_sleep(sk));
122 127
123 return 0; 128 return ret;
124} 129}
125 130
126static int hidp_send_ctrl_message(struct hidp_session *session, 131static int hidp_send_ctrl_message(struct hidp_session *session,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cc506fe99b4d..dfc1edb168b7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1361,7 +1361,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1361 * actually encrypted before enforcing a key size. 1361 * actually encrypted before enforcing a key size.
1362 */ 1362 */
1363 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || 1363 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1364 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE); 1364 hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1365} 1365}
1366 1366
1367static void l2cap_do_start(struct l2cap_chan *chan) 1367static void l2cap_do_start(struct l2cap_chan *chan)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c8177a89f52c..4096d8a74a2b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
221 return NF_DROP; 221 return NF_DROP;
222 } 222 }
223 223
224 ADD_COUNTER(*(counter_base + i), 1, skb->len); 224 ADD_COUNTER(*(counter_base + i), skb->len, 1);
225 225
226 /* these should only watch: not modify, nor tell us 226 /* these should only watch: not modify, nor tell us
227 * what to do with the packet 227 * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
959 continue; 959 continue;
960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
961 for (i = 0; i < nentries; i++) 961 for (i = 0; i < nentries; i++)
962 ADD_COUNTER(counters[i], counter_base[i].pcnt, 962 ADD_COUNTER(counters[i], counter_base[i].bcnt,
963 counter_base[i].bcnt); 963 counter_base[i].pcnt);
964 } 964 }
965} 965}
966 966
@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
1280 1280
1281 /* we add to the counters of the first cpu */ 1281 /* we add to the counters of the first cpu */
1282 for (i = 0; i < num_counters; i++) 1282 for (i = 0; i < num_counters; i++)
1283 ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt); 1283 ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
1284 1284
1285 write_unlock_bh(&t->lock); 1285 write_unlock_bh(&t->lock);
1286 ret = 0; 1286 ret = 0;
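
All three ebtables hunks above fix one argument-order bug: ADD_COUNTER() takes
bytes before packets, and the old calls had them swapped, so packet counts grew
by skb->len while byte counts grew by one. A runnable demonstration, with a
local macro matching the kernel's (counter, bytes, packets) ordering from
include/linux/netfilter/x_tables.h:

#include <stdio.h>

struct counter { unsigned long long pcnt, bcnt; };

#define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

int main(void)
{
	struct counter fixed = { 0, 0 }, buggy = { 0, 0 };
	unsigned long long skb_len = 1500;

	ADD_COUNTER(fixed, skb_len, 1);	/* bytes first, then packets */
	ADD_COUNTER(buggy, 1, skb_len);	/* the old order: stats swapped */

	printf("fixed: %llu pkts, %llu bytes\n", fixed.pcnt, fixed.bcnt);
	printf("buggy: %llu pkts, %llu bytes\n", buggy.pcnt, buggy.bcnt);
	return 0;
}
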
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 1804e867f715..7c9e92b2f806 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -53,7 +53,7 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
53 goto err; 53 goto err;
54 54
55 br_vlan_get_proto(br_dev, &p_proto); 55 br_vlan_get_proto(br_dev, &p_proto);
56 nft_reg_store16(dest, p_proto); 56 nft_reg_store16(dest, htons(p_proto));
57 return; 57 return;
58 } 58 }
59 default: 59 default:
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 5d6724cee38f..4f75df40fb12 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
136 if (key) { 136 if (key) {
137 kfree(key->key); 137 kfree(key->key);
138 key->key = NULL; 138 key->key = NULL;
139 crypto_free_sync_skcipher(key->tfm); 139 if (key->tfm) {
140 key->tfm = NULL; 140 crypto_free_sync_skcipher(key->tfm);
141 key->tfm = NULL;
142 }
141 } 143 }
142} 144}
143 145
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0b2df09b2554..78ae6e8c953d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1496,7 +1496,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1496 struct ceph_osds up, acting; 1496 struct ceph_osds up, acting;
1497 bool force_resend = false; 1497 bool force_resend = false;
1498 bool unpaused = false; 1498 bool unpaused = false;
1499 bool legacy_change; 1499 bool legacy_change = false;
1500 bool split = false; 1500 bool split = false;
1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); 1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1502 bool recovery_deletes = ceph_osdmap_flag(osdc, 1502 bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1584,15 +1584,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1584 t->osd = acting.primary; 1584 t->osd = acting.primary;
1585 } 1585 }
1586 1586
1587 if (unpaused || legacy_change || force_resend || 1587 if (unpaused || legacy_change || force_resend || split)
1588 (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1589 RESEND_ON_SPLIT)))
1590 ct_res = CALC_TARGET_NEED_RESEND; 1588 ct_res = CALC_TARGET_NEED_RESEND;
1591 else 1589 else
1592 ct_res = CALC_TARGET_NO_ACTION; 1590 ct_res = CALC_TARGET_NO_ACTION;
1593 1591
1594out: 1592out:
1595 dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd); 1593 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1594 legacy_change, force_resend, split, ct_res, t->osd);
1596 return ct_res; 1595 return ct_res;
1597} 1596}
1598 1597
diff --git a/net/core/filter.c b/net/core/filter.c
index 7878f918b8c0..4c6a252d4212 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
8757 return size == size_default; 8757 return size == size_default;
8758 8758
8759 /* Fields that allow narrowing */ 8759 /* Fields that allow narrowing */
8760 case offsetof(struct sk_reuseport_md, eth_protocol): 8760 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
8761 if (size < FIELD_SIZEOF(struct sk_buff, protocol)) 8761 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8762 return false; 8762 return false;
8763 /* fall through */ 8763 /* fall through */
8764 case offsetof(struct sk_reuseport_md, ip_protocol): 8764 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
8765 case offsetof(struct sk_reuseport_md, bind_inany): 8765 case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
8766 case offsetof(struct sk_reuseport_md, len): 8766 case bpf_ctx_range(struct sk_reuseport_md, len):
8767 bpf_ctx_record_field_size(info, size_default); 8767 bpf_ctx_record_field_size(info, size_default);
8768 return bpf_ctx_narrow_access_ok(off, size, size_default); 8768 return bpf_ctx_narrow_access_ok(off, size, size_default);
8769 8769
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3e6fedb57bc1..2470b4b404e6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
142 mutex_unlock(&flow_dissector_mutex); 142 mutex_unlock(&flow_dissector_mutex);
143 return -ENOENT; 143 return -ENOENT;
144 } 144 }
145 bpf_prog_put(attached);
146 RCU_INIT_POINTER(net->flow_dissector_prog, NULL); 145 RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
146 bpf_prog_put(attached);
147 mutex_unlock(&flow_dissector_mutex); 147 mutex_unlock(&flow_dissector_mutex);
148 return 0; 148 return 0;
149} 149}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2cf27da1baeb..849380a622ef 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -122,7 +122,7 @@ static void queue_process(struct work_struct *work)
122 txq = netdev_get_tx_queue(dev, q_index); 122 txq = netdev_get_tx_queue(dev, q_index);
123 HARD_TX_LOCK(dev, txq, smp_processor_id()); 123 HARD_TX_LOCK(dev, txq, smp_processor_id());
124 if (netif_xmit_frozen_or_stopped(txq) || 124 if (netif_xmit_frozen_or_stopped(txq) ||
125 netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { 125 !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
126 skb_queue_head(&npinfo->txq, skb); 126 skb_queue_head(&npinfo->txq, skb);
127 HARD_TX_UNLOCK(dev, txq); 127 HARD_TX_UNLOCK(dev, txq);
128 local_irq_restore(flags); 128 local_irq_restore(flags);
@@ -335,7 +335,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
335 335
336 HARD_TX_UNLOCK(dev, txq); 336 HARD_TX_UNLOCK(dev, txq);
337 337
338 if (status == NETDEV_TX_OK) 338 if (dev_xmit_complete(status))
339 break; 339 break;
340 340
341 } 341 }
@@ -352,7 +352,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
352 352
353 } 353 }
354 354
355 if (status != NETDEV_TX_OK) { 355 if (!dev_xmit_complete(status)) {
356 skb_queue_tail(&npinfo->txq, skb); 356 skb_queue_tail(&npinfo->txq, skb);
357 schedule_delayed_work(&npinfo->tx_work,0); 357 schedule_delayed_work(&npinfo->tx_work,0);
358 } 358 }
diff --git a/net/core/sock.c b/net/core/sock.c
index d57b0cc995a0..545fac19a711 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1992,6 +1992,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1992} 1992}
1993EXPORT_SYMBOL(skb_set_owner_w); 1993EXPORT_SYMBOL(skb_set_owner_w);
1994 1994
1995static bool can_skb_orphan_partial(const struct sk_buff *skb)
1996{
1997#ifdef CONFIG_TLS_DEVICE
1998 /* Drivers depend on in-order delivery for crypto offload,
1999 * partial orphan breaks out-of-order-OK logic.
2000 */
2001 if (skb->decrypted)
2002 return false;
2003#endif
2004 return (skb->destructor == sock_wfree ||
2005 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2006}
2007
1995/* This helper is used by netem, as it can hold packets in its 2008/* This helper is used by netem, as it can hold packets in its
1996 * delay queue. We want to allow the owner socket to send more 2009 * delay queue. We want to allow the owner socket to send more
1997 * packets, as if they were already TX completed by a typical driver. 2010 * packets, as if they were already TX completed by a typical driver.
@@ -2003,11 +2016,7 @@ void skb_orphan_partial(struct sk_buff *skb)
2003 if (skb_is_tcp_pure_ack(skb)) 2016 if (skb_is_tcp_pure_ack(skb))
2004 return; 2017 return;
2005 2018
2006 if (skb->destructor == sock_wfree 2019 if (can_skb_orphan_partial(skb)) {
2007#ifdef CONFIG_INET
2008 || skb->destructor == tcp_wfree
2009#endif
2010 ) {
2011 struct sock *sk = skb->sk; 2020 struct sock *sk = skb->sk;
2012 2021
2013 if (refcount_inc_not_zero(&sk->sk_refcnt)) { 2022 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
@@ -3278,16 +3287,17 @@ static __init int net_inuse_init(void)
3278 3287
3279core_initcall(net_inuse_init); 3288core_initcall(net_inuse_init);
3280 3289
3281static void assign_proto_idx(struct proto *prot) 3290static int assign_proto_idx(struct proto *prot)
3282{ 3291{
3283 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 3292 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3284 3293
3285 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 3294 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3286 pr_err("PROTO_INUSE_NR exhausted\n"); 3295 pr_err("PROTO_INUSE_NR exhausted\n");
3287 return; 3296 return -ENOSPC;
3288 } 3297 }
3289 3298
3290 set_bit(prot->inuse_idx, proto_inuse_idx); 3299 set_bit(prot->inuse_idx, proto_inuse_idx);
3300 return 0;
3291} 3301}
3292 3302
3293static void release_proto_idx(struct proto *prot) 3303static void release_proto_idx(struct proto *prot)
@@ -3296,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
3296 clear_bit(prot->inuse_idx, proto_inuse_idx); 3306 clear_bit(prot->inuse_idx, proto_inuse_idx);
3297} 3307}
3298#else 3308#else
3299static inline void assign_proto_idx(struct proto *prot) 3309static inline int assign_proto_idx(struct proto *prot)
3300{ 3310{
3311 return 0;
3301} 3312}
3302 3313
3303static inline void release_proto_idx(struct proto *prot) 3314static inline void release_proto_idx(struct proto *prot)
@@ -3346,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)
3346 3357
3347int proto_register(struct proto *prot, int alloc_slab) 3358int proto_register(struct proto *prot, int alloc_slab)
3348{ 3359{
3360 int ret = -ENOBUFS;
3361
3349 if (alloc_slab) { 3362 if (alloc_slab) {
3350 prot->slab = kmem_cache_create_usercopy(prot->name, 3363 prot->slab = kmem_cache_create_usercopy(prot->name,
3351 prot->obj_size, 0, 3364 prot->obj_size, 0,
@@ -3382,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
3382 } 3395 }
3383 3396
3384 mutex_lock(&proto_list_mutex); 3397 mutex_lock(&proto_list_mutex);
3398 ret = assign_proto_idx(prot);
3399 if (ret) {
3400 mutex_unlock(&proto_list_mutex);
3401 goto out_free_timewait_sock_slab_name;
3402 }
3385 list_add(&prot->node, &proto_list); 3403 list_add(&prot->node, &proto_list);
3386 assign_proto_idx(prot);
3387 mutex_unlock(&proto_list_mutex); 3404 mutex_unlock(&proto_list_mutex);
3388 return 0; 3405 return ret;
3389 3406
3390out_free_timewait_sock_slab_name: 3407out_free_timewait_sock_slab_name:
3391 kfree(prot->twsk_prot->twsk_slab_name); 3408 if (alloc_slab && prot->twsk_prot)
3409 kfree(prot->twsk_prot->twsk_slab_name);
3392out_free_request_sock_slab: 3410out_free_request_sock_slab:
3393 req_prot_cleanup(prot->rsk_prot); 3411 if (alloc_slab) {
3412 req_prot_cleanup(prot->rsk_prot);
3394 3413
3395 kmem_cache_destroy(prot->slab); 3414 kmem_cache_destroy(prot->slab);
3396 prot->slab = NULL; 3415 prot->slab = NULL;
3416 }
3397out: 3417out:
3398 return -ENOBUFS; 3418 return ret;
3399} 3419}
3400EXPORT_SYMBOL(proto_register); 3420EXPORT_SYMBOL(proto_register);
3401 3421
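
The proto_register() changes above turn a best-effort step into a fallible one
and harden the unwind path so each label frees only what was really set up. A
self-contained model of that goto-ladder idiom — every name here is a
hypothetical stand-in, not the net/core/sock.c code:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct proto_like {
	void *slab;
	int idx;
};

static pthread_mutex_t list_mutex = PTHREAD_MUTEX_INITIALIZER;

static int assign_idx(struct proto_like *p)
{
	static int next;	/* index space can run out, like PROTO_INUSE_NR */

	if (next >= 64)
		return -ENOSPC;
	p->idx = next++;
	return 0;
}

static int register_proto(struct proto_like *p, bool alloc_slab)
{
	int ret = -ENOBUFS;

	if (alloc_slab) {
		p->slab = malloc(4096);
		if (!p->slab)
			goto out;
	}

	pthread_mutex_lock(&list_mutex);
	ret = assign_idx(p);	/* may now fail, as in the fix */
	if (ret) {
		pthread_mutex_unlock(&list_mutex);
		goto out_free_slab;
	}
	/* ...insert into the registered list here... */
	pthread_mutex_unlock(&list_mutex);
	return 0;

out_free_slab:
	if (alloc_slab) {	/* only undo what was actually allocated */
		free(p->slab);
		p->slab = NULL;
	}
out:
	return ret;
}
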
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 3312a5849a97..c13ffbd33d8d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
19static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); 19static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
20static DEFINE_MUTEX(sock_diag_table_mutex); 20static DEFINE_MUTEX(sock_diag_table_mutex);
21static struct workqueue_struct *broadcast_wq; 21static struct workqueue_struct *broadcast_wq;
22static atomic64_t cookie_gen;
22 23
23u64 sock_gen_cookie(struct sock *sk) 24u64 sock_gen_cookie(struct sock *sk)
24{ 25{
@@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk)
27 28
28 if (res) 29 if (res)
29 return res; 30 return res;
30 res = atomic64_inc_return(&sock_net(sk)->cookie_gen); 31 res = atomic64_inc_return(&cookie_gen);
31 atomic64_cmpxchg(&sk->sk_cookie, 0, res); 32 atomic64_cmpxchg(&sk->sk_cookie, 0, res);
32 } 33 }
33} 34}
diff --git a/net/core/stream.c b/net/core/stream.c
index e94bb02a5629..4f1d4aa5fb38 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
120 int err = 0; 120 int err = 0;
121 long vm_wait = 0; 121 long vm_wait = 0;
122 long current_timeo = *timeo_p; 122 long current_timeo = *timeo_p;
123 bool noblock = (*timeo_p ? false : true);
124 DEFINE_WAIT_FUNC(wait, woken_wake_function); 123 DEFINE_WAIT_FUNC(wait, woken_wake_function);
125 124
126 if (sk_stream_memory_free(sk)) 125 if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
133 132
134 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 133 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
135 goto do_error; 134 goto do_error;
136 if (!*timeo_p) { 135 if (!*timeo_p)
137 if (noblock) 136 goto do_eagain;
138 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
139 goto do_nonblock;
140 }
141 if (signal_pending(current)) 137 if (signal_pending(current))
142 goto do_interrupted; 138 goto do_interrupted;
143 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 139 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
169do_error: 165do_error:
170 err = -EPIPE; 166 err = -EPIPE;
171 goto out; 167 goto out;
172do_nonblock: 168do_eagain:
169 /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
170 * be generated later.
171 * When TCP receives ACK packets that make room, tcp_check_space()
172 * only calls tcp_new_space() if SOCK_NOSPACE is set.
173 */
174 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
173 err = -EAGAIN; 175 err = -EAGAIN;
174 goto out; 176 goto out;
175do_interrupted: 177do_interrupted:
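
The sk_stream_wait_memory() fix above guarantees SOCK_NOSPACE is set on every
EAGAIN return; per the comment added in the hunk, tcp_check_space() only
generates the later EPOLLOUT when that bit is set, and the case the fix closes
(a send timeout expiring mid-wait) previously returned EAGAIN without setting
it. From userspace the contract looks like this — a sketch assuming fd is
nonblocking and was already registered with EPOLL_CTL_ADD:

#include <errno.h>
#include <sys/epoll.h>
#include <unistd.h>

/* Returns 0 once everything is sent, -1 on a hard error. */
static int send_all(int epfd, int fd, const char *buf, size_t len)
{
	struct epoll_event ev = { .events = EPOLLOUT, .data.fd = fd };
	size_t off = 0;

	while (off < len) {
		ssize_t n = write(fd, buf + off, len - off);

		if (n >= 0) {
			off += (size_t)n;
			continue;
		}
		if (errno != EAGAIN && errno != EWOULDBLOCK)
			return -1;

		/* EAGAIN: park on EPOLLOUT. The kernel only raises it later
		 * if the EAGAIN path flagged SOCK_NOSPACE, which is what the
		 * fix above makes unconditional. */
		if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) < 0)
			return -1;
		if (epoll_wait(epfd, &ev, 1, -1) < 0)
			return -1;
	}
	return 0;
}
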
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 4ec5b7f85d51..09d9286b27cc 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
153{ 153{
154 int port; 154 int port;
155 155
156 if (!ds->ops->port_mdb_add)
157 return;
158
156 for_each_set_bit(port, bitmap, ds->num_ports) 159 for_each_set_bit(port, bitmap, ds->num_ports)
157 ds->ops->port_mdb_add(ds, port, mdb); 160 ds->ops->port_mdb_add(ds, port, mdb);
158} 161}
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 6ebbd799c4eb..67a1bc635a7b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -28,6 +28,7 @@
28 * 28 *
29 * RSV - VID[9]: 29 * RSV - VID[9]:
30 * To be used for further expansion of SWITCH_ID or for other purposes. 30 * To be used for further expansion of SWITCH_ID or for other purposes.
31 * Must be transmitted as zero and ignored on receive.
31 * 32 *
32 * SWITCH_ID - VID[8:6]: 33 * SWITCH_ID - VID[8:6]:
33 * Index of switch within DSA tree. Must be between 0 and 34 * Index of switch within DSA tree. Must be between 0 and
@@ -35,6 +36,7 @@
35 * 36 *
36 * RSV - VID[5:4]: 37 * RSV - VID[5:4]:
37 * To be used for further expansion of PORT or for other purposes. 38 * To be used for further expansion of PORT or for other purposes.
39 * Must be transmitted as zero and ignored on receive.
38 * 40 *
39 * PORT - VID[3:0]: 41 * PORT - VID[3:0]:
40 * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1. 42 * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e4aba5d485be..bbe9b3b2d395 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -170,7 +170,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
170 reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); 170 reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
171 if (!reasm_data) 171 if (!reasm_data)
172 goto out_oom; 172 goto out_oom;
173 inet_frag_reasm_finish(&fq->q, skb, reasm_data); 173 inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
174 174
175 skb->dev = ldev; 175 skb->dev = ldev;
176 skb->tstamp = fq->q.stamp; 176 skb->tstamp = fq->q.stamp;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index dacbd58e1799..badc5cfe4dc6 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {
1092 1092
1093static int __init af_ieee802154_init(void) 1093static int __init af_ieee802154_init(void)
1094{ 1094{
1095 int rc = -EINVAL; 1095 int rc;
1096 1096
1097 rc = proto_register(&ieee802154_raw_prot, 1); 1097 rc = proto_register(&ieee802154_raw_prot, 1);
1098 if (rc) 1098 if (rc)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b2b3d291ab0..1ab2fb6bb37d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
2145 2145
2146 if (filter->dump_exceptions) { 2146 if (filter->dump_exceptions) {
2147 err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi, 2147 err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
2148 &i_fa, s_fa); 2148 &i_fa, s_fa, flags);
2149 if (err < 0) 2149 if (err < 0)
2150 goto stop; 2150 goto stop;
2151 } 2151 }
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1510e951f451..4298aae74e0e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
582 582
583 if (!rt) 583 if (!rt)
584 goto out; 584 goto out;
585 net = dev_net(rt->dst.dev); 585
586 if (rt->dst.dev)
587 net = dev_net(rt->dst.dev);
588 else if (skb_in->dev)
589 net = dev_net(skb_in->dev);
590 else
591 goto out;
586 592
587 /* 593 /*
588 * Find the original header. It is expected to be valid, of course. 594 * Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
902 return false; 908 return false;
903 } 909 }
904 910
905 icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway); 911 icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
906 return true; 912 return true;
907} 913}
908 914
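
Two independent icmp.c fixes here: __icmp_send() no longer trusts rt->dst.dev to be non-NULL when deriving the netns, and icmp_redirect() converts the on-wire gateway with ntohl() before handing it to icmp_socket_deliver(), whose info parameter is a host-order u32. A standalone demonstration of why the byte swap matters:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wire = htonl(0xc0a80001);  /* 192.168.0.1 on the wire */

        printf("raw  : 0x%08x\n", wire);        /* 0x0100a8c0 on LE hosts */
        printf("ntohl: 0x%08x\n", ntohl(wire)); /* 0xc0a80001 everywhere */
        return 0;
    }
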
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 180f6896b98b..480d0b22db1a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
1475 1475
1476void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) 1476void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1477{ 1477{
1478 __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE); 1478 __ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
1479} 1479}
1480EXPORT_SYMBOL(ip_mc_inc_group); 1480EXPORT_SYMBOL(ip_mc_inc_group);
1481 1481
@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
2197 iml->sflist = NULL; 2197 iml->sflist = NULL;
2198 iml->sfmode = mode; 2198 iml->sfmode = mode;
2199 rcu_assign_pointer(inet->mc_list, iml); 2199 rcu_assign_pointer(inet->mc_list, iml);
2200 __ip_mc_inc_group(in_dev, addr, mode); 2200 ____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
2201 err = 0; 2201 err = 0;
2202done: 2202done:
2203 return err; 2203 return err;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a999451345f9..10d31733297d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -475,11 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
475EXPORT_SYMBOL(inet_frag_reasm_prepare); 475EXPORT_SYMBOL(inet_frag_reasm_prepare);
476 476
477void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, 477void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
478 void *reasm_data) 478 void *reasm_data, bool try_coalesce)
479{ 479{
480 struct sk_buff **nextp = (struct sk_buff **)reasm_data; 480 struct sk_buff **nextp = (struct sk_buff **)reasm_data;
481 struct rb_node *rbn; 481 struct rb_node *rbn;
482 struct sk_buff *fp; 482 struct sk_buff *fp;
483 int sum_truesize;
483 484
484 skb_push(head, head->data - skb_network_header(head)); 485 skb_push(head, head->data - skb_network_header(head));
485 486
@@ -487,25 +488,41 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
487 fp = FRAG_CB(head)->next_frag; 488 fp = FRAG_CB(head)->next_frag;
488 rbn = rb_next(&head->rbnode); 489 rbn = rb_next(&head->rbnode);
489 rb_erase(&head->rbnode, &q->rb_fragments); 490 rb_erase(&head->rbnode, &q->rb_fragments);
491
492 sum_truesize = head->truesize;
490 while (rbn || fp) { 493 while (rbn || fp) {
491 /* fp points to the next sk_buff in the current run; 494 /* fp points to the next sk_buff in the current run;
492 * rbn points to the next run. 495 * rbn points to the next run.
493 */ 496 */
494 /* Go through the current run. */ 497 /* Go through the current run. */
495 while (fp) { 498 while (fp) {
496 *nextp = fp; 499 struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
497 nextp = &fp->next; 500 bool stolen;
498 fp->prev = NULL; 501 int delta;
499 memset(&fp->rbnode, 0, sizeof(fp->rbnode)); 502
500 fp->sk = NULL; 503 sum_truesize += fp->truesize;
501 head->data_len += fp->len;
502 head->len += fp->len;
503 if (head->ip_summed != fp->ip_summed) 504 if (head->ip_summed != fp->ip_summed)
504 head->ip_summed = CHECKSUM_NONE; 505 head->ip_summed = CHECKSUM_NONE;
505 else if (head->ip_summed == CHECKSUM_COMPLETE) 506 else if (head->ip_summed == CHECKSUM_COMPLETE)
506 head->csum = csum_add(head->csum, fp->csum); 507 head->csum = csum_add(head->csum, fp->csum);
507 head->truesize += fp->truesize; 508
508 fp = FRAG_CB(fp)->next_frag; 509 if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
510 &delta)) {
511 kfree_skb_partial(fp, stolen);
512 } else {
513 fp->prev = NULL;
514 memset(&fp->rbnode, 0, sizeof(fp->rbnode));
515 fp->sk = NULL;
516
517 head->data_len += fp->len;
518 head->len += fp->len;
519 head->truesize += fp->truesize;
520
521 *nextp = fp;
522 nextp = &fp->next;
523 }
524
525 fp = next_frag;
509 } 526 }
510 /* Move to the next run. */ 527 /* Move to the next run. */
511 if (rbn) { 528 if (rbn) {
@@ -516,7 +533,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
516 rbn = rbnext; 533 rbn = rbnext;
517 } 534 }
518 } 535 }
519 sub_frag_mem_limit(q->fqdir, head->truesize); 536 sub_frag_mem_limit(q->fqdir, sum_truesize);
520 537
521 *nextp = NULL; 538 *nextp = NULL;
522 skb_mark_not_on_list(head); 539 skb_mark_not_on_list(head);
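
With try_coalesce, fragments reassembled for local delivery can be merged via skb_try_coalesce(), which frees the source skb and grows head->truesize by a delta that may be smaller than the fragment's full truesize (the skb head itself is not transferred). The memory limit, however, was charged each fragment's full truesize at enqueue time, so the final sub_frag_mem_limit() must use the running sum_truesize rather than head->truesize. A toy model of that invariant (values are purely illustrative):

    #include <stdio.h>

    int main(void)
    {
        int frag_truesize[3] = { 768, 768, 768 }; /* charged at enqueue */
        int charged = 768 * 3;

        int head_truesize = frag_truesize[0];     /* fragment 0 is head */
        int sum_truesize  = frag_truesize[0];

        head_truesize += 512;                     /* coalesced: delta < 768 */
        sum_truesize  += frag_truesize[1];

        head_truesize += frag_truesize[2];        /* chained as-is */
        sum_truesize  += frag_truesize[2];

        printf("charged=%d sum=%d head=%d\n",
               charged, sum_truesize, head_truesize);
        /* Only sum_truesize matches what must be uncharged. */
        return 0;
    }
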
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4385eb9e781f..cfeb8890f94e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -393,6 +393,11 @@ err:
393 return err; 393 return err;
394} 394}
395 395
396static bool ip_frag_coalesce_ok(const struct ipq *qp)
397{
398 return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
399}
400
396/* Build a new IP datagram from all its fragments. */ 401/* Build a new IP datagram from all its fragments. */
397static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, 402static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
398 struct sk_buff *prev_tail, struct net_device *dev) 403 struct sk_buff *prev_tail, struct net_device *dev)
@@ -421,7 +426,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
421 if (len > 65535) 426 if (len > 65535)
422 goto out_oversize; 427 goto out_oversize;
423 428
424 inet_frag_reasm_finish(&qp->q, skb, reasm_data); 429 inet_frag_reasm_finish(&qp->q, skb, reasm_data,
430 ip_frag_coalesce_ok(qp));
425 431
426 skb->dev = dev; 432 skb->dev = dev;
427 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); 433 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 517300d587a7..b6a6f18c3dd1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
2728/* called with rcu_read_lock held */ 2728/* called with rcu_read_lock held */
2729static int rt_fill_info(struct net *net, __be32 dst, __be32 src, 2729static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4, 2730 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2731 struct sk_buff *skb, u32 portid, u32 seq) 2731 struct sk_buff *skb, u32 portid, u32 seq,
2732 unsigned int flags)
2732{ 2733{
2733 struct rtmsg *r; 2734 struct rtmsg *r;
2734 struct nlmsghdr *nlh; 2735 struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2736 u32 error; 2737 u32 error;
2737 u32 metrics[RTAX_MAX]; 2738 u32 metrics[RTAX_MAX];
2738 2739
2739 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0); 2740 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2740 if (!nlh) 2741 if (!nlh)
2741 return -EMSGSIZE; 2742 return -EMSGSIZE;
2742 2743
@@ -2860,7 +2861,7 @@ nla_put_failure:
2860static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb, 2861static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2861 struct netlink_callback *cb, u32 table_id, 2862 struct netlink_callback *cb, u32 table_id,
2862 struct fnhe_hash_bucket *bucket, int genid, 2863 struct fnhe_hash_bucket *bucket, int genid,
2863 int *fa_index, int fa_start) 2864 int *fa_index, int fa_start, unsigned int flags)
2864{ 2865{
2865 int i; 2866 int i;
2866 2867
@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2891 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, 2892 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
2892 table_id, NULL, skb, 2893 table_id, NULL, skb,
2893 NETLINK_CB(cb->skb).portid, 2894 NETLINK_CB(cb->skb).portid,
2894 cb->nlh->nlmsg_seq); 2895 cb->nlh->nlmsg_seq, flags);
2895 if (err) 2896 if (err)
2896 return err; 2897 return err;
2897next: 2898next:
@@ -2904,7 +2905,7 @@ next:
2904 2905
2905int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb, 2906int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2906 u32 table_id, struct fib_info *fi, 2907 u32 table_id, struct fib_info *fi,
2907 int *fa_index, int fa_start) 2908 int *fa_index, int fa_start, unsigned int flags)
2908{ 2909{
2909 struct net *net = sock_net(cb->skb->sk); 2910 struct net *net = sock_net(cb->skb->sk);
2910 int nhsel, genid = fnhe_genid(net); 2911 int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2922 err = 0; 2923 err = 0;
2923 if (bucket) 2924 if (bucket)
2924 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, 2925 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
2925 genid, fa_index, fa_start); 2926 genid, fa_index, fa_start,
2927 flags);
2926 rcu_read_unlock(); 2928 rcu_read_unlock();
2927 if (err) 2929 if (err)
2928 return err; 2930 return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3183 fl4.flowi4_tos, res.fi, 0); 3185 fl4.flowi4_tos, res.fi, 0);
3184 } else { 3186 } else {
3185 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb, 3187 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3186 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); 3188 NETLINK_CB(in_skb).portid,
3189 nlh->nlmsg_seq, 0);
3187 } 3190 }
3188 if (err < 0) 3191 if (err < 0)
3189 goto errout_rcu; 3192 goto errout_rcu;
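
rt_fill_info() gains a flags argument threaded down from both callers, so replies generated during a route dump can carry NLM_F_MULTI while a single RTM_GETROUTE answer keeps flags at 0; user space relies on that bit to recognize multipart dumps. A minimal sketch of the two header shapes (Linux uapi headers assumed):

    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <stdio.h>

    int main(void)
    {
        struct nlmsghdr dump = {
            .nlmsg_type  = RTM_NEWROUTE,
            .nlmsg_flags = NLM_F_MULTI,  /* one reply among many */
        };
        struct nlmsghdr single = {
            .nlmsg_type  = RTM_NEWROUTE,
            .nlmsg_flags = 0,            /* lone RTM_GETROUTE answer */
        };

        printf("dump=0x%x single=0x%x\n",
               dump.nlmsg_flags, single.nlmsg_flags);
        return 0;
    }
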
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 776905899ac0..61082065b26a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -935,6 +935,22 @@ static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
935 return mss_now; 935 return mss_now;
936} 936}
937 937
938/* In some cases, both sendpage() and sendmsg() could have added
939 * an skb to the write queue, but failed adding payload on it.
940 * We need to remove it to consume less memory, but more
941 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
942 * users.
943 */
944static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
945{
946 if (skb && !skb->len) {
947 tcp_unlink_write_queue(skb, sk);
948 if (tcp_write_queue_empty(sk))
949 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
950 sk_wmem_free_skb(sk, skb);
951 }
952}
953
938ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 954ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
939 size_t size, int flags) 955 size_t size, int flags)
940{ 956{
@@ -984,6 +1000,9 @@ new_segment:
984 if (!skb) 1000 if (!skb)
985 goto wait_for_memory; 1001 goto wait_for_memory;
986 1002
1003#ifdef CONFIG_TLS_DEVICE
1004 skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
1005#endif
987 skb_entail(sk, skb); 1006 skb_entail(sk, skb);
988 copy = size_goal; 1007 copy = size_goal;
989 } 1008 }
@@ -1061,6 +1080,7 @@ out:
1061 return copied; 1080 return copied;
1062 1081
1063do_error: 1082do_error:
1083 tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
1064 if (copied) 1084 if (copied)
1065 goto out; 1085 goto out;
1066out_err: 1086out_err:
@@ -1385,18 +1405,11 @@ out_nopush:
1385 sock_zerocopy_put(uarg); 1405 sock_zerocopy_put(uarg);
1386 return copied + copied_syn; 1406 return copied + copied_syn;
1387 1407
1408do_error:
1409 skb = tcp_write_queue_tail(sk);
1388do_fault: 1410do_fault:
1389 if (!skb->len) { 1411 tcp_remove_empty_skb(sk, skb);
1390 tcp_unlink_write_queue(skb, sk);
1391 /* It is the one place in all of TCP, except connection
1392 * reset, where we can be unlinking the send_head.
1393 */
1394 if (tcp_write_queue_empty(sk))
1395 tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1396 sk_wmem_free_skb(sk, skb);
1397 }
1398 1412
1399do_error:
1400 if (copied + copied_syn) 1413 if (copied + copied_syn)
1401 goto out; 1414 goto out;
1402out_err: 1415out_err:
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3d1e15401384..8a56e09cfb0e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -398,10 +398,14 @@ more_data:
398static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 398static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
399{ 399{
400 struct sk_msg tmp, *msg_tx = NULL; 400 struct sk_msg tmp, *msg_tx = NULL;
401 int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
402 int copied = 0, err = 0; 401 int copied = 0, err = 0;
403 struct sk_psock *psock; 402 struct sk_psock *psock;
404 long timeo; 403 long timeo;
404 int flags;
405
406 /* Don't let internal do_tcp_sendpages() flags through */
407 flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
408 flags |= MSG_NO_SHARED_FRAGS;
405 409
406 psock = sk_psock_get(sk); 410 psock = sk_psock_get(sk);
407 if (unlikely(!psock)) 411 if (unlikely(!psock))
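
msg->msg_flags arrives straight from user space, so tcp_bpf_sendmsg() must strip the kernel-internal MSG_SENDPAGE_DECRYPTED bit before forwarding the flags, then OR in its own MSG_NO_SHARED_FRAGS. A generic sketch of the sanitize-then-augment pattern (flag values hypothetical):

    #define INTERNAL_ONLY   0x40  /* must never leak in from user space */
    #define INTERNAL_WANTED 0x80  /* always set on this path */

    static int sanitize_flags(int user_flags)
    {
        int flags = user_flags & ~INTERNAL_ONLY;  /* strip internal bits */

        return flags | INTERNAL_WANTED;           /* add our own */
    }
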
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6e4afc48d7bb..8a645f304e6c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1320,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1320 buff = sk_stream_alloc_skb(sk, nsize, gfp, true); 1320 buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
1321 if (!buff) 1321 if (!buff)
1322 return -ENOMEM; /* We'll just try again later. */ 1322 return -ENOMEM; /* We'll just try again later. */
1323 skb_copy_decrypted(buff, skb);
1323 1324
1324 sk->sk_wmem_queued += buff->truesize; 1325 sk->sk_wmem_queued += buff->truesize;
1325 sk_mem_charge(sk, buff->truesize); 1326 sk_mem_charge(sk, buff->truesize);
@@ -1874,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1874 buff = sk_stream_alloc_skb(sk, 0, gfp, true); 1875 buff = sk_stream_alloc_skb(sk, 0, gfp, true);
1875 if (unlikely(!buff)) 1876 if (unlikely(!buff))
1876 return -ENOMEM; 1877 return -ENOMEM;
1878 skb_copy_decrypted(buff, skb);
1877 1879
1878 sk->sk_wmem_queued += buff->truesize; 1880 sk->sk_wmem_queued += buff->truesize;
1879 sk_mem_charge(sk, buff->truesize); 1881 sk_mem_charge(sk, buff->truesize);
@@ -2051,7 +2053,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2051 if (len <= skb->len) 2053 if (len <= skb->len)
2052 break; 2054 break;
2053 2055
2054 if (unlikely(TCP_SKB_CB(skb)->eor)) 2056 if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2055 return false; 2057 return false;
2056 2058
2057 len -= skb->len; 2059 len -= skb->len;
@@ -2143,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
2143 sk_mem_charge(sk, nskb->truesize); 2145 sk_mem_charge(sk, nskb->truesize);
2144 2146
2145 skb = tcp_send_head(sk); 2147 skb = tcp_send_head(sk);
2148 skb_copy_decrypted(nskb, skb);
2146 2149
2147 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 2150 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2148 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 2151 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
@@ -2167,6 +2170,7 @@ static int tcp_mtu_probe(struct sock *sk)
2167 * we need to propagate it to the new skb. 2170 * we need to propagate it to the new skb.
2168 */ 2171 */
2169 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; 2172 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2173 tcp_skb_collapse_tstamp(nskb, skb);
2170 tcp_unlink_write_queue(skb, sk); 2174 tcp_unlink_write_queue(skb, sk);
2171 sk_wmem_free_skb(sk, skb); 2175 sk_wmem_free_skb(sk, skb);
2172 } else { 2176 } else {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index dc73888c7859..6a576ff92c39 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
478 if (!idev) { 478 if (!idev) {
479 idev = ipv6_add_dev(dev); 479 idev = ipv6_add_dev(dev);
480 if (IS_ERR(idev)) 480 if (IS_ERR(idev))
481 return NULL; 481 return idev;
482 } 482 }
483 483
484 if (dev->flags&IFF_UP) 484 if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1045 int err = 0; 1045 int err = 0;
1046 1046
1047 if (addr_type == IPV6_ADDR_ANY || 1047 if (addr_type == IPV6_ADDR_ANY ||
1048 addr_type & IPV6_ADDR_MULTICAST || 1048 (addr_type & IPV6_ADDR_MULTICAST &&
1049 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1049 (!(idev->dev->flags & IFF_LOOPBACK) && 1050 (!(idev->dev->flags & IFF_LOOPBACK) &&
1050 !netif_is_l3_master(idev->dev) && 1051 !netif_is_l3_master(idev->dev) &&
1051 addr_type & IPV6_ADDR_LOOPBACK)) 1052 addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2465 ASSERT_RTNL(); 2466 ASSERT_RTNL();
2466 2467
2467 idev = ipv6_find_idev(dev); 2468 idev = ipv6_find_idev(dev);
2468 if (!idev) 2469 if (IS_ERR(idev))
2469 return ERR_PTR(-ENOBUFS); 2470 return idev;
2470 2471
2471 if (idev->cnf.disable_ipv6) 2472 if (idev->cnf.disable_ipv6)
2472 return ERR_PTR(-EACCES); 2473 return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
3158 ASSERT_RTNL(); 3159 ASSERT_RTNL();
3159 3160
3160 idev = ipv6_find_idev(dev); 3161 idev = ipv6_find_idev(dev);
3161 if (!idev) { 3162 if (IS_ERR(idev)) {
3162 pr_debug("%s: add_dev failed\n", __func__); 3163 pr_debug("%s: add_dev failed\n", __func__);
3163 return; 3164 return;
3164 } 3165 }
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
3373 */ 3374 */
3374 3375
3375 idev = ipv6_find_idev(dev); 3376 idev = ipv6_find_idev(dev);
3376 if (!idev) { 3377 if (IS_ERR(idev)) {
3377 pr_debug("%s: add_dev failed\n", __func__); 3378 pr_debug("%s: add_dev failed\n", __func__);
3378 return; 3379 return;
3379 } 3380 }
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
3398 ASSERT_RTNL(); 3399 ASSERT_RTNL();
3399 3400
3400 idev = ipv6_find_idev(dev); 3401 idev = ipv6_find_idev(dev);
3401 if (!idev) { 3402 if (IS_ERR(idev)) {
3402 pr_debug("%s: add_dev failed\n", __func__); 3403 pr_debug("%s: add_dev failed\n", __func__);
3403 return; 3404 return;
3404 } 3405 }
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4772 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC; 4773 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4773 4774
4774 idev = ipv6_find_idev(dev); 4775 idev = ipv6_find_idev(dev);
4775 if (!idev) 4776 if (IS_ERR(idev))
4776 return -ENOBUFS; 4777 return PTR_ERR(idev);
4777 4778
4778 if (!ipv6_allow_optimistic_dad(net, idev)) 4779 if (!ipv6_allow_optimistic_dad(net, idev))
4779 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC; 4780 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
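
ipv6_find_idev() now propagates the ERR_PTR from ipv6_add_dev() instead of flattening every failure to NULL, and the callers switch from !idev to IS_ERR()/PTR_ERR(), so the real errno (-ENOMEM, -EACCES, ...) reaches user space rather than a blanket -ENOBUFS. A self-contained user-space model of the convention (simplified from include/linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* Errors occupy the top 4095 values of the address space. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *find_thing(int fail)
    {
        static int thing;

        if (fail)
            return ERR_PTR(-12);  /* -ENOMEM, propagated as-is */
        return &thing;
    }

    int main(void)
    {
        void *p = find_thing(1);

        if (IS_ERR(p))
            printf("caller sees errno %ld\n", PTR_ERR(p));
        return 0;
    }
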
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7f3f13c37916..eaa4c2cc2fbb 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -787,14 +787,15 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
787 if (pmc) { 787 if (pmc) {
788 im->idev = pmc->idev; 788 im->idev = pmc->idev;
789 if (im->mca_sfmode == MCAST_INCLUDE) { 789 if (im->mca_sfmode == MCAST_INCLUDE) {
790 im->mca_tomb = pmc->mca_tomb; 790 swap(im->mca_tomb, pmc->mca_tomb);
791 im->mca_sources = pmc->mca_sources; 791 swap(im->mca_sources, pmc->mca_sources);
792 for (psf = im->mca_sources; psf; psf = psf->sf_next) 792 for (psf = im->mca_sources; psf; psf = psf->sf_next)
793 psf->sf_crcount = idev->mc_qrv; 793 psf->sf_crcount = idev->mc_qrv;
794 } else { 794 } else {
795 im->mca_crcount = idev->mc_qrv; 795 im->mca_crcount = idev->mc_qrv;
796 } 796 }
797 in6_dev_put(pmc->idev); 797 in6_dev_put(pmc->idev);
798 ip6_mc_clear_src(pmc);
798 kfree(pmc); 799 kfree(pmc);
799 } 800 }
800 spin_unlock_bh(&im->mca_lock); 801 spin_unlock_bh(&im->mca_lock);
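
Using swap() instead of plain assignment hands im the saved mca_tomb/mca_sources lists while pmc takes over whatever im held before, and the added ip6_mc_clear_src(pmc) frees exactly the lists pmc ends up owning before kfree(pmc). Every list then has a single owner, so neither a leak nor a double free is possible. A toy ownership model (simplified, single-element "lists"):

    #include <stdlib.h>

    #define swap(a, b) do { void *__t = (a); (a) = (b); (b) = __t; } while (0)

    struct node { struct node *next; };

    static void free_list(struct node *n)
    {
        while (n) {
            struct node *next = n->next;

            free(n);
            n = next;
        }
    }

    int main(void)
    {
        struct node *pmc_sources = calloc(1, sizeof(*pmc_sources));
        struct node *im_sources = NULL;   /* im's current (empty) list */

        /* im adopts pmc's saved sources; pmc gets im's old list back. */
        swap(im_sources, pmc_sources);

        free_list(pmc_sources);  /* ip6_mc_clear_src(pmc): frees old list */
        free_list(im_sources);   /* freed later, when im itself goes away */
        return 0;
    }
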
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0f82c150543b..fed9666a2f7d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
348 348
349 skb_reset_transport_header(skb); 349 skb_reset_transport_header(skb);
350 350
351 inet_frag_reasm_finish(&fq->q, skb, reasm_data); 351 inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
352 352
353 skb->ignore_df = 1; 353 skb->ignore_df = 1;
354 skb->dev = dev; 354 skb->dev = dev;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index ca05b16f1bb9..1f5d4d196dcc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -282,7 +282,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
282 282
283 skb_reset_transport_header(skb); 283 skb_reset_transport_header(skb);
284 284
285 inet_frag_reasm_finish(&fq->q, skb, reasm_data); 285 inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
286 286
287 skb->dev = dev; 287 skb->dev = dev;
288 ipv6_hdr(skb)->payload_len = htons(payload_len); 288 ipv6_hdr(skb)->payload_len = htons(payload_len);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4d458067d80d..111c400199ec 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1546,6 +1546,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1546 if (is_multicast_ether_addr(mac)) 1546 if (is_multicast_ether_addr(mac))
1547 return -EINVAL; 1547 return -EINVAL;
1548 1548
1549 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
1550 sdata->vif.type == NL80211_IFTYPE_STATION &&
1551 !sdata->u.mgd.associated)
1552 return -EINVAL;
1553
1549 sta = sta_info_alloc(sdata, mac, GFP_KERNEL); 1554 sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
1550 if (!sta) 1555 if (!sta)
1551 return -ENOMEM; 1556 return -ENOMEM;
@@ -1553,10 +1558,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1553 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 1558 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
1554 sta->sta.tdls = true; 1559 sta->sta.tdls = true;
1555 1560
1556 if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
1557 !sdata->u.mgd.associated)
1558 return -EINVAL;
1559
1560 err = sta_apply_parameters(local, sta, params); 1561 err = sta_apply_parameters(local, sta, params);
1561 if (err) { 1562 if (err) {
1562 sta_info_free(local, sta); 1563 sta_info_free(local, sta);
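
Moving the TDLS-peer check ahead of sta_info_alloc() closes a memory leak: the old code could return -EINVAL after allocating sta without ever freeing it. The general rule is to run all resource-free validation first. A sketch with hypothetical names:

    #include <stdlib.h>

    struct station { int id; };

    static int add_station(int params_ok)
    {
        struct station *sta;

        if (!params_ok)      /* cheap checks first: nothing to undo */
            return -1;

        sta = malloc(sizeof(*sta));
        if (!sta)
            return -1;

        /* ... from here on, every error path must free(sta) ... */
        free(sta);
        return 0;
    }
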
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3c1ab870fefe..768d14c9a716 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2447,11 +2447,13 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2447 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && 2447 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) &&
2448 sdata->control_port_over_nl80211)) { 2448 sdata->control_port_over_nl80211)) {
2449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2450 bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2450 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2451 2451
2452 cfg80211_rx_control_port(dev, skb, noencrypt); 2452 cfg80211_rx_control_port(dev, skb, noencrypt);
2453 dev_kfree_skb(skb); 2453 dev_kfree_skb(skb);
2454 } else { 2454 } else {
2455 memset(skb->cb, 0, sizeof(skb->cb));
2456
2455 /* deliver to local stack */ 2457 /* deliver to local stack */
2456 if (rx->napi) 2458 if (rx->napi)
2457 napi_gro_receive(rx->napi, skb); 2459 napi_gro_receive(rx->napi, skb);
@@ -2546,8 +2548,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2546 2548
2547 if (skb) { 2549 if (skb) {
2548 skb->protocol = eth_type_trans(skb, dev); 2550 skb->protocol = eth_type_trans(skb, dev);
2549 memset(skb->cb, 0, sizeof(skb->cb));
2550
2551 ieee80211_deliver_skb_to_local_stack(skb, rx); 2551 ieee80211_deliver_skb_to_local_stack(skb, rx);
2552 } 2552 }
2553 2553
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index d25e91d7bdc1..44b675016393 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
133 mpls_stats_inc_outucastpkts(out_dev, skb); 133 mpls_stats_inc_outucastpkts(out_dev, skb);
134 134
135 if (rt) { 135 if (rt) {
136 if (rt->rt_gw_family == AF_INET) 136 if (rt->rt_gw_family == AF_INET6)
137 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
138 skb);
139 else if (rt->rt_gw_family == AF_INET6)
140 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6, 137 err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
141 skb); 138 skb);
139 else
140 err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
141 skb);
142 } else if (rt6) { 142 } else if (rt6) {
143 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) { 143 if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
144 /* 6PE (RFC 4798) */ 144 /* 6PE (RFC 4798) */
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 5c3fad8cba57..0187e65176c0 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
54 checksum = ncsi_calculate_checksum((unsigned char *)h, 54 checksum = ncsi_calculate_checksum((unsigned char *)h,
55 sizeof(*h) + nca->payload); 55 sizeof(*h) + nca->payload);
56 pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) + 56 pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
57 nca->payload); 57 ALIGN(nca->payload, 4));
58 *pchecksum = htonl(checksum); 58 *pchecksum = htonl(checksum);
59} 59}
60 60
@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
309 309
310int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca) 310int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
311{ 311{
312 struct ncsi_cmd_handler *nch = NULL;
312 struct ncsi_request *nr; 313 struct ncsi_request *nr;
314 unsigned char type;
313 struct ethhdr *eh; 315 struct ethhdr *eh;
314 struct ncsi_cmd_handler *nch = NULL;
315 int i, ret; 316 int i, ret;
316 317
318 /* Use OEM generic handler for Netlink request */
319 if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
320 type = NCSI_PKT_CMD_OEM;
321 else
322 type = nca->type;
323
317 /* Search for the handler */ 324 /* Search for the handler */
318 for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) { 325 for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
319 if (ncsi_cmd_handlers[i].type == nca->type) { 326 if (ncsi_cmd_handlers[i].type == type) {
320 if (ncsi_cmd_handlers[i].handler) 327 if (ncsi_cmd_handlers[i].handler)
321 nch = &ncsi_cmd_handlers[i]; 328 nch = &ncsi_cmd_handlers[i];
322 else 329 else
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 7581bf919885..d876bd55f356 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
47 if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED || 47 if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
48 ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) { 48 ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
49 netdev_dbg(nr->ndp->ndev.dev, 49 netdev_dbg(nr->ndp->ndev.dev,
50 "NCSI: non zero response/reason code\n"); 50 "NCSI: non zero response/reason code %04xh, %04xh\n",
51 ntohs(h->code), ntohs(h->reason));
51 return -EPERM; 52 return -EPERM;
52 } 53 }
53 54
@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
55 * sender doesn't support checksum according to NCSI 56 * sender doesn't support checksum according to NCSI
56 * specification. 57 * specification.
57 */ 58 */
58 pchecksum = (__be32 *)((void *)(h + 1) + payload - 4); 59 pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
59 if (ntohl(*pchecksum) == 0) 60 if (ntohl(*pchecksum) == 0)
60 return 0; 61 return 0;
61 62
@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
63 sizeof(*h) + payload - 4); 64 sizeof(*h) + payload - 4);
64 65
65 if (*pchecksum != htonl(checksum)) { 66 if (*pchecksum != htonl(checksum)) {
66 netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n"); 67 netdev_dbg(nr->ndp->ndev.dev,
68 "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
69 *pchecksum, htonl(checksum));
67 return -EINVAL; 70 return -EINVAL;
68 } 71 }
69 72
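
Both NC-SI fixes account for the payload being padded to a 4-byte boundary before the checksum: ncsi_cmd_build_header() now writes the checksum ALIGN(payload, 4) bytes past the header, and ncsi_validate_rsp_pkt() reads it back from the matching offset. How ALIGN() (for power-of-two boundaries) moves the offset:

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

    int main(void)
    {
        unsigned int payload;

        for (payload = 24; payload <= 27; payload++)
            printf("payload=%2u -> checksum offset %u\n",
                   payload, ALIGN(payload, 4));
        /* 24->24, 25->28, 26->28, 27->28 */
        return 0;
    }
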
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a542761e90d1..81a8ef42b88d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
453 * table location, we assume id gets exposed to userspace. 453 * table location, we assume id gets exposed to userspace.
454 * 454 *
455 * Following nf_conn items do not change throughout lifetime 455 * Following nf_conn items do not change throughout lifetime
456 * of the nf_conn after it has been committed to main hash table: 456 * of the nf_conn:
457 * 457 *
458 * 1. nf_conn address 458 * 1. nf_conn address
459 * 2. nf_conn->ext address 459 * 2. nf_conn->master address (normally NULL)
460 * 3. nf_conn->master address (normally NULL) 460 * 3. the associated net namespace
461 * 4. tuple 461 * 4. the original direction tuple
462 * 5. the associated net namespace
463 */ 462 */
464u32 nf_ct_get_id(const struct nf_conn *ct) 463u32 nf_ct_get_id(const struct nf_conn *ct)
465{ 464{
@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
469 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed)); 468 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
470 469
471 a = (unsigned long)ct; 470 a = (unsigned long)ct;
472 b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct)); 471 b = (unsigned long)ct->master;
473 c = (unsigned long)ct->ext; 472 c = (unsigned long)nf_ct_net(ct);
474 d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash), 473 d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
474 sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
475 &ct_id_seed); 475 &ct_id_seed);
476#ifdef CONFIG_64BIT 476#ifdef CONFIG_64BIT
477 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed); 477 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 0ecb3e289ef2..8d96738b7dfd 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen,
322 i++; 322 i++;
323 } 323 }
324 324
325 pr_debug("Skipped up to `%c'!\n", skip); 325 pr_debug("Skipped up to 0x%hhx delimiter!\n", skip);
326 326
327 *numoff = i; 327 *numoff = i;
328 *numlen = getnum(data + i, dlen - i, cmd, term, numoff); 328 *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index e0d392cb3075..0006503d2da9 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1037,9 +1037,14 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
1037 table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; 1037 table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
1038 table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; 1038 table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
1039 table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; 1039 table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
1040 table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
1041 table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper;
1040#ifdef CONFIG_NF_CONNTRACK_EVENTS 1042#ifdef CONFIG_NF_CONNTRACK_EVENTS
1041 table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; 1043 table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
1042#endif 1044#endif
1045#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
1046 table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp;
1047#endif
1043 table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; 1048 table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout;
1044 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; 1049 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
1045 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; 1050 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index e3d797252a98..80a8f9ae4c93 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
111#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) 111#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
112#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) 112#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
113 113
114static void flow_offload_fixup_ct_state(struct nf_conn *ct) 114static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
115{
116 return (__s32)(timeout - (u32)jiffies);
117}
118
119static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
115{ 120{
116 const struct nf_conntrack_l4proto *l4proto; 121 const struct nf_conntrack_l4proto *l4proto;
122 int l4num = nf_ct_protonum(ct);
117 unsigned int timeout; 123 unsigned int timeout;
118 int l4num;
119
120 l4num = nf_ct_protonum(ct);
121 if (l4num == IPPROTO_TCP)
122 flow_offload_fixup_tcp(&ct->proto.tcp);
123 124
124 l4proto = nf_ct_l4proto_find(l4num); 125 l4proto = nf_ct_l4proto_find(l4num);
125 if (!l4proto) 126 if (!l4proto)
@@ -132,7 +133,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
132 else 133 else
133 return; 134 return;
134 135
135 ct->timeout = nfct_time_stamp + timeout; 136 if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
137 ct->timeout = nfct_time_stamp + timeout;
138}
139
140static void flow_offload_fixup_ct_state(struct nf_conn *ct)
141{
142 if (nf_ct_protonum(ct) == IPPROTO_TCP)
143 flow_offload_fixup_tcp(&ct->proto.tcp);
144}
145
146static void flow_offload_fixup_ct(struct nf_conn *ct)
147{
148 flow_offload_fixup_ct_state(ct);
149 flow_offload_fixup_ct_timeout(ct);
136} 150}
137 151
138void flow_offload_free(struct flow_offload *flow) 152void flow_offload_free(struct flow_offload *flow)
@@ -208,6 +222,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
208} 222}
209EXPORT_SYMBOL_GPL(flow_offload_add); 223EXPORT_SYMBOL_GPL(flow_offload_add);
210 224
225static inline bool nf_flow_has_expired(const struct flow_offload *flow)
226{
227 return nf_flow_timeout_delta(flow->timeout) <= 0;
228}
229
211static void flow_offload_del(struct nf_flowtable *flow_table, 230static void flow_offload_del(struct nf_flowtable *flow_table,
212 struct flow_offload *flow) 231 struct flow_offload *flow)
213{ 232{
@@ -223,6 +242,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
223 e = container_of(flow, struct flow_offload_entry, flow); 242 e = container_of(flow, struct flow_offload_entry, flow);
224 clear_bit(IPS_OFFLOAD_BIT, &e->ct->status); 243 clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
225 244
245 if (nf_flow_has_expired(flow))
246 flow_offload_fixup_ct(e->ct);
247 else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
248 flow_offload_fixup_ct_timeout(e->ct);
249
226 flow_offload_free(flow); 250 flow_offload_free(flow);
227} 251}
228 252
@@ -298,11 +322,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
298 return err; 322 return err;
299} 323}
300 324
301static inline bool nf_flow_has_expired(const struct flow_offload *flow)
302{
303 return (__s32)(flow->timeout - (u32)jiffies) <= 0;
304}
305
306static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data) 325static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
307{ 326{
308 struct nf_flowtable *flow_table = data; 327 struct nf_flowtable *flow_table = data;
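
nf_flow_timeout_delta() centralizes the wraparound-safe jiffies comparison: subtracting two u32 tick values and casting to __s32 yields a signed "time remaining" that stays correct across the counter wrap. flow_offload_fixup_ct_timeout() then only rewinds ct->timeout when the flow's remaining time exceeds the protocol pickup timeout, so the fixup never extends a conntrack entry's lifetime. A demonstration of the signed-difference idiom:

    #include <stdint.h>
    #include <stdio.h>

    static int32_t timeout_delta(uint32_t deadline, uint32_t now)
    {
        return (int32_t)(deadline - now);
    }

    int main(void)
    {
        uint32_t now = 0xfffffff0u;       /* just before the u32 wrap */
        uint32_t later = now + 0x40;      /* wraps to 0x00000030 */

        printf("future : %d\n", timeout_delta(later, now));   /* 64 */
        printf("expired: %d\n", timeout_delta(now - 8, now)); /* -8 */
        return 0;
    }
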
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index cdfc33517e85..b9e7dd6e60ce 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -214,6 +214,24 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
214 return true; 214 return true;
215} 215}
216 216
217static int nf_flow_offload_dst_check(struct dst_entry *dst)
218{
219 if (unlikely(dst_xfrm(dst)))
220 return dst_check(dst, 0) ? 0 : -1;
221
222 return 0;
223}
224
225static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
226 const struct nf_hook_state *state,
227 struct dst_entry *dst)
228{
229 skb_orphan(skb);
230 skb_dst_set_noref(skb, dst);
231 dst_output(state->net, state->sk, skb);
232 return NF_STOLEN;
233}
234
217unsigned int 235unsigned int
218nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, 236nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
219 const struct nf_hook_state *state) 237 const struct nf_hook_state *state)
@@ -254,12 +272,25 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
254 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) 272 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
255 return NF_ACCEPT; 273 return NF_ACCEPT;
256 274
275 if (nf_flow_offload_dst_check(&rt->dst)) {
276 flow_offload_teardown(flow);
277 return NF_ACCEPT;
278 }
279
257 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) 280 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
258 return NF_DROP; 281 return NF_DROP;
259 282
260 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 283 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
261 iph = ip_hdr(skb); 284 iph = ip_hdr(skb);
262 ip_decrease_ttl(iph); 285 ip_decrease_ttl(iph);
286 skb->tstamp = 0;
287
288 if (unlikely(dst_xfrm(&rt->dst))) {
289 memset(skb->cb, 0, sizeof(struct inet_skb_parm));
290 IPCB(skb)->iif = skb->dev->ifindex;
291 IPCB(skb)->flags = IPSKB_FORWARDED;
292 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
293 }
263 294
264 skb->dev = outdev; 295 skb->dev = outdev;
265 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); 296 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
467 sizeof(*ip6h))) 498 sizeof(*ip6h)))
468 return NF_ACCEPT; 499 return NF_ACCEPT;
469 500
501 if (nf_flow_offload_dst_check(&rt->dst)) {
502 flow_offload_teardown(flow);
503 return NF_ACCEPT;
504 }
505
470 if (skb_try_make_writable(skb, sizeof(*ip6h))) 506 if (skb_try_make_writable(skb, sizeof(*ip6h)))
471 return NF_DROP; 507 return NF_DROP;
472 508
@@ -476,6 +512,14 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
476 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 512 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
477 ip6h = ipv6_hdr(skb); 513 ip6h = ipv6_hdr(skb);
478 ip6h->hop_limit--; 514 ip6h->hop_limit--;
515 skb->tstamp = 0;
516
517 if (unlikely(dst_xfrm(&rt->dst))) {
518 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
519 IP6CB(skb)->iif = skb->dev->ifindex;
520 IP6CB(skb)->flags = IP6SKB_FORWARDED;
521 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
522 }
479 523
480 skb->dev = outdev; 524 skb->dev = outdev;
481 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); 525 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 605a7cfe7ca7..d47469f824a1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -138,9 +138,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
138 return; 138 return;
139 139
140 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { 140 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
141 if (trans->msg_type == NFT_MSG_NEWSET && 141 switch (trans->msg_type) {
142 nft_trans_set(trans) == set) { 142 case NFT_MSG_NEWSET:
143 set->bound = true; 143 if (nft_trans_set(trans) == set)
144 nft_trans_set_bound(trans) = true;
145 break;
146 case NFT_MSG_NEWSETELEM:
147 if (nft_trans_elem_set(trans) == set)
148 nft_trans_elem_set_bound(trans) = true;
144 break; 149 break;
145 } 150 }
146 } 151 }
@@ -1662,6 +1667,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
1662 1667
1663 chain->flags |= NFT_BASE_CHAIN | flags; 1668 chain->flags |= NFT_BASE_CHAIN | flags;
1664 basechain->policy = NF_ACCEPT; 1669 basechain->policy = NF_ACCEPT;
1670 if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
1671 nft_chain_offload_priority(basechain) < 0)
1672 return -EOPNOTSUPP;
1673
1665 flow_block_init(&basechain->flow_block); 1674 flow_block_init(&basechain->flow_block);
1666 } else { 1675 } else {
1667 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 1676 chain = kzalloc(sizeof(*chain), GFP_KERNEL);
@@ -6906,7 +6915,7 @@ static int __nf_tables_abort(struct net *net)
6906 break; 6915 break;
6907 case NFT_MSG_NEWSET: 6916 case NFT_MSG_NEWSET:
6908 trans->ctx.table->use--; 6917 trans->ctx.table->use--;
6909 if (nft_trans_set(trans)->bound) { 6918 if (nft_trans_set_bound(trans)) {
6910 nft_trans_destroy(trans); 6919 nft_trans_destroy(trans);
6911 break; 6920 break;
6912 } 6921 }
@@ -6918,7 +6927,7 @@ static int __nf_tables_abort(struct net *net)
6918 nft_trans_destroy(trans); 6927 nft_trans_destroy(trans);
6919 break; 6928 break;
6920 case NFT_MSG_NEWSETELEM: 6929 case NFT_MSG_NEWSETELEM:
6921 if (nft_trans_elem_set(trans)->bound) { 6930 if (nft_trans_elem_set_bound(trans)) {
6922 nft_trans_destroy(trans); 6931 nft_trans_destroy(trans);
6923 break; 6932 break;
6924 } 6933 }
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 64f5fd5f240e..c0d18c1d77ac 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -103,10 +103,11 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
103} 103}
104 104
105static void nft_flow_offload_common_init(struct flow_cls_common_offload *common, 105static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
106 __be16 proto, 106 __be16 proto, int priority,
107 struct netlink_ext_ack *extack) 107 struct netlink_ext_ack *extack)
108{ 108{
109 common->protocol = proto; 109 common->protocol = proto;
110 common->prio = priority;
110 common->extack = extack; 111 common->extack = extack;
111} 112}
112 113
@@ -124,6 +125,15 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
124 return 0; 125 return 0;
125} 126}
126 127
128int nft_chain_offload_priority(struct nft_base_chain *basechain)
129{
130 if (basechain->ops.priority <= 0 ||
131 basechain->ops.priority > USHRT_MAX)
132 return -1;
133
134 return 0;
135}
136
127static int nft_flow_offload_rule(struct nft_trans *trans, 137static int nft_flow_offload_rule(struct nft_trans *trans,
128 enum flow_cls_command command) 138 enum flow_cls_command command)
129{ 139{
@@ -142,7 +152,8 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
142 if (flow) 152 if (flow)
143 proto = flow->proto; 153 proto = flow->proto;
144 154
145 nft_flow_offload_common_init(&cls_flow.common, proto, &extack); 155 nft_flow_offload_common_init(&cls_flow.common, proto,
156 basechain->ops.priority, &extack);
146 cls_flow.command = command; 157 cls_flow.command = command;
147 cls_flow.cookie = (unsigned long) rule; 158 cls_flow.cookie = (unsigned long) rule;
148 if (flow) 159 if (flow)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index aa5f571d4361..01705ad74a9a 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
72{ 72{
73 struct nft_flow_offload *priv = nft_expr_priv(expr); 73 struct nft_flow_offload *priv = nft_expr_priv(expr);
74 struct nf_flowtable *flowtable = &priv->flowtable->data; 74 struct nf_flowtable *flowtable = &priv->flowtable->data;
75 struct tcphdr _tcph, *tcph = NULL;
75 enum ip_conntrack_info ctinfo; 76 enum ip_conntrack_info ctinfo;
76 struct nf_flow_route route; 77 struct nf_flow_route route;
77 struct flow_offload *flow; 78 struct flow_offload *flow;
78 enum ip_conntrack_dir dir; 79 enum ip_conntrack_dir dir;
79 bool is_tcp = false;
80 struct nf_conn *ct; 80 struct nf_conn *ct;
81 int ret; 81 int ret;
82 82
@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
89 89
90 switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { 90 switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
91 case IPPROTO_TCP: 91 case IPPROTO_TCP:
92 is_tcp = true; 92 tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
93 sizeof(_tcph), &_tcph);
94 if (unlikely(!tcph || tcph->fin || tcph->rst))
95 goto out;
93 break; 96 break;
94 case IPPROTO_UDP: 97 case IPPROTO_UDP:
95 break; 98 break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
115 if (!flow) 118 if (!flow)
116 goto err_flow_alloc; 119 goto err_flow_alloc;
117 120
118 if (is_tcp) { 121 if (tcph) {
119 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 122 ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
120 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; 123 ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
121 } 124 }
@@ -146,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
146 return nft_chain_validate_hooks(ctx->chain, hook_mask); 149 return nft_chain_validate_hooks(ctx->chain, hook_mask);
147} 150}
148 151
152static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
153 [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING,
154 .len = NFT_NAME_MAXLEN - 1 },
155};
156
149static int nft_flow_offload_init(const struct nft_ctx *ctx, 157static int nft_flow_offload_init(const struct nft_ctx *ctx,
150 const struct nft_expr *expr, 158 const struct nft_expr *expr,
151 const struct nlattr * const tb[]) 159 const struct nlattr * const tb[])
@@ -204,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
204static struct nft_expr_type nft_flow_offload_type __read_mostly = { 212static struct nft_expr_type nft_flow_offload_type __read_mostly = {
205 .name = "flow_offload", 213 .name = "flow_offload",
206 .ops = &nft_flow_offload_ops, 214 .ops = &nft_flow_offload_ops,
215 .policy = nft_flow_offload_policy,
207 .maxattr = NFTA_FLOW_MAX, 216 .maxattr = NFTA_FLOW_MAX,
208 .owner = THIS_MODULE, 217 .owner = THIS_MODULE,
209}; 218};
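
Replacing the is_tcp flag with skb_header_pointer() both verifies that a complete TCP header is actually present (copying it into the on-stack _tcph when the skb data is non-linear) and lets the expression decline to offload flows already carrying FIN or RST. A user-space model of the pattern; unlike the kernel helper, this version always copies instead of returning a pointer into linear data:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static const void *header_pointer(const void *pkt, size_t pkt_len,
                                      size_t offset, size_t hdr_len,
                                      void *storage)
    {
        if (offset + hdr_len > pkt_len)
            return NULL;          /* truncated header: caller bails out */
        memcpy(storage, (const char *)pkt + offset, hdr_len);
        return storage;
    }

    struct toy_tcphdr { unsigned char flags; };

    int main(void)
    {
        unsigned char pkt[40] = { 0 };
        struct toy_tcphdr _tcph;
        const struct toy_tcphdr *tcph;

        tcph = header_pointer(pkt, sizeof(pkt), 20, sizeof(_tcph), &_tcph);
        if (!tcph || (tcph->flags & 0x05))  /* FIN (0x01) | RST (0x04) */
            puts("skip offload");
        else
            puts("offload ok");
        return 0;
    }
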
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index d0ab1adf5bff..5aab6df74e0f 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
54 nfnl_acct_put(info->nfacct); 54 nfnl_acct_put(info->nfacct);
55} 55}
56 56
57static struct xt_match nfacct_mt_reg __read_mostly = { 57static struct xt_match nfacct_mt_reg[] __read_mostly = {
58 .name = "nfacct", 58 {
59 .family = NFPROTO_UNSPEC, 59 .name = "nfacct",
60 .checkentry = nfacct_mt_checkentry, 60 .revision = 0,
61 .match = nfacct_mt, 61 .family = NFPROTO_UNSPEC,
62 .destroy = nfacct_mt_destroy, 62 .checkentry = nfacct_mt_checkentry,
63 .matchsize = sizeof(struct xt_nfacct_match_info), 63 .match = nfacct_mt,
64 .usersize = offsetof(struct xt_nfacct_match_info, nfacct), 64 .destroy = nfacct_mt_destroy,
65 .me = THIS_MODULE, 65 .matchsize = sizeof(struct xt_nfacct_match_info),
66 .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
67 .me = THIS_MODULE,
68 },
69 {
70 .name = "nfacct",
71 .revision = 1,
72 .family = NFPROTO_UNSPEC,
73 .checkentry = nfacct_mt_checkentry,
74 .match = nfacct_mt,
75 .destroy = nfacct_mt_destroy,
76 .matchsize = sizeof(struct xt_nfacct_match_info_v1),
77 .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct),
78 .me = THIS_MODULE,
79 },
66}; 80};
67 81
68static int __init nfacct_mt_init(void) 82static int __init nfacct_mt_init(void)
69{ 83{
70 return xt_register_match(&nfacct_mt_reg); 84 return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
71} 85}
72 86
73static void __exit nfacct_mt_exit(void) 87static void __exit nfacct_mt_exit(void)
74{ 88{
75 xt_unregister_match(&nfacct_mt_reg); 89 xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
76} 90}
77 91
78module_init(nfacct_mt_init); 92module_init(nfacct_mt_init);
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index ead7c6022208..b92b22ce8abd 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
101 if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && 101 if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) &&
102 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || 102 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) ||
103 info->invert & XT_PHYSDEV_OP_BRIDGED) && 103 info->invert & XT_PHYSDEV_OP_BRIDGED) &&
104 par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | 104 par->hook_mask & (1 << NF_INET_LOCAL_OUT)) {
105 (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
106 pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); 105 pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
107 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) 106 return -EINVAL;
108 return -EINVAL;
109 } 107 }
110 108
111 if (!brnf_probed) { 109 if (!brnf_probed) {
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 848c6eb55064..05249eb45082 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
67 struct md_mark mark; 67 struct md_mark mark;
68 struct md_labels labels; 68 struct md_labels labels;
69 char timeout[CTNL_TIMEOUT_NAME_MAX]; 69 char timeout[CTNL_TIMEOUT_NAME_MAX];
70 struct nf_ct_timeout *nf_ct_timeout;
70#if IS_ENABLED(CONFIG_NF_NAT) 71#if IS_ENABLED(CONFIG_NF_NAT)
71 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */ 72 struct nf_nat_range2 range; /* Only present for SRC NAT and DST NAT. */
72#endif 73#endif
@@ -524,6 +525,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key,
524 return -EPFNOSUPPORT; 525 return -EPFNOSUPPORT;
525 } 526 }
526 527
528 /* The key extracted from the fragment that completed this datagram
529 * likely didn't have an L4 header, so regenerate it.
530 */
531 ovs_flow_key_update_l3l4(skb, key);
532
527 key->ip.frag = OVS_FRAG_TYPE_NONE; 533 key->ip.frag = OVS_FRAG_TYPE_NONE;
528 skb_clear_hash(skb); 534 skb_clear_hash(skb);
529 skb->ignore_df = 1; 535 skb->ignore_df = 1;
@@ -697,6 +703,14 @@ static bool skb_nfct_cached(struct net *net,
697 if (help && rcu_access_pointer(help->helper) != info->helper) 703 if (help && rcu_access_pointer(help->helper) != info->helper)
698 return false; 704 return false;
699 } 705 }
706 if (info->nf_ct_timeout) {
707 struct nf_conn_timeout *timeout_ext;
708
709 timeout_ext = nf_ct_timeout_find(ct);
710 if (!timeout_ext || info->nf_ct_timeout !=
711 rcu_dereference(timeout_ext->timeout))
712 return false;
713 }
700 /* Force conntrack entry direction to the current packet? */ 714 /* Force conntrack entry direction to the current packet? */
701 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) { 715 if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
702 /* Delete the conntrack entry if confirmed, else just release 716 /* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1579,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
1565 case OVS_CT_ATTR_TIMEOUT: 1579 case OVS_CT_ATTR_TIMEOUT:
1566 memcpy(info->timeout, nla_data(a), nla_len(a)); 1580 memcpy(info->timeout, nla_data(a), nla_len(a));
1567 if (!memchr(info->timeout, '\0', nla_len(a))) { 1581 if (!memchr(info->timeout, '\0', nla_len(a))) {
1568 OVS_NLERR(log, "Invalid conntrack helper"); 1582 OVS_NLERR(log, "Invalid conntrack timeout");
1569 return -EINVAL; 1583 return -EINVAL;
1570 } 1584 }
1571 break; 1585 break;
@@ -1657,6 +1671,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
1657 ct_info.timeout)) 1671 ct_info.timeout))
1658 pr_info_ratelimited("Failed to associated timeout " 1672 pr_info_ratelimited("Failed to associated timeout "
1659 "policy `%s'\n", ct_info.timeout); 1673 "policy `%s'\n", ct_info.timeout);
1674 else
1675 ct_info.nf_ct_timeout = rcu_dereference(
1676 nf_ct_timeout_find(ct_info.ct)->timeout);
1677
1660 } 1678 }
1661 1679
1662 if (helper) { 1680 if (helper) {
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index bc89e16e0505..9d81d2c7bf82 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -523,78 +523,15 @@ static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
523} 523}
524 524
525/** 525/**
526 * key_extract - extracts a flow key from an Ethernet frame. 526 * key_extract_l3l4 - extracts L3/L4 header information.
527 * @skb: sk_buff that contains the frame, with skb->data pointing to the 527 * @skb: sk_buff that contains the frame, with skb->data pointing to the
528 * Ethernet header 528 * L3 header
529 * @key: output flow key 529 * @key: output flow key
530 * 530 *
531 * The caller must ensure that skb->len >= ETH_HLEN.
532 *
533 * Returns 0 if successful, otherwise a negative errno value.
534 *
535 * Initializes @skb header fields as follows:
536 *
537 * - skb->mac_header: the L2 header.
538 *
539 * - skb->network_header: just past the L2 header, or just past the
540 * VLAN header, to the first byte of the L2 payload.
541 *
542 * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
543 * on output, then just past the IP header, if one is present and
544 * of a correct length, otherwise the same as skb->network_header.
545 * For other key->eth.type values it is left untouched.
546 *
547 * - skb->protocol: the type of the data starting at skb->network_header.
548 * Equals to key->eth.type.
549 */ 531 */
550static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) 532static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
551{ 533{
552 int error; 534 int error;
553 struct ethhdr *eth;
554
555 /* Flags are always used as part of stats */
556 key->tp.flags = 0;
557
558 skb_reset_mac_header(skb);
559
560 /* Link layer. */
561 clear_vlan(key);
562 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
563 if (unlikely(eth_type_vlan(skb->protocol)))
564 return -EINVAL;
565
566 skb_reset_network_header(skb);
567 key->eth.type = skb->protocol;
568 } else {
569 eth = eth_hdr(skb);
570 ether_addr_copy(key->eth.src, eth->h_source);
571 ether_addr_copy(key->eth.dst, eth->h_dest);
572
573 __skb_pull(skb, 2 * ETH_ALEN);
574 /* We are going to push all headers that we pull, so no need to
575 * update skb->csum here.
576 */
577
578 if (unlikely(parse_vlan(skb, key)))
579 return -ENOMEM;
580
581 key->eth.type = parse_ethertype(skb);
582 if (unlikely(key->eth.type == htons(0)))
583 return -ENOMEM;
584
585 /* Multiple tagged packets need to retain TPID to satisfy
586 * skb_vlan_pop(), which will later shift the ethertype into
587 * skb->protocol.
588 */
589 if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
590 skb->protocol = key->eth.cvlan.tpid;
591 else
592 skb->protocol = key->eth.type;
593
594 skb_reset_network_header(skb);
595 __skb_push(skb, skb->data - skb_mac_header(skb));
596 }
597 skb_reset_mac_len(skb);
598 535
599 /* Network layer. */ 536 /* Network layer. */
600 if (key->eth.type == htons(ETH_P_IP)) { 537 if (key->eth.type == htons(ETH_P_IP)) {
@@ -623,6 +560,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
623 offset = nh->frag_off & htons(IP_OFFSET); 560 offset = nh->frag_off & htons(IP_OFFSET);
624 if (offset) { 561 if (offset) {
625 key->ip.frag = OVS_FRAG_TYPE_LATER; 562 key->ip.frag = OVS_FRAG_TYPE_LATER;
563 memset(&key->tp, 0, sizeof(key->tp));
626 return 0; 564 return 0;
627 } 565 }
628 if (nh->frag_off & htons(IP_MF) || 566 if (nh->frag_off & htons(IP_MF) ||
@@ -740,8 +678,10 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
740 return error; 678 return error;
741 } 679 }
742 680
743 if (key->ip.frag == OVS_FRAG_TYPE_LATER) 681 if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
682 memset(&key->tp, 0, sizeof(key->tp));
744 return 0; 683 return 0;
684 }
745 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 685 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
746 key->ip.frag = OVS_FRAG_TYPE_FIRST; 686 key->ip.frag = OVS_FRAG_TYPE_FIRST;
747 687
@@ -788,6 +728,92 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
788 return 0; 728 return 0;
789} 729}
790 730
731/**
732 * key_extract - extracts a flow key from an Ethernet frame.
733 * @skb: sk_buff that contains the frame, with skb->data pointing to the
734 * Ethernet header
735 * @key: output flow key
736 *
737 * The caller must ensure that skb->len >= ETH_HLEN.
738 *
739 * Returns 0 if successful, otherwise a negative errno value.
740 *
741 * Initializes @skb header fields as follows:
742 *
743 * - skb->mac_header: the L2 header.
744 *
745 * - skb->network_header: just past the L2 header, or just past the
746 * VLAN header, to the first byte of the L2 payload.
747 *
748 * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
749 * on output, then just past the IP header, if one is present and
750 * of a correct length, otherwise the same as skb->network_header.
751 * For other key->eth.type values it is left untouched.
752 *
753 * - skb->protocol: the type of the data starting at skb->network_header.
754 * Equals to key->eth.type.
755 */
756static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
757{
758 struct ethhdr *eth;
759
760 /* Flags are always used as part of stats */
761 key->tp.flags = 0;
762
763 skb_reset_mac_header(skb);
764
765 /* Link layer. */
766 clear_vlan(key);
767 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
768 if (unlikely(eth_type_vlan(skb->protocol)))
769 return -EINVAL;
770
771 skb_reset_network_header(skb);
772 key->eth.type = skb->protocol;
773 } else {
774 eth = eth_hdr(skb);
775 ether_addr_copy(key->eth.src, eth->h_source);
776 ether_addr_copy(key->eth.dst, eth->h_dest);
777
778 __skb_pull(skb, 2 * ETH_ALEN);
779 /* We are going to push all headers that we pull, so no need to
780 * update skb->csum here.
781 */
782
783 if (unlikely(parse_vlan(skb, key)))
784 return -ENOMEM;
785
786 key->eth.type = parse_ethertype(skb);
787 if (unlikely(key->eth.type == htons(0)))
788 return -ENOMEM;
789
790 /* Multiple tagged packets need to retain TPID to satisfy
791 * skb_vlan_pop(), which will later shift the ethertype into
792 * skb->protocol.
793 */
794 if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
795 skb->protocol = key->eth.cvlan.tpid;
796 else
797 skb->protocol = key->eth.type;
798
799 skb_reset_network_header(skb);
800 __skb_push(skb, skb->data - skb_mac_header(skb));
801 }
802
803 skb_reset_mac_len(skb);
804
805 /* Fill out L3/L4 key info, if any */
806 return key_extract_l3l4(skb, key);
807}
808
809/* Conntrack fragment handling expects L3 headers, so provide a helper
 810 * that re-extracts only those fields.
811 */
812int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
813{
814 return key_extract_l3l4(skb, key);
815}
816
791int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) 817int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
792{ 818{
793 int res; 819 int res;
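The split above exists so that code which already has skb->data at the L3 header — notably conntrack fragment handling, where reassembly invalidates the cached key — can refresh just the L3/L4 portion without redoing L2 parsing. A sketch of such a caller, assuming the defrag context shown (the helper and the defrag user constant are illustrative):

    static int refresh_key_after_defrag(struct net *net, struct sk_buff *skb,
                                        struct sw_flow_key *key)
    {
            int err;

            err = ip_defrag(net, skb, IP_DEFRAG_CONNTRACK_IN); /* assumed context */
            if (err)
                    return err;

            /* skb->data now points at the reassembled L3 header. */
            return ovs_flow_key_update_l3l4(skb, key);
    }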
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a5506e2d4b7a..b830d5ff7af4 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -270,6 +270,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
270u64 ovs_flow_used_time(unsigned long flow_jiffies); 270u64 ovs_flow_used_time(unsigned long flow_jiffies);
271 271
272int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); 272int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
273int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
273int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, 274int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
274 struct sk_buff *skb, 275 struct sk_buff *skb,
275 struct sw_flow_key *key); 276 struct sw_flow_key *key);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8d54f3047768..e2742b006d25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2618,6 +2618,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2618 2618
2619 mutex_lock(&po->pg_vec_lock); 2619 mutex_lock(&po->pg_vec_lock);
2620 2620
2621 /* The packet_sendmsg() check on tx_ring.pg_vec was lockless,
 2622 * so we need to confirm it under protection of pg_vec_lock.
2623 */
2624 if (unlikely(!po->tx_ring.pg_vec)) {
2625 err = -EBUSY;
2626 goto out;
2627 }
2621 if (likely(saddr == NULL)) { 2628 if (likely(saddr == NULL)) {
2622 dev = packet_cached_dev_get(po); 2629 dev = packet_cached_dev_get(po);
2623 proto = po->num; 2630 proto = po->num;
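The added check is the classic two-step locking pattern: an optimistic lockless test on the fast path must be repeated once the lock is held, because the ring can be torn down between the two points. Reduced to its shape (all names illustrative):

    struct ring {
            struct mutex    lock;
            void            *pg_vec;
    };

    static int send_on_ring(struct ring *r)
    {
            if (!READ_ONCE(r->pg_vec))      /* lockless fast-path test */
                    return -EBUSY;

            mutex_lock(&r->lock);
            if (unlikely(!r->pg_vec)) {     /* re-validate under the lock */
                    mutex_unlock(&r->lock);
                    return -EBUSY;
            }
            /* ... pg_vec is stable until the mutex is dropped ... */
            mutex_unlock(&r->lock);
            return 0;
    }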
diff --git a/net/psample/psample.c b/net/psample/psample.c
index 841f198ea1a8..66e4b61a350d 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -154,7 +154,7 @@ static void psample_group_destroy(struct psample_group *group)
154{ 154{
155 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); 155 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
156 list_del(&group->list); 156 list_del(&group->list);
157 kfree(group); 157 kfree_rcu(group, rcu);
158} 158}
159 159
160static struct psample_group * 160static struct psample_group *
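Replacing kfree() with kfree_rcu() defers the free until all pre-existing RCU read-side critical sections have finished, so lockless readers that already looked the group up can still dereference it safely; it requires an rcu_head member in the structure (added to struct psample_group elsewhere in this patch). A generic sketch of the idiom:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct item {                           /* illustrative stand-in */
            struct list_head        list;
            struct rcu_head         rcu;    /* required by kfree_rcu() */
    };

    static void item_destroy(struct item *it)
    {
            list_del_rcu(&it->list);        /* unlink; readers may still see it */
            kfree_rcu(it, rcu);             /* real kfree() after a grace period */
    }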
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ec05d91aa9a2..45acab2de0cf 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
291 void *buffer) 291 void *buffer)
292{ 292{
293 struct rds_info_rdma_connection *iinfo = buffer; 293 struct rds_info_rdma_connection *iinfo = buffer;
294 struct rds_ib_connection *ic; 294 struct rds_ib_connection *ic = conn->c_transport_data;
295 295
296 /* We will only ever look at IB transports */ 296 /* We will only ever look at IB transports */
297 if (conn->c_trans != &rds_ib_transport) 297 if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
301 301
302 iinfo->src_addr = conn->c_laddr.s6_addr32[3]; 302 iinfo->src_addr = conn->c_laddr.s6_addr32[3];
303 iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; 303 iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
304 iinfo->tos = conn->c_tos; 304 if (ic) {
305 iinfo->tos = conn->c_tos;
306 iinfo->sl = ic->i_sl;
307 }
305 308
306 memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); 309 memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
307 memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); 310 memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
308 if (rds_conn_state(conn) == RDS_CONN_UP) { 311 if (rds_conn_state(conn) == RDS_CONN_UP) {
309 struct rds_ib_device *rds_ibdev; 312 struct rds_ib_device *rds_ibdev;
310 313
311 ic = conn->c_transport_data;
312
313 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, 314 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
314 (union ib_gid *)&iinfo->dst_gid); 315 (union ib_gid *)&iinfo->dst_gid);
315 316
@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
329 void *buffer) 330 void *buffer)
330{ 331{
331 struct rds6_info_rdma_connection *iinfo6 = buffer; 332 struct rds6_info_rdma_connection *iinfo6 = buffer;
332 struct rds_ib_connection *ic; 333 struct rds_ib_connection *ic = conn->c_transport_data;
333 334
334 /* We will only ever look at IB transports */ 335 /* We will only ever look at IB transports */
335 if (conn->c_trans != &rds_ib_transport) 336 if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
337 338
338 iinfo6->src_addr = conn->c_laddr; 339 iinfo6->src_addr = conn->c_laddr;
339 iinfo6->dst_addr = conn->c_faddr; 340 iinfo6->dst_addr = conn->c_faddr;
341 if (ic) {
342 iinfo6->tos = conn->c_tos;
343 iinfo6->sl = ic->i_sl;
344 }
340 345
341 memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); 346 memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
342 memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); 347 memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
344 if (rds_conn_state(conn) == RDS_CONN_UP) { 349 if (rds_conn_state(conn) == RDS_CONN_UP) {
345 struct rds_ib_device *rds_ibdev; 350 struct rds_ib_device *rds_ibdev;
346 351
347 ic = conn->c_transport_data;
348 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, 352 rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
349 (union ib_gid *)&iinfo6->dst_gid); 353 (union ib_gid *)&iinfo6->dst_gid);
350 rds_ibdev = ic->rds_ibdev; 354 rds_ibdev = ic->rds_ibdev;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 303c6ee8bdb7..f2b558e8b5ea 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -220,6 +220,7 @@ struct rds_ib_connection {
220 /* Send/Recv vectors */ 220 /* Send/Recv vectors */
221 int i_scq_vector; 221 int i_scq_vector;
222 int i_rcq_vector; 222 int i_rcq_vector;
223 u8 i_sl;
223}; 224};
224 225
225/* This assumes that atomic_t is at least 32 bits */ 226/* This assumes that atomic_t is at least 32 bits */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fddaa09f7b0d..233f1368162b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
152 RDS_PROTOCOL_MINOR(conn->c_version), 152 RDS_PROTOCOL_MINOR(conn->c_version),
153 ic->i_flowctl ? ", flow control" : ""); 153 ic->i_flowctl ? ", flow control" : "");
154 154
155 /* receive sl from the peer */
156 ic->i_sl = ic->i_cm_id->route.path_rec->sl;
157
155 atomic_set(&ic->i_cq_quiesce, 0); 158 atomic_set(&ic->i_cq_quiesce, 0);
156 159
157 /* Init rings and fill recv. this needs to wait until protocol 160 /* Init rings and fill recv. this needs to wait until protocol
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9986d6065c4d..5f741e51b4ba 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
43static struct rdma_cm_id *rds6_rdma_listen_id; 43static struct rdma_cm_id *rds6_rdma_listen_id;
44#endif 44#endif
45 45
46/* Per IB specification 7.7.3, service level is a 4-bit field. */
47#define TOS_TO_SL(tos) ((tos) & 0xF)
48
46static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, 49static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
47 struct rdma_cm_event *event, 50 struct rdma_cm_event *event,
48 bool isv6) 51 bool isv6)
@@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
97 struct rds_ib_connection *ibic; 100 struct rds_ib_connection *ibic;
98 101
99 ibic = conn->c_transport_data; 102 ibic = conn->c_transport_data;
100 if (ibic && ibic->i_cm_id == cm_id) 103 if (ibic && ibic->i_cm_id == cm_id) {
104 cm_id->route.path_rec[0].sl =
105 TOS_TO_SL(conn->c_tos);
101 ret = trans->cm_initiate_connect(cm_id, isv6); 106 ret = trans->cm_initiate_connect(cm_id, isv6);
102 else 107 } else {
103 rds_conn_drop(conn); 108 rds_conn_drop(conn);
109 }
104 } 110 }
105 break; 111 break;
106 112
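TOS_TO_SL() truncates rather than translates: with a 4-bit SL field, a connection ToS of, say, 0x2A (0b101010) yields SL 0xA, and any ToS above 15 simply wraps into the 0-15 range. Illustrated:

    #define TOS_TO_SL(tos)  ((tos) & 0xF)   /* IB SL is 4 bits wide */

    /* e.g. TOS_TO_SL(0x2A) == 0xA: 0b101010 masked down to 0b1010 */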
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 853de4876088..a42ba7fa06d5 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -811,6 +811,7 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
811 811
812 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); 812 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence);
813 minfo6.len = be32_to_cpu(inc->i_hdr.h_len); 813 minfo6.len = be32_to_cpu(inc->i_hdr.h_len);
814 minfo6.tos = inc->i_conn->c_tos;
814 815
815 if (flip) { 816 if (flip) {
816 minfo6.laddr = *daddr; 817 minfo6.laddr = *daddr;
@@ -824,6 +825,8 @@ void rds6_inc_info_copy(struct rds_incoming *inc,
824 minfo6.fport = inc->i_hdr.h_dport; 825 minfo6.fport = inc->i_hdr.h_dport;
825 } 826 }
826 827
828 minfo6.flags = 0;
829
827 rds_info_copy(iter, &minfo6, sizeof(minfo6)); 830 rds_info_copy(iter, &minfo6, sizeof(minfo6));
828} 831}
829#endif 832#endif
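Setting minfo6.flags = 0 (and filling in tos) matters because the structure is subsequently copied to userspace by rds_info_copy(); any member left uninitialised would leak kernel stack contents. The belt-and-braces alternative is to zero the whole struct up front (struct name assumed from the function's context):

    struct rds6_info_message minfo6;

    memset(&minfo6, 0, sizeof(minfo6));     /* no uninitialised members or padding */
    /* ... then assign each reported field ... */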
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf153544..d72ddb67bb74 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
193 193
194service_in_use: 194service_in_use:
195 write_unlock(&local->services_lock); 195 write_unlock(&local->services_lock);
196 rxrpc_put_local(local); 196 rxrpc_unuse_local(local);
197 ret = -EADDRINUSE; 197 ret = -EADDRINUSE;
198error_unlock: 198error_unlock:
199 release_sock(&rx->sk); 199 release_sock(&rx->sk);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life);
402 */ 402 */
403void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call) 403void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
404{ 404{
405 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 405 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
406 rxrpc_propose_ack_ping_for_check_life); 406 rxrpc_propose_ack_ping_for_check_life);
407 rxrpc_send_ack_packet(call, true, NULL); 407 rxrpc_send_ack_packet(call, true, NULL);
408} 408}
@@ -862,7 +862,6 @@ static void rxrpc_sock_destructor(struct sock *sk)
862static int rxrpc_release_sock(struct sock *sk) 862static int rxrpc_release_sock(struct sock *sk)
863{ 863{
864 struct rxrpc_sock *rx = rxrpc_sk(sk); 864 struct rxrpc_sock *rx = rxrpc_sk(sk);
865 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
866 865
867 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); 866 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
868 867
@@ -898,10 +897,8 @@ static int rxrpc_release_sock(struct sock *sk)
898 rxrpc_release_calls_on_socket(rx); 897 rxrpc_release_calls_on_socket(rx);
899 flush_workqueue(rxrpc_workqueue); 898 flush_workqueue(rxrpc_workqueue);
900 rxrpc_purge_queue(&sk->sk_receive_queue); 899 rxrpc_purge_queue(&sk->sk_receive_queue);
901 rxrpc_queue_work(&rxnet->service_conn_reaper);
902 rxrpc_queue_work(&rxnet->client_conn_reaper);
903 900
904 rxrpc_put_local(rx->local); 901 rxrpc_unuse_local(rx->local);
905 rx->local = NULL; 902 rx->local = NULL;
906 key_put(rx->key); 903 key_put(rx->key);
907 rx->key = NULL; 904 rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 822f45386e31..8051dfdcf26d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -185,11 +185,17 @@ struct rxrpc_host_header {
185 * - max 48 bytes (struct sk_buff::cb) 185 * - max 48 bytes (struct sk_buff::cb)
186 */ 186 */
187struct rxrpc_skb_priv { 187struct rxrpc_skb_priv {
188 union { 188 atomic_t nr_ring_pins; /* Number of rxtx ring pins */
189 u8 nr_jumbo; /* Number of jumbo subpackets */ 189 u8 nr_subpackets; /* Number of subpackets */
190 }; 190 u8 rx_flags; /* Received packet flags */
191#define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */
192#define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */
191 union { 193 union {
192 int remain; /* amount of space remaining for next write */ 194 int remain; /* amount of space remaining for next write */
195
196 /* List of requested ACKs on subpackets */
197 unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) /
198 BITS_PER_LONG];
193 }; 199 };
194 200
195 struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ 201 struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */
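The hand-rolled array sizing — (RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) / BITS_PER_LONG — is the round-up-to-whole-longs idiom, equivalent to the DECLARE_BITMAP() helper; individual subpackets are then addressed with the usual bit operations. A sketch under that assumption:

    #include <linux/bitmap.h>

    /* Equivalent declaration via the kernel helper: */
    DECLARE_BITMAP(rx_req_ack, RXRPC_MAX_NR_JUMBO);

    static bool subpacket_wants_ack(const unsigned long *map, unsigned int j)
    {
            return test_bit(j, map);        /* set earlier with __set_bit(j, map) */
    }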
@@ -254,7 +260,8 @@ struct rxrpc_security {
254 */ 260 */
255struct rxrpc_local { 261struct rxrpc_local {
256 struct rcu_head rcu; 262 struct rcu_head rcu;
257 atomic_t usage; 263 atomic_t active_users; /* Number of users of the local endpoint */
264 atomic_t usage; /* Number of references to the structure */
258 struct rxrpc_net *rxnet; /* The network ns in which this resides */ 265 struct rxrpc_net *rxnet; /* The network ns in which this resides */
259 struct list_head link; 266 struct list_head link;
260 struct socket *socket; /* my UDP socket */ 267 struct socket *socket; /* my UDP socket */
@@ -612,8 +619,7 @@ struct rxrpc_call {
612#define RXRPC_TX_ANNO_LAST 0x04 619#define RXRPC_TX_ANNO_LAST 0x04
613#define RXRPC_TX_ANNO_RESENT 0x08 620#define RXRPC_TX_ANNO_RESENT 0x08
614 621
615#define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */ 622#define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */
616#define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */
617#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */ 623#define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */
618 rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but 624 rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but
619 * not hard-ACK'd packet follows this. 625 * not hard-ACK'd packet follows this.
@@ -649,7 +655,6 @@ struct rxrpc_call {
649 655
650 /* receive-phase ACK management */ 656 /* receive-phase ACK management */
651 u8 ackr_reason; /* reason to ACK */ 657 u8 ackr_reason; /* reason to ACK */
652 u16 ackr_skew; /* skew on packet being ACK'd */
653 rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */ 658 rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
654 rxrpc_serial_t ackr_first_seq; /* first sequence number received */ 659 rxrpc_serial_t ackr_first_seq; /* first sequence number received */
655 rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */ 660 rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
@@ -743,7 +748,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
743/* 748/*
744 * call_event.c 749 * call_event.c
745 */ 750 */
746void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, 751void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
747 enum rxrpc_propose_ack_trace); 752 enum rxrpc_propose_ack_trace);
748void rxrpc_process_call(struct work_struct *); 753void rxrpc_process_call(struct work_struct *);
749 754
@@ -905,6 +910,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *);
905void rxrpc_put_client_conn(struct rxrpc_connection *); 910void rxrpc_put_client_conn(struct rxrpc_connection *);
906void rxrpc_discard_expired_client_conns(struct work_struct *); 911void rxrpc_discard_expired_client_conns(struct work_struct *);
907void rxrpc_destroy_all_client_connections(struct rxrpc_net *); 912void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
913void rxrpc_clean_up_local_conns(struct rxrpc_local *);
908 914
909/* 915/*
910 * conn_event.c 916 * conn_event.c
@@ -1002,6 +1008,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
1002struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *); 1008struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
1003struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *); 1009struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
1004void rxrpc_put_local(struct rxrpc_local *); 1010void rxrpc_put_local(struct rxrpc_local *);
1011struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
1012void rxrpc_unuse_local(struct rxrpc_local *);
1005void rxrpc_queue_local(struct rxrpc_local *); 1013void rxrpc_queue_local(struct rxrpc_local *);
1006void rxrpc_destroy_all_locals(struct rxrpc_net *); 1014void rxrpc_destroy_all_locals(struct rxrpc_net *);
1007 1015
@@ -1103,6 +1111,7 @@ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
1103void rxrpc_packet_destructor(struct sk_buff *); 1111void rxrpc_packet_destructor(struct sk_buff *);
1104void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); 1112void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
1105void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); 1113void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
1114void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
1106void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); 1115void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
1107void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); 1116void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
1108void rxrpc_purge_queue(struct sk_buff_head *); 1117void rxrpc_purge_queue(struct sk_buff_head *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bc2adeb3acb9..cedbbb3a7c2e 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
43 * propose an ACK be sent 43 * propose an ACK be sent
44 */ 44 */
45static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, 45static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
46 u16 skew, u32 serial, bool immediate, 46 u32 serial, bool immediate, bool background,
47 bool background,
48 enum rxrpc_propose_ack_trace why) 47 enum rxrpc_propose_ack_trace why)
49{ 48{
50 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; 49 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
@@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
69 if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) { 68 if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
70 outcome = rxrpc_propose_ack_update; 69 outcome = rxrpc_propose_ack_update;
71 call->ackr_serial = serial; 70 call->ackr_serial = serial;
72 call->ackr_skew = skew;
73 } 71 }
74 if (!immediate) 72 if (!immediate)
75 goto trace; 73 goto trace;
76 } else if (prior > rxrpc_ack_priority[call->ackr_reason]) { 74 } else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
77 call->ackr_reason = ack_reason; 75 call->ackr_reason = ack_reason;
78 call->ackr_serial = serial; 76 call->ackr_serial = serial;
79 call->ackr_skew = skew;
80 } else { 77 } else {
81 outcome = rxrpc_propose_ack_subsume; 78 outcome = rxrpc_propose_ack_subsume;
82 } 79 }
@@ -137,11 +134,11 @@ trace:
137 * propose an ACK be sent, locking the call structure 134 * propose an ACK be sent, locking the call structure
138 */ 135 */
139void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, 136void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
140 u16 skew, u32 serial, bool immediate, bool background, 137 u32 serial, bool immediate, bool background,
141 enum rxrpc_propose_ack_trace why) 138 enum rxrpc_propose_ack_trace why)
142{ 139{
143 spin_lock_bh(&call->lock); 140 spin_lock_bh(&call->lock);
144 __rxrpc_propose_ACK(call, ack_reason, skew, serial, 141 __rxrpc_propose_ACK(call, ack_reason, serial,
145 immediate, background, why); 142 immediate, background, why);
146 spin_unlock_bh(&call->lock); 143 spin_unlock_bh(&call->lock);
147} 144}
@@ -202,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
202 continue; 199 continue;
203 200
204 skb = call->rxtx_buffer[ix]; 201 skb = call->rxtx_buffer[ix];
205 rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 202 rxrpc_see_skb(skb, rxrpc_skb_seen);
206 203
207 if (anno_type == RXRPC_TX_ANNO_UNACK) { 204 if (anno_type == RXRPC_TX_ANNO_UNACK) {
208 if (ktime_after(skb->tstamp, max_age)) { 205 if (ktime_after(skb->tstamp, max_age)) {
@@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
239 ack_ts = ktime_sub(now, call->acks_latest_ts); 236 ack_ts = ktime_sub(now, call->acks_latest_ts);
240 if (ktime_to_ns(ack_ts) < call->peer->rtt) 237 if (ktime_to_ns(ack_ts) < call->peer->rtt)
241 goto out; 238 goto out;
242 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 239 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
243 rxrpc_propose_ack_ping_for_lost_ack); 240 rxrpc_propose_ack_ping_for_lost_ack);
244 rxrpc_send_ack_packet(call, true, NULL); 241 rxrpc_send_ack_packet(call, true, NULL);
245 goto out; 242 goto out;
@@ -258,18 +255,18 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
258 continue; 255 continue;
259 256
260 skb = call->rxtx_buffer[ix]; 257 skb = call->rxtx_buffer[ix];
261 rxrpc_get_skb(skb, rxrpc_skb_tx_got); 258 rxrpc_get_skb(skb, rxrpc_skb_got);
262 spin_unlock_bh(&call->lock); 259 spin_unlock_bh(&call->lock);
263 260
264 if (rxrpc_send_data_packet(call, skb, true) < 0) { 261 if (rxrpc_send_data_packet(call, skb, true) < 0) {
265 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 262 rxrpc_free_skb(skb, rxrpc_skb_freed);
266 return; 263 return;
267 } 264 }
268 265
269 if (rxrpc_is_client_call(call)) 266 if (rxrpc_is_client_call(call))
270 rxrpc_expose_client_call(call); 267 rxrpc_expose_client_call(call);
271 268
272 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 269 rxrpc_free_skb(skb, rxrpc_skb_freed);
273 spin_lock_bh(&call->lock); 270 spin_lock_bh(&call->lock);
274 271
275 /* We need to clear the retransmit state, but there are two 272 /* We need to clear the retransmit state, but there are two
@@ -372,7 +369,7 @@ recheck_state:
372 if (time_after_eq(now, t)) { 369 if (time_after_eq(now, t)) {
373 trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now); 370 trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
374 cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET); 371 cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
375 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true, 372 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
376 rxrpc_propose_ack_ping_for_keepalive); 373 rxrpc_propose_ack_ping_for_keepalive);
377 set_bit(RXRPC_CALL_EV_PING, &call->events); 374 set_bit(RXRPC_CALL_EV_PING, &call->events);
378 } 375 }
@@ -407,7 +404,7 @@ recheck_state:
407 send_ack = NULL; 404 send_ack = NULL;
408 if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) { 405 if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
409 call->acks_lost_top = call->tx_top; 406 call->acks_lost_top = call->tx_top;
410 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 407 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
411 rxrpc_propose_ack_ping_for_lost_ack); 408 rxrpc_propose_ack_ping_for_lost_ack);
412 send_ack = &call->acks_lost_ping; 409 send_ack = &call->acks_lost_ping;
413 } 410 }
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 217b12be9e08..014548c259ce 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -422,6 +422,19 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
422} 422}
423 423
424/* 424/*
425 * Clean up the RxTx skb ring.
426 */
427static void rxrpc_cleanup_ring(struct rxrpc_call *call)
428{
429 int i;
430
431 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
432 rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
433 call->rxtx_buffer[i] = NULL;
434 }
435}
436
437/*
425 * Detach a call from its owning socket. 438 * Detach a call from its owning socket.
426 */ 439 */
427void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) 440void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
@@ -429,7 +442,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
429 const void *here = __builtin_return_address(0); 442 const void *here = __builtin_return_address(0);
430 struct rxrpc_connection *conn = call->conn; 443 struct rxrpc_connection *conn = call->conn;
431 bool put = false; 444 bool put = false;
432 int i;
433 445
434 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); 446 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
435 447
@@ -479,13 +491,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
479 if (conn) 491 if (conn)
480 rxrpc_disconnect_call(call); 492 rxrpc_disconnect_call(call);
481 493
482 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { 494 rxrpc_cleanup_ring(call);
483 rxrpc_free_skb(call->rxtx_buffer[i],
484 (call->tx_phase ? rxrpc_skb_tx_cleaned :
485 rxrpc_skb_rx_cleaned));
486 call->rxtx_buffer[i] = NULL;
487 }
488
489 _leave(""); 495 _leave("");
490} 496}
491 497
@@ -568,8 +574,6 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
568 */ 574 */
569void rxrpc_cleanup_call(struct rxrpc_call *call) 575void rxrpc_cleanup_call(struct rxrpc_call *call)
570{ 576{
571 int i;
572
573 _net("DESTROY CALL %d", call->debug_id); 577 _net("DESTROY CALL %d", call->debug_id);
574 578
575 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); 579 memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
@@ -580,13 +584,8 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
580 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); 584 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
581 ASSERTCMP(call->conn, ==, NULL); 585 ASSERTCMP(call->conn, ==, NULL);
582 586
583 /* Clean up the Rx/Tx buffer */ 587 rxrpc_cleanup_ring(call);
584 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) 588 rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
585 rxrpc_free_skb(call->rxtx_buffer[i],
586 (call->tx_phase ? rxrpc_skb_tx_cleaned :
587 rxrpc_skb_rx_cleaned));
588
589 rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);
590 589
591 call_rcu(&call->rcu, rxrpc_rcu_destroy_call); 590 call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
592} 591}
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index aea82f909c60..3f1da1b49f69 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -1162,3 +1162,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
1162 1162
1163 _leave(""); 1163 _leave("");
1164} 1164}
1165
1166/*
1167 * Clean up the client connections on a local endpoint.
1168 */
1169void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
1170{
1171 struct rxrpc_connection *conn, *tmp;
1172 struct rxrpc_net *rxnet = local->rxnet;
1173 unsigned int nr_active;
1174 LIST_HEAD(graveyard);
1175
1176 _enter("");
1177
1178 spin_lock(&rxnet->client_conn_cache_lock);
1179 nr_active = rxnet->nr_active_client_conns;
1180
1181 list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
1182 cache_link) {
1183 if (conn->params.local == local) {
1184 ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
1185
1186 trace_rxrpc_client(conn, -1, rxrpc_client_discard);
1187 if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
1188 BUG();
1189 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
1190 list_move(&conn->cache_link, &graveyard);
1191 nr_active--;
1192 }
1193 }
1194
1195 rxnet->nr_active_client_conns = nr_active;
1196 spin_unlock(&rxnet->client_conn_cache_lock);
1197 ASSERTCMP(nr_active, >=, 0);
1198
1199 while (!list_empty(&graveyard)) {
1200 conn = list_entry(graveyard.next,
1201 struct rxrpc_connection, cache_link);
1202 list_del_init(&conn->cache_link);
1203
1204 rxrpc_put_connection(conn);
1205 }
1206
1207 _leave(" [culled]");
1208}
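rxrpc_clean_up_local_conns() above uses the "graveyard" idiom: matching entries are moved onto a private list while the cache lock is held, then put one at a time with no locks held, so the final put (which may sleep or take other locks) never runs under the spinlock. Reduced to its skeleton (names illustrative):

    struct item {
            struct list_head link;
    };

    static void put_item(struct item *it);  /* assumed release helper; may sleep */

    static void reap_matching(struct list_head *cache, spinlock_t *lock,
                              bool (*dead)(struct item *))
    {
            struct item *it, *tmp;
            LIST_HEAD(graveyard);

            spin_lock(lock);
            list_for_each_entry_safe(it, tmp, cache, link)
                    if (dead(it))
                            list_move(&it->link, &graveyard);
            spin_unlock(lock);

            while (!list_empty(&graveyard)) {
                    it = list_first_entry(&graveyard, struct item, link);
                    list_del_init(&it->link);
                    put_item(it);
            }
    }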
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index df6624c140be..a1ceef4f5cd0 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -472,7 +472,7 @@ void rxrpc_process_connection(struct work_struct *work)
472 /* go through the conn-level event packets, releasing the ref on this 472 /* go through the conn-level event packets, releasing the ref on this
473 * connection that each one has when we've finished with it */ 473 * connection that each one has when we've finished with it */
474 while ((skb = skb_dequeue(&conn->rx_queue))) { 474 while ((skb = skb_dequeue(&conn->rx_queue))) {
475 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 475 rxrpc_see_skb(skb, rxrpc_skb_seen);
476 ret = rxrpc_process_event(conn, skb, &abort_code); 476 ret = rxrpc_process_event(conn, skb, &abort_code);
477 switch (ret) { 477 switch (ret) {
478 case -EPROTO: 478 case -EPROTO:
@@ -484,7 +484,7 @@ void rxrpc_process_connection(struct work_struct *work)
484 goto requeue_and_leave; 484 goto requeue_and_leave;
485 case -ECONNABORTED: 485 case -ECONNABORTED:
486 default: 486 default:
487 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 487 rxrpc_free_skb(skb, rxrpc_skb_freed);
488 break; 488 break;
489 } 489 }
490 } 490 }
@@ -501,6 +501,6 @@ requeue_and_leave:
501protocol_error: 501protocol_error:
502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0) 502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
503 goto requeue_and_leave; 503 goto requeue_and_leave;
504 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 504 rxrpc_free_skb(skb, rxrpc_skb_freed);
505 goto out; 505 goto out;
506} 506}
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 434ef392212b..ed05b6922132 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -398,7 +398,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
399 continue; 399 continue;
400 400
401 if (rxnet->live) { 401 if (rxnet->live && !conn->params.local->dead) {
402 idle_timestamp = READ_ONCE(conn->idle_timestamp); 402 idle_timestamp = READ_ONCE(conn->idle_timestamp);
403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; 403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
404 if (conn->params.local->service_closed) 404 if (conn->params.local->service_closed)
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..d122c53c8697 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
196 * Ping the other end to fill our RTT cache and to retrieve the rwind 196 * Ping the other end to fill our RTT cache and to retrieve the rwind
197 * and MTU parameters. 197 * and MTU parameters.
198 */ 198 */
199static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb, 199static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
200 int skew)
201{ 200{
202 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 201 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
203 ktime_t now = skb->tstamp; 202 ktime_t now = skb->tstamp;
204 203
205 if (call->peer->rtt_usage < 3 || 204 if (call->peer->rtt_usage < 3 ||
206 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 205 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
207 rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, 206 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
208 true, true, 207 true, true,
209 rxrpc_propose_ack_ping_for_params); 208 rxrpc_propose_ack_ping_for_params);
210} 209}
@@ -234,7 +233,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
234 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; 233 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
235 skb = call->rxtx_buffer[ix]; 234 skb = call->rxtx_buffer[ix];
236 annotation = call->rxtx_annotations[ix]; 235 annotation = call->rxtx_annotations[ix];
237 rxrpc_see_skb(skb, rxrpc_skb_tx_rotated); 236 rxrpc_see_skb(skb, rxrpc_skb_rotated);
238 call->rxtx_buffer[ix] = NULL; 237 call->rxtx_buffer[ix] = NULL;
239 call->rxtx_annotations[ix] = 0; 238 call->rxtx_annotations[ix] = 0;
240 skb->next = list; 239 skb->next = list;
@@ -259,7 +258,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
259 skb = list; 258 skb = list;
260 list = skb->next; 259 list = skb->next;
261 skb_mark_not_on_list(skb); 260 skb_mark_not_on_list(skb);
262 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 261 rxrpc_free_skb(skb, rxrpc_skb_freed);
263 } 262 }
264 263
265 return rot_last; 264 return rot_last;
@@ -348,7 +347,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
348} 347}
349 348
350/* 349/*
351 * Scan a jumbo packet to validate its structure and to work out how many 350 * Scan a data packet to validate its structure and to work out how many
352 * subpackets it contains. 351 * subpackets it contains.
353 * 352 *
354 * A jumbo packet is a collection of consecutive packets glued together with 353 * A jumbo packet is a collection of consecutive packets glued together with
@@ -359,16 +358,21 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
359 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any 358 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any
360 * size. 359 * size.
361 */ 360 */
362static bool rxrpc_validate_jumbo(struct sk_buff *skb) 361static bool rxrpc_validate_data(struct sk_buff *skb)
363{ 362{
364 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 363 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
365 unsigned int offset = sizeof(struct rxrpc_wire_header); 364 unsigned int offset = sizeof(struct rxrpc_wire_header);
366 unsigned int len = skb->len; 365 unsigned int len = skb->len;
367 int nr_jumbo = 1;
368 u8 flags = sp->hdr.flags; 366 u8 flags = sp->hdr.flags;
369 367
370 do { 368 for (;;) {
371 nr_jumbo++; 369 if (flags & RXRPC_REQUEST_ACK)
370 __set_bit(sp->nr_subpackets, sp->rx_req_ack);
371 sp->nr_subpackets++;
372
373 if (!(flags & RXRPC_JUMBO_PACKET))
374 break;
375
372 if (len - offset < RXRPC_JUMBO_SUBPKTLEN) 376 if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
373 goto protocol_error; 377 goto protocol_error;
374 if (flags & RXRPC_LAST_PACKET) 378 if (flags & RXRPC_LAST_PACKET)
@@ -377,9 +381,10 @@ static bool rxrpc_validate_jumbo(struct sk_buff *skb)
377 if (skb_copy_bits(skb, offset, &flags, 1) < 0) 381 if (skb_copy_bits(skb, offset, &flags, 1) < 0)
378 goto protocol_error; 382 goto protocol_error;
379 offset += sizeof(struct rxrpc_jumbo_header); 383 offset += sizeof(struct rxrpc_jumbo_header);
380 } while (flags & RXRPC_JUMBO_PACKET); 384 }
381 385
382 sp->nr_jumbo = nr_jumbo; 386 if (flags & RXRPC_LAST_PACKET)
387 sp->rx_flags |= RXRPC_SKB_INCL_LAST;
383 return true; 388 return true;
384 389
385protocol_error: 390protocol_error:
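The loop above walks the jumbo layout directly: every subpacket except the last occupies RXRPC_JUMBO_DATALEN bytes followed by a small jumbo header carrying the next subpacket's flags. Under that layout, subpacket j of a well-formed jumbo packet starts at the offset below (arithmetic shown for illustration):

    /* wire hdr | data[JUMBO_DATALEN] | jumbo hdr | data[...] | ... | last */
    offset = sizeof(struct rxrpc_wire_header) +
             j * (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header));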
@@ -400,10 +405,10 @@ protocol_error:
400 * (that information is encoded in the ACK packet). 405 * (that information is encoded in the ACK packet).
401 */ 406 */
402static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, 407static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
403 u8 annotation, bool *_jumbo_bad) 408 bool is_jumbo, bool *_jumbo_bad)
404{ 409{
405 /* Discard normal packets that are duplicates. */ 410 /* Discard normal packets that are duplicates. */
406 if (annotation == 0) 411 if (is_jumbo)
407 return; 412 return;
408 413
409 /* Skip jumbo subpackets that are duplicates. When we've had three or 414 /* Skip jumbo subpackets that are duplicates. When we've had three or
@@ -417,30 +422,30 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
417} 422}
418 423
419/* 424/*
420 * Process a DATA packet, adding the packet to the Rx ring. 425 * Process a DATA packet, adding the packet to the Rx ring. The caller's
426 * packet ref must be passed on or discarded.
421 */ 427 */
422static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, 428static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
423 u16 skew)
424{ 429{
425 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 430 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
426 enum rxrpc_call_state state; 431 enum rxrpc_call_state state;
427 unsigned int offset = sizeof(struct rxrpc_wire_header); 432 unsigned int j;
428 unsigned int ix;
429 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; 433 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
430 rxrpc_seq_t seq = sp->hdr.seq, hard_ack; 434 rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack;
431 bool immediate_ack = false, jumbo_bad = false, queued; 435 bool immediate_ack = false, jumbo_bad = false;
432 u16 len; 436 u8 ack = 0;
433 u8 ack = 0, flags, annotation = 0;
434 437
435 _enter("{%u,%u},{%u,%u}", 438 _enter("{%u,%u},{%u,%u}",
436 call->rx_hard_ack, call->rx_top, skb->len, seq); 439 call->rx_hard_ack, call->rx_top, skb->len, seq0);
437 440
438 _proto("Rx DATA %%%u { #%u f=%02x }", 441 _proto("Rx DATA %%%u { #%u f=%02x n=%u }",
439 sp->hdr.serial, seq, sp->hdr.flags); 442 sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets);
440 443
441 state = READ_ONCE(call->state); 444 state = READ_ONCE(call->state);
442 if (state >= RXRPC_CALL_COMPLETE) 445 if (state >= RXRPC_CALL_COMPLETE) {
446 rxrpc_free_skb(skb, rxrpc_skb_freed);
443 return; 447 return;
448 }
444 449
445 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { 450 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
446 unsigned long timo = READ_ONCE(call->next_req_timo); 451 unsigned long timo = READ_ONCE(call->next_req_timo);
@@ -465,156 +470,157 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
465 !rxrpc_receiving_reply(call)) 470 !rxrpc_receiving_reply(call))
466 goto unlock; 471 goto unlock;
467 472
468 call->ackr_prev_seq = seq; 473 call->ackr_prev_seq = seq0;
469
470 hard_ack = READ_ONCE(call->rx_hard_ack); 474 hard_ack = READ_ONCE(call->rx_hard_ack);
471 if (after(seq, hard_ack + call->rx_winsize)) {
472 ack = RXRPC_ACK_EXCEEDS_WINDOW;
473 ack_serial = serial;
474 goto ack;
475 }
476 475
477 flags = sp->hdr.flags; 476 if (sp->nr_subpackets > 1) {
478 if (flags & RXRPC_JUMBO_PACKET) {
479 if (call->nr_jumbo_bad > 3) { 477 if (call->nr_jumbo_bad > 3) {
480 ack = RXRPC_ACK_NOSPACE; 478 ack = RXRPC_ACK_NOSPACE;
481 ack_serial = serial; 479 ack_serial = serial;
482 goto ack; 480 goto ack;
483 } 481 }
484 annotation = 1;
485 } 482 }
486 483
487next_subpacket: 484 for (j = 0; j < sp->nr_subpackets; j++) {
488 queued = false; 485 rxrpc_serial_t serial = sp->hdr.serial + j;
489 ix = seq & RXRPC_RXTX_BUFF_MASK; 486 rxrpc_seq_t seq = seq0 + j;
490 len = skb->len; 487 unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK;
491 if (flags & RXRPC_JUMBO_PACKET) 488 bool terminal = (j == sp->nr_subpackets - 1);
492 len = RXRPC_JUMBO_DATALEN; 489 bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST);
493 490 u8 flags, annotation = j;
494 if (flags & RXRPC_LAST_PACKET) { 491
495 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 492 _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }",
496 seq != call->rx_top) { 493 j, serial, seq, terminal, last);
497 rxrpc_proto_abort("LSN", call, seq); 494
498 goto unlock; 495 if (last) {
499 } 496 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
500 } else { 497 seq != call->rx_top) {
501 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 498 rxrpc_proto_abort("LSN", call, seq);
502 after_eq(seq, call->rx_top)) { 499 goto unlock;
503 rxrpc_proto_abort("LSA", call, seq); 500 }
504 goto unlock; 501 } else {
502 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
503 after_eq(seq, call->rx_top)) {
504 rxrpc_proto_abort("LSA", call, seq);
505 goto unlock;
506 }
505 } 507 }
506 }
507
508 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
509 if (before_eq(seq, hard_ack)) {
510 ack = RXRPC_ACK_DUPLICATE;
511 ack_serial = serial;
512 goto skip;
513 }
514 508
515 if (flags & RXRPC_REQUEST_ACK && !ack) { 509 flags = 0;
516 ack = RXRPC_ACK_REQUESTED; 510 if (last)
517 ack_serial = serial; 511 flags |= RXRPC_LAST_PACKET;
518 } 512 if (!terminal)
513 flags |= RXRPC_JUMBO_PACKET;
514 if (test_bit(j, sp->rx_req_ack))
515 flags |= RXRPC_REQUEST_ACK;
516 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
519 517
520 if (call->rxtx_buffer[ix]) { 518 if (before_eq(seq, hard_ack)) {
521 rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
522 if (ack != RXRPC_ACK_DUPLICATE) {
523 ack = RXRPC_ACK_DUPLICATE; 519 ack = RXRPC_ACK_DUPLICATE;
524 ack_serial = serial; 520 ack_serial = serial;
521 continue;
525 } 522 }
526 immediate_ack = true;
527 goto skip;
528 }
529
530 /* Queue the packet. We use a couple of memory barriers here as we need
531 * to make sure that rx_top is perceived to be set after the buffer
532 * pointer and that the buffer pointer is set after the annotation and
533 * the skb data.
534 *
535 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
536 * and also rxrpc_fill_out_ack().
537 */
538 rxrpc_get_skb(skb, rxrpc_skb_rx_got);
539 call->rxtx_annotations[ix] = annotation;
540 smp_wmb();
541 call->rxtx_buffer[ix] = skb;
542 if (after(seq, call->rx_top)) {
543 smp_store_release(&call->rx_top, seq);
544 } else if (before(seq, call->rx_top)) {
545 /* Send an immediate ACK if we fill in a hole */
546 if (!ack) {
547 ack = RXRPC_ACK_DELAY;
548 ack_serial = serial;
549 }
550 immediate_ack = true;
551 }
552 if (flags & RXRPC_LAST_PACKET) {
553 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
554 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
555 } else {
556 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
557 }
558 queued = true;
559 523
560 if (after_eq(seq, call->rx_expect_next)) { 524 if (call->rxtx_buffer[ix]) {
561 if (after(seq, call->rx_expect_next)) { 525 rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1,
562 _net("OOS %u > %u", seq, call->rx_expect_next); 526 &jumbo_bad);
563 ack = RXRPC_ACK_OUT_OF_SEQUENCE; 527 if (ack != RXRPC_ACK_DUPLICATE) {
564 ack_serial = serial; 528 ack = RXRPC_ACK_DUPLICATE;
529 ack_serial = serial;
530 }
531 immediate_ack = true;
532 continue;
565 } 533 }
566 call->rx_expect_next = seq + 1;
567 }
568 534
569skip:
570 offset += len;
571 if (flags & RXRPC_JUMBO_PACKET) {
572 if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
573 rxrpc_proto_abort("XJF", call, seq);
574 goto unlock;
575 }
576 offset += sizeof(struct rxrpc_jumbo_header);
577 seq++;
578 serial++;
579 annotation++;
580 if (flags & RXRPC_JUMBO_PACKET)
581 annotation |= RXRPC_RX_ANNO_JLAST;
582 if (after(seq, hard_ack + call->rx_winsize)) { 535 if (after(seq, hard_ack + call->rx_winsize)) {
583 ack = RXRPC_ACK_EXCEEDS_WINDOW; 536 ack = RXRPC_ACK_EXCEEDS_WINDOW;
584 ack_serial = serial; 537 ack_serial = serial;
585 if (!jumbo_bad) { 538 if (flags & RXRPC_JUMBO_PACKET) {
586 call->nr_jumbo_bad++; 539 if (!jumbo_bad) {
587 jumbo_bad = true; 540 call->nr_jumbo_bad++;
541 jumbo_bad = true;
542 }
588 } 543 }
544
589 goto ack; 545 goto ack;
590 } 546 }
591 547
592 _proto("Rx DATA Jumbo %%%u", serial); 548 if (flags & RXRPC_REQUEST_ACK && !ack) {
593 goto next_subpacket; 549 ack = RXRPC_ACK_REQUESTED;
594 } 550 ack_serial = serial;
551 }
552
553 /* Queue the packet. We use a couple of memory barriers here as we need
554 * to make sure that rx_top is perceived to be set after the buffer
555 * pointer and that the buffer pointer is set after the annotation and
556 * the skb data.
557 *
558 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
559 * and also rxrpc_fill_out_ack().
560 */
561 if (!terminal)
562 rxrpc_get_skb(skb, rxrpc_skb_got);
563 call->rxtx_annotations[ix] = annotation;
564 smp_wmb();
565 call->rxtx_buffer[ix] = skb;
566 if (after(seq, call->rx_top)) {
567 smp_store_release(&call->rx_top, seq);
568 } else if (before(seq, call->rx_top)) {
569 /* Send an immediate ACK if we fill in a hole */
570 if (!ack) {
571 ack = RXRPC_ACK_DELAY;
572 ack_serial = serial;
573 }
574 immediate_ack = true;
575 }
595 576
596 if (queued && flags & RXRPC_LAST_PACKET && !ack) { 577 if (terminal) {
597 ack = RXRPC_ACK_DELAY; 578 /* From this point on, we're not allowed to touch the
598 ack_serial = serial; 579 * packet any longer as its ref now belongs to the Rx
580 * ring.
581 */
582 skb = NULL;
583 }
584
585 if (last) {
586 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
587 if (!ack) {
588 ack = RXRPC_ACK_DELAY;
589 ack_serial = serial;
590 }
591 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
592 } else {
593 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
594 }
595
596 if (after_eq(seq, call->rx_expect_next)) {
597 if (after(seq, call->rx_expect_next)) {
598 _net("OOS %u > %u", seq, call->rx_expect_next);
599 ack = RXRPC_ACK_OUT_OF_SEQUENCE;
600 ack_serial = serial;
601 }
602 call->rx_expect_next = seq + 1;
603 }
599 } 604 }
600 605
601ack: 606ack:
602 if (ack) 607 if (ack)
603 rxrpc_propose_ACK(call, ack, skew, ack_serial, 608 rxrpc_propose_ACK(call, ack, ack_serial,
604 immediate_ack, true, 609 immediate_ack, true,
605 rxrpc_propose_ack_input_data); 610 rxrpc_propose_ack_input_data);
606 else 611 else
607 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial, 612 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
608 false, true, 613 false, true,
609 rxrpc_propose_ack_input_data); 614 rxrpc_propose_ack_input_data);
610 615
611 if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { 616 if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) {
612 trace_rxrpc_notify_socket(call->debug_id, serial); 617 trace_rxrpc_notify_socket(call->debug_id, serial);
613 rxrpc_notify_socket(call); 618 rxrpc_notify_socket(call);
614 } 619 }
615 620
616unlock: 621unlock:
617 spin_unlock(&call->input_lock); 622 spin_unlock(&call->input_lock);
623 rxrpc_free_skb(skb, rxrpc_skb_freed);
618 _leave(" [queued]"); 624 _leave(" [queued]");
619} 625}
620 626
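Isolating the publication protocol that the barrier comments refer to: the writer orders annotation, then buffer pointer, then rx_top, and a reader that observes rx_top with acquire semantics is guaranteed to see a non-NULL buffer and a valid annotation for every slot up to it. A paired sketch (reader side reconstructed on that assumption):

    /* producer (as in rxrpc_input_data) */
    call->rxtx_annotations[ix] = annotation;
    smp_wmb();                              /* annotation visible before buffer */
    call->rxtx_buffer[ix] = skb;
    smp_store_release(&call->rx_top, seq);  /* buffer visible before rx_top */

    /* consumer (sketch) */
    rxrpc_seq_t top = smp_load_acquire(&call->rx_top);
    if (!after(seq, top))
            skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];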
@@ -822,8 +828,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
822 * soft-ACK means that the packet may be discarded and retransmission 828 * soft-ACK means that the packet may be discarded and retransmission
823 * requested. A phase is complete when all packets are hard-ACK'd. 829 * requested. A phase is complete when all packets are hard-ACK'd.
824 */ 830 */
825static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb, 831static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
826 u16 skew)
827{ 832{
828 struct rxrpc_ack_summary summary = { 0 }; 833 struct rxrpc_ack_summary summary = { 0 };
829 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 834 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +872,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
867 if (buf.ack.reason == RXRPC_ACK_PING) { 872 if (buf.ack.reason == RXRPC_ACK_PING) {
868 _proto("Rx ACK %%%u PING Request", sp->hdr.serial); 873 _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
869 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE, 874 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
870 skew, sp->hdr.serial, true, true, 875 sp->hdr.serial, true, true,
871 rxrpc_propose_ack_respond_to_ping); 876 rxrpc_propose_ack_respond_to_ping);
872 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) { 877 } else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
873 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, 878 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
874 skew, sp->hdr.serial, true, true, 879 sp->hdr.serial, true, true,
875 rxrpc_propose_ack_respond_to_ack); 880 rxrpc_propose_ack_respond_to_ack);
876 } 881 }
877 882
@@ -948,7 +953,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
948 RXRPC_TX_ANNO_LAST && 953 RXRPC_TX_ANNO_LAST &&
949 summary.nr_acks == call->tx_top - hard_ack && 954 summary.nr_acks == call->tx_top - hard_ack &&
950 rxrpc_is_client_call(call)) 955 rxrpc_is_client_call(call))
951 rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial, 956 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
952 false, true, 957 false, true,
953 rxrpc_propose_ack_ping_for_lost_reply); 958 rxrpc_propose_ack_ping_for_lost_reply);
954 959
@@ -1004,7 +1009,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
1004 * Process an incoming call packet. 1009 * Process an incoming call packet.
1005 */ 1010 */
1006static void rxrpc_input_call_packet(struct rxrpc_call *call, 1011static void rxrpc_input_call_packet(struct rxrpc_call *call,
1007 struct sk_buff *skb, u16 skew) 1012 struct sk_buff *skb)
1008{ 1013{
1009 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 1014 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
1010 unsigned long timo; 1015 unsigned long timo;
@@ -1023,11 +1028,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1023 1028
1024 switch (sp->hdr.type) { 1029 switch (sp->hdr.type) {
1025 case RXRPC_PACKET_TYPE_DATA: 1030 case RXRPC_PACKET_TYPE_DATA:
1026 rxrpc_input_data(call, skb, skew); 1031 rxrpc_input_data(call, skb);
1027 break; 1032 goto no_free;
1028 1033
1029 case RXRPC_PACKET_TYPE_ACK: 1034 case RXRPC_PACKET_TYPE_ACK:
1030 rxrpc_input_ack(call, skb, skew); 1035 rxrpc_input_ack(call, skb);
1031 break; 1036 break;
1032 1037
1033 case RXRPC_PACKET_TYPE_BUSY: 1038 case RXRPC_PACKET_TYPE_BUSY:
@@ -1051,6 +1056,8 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
1051 break; 1056 break;
1052 } 1057 }
1053 1058
1059 rxrpc_free_skb(skb, rxrpc_skb_freed);
1060no_free:
1054 _leave(""); 1061 _leave("");
1055} 1062}
1056 1063
@@ -1108,8 +1115,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
1108{ 1115{
1109 _enter("%p,%p", local, skb); 1116 _enter("%p,%p", local, skb);
1110 1117
1111 skb_queue_tail(&local->event_queue, skb); 1118 if (rxrpc_get_local_maybe(local)) {
1112 rxrpc_queue_local(local); 1119 skb_queue_tail(&local->event_queue, skb);
1120 rxrpc_queue_local(local);
1121 } else {
1122 rxrpc_free_skb(skb, rxrpc_skb_freed);
1123 }
1113} 1124}
1114 1125
1115/* 1126/*
@@ -1119,8 +1130,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
1119{ 1130{
1120 CHECK_SLAB_OKAY(&local->usage); 1131 CHECK_SLAB_OKAY(&local->usage);
1121 1132
1122 skb_queue_tail(&local->reject_queue, skb); 1133 if (rxrpc_get_local_maybe(local)) {
1123 rxrpc_queue_local(local); 1134 skb_queue_tail(&local->reject_queue, skb);
1135 rxrpc_queue_local(local);
1136 } else {
1137 rxrpc_free_skb(skb, rxrpc_skb_freed);
1138 }
1124} 1139}
1125 1140
1126/* 1141/*
@@ -1173,7 +1188,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1173 struct rxrpc_peer *peer = NULL; 1188 struct rxrpc_peer *peer = NULL;
1174 struct rxrpc_sock *rx = NULL; 1189 struct rxrpc_sock *rx = NULL;
1175 unsigned int channel; 1190 unsigned int channel;
1176 int skew = 0;
1177 1191
1178 _enter("%p", udp_sk); 1192 _enter("%p", udp_sk);
1179 1193
@@ -1184,7 +1198,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1184 if (skb->tstamp == 0) 1198 if (skb->tstamp == 0)
1185 skb->tstamp = ktime_get_real(); 1199 skb->tstamp = ktime_get_real();
1186 1200
1187 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1201 rxrpc_new_skb(skb, rxrpc_skb_received);
1188 1202
1189 skb_pull(skb, sizeof(struct udphdr)); 1203 skb_pull(skb, sizeof(struct udphdr));
1190 1204
@@ -1201,7 +1215,7 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1201 static int lose; 1215 static int lose;
1202 if ((lose++ & 7) == 7) { 1216 if ((lose++ & 7) == 7) {
1203 trace_rxrpc_rx_lose(sp); 1217 trace_rxrpc_rx_lose(sp);
1204 rxrpc_free_skb(skb, rxrpc_skb_rx_lost); 1218 rxrpc_free_skb(skb, rxrpc_skb_lost);
1205 return 0; 1219 return 0;
1206 } 1220 }
1207 } 1221 }
@@ -1233,9 +1247,26 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1233 if (sp->hdr.callNumber == 0 || 1247 if (sp->hdr.callNumber == 0 ||
1234 sp->hdr.seq == 0) 1248 sp->hdr.seq == 0)
1235 goto bad_message; 1249 goto bad_message;
1236 if (sp->hdr.flags & RXRPC_JUMBO_PACKET && 1250 if (!rxrpc_validate_data(skb))
1237 !rxrpc_validate_jumbo(skb))
1238 goto bad_message; 1251 goto bad_message;
1252
1253 /* Unshare the packet so that it can be modified for in-place
1254 * decryption.
1255 */
1256 if (sp->hdr.securityIndex != 0) {
1257 struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
1258 if (!nskb) {
1259 rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
1260 goto out;
1261 }
1262
1263 if (nskb != skb) {
1264 rxrpc_eaten_skb(skb, rxrpc_skb_received);
1265 rxrpc_new_skb(skb, rxrpc_skb_unshared);
1266 skb = nskb;
1267 sp = rxrpc_skb(skb);
1268 }
1269 }
1239 break; 1270 break;
1240 1271
1241 case RXRPC_PACKET_TYPE_CHALLENGE: 1272 case RXRPC_PACKET_TYPE_CHALLENGE:
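skb_unshare() above follows the usual ownership contract: if the skb's data was shared (cloned), a private copy is returned and the original reference is consumed; on allocation failure the original is freed and NULL comes back, so the caller must not touch skb afterwards. The bare pattern:

    struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);

    if (!nskb)
            return -ENOMEM;         /* skb already consumed by skb_unshare() */
    if (nskb != skb) {
            skb = nskb;             /* continue with the private copy */
            sp = rxrpc_skb(skb);    /* cb state lives in the copy now */
    }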
@@ -1301,15 +1332,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1301 goto out; 1332 goto out;
1302 } 1333 }
1303 1334
1304 /* Note the serial number skew here */ 1335 if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
1305 skew = (int)sp->hdr.serial - (int)conn->hi_serial; 1336 conn->hi_serial = sp->hdr.serial;
1306 if (skew >= 0) {
1307 if (skew > 0)
1308 conn->hi_serial = sp->hdr.serial;
1309 } else {
1310 skew = -skew;
1311 skew = min(skew, 65535);
1312 }
1313 1337
1314 /* Call-bound packets are routed by connection channel. */ 1338 /* Call-bound packets are routed by connection channel. */
1315 channel = sp->hdr.cid & RXRPC_CHANNELMASK; 1339 channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -1372,15 +1396,18 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1372 call = rxrpc_new_incoming_call(local, rx, skb); 1396 call = rxrpc_new_incoming_call(local, rx, skb);
1373 if (!call) 1397 if (!call)
1374 goto reject_packet; 1398 goto reject_packet;
1375 rxrpc_send_ping(call, skb, skew); 1399 rxrpc_send_ping(call, skb);
1376 mutex_unlock(&call->user_mutex); 1400 mutex_unlock(&call->user_mutex);
1377 } 1401 }
1378 1402
1379 rxrpc_input_call_packet(call, skb, skew); 1403 /* Process a call packet; this either discards or passes on the ref
1380 goto discard; 1404 * elsewhere.
1405 */
1406 rxrpc_input_call_packet(call, skb);
1407 goto out;
1381 1408
1382discard: 1409discard:
1383 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1410 rxrpc_free_skb(skb, rxrpc_skb_freed);
1384out: 1411out:
1385 trace_rxrpc_rx_done(0, 0); 1412 trace_rxrpc_rx_done(0, 0);
1386 return 0; 1413 return 0;
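
The two queueing hunks above share one pattern: before an skb is queued for the local endpoint's work item, a reference is taken conditionally, and if that fails (the endpoint's usage count already hit zero) the packet is freed rather than queued against a dying object. A minimal standalone sketch of the conditional-get primitive, in portable C11 with illustrative names rather than the kernel's atomics:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct endpoint {
		atomic_int usage;	/* reference count; 0 means tearing down */
	};

	/* Take a ref only if the object is still live (usage > 0). */
	static bool endpoint_get_maybe(struct endpoint *ep)
	{
		int n = atomic_load(&ep->usage);

		do {
			if (n == 0)
				return false;	/* too late: do not resurrect */
		} while (!atomic_compare_exchange_weak(&ep->usage, &n, n + 1));

		return true;
	}
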
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index e93a78f7c05e..3ce6d628cd75 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -90,7 +90,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
90 if (skb) { 90 if (skb) {
91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
92 92
93 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 93 rxrpc_see_skb(skb, rxrpc_skb_seen);
94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type); 94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type);
95 95
96 switch (sp->hdr.type) { 96 switch (sp->hdr.type) {
@@ -108,7 +108,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
108 break; 108 break;
109 } 109 }
110 110
111 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 111 rxrpc_free_skb(skb, rxrpc_skb_freed);
112 } 112 }
113 113
114 _leave(""); 114 _leave("");
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b..36587260cabd 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
79 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); 79 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
80 if (local) { 80 if (local) {
81 atomic_set(&local->usage, 1); 81 atomic_set(&local->usage, 1);
82 atomic_set(&local->active_users, 1);
82 local->rxnet = rxnet; 83 local->rxnet = rxnet;
83 INIT_LIST_HEAD(&local->link); 84 INIT_LIST_HEAD(&local->link);
84 INIT_WORK(&local->processor, rxrpc_local_processor); 85 INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
92 local->debug_id = atomic_inc_return(&rxrpc_debug_id); 93 local->debug_id = atomic_inc_return(&rxrpc_debug_id);
93 memcpy(&local->srx, srx, sizeof(*srx)); 94 memcpy(&local->srx, srx, sizeof(*srx));
94 local->srx.srx_service = 0; 95 local->srx.srx_service = 0;
95 trace_rxrpc_local(local, rxrpc_local_new, 1, NULL); 96 trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
96 } 97 }
97 98
98 _leave(" = %p", local); 99 _leave(" = %p", local);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
266 * bind the transport socket may still fail if we're attempting 267 * bind the transport socket may still fail if we're attempting
267 * to use a local address that the dying object is still using. 268 * to use a local address that the dying object is still using.
268 */ 269 */
269 if (!rxrpc_get_local_maybe(local)) { 270 if (!rxrpc_use_local(local))
270 cursor = cursor->next;
271 list_del_init(&local->link);
272 break; 271 break;
273 }
274 272
275 age = "old"; 273 age = "old";
276 goto found; 274 goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
284 if (ret < 0) 282 if (ret < 0)
285 goto sock_error; 283 goto sock_error;
286 284
287 list_add_tail(&local->link, cursor); 285 if (cursor != &rxnet->local_endpoints)
286 list_replace_init(cursor, &local->link);
287 else
288 list_add_tail(&local->link, cursor);
288 age = "new"; 289 age = "new";
289 290
290found: 291found:
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
320 int n; 321 int n;
321 322
322 n = atomic_inc_return(&local->usage); 323 n = atomic_inc_return(&local->usage);
323 trace_rxrpc_local(local, rxrpc_local_got, n, here); 324 trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
324 return local; 325 return local;
325} 326}
326 327
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
334 if (local) { 335 if (local) {
335 int n = atomic_fetch_add_unless(&local->usage, 1, 0); 336 int n = atomic_fetch_add_unless(&local->usage, 1, 0);
336 if (n > 0) 337 if (n > 0)
337 trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); 338 trace_rxrpc_local(local->debug_id, rxrpc_local_got,
339 n + 1, here);
338 else 340 else
339 local = NULL; 341 local = NULL;
340 } 342 }
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
342} 344}
343 345
344/* 346/*
345 * Queue a local endpoint. 347 * Queue a local endpoint and pass the caller's reference to the work item.
346 */ 348 */
347void rxrpc_queue_local(struct rxrpc_local *local) 349void rxrpc_queue_local(struct rxrpc_local *local)
348{ 350{
349 const void *here = __builtin_return_address(0); 351 const void *here = __builtin_return_address(0);
352 unsigned int debug_id = local->debug_id;
353 int n = atomic_read(&local->usage);
350 354
351 if (rxrpc_queue_work(&local->processor)) 355 if (rxrpc_queue_work(&local->processor))
352 trace_rxrpc_local(local, rxrpc_local_queued, 356 trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
353 atomic_read(&local->usage), here); 357 else
354} 358 rxrpc_put_local(local);
355
356/*
357 * A local endpoint reached its end of life.
358 */
359static void __rxrpc_put_local(struct rxrpc_local *local)
360{
361 _enter("%d", local->debug_id);
362 rxrpc_queue_work(&local->processor);
363} 359}
364 360
365/* 361/*
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
372 368
373 if (local) { 369 if (local) {
374 n = atomic_dec_return(&local->usage); 370 n = atomic_dec_return(&local->usage);
375 trace_rxrpc_local(local, rxrpc_local_put, n, here); 371 trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
376 372
377 if (n == 0) 373 if (n == 0)
378 __rxrpc_put_local(local); 374 call_rcu(&local->rcu, rxrpc_local_rcu);
375 }
376}
377
378/*
379 * Start using a local endpoint.
380 */
381struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
382{
383 unsigned int au;
384
385 local = rxrpc_get_local_maybe(local);
386 if (!local)
387 return NULL;
388
389 au = atomic_fetch_add_unless(&local->active_users, 1, 0);
390 if (au == 0) {
391 rxrpc_put_local(local);
392 return NULL;
393 }
394
395 return local;
396}
397
398/*
399 * Cease using a local endpoint. Once the number of active users reaches 0, we
400 * start the closure of the transport in the work processor.
401 */
402void rxrpc_unuse_local(struct rxrpc_local *local)
403{
404 unsigned int au;
405
406 if (local) {
407 au = atomic_dec_return(&local->active_users);
408 if (au == 0)
409 rxrpc_queue_local(local);
410 else
411 rxrpc_put_local(local);
379 } 412 }
380} 413}
381 414
@@ -393,21 +426,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
393 426
394 _enter("%d", local->debug_id); 427 _enter("%d", local->debug_id);
395 428
396 /* We can get a race between an incoming call packet queueing the
397 * processor again and the work processor starting the destruction
398 * process which will shut down the UDP socket.
399 */
400 if (local->dead) {
401 _leave(" [already dead]");
402 return;
403 }
404 local->dead = true; 429 local->dead = true;
405 430
406 mutex_lock(&rxnet->local_mutex); 431 mutex_lock(&rxnet->local_mutex);
407 list_del_init(&local->link); 432 list_del_init(&local->link);
408 mutex_unlock(&rxnet->local_mutex); 433 mutex_unlock(&rxnet->local_mutex);
409 434
410 ASSERT(RB_EMPTY_ROOT(&local->client_conns)); 435 rxrpc_clean_up_local_conns(local);
436 rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
411 ASSERT(!local->service); 437 ASSERT(!local->service);
412 438
413 if (socket) { 439 if (socket) {
@@ -422,13 +448,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
422 */ 448 */
423 rxrpc_purge_queue(&local->reject_queue); 449 rxrpc_purge_queue(&local->reject_queue);
424 rxrpc_purge_queue(&local->event_queue); 450 rxrpc_purge_queue(&local->event_queue);
425
426 _debug("rcu local %d", local->debug_id);
427 call_rcu(&local->rcu, rxrpc_local_rcu);
428} 451}
429 452
430/* 453/*
431 * Process events on an endpoint 454 * Process events on an endpoint. The work item carries a ref which
455 * we must release.
432 */ 456 */
433static void rxrpc_local_processor(struct work_struct *work) 457static void rxrpc_local_processor(struct work_struct *work)
434{ 458{
@@ -436,13 +460,15 @@ static void rxrpc_local_processor(struct work_struct *work)
436 container_of(work, struct rxrpc_local, processor); 460 container_of(work, struct rxrpc_local, processor);
437 bool again; 461 bool again;
438 462
439 trace_rxrpc_local(local, rxrpc_local_processing, 463 trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
440 atomic_read(&local->usage), NULL); 464 atomic_read(&local->usage), NULL);
441 465
442 do { 466 do {
443 again = false; 467 again = false;
444 if (atomic_read(&local->usage) == 0) 468 if (atomic_read(&local->active_users) == 0) {
445 return rxrpc_local_destroyer(local); 469 rxrpc_local_destroyer(local);
470 break;
471 }
446 472
447 if (!skb_queue_empty(&local->reject_queue)) { 473 if (!skb_queue_empty(&local->reject_queue)) {
448 rxrpc_reject_packets(local); 474 rxrpc_reject_packets(local);
@@ -454,6 +480,8 @@ static void rxrpc_local_processor(struct work_struct *work)
454 again = true; 480 again = true;
455 } 481 }
456 } while (again); 482 } while (again);
483
484 rxrpc_put_local(local);
457} 485}
458 486
459/* 487/*
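
The local_object.c changes split the endpoint's lifetime across two counters: usage pins the memory (at zero the structure is freed via call_rcu), while active_users pins the live transport (at zero the work processor runs rxrpc_local_destroyer to shut the socket down). A standalone sketch of the unuse path; queue_destroyer() and put_ep() are invented stand-ins for rxrpc_queue_local() and rxrpc_put_local():

	#include <stdatomic.h>

	struct local_ep {
		atomic_int usage;		/* memory references */
		atomic_int active_users;	/* users of the open transport */
	};

	/* Stub helpers; the real ones queue work / drop a usage ref. */
	static void queue_destroyer(struct local_ep *ep) { (void)ep; }
	static void put_ep(struct local_ep *ep) { atomic_fetch_sub(&ep->usage, 1); }

	static void unuse_ep(struct local_ep *ep)
	{
		if (atomic_fetch_sub(&ep->active_users, 1) == 1)
			queue_destroyer(ep);	/* last active user: tear down */
		else
			put_ep(ep);		/* still in use: just drop our ref */
	}
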
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 948e3fe249ec..935bb60fff56 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
87 *_top = top; 87 *_top = top;
88 88
89 pkt->ack.bufferSpace = htons(8); 89 pkt->ack.bufferSpace = htons(8);
90 pkt->ack.maxSkew = htons(call->ackr_skew); 90 pkt->ack.maxSkew = htons(0);
91 pkt->ack.firstPacket = htonl(hard_ack + 1); 91 pkt->ack.firstPacket = htonl(hard_ack + 1);
92 pkt->ack.previousPacket = htonl(call->ackr_prev_seq); 92 pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
93 pkt->ack.serial = htonl(serial); 93 pkt->ack.serial = htonl(serial);
@@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
228 if (ping) 228 if (ping)
229 clear_bit(RXRPC_CALL_PINGING, &call->flags); 229 clear_bit(RXRPC_CALL_PINGING, &call->flags);
230 rxrpc_propose_ACK(call, pkt->ack.reason, 230 rxrpc_propose_ACK(call, pkt->ack.reason,
231 ntohs(pkt->ack.maxSkew),
232 ntohl(pkt->ack.serial), 231 ntohl(pkt->ack.serial),
233 false, true, 232 false, true,
234 rxrpc_propose_ack_retry_tx); 233 rxrpc_propose_ack_retry_tx);
@@ -566,7 +565,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
566 memset(&whdr, 0, sizeof(whdr)); 565 memset(&whdr, 0, sizeof(whdr));
567 566
568 while ((skb = skb_dequeue(&local->reject_queue))) { 567 while ((skb = skb_dequeue(&local->reject_queue))) {
569 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 568 rxrpc_see_skb(skb, rxrpc_skb_seen);
570 sp = rxrpc_skb(skb); 569 sp = rxrpc_skb(skb);
571 570
572 switch (skb->mark) { 571 switch (skb->mark) {
@@ -582,7 +581,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
582 ioc = 2; 581 ioc = 2;
583 break; 582 break;
584 default: 583 default:
585 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 584 rxrpc_free_skb(skb, rxrpc_skb_freed);
586 continue; 585 continue;
587 } 586 }
588 587
@@ -607,7 +606,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
607 rxrpc_tx_point_reject); 606 rxrpc_tx_point_reject);
608 } 607 }
609 608
610 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 609 rxrpc_free_skb(skb, rxrpc_skb_freed);
611 } 610 }
612 611
613 _leave(""); 612 _leave("");
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 7666ec72d37e..c97ebdc043e4 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -163,11 +163,11 @@ void rxrpc_error_report(struct sock *sk)
163 _leave("UDP socket errqueue empty"); 163 _leave("UDP socket errqueue empty");
164 return; 164 return;
165 } 165 }
166 rxrpc_new_skb(skb, rxrpc_skb_rx_received); 166 rxrpc_new_skb(skb, rxrpc_skb_received);
167 serr = SKB_EXT_ERR(skb); 167 serr = SKB_EXT_ERR(skb);
168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { 168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
169 _leave("UDP empty message"); 169 _leave("UDP empty message");
170 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 170 rxrpc_free_skb(skb, rxrpc_skb_freed);
171 return; 171 return;
172 } 172 }
173 173
@@ -177,7 +177,7 @@ void rxrpc_error_report(struct sock *sk)
177 peer = NULL; 177 peer = NULL;
178 if (!peer) { 178 if (!peer) {
179 rcu_read_unlock(); 179 rcu_read_unlock();
180 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 180 rxrpc_free_skb(skb, rxrpc_skb_freed);
181 _leave(" [no peer]"); 181 _leave(" [no peer]");
182 return; 182 return;
183 } 183 }
@@ -189,7 +189,7 @@ void rxrpc_error_report(struct sock *sk)
189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) { 189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
190 rxrpc_adjust_mtu(peer, serr); 190 rxrpc_adjust_mtu(peer, serr);
191 rcu_read_unlock(); 191 rcu_read_unlock();
192 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 192 rxrpc_free_skb(skb, rxrpc_skb_freed);
193 rxrpc_put_peer(peer); 193 rxrpc_put_peer(peer);
194 _leave(" [MTU update]"); 194 _leave(" [MTU update]");
195 return; 195 return;
@@ -197,7 +197,7 @@ void rxrpc_error_report(struct sock *sk)
197 197
198 rxrpc_store_error(peer, serr); 198 rxrpc_store_error(peer, serr);
199 rcu_read_unlock(); 199 rcu_read_unlock();
200 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 200 rxrpc_free_skb(skb, rxrpc_skb_freed);
201 rxrpc_put_peer(peer); 201 rxrpc_put_peer(peer);
202 202
203 _leave(""); 203 _leave("");
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 99ce322d7caa..49bb972539aa 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -89,6 +89,15 @@ struct rxrpc_jumbo_header {
89#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ 89#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */
90#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) 90#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header))
91 91
92/*
93 * The maximum number of subpackets that can possibly fit in a UDP packet is:
94 *
95 * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1
96 * = ((65535 - 28 - 28) / 1416) + 1
97 * = 46 non-terminal packets and 1 terminal packet.
98 */
99#define RXRPC_MAX_NR_JUMBO 47
100
92/*****************************************************************************/ 101/*****************************************************************************/
93/* 102/*
94 * on-the-wire Rx ACK packet data payload 103 * on-the-wire Rx ACK packet data payload
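
The arithmetic in the new comment can be checked mechanically. A standalone C11 compile-time assertion reproducing the numbers used above (65535-byte maximum IP datagram, the 28 + 28 bytes of header budget from the comment, 1416-byte jumbo subpackets):

	#include <assert.h>

	#define RXRPC_JUMBO_SUBPKTLEN	1416	/* 1412 data + 4-byte jumbo header */
	#define RXRPC_MAX_NR_JUMBO	47

	/* ((65535 - 28 - 28) / 1416) + 1 = 46 + 1 = 47 */
	static_assert((65535 - 28 - 28) / RXRPC_JUMBO_SUBPKTLEN + 1 == RXRPC_MAX_NR_JUMBO,
		      "46 non-terminal subpackets plus one terminal subpacket");
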
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 5abf46cf9e6c..3b0becb12041 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); 141 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
142 142
143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { 143 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true, 144 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
145 rxrpc_propose_ack_terminal_ack); 145 rxrpc_propose_ack_terminal_ack);
146 //rxrpc_send_ack_packet(call, false, NULL); 146 //rxrpc_send_ack_packet(call, false, NULL);
147 } 147 }
@@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 159 call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 160 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
161 write_unlock_bh(&call->state_lock); 161 write_unlock_bh(&call->state_lock);
162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, 162 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
163 rxrpc_propose_ack_processing_op); 163 rxrpc_propose_ack_processing_op);
164 break; 164 break;
165 default: 165 default:
@@ -177,7 +177,8 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
177 struct sk_buff *skb; 177 struct sk_buff *skb;
178 rxrpc_serial_t serial; 178 rxrpc_serial_t serial;
179 rxrpc_seq_t hard_ack, top; 179 rxrpc_seq_t hard_ack, top;
180 u8 flags; 180 bool last = false;
181 u8 subpacket;
181 int ix; 182 int ix;
182 183
183 _enter("%d", call->debug_id); 184 _enter("%d", call->debug_id);
@@ -189,30 +190,32 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
189 hard_ack++; 190 hard_ack++;
190 ix = hard_ack & RXRPC_RXTX_BUFF_MASK; 191 ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
191 skb = call->rxtx_buffer[ix]; 192 skb = call->rxtx_buffer[ix];
192 rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); 193 rxrpc_see_skb(skb, rxrpc_skb_rotated);
193 sp = rxrpc_skb(skb); 194 sp = rxrpc_skb(skb);
194 flags = sp->hdr.flags; 195
195 serial = sp->hdr.serial; 196 subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
196 if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) 197 serial = sp->hdr.serial + subpacket;
197 serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; 198
199 if (subpacket == sp->nr_subpackets - 1 &&
200 sp->rx_flags & RXRPC_SKB_INCL_LAST)
201 last = true;
198 202
199 call->rxtx_buffer[ix] = NULL; 203 call->rxtx_buffer[ix] = NULL;
200 call->rxtx_annotations[ix] = 0; 204 call->rxtx_annotations[ix] = 0;
201 /* Barrier against rxrpc_input_data(). */ 205 /* Barrier against rxrpc_input_data(). */
202 smp_store_release(&call->rx_hard_ack, hard_ack); 206 smp_store_release(&call->rx_hard_ack, hard_ack);
203 207
204 rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 208 rxrpc_free_skb(skb, rxrpc_skb_freed);
205 209
206 _debug("%u,%u,%02x", hard_ack, top, flags);
207 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); 210 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
208 if (flags & RXRPC_LAST_PACKET) { 211 if (last) {
209 rxrpc_end_rx_phase(call, serial); 212 rxrpc_end_rx_phase(call, serial);
210 } else { 213 } else {
211 /* Check to see if there's an ACK that needs sending. */ 214 /* Check to see if there's an ACK that needs sending. */
212 if (after_eq(hard_ack, call->ackr_consumed + 2) || 215 if (after_eq(hard_ack, call->ackr_consumed + 2) ||
213 after_eq(top, call->ackr_seen + 2) || 216 after_eq(top, call->ackr_seen + 2) ||
214 (hard_ack == top && after(hard_ack, call->ackr_consumed))) 217 (hard_ack == top && after(hard_ack, call->ackr_consumed)))
215 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, 218 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
216 true, true, 219 true, true,
217 rxrpc_propose_ack_rotate_rx); 220 rxrpc_propose_ack_rotate_rx);
218 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) 221 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
@@ -233,18 +236,19 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
233 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 236 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
234 rxrpc_seq_t seq = sp->hdr.seq; 237 rxrpc_seq_t seq = sp->hdr.seq;
235 u16 cksum = sp->hdr.cksum; 238 u16 cksum = sp->hdr.cksum;
239 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
236 240
237 _enter(""); 241 _enter("");
238 242
239 /* For all but the head jumbo subpacket, the security checksum is in a 243 /* For all but the head jumbo subpacket, the security checksum is in a
240 * jumbo header immediately prior to the data. 244 * jumbo header immediately prior to the data.
241 */ 245 */
242 if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { 246 if (subpacket > 0) {
243 __be16 tmp; 247 __be16 tmp;
244 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) 248 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0)
245 BUG(); 249 BUG();
246 cksum = ntohs(tmp); 250 cksum = ntohs(tmp);
247 seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; 251 seq += subpacket;
248 } 252 }
249 253
250 return call->conn->security->verify_packet(call, skb, offset, len, 254 return call->conn->security->verify_packet(call, skb, offset, len,
@@ -265,19 +269,18 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
265 u8 *_annotation, 269 u8 *_annotation,
266 unsigned int *_offset, unsigned int *_len) 270 unsigned int *_offset, unsigned int *_len)
267{ 271{
272 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
268 unsigned int offset = sizeof(struct rxrpc_wire_header); 273 unsigned int offset = sizeof(struct rxrpc_wire_header);
269 unsigned int len; 274 unsigned int len;
270 int ret; 275 int ret;
271 u8 annotation = *_annotation; 276 u8 annotation = *_annotation;
277 u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
272 278
273 /* Locate the subpacket */ 279 /* Locate the subpacket */
280 offset += subpacket * RXRPC_JUMBO_SUBPKTLEN;
274 len = skb->len - offset; 281 len = skb->len - offset;
275 if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { 282 if (subpacket < sp->nr_subpackets - 1)
276 offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * 283 len = RXRPC_JUMBO_DATALEN;
277 RXRPC_JUMBO_SUBPKTLEN);
278 len = (annotation & RXRPC_RX_ANNO_JLAST) ?
279 skb->len - offset : RXRPC_JUMBO_SUBPKTLEN;
280 }
281 284
282 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { 285 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
283 ret = rxrpc_verify_packet(call, skb, annotation, offset, len); 286 ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -303,6 +306,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
303{ 306{
304 struct rxrpc_skb_priv *sp; 307 struct rxrpc_skb_priv *sp;
305 struct sk_buff *skb; 308 struct sk_buff *skb;
309 rxrpc_serial_t serial;
306 rxrpc_seq_t hard_ack, top, seq; 310 rxrpc_seq_t hard_ack, top, seq;
307 size_t remain; 311 size_t remain;
308 bool last; 312 bool last;
@@ -336,12 +340,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
336 break; 340 break;
337 } 341 }
338 smp_rmb(); 342 smp_rmb();
339 rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 343 rxrpc_see_skb(skb, rxrpc_skb_seen);
340 sp = rxrpc_skb(skb); 344 sp = rxrpc_skb(skb);
341 345
342 if (!(flags & MSG_PEEK)) 346 if (!(flags & MSG_PEEK)) {
347 serial = sp->hdr.serial;
348 serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
343 trace_rxrpc_receive(call, rxrpc_receive_front, 349 trace_rxrpc_receive(call, rxrpc_receive_front,
344 sp->hdr.serial, seq); 350 serial, seq);
351 }
345 352
346 if (msg) 353 if (msg)
347 sock_recv_timestamp(msg, sock->sk, skb); 354 sock_recv_timestamp(msg, sock->sk, skb);
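
With jumbo handling moved onto sp->nr_subpackets, locating a subpacket reduces to fixed-stride arithmetic: each subpacket starts at wire-header + index * RXRPC_JUMBO_SUBPKTLEN, and all but the last are exactly RXRPC_JUMBO_DATALEN bytes. A standalone sketch; the 28-byte wire-header size is an assumption here, and nr_subpackets is assumed to be at least 1:

	#define RXRPC_JUMBO_DATALEN	1412
	#define RXRPC_JUMBO_SUBPKTLEN	(RXRPC_JUMBO_DATALEN + 4)
	#define WIRE_HDR_LEN		28	/* assumed sizeof(struct rxrpc_wire_header) */

	static void locate_subpacket(unsigned int skb_len,
				     unsigned int subpacket, unsigned int nr_subpackets,
				     unsigned int *offset, unsigned int *len)
	{
		*offset = WIRE_HDR_LEN + subpacket * RXRPC_JUMBO_SUBPKTLEN;
		if (subpacket < nr_subpackets - 1)
			*len = RXRPC_JUMBO_DATALEN;	/* full inner subpacket */
		else
			*len = skb_len - *offset;	/* terminal subpacket: the rest */
	}
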
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index ae8cd8926456..c60c520fde7c 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -187,10 +187,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
187 struct rxrpc_skb_priv *sp; 187 struct rxrpc_skb_priv *sp;
188 struct rxrpc_crypt iv; 188 struct rxrpc_crypt iv;
189 struct scatterlist sg[16]; 189 struct scatterlist sg[16];
190 struct sk_buff *trailer;
191 unsigned int len; 190 unsigned int len;
192 u16 check; 191 u16 check;
193 int nsg;
194 int err; 192 int err;
195 193
196 sp = rxrpc_skb(skb); 194 sp = rxrpc_skb(skb);
@@ -214,15 +212,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
214 crypto_skcipher_encrypt(req); 212 crypto_skcipher_encrypt(req);
215 213
216 /* we want to encrypt the skbuff in-place */ 214 /* we want to encrypt the skbuff in-place */
217 nsg = skb_cow_data(skb, 0, &trailer); 215 err = -EMSGSIZE;
218 err = -ENOMEM; 216 if (skb_shinfo(skb)->nr_frags > 16)
219 if (nsg < 0 || nsg > 16)
220 goto out; 217 goto out;
221 218
222 len = data_size + call->conn->size_align - 1; 219 len = data_size + call->conn->size_align - 1;
223 len &= ~(call->conn->size_align - 1); 220 len &= ~(call->conn->size_align - 1);
224 221
225 sg_init_table(sg, nsg); 222 sg_init_table(sg, ARRAY_SIZE(sg));
226 err = skb_to_sgvec(skb, sg, 0, len); 223 err = skb_to_sgvec(skb, sg, 0, len);
227 if (unlikely(err < 0)) 224 if (unlikely(err < 0))
228 goto out; 225 goto out;
@@ -319,11 +316,10 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
319 struct rxkad_level1_hdr sechdr; 316 struct rxkad_level1_hdr sechdr;
320 struct rxrpc_crypt iv; 317 struct rxrpc_crypt iv;
321 struct scatterlist sg[16]; 318 struct scatterlist sg[16];
322 struct sk_buff *trailer;
323 bool aborted; 319 bool aborted;
324 u32 data_size, buf; 320 u32 data_size, buf;
325 u16 check; 321 u16 check;
326 int nsg, ret; 322 int ret;
327 323
328 _enter(""); 324 _enter("");
329 325
@@ -336,11 +332,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
336 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 332 /* Decrypt the skbuff in-place. TODO: We really want to decrypt
337 * directly into the target buffer. 333 * directly into the target buffer.
338 */ 334 */
339 nsg = skb_cow_data(skb, 0, &trailer); 335 sg_init_table(sg, ARRAY_SIZE(sg));
340 if (nsg < 0 || nsg > 16)
341 goto nomem;
342
343 sg_init_table(sg, nsg);
344 ret = skb_to_sgvec(skb, sg, offset, 8); 336 ret = skb_to_sgvec(skb, sg, offset, 8);
345 if (unlikely(ret < 0)) 337 if (unlikely(ret < 0))
346 return ret; 338 return ret;
@@ -388,10 +380,6 @@ protocol_error:
388 if (aborted) 380 if (aborted)
389 rxrpc_send_abort_packet(call); 381 rxrpc_send_abort_packet(call);
390 return -EPROTO; 382 return -EPROTO;
391
392nomem:
393 _leave(" = -ENOMEM");
394 return -ENOMEM;
395} 383}
396 384
397/* 385/*
@@ -406,7 +394,6 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
406 struct rxkad_level2_hdr sechdr; 394 struct rxkad_level2_hdr sechdr;
407 struct rxrpc_crypt iv; 395 struct rxrpc_crypt iv;
408 struct scatterlist _sg[4], *sg; 396 struct scatterlist _sg[4], *sg;
409 struct sk_buff *trailer;
410 bool aborted; 397 bool aborted;
411 u32 data_size, buf; 398 u32 data_size, buf;
412 u16 check; 399 u16 check;
@@ -423,12 +410,11 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
423 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 410 /* Decrypt the skbuff in-place. TODO: We really want to decrypt
424 * directly into the target buffer. 411 * directly into the target buffer.
425 */ 412 */
426 nsg = skb_cow_data(skb, 0, &trailer);
427 if (nsg < 0)
428 goto nomem;
429
430 sg = _sg; 413 sg = _sg;
431 if (unlikely(nsg > 4)) { 414 nsg = skb_shinfo(skb)->nr_frags;
415 if (nsg <= 4) {
416 nsg = 4;
417 } else {
432 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); 418 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
433 if (!sg) 419 if (!sg)
434 goto nomem; 420 goto nomem;
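
Because the input path now unshares any packet that needs decrypting, rxkad can drop skb_cow_data() and size its scatterlist straight from the fragment count. The stack-or-heap shape used in rxkad_verify_packet_2, isolated as a fragment (kernel context assumed, error handling simplified):

	struct scatterlist _sg[4], *sg = _sg;
	int nsg = skb_shinfo(skb)->nr_frags;

	if (nsg <= 4) {
		nsg = 4;			/* small skb: use the on-stack array */
	} else {
		sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
		if (!sg)
			return -ENOMEM;		/* heap fallback for fragmented skbs */
	}
	sg_init_table(sg, nsg);
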
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index bae14438f869..6a1547b270fe 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -176,7 +176,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
176 skb->tstamp = ktime_get_real(); 176 skb->tstamp = ktime_get_real();
177 177
178 ix = seq & RXRPC_RXTX_BUFF_MASK; 178 ix = seq & RXRPC_RXTX_BUFF_MASK;
179 rxrpc_get_skb(skb, rxrpc_skb_tx_got); 179 rxrpc_get_skb(skb, rxrpc_skb_got);
180 call->rxtx_annotations[ix] = annotation; 180 call->rxtx_annotations[ix] = annotation;
181 smp_wmb(); 181 smp_wmb();
182 call->rxtx_buffer[ix] = skb; 182 call->rxtx_buffer[ix] = skb;
@@ -248,7 +248,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
248 } 248 }
249 249
250out: 250out:
251 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 251 rxrpc_free_skb(skb, rxrpc_skb_freed);
252 _leave(" = %d", ret); 252 _leave(" = %d", ret);
253 return ret; 253 return ret;
254} 254}
@@ -289,7 +289,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
289 289
290 skb = call->tx_pending; 290 skb = call->tx_pending;
291 call->tx_pending = NULL; 291 call->tx_pending = NULL;
292 rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 292 rxrpc_see_skb(skb, rxrpc_skb_seen);
293 293
294 copied = 0; 294 copied = 0;
295 do { 295 do {
@@ -336,7 +336,9 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
336 if (!skb) 336 if (!skb)
337 goto maybe_error; 337 goto maybe_error;
338 338
339 rxrpc_new_skb(skb, rxrpc_skb_tx_new); 339 sp = rxrpc_skb(skb);
340 sp->rx_flags |= RXRPC_SKB_TX_BUFFER;
341 rxrpc_new_skb(skb, rxrpc_skb_new);
340 342
341 _debug("ALLOC SEND %p", skb); 343 _debug("ALLOC SEND %p", skb);
342 344
@@ -346,7 +348,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
346 skb_reserve(skb, call->conn->security_size); 348 skb_reserve(skb, call->conn->security_size);
347 skb->len += call->conn->security_size; 349 skb->len += call->conn->security_size;
348 350
349 sp = rxrpc_skb(skb);
350 sp->remain = chunk; 351 sp->remain = chunk;
351 if (sp->remain > skb_tailroom(skb)) 352 if (sp->remain > skb_tailroom(skb))
352 sp->remain = skb_tailroom(skb); 353 sp->remain = skb_tailroom(skb);
@@ -439,7 +440,7 @@ out:
439 return ret; 440 return ret;
440 441
441call_terminated: 442call_terminated:
442 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 443 rxrpc_free_skb(skb, rxrpc_skb_freed);
443 _leave(" = %d", call->error); 444 _leave(" = %d", call->error);
444 return call->error; 445 return call->error;
445 446
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 9ad5045b7c2f..0348d2bf6f7d 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -14,7 +14,8 @@
14#include <net/af_rxrpc.h> 14#include <net/af_rxrpc.h>
15#include "ar-internal.h" 15#include "ar-internal.h"
16 16
17#define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) 17#define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER)
18#define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs)
18 19
19/* 20/*
20 * Note the allocation or reception of a socket buffer. 21 * Note the allocation or reception of a socket buffer.
@@ -22,8 +23,9 @@
22void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 23void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
23{ 24{
24 const void *here = __builtin_return_address(0); 25 const void *here = __builtin_return_address(0);
25 int n = atomic_inc_return(select_skb_count(op)); 26 int n = atomic_inc_return(select_skb_count(skb));
26 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 27 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
28 rxrpc_skb(skb)->rx_flags, here);
27} 29}
28 30
29/* 31/*
@@ -33,8 +35,9 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
33{ 35{
34 const void *here = __builtin_return_address(0); 36 const void *here = __builtin_return_address(0);
35 if (skb) { 37 if (skb) {
36 int n = atomic_read(select_skb_count(op)); 38 int n = atomic_read(select_skb_count(skb));
37 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 39 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
40 rxrpc_skb(skb)->rx_flags, here);
38 } 41 }
39} 42}
40 43
@@ -44,12 +47,23 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
44void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 47void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
45{ 48{
46 const void *here = __builtin_return_address(0); 49 const void *here = __builtin_return_address(0);
47 int n = atomic_inc_return(select_skb_count(op)); 50 int n = atomic_inc_return(select_skb_count(skb));
48 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 51 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
52 rxrpc_skb(skb)->rx_flags, here);
49 skb_get(skb); 53 skb_get(skb);
50} 54}
51 55
52/* 56/*
57 * Note the dropping of a ref on a socket buffer by the core.
58 */
59void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
60{
61 const void *here = __builtin_return_address(0);
62 int n = atomic_dec_return(&rxrpc_n_rx_skbs);
63 trace_rxrpc_skb(skb, op, 0, n, 0, here);
64}
65
66/*
53 * Note the destruction of a socket buffer. 67 * Note the destruction of a socket buffer.
54 */ 68 */
55void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 69void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
@@ -58,8 +72,9 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
58 if (skb) { 72 if (skb) {
59 int n; 73 int n;
60 CHECK_SLAB_OKAY(&skb->users); 74 CHECK_SLAB_OKAY(&skb->users);
61 n = atomic_dec_return(select_skb_count(op)); 75 n = atomic_dec_return(select_skb_count(skb));
62 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 76 trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n,
77 rxrpc_skb(skb)->rx_flags, here);
63 kfree_skb(skb); 78 kfree_skb(skb);
64 } 79 }
65} 80}
@@ -72,9 +87,10 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
72 const void *here = __builtin_return_address(0); 87 const void *here = __builtin_return_address(0);
73 struct sk_buff *skb; 88 struct sk_buff *skb;
74 while ((skb = skb_dequeue((list))) != NULL) { 89 while ((skb = skb_dequeue((list))) != NULL) {
75 int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); 90 int n = atomic_dec_return(select_skb_count(skb));
76 trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, 91 trace_rxrpc_skb(skb, rxrpc_skb_purged,
77 refcount_read(&skb->users), n, here); 92 refcount_read(&skb->users), n,
93 rxrpc_skb(skb)->rx_flags, here);
78 kfree_skb(skb); 94 kfree_skb(skb);
79 } 95 }
80} 96}
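
With the Tx and Rx trace enums merged, select_skb_count() can no longer infer direction from the operation; instead sendmsg.c stamps RXRPC_SKB_TX_BUFFER into the skb's private flags at allocation time, and the counter is picked per skb. The selection logic, as a standalone sketch with illustrative names:

	#include <stdatomic.h>

	#define SKB_TX_BUFFER	0x01	/* stands in for RXRPC_SKB_TX_BUFFER */

	static atomic_int n_tx_skbs, n_rx_skbs;

	static atomic_int *select_skb_count(unsigned int rx_flags)
	{
		return (rx_flags & SKB_TX_BUFFER) ? &n_tx_skbs : &n_rx_skbs;
	}
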
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index fd1f7e799e23..04b7bd4ec751 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -422,7 +422,7 @@ static __net_init int bpf_init_net(struct net *net)
422{ 422{
423 struct tc_action_net *tn = net_generic(net, bpf_net_id); 423 struct tc_action_net *tn = net_generic(net, bpf_net_id);
424 424
425 return tc_action_net_init(tn, &act_bpf_ops); 425 return tc_action_net_init(net, tn, &act_bpf_ops);
426} 426}
427 427
428static void __net_exit bpf_exit_net(struct list_head *net_list) 428static void __net_exit bpf_exit_net(struct list_head *net_list)
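
This one-line change repeats across every act_* module below: tc_action_net_init() now takes the struct net so per-netns action state can record its owning namespace (act_ipt's destructor, further down, is the first consumer via a->idrinfo->net). A plausible shape of the updated helper, sketched from the call sites rather than copied from act_api.h:

	static inline int tc_action_net_init(struct net *net, struct tc_action_net *tn,
					     const struct tc_action_ops *ops)
	{
		tn->idrinfo = kmalloc(sizeof(*tn->idrinfo), GFP_KERNEL);
		if (!tn->idrinfo)
			return -ENOMEM;
		tn->ops = ops;
		tn->idrinfo->net = net;		/* new: remember the namespace */
		mutex_init(&tn->idrinfo->lock);
		idr_init(&tn->idrinfo->idr);
		return 0;
	}
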
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 32ac04d77a45..2b43cacf82af 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -231,7 +231,7 @@ static __net_init int connmark_init_net(struct net *net)
231{ 231{
232 struct tc_action_net *tn = net_generic(net, connmark_net_id); 232 struct tc_action_net *tn = net_generic(net, connmark_net_id);
233 233
234 return tc_action_net_init(tn, &act_connmark_ops); 234 return tc_action_net_init(net, tn, &act_connmark_ops);
235} 235}
236 236
237static void __net_exit connmark_exit_net(struct list_head *net_list) 237static void __net_exit connmark_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 9b9288267a54..d3cfad88dc3a 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -714,7 +714,7 @@ static __net_init int csum_init_net(struct net *net)
714{ 714{
715 struct tc_action_net *tn = net_generic(net, csum_net_id); 715 struct tc_action_net *tn = net_generic(net, csum_net_id);
716 716
717 return tc_action_net_init(tn, &act_csum_ops); 717 return tc_action_net_init(net, tn, &act_csum_ops);
718} 718}
719 719
720static void __net_exit csum_exit_net(struct list_head *net_list) 720static void __net_exit csum_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 33a1a7406e87..cdd6f3818097 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -939,7 +939,7 @@ static __net_init int ct_init_net(struct net *net)
939 tn->labels = true; 939 tn->labels = true;
940 } 940 }
941 941
942 return tc_action_net_init(&tn->tn, &act_ct_ops); 942 return tc_action_net_init(net, &tn->tn, &act_ct_ops);
943} 943}
944 944
945static void __net_exit ct_exit_net(struct list_head *net_list) 945static void __net_exit ct_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 06ef74b74911..0dbcfd1dca7b 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -376,7 +376,7 @@ static __net_init int ctinfo_init_net(struct net *net)
376{ 376{
377 struct tc_action_net *tn = net_generic(net, ctinfo_net_id); 377 struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
378 378
379 return tc_action_net_init(tn, &act_ctinfo_ops); 379 return tc_action_net_init(net, tn, &act_ctinfo_ops);
380} 380}
381 381
382static void __net_exit ctinfo_exit_net(struct list_head *net_list) 382static void __net_exit ctinfo_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 8f0140c6ca58..324f1d1f6d47 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -278,7 +278,7 @@ static __net_init int gact_init_net(struct net *net)
278{ 278{
279 struct tc_action_net *tn = net_generic(net, gact_net_id); 279 struct tc_action_net *tn = net_generic(net, gact_net_id);
280 280
281 return tc_action_net_init(tn, &act_gact_ops); 281 return tc_action_net_init(net, tn, &act_gact_ops);
282} 282}
283 283
284static void __net_exit gact_exit_net(struct list_head *net_list) 284static void __net_exit gact_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 92ee853d43e6..3a31e241c647 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -890,7 +890,7 @@ static __net_init int ife_init_net(struct net *net)
890{ 890{
891 struct tc_action_net *tn = net_generic(net, ife_net_id); 891 struct tc_action_net *tn = net_generic(net, ife_net_id);
892 892
893 return tc_action_net_init(tn, &act_ife_ops); 893 return tc_action_net_init(net, tn, &act_ife_ops);
894} 894}
895 895
896static void __net_exit ife_exit_net(struct list_head *net_list) 896static void __net_exit ife_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index ce2c30a591d2..214a03d405cf 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -61,12 +61,13 @@ static int ipt_init_target(struct net *net, struct xt_entry_target *t,
61 return 0; 61 return 0;
62} 62}
63 63
64static void ipt_destroy_target(struct xt_entry_target *t) 64static void ipt_destroy_target(struct xt_entry_target *t, struct net *net)
65{ 65{
66 struct xt_tgdtor_param par = { 66 struct xt_tgdtor_param par = {
67 .target = t->u.kernel.target, 67 .target = t->u.kernel.target,
68 .targinfo = t->data, 68 .targinfo = t->data,
69 .family = NFPROTO_IPV4, 69 .family = NFPROTO_IPV4,
70 .net = net,
70 }; 71 };
71 if (par.target->destroy != NULL) 72 if (par.target->destroy != NULL)
72 par.target->destroy(&par); 73 par.target->destroy(&par);
@@ -78,7 +79,7 @@ static void tcf_ipt_release(struct tc_action *a)
78 struct tcf_ipt *ipt = to_ipt(a); 79 struct tcf_ipt *ipt = to_ipt(a);
79 80
80 if (ipt->tcfi_t) { 81 if (ipt->tcfi_t) {
81 ipt_destroy_target(ipt->tcfi_t); 82 ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net);
82 kfree(ipt->tcfi_t); 83 kfree(ipt->tcfi_t);
83 } 84 }
84 kfree(ipt->tcfi_tname); 85 kfree(ipt->tcfi_tname);
@@ -180,7 +181,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
180 181
181 spin_lock_bh(&ipt->tcf_lock); 182 spin_lock_bh(&ipt->tcf_lock);
182 if (ret != ACT_P_CREATED) { 183 if (ret != ACT_P_CREATED) {
183 ipt_destroy_target(ipt->tcfi_t); 184 ipt_destroy_target(ipt->tcfi_t, net);
184 kfree(ipt->tcfi_tname); 185 kfree(ipt->tcfi_tname);
185 kfree(ipt->tcfi_t); 186 kfree(ipt->tcfi_t);
186 } 187 }
@@ -350,7 +351,7 @@ static __net_init int ipt_init_net(struct net *net)
350{ 351{
351 struct tc_action_net *tn = net_generic(net, ipt_net_id); 352 struct tc_action_net *tn = net_generic(net, ipt_net_id);
352 353
353 return tc_action_net_init(tn, &act_ipt_ops); 354 return tc_action_net_init(net, tn, &act_ipt_ops);
354} 355}
355 356
356static void __net_exit ipt_exit_net(struct list_head *net_list) 357static void __net_exit ipt_exit_net(struct list_head *net_list)
@@ -399,7 +400,7 @@ static __net_init int xt_init_net(struct net *net)
399{ 400{
400 struct tc_action_net *tn = net_generic(net, xt_net_id); 401 struct tc_action_net *tn = net_generic(net, xt_net_id);
401 402
402 return tc_action_net_init(tn, &act_xt_ops); 403 return tc_action_net_init(net, tn, &act_xt_ops);
403} 404}
404 405
405static void __net_exit xt_exit_net(struct list_head *net_list) 406static void __net_exit xt_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index be3f88dfc37e..9d1bf508075a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -453,7 +453,7 @@ static __net_init int mirred_init_net(struct net *net)
453{ 453{
454 struct tc_action_net *tn = net_generic(net, mirred_net_id); 454 struct tc_action_net *tn = net_generic(net, mirred_net_id);
455 455
456 return tc_action_net_init(tn, &act_mirred_ops); 456 return tc_action_net_init(net, tn, &act_mirred_ops);
457} 457}
458 458
459static void __net_exit mirred_exit_net(struct list_head *net_list) 459static void __net_exit mirred_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 0f299e3b618c..e168df0e008a 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -375,7 +375,7 @@ static __net_init int mpls_init_net(struct net *net)
375{ 375{
376 struct tc_action_net *tn = net_generic(net, mpls_net_id); 376 struct tc_action_net *tn = net_generic(net, mpls_net_id);
377 377
378 return tc_action_net_init(tn, &act_mpls_ops); 378 return tc_action_net_init(net, tn, &act_mpls_ops);
379} 379}
380 380
381static void __net_exit mpls_exit_net(struct list_head *net_list) 381static void __net_exit mpls_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 7b858c11b1b5..ea4c5359e7df 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -327,7 +327,7 @@ static __net_init int nat_init_net(struct net *net)
327{ 327{
328 struct tc_action_net *tn = net_generic(net, nat_net_id); 328 struct tc_action_net *tn = net_generic(net, nat_net_id);
329 329
330 return tc_action_net_init(tn, &act_nat_ops); 330 return tc_action_net_init(net, tn, &act_nat_ops);
331} 331}
332 332
333static void __net_exit nat_exit_net(struct list_head *net_list) 333static void __net_exit nat_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17360c6faeaa..cdfaa79382a2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -498,7 +498,7 @@ static __net_init int pedit_init_net(struct net *net)
498{ 498{
499 struct tc_action_net *tn = net_generic(net, pedit_net_id); 499 struct tc_action_net *tn = net_generic(net, pedit_net_id);
500 500
501 return tc_action_net_init(tn, &act_pedit_ops); 501 return tc_action_net_init(net, tn, &act_pedit_ops);
502} 502}
503 503
504static void __net_exit pedit_exit_net(struct list_head *net_list) 504static void __net_exit pedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 49cec3e64a4d..6315e0f8d26e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -371,7 +371,7 @@ static __net_init int police_init_net(struct net *net)
371{ 371{
372 struct tc_action_net *tn = net_generic(net, police_net_id); 372 struct tc_action_net *tn = net_generic(net, police_net_id);
373 373
374 return tc_action_net_init(tn, &act_police_ops); 374 return tc_action_net_init(net, tn, &act_police_ops);
375} 375}
376 376
377static void __net_exit police_exit_net(struct list_head *net_list) 377static void __net_exit police_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 595308d60133..10229124a992 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -102,13 +102,17 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); 102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
103 s->rate = rate; 103 s->rate = rate;
104 s->psample_group_num = psample_group_num; 104 s->psample_group_num = psample_group_num;
105 RCU_INIT_POINTER(s->psample_group, psample_group); 105 rcu_swap_protected(s->psample_group, psample_group,
106 lockdep_is_held(&s->tcf_lock));
106 107
107 if (tb[TCA_SAMPLE_TRUNC_SIZE]) { 108 if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
108 s->truncate = true; 109 s->truncate = true;
109 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); 110 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
110 } 111 }
111 spin_unlock_bh(&s->tcf_lock); 112 spin_unlock_bh(&s->tcf_lock);
113
114 if (psample_group)
115 psample_group_put(psample_group);
112 if (goto_ch) 116 if (goto_ch)
113 tcf_chain_put_by_act(goto_ch); 117 tcf_chain_put_by_act(goto_ch);
114 118
@@ -265,7 +269,7 @@ static __net_init int sample_init_net(struct net *net)
265{ 269{
266 struct tc_action_net *tn = net_generic(net, sample_net_id); 270 struct tc_action_net *tn = net_generic(net, sample_net_id);
267 271
268 return tc_action_net_init(tn, &act_sample_ops); 272 return tc_action_net_init(net, tn, &act_sample_ops);
269} 273}
270 274
271static void __net_exit sample_exit_net(struct list_head *net_list) 275static void __net_exit sample_exit_net(struct list_head *net_list)
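
The act_sample fix swaps the new psample group in under the action lock but defers dropping the displaced reference until after the unlock, keeping the potentially heavyweight put out of the critical section. The shape of the pattern, as a kernel-context fragment:

	spin_lock_bh(&s->tcf_lock);
	/* New group in, old group out: the prior pointer lands in psample_group. */
	rcu_swap_protected(s->psample_group, psample_group,
			   lockdep_is_held(&s->tcf_lock));
	spin_unlock_bh(&s->tcf_lock);

	if (psample_group)
		psample_group_put(psample_group);	/* drop the displaced ref */
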
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 33aefa25b545..6120e56117ca 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -232,7 +232,7 @@ static __net_init int simp_init_net(struct net *net)
232{ 232{
233 struct tc_action_net *tn = net_generic(net, simp_net_id); 233 struct tc_action_net *tn = net_generic(net, simp_net_id);
234 234
235 return tc_action_net_init(tn, &act_simp_ops); 235 return tc_action_net_init(net, tn, &act_simp_ops);
236} 236}
237 237
238static void __net_exit simp_exit_net(struct list_head *net_list) 238static void __net_exit simp_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index b100870f02a6..6a8d3337c577 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
307 return tcf_idr_search(tn, a, index); 307 return tcf_idr_search(tn, a, index);
308} 308}
309 309
310static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
311{
312 return nla_total_size(sizeof(struct tc_skbedit))
313 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
314 + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
315 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
316 + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
317 + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
318 + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
319}
320
310static struct tc_action_ops act_skbedit_ops = { 321static struct tc_action_ops act_skbedit_ops = {
311 .kind = "skbedit", 322 .kind = "skbedit",
312 .id = TCA_ID_SKBEDIT, 323 .id = TCA_ID_SKBEDIT,
@@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
316 .init = tcf_skbedit_init, 327 .init = tcf_skbedit_init,
317 .cleanup = tcf_skbedit_cleanup, 328 .cleanup = tcf_skbedit_cleanup,
318 .walk = tcf_skbedit_walker, 329 .walk = tcf_skbedit_walker,
330 .get_fill_size = tcf_skbedit_get_fill_size,
319 .lookup = tcf_skbedit_search, 331 .lookup = tcf_skbedit_search,
320 .size = sizeof(struct tcf_skbedit), 332 .size = sizeof(struct tcf_skbedit),
321}; 333};
@@ -324,7 +336,7 @@ static __net_init int skbedit_init_net(struct net *net)
324{ 336{
325 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 337 struct tc_action_net *tn = net_generic(net, skbedit_net_id);
326 338
327 return tc_action_net_init(tn, &act_skbedit_ops); 339 return tc_action_net_init(net, tn, &act_skbedit_ops);
328} 340}
329 341
330static void __net_exit skbedit_exit_net(struct list_head *net_list) 342static void __net_exit skbedit_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 7da3518e18ef..888437f97ba6 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -287,7 +287,7 @@ static __net_init int skbmod_init_net(struct net *net)
287{ 287{
288 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 288 struct tc_action_net *tn = net_generic(net, skbmod_net_id);
289 289
290 return tc_action_net_init(tn, &act_skbmod_ops); 290 return tc_action_net_init(net, tn, &act_skbmod_ops);
291} 291}
292 292
293static void __net_exit skbmod_exit_net(struct list_head *net_list) 293static void __net_exit skbmod_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 6d0debdc9b97..2f83a79f76aa 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -600,7 +600,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
600{ 600{
601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
602 602
603 return tc_action_net_init(tn, &act_tunnel_key_ops); 603 return tc_action_net_init(net, tn, &act_tunnel_key_ops);
604} 604}
605 605
606static void __net_exit tunnel_key_exit_net(struct list_head *net_list) 606static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index a3c9eea1ee8a..287a30bf8930 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -334,7 +334,7 @@ static __net_init int vlan_init_net(struct net *net)
334{ 334{
335 struct tc_action_net *tn = net_generic(net, vlan_net_id); 335 struct tc_action_net *tn = net_generic(net, vlan_net_id);
336 336
337 return tc_action_net_init(tn, &act_vlan_ops); 337 return tc_action_net_init(net, tn, &act_vlan_ops);
338} 338}
339 339
340static void __net_exit vlan_exit_net(struct list_head *net_list) 340static void __net_exit vlan_exit_net(struct list_head *net_list)
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 732e109c3055..810645b5c086 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -181,11 +181,6 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
181 s64 credits; 181 s64 credits;
182 int len; 182 int len;
183 183
184 if (atomic64_read(&q->port_rate) == -1) {
185 WARN_ONCE(1, "cbs: dequeue() called with unknown port rate.");
186 return NULL;
187 }
188
189 if (q->credits < 0) { 184 if (q->credits < 0) {
190 credits = timediff_to_credits(now - q->last, q->idleslope); 185 credits = timediff_to_credits(now - q->last, q->idleslope);
191 186
@@ -303,11 +298,19 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
303static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) 298static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
304{ 299{
305 struct ethtool_link_ksettings ecmd; 300 struct ethtool_link_ksettings ecmd;
301 int speed = SPEED_10;
306 int port_rate = -1; 302 int port_rate = -1;
303 int err;
304
305 err = __ethtool_get_link_ksettings(dev, &ecmd);
306 if (err < 0)
307 goto skip;
308
309 if (ecmd.base.speed != SPEED_UNKNOWN)
310 speed = ecmd.base.speed;
307 311
308 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 312skip:
309 ecmd.base.speed != SPEED_UNKNOWN) 313 port_rate = speed * 1000 * BYTES_PER_KBIT;
310 port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT;
311 314
312 atomic64_set(&q->port_rate, port_rate); 315 atomic64_set(&q->port_rate, port_rate);
313 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n", 316 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
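
Instead of warning from the dequeue hot path when the link speed is unknown, cbs now falls back to 10 Mb/s whenever __ethtool_get_link_ksettings() fails or reports SPEED_UNKNOWN (taprio below gets the same treatment). The resulting rate computation, as a standalone sketch:

	#define SPEED_10	10
	#define SPEED_UNKNOWN	(-1)
	#define BYTES_PER_KBIT	(1000LL / 8)	/* 125 bytes per kilobit */

	static long long port_rate_bytes(int link_speed_mbit)
	{
		int speed = SPEED_10;		/* conservative default */

		if (link_speed_mbit != SPEED_UNKNOWN)
			speed = link_speed_mbit;

		/* e.g. 1000 Mb/s -> 1000 * 1000 * 125 = 125,000,000 bytes/s */
		return (long long)speed * 1000 * BYTES_PER_KBIT;
	}
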
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 11c03cf4aa74..137db1cbde85 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -624,8 +624,12 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
624 624
625 err = skb_array_produce(q, skb); 625 err = skb_array_produce(q, skb);
626 626
627 if (unlikely(err)) 627 if (unlikely(err)) {
628 return qdisc_drop_cpu(skb, qdisc, to_free); 628 if (qdisc_is_percpu_stats(qdisc))
629 return qdisc_drop_cpu(skb, qdisc, to_free);
630 else
631 return qdisc_drop(skb, qdisc, to_free);
632 }
629 633
630 qdisc_update_stats_at_enqueue(qdisc, pkt_len); 634 qdisc_update_stats_at_enqueue(qdisc, pkt_len);
631 return NET_XMIT_SUCCESS; 635 return NET_XMIT_SUCCESS;
@@ -688,11 +692,14 @@ static void pfifo_fast_reset(struct Qdisc *qdisc)
688 kfree_skb(skb); 692 kfree_skb(skb);
689 } 693 }
690 694
691 for_each_possible_cpu(i) { 695 if (qdisc_is_percpu_stats(qdisc)) {
692 struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); 696 for_each_possible_cpu(i) {
697 struct gnet_stats_queue *q;
693 698
694 q->backlog = 0; 699 q = per_cpu_ptr(qdisc->cpu_qstats, i);
695 q->qlen = 0; 700 q->backlog = 0;
701 q->qlen = 0;
702 }
696 } 703 }
697} 704}
698 705
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c39db507ba3f..8d8bc2ec5cd6 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -477,11 +477,6 @@ static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
477 u32 gate_mask; 477 u32 gate_mask;
478 int i; 478 int i;
479 479
480 if (atomic64_read(&q->picos_per_byte) == -1) {
481 WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte.");
482 return NULL;
483 }
484
485 rcu_read_lock(); 480 rcu_read_lock();
486 entry = rcu_dereference(q->current_entry); 481 entry = rcu_dereference(q->current_entry);
487 /* if there's no entry, it means that the schedule didn't 482 /* if there's no entry, it means that the schedule didn't
@@ -958,12 +953,20 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
958 struct taprio_sched *q) 953 struct taprio_sched *q)
959{ 954{
960 struct ethtool_link_ksettings ecmd; 955 struct ethtool_link_ksettings ecmd;
961 int picos_per_byte = -1; 956 int speed = SPEED_10;
957 int picos_per_byte;
958 int err;
962 959
963 if (!__ethtool_get_link_ksettings(dev, &ecmd) && 960 err = __ethtool_get_link_ksettings(dev, &ecmd);
964 ecmd.base.speed != SPEED_UNKNOWN) 961 if (err < 0)
965 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, 962 goto skip;
966 ecmd.base.speed * 1000 * 1000); 963
964 if (ecmd.base.speed != SPEED_UNKNOWN)
965 speed = ecmd.base.speed;
966
967skip:
968 picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
969 speed * 1000 * 1000);
967 970
968 atomic64_set(&q->picos_per_byte, picos_per_byte); 971 atomic64_set(&q->picos_per_byte, picos_per_byte);
969 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", 972 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1195,7 +1198,8 @@ unlock:
1195 spin_unlock_bh(qdisc_lock(sch)); 1198 spin_unlock_bh(qdisc_lock(sch));
1196 1199
1197free_sched: 1200free_sched:
1198 kfree(new_admin); 1201 if (new_admin)
1202 call_rcu(&new_admin->rcu, taprio_free_sched_cb);
1199 1203
1200 return err; 1204 return err;
1201} 1205}
@@ -1248,6 +1252,10 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1248 */ 1252 */
1249 q->clockid = -1; 1253 q->clockid = -1;
1250 1254
1255 spin_lock(&taprio_list_lock);
1256 list_add(&q->taprio_list, &taprio_list);
1257 spin_unlock(&taprio_list_lock);
1258
1251 if (sch->parent != TC_H_ROOT) 1259 if (sch->parent != TC_H_ROOT)
1252 return -EOPNOTSUPP; 1260 return -EOPNOTSUPP;
1253 1261
@@ -1265,10 +1273,6 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
1265 if (!opt) 1273 if (!opt)
1266 return -EINVAL; 1274 return -EINVAL;
1267 1275
1268 spin_lock(&taprio_list_lock);
1269 list_add(&q->taprio_list, &taprio_list);
1270 spin_unlock(&taprio_list_lock);
1271
1272 for (i = 0; i < dev->num_tx_queues; i++) { 1276 for (i = 0; i < dev->num_tx_queues; i++) {
1273 struct netdev_queue *dev_queue; 1277 struct netdev_queue *dev_queue;
1274 struct Qdisc *qdisc; 1278 struct Qdisc *qdisc;
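
taprio's version of the same fallback feeds a picoseconds-per-byte figure rather than a byte rate. Unrolled as a standalone sketch (the caller is assumed to have already substituted the 10 Mb/s default for an unknown speed):

	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000LL

	/* ps/byte = (10^12 ps/s * 8 bits/byte) / (speed_mbit * 10^6 bits/s) */
	static int64_t picos_per_byte(int64_t speed_mbit)
	{
		return (NSEC_PER_SEC * 1000 * 8) / (speed_mbit * 1000 * 1000);
		/* 1000 Mb/s -> 8,000 ps/byte; the 10 Mb/s fallback -> 800,000 */
	}
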
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index a554d6d15d1b..1cf5bb5b73c4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
546 */ 546 */
547 if (net->sctp.pf_enable && 547 if (net->sctp.pf_enable &&
548 (transport->state == SCTP_ACTIVE) && 548 (transport->state == SCTP_ACTIVE) &&
549 (asoc->pf_retrans < transport->pathmaxrxt) && 549 (transport->error_count < transport->pathmaxrxt) &&
550 (transport->error_count > asoc->pf_retrans)) { 550 (transport->error_count > asoc->pf_retrans)) {
551 551
552 sctp_assoc_control_transport(asoc, transport, 552 sctp_assoc_control_transport(asoc, transport,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 25946604af85..e83cdaa2ab76 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
316 nstr_list[i] = htons(str_list[i]); 316 nstr_list[i] = htons(str_list[i]);
317 317
318 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { 318 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
319 kfree(nstr_list);
319 retval = -EAGAIN; 320 retval = -EAGAIN;
320 goto out; 321 goto out;
321 } 322 }
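The one-line sctp_send_reset_streams() change plugs a leak: nstr_list was allocated a few lines earlier, and the -EAGAIN branch bailed out without freeing it. A minimal userspace sketch of the bug shape (names hypothetical):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

int send_reset(const unsigned short *ids, size_t n, int queue_busy)
{
	unsigned short *copy;
	int ret = 0;

	copy = malloc(n * sizeof(*copy));
	if (!copy)
		return -ENOMEM;
	memcpy(copy, ids, n * sizeof(*copy));

	if (queue_busy) {
		ret = -EAGAIN;
		goto out;	/* the fix: exit through the path that frees */
	}
	/* ... use 'copy' to build the request ... */
out:
	free(copy);
	return ret;
}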
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f0de323d15d6..6c8f09c1ce51 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
76 DEFINE_WAIT_FUNC(wait, woken_wake_function); 76 DEFINE_WAIT_FUNC(wait, woken_wake_function);
77 struct smc_connection *conn = &smc->conn; 77 struct smc_connection *conn = &smc->conn;
78 struct sock *sk = &smc->sk; 78 struct sock *sk = &smc->sk;
79 bool noblock;
80 long timeo; 79 long timeo;
81 int rc = 0; 80 int rc = 0;
82 81
83 /* similar to sk_stream_wait_memory */ 82 /* similar to sk_stream_wait_memory */
84 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 83 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
85 noblock = timeo ? false : true;
86 add_wait_queue(sk_sleep(sk), &wait); 84 add_wait_queue(sk_sleep(sk), &wait);
87 while (1) { 85 while (1) {
88 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 86 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
97 break; 95 break;
98 } 96 }
99 if (!timeo) { 97 if (!timeo) {
100 if (noblock) 98 /* ensure EPOLLOUT is subsequently generated */
101 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 99 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
102 rc = -EAGAIN; 100 rc = -EAGAIN;
103 break; 101 break;
104 } 102 }
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8679b6027e9..a07b516e503a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task)
1970static void 1970static void
1971call_bind_status(struct rpc_task *task) 1971call_bind_status(struct rpc_task *task)
1972{ 1972{
1973 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1973 int status = -EIO; 1974 int status = -EIO;
1974 1975
1975 if (rpc_task_transmitted(task)) { 1976 if (rpc_task_transmitted(task)) {
@@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task)
1977 return; 1978 return;
1978 } 1979 }
1979 1980
1980 if (task->tk_status >= 0) { 1981 dprint_status(task);
1981 dprint_status(task); 1982 trace_rpc_bind_status(task);
1983 if (task->tk_status >= 0)
1984 goto out_next;
1985 if (xprt_bound(xprt)) {
1982 task->tk_status = 0; 1986 task->tk_status = 0;
1983 task->tk_action = call_connect; 1987 goto out_next;
1984 return;
1985 } 1988 }
1986 1989
1987 trace_rpc_bind_status(task);
1988 switch (task->tk_status) { 1990 switch (task->tk_status) {
1989 case -ENOMEM: 1991 case -ENOMEM:
1990 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid); 1992 dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
@@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task)
2003 task->tk_rebind_retry--; 2005 task->tk_rebind_retry--;
2004 rpc_delay(task, 3*HZ); 2006 rpc_delay(task, 3*HZ);
2005 goto retry_timeout; 2007 goto retry_timeout;
2008 case -ENOBUFS:
2009 rpc_delay(task, HZ >> 2);
2010 goto retry_timeout;
2006 case -EAGAIN: 2011 case -EAGAIN:
2007 goto retry_timeout; 2012 goto retry_timeout;
2008 case -ETIMEDOUT: 2013 case -ETIMEDOUT:
@@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task)
2026 case -ENETDOWN: 2031 case -ENETDOWN:
2027 case -EHOSTUNREACH: 2032 case -EHOSTUNREACH:
2028 case -ENETUNREACH: 2033 case -ENETUNREACH:
2029 case -ENOBUFS:
2030 case -EPIPE: 2034 case -EPIPE:
2031 dprintk("RPC: %5u remote rpcbind unreachable: %d\n", 2035 dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
2032 task->tk_pid, task->tk_status); 2036 task->tk_pid, task->tk_status);
@@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task)
2043 2047
2044 rpc_call_rpcerror(task, status); 2048 rpc_call_rpcerror(task, status);
2045 return; 2049 return;
2046 2050out_next:
2051 task->tk_action = call_connect;
2052 return;
2047retry_timeout: 2053retry_timeout:
2048 task->tk_status = 0; 2054 task->tk_status = 0;
2049 task->tk_action = call_bind; 2055 task->tk_action = call_bind;
@@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task)
2090static void 2096static void
2091call_connect_status(struct rpc_task *task) 2097call_connect_status(struct rpc_task *task)
2092{ 2098{
2099 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
2093 struct rpc_clnt *clnt = task->tk_client; 2100 struct rpc_clnt *clnt = task->tk_client;
2094 int status = task->tk_status; 2101 int status = task->tk_status;
2095 2102
@@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task)
2099 } 2106 }
2100 2107
2101 dprint_status(task); 2108 dprint_status(task);
2102
2103 trace_rpc_connect_status(task); 2109 trace_rpc_connect_status(task);
2110
2111 if (task->tk_status == 0) {
2112 clnt->cl_stats->netreconn++;
2113 goto out_next;
2114 }
2115 if (xprt_connected(xprt)) {
2116 task->tk_status = 0;
2117 goto out_next;
2118 }
2119
2104 task->tk_status = 0; 2120 task->tk_status = 0;
2105 switch (status) { 2121 switch (status) {
2106 case -ECONNREFUSED: 2122 case -ECONNREFUSED:
@@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task)
2117 case -ENETDOWN: 2133 case -ENETDOWN:
2118 case -ENETUNREACH: 2134 case -ENETUNREACH:
2119 case -EHOSTUNREACH: 2135 case -EHOSTUNREACH:
2120 case -EADDRINUSE:
2121 case -ENOBUFS:
2122 case -EPIPE: 2136 case -EPIPE:
2123 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt, 2137 xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
2124 task->tk_rqstp->rq_connect_cookie); 2138 task->tk_rqstp->rq_connect_cookie);
@@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task)
2127 /* retry with existing socket, after a delay */ 2141 /* retry with existing socket, after a delay */
2128 rpc_delay(task, 3*HZ); 2142 rpc_delay(task, 3*HZ);
2129 /* fall through */ 2143 /* fall through */
2144 case -EADDRINUSE:
2130 case -ENOTCONN: 2145 case -ENOTCONN:
2131 case -EAGAIN: 2146 case -EAGAIN:
2132 case -ETIMEDOUT: 2147 case -ETIMEDOUT:
2133 goto out_retry; 2148 goto out_retry;
2134 case 0: 2149 case -ENOBUFS:
2135 clnt->cl_stats->netreconn++; 2150 rpc_delay(task, HZ >> 2);
2136 task->tk_action = call_transmit; 2151 goto out_retry;
2137 return;
2138 } 2152 }
2139 rpc_call_rpcerror(task, status); 2153 rpc_call_rpcerror(task, status);
2140 return; 2154 return;
2155out_next:
2156 task->tk_action = call_transmit;
2157 return;
2141out_retry: 2158out_retry:
2142 /* Check for timeouts before looping back to call_bind */ 2159 /* Check for timeouts before looping back to call_bind */
2143 task->tk_action = call_bind; 2160 task->tk_action = call_bind;
@@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task)
2365 case -ECONNABORTED: 2382 case -ECONNABORTED:
2366 case -ENOTCONN: 2383 case -ENOTCONN:
2367 rpc_force_rebind(clnt); 2384 rpc_force_rebind(clnt);
2368 /* fall through */ 2385 break;
2369 case -EADDRINUSE: 2386 case -EADDRINUSE:
2370 rpc_delay(task, 3*HZ); 2387 rpc_delay(task, 3*HZ);
2371 /* fall through */ 2388 /* fall through */
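Two threads run through this clnt.c refactor: a common out_next label now advances the state machine both when the call succeeded and when the transport turns out to be already bound/connected, and -ENOBUFS moves from the fatal bucket to a retry after a quarter-second pause (HZ >> 2 jiffies). A condensed kernel-context sketch of the resulting control flow:

static void bind_status_sketch(struct rpc_task *task, struct rpc_xprt *xprt)
{
	if (task->tk_status >= 0)
		goto out_next;			/* rpcbind answered */
	if (xprt_bound(xprt)) {			/* bound concurrently elsewhere */
		task->tk_status = 0;
		goto out_next;
	}
	switch (task->tk_status) {
	case -ENOBUFS:				/* transient: back off, then retry */
		rpc_delay(task, HZ >> 2);
		/* fall through */
	case -EAGAIN:
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	}
	return;
out_next:
	task->tk_action = call_connect;
}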
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 783748dc5e6f..2e71f5455c6c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1408 status = -EBADMSG; 1408 status = -EBADMSG;
1409 goto out_dequeue; 1409 goto out_dequeue;
1410 } 1410 }
1411 if (task->tk_ops->rpc_call_prepare_transmit) {
1412 task->tk_ops->rpc_call_prepare_transmit(task,
1413 task->tk_calldata);
1414 status = task->tk_status;
1415 if (status < 0)
1416 goto out_dequeue;
1417 }
1418 if (RPC_SIGNALLED(task)) { 1411 if (RPC_SIGNALLED(task)) {
1419 status = -ERESTARTSYS; 1412 status = -ERESTARTSYS;
1420 goto out_dequeue; 1413 goto out_dequeue;
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d00913..0f1eaed1bd1b 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
75 tipc_set_node_id(net, node_id); 75 tipc_set_node_id(net, node_id);
76 } 76 }
77 tn->trial_addr = addr; 77 tn->trial_addr = addr;
78 tn->addr_trial_end = jiffies;
78 pr_info("32-bit node address hash set to %x\n", addr); 79 pr_info("32-bit node address hash set to %x\n", addr);
79} 80}
80 81
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 66d3a07bc571..c2c5c53cad22 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,8 +106,6 @@ struct tipc_stats {
106 * @transmitq: queue for sent, non-acked messages 106 * @transmitq: queue for sent, non-acked messages
107 * @backlogq: queue for messages waiting to be sent 107 * @backlogq: queue for messages waiting to be sent
108 * @snt_nxt: next sequence number to use for outbound messages 108 * @snt_nxt: next sequence number to use for outbound messages
109 * @prev_from: sequence number of most previous retransmission request
110 * @stale_limit: time when repeated identical retransmits must force link reset
111 * @ackers: # of peers that needs to ack each packet before it can be released 109 * @ackers: # of peers that needs to ack each packet before it can be released
112 * @acked: # last packet acked by a certain peer. Used for broadcast. 110 * @acked: # last packet acked by a certain peer. Used for broadcast.
113 * @rcv_nxt: next sequence number to expect for inbound messages 111 * @rcv_nxt: next sequence number to expect for inbound messages
@@ -164,9 +162,7 @@ struct tipc_link {
164 u16 limit; 162 u16 limit;
165 } backlog[5]; 163 } backlog[5];
166 u16 snd_nxt; 164 u16 snd_nxt;
167 u16 prev_from;
168 u16 window; 165 u16 window;
169 unsigned long stale_limit;
170 166
171 /* Reception */ 167 /* Reception */
172 u16 rcv_nxt; 168 u16 rcv_nxt;
@@ -1044,47 +1040,53 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
1044 * link_retransmit_failure() - Detect repeated retransmit failures 1040 * link_retransmit_failure() - Detect repeated retransmit failures
1045 * @l: tipc link sender 1041 * @l: tipc link sender
1046 * @r: tipc link receiver (= l in case of unicast) 1042 * @r: tipc link receiver (= l in case of unicast)
1047 * @from: seqno of the 1st packet in retransmit request
1048 * @rc: returned code 1043 * @rc: returned code
1049 * 1044 *
1050 * Return: true if repeated retransmit failures happen, otherwise 1045 * Return: true if repeated retransmit failures happen, otherwise
1051 * false 1046 * false
1052 */ 1047 */
1053static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r, 1048static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1054 u16 from, int *rc) 1049 int *rc)
1055{ 1050{
1056 struct sk_buff *skb = skb_peek(&l->transmq); 1051 struct sk_buff *skb = skb_peek(&l->transmq);
1057 struct tipc_msg *hdr; 1052 struct tipc_msg *hdr;
1058 1053
1059 if (!skb) 1054 if (!skb)
1060 return false; 1055 return false;
1061 hdr = buf_msg(skb);
1062 1056
1063 /* Detect repeated retransmit failures on same packet */ 1057 if (!TIPC_SKB_CB(skb)->retr_cnt)
1064 if (r->prev_from != from) { 1058 return false;
1065 r->prev_from = from;
1066 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
1067 } else if (time_after(jiffies, r->stale_limit)) {
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr),
1072 msg_errcode(hdr));
1073 pr_info("sqno %u, prev: %x, src: %x\n",
1074 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1075
1076 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1077 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1078 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1079 1059
1080 if (link_is_bc_sndlink(l)) 1060 if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1081 *rc = TIPC_LINK_DOWN_EVT; 1061 msecs_to_jiffies(r->tolerance)))
1062 return false;
1063
1064 hdr = buf_msg(skb);
1065 if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1066 return false;
1082 1067
1068 pr_warn("Retransmission failure on link <%s>\n", l->name);
1069 link_print(l, "State of link ");
1070 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1071 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1072 pr_info("sqno %u, prev: %x, dest: %x\n",
1073 msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1074 pr_info("retr_stamp %d, retr_cnt %d\n",
1075 jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1076 TIPC_SKB_CB(skb)->retr_cnt);
1077
1078 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1079 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1080 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1081
1082 if (link_is_bc_sndlink(l)) {
1083 r->state = LINK_RESET;
1084 *rc = TIPC_LINK_DOWN_EVT;
1085 } else {
1083 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); 1086 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1084 return true;
1085 } 1087 }
1086 1088
1087 return false; 1089 return true;
1088} 1090}
1089 1091
1090/* tipc_link_bc_retrans() - retransmit zero or more packets 1092/* tipc_link_bc_retrans() - retransmit zero or more packets
@@ -1110,7 +1112,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1110 1112
1111 trace_tipc_link_retrans(r, from, to, &l->transmq); 1113 trace_tipc_link_retrans(r, from, to, &l->transmq);
1112 1114
1113 if (link_retransmit_failure(l, r, from, &rc)) 1115 if (link_retransmit_failure(l, r, &rc))
1114 return rc; 1116 return rc;
1115 1117
1116 skb_queue_walk(&l->transmq, skb) { 1118 skb_queue_walk(&l->transmq, skb) {
@@ -1119,11 +1121,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1119 continue; 1121 continue;
1120 if (more(msg_seqno(hdr), to)) 1122 if (more(msg_seqno(hdr), to))
1121 break; 1123 break;
1122 if (link_is_bc_sndlink(l)) { 1124
1123 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1125 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1124 continue; 1126 continue;
1125 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM; 1127 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1126 }
1127 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC); 1128 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
1128 if (!_skb) 1129 if (!_skb)
1129 return 0; 1130 return 0;
@@ -1133,6 +1134,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1133 _skb->priority = TC_PRIO_CONTROL; 1134 _skb->priority = TC_PRIO_CONTROL;
1134 __skb_queue_tail(xmitq, _skb); 1135 __skb_queue_tail(xmitq, _skb);
1135 l->stats.retransmitted++; 1136 l->stats.retransmitted++;
1137
1138 /* Increase actual retrans counter & mark first time */
1139 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1140 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1136 } 1141 }
1137 return 0; 1142 return 0;
1138} 1143}
@@ -1357,12 +1362,10 @@ static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1357 struct tipc_msg *hdr; 1362 struct tipc_msg *hdr;
1358 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1; 1363 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1359 u16 ack = l->rcv_nxt - 1; 1364 u16 ack = l->rcv_nxt - 1;
1365 bool passed = false;
1360 u16 seqno, n = 0; 1366 u16 seqno, n = 0;
1361 int rc = 0; 1367 int rc = 0;
1362 1368
1363 if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
1364 return rc;
1365
1366 skb_queue_walk_safe(&l->transmq, skb, tmp) { 1369 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1367 seqno = buf_seqno(skb); 1370 seqno = buf_seqno(skb);
1368 1371
@@ -1372,12 +1375,17 @@ next_gap_ack:
1372 __skb_unlink(skb, &l->transmq); 1375 __skb_unlink(skb, &l->transmq);
1373 kfree_skb(skb); 1376 kfree_skb(skb);
1374 } else if (less_eq(seqno, acked + gap)) { 1377 } else if (less_eq(seqno, acked + gap)) {
1375 /* retransmit skb */ 1378 /* First, check whether repeated retrans failures occur */
1379 if (!passed && link_retransmit_failure(l, l, &rc))
1380 return rc;
1381 passed = true;
1382
1383 /* retransmit skb if unrestricted */
1376 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr)) 1384 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1377 continue; 1385 continue;
1378 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME; 1386 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1379 1387 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
1380 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC); 1388 GFP_ATOMIC);
1381 if (!_skb) 1389 if (!_skb)
1382 continue; 1390 continue;
1383 hdr = buf_msg(_skb); 1391 hdr = buf_msg(_skb);
@@ -1386,6 +1394,10 @@ next_gap_ack:
1386 _skb->priority = TC_PRIO_CONTROL; 1394 _skb->priority = TC_PRIO_CONTROL;
1387 __skb_queue_tail(xmitq, _skb); 1395 __skb_queue_tail(xmitq, _skb);
1388 l->stats.retransmitted++; 1396 l->stats.retransmitted++;
1397
1398 /* Increase actual retrans counter & mark first time */
1399 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1400 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1389 } else { 1401 } else {
1390 /* retry with Gap ACK blocks if any */ 1402 /* retry with Gap ACK blocks if any */
1391 if (!ga || n >= ga->gack_cnt) 1403 if (!ga || n >= ga->gack_cnt)
@@ -2577,7 +2589,7 @@ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2577 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps); 2589 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2578 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt); 2590 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2579 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt); 2591 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2580 i += scnprintf(buf + i, sz - i, " %u", l->prev_from); 2592 i += scnprintf(buf + i, sz - i, " %u", 0);
2581 i += scnprintf(buf + i, sz - i, " %u", 0); 2593 i += scnprintf(buf + i, sz - i, " %u", 0);
2582 i += scnprintf(buf + i, sz - i, " %u", l->acked); 2594 i += scnprintf(buf + i, sz - i, " %u", l->acked);
2583 2595
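The link.c rework replaces the per-link prev_from/stale_limit pair with per-packet state: retr_cnt counts retransmissions and retr_stamp records when the first one happened, so the failure test becomes purely local to the head-of-queue buffer. A condensed kernel-context sketch of the new criterion:

static bool retrans_failed_sketch(struct tipc_link *l, struct tipc_link *r)
{
	struct sk_buff *skb = skb_peek(&l->transmq);

	if (!skb || !TIPC_SKB_CB(skb)->retr_cnt)
		return false;	/* nothing outstanding, or never retransmitted */

	/* failure = first retransmission older than the peer's tolerance */
	return time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
				   msecs_to_jiffies(r->tolerance));
}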
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index da509f0eb9ca..d7ebc9e955f6 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,13 +102,15 @@ struct plist;
102#define TIPC_MEDIA_INFO_OFFSET 5 102#define TIPC_MEDIA_INFO_OFFSET 5
103 103
104struct tipc_skb_cb { 104struct tipc_skb_cb {
105 u32 bytes_read;
106 u32 orig_member;
107 struct sk_buff *tail; 105 struct sk_buff *tail;
108 unsigned long nxt_retr; 106 unsigned long nxt_retr;
109 bool validated; 107 unsigned long retr_stamp;
108 u32 bytes_read;
109 u32 orig_member;
110 u16 chain_imp; 110 u16 chain_imp;
111 u16 ackers; 111 u16 ackers;
112 u16 retr_cnt;
113 bool validated;
112}; 114};
113 115
114#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 116#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
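The tipc_skb_cb shuffle is not cosmetic: skb->cb is a fixed 48-byte scratch area, and ordering members from largest to smallest (pointer/long, u32, u16, bool) minimizes padding so the two new fields still fit. A userspace demonstration of the effect:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct padded  { bool a; uint64_t b; bool c; uint64_t d; };	/* 32 bytes */
struct reordered { uint64_t b, d; bool a, c; };			/* 24 bytes */

int main(void)
{
	printf("padded: %zu, reordered: %zu\n",
	       sizeof(struct padded), sizeof(struct reordered));
	return 0;
}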
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 7c0b2b778703..43922d86e510 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -373,9 +373,9 @@ static int tls_push_data(struct sock *sk,
373 struct tls_context *tls_ctx = tls_get_ctx(sk); 373 struct tls_context *tls_ctx = tls_get_ctx(sk);
374 struct tls_prot_info *prot = &tls_ctx->prot_info; 374 struct tls_prot_info *prot = &tls_ctx->prot_info;
375 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); 375 struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
376 int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
377 int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); 376 int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
378 struct tls_record_info *record = ctx->open_record; 377 struct tls_record_info *record = ctx->open_record;
378 int tls_push_record_flags;
379 struct page_frag *pfrag; 379 struct page_frag *pfrag;
380 size_t orig_size = size; 380 size_t orig_size = size;
381 u32 max_open_record_len; 381 u32 max_open_record_len;
@@ -390,6 +390,9 @@ static int tls_push_data(struct sock *sk,
390 if (sk->sk_err) 390 if (sk->sk_err)
391 return -sk->sk_err; 391 return -sk->sk_err;
392 392
393 flags |= MSG_SENDPAGE_DECRYPTED;
394 tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
395
393 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 396 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
394 if (tls_is_partially_sent_record(tls_ctx)) { 397 if (tls_is_partially_sent_record(tls_ctx)) {
395 rc = tls_push_partial_record(sk, tls_ctx, flags); 398 rc = tls_push_partial_record(sk, tls_ctx, flags);
@@ -576,7 +579,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
576 gfp_t sk_allocation = sk->sk_allocation; 579 gfp_t sk_allocation = sk->sk_allocation;
577 580
578 sk->sk_allocation = GFP_ATOMIC; 581 sk->sk_allocation = GFP_ATOMIC;
579 tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL); 582 tls_push_partial_record(sk, ctx,
583 MSG_DONTWAIT | MSG_NOSIGNAL |
584 MSG_SENDPAGE_DECRYPTED);
580 sk->sk_allocation = sk_allocation; 585 sk->sk_allocation = sk_allocation;
581 } 586 }
582} 587}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 9cbbae606ced..43252a801c3f 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -308,6 +308,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
308 if (free_ctx) 308 if (free_ctx)
309 icsk->icsk_ulp_data = NULL; 309 icsk->icsk_ulp_data = NULL;
310 sk->sk_prot = ctx->sk_proto; 310 sk->sk_prot = ctx->sk_proto;
311 if (sk->sk_write_space == tls_write_space)
312 sk->sk_write_space = ctx->sk_write_space;
311 write_unlock_bh(&sk->sk_callback_lock); 313 write_unlock_bh(&sk->sk_callback_lock);
312 release_sock(sk); 314 release_sock(sk);
313 if (ctx->tx_conf == TLS_SW) 315 if (ctx->tx_conf == TLS_SW)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4831ad745f91..327479ce69f5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
2788 2788
2789 /* When last_request->processed becomes true this will be rescheduled */ 2789 /* When last_request->processed becomes true this will be rescheduled */
2790 if (lr && !lr->processed) { 2790 if (lr && !lr->processed) {
2791 reg_process_hint(lr); 2791 pr_debug("Pending regulatory request, waiting for it to be processed...\n");
2792 return; 2792 return;
2793 } 2793 }
2794 2794
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0e35b7b9e35..e74837824cea 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
233 233
234 switch (params->cipher) { 234 switch (params->cipher) {
235 case WLAN_CIPHER_SUITE_TKIP: 235 case WLAN_CIPHER_SUITE_TKIP:
236 /* Extended Key ID can only be used with CCMP/GCMP ciphers */
237 if ((pairwise && key_idx) ||
238 params->mode != NL80211_KEY_RX_TX)
239 return -EINVAL;
240 break;
236 case WLAN_CIPHER_SUITE_CCMP: 241 case WLAN_CIPHER_SUITE_CCMP:
237 case WLAN_CIPHER_SUITE_CCMP_256: 242 case WLAN_CIPHER_SUITE_CCMP_256:
238 case WLAN_CIPHER_SUITE_GCMP: 243 case WLAN_CIPHER_SUITE_GCMP:
239 case WLAN_CIPHER_SUITE_GCMP_256: 244 case WLAN_CIPHER_SUITE_GCMP_256:
240 /* IEEE802.11-2016 allows only 0 and - when using Extended Key 245 /* IEEE802.11-2016 allows only 0 and - when supporting
241 * ID - 1 as index for pairwise keys. 246 * Extended Key ID - 1 as index for pairwise keys.
242 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when 247 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
243 * the driver supports Extended Key ID. 248 * the driver supports Extended Key ID.
244 * @NL80211_KEY_SET_TX can't be set when installing and 249 * @NL80211_KEY_SET_TX can't be set when installing and
245 * validating a key. 250 * validating a key.
246 */ 251 */
247 if (params->mode == NL80211_KEY_NO_TX) { 252 if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
248 if (!wiphy_ext_feature_isset(&rdev->wiphy, 253 params->mode == NL80211_KEY_SET_TX)
249 NL80211_EXT_FEATURE_EXT_KEY_ID)) 254 return -EINVAL;
250 return -EINVAL; 255 if (wiphy_ext_feature_isset(&rdev->wiphy,
251 else if (!pairwise || key_idx < 0 || key_idx > 1) 256 NL80211_EXT_FEATURE_EXT_KEY_ID)) {
257 if (pairwise && (key_idx < 0 || key_idx > 1))
252 return -EINVAL; 258 return -EINVAL;
253 } else if ((pairwise && key_idx) || 259 } else if (pairwise && key_idx) {
254 params->mode == NL80211_KEY_SET_TX) {
255 return -EINVAL; 260 return -EINVAL;
256 } 261 }
257 break; 262 break;
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 83de74ca729a..688aac7a6943 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
365 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); 365 umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
366 if (!umem->pages) { 366 if (!umem->pages) {
367 err = -ENOMEM; 367 err = -ENOMEM;
368 goto out_account; 368 goto out_pin;
369 } 369 }
370 370
371 for (i = 0; i < umem->npgs; i++) 371 for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
373 373
374 return 0; 374 return 0;
375 375
376out_pin:
377 xdp_umem_unpin_pages(umem);
376out_account: 378out_account:
377 xdp_umem_unaccount_pages(umem); 379 xdp_umem_unaccount_pages(umem);
378 return err; 380 return err;
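The xdp_umem fix is the classic goto-unwind repair: the kcalloc() failure path jumped to a label below the point where pages had been pinned, leaking the pins. The new out_pin label restores strict reverse-order teardown. The generic shape of the idiom, as a sketch (helper names are placeholders):

int setup_sketch(void)
{
	int err;

	err = pin_pages();		/* step 1 */
	if (err)
		return err;

	err = alloc_page_table();	/* step 2 */
	if (err)
		goto out_pin;		/* undo step 1 */

	err = register_umem();		/* step 3 */
	if (err)
		goto out_table;		/* undo steps 2 then 1 */

	return 0;

out_table:
	free_page_table();
out_pin:
	unpin_pages();
	return err;
}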
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ca637a72697..ec94f5795ea4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3269 struct flowi4 *fl4 = &fl->u.ip4; 3269 struct flowi4 *fl4 = &fl->u.ip4;
3270 int oif = 0; 3270 int oif = 0;
3271 3271
3272 if (skb_dst(skb)) 3272 if (skb_dst(skb) && skb_dst(skb)->dev)
3273 oif = skb_dst(skb)->dev->ifindex; 3273 oif = skb_dst(skb)->dev->ifindex;
3274 3274
3275 memset(fl4, 0, sizeof(struct flowi4)); 3275 memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3387 3387
3388 nexthdr = nh[nhoff]; 3388 nexthdr = nh[nhoff];
3389 3389
3390 if (skb_dst(skb)) 3390 if (skb_dst(skb) && skb_dst(skb)->dev)
3391 oif = skb_dst(skb)->dev->ifindex; 3391 oif = skb_dst(skb)->dev->ifindex;
3392 3392
3393 memset(fl6, 0, sizeof(struct flowi6)); 3393 memset(fl6, 0, sizeof(struct flowi6));
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 7325f382dbf4..957b9e3e1492 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type,
595 595
596 key = check_cached_key(&ctx); 596 key = check_cached_key(&ctx);
597 if (key) 597 if (key)
598 return key; 598 goto error_free;
599 599
600 /* search all the process keyrings for a key */ 600 /* search all the process keyrings for a key */
601 rcu_read_lock(); 601 rcu_read_lock();
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e73ec040e250..ecba39c93fd9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
66{ 66{
67 struct request_key_auth *rka = dereference_key_rcu(key); 67 struct request_key_auth *rka = dereference_key_rcu(key);
68 68
69 if (!rka)
70 return;
71
69 seq_puts(m, "key:"); 72 seq_puts(m, "key:");
70 seq_puts(m, key->description); 73 seq_puts(m, key->description);
71 if (key_is_positive(key)) 74 if (key_is_positive(key))
@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
83 size_t datalen; 86 size_t datalen;
84 long ret; 87 long ret;
85 88
89 if (!rka)
90 return -EKEYREVOKED;
91
86 datalen = rka->callout_len; 92 datalen = rka->callout_len;
87 ret = datalen; 93 ret = datalen;
88 94
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 7737b2670064..6d9592f0ae1d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
1835 if (cptr->type == USER_CLIENT) { 1835 if (cptr->type == USER_CLIENT) {
1836 info->input_pool = cptr->data.user.fifo_pool_size; 1836 info->input_pool = cptr->data.user.fifo_pool_size;
1837 info->input_free = info->input_pool; 1837 info->input_free = info->input_pool;
1838 if (cptr->data.user.fifo) 1838 info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
1839 info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
1840 } else { 1839 } else {
1841 info->input_pool = 0; 1840 info->input_pool = 0;
1842 info->input_free = 0; 1841 info->input_free = 0;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index ea69261f269a..eaaa8b5830bb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
263 263
264 return 0; 264 return 0;
265} 265}
266
267/* get the number of unused cells safely */
268int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
269{
270 unsigned long flags;
271 int cells;
272
273 if (!f)
274 return 0;
275
276 snd_use_lock_use(&f->use_lock);
277 spin_lock_irqsave(&f->lock, flags);
278 cells = snd_seq_unused_cells(f->pool);
279 spin_unlock_irqrestore(&f->lock, flags);
280 snd_use_lock_free(&f->use_lock);
281 return cells;
282}
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index edc68743943d..b56a7b897c9c 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
53/* resize pool in fifo */ 53/* resize pool in fifo */
54int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize); 54int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
55 55
56/* get the number of unused cells safely */
57int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
56 58
57#endif 59#endif
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9ea39348cdf5..7c6d1c277d4d 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
248 unsigned int channels = params_channels(hw_params); 248 unsigned int channels = params_channels(hw_params);
249 249
250 mutex_lock(&oxfw->mutex); 250 mutex_lock(&oxfw->mutex);
251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
252 rate, channels); 252 rate, channels);
253 if (err >= 0) 253 if (err >= 0)
254 ++oxfw->substreams_count; 254 ++oxfw->substreams_count;
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 92390d457567..18e6546b4467 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
824 while (id >= 0) { 824 while (id >= 0) {
825 const struct hda_fixup *fix = codec->fixup_list + id; 825 const struct hda_fixup *fix = codec->fixup_list + id;
826 826
827 if (++depth > 10)
828 break;
827 if (fix->chained_before) 829 if (fix->chained_before)
828 apply_fixup(codec, fix->chain_id, action, depth + 1); 830 apply_fixup(codec, fix->chain_id, action, depth + 1);
829 831
@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
863 } 865 }
864 if (!fix->chained || fix->chained_before) 866 if (!fix->chained || fix->chained_before)
865 break; 867 break;
866 if (++depth > 10)
867 break;
868 id = fix->chain_id; 868 id = fix->chain_id;
869 } 869 }
870} 870}
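Moving the ++depth check to the top of the while loop means it now also bounds the chained_before recursion, which previously ran before the guard was ever reached; a malformed fixup table chaining to itself could recurse without limit. Minimal shape of the corrected guard (helpers hypothetical):

static void apply_fixup_sketch(int id, int depth)
{
	while (id >= 0) {
		if (++depth > 10)	/* guard first: bounds both paths below */
			break;
		if (is_chained_before(id))
			apply_fixup_sketch(chain_id_of(id), depth + 1);
		/* ... apply fixup 'id' ... */
		if (!is_chained(id))
			break;
		id = chain_id_of(id);
	}
}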
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 5bf24fb819d2..10d502328b76 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
6009 if (spec->init_hook) 6009 if (spec->init_hook)
6010 spec->init_hook(codec); 6010 spec->init_hook(codec);
6011 6011
6012 snd_hda_apply_verbs(codec); 6012 if (!spec->skip_verbs)
6013 snd_hda_apply_verbs(codec);
6013 6014
6014 init_multi_out(codec); 6015 init_multi_out(codec);
6015 init_extra_out(codec); 6016 init_extra_out(codec);
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 5f199dcb0d18..fb9f1a90238b 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -243,6 +243,7 @@ struct hda_gen_spec {
243 unsigned int indep_hp_enabled:1; /* independent HP enabled */ 243 unsigned int indep_hp_enabled:1; /* independent HP enabled */
244 unsigned int have_aamix_ctl:1; 244 unsigned int have_aamix_ctl:1;
245 unsigned int hp_mic_jack_modes:1; 245 unsigned int hp_mic_jack_modes:1;
246 unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
246 247
247 /* additional mute flags (only effective with auto_mute_via_amp=1) */ 248 /* additional mute flags (only effective with auto_mute_via_amp=1) */
248 u64 mute_bits; 249 u64 mute_bits;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0d51823d7270..6d1fb7c11f17 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), 1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), 1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), 1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ), 1179 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
1179 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), 1180 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
1180 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), 1181 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 14298ef45b21..968d3caab6ac 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -611,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
611 611
612/* update LED status via GPIO */ 612/* update LED status via GPIO */
613static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask, 613static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
614 bool enabled) 614 bool led_on)
615{ 615{
616 struct conexant_spec *spec = codec->spec; 616 struct conexant_spec *spec = codec->spec;
617 unsigned int oldval = spec->gpio_led; 617 unsigned int oldval = spec->gpio_led;
618 618
619 if (spec->mute_led_polarity) 619 if (spec->mute_led_polarity)
620 enabled = !enabled; 620 led_on = !led_on;
621 621
622 if (enabled) 622 if (led_on)
623 spec->gpio_led &= ~mask;
624 else
625 spec->gpio_led |= mask; 623 spec->gpio_led |= mask;
624 else
625 spec->gpio_led &= ~mask;
626 codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
627 mask, led_on, spec->gpio_led);
626 if (spec->gpio_led != oldval) 628 if (spec->gpio_led != oldval)
627 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 629 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
628 spec->gpio_led); 630 spec->gpio_led);
@@ -633,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
633{ 635{
634 struct hda_codec *codec = private_data; 636 struct hda_codec *codec = private_data;
635 struct conexant_spec *spec = codec->spec; 637 struct conexant_spec *spec = codec->spec;
636 638 /* muted -> LED on */
637 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); 639 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
638} 640}
639 641
640/* turn on/off mic-mute LED via GPIO per capture hook */ 642/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -656,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
656 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 }, 658 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
657 {} 659 {}
658 }; 660 };
659 codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
660 661
661 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 662 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
662 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook; 663 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e333b3e30e31..c1ddfd2fac52 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
837 if (spec->init_hook) 837 if (spec->init_hook)
838 spec->init_hook(codec); 838 spec->init_hook(codec);
839 839
840 spec->gen.skip_verbs = 1; /* applied below */
840 snd_hda_gen_init(codec); 841 snd_hda_gen_init(codec);
841 alc_fix_pll(codec); 842 alc_fix_pll(codec);
842 alc_auto_init_amp(codec, spec->init_amp); 843 alc_auto_init_amp(codec, spec->init_amp);
844 snd_hda_apply_verbs(codec); /* apply verbs here after own init */
843 845
844 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT); 846 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
845 847
@@ -5797,6 +5799,7 @@ enum {
5797 ALC286_FIXUP_ACER_AIO_HEADSET_MIC, 5799 ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
5798 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, 5800 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
5799 ALC299_FIXUP_PREDATOR_SPK, 5801 ALC299_FIXUP_PREDATOR_SPK,
5802 ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
5800}; 5803};
5801 5804
5802static const struct hda_fixup alc269_fixups[] = { 5805static const struct hda_fixup alc269_fixups[] = {
@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
6837 { } 6840 { }
6838 } 6841 }
6839 }, 6842 },
6843 [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
6844 .type = HDA_FIXUP_PINS,
6845 .v.pins = (const struct hda_pintbl[]) {
6846 { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
6847 { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
6848 { }
6849 },
6850 .chained = true,
6851 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6852 },
6840}; 6853};
6841 6854
6842static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6855static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6979 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6992 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
6980 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6993 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6981 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6994 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6995 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6982 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 6996 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
6983 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 6997 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
6984 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 6998 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6995 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), 7009 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
6996 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 7010 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
6997 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 7011 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
7012 SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
6998 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 7013 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
6999 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), 7014 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
7000 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 7015 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7072 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7087 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7073 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7088 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7074 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7089 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7090 SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
7075 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 7091 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
7076 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 7092 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
7077 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 7093 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
8946static const struct hda_device_id snd_hda_id_realtek[] = { 8962static const struct hda_device_id snd_hda_id_realtek[] = {
8947 HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269), 8963 HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
8948 HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269), 8964 HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
8965 HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
8949 HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269), 8966 HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
8950 HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269), 8967 HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
8951 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), 8968 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 2c03e0f6bf72..f70211e6b174 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
550 line6pcm->volume_monitor = 255; 550 line6pcm->volume_monitor = 255;
551 line6pcm->line6 = line6; 551 line6pcm->line6 = line6;
552 552
553 spin_lock_init(&line6pcm->out.lock);
554 spin_lock_init(&line6pcm->in.lock);
555 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
556
557 line6->line6pcm = line6pcm;
558
559 pcm->private_data = line6pcm;
560 pcm->private_free = line6_cleanup_pcm;
561
553 line6pcm->max_packet_size_in = 562 line6pcm->max_packet_size_in =
554 usb_maxpacket(line6->usbdev, 563 usb_maxpacket(line6->usbdev,
555 usb_rcvisocpipe(line6->usbdev, ep_read), 0); 564 usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
562 return -EINVAL; 571 return -EINVAL;
563 } 572 }
564 573
565 spin_lock_init(&line6pcm->out.lock);
566 spin_lock_init(&line6pcm->in.lock);
567 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
568
569 line6->line6pcm = line6pcm;
570
571 pcm->private_data = line6pcm;
572 pcm->private_free = line6_cleanup_pcm;
573
574 err = line6_create_audio_out_urbs(line6pcm); 574 err = line6_create_audio_out_urbs(line6pcm);
575 if (err < 0) 575 if (err < 0)
576 return err; 576 return err;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index b5927c3d5bc0..eceab19766db 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -739,7 +739,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
739 struct uac_mixer_unit_descriptor *desc) 739 struct uac_mixer_unit_descriptor *desc)
740{ 740{
741 int mu_channels; 741 int mu_channels;
742 void *c;
743 742
744 if (desc->bLength < sizeof(*desc)) 743 if (desc->bLength < sizeof(*desc))
745 return -EINVAL; 744 return -EINVAL;
@@ -762,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
762 break; 761 break;
763 } 762 }
764 763
765 if (!mu_channels)
766 return 0;
767
768 c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
769 if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
770 return 0; /* no bmControls -> skip */
771
772 return mu_channels; 764 return mu_channels;
773} 765}
774 766
@@ -2009,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
2009 * Mixer Unit 2001 * Mixer Unit
2010 */ 2002 */
2011 2003
2004/* check whether the given in/out overflows bmMixerControls matrix */
2005static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
2006 int protocol, int num_ins, int num_outs)
2007{
2008 u8 *hdr = (u8 *)desc;
2009 u8 *c = uac_mixer_unit_bmControls(desc, protocol);
2010 size_t rest; /* remaining bytes after bmMixerControls */
2011
2012 switch (protocol) {
2013 case UAC_VERSION_1:
2014 default:
2015 rest = 1; /* iMixer */
2016 break;
2017 case UAC_VERSION_2:
2018 rest = 2; /* bmControls + iMixer */
2019 break;
2020 case UAC_VERSION_3:
2021 rest = 6; /* bmControls + wMixerDescrStr */
2022 break;
2023 }
2024
2025 /* overflow? */
2026 return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
2027}
2028
2012/* 2029/*
2013 * build a mixer unit control 2030 * build a mixer unit control
2014 * 2031 *
@@ -2137,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2137 if (err < 0) 2154 if (err < 0)
2138 return err; 2155 return err;
2139 num_ins += iterm.channels; 2156 num_ins += iterm.channels;
2157 if (mixer_bitmap_overflow(desc, state->mixer->protocol,
2158 num_ins, num_outs))
2159 break;
2140 for (; ich < num_ins; ich++) { 2160 for (; ich < num_ins; ich++) {
2141 int och, ich_has_controls = 0; 2161 int och, ich_has_controls = 0;
2142 2162
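mixer_bitmap_overflow() guards the bmMixerControls walk: the matrix carries one bit per (input, output) channel pair, so it occupies ceil(num_ins * num_outs / 8) bytes, and that plus the version-specific trailing fields ('rest') must fit inside the descriptor's self-declared length hdr[0]. The byte math, worked standalone:

#include <stdio.h>

int main(void)
{
	int num_ins = 9, num_outs = 2;

	/* one control bit per channel pair, rounded up to whole bytes */
	printf("%d x %d matrix -> %d bitmap byte(s)\n",
	       num_ins, num_outs, (num_ins * num_outs + 7) / 8);	/* 3 */
	return 0;
}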
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 199fa157a411..27dcb3743690 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
1155{ 1155{
1156 struct usb_mixer_interface *mixer; 1156 struct usb_mixer_interface *mixer;
1157 struct usb_mixer_elem_info *cval; 1157 struct usb_mixer_elem_info *cval;
1158 int unitid = 12; /* SamleRate ExtensionUnit ID */ 1158 int unitid = 12; /* SampleRate ExtensionUnit ID */
1159 1159
1160 list_for_each_entry(mixer, &chip->mixer_list, list) { 1160 list_for_each_entry(mixer, &chip->mixer_list, list) {
1161 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); 1161 if (mixer->id_elems[unitid]) {
1162 if (cval) { 1162 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, 1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
1164 cval->control << 8, 1164 cval->control << 8,
1165 samplerate_id); 1165 samplerate_id);
1166 snd_usb_mixer_notify_id(mixer, unitid); 1166 snd_usb_mixer_notify_id(mixer, unitid);
1167 break;
1167 } 1168 }
1168 break;
1169 } 1169 }
1170} 1170}
1171 1171
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 75b96929f76c..e4bbf79de956 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
339 ep = 0x81; 339 ep = 0x81;
340 ifnum = 2; 340 ifnum = 2;
341 goto add_sync_ep_from_ifnum; 341 goto add_sync_ep_from_ifnum;
342 case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
342 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ 343 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
343 ep = 0x81; 344 ep = 0x81;
344 ifnum = 1; 345 ifnum = 1;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 5215e0870bcb..6a71324be628 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -204,7 +204,11 @@ int do_pin_fd(int fd, const char *name)
204 if (err) 204 if (err)
205 return err; 205 return err;
206 206
207 return bpf_obj_pin(fd, name); 207 err = bpf_obj_pin(fd, name);
208 if (err)
209 p_err("can't pin the object (%s): %s", name, strerror(errno));
210
211 return err;
208} 212}
209 213
210int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) 214int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
@@ -237,7 +241,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
237 241
238 fd = get_fd_by_id(id); 242 fd = get_fd_by_id(id);
239 if (fd < 0) { 243 if (fd < 0) {
240 p_err("can't get prog by id (%u): %s", id, strerror(errno)); 244 p_err("can't open object by id (%u): %s", id, strerror(errno));
241 return -1; 245 return -1;
242 } 246 }
243 247
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 66f04a4846a5..43fdbbfe41bb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
363 if (fd < 0) 363 if (fd < 0)
364 return -1; 364 return -1;
365 365
366 return show_prog(fd); 366 err = show_prog(fd);
367 close(fd);
368 return err;
367 } 369 }
368 370
369 if (argc) 371 if (argc)
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index f5597503c771..e9ef4ca6a655 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
809 int sn_offset = 0; 809 int sn_offset = 0;
810 int error = 0; 810 int error = 0;
811 char *buffer; 811 char *buffer;
812 struct hv_kvp_ipaddr_value *ip_buffer; 812 struct hv_kvp_ipaddr_value *ip_buffer = NULL;
813 char cidr_mask[5]; /* /xyz */ 813 char cidr_mask[5]; /* /xyz */
814 int weight; 814 int weight;
815 int i; 815 int i;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4e455018da65..a5aa7d3ac6a1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * An 8-byte long non-decreasing number on success, or 0 if the 1472 * An 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
@@ -1571,8 +1571,11 @@ union bpf_attr {
1571 * but this is only implemented for native XDP (with driver 1571 * but this is only implemented for native XDP (with driver
1572 * support) as of this writing). 1572 * support) as of this writing).
1573 * 1573 *
1574 * All values for *flags* are reserved for future usage, and must 1574 * The lower two bits of *flags* are used as the return code if
1575 * be left at zero. 1575 * the map lookup fails. This is so that the return value can be
1576 * one of the XDP program return codes up to XDP_TX, as chosen by
1577 * the caller. Any higher bits in the *flags* argument must be
1578 * unset.
1576 * 1579 *
1577 * When used to redirect packets to net devices, this helper 1580 * When used to redirect packets to net devices, this helper
1578 * provides a high performance increase over **bpf_redirect**\ (). 1581 * provides a high performance increase over **bpf_redirect**\ ().
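With the semantics documented above, an XDP program can choose its own fallback action instead of the implicit XDP_ABORTED. A sketch, assuming a libbpf recent enough for BTF-defined maps (xsks_map is a hypothetical AF_XDP map):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int redirect_or_pass(struct xdp_md *ctx)
{
	/* low bits of flags = return code on lookup failure: fall back to
	 * the regular stack (XDP_PASS) instead of dropping the packet */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";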
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2586b6cb8f34..2b57d7ea7836 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -182,7 +182,6 @@ struct bpf_program {
182 bpf_program_clear_priv_t clear_priv; 182 bpf_program_clear_priv_t clear_priv;
183 183
184 enum bpf_attach_type expected_attach_type; 184 enum bpf_attach_type expected_attach_type;
185 int btf_fd;
186 void *func_info; 185 void *func_info;
187 __u32 func_info_rec_size; 186 __u32 func_info_rec_size;
188 __u32 func_info_cnt; 187 __u32 func_info_cnt;
@@ -313,7 +312,6 @@ void bpf_program__unload(struct bpf_program *prog)
313 prog->instances.nr = -1; 312 prog->instances.nr = -1;
314 zfree(&prog->instances.fds); 313 zfree(&prog->instances.fds);
315 314
316 zclose(prog->btf_fd);
317 zfree(&prog->func_info); 315 zfree(&prog->func_info);
318 zfree(&prog->line_info); 316 zfree(&prog->line_info);
319} 317}
@@ -392,7 +390,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
392 prog->instances.fds = NULL; 390 prog->instances.fds = NULL;
393 prog->instances.nr = -1; 391 prog->instances.nr = -1;
394 prog->type = BPF_PROG_TYPE_UNSPEC; 392 prog->type = BPF_PROG_TYPE_UNSPEC;
395 prog->btf_fd = -1;
396 393
397 return 0; 394 return 0;
398errout: 395errout:
@@ -2288,9 +2285,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2288 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); 2285 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2289 } 2286 }
2290 2287
2291 if (!insn_offset)
2292 prog->btf_fd = btf__fd(obj->btf);
2293
2294 return 0; 2288 return 0;
2295} 2289}
2296 2290
@@ -2463,7 +2457,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2463 char *cp, errmsg[STRERR_BUFSIZE]; 2457 char *cp, errmsg[STRERR_BUFSIZE];
2464 int log_buf_size = BPF_LOG_BUF_SIZE; 2458 int log_buf_size = BPF_LOG_BUF_SIZE;
2465 char *log_buf; 2459 char *log_buf;
2466 int ret; 2460 int btf_fd, ret;
2467 2461
2468 if (!insns || !insns_cnt) 2462 if (!insns || !insns_cnt)
2469 return -EINVAL; 2463 return -EINVAL;
@@ -2478,7 +2472,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2478 load_attr.license = license; 2472 load_attr.license = license;
2479 load_attr.kern_version = kern_version; 2473 load_attr.kern_version = kern_version;
2480 load_attr.prog_ifindex = prog->prog_ifindex; 2474 load_attr.prog_ifindex = prog->prog_ifindex;
2481 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; 2475 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
2476 if (prog->obj->btf_ext)
2477 btf_fd = bpf_object__btf_fd(prog->obj);
2478 else
2479 btf_fd = -1;
2480 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
2482 load_attr.func_info = prog->func_info; 2481 load_attr.func_info = prog->func_info;
2483 load_attr.func_info_rec_size = prog->func_info_rec_size; 2482 load_attr.func_info_rec_size = prog->func_info_rec_size;
2484 load_attr.func_info_cnt = prog->func_info_cnt; 2483 load_attr.func_info_cnt = prog->func_info_cnt;
@@ -5000,13 +4999,15 @@ int libbpf_num_possible_cpus(void)
5000 static const char *fcpu = "/sys/devices/system/cpu/possible"; 4999 static const char *fcpu = "/sys/devices/system/cpu/possible";
5001 int len = 0, n = 0, il = 0, ir = 0; 5000 int len = 0, n = 0, il = 0, ir = 0;
5002 unsigned int start = 0, end = 0; 5001 unsigned int start = 0, end = 0;
5002 int tmp_cpus = 0;
5003 static int cpus; 5003 static int cpus;
5004 char buf[128]; 5004 char buf[128];
5005 int error = 0; 5005 int error = 0;
5006 int fd = -1; 5006 int fd = -1;
5007 5007
5008 if (cpus > 0) 5008 tmp_cpus = READ_ONCE(cpus);
5009 return cpus; 5009 if (tmp_cpus > 0)
5010 return tmp_cpus;
5010 5011
5011 fd = open(fcpu, O_RDONLY); 5012 fd = open(fcpu, O_RDONLY);
5012 if (fd < 0) { 5013 if (fd < 0) {
@@ -5029,7 +5030,7 @@ int libbpf_num_possible_cpus(void)
5029 } 5030 }
5030 buf[len] = '\0'; 5031 buf[len] = '\0';
5031 5032
5032 for (ir = 0, cpus = 0; ir <= len; ir++) { 5033 for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
5033 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ 5034 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
5034 if (buf[ir] == ',' || buf[ir] == '\0') { 5035 if (buf[ir] == ',' || buf[ir] == '\0') {
5035 buf[ir] = '\0'; 5036 buf[ir] = '\0';
@@ -5041,13 +5042,15 @@ int libbpf_num_possible_cpus(void)
5041 } else if (n == 1) { 5042 } else if (n == 1) {
5042 end = start; 5043 end = start;
5043 } 5044 }
5044 cpus += end - start + 1; 5045 tmp_cpus += end - start + 1;
5045 il = ir + 1; 5046 il = ir + 1;
5046 } 5047 }
5047 } 5048 }
5048 if (cpus <= 0) { 5049 if (tmp_cpus <= 0) {
5049 pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu); 5050 pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
5050 return -EINVAL; 5051 return -EINVAL;
5051 } 5052 }
5052 return cpus; 5053
5054 WRITE_ONCE(cpus, tmp_cpus);
5055 return tmp_cpus;
5053} 5056}
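
The libbpf_num_possible_cpus() change above is the classic lazy-init cache made safe for concurrent callers: compute into a local, publish once. A minimal standalone sketch of the pattern, assuming volatile-based stand-ins for the READ_ONCE/WRITE_ONCE macros libbpf uses (not the kernel's actual definitions):

#include <stdio.h>

/* Stand-ins for READ_ONCE/WRITE_ONCE (assumption: a volatile access is
 * enough here to keep the compiler from tearing or caching the value). */
#define READ_ONCE(x)     (*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

static int cached_cpus;

static int num_possible_cpus(void)
{
	int tmp = READ_ONCE(cached_cpus);	/* fast path: already computed */

	if (tmp > 0)
		return tmp;

	/* Slow path: accumulate into a local so concurrent readers never
	 * observe a half-built count, then publish the final value once. */
	tmp = 4;	/* placeholder for parsing /sys/devices/system/cpu/possible */
	WRITE_ONCE(cached_cpus, tmp);
	return tmp;
}

int main(void)
{
	printf("%d\n", num_possible_cpus());
	return 0;
}
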
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 045f5f7d68ab..13f1e8b9ac52 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,9 +9,10 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11turbostat : turbostat.c 11turbostat : turbostat.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' 14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
15override CFLAGS += -D_FORTIFY_SOURCE=2
15 16
16%: %.c 17%: %.c
17 @mkdir -p $(BUILD_OUTPUT) 18 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 75fc4fb9901c..b2a86438f074 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -39,7 +39,6 @@ FILE *outf;
39int *fd_percpu; 39int *fd_percpu;
40struct timeval interval_tv = {5, 0}; 40struct timeval interval_tv = {5, 0};
41struct timespec interval_ts = {5, 0}; 41struct timespec interval_ts = {5, 0};
42struct timespec one_msec = {0, 1000000};
43unsigned int num_iterations; 42unsigned int num_iterations;
44unsigned int debug; 43unsigned int debug;
45unsigned int quiet; 44unsigned int quiet;
@@ -60,6 +59,7 @@ unsigned int do_irtl_hsw;
60unsigned int units = 1000000; /* MHz etc */ 59unsigned int units = 1000000; /* MHz etc */
61unsigned int genuine_intel; 60unsigned int genuine_intel;
62unsigned int authentic_amd; 61unsigned int authentic_amd;
62unsigned int hygon_genuine;
63unsigned int max_level, max_extended_level; 63unsigned int max_level, max_extended_level;
64unsigned int has_invariant_tsc; 64unsigned int has_invariant_tsc;
65unsigned int do_nhm_platform_info; 65unsigned int do_nhm_platform_info;
@@ -100,6 +100,7 @@ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
101unsigned int has_misc_feature_control; 101unsigned int has_misc_feature_control;
102unsigned int first_counter_read = 1; 102unsigned int first_counter_read = 1;
103int ignore_stdin;
103 104
104#define RAPL_PKG (1 << 0) 105#define RAPL_PKG (1 << 0)
105 /* 0x610 MSR_PKG_POWER_LIMIT */ 106 /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -166,6 +167,7 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
166struct thread_data { 167struct thread_data {
167 struct timeval tv_begin; 168 struct timeval tv_begin;
168 struct timeval tv_end; 169 struct timeval tv_end;
170 struct timeval tv_delta;
169 unsigned long long tsc; 171 unsigned long long tsc;
170 unsigned long long aperf; 172 unsigned long long aperf;
171 unsigned long long mperf; 173 unsigned long long mperf;
@@ -506,6 +508,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
506unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; 508unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
507 509
508#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) 510#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
511#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
509#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 512#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
510#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) 513#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
511#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) 514#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
@@ -849,7 +852,6 @@ int dump_counters(struct thread_data *t, struct core_data *c,
849 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 852 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
850 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 853 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
851 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 854 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
852 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
853 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); 855 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
854 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); 856 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
855 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 857 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
@@ -911,7 +913,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
911 if (DO_BIC(BIC_TOD)) 913 if (DO_BIC(BIC_TOD))
912 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); 914 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
913 915
914 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 916 interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
915 917
916 tsc = t->tsc * tsc_tweak; 918 tsc = t->tsc * tsc_tweak;
917 919
@@ -1287,6 +1289,14 @@ delta_core(struct core_data *new, struct core_data *old)
1287 } 1289 }
1288} 1290}
1289 1291
1292int soft_c1_residency_display(int bic)
1293{
1294 if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
1295 return 0;
1296
1297 return DO_BIC_READ(bic);
1298}
1299
1290/* 1300/*
1291 * old = new - old 1301 * old = new - old
1292 */ 1302 */
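
soft_c1_residency_display() leans on the distinction between the two macros added above: DO_BIC() requires a counter to be both present and enabled for display, while DO_BIC_READ() checks presence only, so turbostat can still read C3/C6/C7 and APERF/MPERF purely to derive C1 residency even when those columns are hidden. A hedged sketch of the two-bitmask idea:

#include <stdio.h>

#define BIC_CPU_c1 (1ULL << 0)
#define BIC_CPU_c6 (1ULL << 1)

static unsigned long long bic_enabled = BIC_CPU_c1;               /* user shows C1 only */
static unsigned long long bic_present = BIC_CPU_c1 | BIC_CPU_c6;  /* HW has both */

#define DO_BIC(b)      (bic_enabled & bic_present & (b))  /* read and display */
#define DO_BIC_READ(b) (bic_present & (b))                /* read only */

int main(void)
{
	/* C6 is hidden but still readable, so C1 can be derived from it. */
	printf("display C6: %llu, read C6: %llu\n",
	       DO_BIC(BIC_CPU_c6), DO_BIC_READ(BIC_CPU_c6));
	return 0;
}
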
@@ -1309,6 +1319,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1309 * over-write old w/ new so we can print end of interval values 1319 * over-write old w/ new so we can print end of interval values
1310 */ 1320 */
1311 1321
1322 timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
1312 old->tv_begin = new->tv_begin; 1323 old->tv_begin = new->tv_begin;
1313 old->tv_end = new->tv_end; 1324 old->tv_end = new->tv_end;
1314 1325
@@ -1322,7 +1333,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1322 1333
1323 old->c1 = new->c1 - old->c1; 1334 old->c1 = new->c1 - old->c1;
1324 1335
1325 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1336 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1337 soft_c1_residency_display(BIC_Avg_MHz)) {
1326 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 1338 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1327 old->aperf = new->aperf - old->aperf; 1339 old->aperf = new->aperf - old->aperf;
1328 old->mperf = new->mperf - old->mperf; 1340 old->mperf = new->mperf - old->mperf;
@@ -1404,6 +1416,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
1404 t->tv_begin.tv_usec = 0; 1416 t->tv_begin.tv_usec = 0;
1405 t->tv_end.tv_sec = 0; 1417 t->tv_end.tv_sec = 0;
1406 t->tv_end.tv_usec = 0; 1418 t->tv_end.tv_usec = 0;
1419 t->tv_delta.tv_sec = 0;
1420 t->tv_delta.tv_usec = 0;
1407 1421
1408 t->tsc = 0; 1422 t->tsc = 0;
1409 t->aperf = 0; 1423 t->aperf = 0;
@@ -1573,6 +1587,9 @@ void compute_average(struct thread_data *t, struct core_data *c,
1573 1587
1574 for_all_cpus(sum_counters, t, c, p); 1588 for_all_cpus(sum_counters, t, c, p);
1575 1589
1590 /* Use the global time delta for the average. */
1591 average.threads.tv_delta = tv_delta;
1592
1576 average.threads.tsc /= topo.num_cpus; 1593 average.threads.tsc /= topo.num_cpus;
1577 average.threads.aperf /= topo.num_cpus; 1594 average.threads.aperf /= topo.num_cpus;
1578 average.threads.mperf /= topo.num_cpus; 1595 average.threads.mperf /= topo.num_cpus;
@@ -1714,7 +1731,7 @@ void get_apic_id(struct thread_data *t)
1714 if (!DO_BIC(BIC_X2APIC)) 1731 if (!DO_BIC(BIC_X2APIC))
1715 return; 1732 return;
1716 1733
1717 if (authentic_amd) { 1734 if (authentic_amd || hygon_genuine) {
1718 unsigned int topology_extensions; 1735 unsigned int topology_extensions;
1719 1736
1720 if (max_extended_level < 0x8000001e) 1737 if (max_extended_level < 0x8000001e)
@@ -1762,19 +1779,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1762 struct msr_counter *mp; 1779 struct msr_counter *mp;
1763 int i; 1780 int i;
1764 1781
1765 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1766
1767 if (cpu_migrate(cpu)) { 1782 if (cpu_migrate(cpu)) {
1768 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1783 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1769 return -1; 1784 return -1;
1770 } 1785 }
1771 1786
1787 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1788
1772 if (first_counter_read) 1789 if (first_counter_read)
1773 get_apic_id(t); 1790 get_apic_id(t);
1774retry: 1791retry:
1775 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1792 t->tsc = rdtsc(); /* we are running on local CPU of interest */
1776 1793
1777 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1794 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1795 soft_c1_residency_display(BIC_Avg_MHz)) {
1778 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1796 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1779 1797
1780 /* 1798 /*
@@ -1851,20 +1869,20 @@ retry:
1851 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1869 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1852 goto done; 1870 goto done;
1853 1871
1854 if (DO_BIC(BIC_CPU_c3)) { 1872 if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
1855 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1873 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1856 return -6; 1874 return -6;
1857 } 1875 }
1858 1876
1859 if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { 1877 if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
1860 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1878 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1861 return -7; 1879 return -7;
1862 } else if (do_knl_cstates) { 1880 } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
1863 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1881 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1864 return -7; 1882 return -7;
1865 } 1883 }
1866 1884
1867 if (DO_BIC(BIC_CPU_c7)) 1885 if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
1868 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1886 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1869 return -8; 1887 return -8;
1870 1888
@@ -2912,6 +2930,7 @@ int snapshot_cpu_lpi_us(void)
2912 if (retval != 1) { 2930 if (retval != 1) {
2913 fprintf(stderr, "Disabling Low Power Idle CPU output\n"); 2931 fprintf(stderr, "Disabling Low Power Idle CPU output\n");
2914 BIC_NOT_PRESENT(BIC_CPU_LPI); 2932 BIC_NOT_PRESENT(BIC_CPU_LPI);
2933 fclose(fp);
2915 return -1; 2934 return -1;
2916 } 2935 }
2917 2936
@@ -2938,6 +2957,7 @@ int snapshot_sys_lpi_us(void)
2938 if (retval != 1) { 2957 if (retval != 1) {
2939 fprintf(stderr, "Disabling Low Power Idle System output\n"); 2958 fprintf(stderr, "Disabling Low Power Idle System output\n");
2940 BIC_NOT_PRESENT(BIC_SYS_LPI); 2959 BIC_NOT_PRESENT(BIC_SYS_LPI);
2960 fclose(fp);
2941 return -1; 2961 return -1;
2942 } 2962 }
2943 fclose(fp); 2963 fclose(fp);
@@ -2985,8 +3005,6 @@ static void signal_handler (int signal)
2985 fprintf(stderr, "SIGUSR1\n"); 3005 fprintf(stderr, "SIGUSR1\n");
2986 break; 3006 break;
2987 } 3007 }
2988 /* make sure this manually-invoked interval is at least 1ms long */
2989 nanosleep(&one_msec, NULL);
2990} 3008}
2991 3009
2992void setup_signal_handler(void) 3010void setup_signal_handler(void)
@@ -3005,29 +3023,38 @@ void setup_signal_handler(void)
3005 3023
3006void do_sleep(void) 3024void do_sleep(void)
3007{ 3025{
3008 struct timeval select_timeout; 3026 struct timeval tout;
3027 struct timespec rest;
3009 fd_set readfds; 3028 fd_set readfds;
3010 int retval; 3029 int retval;
3011 3030
3012 FD_ZERO(&readfds); 3031 FD_ZERO(&readfds);
3013 FD_SET(0, &readfds); 3032 FD_SET(0, &readfds);
3014 3033
3015 if (!isatty(fileno(stdin))) { 3034 if (ignore_stdin) {
3016 nanosleep(&interval_ts, NULL); 3035 nanosleep(&interval_ts, NULL);
3017 return; 3036 return;
3018 } 3037 }
3019 3038
3020 select_timeout = interval_tv; 3039 tout = interval_tv;
3021 retval = select(1, &readfds, NULL, NULL, &select_timeout); 3040 retval = select(1, &readfds, NULL, NULL, &tout);
3022 3041
3023 if (retval == 1) { 3042 if (retval == 1) {
3024 switch (getc(stdin)) { 3043 switch (getc(stdin)) {
3025 case 'q': 3044 case 'q':
3026 exit_requested = 1; 3045 exit_requested = 1;
3027 break; 3046 break;
3047 case EOF:
3048 /*
3049 * 'stdin' is a pipe closed on the other end. There
3050 * won't be any further input.
3051 */
3052 ignore_stdin = 1;
3053 /* Sleep the rest of the time */
3054 rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
3055 rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
3056 nanosleep(&rest, NULL);
3028 } 3057 }
3029 /* make sure this manually-invoked interval is at least 1ms long */
3030 nanosleep(&one_msec, NULL);
3031 } 3058 }
3032} 3059}
3033 3060
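
Two details in the do_sleep() rework are easy to miss: getc() returning EOF means the pipe feeding stdin was closed, so all later intervals skip select() entirely via ignore_stdin, and on Linux select() updates its timeout to the time *not* slept, which is what makes sleeping "the rest" with nanosleep() correct. A reduced sketch of that flow (assumes the Linux semantics of the select() timeout):

#include <stdio.h>
#include <sys/select.h>
#include <time.h>

static int ignore_stdin;

static void sleep_interval(struct timeval interval)
{
	struct timeval tout = interval;	/* select() may modify this on Linux */
	struct timespec rest;
	fd_set readfds;

	if (ignore_stdin) {
		rest.tv_sec = interval.tv_sec;
		rest.tv_nsec = interval.tv_usec * 1000;
		nanosleep(&rest, NULL);
		return;
	}

	FD_ZERO(&readfds);
	FD_SET(0, &readfds);

	if (select(1, &readfds, NULL, NULL, &tout) == 1 &&
	    getc(stdin) == EOF) {
		/* stdin closed: never poll it again, sleep out the interval */
		ignore_stdin = 1;
		rest.tv_sec = tout.tv_sec + tout.tv_usec / 1000000;
		rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
		nanosleep(&rest, NULL);
	}
}

int main(void)
{
	struct timeval five = {5, 0};

	sleep_interval(five);
	return 0;
}
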
@@ -3209,6 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
3209 break; 3236 break;
3210 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3237 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3211 case INTEL_FAM6_HASWELL_X: /* HSX */ 3238 case INTEL_FAM6_HASWELL_X: /* HSX */
3239 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3212 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3240 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3213 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3241 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3214 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3242 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3405,6 +3433,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
3405 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 3433 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3406 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3434 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3407 case INTEL_FAM6_HASWELL_X: /* HSX */ 3435 case INTEL_FAM6_HASWELL_X: /* HSX */
3436 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3408 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3437 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3409 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3438 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3410 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3439 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3803,6 +3832,7 @@ double get_tdp_amd(unsigned int family)
3803{ 3832{
3804 switch (family) { 3833 switch (family) {
3805 case 0x17: 3834 case 0x17:
3835 case 0x18:
3806 default: 3836 default:
3807 /* This is the max stock TDP of HEDT/Server Fam17h chips */ 3837 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3808 return 250.0; 3838 return 250.0;
@@ -3841,6 +3871,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
3841 case INTEL_FAM6_SANDYBRIDGE: 3871 case INTEL_FAM6_SANDYBRIDGE:
3842 case INTEL_FAM6_IVYBRIDGE: 3872 case INTEL_FAM6_IVYBRIDGE:
3843 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3873 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3874 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3844 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3875 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3845 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3876 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3846 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3877 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3982,6 +4013,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
3982 4013
3983 switch (family) { 4014 switch (family) {
3984 case 0x17: /* Zen, Zen+ */ 4015 case 0x17: /* Zen, Zen+ */
4016 case 0x18: /* Hygon Dhyana */
3985 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; 4017 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
3986 if (rapl_joules) { 4018 if (rapl_joules) {
3987 BIC_PRESENT(BIC_Pkg_J); 4019 BIC_PRESENT(BIC_Pkg_J);
@@ -4002,7 +4034,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
4002 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); 4034 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4003 rapl_power_units = ldexp(1.0, -(msr & 0xf)); 4035 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4004 4036
4005 tdp = get_tdp_amd(model); 4037 tdp = get_tdp_amd(family);
4006 4038
4007 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 4039 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4008 if (!quiet) 4040 if (!quiet)
@@ -4018,7 +4050,7 @@ void rapl_probe(unsigned int family, unsigned int model)
4018{ 4050{
4019 if (genuine_intel) 4051 if (genuine_intel)
4020 rapl_probe_intel(family, model); 4052 rapl_probe_intel(family, model);
4021 if (authentic_amd) 4053 if (authentic_amd || hygon_genuine)
4022 rapl_probe_amd(family, model); 4054 rapl_probe_amd(family, model);
4023} 4055}
4024 4056
@@ -4032,6 +4064,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
4032 4064
4033 switch (model) { 4065 switch (model) {
4034 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4066 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4067 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4035 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4068 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4036 do_gfx_perf_limit_reasons = 1; 4069 do_gfx_perf_limit_reasons = 1;
4037 case INTEL_FAM6_HASWELL_X: /* HSX */ 4070 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -4251,6 +4284,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4251 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 4284 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
4252 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4285 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4253 case INTEL_FAM6_HASWELL_X: /* HSW */ 4286 case INTEL_FAM6_HASWELL_X: /* HSW */
4287 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4254 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4288 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4255 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4289 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4256 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 4290 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -4267,7 +4301,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4267} 4301}
4268 4302
4269/* 4303/*
4270 * HSW adds support for additional MSRs: 4304 * HSW ULT added support for C8/C9/C10 MSRs:
4271 * 4305 *
4272 * MSR_PKG_C8_RESIDENCY 0x00000630 4306 * MSR_PKG_C8_RESIDENCY 0x00000630
4273 * MSR_PKG_C9_RESIDENCY 0x00000631 4307 * MSR_PKG_C9_RESIDENCY 0x00000631
@@ -4278,13 +4312,13 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4278 * MSR_PKGC10_IRTL 0x00000635 4312 * MSR_PKGC10_IRTL 0x00000635
4279 * 4313 *
4280 */ 4314 */
4281int has_hsw_msrs(unsigned int family, unsigned int model) 4315int has_c8910_msrs(unsigned int family, unsigned int model)
4282{ 4316{
4283 if (!genuine_intel) 4317 if (!genuine_intel)
4284 return 0; 4318 return 0;
4285 4319
4286 switch (model) { 4320 switch (model) {
4287 case INTEL_FAM6_HASWELL_CORE: 4321 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4288 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4322 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4289 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4323 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
4290 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4324 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
@@ -4568,9 +4602,6 @@ unsigned int intel_model_duplicates(unsigned int model)
4568 case INTEL_FAM6_XEON_PHI_KNM: 4602 case INTEL_FAM6_XEON_PHI_KNM:
4569 return INTEL_FAM6_XEON_PHI_KNL; 4603 return INTEL_FAM6_XEON_PHI_KNL;
4570 4604
4571 case INTEL_FAM6_HASWELL_ULT:
4572 return INTEL_FAM6_HASWELL_CORE;
4573
4574 case INTEL_FAM6_BROADWELL_X: 4605 case INTEL_FAM6_BROADWELL_X:
4575 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 4606 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
4576 return INTEL_FAM6_BROADWELL_X; 4607 return INTEL_FAM6_BROADWELL_X;
@@ -4582,7 +4613,11 @@ unsigned int intel_model_duplicates(unsigned int model)
4582 return INTEL_FAM6_SKYLAKE_MOBILE; 4613 return INTEL_FAM6_SKYLAKE_MOBILE;
4583 4614
4584 case INTEL_FAM6_ICELAKE_MOBILE: 4615 case INTEL_FAM6_ICELAKE_MOBILE:
4616 case INTEL_FAM6_ICELAKE_NNPI:
4585 return INTEL_FAM6_CANNONLAKE_MOBILE; 4617 return INTEL_FAM6_CANNONLAKE_MOBILE;
4618
4619 case INTEL_FAM6_ATOM_TREMONT_X:
4620 return INTEL_FAM6_ATOM_GOLDMONT_X;
4586 } 4621 }
4587 return model; 4622 return model;
4588} 4623}
@@ -4600,6 +4635,8 @@ void process_cpuid()
4600 genuine_intel = 1; 4635 genuine_intel = 1;
4601 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) 4636 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
4602 authentic_amd = 1; 4637 authentic_amd = 1;
4638 else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
4639 hygon_genuine = 1;
4603 4640
4604 if (!quiet) 4641 if (!quiet)
4605 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 4642 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
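
The three magic constants are the vendor string "HygonGenuine" viewed as little-endian words: CPUID leaf 0 returns the 12-byte vendor ID in EBX, EDX, ECX (in that order), so EBX=0x6f677948 is "Hygo", EDX=0x6e65476e is "nGen", ECX=0x656e6975 is "uine". A quick standalone check (assumes a little-endian host):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int ebx = 0x6f677948, edx = 0x6e65476e, ecx = 0x656e6975;
	char vendor[13];

	/* CPUID(0) vendor order is EBX, EDX, ECX */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';

	printf("%s\n", vendor);	/* prints: HygonGenuine */
	return 0;
}
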
@@ -4820,12 +4857,12 @@ void process_cpuid()
4820 BIC_NOT_PRESENT(BIC_CPU_c7); 4857 BIC_NOT_PRESENT(BIC_CPU_c7);
4821 BIC_NOT_PRESENT(BIC_Pkgpc7); 4858 BIC_NOT_PRESENT(BIC_Pkgpc7);
4822 } 4859 }
4823 if (has_hsw_msrs(family, model)) { 4860 if (has_c8910_msrs(family, model)) {
4824 BIC_PRESENT(BIC_Pkgpc8); 4861 BIC_PRESENT(BIC_Pkgpc8);
4825 BIC_PRESENT(BIC_Pkgpc9); 4862 BIC_PRESENT(BIC_Pkgpc9);
4826 BIC_PRESENT(BIC_Pkgpc10); 4863 BIC_PRESENT(BIC_Pkgpc10);
4827 } 4864 }
4828 do_irtl_hsw = has_hsw_msrs(family, model); 4865 do_irtl_hsw = has_c8910_msrs(family, model);
4829 if (has_skl_msrs(family, model)) { 4866 if (has_skl_msrs(family, model)) {
4830 BIC_PRESENT(BIC_Totl_c0); 4867 BIC_PRESENT(BIC_Totl_c0);
4831 BIC_PRESENT(BIC_Any_c0); 4868 BIC_PRESENT(BIC_Any_c0);
@@ -5123,7 +5160,7 @@ int initialize_counters(int cpu_id)
5123 5160
5124void allocate_output_buffer() 5161void allocate_output_buffer()
5125{ 5162{
5126 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 5163 output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
5127 outp = output_buffer; 5164 outp = output_buffer;
5128 if (outp == NULL) 5165 if (outp == NULL)
5129 err(-1, "calloc output buffer"); 5166 err(-1, "calloc output buffer");
@@ -5269,7 +5306,7 @@ int get_and_dump_counters(void)
5269} 5306}
5270 5307
5271void print_version() { 5308void print_version() {
5272 fprintf(outf, "turbostat version 19.03.20" 5309 fprintf(outf, "turbostat version 19.08.31"
5273 " - Len Brown <lenb@kernel.org>\n"); 5310 " - Len Brown <lenb@kernel.org>\n");
5274} 5311}
5275 5312
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 1fdeef864e7c..666b325a62a2 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -9,8 +9,9 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11x86_energy_perf_policy : x86_energy_perf_policy.c 11x86_energy_perf_policy : x86_energy_perf_policy.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -D_FORTIFY_SOURCE=2
14 15
15%: %.c 16%: %.c
16 @mkdir -p $(BUILD_OUTPUT) 17 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 17db1c3af4d0..78c6361898b1 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -40,7 +40,7 @@ in the same processor package.
40Hardware P-States (HWP) are effectively an expansion of hardware 40Hardware P-States (HWP) are effectively an expansion of hardware
41P-state control from the opportunistic turbo-mode P-state range 41P-state control from the opportunistic turbo-mode P-state range
42to include the entire range of available P-states. 42to include the entire range of available P-states.
43On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP. 43On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
44That influence was removed in subsequent generations, 44That influence was removed in subsequent generations,
45where it was moved to the 45where it was moved to the
46Energy_Performance_Preference (EPP) field in 46Energy_Performance_Preference (EPP) field in
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 34a796b303fe..3fe1eed900d4 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
545 545
546 progname = argv[0]; 546 progname = argv[0];
547 547
548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw", 548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
549 long_options, &option_index)) != -1) { 549 long_options, &option_index)) != -1) {
550 switch (opt) { 550 switch (opt) {
551 case 'a': 551 case 'a':
@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
1259 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 1259 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1261} 1261}
1262
1263static void get_cpuid_or_exit(unsigned int leaf,
1264 unsigned int *eax, unsigned int *ebx,
1265 unsigned int *ecx, unsigned int *edx)
1266{
1267 if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
1268 errx(1, "Processor not supported\n");
1269}
1270
1262/* 1271/*
1263 * early_cpuid() 1272 * early_cpuid()
1264 * initialize turbo_is_enabled, has_hwp, has_epb 1273 * initialize turbo_is_enabled, has_hwp, has_epb
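
Dropping the explicit max_level < 6 test works because GCC's __get_cpuid() already returns 0 when the requested leaf exceeds what CPUID(0) reports, so funneling every call through get_cpuid_or_exit() catches unsupported processors at whichever leaf first fails. A sketch of the intended usage, mirroring the helper above:

#include <cpuid.h>
#include <err.h>
#include <stdio.h>

static void get_cpuid_or_exit(unsigned int leaf, unsigned int *eax,
			      unsigned int *ebx, unsigned int *ecx,
			      unsigned int *edx)
{
	/* __get_cpuid() returns 0 when 'leaf' is above the maximum level
	 * reported by CPUID(0), so no separate max_level check is needed. */
	if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
		errx(1, "Processor not supported");
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	get_cpuid_or_exit(6, &eax, &ebx, &ecx, &edx);
	printf("turbo enabled: %u\n", (eax >> 1) & 1);
	return 0;
}
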
@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
1266 */ 1275 */
1267void early_cpuid(void) 1276void early_cpuid(void)
1268{ 1277{
1269 unsigned int eax, ebx, ecx, edx, max_level; 1278 unsigned int eax, ebx, ecx, edx;
1270 unsigned int fms, family, model; 1279 unsigned int fms, family, model;
1271 1280
1272 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1281 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1273
1274 if (max_level < 6)
1275 errx(1, "Processor not supported\n");
1276
1277 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1278 family = (fms >> 8) & 0xf; 1282 family = (fms >> 8) & 0xf;
1279 model = (fms >> 4) & 0xf; 1283 model = (fms >> 4) & 0xf;
1280 if (family == 6 || family == 0xf) 1284 if (family == 6 || family == 0xf)
@@ -1288,7 +1292,7 @@ void early_cpuid(void)
1288 bdx_highest_ratio = msr & 0xFF; 1292 bdx_highest_ratio = msr & 0xFF;
1289 } 1293 }
1290 1294
1291 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1295 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1292 turbo_is_enabled = (eax >> 1) & 1; 1296 turbo_is_enabled = (eax >> 1) & 1;
1293 has_hwp = (eax >> 7) & 1; 1297 has_hwp = (eax >> 7) & 1;
1294 has_epb = (ecx >> 3) & 1; 1298 has_epb = (ecx >> 3) & 1;
@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
1306 1310
1307 eax = ebx = ecx = edx = 0; 1311 eax = ebx = ecx = edx = 0;
1308 1312
1309 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1313 get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
1310 1314
1311 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 1315 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1312 genuine_intel = 1; 1316 genuine_intel = 1;
@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
1315 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", 1319 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
1316 (char *)&ebx, (char *)&edx, (char *)&ecx); 1320 (char *)&ebx, (char *)&edx, (char *)&ecx);
1317 1321
1318 __get_cpuid(1, &fms, &ebx, &ecx, &edx); 1322 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1319 family = (fms >> 8) & 0xf; 1323 family = (fms >> 8) & 0xf;
1320 model = (fms >> 4) & 0xf; 1324 model = (fms >> 4) & 0xf;
1321 stepping = fms & 0xf; 1325 stepping = fms & 0xf;
@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
1340 errx(1, "CPUID: no MSR"); 1344 errx(1, "CPUID: no MSR");
1341 1345
1342 1346
1343 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1347 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1344 /* turbo_is_enabled already set */ 1348 /* turbo_is_enabled already set */
1345 /* has_hwp already set */ 1349 /* has_hwp already set */
1346 has_hwp_notify = eax & (1 << 8); 1350 has_hwp_notify = eax & (1 << 8);
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c085964e1d05..96752ebd938f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) 34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
35TEST_GEN_FILES = $(BPF_OBJ_FILES) 35TEST_GEN_FILES = $(BPF_OBJ_FILES)
36 36
37BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
38TEST_FILES = $(BTF_C_FILES)
39
37# Also test sub-register code-gen if LLVM has eBPF v3 processor support which 40# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
38# contains both ALU32 and JMP32 instructions. 41# contains both ALU32 and JMP32 instructions.
39SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ 42SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
68TEST_PROGS_EXTENDED := with_addr.sh \ 71TEST_PROGS_EXTENDED := with_addr.sh \
69 with_tunnels.sh \ 72 with_tunnels.sh \
70 tcp_client.py \ 73 tcp_client.py \
71 tcp_server.py 74 tcp_server.py \
75 test_xdp_vlan.sh
72 76
73# Compile but not part of 'make run_tests' 77# Compile but not part of 'make run_tests'
74TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ 78TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f7a0744db31e..5dc109f4c097 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
34CONFIG_MPLS_ROUTING=m 34CONFIG_MPLS_ROUTING=m
35CONFIG_MPLS_IPTUNNEL=m 35CONFIG_MPLS_IPTUNNEL=m
36CONFIG_IPV6_SIT=m 36CONFIG_IPV6_SIT=m
37CONFIG_BPF_JIT=y
diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c
index 8f850823d35f..6e75dd3cb14f 100644
--- a/tools/testing/selftests/bpf/test_btf_dump.c
+++ b/tools/testing/selftests/bpf/test_btf_dump.c
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
97 } 97 }
98 98
99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); 99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
100 if (access(test_file, R_OK) == -1)
101 /*
102 * When the test is run with O=, kselftest copies TEST_FILES
103 * without preserving the directory structure.
104 */
105 snprintf(test_file, sizeof(test_file), "%s.c",
106 test_case->name);
100 /* 107 /*
101 * Diff test output and expected test output, contained between 108 * Diff test output and expected test output, contained between
102 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. 109 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 2fc4625c1a15..655729004391 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
22 BPF_FUNC_get_local_storage), 22 BPF_FUNC_get_local_storage),
23 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 23 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), 24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
25 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), 25 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
26 26
27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ 27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
30 BPF_FUNC_get_local_storage), 30 BPF_FUNC_get_local_storage),
31 BPF_MOV64_IMM(BPF_REG_1, 1), 31 BPF_MOV64_IMM(BPF_REG_1, 1),
32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 33 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), 34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
36 BPF_EXIT_INSN(), 36 BPF_EXIT_INSN(),
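
The switch from BPF_W to BPF_DW matters because bpf_get_local_storage() returns a pointer to a 64-bit counter: 32-bit loads and stores only touch the low half, yet the BPF_STX_XADD in between operates on the full 64 bits. In C terms, the corrected instruction sequence does roughly this (a sketch, not the test's literal program):

#include <stdint.h>
#include <stdio.h>

/* What the fixed instruction sequence does, expressed in C; 'storage'
 * stands for the u64 pointer bpf_get_local_storage() returns. */
static uint64_t touch_storage(uint64_t *storage)
{
	uint64_t v = *storage;            /* BPF_LDX_MEM(BPF_DW, ...)       */
	v += 1;                           /* BPF_ALU64_IMM(BPF_ADD, ..., 1) */
	*storage = v;                     /* BPF_STX_MEM(BPF_DW, ...)       */

	__sync_fetch_and_add(storage, 1); /* BPF_STX_XADD(BPF_DW, ...)      */

	return *storage & 0x1;            /* BPF_LDX_MEM + BPF_ALU64 AND    */
}

int main(void)
{
	uint64_t counter = 0;

	printf("%llu\n", (unsigned long long)touch_storage(&counter));
	return 0;
}
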
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fb679ac3d4b0..0e6652733462 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
13#include <bpf/bpf.h> 13#include <bpf/bpf.h>
14 14
15#include "cgroup_helpers.h" 15#include "cgroup_helpers.h"
16#include "bpf_endian.h"
16#include "bpf_rlimit.h" 17#include "bpf_rlimit.h"
17#include "bpf_util.h" 18#include "bpf_util.h"
18 19
@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
232 /* if (ip == expected && port == expected) */ 233 /* if (ip == expected && port == expected) */
233 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 234 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
234 offsetof(struct bpf_sock, src_ip6[3])), 235 offsetof(struct bpf_sock, src_ip6[3])),
235 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), 236 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
237 __bpf_constant_ntohl(0x00000001), 4),
236 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 238 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
237 offsetof(struct bpf_sock, src_port)), 239 offsetof(struct bpf_sock, src_port)),
238 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), 240 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
261 /* if (ip == expected && port == expected) */ 263 /* if (ip == expected && port == expected) */
262 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 264 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
263 offsetof(struct bpf_sock, src_ip4)), 265 offsetof(struct bpf_sock, src_ip4)),
264 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), 266 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
267 __bpf_constant_ntohl(0x7F000001), 4),
265 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 268 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
266 offsetof(struct bpf_sock, src_port)), 269 offsetof(struct bpf_sock, src_port)),
267 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), 270 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
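
bpf_sock's src_ip4/src_ip6 fields hold addresses in network byte order, so the old literals 0x0100007F and 0x01000000 only matched on little-endian hosts; wrapping the human-readable values in __bpf_constant_ntohl() keeps the comparison correct on either endianness. The equivalence, sketched with the userspace byte-order helper:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	/* 127.0.0.1 as a host-order constant, converted once so it can be
	 * compared against a network-byte-order field like src_ip4. */
	unsigned int expected = htonl(0x7F000001);

	/* On little-endian hosts this equals the old literal 0x0100007F;
	 * on big-endian hosts it stays 0x7F000001 - correct either way. */
	printf("0x%08X\n", expected);
	return 0;
}
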
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
index 5e980a5ab69d..1fc4e61e9f9f 100644
--- a/tools/testing/selftests/bpf/verifier/loops1.c
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -159,3 +159,31 @@
159 .errstr = "loop detected", 159 .errstr = "loop detected",
160 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 160 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
161}, 161},
162{
163 "not-taken loop with back jump to 1st insn",
164 .insns = {
165 BPF_MOV64_IMM(BPF_REG_0, 123),
166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2),
167 BPF_EXIT_INSN(),
168 },
169 .result = ACCEPT,
170 .prog_type = BPF_PROG_TYPE_XDP,
171 .retval = 123,
172},
173{
174 "taken loop with back jump to 1st insn",
175 .insns = {
176 BPF_MOV64_IMM(BPF_REG_1, 10),
177 BPF_MOV64_IMM(BPF_REG_2, 0),
178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
179 BPF_EXIT_INSN(),
180 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
181 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
182 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3),
183 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
184 BPF_EXIT_INSN(),
185 },
186 .result = ACCEPT,
187 .prog_type = BPF_PROG_TYPE_XDP,
188 .retval = 55,
189},
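
The second test builds a bounded loop inside a BPF-to-BPF subprogram (the BPF_CALL with instruction offset 1): r1 counts down from 10 while r2 accumulates, so the verifier must prove the back jump terminates and that the program returns 10+9+...+1 = 55. The same logic rendered in C, as a sketch:

#include <stdio.h>

/* C rendering of the "taken loop with back jump to 1st insn" test body. */
static int loop_sum(void)
{
	int r1 = 10, r2 = 0;

	do {			/* the back jump targets the first insn */
		r2 += r1;
		r1 -= 1;
	} while (r1 != 0);

	return r2;		/* 10 + 9 + ... + 1 = 55 */
}

int main(void)
{
	printf("%d\n", loop_sum());	/* prints: 55 */
	return 0;
}
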
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
index 4059014d93ea..4912d23844bc 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
220struct hv_enlightened_vmcs *current_evmcs; 220struct hv_enlightened_vmcs *current_evmcs;
221struct hv_vp_assist_page *current_vp_assist; 221struct hv_vp_assist_page *current_vp_assist;
222 222
223int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
224
223static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) 225static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
224{ 226{
225 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | 227 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6cb34a0fa200..0a5e487dbc50 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
1060 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", 1060 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
1061 r); 1061 r);
1062 1062
1063 r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); 1063 if (kvm_check_cap(KVM_CAP_XCRS)) {
1064 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", 1064 r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1065 r); 1065 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
1066 r);
1067 }
1066 1068
1067 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); 1069 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
1068 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", 1070 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
1103 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", 1105 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
1104 r); 1106 r);
1105 1107
1106 r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); 1108 if (kvm_check_cap(KVM_CAP_XCRS)) {
1107 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", 1109 r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
1108 r); 1110 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
1111 r);
1112 }
1109 1113
1110 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); 1114 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
1111 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", 1115 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
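
KVM only accepts KVM_GET_XCRS/KVM_SET_XCRS when it advertises KVM_CAP_XCRS (which, as I understand it, is absent on hosts without the underlying XSAVE support), so probing the capability first lets the save/restore helpers skip those ioctls instead of asserting. A runnable sketch of the same capability probe against /dev/kvm:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0) {
		perror("/dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns 0 for an unsupported capability;
	 * only a positive result makes the XCRS ioctls well-defined. */
	if (ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_XCRS) > 0)
		printf("KVM_CAP_XCRS available: XCRS ioctls may be used\n");
	else
		printf("no KVM_CAP_XCRS: skip KVM_{GET,SET}_XCRS\n");
	return 0;
}
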
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 204f847bd065..9cef0455b819 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -12,6 +12,26 @@
12 12
13bool enable_evmcs; 13bool enable_evmcs;
14 14
15int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
16{
17 uint16_t evmcs_ver;
18
19 struct kvm_enable_cap enable_evmcs_cap = {
20 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
21 .args[0] = (unsigned long)&evmcs_ver
22 };
23
24 vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
25
26 /* KVM should return supported EVMCS version range */
27 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
28 (evmcs_ver & 0xff) > 0,
29 "Incorrect EVMCS version range: %x:%x\n",
30 evmcs_ver & 0xff, evmcs_ver >> 8);
31
32 return evmcs_ver;
33}
34
15/* Allocate memory regions for nested VMX tests. 35/* Allocate memory regions for nested VMX tests.
16 * 36 *
17 * Input Args: 37 * Input Args:
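
As the TEST_ASSERT in vcpu_enable_evmcs() implies, KVM reports the supported eVMCS version range packed into the 16-bit value it writes through args[0]: the low byte is the lowest supported version (which must be non-zero) and the high byte the highest. A tiny decoding sketch, with an assumed example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t evmcs_ver = 0x0101;	/* example value KVM might report */
	uint8_t lo = evmcs_ver & 0xff;	/* lowest supported eVMCS version */
	uint8_t hi = evmcs_ver >> 8;	/* highest supported eVMCS version */

	/* Mirrors the TEST_ASSERT: range non-empty, lowest version > 0. */
	if (hi >= lo && lo > 0)
		printf("eVMCS versions %u..%u supported\n", lo, hi);
	return 0;
}
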
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index f95c08343b48..92915e6408e7 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
79 struct kvm_x86_state *state; 79 struct kvm_x86_state *state;
80 struct ucall uc; 80 struct ucall uc;
81 int stage; 81 int stage;
82 uint16_t evmcs_ver;
83 struct kvm_enable_cap enable_evmcs_cap = {
84 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
85 .args[0] = (unsigned long)&evmcs_ver
86 };
87 82
88 /* Create VM */ 83 /* Create VM */
89 vm = vm_create_default(VCPU_ID, 0, guest_code); 84 vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
96 exit(KSFT_SKIP); 91 exit(KSFT_SKIP);
97 } 92 }
98 93
99 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 94 vcpu_enable_evmcs(vm, VCPU_ID);
100
101 /* KVM should return supported EVMCS version range */
102 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
103 (evmcs_ver & 0xff) > 0,
104 "Incorrect EVMCS version range: %x:%x\n",
105 evmcs_ver & 0xff, evmcs_ver >> 8);
106 95
107 run = vcpu_state(vm, VCPU_ID); 96 run = vcpu_state(vm, VCPU_ID);
108 97
@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
146 kvm_vm_restart(vm, O_RDWR); 135 kvm_vm_restart(vm, O_RDWR);
147 vm_vcpu_add(vm, VCPU_ID); 136 vm_vcpu_add(vm, VCPU_ID);
148 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 137 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
149 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 138 vcpu_enable_evmcs(vm, VCPU_ID);
150 vcpu_load_state(vm, VCPU_ID, state); 139 vcpu_load_state(vm, VCPU_ID, state);
151 run = vcpu_state(vm, VCPU_ID); 140 run = vcpu_state(vm, VCPU_ID);
152 free(state); 141 free(state);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index f72b3043db0e..ee59831fbc98 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -18,6 +18,7 @@
18#include "test_util.h" 18#include "test_util.h"
19#include "kvm_util.h" 19#include "kvm_util.h"
20#include "processor.h" 20#include "processor.h"
21#include "vmx.h"
21 22
22#define VCPU_ID 0 23#define VCPU_ID 0
23 24
@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
106{ 107{
107 struct kvm_vm *vm; 108 struct kvm_vm *vm;
108 int rv; 109 int rv;
109 uint16_t evmcs_ver;
110 struct kvm_cpuid2 *hv_cpuid_entries; 110 struct kvm_cpuid2 *hv_cpuid_entries;
111 struct kvm_enable_cap enable_evmcs_cap = {
112 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
113 .args[0] = (unsigned long)&evmcs_ver
114 };
115 111
116 /* Tell stdout not to buffer its content */ 112 /* Tell stdout not to buffer its content */
117 setbuf(stdout, NULL); 113 setbuf(stdout, NULL);
@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
136 132
137 free(hv_cpuid_entries); 133 free(hv_cpuid_entries);
138 134
139 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 135 if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
140
141 if (rv) {
142 fprintf(stderr, 136 fprintf(stderr,
143 "Enlightened VMCS is unsupported, skip related test\n"); 137 "Enlightened VMCS is unsupported, skip related test\n");
144 goto vm_free; 138 goto vm_free;
145 } 139 }
146 140
141 vcpu_enable_evmcs(vm, VCPU_ID);
142
147 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); 143 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
148 if (!hv_cpuid_entries) 144 if (!hv_cpuid_entries)
149 return 1; 145 return 1;
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 40050e44ec0a..f9334bd3cce9 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
99 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); 99 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
100 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, 100 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
101 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 101 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
102 test_msr_platform_info_disabled(vm);
103 test_msr_platform_info_enabled(vm); 102 test_msr_platform_info_enabled(vm);
103 test_msr_platform_info_disabled(vm);
104 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); 104 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
105 105
106 kvm_vm_free(vm); 106 kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index ed7218d166da..853e370e8a39 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -25,24 +25,17 @@
25#define VMCS12_REVISION 0x11e57ed0 25#define VMCS12_REVISION 0x11e57ed0
26#define VCPU_ID 5 26#define VCPU_ID 5
27 27
28bool have_evmcs;
29
28void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) 30void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
29{ 31{
30 volatile struct kvm_run *run;
31
32 vcpu_nested_state_set(vm, VCPU_ID, state, false); 32 vcpu_nested_state_set(vm, VCPU_ID, state, false);
33 run = vcpu_state(vm, VCPU_ID);
34 vcpu_run(vm, VCPU_ID);
35 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
36 "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
37 run->exit_reason,
38 exit_reason_str(run->exit_reason));
39} 33}
40 34
41void test_nested_state_expect_errno(struct kvm_vm *vm, 35void test_nested_state_expect_errno(struct kvm_vm *vm,
42 struct kvm_nested_state *state, 36 struct kvm_nested_state *state,
43 int expected_errno) 37 int expected_errno)
44{ 38{
45 volatile struct kvm_run *run;
46 int rv; 39 int rv;
47 40
48 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); 41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
50 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", 43 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
51 strerror(expected_errno), expected_errno, rv, strerror(errno), 44 strerror(expected_errno), expected_errno, rv, strerror(errno),
52 errno); 45 errno);
53 run = vcpu_state(vm, VCPU_ID);
54 vcpu_run(vm, VCPU_ID);
55 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
56 "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
57 run->exit_reason,
58 exit_reason_str(run->exit_reason));
59} 46}
60 47
61void test_nested_state_expect_einval(struct kvm_vm *vm, 48void test_nested_state_expect_einval(struct kvm_vm *vm,
@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
90{ 77{
91 memset(state, 0, size); 78 memset(state, 0, size);
92 state->flags = KVM_STATE_NESTED_GUEST_MODE | 79 state->flags = KVM_STATE_NESTED_GUEST_MODE |
93 KVM_STATE_NESTED_RUN_PENDING | 80 KVM_STATE_NESTED_RUN_PENDING;
94 KVM_STATE_NESTED_EVMCS; 81 if (have_evmcs)
82 state->flags |= KVM_STATE_NESTED_EVMCS;
95 state->format = 0; 83 state->format = 0;
96 state->size = size; 84 state->size = size;
97 state->hdr.vmx.vmxon_pa = 0x1000; 85 state->hdr.vmx.vmxon_pa = 0x1000;
@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
141 /* 129 /*
142 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without 130 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
143 * setting the nested state but flags other than eVMCS must be clear. 131 * setting the nested state but flags other than eVMCS must be clear.
132 * The eVMCS flag can be set if the enlightened VMCS capability has
133 * been enabled.
144 */ 134 */
145 set_default_vmx_state(state, state_sz); 135 set_default_vmx_state(state, state_sz);
146 state->hdr.vmx.vmxon_pa = -1ull; 136 state->hdr.vmx.vmxon_pa = -1ull;
147 state->hdr.vmx.vmcs12_pa = -1ull; 137 state->hdr.vmx.vmcs12_pa = -1ull;
148 test_nested_state_expect_einval(vm, state); 138 test_nested_state_expect_einval(vm, state);
149 139
150 state->flags = KVM_STATE_NESTED_EVMCS; 140 state->flags &= KVM_STATE_NESTED_EVMCS;
141 if (have_evmcs) {
142 test_nested_state_expect_einval(vm, state);
143 vcpu_enable_evmcs(vm, VCPU_ID);
144 }
151 test_nested_state(vm, state); 145 test_nested_state(vm, state);
152 146
153 /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */ 147 /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
232 struct kvm_nested_state state; 226 struct kvm_nested_state state;
233 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 227 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
234 228
229 have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
230
235 if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) { 231 if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
236 printf("KVM_CAP_NESTED_STATE not available, skipping test\n"); 232 printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
237 exit(KSFT_SKIP); 233 exit(KSFT_SKIP);
diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
index 41476399e184..f6e65674b83c 100755
--- a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
+++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
@@ -30,7 +30,7 @@ do_test() {
30 ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1" 30 ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1"
31 val=$(ip netns exec "${NETNS}" nstat -az | \ 31 val=$(ip netns exec "${NETNS}" nstat -az | \
32 grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}') 32 grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}')
33 if [ $val -ne 0 ]; then 33 if [ "$val" != 0 ]; then
34 echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero" 34 echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero"
35 return 1 35 return 1
36 fi 36 fi
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index fe52488a6f72..16571ac1dab4 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -321,4 +321,52 @@ else
321 ip netns exec nsr1 nft list ruleset 321 ip netns exec nsr1 nft list ruleset
322fi 322fi
323 323
324KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
325KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
326SPI1=$RANDOM
327SPI2=$RANDOM
328
329if [ $SPI1 -eq $SPI2 ]; then
330 SPI2=$((SPI2+1))
331fi
332
333do_esp() {
334 local ns=$1
335 local me=$2
336 local remote=$3
337 local lnet=$4
338 local rnet=$5
339 local spi_out=$6
340 local spi_in=$7
341
342 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
343 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
344
345 # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
346 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow
347 # to fwd decrypted packets after esp processing:
348 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 1 action allow
349
350}
351
352do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
353
354do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
355
356ip netns exec nsr1 nft delete table ip nat
357
358# restore default routes
359ip -net ns2 route del 192.168.10.1 via 10.0.2.1
360ip -net ns2 route add default via 10.0.2.1
361ip -net ns2 route add default via dead:2::1
362
363test_tcp_forwarding ns1 ns2
364if [ $? -eq 0 ] ;then
365 echo "PASS: ipsec tunnel mode for ns1/ns2"
366else
367 echo "FAIL: ipsec tunnel mode for ns1/ns2"
368 ip netns exec nsr1 nft list ruleset 1>&2
369 ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
370fi
371
324exit $ret 372exit $ret
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
index affa7f2d9670..9539cffa9e5e 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
@@ -64,7 +64,7 @@ class SubPlugin(TdcPlugin):
64 cmdlist.insert(0, self.args.NAMES['NS']) 64 cmdlist.insert(0, self.args.NAMES['NS'])
65 cmdlist.insert(0, 'exec') 65 cmdlist.insert(0, 'exec')
66 cmdlist.insert(0, 'netns') 66 cmdlist.insert(0, 'netns')
67 cmdlist.insert(0, 'ip') 67 cmdlist.insert(0, self.args.NAMES['IP'])
68 else: 68 else:
69 pass 69 pass
70 70
@@ -78,16 +78,16 @@ class SubPlugin(TdcPlugin):
78 return command 78 return command
79 79
80 def _ports_create(self): 80 def _ports_create(self):
81 cmd = 'ip link add $DEV0 type veth peer name $DEV1' 81 cmd = '$IP link add $DEV0 type veth peer name $DEV1'
82 self._exec_cmd('pre', cmd) 82 self._exec_cmd('pre', cmd)
83 cmd = 'ip link set $DEV0 up' 83 cmd = '$IP link set $DEV0 up'
84 self._exec_cmd('pre', cmd) 84 self._exec_cmd('pre', cmd)
85 if not self.args.namespace: 85 if not self.args.namespace:
86 cmd = 'ip link set $DEV1 up' 86 cmd = '$IP link set $DEV1 up'
87 self._exec_cmd('pre', cmd) 87 self._exec_cmd('pre', cmd)
88 88
89 def _ports_destroy(self): 89 def _ports_destroy(self):
90 cmd = 'ip link del $DEV0' 90 cmd = '$IP link del $DEV0'
91 self._exec_cmd('post', cmd) 91 self._exec_cmd('post', cmd)
92 92
93 def _ns_create(self): 93 def _ns_create(self):
@@ -97,16 +97,16 @@ class SubPlugin(TdcPlugin):
97 ''' 97 '''
98 self._ports_create() 98 self._ports_create()
99 if self.args.namespace: 99 if self.args.namespace:
100 cmd = 'ip netns add {}'.format(self.args.NAMES['NS']) 100 cmd = '$IP netns add {}'.format(self.args.NAMES['NS'])
101 self._exec_cmd('pre', cmd) 101 self._exec_cmd('pre', cmd)
102 cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS']) 102 cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS'])
103 self._exec_cmd('pre', cmd) 103 self._exec_cmd('pre', cmd)
104 cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) 104 cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS'])
105 self._exec_cmd('pre', cmd) 105 self._exec_cmd('pre', cmd)
106 if self.args.device: 106 if self.args.device:
107 cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS']) 107 cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS'])
108 self._exec_cmd('pre', cmd) 108 self._exec_cmd('pre', cmd)
109 cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) 109 cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS'])
110 self._exec_cmd('pre', cmd) 110 self._exec_cmd('pre', cmd)
111 111
112 def _ns_destroy(self): 112 def _ns_destroy(self):
@@ -115,7 +115,7 @@ class SubPlugin(TdcPlugin):
115 devices as well) 115 devices as well)
116 ''' 116 '''
117 if self.args.namespace: 117 if self.args.namespace:
118 cmd = 'ip netns delete {}'.format(self.args.NAMES['NS']) 118 cmd = '$IP netns delete {}'.format(self.args.NAMES['NS'])
119 self._exec_cmd('post', cmd) 119 self._exec_cmd('post', cmd)
120 120
121 def _exec_cmd(self, stage, command): 121 def _exec_cmd(self, stage, command):
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index bf5ebf59c2d4..9cdd2e31ac2c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -670,5 +670,52 @@
670 "teardown": [ 670 "teardown": [
671 "$TC actions flush action skbedit" 671 "$TC actions flush action skbedit"
672 ] 672 ]
673 },
674 {
675 "id": "630c",
676 "name": "Add batch of 32 skbedit actions with all parameters and cookie",
677 "category": [
678 "actions",
679 "skbedit"
680 ],
681 "setup": [
682 [
683 "$TC actions flush action skbedit",
684 0,
685 1,
686 255
687 ]
688 ],
689 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
690 "expExitCode": "0",
691 "verifyCmd": "$TC actions list action skbedit",
692 "matchPattern": "^[ \t]+index [0-9]+ ref",
693 "matchCount": "32",
694 "teardown": [
695 "$TC actions flush action skbedit"
696 ]
697 },
698 {
699 "id": "706d",
700 "name": "Delete batch of 32 skbedit actions with all parameters",
701 "category": [
702 "actions",
703 "skbedit"
704 ],
705 "setup": [
706 [
707 "$TC actions flush action skbedit",
708 0,
709 1,
710 255
711 ],
712 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
713 ],
714 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
715 "expExitCode": "0",
716 "verifyCmd": "$TC actions list action skbedit",
717 "matchPattern": "^[ \t]+index [0-9]+ ref",
718 "matchCount": "0",
719 "teardown": []
673 } 720 }
674] 721]
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index a8a6a0c883f1..6af5c91337f2 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	unsigned int len;
 	int mask;
 
+	/* Detect an already handled MMIO return */
+	if (unlikely(!vcpu->mmio_needed))
+		return 0;
+
+	vcpu->mmio_needed = 0;
+
 	if (!run->mmio.is_write) {
 		len = run->mmio.len;
 		if (len > sizeof(unsigned long))
@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write = is_write;
 	run->mmio.phys_addr = fault_ipa;
 	run->mmio.len = len;
+	vcpu->mmio_needed = 1;
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
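The two hunks above make the MMIO completion path consume-once: io_mem_abort() sets mmio_needed when it hands an access to userspace, and kvm_handle_mmio_return() bails out early if no completion is outstanding, so re-running the vcpu without a fresh MMIO exit cannot replay the completion. A minimal standalone sketch of that pattern in plain user-space C follows; the fake_vcpu type and handle_mmio_return() helper are hypothetical illustrations, not the kernel's structures.

	/* Consume-once completion flag: the producer sets it when work is
	 * handed off, and the completion handler runs at most once no
	 * matter how many times it is re-entered. */
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_vcpu {
		bool mmio_needed;	/* set when an MMIO exit is delivered */
		unsigned long data;
	};

	static int handle_mmio_return(struct fake_vcpu *vcpu)
	{
		/* Detect an already handled MMIO return */
		if (!vcpu->mmio_needed)
			return 0;
		vcpu->mmio_needed = false;

		printf("completing MMIO, data=%lu\n", vcpu->data);
		return 0;
	}

	int main(void)
	{
		struct fake_vcpu vcpu = { .mmio_needed = true, .data = 42 };

		handle_mmio_return(&vcpu);	/* completes the access */
		handle_mmio_return(&vcpu);	/* harmless no-op on re-entry */
		return 0;
	}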
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb..e621b5d45b27 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -8,6 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include "vgic.h"
 
@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+		switch (dist->vgic_model) {
+		case KVM_DEV_TYPE_ARM_VGIC_V2:
 			irq->targets = 0;
 			irq->group = 0;
-		} else {
+			break;
+		case KVM_DEV_TYPE_ARM_VGIC_V3:
 			irq->mpidr = 0;
 			irq->group = 1;
+			break;
+		default:
+			kfree(dist->spis);
+			return -EINVAL;
 		}
 	}
 	return 0;
@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;
-		irq->targets = 1U << vcpu->vcpu_id;
 		kref_init(&irq->refcount);
 		if (vgic_irq_is_sgi(i)) {
 			/* SGIs */
@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			/* PPIs */
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
-
-		if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-			irq->group = 1;
-		else
-			irq->group = 0;
 	}
 
 	if (!irqchip_in_kernel(vcpu->kvm))
@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)
 
 		for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
 			struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
-			if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+			switch (dist->vgic_model) {
+			case KVM_DEV_TYPE_ARM_VGIC_V3:
 				irq->group = 1;
-			else
+				irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+				break;
+			case KVM_DEV_TYPE_ARM_VGIC_V2:
 				irq->group = 0;
+				irq->targets = 1U << idx;
+				break;
+			default:
+				ret = -EINVAL;
+				goto out;
+			}
 		}
 	}
 
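The vgic-init.c hunks above replace if/else chains over the distributor model with explicit switches: an unexpected model now fails with -EINVAL (releasing the spis allocation in the dist-init path) instead of silently being treated as V3. A rough standalone illustration of that defensive-default pattern follows, in plain user-space C; the vgic_model enum values, dist type, and dist_init() helper here are hypothetical stand-ins, not the kernel's definitions.

	/* Reject unknown enum values instead of folding them into a
	 * default branch, and undo partial allocations on the way out. */
	#include <errno.h>
	#include <stdlib.h>

	enum vgic_model { VGIC_V2 = 2, VGIC_V3 = 3 };

	struct dist {
		enum vgic_model model;
		int *spis;
	};

	static int dist_init(struct dist *d, unsigned int nr_spis)
	{
		d->spis = calloc(nr_spis, sizeof(*d->spis));
		if (!d->spis)
			return -ENOMEM;

		switch (d->model) {
		case VGIC_V2:
			/* per-IRQ V2 setup would go here */
			break;
		case VGIC_V3:
			/* per-IRQ V3 setup would go here */
			break;
		default:
			/* unknown model: release what we took, fail loudly */
			free(d->spis);
			d->spis = NULL;
			return -EINVAL;
		}
		return 0;
	}

	int main(void)
	{
		struct dist d = { .model = VGIC_V2 };
		int ret = dist_init(&d, 32);

		free(d.spis);
		return ret ? 1 : 0;
	}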
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 44efc2ff863f..0d090482720d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -211,6 +211,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	vgic_irq_set_phys_active(irq, true);
 }
 
+static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
+	return (vgic_irq_is_sgi(irq->intid) &&
+		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
+}
+
 void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 			      gpa_t addr, unsigned int len,
 			      unsigned long val)
@@ -223,6 +229,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/* GICD_ISPENDR0 SGI bits are WI */
+		if (is_vgic_v2_sgi(vcpu, irq)) {
+			vgic_put_irq(vcpu->kvm, irq);
+			continue;
+		}
+
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -270,6 +282,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+		/* GICD_ICPENDR0 SGI bits are WI */
+		if (is_vgic_v2_sgi(vcpu, irq)) {
+			vgic_put_irq(vcpu->kvm, irq);
+			continue;
+		}
+
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		if (irq->hw)
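Note the shape of the early-continue in both hunks above: vgic_get_irq() takes a reference on every iteration, so the write-ignored (WI) path must still call vgic_put_irq() before skipping, or each skipped SGI bit would leak a reference. A standalone sketch of that skip-but-release pattern follows, in plain user-space C; the obj type and obj_get()/obj_put()/write_pending() helpers are hypothetical, not the kernel's API.

	/* Every iteration takes a reference; every exit path, including
	 * the "ignore this bit" one, must balance it. */
	#include <stdbool.h>
	#include <stdio.h>

	struct obj {
		int id;
		int refcount;
	};

	static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
	static void obj_put(struct obj *o) { o->refcount--; }

	static bool is_write_ignored(const struct obj *o)
	{
		return o->id < 16;	/* stand-in for "SGI on a GICv2 distributor" */
	}

	static void write_pending(struct obj *objs, unsigned long val)
	{
		for (int i = 0; i < 32; i++) {
			if (!(val & (1UL << i)))
				continue;

			struct obj *o = obj_get(&objs[i]);

			/* WI bits: ignore the write, but balance the reference */
			if (is_write_ignored(o)) {
				obj_put(o);
				continue;
			}

			printf("setting pending on %d\n", o->id);
			obj_put(o);
		}
	}

	int main(void)
	{
		struct obj objs[32];

		for (int i = 0; i < 32; i++)
			objs[i] = (struct obj){ .id = i, .refcount = 0 };

		write_pending(objs, 0x0000ffffUL | 1UL << 20);
		return 0;
	}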
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 96aab77d0471..b00aa304c260 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	if (vgic_irq_is_sgi(irq->intid)) {
 		u32 src = ffs(irq->source);
 
-		BUG_ON(!src);
+		if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+				   irq->intid))
+			return;
+
 		val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 		irq->source &= ~(1 << (src - 1));
 		if (irq->source) {
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 0c653a1e5215..a4ad431c92a9 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
 		u32 src = ffs(irq->source);
 
-		BUG_ON(!src);
+		if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
+				   irq->intid))
+			return;
+
 	val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
 	irq->source &= ~(1 << (src - 1));
 	if (irq->source) {
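In both the v2 and v3 LR-population hunks above, a "cannot happen" condition that used to be a machine-killing BUG_ON() becomes a rate-limited warning plus an early return: the bad entry is skipped and reported rather than fatal. A standalone sketch of that recoverable-guard pattern follows, in plain user-space C; warn_ratelimit() here is a crude hypothetical stand-in for the kernel's WARN_RATELIMIT() macro (which likewise evaluates to the condition), and populate_lr() is illustrative only.

	/* Warn-and-recover instead of crash: report the impossible state
	 * at most once per second, then skip the offending entry. */
	#include <stdio.h>
	#include <time.h>

	static int warn_ratelimit(int cond, const char *msg)
	{
		static time_t last;
		time_t now = time(NULL);

		if (cond && now != last) {
			fprintf(stderr, "WARNING: %s\n", msg);
			last = now;
		}
		return cond;	/* caller branches on the condition */
	}

	static void populate_lr(unsigned int source_bits, unsigned int intid)
	{
		/* ffs()-style scan: 0 means "no source bit set" */
		int src = source_bits ? __builtin_ffs(source_bits) : 0;

		if (warn_ratelimit(!src, "No SGI source for INTID"))
			return;		/* skip this entry instead of crashing */

		printf("INTID %u: source CPU %d\n", intid, src - 1);
	}

	int main(void)
	{
		populate_lr(0x4, 1);	/* normal case: source CPU 2 */
		populate_lr(0x0, 1);	/* bad state: warn and recover */
		return 0;
	}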
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 13d4b38a94ec..e7bde65ba67c 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	bool penda, pendb;
 	int ret;
 
+	/*
+	 * list_sort may call this function with the same element when
+	 * the list is fairly long.
+	 */
+	if (unlikely(irqa == irqb))
+		return 0;
+
 	raw_spin_lock(&irqa->irq_lock);
 	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
 
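The vgic.c hunk above guards the list_sort() comparator against being called with the same element for both arguments: since vgic_irq_cmp() locks both operands, comparing an element with itself would try to take one lock twice. A standalone sketch of that guard follows, in plain user-space C with pthreads (build with -lpthread); the item type and item_cmp() function are hypothetical illustrations, not the kernel's comparator.

	/* A comparator that locks both operands must treat a == b as a
	 * cheap no-op, or a self-comparison self-deadlocks. */
	#include <pthread.h>
	#include <stdio.h>

	struct item {
		pthread_mutex_t lock;
		int key;
	};

	static int item_cmp(struct item *a, struct item *b)
	{
		int ret;

		/* same element: nothing to lock, nothing to compare */
		if (a == b)
			return 0;

		/* real code would also enforce a fixed lock order here */
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
		ret = (a->key > b->key) - (a->key < b->key);
		pthread_mutex_unlock(&b->lock);
		pthread_mutex_unlock(&a->lock);
		return ret;
	}

	int main(void)
	{
		struct item x = { PTHREAD_MUTEX_INITIALIZER, 3 };

		/* without the guard, this self-comparison would deadlock */
		printf("cmp(x, x) = %d\n", item_cmp(&x, &x));
		return 0;
	}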