-rw-r--r-- .mailmap | 5
-rw-r--r-- Documentation/PCI/index.rst | 2
-rw-r--r-- Documentation/PCI/pciebus-howto.rst (renamed from Documentation/PCI/picebus-howto.rst) | 0
-rw-r--r-- Documentation/admin-guide/sysctl/net.rst | 29
-rw-r--r-- Documentation/devicetree/bindings/Makefile | 4
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/net/fsl-fec.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 3
-rw-r--r-- Documentation/networking/tls-offload.rst | 18
-rw-r--r-- Documentation/networking/tuntap.txt | 4
-rw-r--r-- MAINTAINERS | 58
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arc/boot/dts/Makefile | 3
-rw-r--r-- arch/arc/include/asm/entry-arcv2.h | 2
-rw-r--r-- arch/arc/include/asm/linkage.h | 8
-rw-r--r-- arch/arc/include/asm/mach_desc.h | 3
-rw-r--r-- arch/arc/kernel/mcip.c | 60
-rw-r--r-- arch/arc/kernel/unwind.c | 5
-rw-r--r-- arch/arc/mm/dma.c | 2
-rw-r--r-- arch/arc/plat-hsdk/platform.c | 87
-rw-r--r-- arch/arm/Kconfig | 4
-rw-r--r-- arch/arm/boot/dts/am33xx-l4.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/am4372.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/am437x-l4.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/am571x-idk.dts | 7
-rw-r--r-- arch/arm/boot/dts/am572x-idk.dts | 7
-rw-r--r-- arch/arm/boot/dts/am574x-idk.dts | 7
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts | 7
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15-revc.dts | 7
-rw-r--r-- arch/arm/boot/dts/dra7-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/dra7-l4.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi | 50
-rw-r--r-- arch/arm/boot/dts/vf610-bk4.dts | 4
-rw-r--r-- arch/arm/lib/backtrace.S | 2
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq-handler.S | 3
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq.c | 4
-rw-r--r-- arch/arm/mach-omap2/omap4-common.c | 3
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 3
-rw-r--r-- arch/arm/mach-rpc/riscpc.c | 1
-rw-r--r-- arch/arm/mm/Kconfig | 4
-rw-r--r-- arch/arm/mm/dma-mapping.c | 4
-rw-r--r-- arch/arm/mm/init.c | 8
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts | 6
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12a.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts | 1
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 14
-rw-r--r-- arch/arm64/kernel/ftrace.c | 22
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 4
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-sli-defs.h | 1
-rw-r--r-- arch/nds32/kernel/signal.c | 2
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 3
-rw-r--r-- arch/powerpc/Kconfig | 1
-rw-r--r-- arch/powerpc/kernel/Makefile | 3
-rw-r--r-- arch/powerpc/kernel/dma-common.c | 17
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 6
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 6
-rw-r--r-- arch/riscv/configs/defconfig | 2
-rw-r--r-- arch/riscv/configs/rv32_defconfig | 3
-rw-r--r-- arch/riscv/include/asm/fixmap.h | 4
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 12
-rw-r--r-- arch/riscv/include/asm/switch_to.h | 8
-rw-r--r-- arch/riscv/include/asm/tlbflush.h | 11
-rw-r--r-- arch/riscv/kernel/process.c | 11
-rw-r--r-- arch/s390/net/bpf_jit_comp.c | 12
-rw-r--r-- arch/sh/kernel/disassemble.c | 5
-rw-r--r-- arch/sh/kernel/hw_breakpoint.c | 1
-rw-r--r-- arch/um/include/shared/timer-internal.h | 14
-rw-r--r-- arch/um/kernel/process.c | 2
-rw-r--r-- arch/um/kernel/time.c | 16
-rw-r--r-- arch/x86/events/amd/ibs.c | 13
-rw-r--r-- arch/x86/events/core.c | 2
-rw-r--r-- arch/x86/events/intel/core.c | 6
-rw-r--r-- arch/x86/include/asm/ftrace.h | 1
-rw-r--r-- arch/x86/include/asm/perf_event.h | 12
-rw-r--r-- arch/x86/kvm/hyperv.c | 5
-rw-r--r-- arch/x86/kvm/lapic.c | 5
-rw-r--r-- arch/x86/kvm/mmu.c | 33
-rw-r--r-- arch/x86/kvm/svm.c | 9
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 1
-rw-r--r-- arch/x86/kvm/x86.c | 9
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 9
-rw-r--r-- arch/xtensa/kernel/setup.c | 1
-rw-r--r-- block/blk-mq.c | 10
-rw-r--r-- block/blk-sysfs.c | 3
-rw-r--r-- drivers/atm/Kconfig | 2
-rw-r--r-- drivers/auxdisplay/Kconfig | 5
-rw-r--r-- drivers/auxdisplay/charlcd.c | 2
-rw-r--r-- drivers/auxdisplay/charlcd.h (renamed from include/misc/charlcd.h) | 5
-rw-r--r-- drivers/auxdisplay/hd44780.c | 3
-rw-r--r-- drivers/auxdisplay/ht16k33.c | 4
-rw-r--r-- drivers/auxdisplay/panel.c | 4
-rw-r--r-- drivers/base/regmap/Kconfig | 2
-rw-r--r-- drivers/block/drbd/drbd_main.c | 2
-rw-r--r-- drivers/block/rbd.c | 11
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 6
-rw-r--r-- drivers/bluetooth/btqca.c | 29
-rw-r--r-- drivers/bluetooth/btqca.h | 7
-rw-r--r-- drivers/bluetooth/btusb.c | 4
-rw-r--r-- drivers/bluetooth/hci_qca.c | 9
-rw-r--r-- drivers/bus/hisi_lpc.c | 47
-rw-r--r-- drivers/bus/ti-sysc.c | 24
-rw-r--r-- drivers/clk/clk.c | 49
-rw-r--r-- drivers/clk/samsung/clk-exynos5-subcmu.c | 16
-rw-r--r-- drivers/clk/samsung/clk-exynos5-subcmu.h | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 7
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 162
-rw-r--r-- drivers/clk/socfpga/clk-periph-s10.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq.c | 2
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 8
-rw-r--r-- drivers/dma/dw-edma/dw-edma-core.h | 2
-rw-r--r-- drivers/dma/dw-edma/dw-edma-pcie.c | 18
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-core.c | 34
-rw-r--r-- drivers/dma/dw-edma/dw-edma-v0-debugfs.c | 29
-rw-r--r-- drivers/dma/fsldma.c | 1
-rw-r--r-- drivers/dma/ste_dma40.c | 4
-rw-r--r-- drivers/dma/stm32-mdma.c | 2
-rw-r--r-- drivers/dma/tegra210-adma.c | 4
-rw-r--r-- drivers/dma/ti/omap-dma.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 38
-rw-r--r-- drivers/gpio/gpiolib-of.c | 2
-rw-r--r-- drivers/gpio/gpiolib.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 11
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 66
-rw-r--r-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 11
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 11
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c | 19
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h | 3
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 5
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 30
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h | 1
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c | 5
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_post.c | 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 55
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 22
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 24
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/output.c | 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c | 20
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_lvds.c | 6
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon.c | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c | 10
-rw-r--r-- drivers/hid/hid-cp2112.c | 8
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 22
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r-- drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r-- drivers/hid/wacom_wac.c | 7
-rw-r--r-- drivers/hv/channel.c | 2
-rw-r--r-- drivers/hv/hv_trace.h | 2
-rw-r--r-- drivers/hv/hyperv_vmbus.h | 2
-rw-r--r-- drivers/hwtracing/intel_th/msu.h | 2
-rw-r--r-- drivers/hwtracing/intel_th/pti.h | 2
-rw-r--r-- drivers/i2c/busses/i2c-bcm-iproc.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-designware-slave.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-emev2.c | 16
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 15
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 18
-rw-r--r-- drivers/i2c/busses/i2c-mt65xx.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-piix4.c | 12
-rw-r--r-- drivers/i2c/busses/i2c-rcar.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-stm32.h | 2
-rw-r--r-- drivers/i2c/i2c-core-base.c | 2
-rw-r--r-- drivers/iio/adc/max9611.c | 2
-rw-r--r-- drivers/iio/frequency/adf4371.c | 8
-rw-r--r-- drivers/infiniband/core/cma.c | 6
-rw-r--r-- drivers/infiniband/core/counters.c | 16
-rw-r--r-- drivers/infiniband/core/nldev.c | 11
-rw-r--r-- drivers/infiniband/core/restrack.c | 15
-rw-r--r-- drivers/infiniband/core/umem.c | 7
-rw-r--r-- drivers/infiniband/core/umem_odp.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 8
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 11
-rw-r--r-- drivers/infiniband/hw/hfi1/fault.c | 12
-rw-r--r-- drivers/infiniband/hw/hfi1/tid_rdma.c | 76
-rw-r--r-- drivers/infiniband/hw/mlx4/mad.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx5/devx.c | 11
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/mem.c | 5
-rw-r--r-- drivers/infiniband/hw/mlx5/mlx5_ib.h | 14
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 7
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 41
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 24
-rw-r--r-- drivers/infiniband/sw/siw/Kconfig | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw.h | 10
-rw-r--r-- drivers/infiniband/sw/siw/siw_cm.c | 113
-rw-r--r-- drivers/infiniband/sw/siw/siw_cq.c | 5
-rw-r--r-- drivers/infiniband/sw/siw/siw_main.c | 4
-rw-r--r-- drivers/infiniband/sw/siw/siw_mem.c | 14
-rw-r--r-- drivers/infiniband/sw/siw/siw_mem.h | 2
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp.c | 16
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp_rx.c | 26
-rw-r--r-- drivers/infiniband/sw/siw/siw_qp_tx.c | 80
-rw-r--r-- drivers/infiniband/sw/siw/siw_verbs.c | 56
-rw-r--r-- drivers/input/serio/hyperv-keyboard.c | 35
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 4
-rw-r--r-- drivers/iommu/dma-iommu.c | 28
-rw-r--r-- drivers/iommu/intel-iommu-debugfs.c | 2
-rw-r--r-- drivers/iommu/intel-iommu.c | 11
-rw-r--r-- drivers/md/dm-bufio.c | 4
-rw-r--r-- drivers/md/dm-dust.c | 11
-rw-r--r-- drivers/md/dm-integrity.c | 15
-rw-r--r-- drivers/md/dm-kcopyd.c | 5
-rw-r--r-- drivers/md/dm-raid.c | 2
-rw-r--r-- drivers/md/dm-table.c | 5
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 68
-rw-r--r-- drivers/md/dm-zoned-reclaim.c | 47
-rw-r--r-- drivers/md/dm-zoned-target.c | 68
-rw-r--r-- drivers/md/dm-zoned.h | 11
-rw-r--r-- drivers/md/persistent-data/dm-btree.c | 31
-rw-r--r-- drivers/md/persistent-data/dm-space-map-metadata.c | 2
-rw-r--r-- drivers/media/platform/omap/omap_vout_vrfb.c | 3
-rw-r--r-- drivers/mfd/rk808.c | 6
-rw-r--r-- drivers/misc/Kconfig | 1
-rw-r--r-- drivers/misc/habanalabs/device.c | 5
-rw-r--r-- drivers/misc/habanalabs/goya/goya.c | 72
-rw-r--r-- drivers/misc/habanalabs/goya/goyaP.h | 2
-rw-r--r-- drivers/misc/habanalabs/habanalabs.h | 9
-rw-r--r-- drivers/misc/habanalabs/hw_queue.c | 14
-rw-r--r-- drivers/misc/habanalabs/include/goya/goya_packets.h | 13
-rw-r--r-- drivers/misc/habanalabs/irq.c | 27
-rw-r--r-- drivers/misc/habanalabs/memory.c | 2
-rw-r--r-- drivers/mmc/core/sd.c | 6
-rw-r--r-- drivers/mmc/host/sdhci-cadence.c | 1
-rw-r--r-- drivers/mmc/host/sdhci-of-at91.c | 3
-rw-r--r-- drivers/mmc/host/sdhci-sprd.c | 30
-rw-r--r-- drivers/mmc/host/sdhci-tegra.c | 14
-rw-r--r-- drivers/mtd/hyperbus/Kconfig | 1
-rw-r--r-- drivers/mtd/maps/sa1100-flash.c | 1
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 5
-rw-r--r-- drivers/net/bonding/bond_main.c | 2
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 17
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 6
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/request_manager.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.h | 4
-rw-r--r-- drivers/net/ethernet/google/gve/gve_main.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 48
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 97
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 46
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 138
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ace.c | 2
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_flower.c | 12
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 7
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 1
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 2
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 5
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 9
-rw-r--r-- drivers/net/ieee802154/mac802154_hwsim.c | 8
-rw-r--r-- drivers/net/netdevsim/dev.c | 63
-rw-r--r-- drivers/net/netdevsim/fib.c | 102
-rw-r--r-- drivers/net/netdevsim/netdev.c | 9
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 10
-rw-r--r-- drivers/net/phy/at803x.c | 32
-rw-r--r-- drivers/net/phy/phy-c45.c | 14
-rw-r--r-- drivers/net/phy/phy_device.c | 12
-rw-r--r-- drivers/net/team/team.c | 2
-rw-r--r-- drivers/net/usb/cx82310_eth.c | 3
-rw-r--r-- drivers/net/usb/kalmia.c | 6
-rw-r--r-- drivers/net/usb/lan78xx.c | 8
-rw-r--r-- drivers/net/usb/r8152.c | 5
-rw-r--r-- drivers/net/wimax/i2400m/fw.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 33
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 20
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 9
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 13
-rw-r--r-- drivers/net/xen-netback/netback.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 29
-rw-r--r-- drivers/nvme/host/multipath.c | 77
-rw-r--r-- drivers/nvme/host/nvme.h | 26
-rw-r--r-- drivers/nvme/host/pci.c | 17
-rw-r--r-- drivers/nvme/host/rdma.c | 16
-rw-r--r-- drivers/nvme/target/configfs.c | 1
-rw-r--r-- drivers/nvme/target/core.c | 15
-rw-r--r-- drivers/nvme/target/loop.c | 8
-rw-r--r-- drivers/nvme/target/nvmet.h | 3
-rw-r--r-- drivers/of/irq.c | 2
-rw-r--r-- drivers/of/resolver.c | 12
-rw-r--r-- drivers/pci/pcie/aspm.c | 20
-rw-r--r-- drivers/pci/quirks.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec_ishtp.c | 4
-rw-r--r-- drivers/power/supply/ab8500_charger.c | 1
-rw-r--r-- drivers/s390/net/qeth_core.h | 1
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 24
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 15
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 33
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 5
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 11
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 3
-rw-r--r-- drivers/soc/ixp4xx/Kconfig | 4
-rw-r--r-- drivers/soc/ti/pm33xx.c | 19
-rw-r--r-- drivers/soundwire/Kconfig | 7
-rw-r--r-- drivers/soundwire/Makefile | 2
-rw-r--r-- drivers/soundwire/cadence_master.c | 8
-rw-r--r-- drivers/staging/comedi/drivers/dt3000.c | 8
-rw-r--r-- drivers/target/target_core_user.c | 9
-rw-r--r-- drivers/usb/chipidea/ci_hdrc_imx.c | 19
-rw-r--r-- drivers/usb/class/cdc-acm.c | 12
-rw-r--r-- drivers/usb/core/buffer.c | 10
-rw-r--r-- drivers/usb/core/file.c | 10
-rw-r--r-- drivers/usb/core/hcd.c | 4
-rw-r--r-- drivers/usb/core/message.c | 4
-rw-r--r-- drivers/usb/dwc2/hcd.c | 2
-rw-r--r-- drivers/usb/gadget/composite.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 28
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 5
-rw-r--r-- drivers/usb/host/fotg210-hcd.c | 4
-rw-r--r-- drivers/usb/serial/option.c | 10
-rw-r--r-- drivers/video/fbdev/acornfb.c | 1
-rw-r--r-- drivers/watchdog/wdt285.c | 2
-rw-r--r-- fs/afs/cell.c | 4
-rw-r--r-- fs/afs/cmservice.c | 10
-rw-r--r-- fs/afs/dir.c | 92
-rw-r--r-- fs/afs/file.c | 12
-rw-r--r-- fs/afs/vlclient.c | 11
-rw-r--r-- fs/afs/yfsclient.c | 2
-rw-r--r-- fs/block_dev.c | 49
-rw-r--r-- fs/btrfs/ctree.h | 4
-rw-r--r-- fs/btrfs/disk-io.c | 2
-rw-r--r-- fs/btrfs/extent-tree.c | 71
-rw-r--r-- fs/btrfs/volumes.c | 13
-rw-r--r-- fs/ceph/addr.c | 5
-rw-r--r-- fs/ceph/caps.c | 5
-rw-r--r-- fs/ceph/inode.c | 7
-rw-r--r-- fs/ceph/locks.c | 3
-rw-r--r-- fs/ceph/snap.c | 4
-rw-r--r-- fs/ceph/super.h | 2
-rw-r--r-- fs/ceph/xattr.c | 19
-rw-r--r-- fs/cifs/cifsfs.h | 2
-rw-r--r-- fs/cifs/cifsproto.h | 1
-rw-r--r-- fs/cifs/cifssmb.c | 197
-rw-r--r-- fs/cifs/connect.c | 31
-rw-r--r-- fs/cifs/dir.c | 5
-rw-r--r-- fs/cifs/misc.c | 22
-rw-r--r-- fs/cifs/sess.c | 26
-rw-r--r-- fs/io_uring.c | 86
-rw-r--r-- fs/nfs/dir.c | 2
-rw-r--r-- fs/nfs/direct.c | 27
-rw-r--r-- fs/nfs/flexfilelayout/flexfilelayout.c | 28
-rw-r--r-- fs/nfs/inode.c | 33
-rw-r--r-- fs/nfs/internal.h | 10
-rw-r--r-- fs/nfs/nfs4file.c | 12
-rw-r--r-- fs/nfs/pagelist.c | 19
-rw-r--r-- fs/nfs/pnfs_nfs.c | 15
-rw-r--r-- fs/nfs/proc.c | 7
-rw-r--r-- fs/nfs/read.c | 35
-rw-r--r-- fs/nfs/write.c | 38
-rw-r--r-- fs/nfsd/nfscache.c | 2
-rw-r--r-- fs/nfsd/nfsctl.c | 19
-rw-r--r-- fs/read_write.c | 49
-rw-r--r-- fs/seq_file.c | 2
-rw-r--r-- fs/ubifs/budget.c | 2
-rw-r--r-- fs/ubifs/orphan.c | 2
-rw-r--r-- fs/ubifs/super.c | 4
-rw-r--r-- fs/userfaultfd.c | 25
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 29
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.c | 19
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_node.c | 3
-rw-r--r-- fs/xfs/xfs_ioctl32.c | 56
-rw-r--r-- fs/xfs/xfs_iops.c | 1
-rw-r--r-- fs/xfs/xfs_log.c | 5
-rw-r--r-- fs/xfs/xfs_pnfs.c | 2
-rw-r--r-- fs/xfs/xfs_reflink.c | 63
-rw-r--r-- include/asm-generic/5level-fixup.h | 21
-rw-r--r-- include/linux/blk_types.h | 5
-rw-r--r-- include/linux/ceph/buffer.h | 3
-rw-r--r-- include/linux/dma-contiguous.h | 5
-rw-r--r-- include/linux/dma-noncoherent.h | 13
-rw-r--r-- include/linux/gfp.h | 12
-rw-r--r-- include/linux/gpio.h | 24
-rw-r--r-- include/linux/key.h | 8
-rw-r--r-- include/linux/logic_pio.h | 1
-rw-r--r-- include/linux/memcontrol.h | 19
-rw-r--r-- include/linux/mempolicy.h | 2
-rw-r--r-- include/linux/mlx5/device.h | 4
-rw-r--r-- include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r-- include/linux/mm_types.h | 11
-rw-r--r-- include/linux/mmzone.h | 5
-rw-r--r-- include/linux/netfilter/nf_conntrack_h323_types.h | 5
-rw-r--r-- include/linux/pci.h | 2
-rw-r--r-- include/linux/signal.h | 15
-rw-r--r-- include/linux/skbuff.h | 8
-rw-r--r-- include/linux/socket.h | 3
-rw-r--r-- include/linux/sunrpc/sched.h | 1
-rw-r--r-- include/linux/timekeeper_internal.h | 5
-rw-r--r-- include/linux/trace_events.h | 1
-rw-r--r-- include/linux/usb.h | 2
-rw-r--r-- include/linux/usb/hcd.h | 3
-rw-r--r-- include/math-emu/op-common.h | 5
-rw-r--r-- include/net/addrconf.h | 2
-rw-r--r-- include/net/bluetooth/hci_core.h | 1
-rw-r--r-- include/net/inet_frag.h | 2
-rw-r--r-- include/net/net_namespace.h | 3
-rw-r--r-- include/net/netfilter/nf_tables.h | 9
-rw-r--r-- include/net/netfilter/nf_tables_offload.h | 2
-rw-r--r-- include/net/netlink.h | 5
-rw-r--r-- include/net/nexthop.h | 6
-rw-r--r-- include/net/pkt_cls.h | 2
-rw-r--r-- include/net/route.h | 2
-rw-r--r-- include/net/sock.h | 10
-rw-r--r-- include/rdma/restrack.h | 3
-rw-r--r-- include/soc/arc/mcip.h | 11
-rw-r--r-- include/trace/events/rxrpc.h | 6
-rw-r--r-- include/uapi/linux/bpf.h | 4
-rw-r--r-- include/uapi/linux/jffs2.h | 5
-rw-r--r-- include/uapi/linux/netfilter/xt_nfacct.h | 5
-rw-r--r-- include/uapi/linux/rds.h | 2
-rw-r--r-- include/uapi/rdma/siw-abi.h | 3
-rw-r--r-- kernel/bpf/syscall.c | 30
-rw-r--r-- kernel/bpf/verifier.c | 9
-rw-r--r-- kernel/configs.c | 16
-rw-r--r-- kernel/dma/contiguous.c | 8
-rw-r--r-- kernel/dma/direct.c | 20
-rw-r--r-- kernel/dma/mapping.c | 19
-rw-r--r-- kernel/dma/remap.c | 2
-rw-r--r-- kernel/irq/irqdesc.c | 15
-rw-r--r-- kernel/kallsyms.c | 6
-rw-r--r-- kernel/kprobes.c | 8
-rw-r--r-- kernel/module.c | 4
-rw-r--r-- kernel/sched/core.c | 5
-rw-r--r-- kernel/sched/cpufreq_schedutil.c | 14
-rw-r--r-- kernel/sched/psi.c | 8
-rw-r--r-- kernel/signal.c | 5
-rw-r--r-- kernel/time/timekeeping.c | 5
-rw-r--r-- kernel/time/vsyscall.c | 22
-rw-r--r-- kernel/trace/ftrace.c | 17
-rw-r--r-- kernel/trace/trace.c | 26
-rw-r--r-- kernel/trace/trace_events.c | 2
-rw-r--r-- kernel/trace/trace_probe.c | 3
-rw-r--r-- lib/kfifo.c | 3
-rw-r--r-- lib/logic_pio.c | 73
-rw-r--r-- mm/huge_memory.c | 55
-rw-r--r-- mm/hugetlb.c | 19
-rw-r--r-- mm/kasan/common.c | 10
-rw-r--r-- mm/kmemleak.c | 2
-rw-r--r-- mm/memcontrol.c | 146
-rw-r--r-- mm/mempolicy.c | 134
-rw-r--r-- mm/memremap.c | 24
-rw-r--r-- mm/page_alloc.c | 19
-rw-r--r-- mm/rmap.c | 8
-rw-r--r-- mm/shmem.c | 2
-rw-r--r-- mm/usercopy.c | 2
-rw-r--r-- mm/vmalloc.c | 12
-rw-r--r-- mm/vmscan.c | 18
-rw-r--r-- mm/workingset.c | 10
-rw-r--r-- mm/z3fold.c | 104
-rw-r--r-- mm/zsmalloc.c | 80
-rw-r--r-- net/batman-adv/multicast.c | 8
-rw-r--r-- net/batman-adv/netlink.c | 2
-rw-r--r-- net/bluetooth/hci_core.c | 1
-rw-r--r-- net/bluetooth/hci_debugfs.c | 31
-rw-r--r-- net/bluetooth/hidp/core.c | 9
-rw-r--r-- net/bluetooth/l2cap_core.c | 2
-rw-r--r-- net/bridge/netfilter/ebtables.c | 8
-rw-r--r-- net/ceph/crypto.c | 6
-rw-r--r-- net/ceph/osd_client.c | 9
-rw-r--r-- net/core/filter.c | 8
-rw-r--r-- net/core/flow_dissector.c | 2
-rw-r--r-- net/core/sock.c | 50
-rw-r--r-- net/core/sock_diag.c | 3
-rw-r--r-- net/core/stream.c | 16
-rw-r--r-- net/dsa/switch.c | 3
-rw-r--r-- net/ieee802154/6lowpan/reassembly.c | 2
-rw-r--r-- net/ieee802154/socket.c | 2
-rw-r--r-- net/ipv4/fib_trie.c | 2
-rw-r--r-- net/ipv4/icmp.c | 10
-rw-r--r-- net/ipv4/igmp.c | 4
-rw-r--r-- net/ipv4/inet_fragment.c | 39
-rw-r--r-- net/ipv4/ip_fragment.c | 8
-rw-r--r-- net/ipv4/route.c | 17
-rw-r--r-- net/ipv4/tcp.c | 3
-rw-r--r-- net/ipv4/tcp_bpf.c | 6
-rw-r--r-- net/ipv4/tcp_output.c | 3
-rw-r--r-- net/ipv6/addrconf.c | 19
-rw-r--r-- net/ipv6/netfilter/nf_conntrack_reasm.c | 2
-rw-r--r-- net/ipv6/reassembly.c | 2
-rw-r--r-- net/mac80211/cfg.c | 9
-rw-r--r-- net/mpls/mpls_iptunnel.c | 8
-rw-r--r-- net/ncsi/ncsi-cmd.c | 13
-rw-r--r-- net/ncsi/ncsi-rsp.c | 9
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 16
-rw-r--r-- net/netfilter/nf_flow_table_core.c | 43
-rw-r--r-- net/netfilter/nf_flow_table_ip.c | 43
-rw-r--r-- net/netfilter/nf_tables_api.c | 19
-rw-r--r-- net/netfilter/nf_tables_offload.c | 17
-rw-r--r-- net/netfilter/nft_flow_offload.c | 15
-rw-r--r-- net/netfilter/xt_nfacct.c | 36
-rw-r--r-- net/openvswitch/conntrack.c | 15
-rw-r--r-- net/packet/af_packet.c | 7
-rw-r--r-- net/rds/ib.c | 16
-rw-r--r-- net/rds/ib.h | 1
-rw-r--r-- net/rds/ib_cm.c | 3
-rw-r--r-- net/rds/rdma_transport.c | 10
-rw-r--r-- net/rxrpc/af_rxrpc.c | 6
-rw-r--r-- net/rxrpc/ar-internal.h | 8
-rw-r--r-- net/rxrpc/call_event.c | 15
-rw-r--r-- net/rxrpc/input.c | 59
-rw-r--r-- net/rxrpc/local_object.c | 103
-rw-r--r-- net/rxrpc/output.c | 3
-rw-r--r-- net/rxrpc/recvmsg.c | 6
-rw-r--r-- net/sched/act_skbedit.c | 12
-rw-r--r-- net/sched/sch_taprio.c | 3
-rw-r--r-- net/sctp/sm_sideeffect.c | 2
-rw-r--r-- net/sctp/stream.c | 1
-rw-r--r-- net/smc/smc_tx.c | 6
-rw-r--r-- net/sunrpc/clnt.c | 47
-rw-r--r-- net/sunrpc/xprt.c | 7
-rw-r--r-- net/tipc/addr.c | 1
-rw-r--r-- net/tipc/link.c | 92
-rw-r--r-- net/tipc/msg.h | 8
-rw-r--r-- net/tls/tls_device.c | 9
-rw-r--r-- net/tls/tls_main.c | 2
-rw-r--r-- net/wireless/reg.c | 2
-rw-r--r-- net/wireless/util.c | 23
-rw-r--r-- net/xdp/xdp_umem.c | 4
-rw-r--r-- net/xfrm/xfrm_policy.c | 4
-rw-r--r-- samples/auxdisplay/cfag12864b-example.c | 2
-rw-r--r-- scripts/coccinelle/api/atomic_as_refcounter.cocci | 1
-rw-r--r-- security/keys/request_key.c | 2
-rw-r--r-- security/keys/trusted.c | 13
-rw-r--r-- sound/core/seq/seq_clientmgr.c | 3
-rw-r--r-- sound/core/seq/seq_fifo.c | 17
-rw-r--r-- sound/core/seq/seq_fifo.h | 2
-rw-r--r-- sound/firewire/oxfw/oxfw-pcm.c | 2
-rw-r--r-- sound/pci/hda/hda_generic.c | 21
-rw-r--r-- sound/pci/hda/hda_generic.h | 1
-rw-r--r-- sound/pci/hda/hda_intel.c | 3
-rw-r--r-- sound/pci/hda/patch_ca0132.c | 1
-rw-r--r-- sound/pci/hda/patch_conexant.c | 32
-rw-r--r-- sound/pci/hda/patch_realtek.c | 12
-rw-r--r-- sound/usb/line6/pcm.c | 18
-rw-r--r-- sound/usb/mixer.c | 73
-rw-r--r-- sound/usb/mixer_quirks.c | 8
-rw-r--r-- sound/usb/pcm.c | 1
-rw-r--r-- tools/bpf/bpftool/common.c | 8
-rw-r--r-- tools/bpf/bpftool/prog.c | 4
-rwxr-xr-x tools/hv/hv_get_dhcp_info.sh | 2
-rw-r--r-- tools/hv/hv_kvp_daemon.c | 10
-rwxr-xr-x tools/hv/hv_set_ifconfig.sh | 2
-rw-r--r-- tools/hv/hv_vss_daemon.c | 4
-rw-r--r-- tools/hv/lsvmbus | 75
-rw-r--r-- tools/include/uapi/linux/bpf.h | 11
-rw-r--r-- tools/lib/bpf/libbpf.c | 33
-rw-r--r-- tools/power/x86/turbostat/Makefile | 3
-rw-r--r-- tools/power/x86/turbostat/turbostat.c | 101
-rw-r--r-- tools/power/x86/x86_energy_perf_policy/Makefile | 3
-rw-r--r-- tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 | 2
-rw-r--r-- tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c | 28
-rw-r--r-- tools/testing/selftests/bpf/Makefile | 6
-rw-r--r-- tools/testing/selftests/bpf/config | 1
-rw-r--r-- tools/testing/selftests/bpf/test_btf_dump.c | 7
-rw-r--r-- tools/testing/selftests/bpf/test_cgroup_storage.c | 6
-rw-r--r-- tools/testing/selftests/bpf/test_sock.c | 7
-rw-r--r-- tools/testing/selftests/bpf/verifier/loops1.c | 28
-rw-r--r-- tools/testing/selftests/kvm/include/evmcs.h | 2
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/processor.c | 16
-rw-r--r-- tools/testing/selftests/kvm/lib/x86_64/vmx.c | 20
-rw-r--r-- tools/testing/selftests/kvm/x86_64/evmcs_test.c | 15
-rw-r--r-- tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c | 12
-rw-r--r-- tools/testing/selftests/kvm/x86_64/platform_info_test.c | 2
-rw-r--r-- tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c | 32
-rwxr-xr-x tools/testing/selftests/net/tcp_fastopen_backup_key.sh | 2
-rwxr-xr-x tools/testing/selftests/netfilter/nft_flowtable.sh | 48
-rw-r--r-- tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json | 47
-rw-r--r-- virt/kvm/arm/mmio.c | 7
-rw-r--r-- virt/kvm/arm/vgic/vgic-init.c | 30
-rw-r--r-- virt/kvm/arm/vgic/vgic-mmio.c | 18
-rw-r--r-- virt/kvm/arm/vgic/vgic-v2.c | 5
-rw-r--r-- virt/kvm/arm/vgic/vgic-v3.c | 5
-rw-r--r-- virt/kvm/arm/vgic/vgic.c | 7

630 files changed, 5812 insertions(+), 3205 deletions(-)
diff --git a/.mailmap b/.mailmap
index acba1a6163f1..afaad605284a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -64,6 +64,9 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dczhu@mips.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com>
 Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dsafonov@virtuozzo.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <d.safonov@partner.samsung.com>
+Dmitry Safonov <0x7f454c46@gmail.com> <dima@arista.com>
 Domen Puncer <domen@coderock.org>
 Douglas Gilbert <dougg@torque.net>
 Ed L. Cashin <ecashin@coraid.com>
@@ -160,6 +163,8 @@ Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.co
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@bootlin.com>
+Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
diff --git a/Documentation/PCI/index.rst b/Documentation/PCI/index.rst
index f4c6121868c3..6768305e4c26 100644
--- a/Documentation/PCI/index.rst
+++ b/Documentation/PCI/index.rst
@@ -9,7 +9,7 @@ Linux PCI Bus Subsystem
    :numbered:
 
    pci
-   picebus-howto
+   pciebus-howto
    pci-iov-howto
    msi-howto
    acpi-info
diff --git a/Documentation/PCI/picebus-howto.rst b/Documentation/PCI/pciebus-howto.rst
index f882ff62c51f..f882ff62c51f 100644
--- a/Documentation/PCI/picebus-howto.rst
+++ b/Documentation/PCI/pciebus-howto.rst
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index a7d44e71019d..287b98708a40 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net
 802       E802 protocol         ax25       AX25
 ethernet  Ethernet protocol     rose       X.25 PLP layer
 ipv4      IP version 4          x25        X.25 protocol
-ipx       IPX                   token-ring IBM token ring
 bridge    Bridging              decnet     DEC net
 ipv6      IP version 6          tipc       TIPC
 ========= =================== = ========== ==================
@@ -401,33 +400,7 @@ interface.
 (network) that the route leads to, the router (may be directly connected), the
 route flags, and the device the route is using.
 
-
-5. IPX
-------
-
-The IPX protocol has no tunable values in proc/sys/net.
-
-The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
-socket giving the local and remote addresses in Novell format (that is
-network:node:port). In accordance with the strange Novell tradition,
-everything but the port is in hex. Not_Connected is displayed for sockets that
-are not tied to a specific remote address. The Tx and Rx queue sizes indicate
-the number of bytes pending for transmission and reception. The state
-indicates the state the socket is in and the uid is the owning uid of the
-socket.
-
-The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
-it gives the network number, the node number, and indicates if the network is
-the primary network. It also indicates which device it is bound to (or
-Internal for internal networks) and the Frame Type if appropriate. Linux
-supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
-IPX.
-
-The /proc/net/ipx_route table holds a list of IPX routes. For each route it
-gives the destination network, the router node (or Directly) and the network
-address of the router (or Connected) for internal networks.
-
-6. TIPC
+5. TIPC
 -------
 
 tipc_rmem
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index 6b0dfd5c17ba..5138a2f6232a 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -19,7 +19,9 @@ quiet_cmd_mk_schema = SCHEMA $@
 
 DT_DOCS = $(shell \
 	cd $(srctree)/$(src) && \
-	find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \
+	find * \( -name '*.yaml' ! \
+		-name $(DT_TMP_SCHEMA) ! \
+		-name '*.example.dt.yaml' \) \
 	)
 
 DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS))
diff --git a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
index 09fc02b99845..a5c1db95b3ec 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/snps,archs-idu-intc.txt
@@ -1,20 +1,30 @@
 * ARC-HS Interrupt Distribution Unit
 
-This optional 2nd level interrupt controller can be used in SMP configurations for
-dynamic IRQ routing, load balancing of common/external IRQs towards core intc.
+This optional 2nd level interrupt controller can be used in SMP configurations
+for dynamic IRQ routing, load balancing of common/external IRQs towards core
+intc.
 
 Properties:
 
 - compatible: "snps,archs-idu-intc"
 - interrupt-controller: This is an interrupt controller.
-- #interrupt-cells: Must be <1>.
+- #interrupt-cells: Must be <1> or <2>.
 
-  Value of the cell specifies the "common" IRQ from peripheral to IDU. Number N
-  of the particular interrupt line of IDU corresponds to the line N+24 of the
-  core interrupt controller.
+  Value of the first cell specifies the "common" IRQ from peripheral to IDU.
+  Number N of the particular interrupt line of IDU corresponds to the line N+24
+  of the core interrupt controller.
 
-  intc accessed via the special ARC AUX register interface, hence "reg" property
-  is not specified.
+  The (optional) second cell specifies any of the following flags:
+    - bits[3:0] trigger type and level flags
+        1 = low-to-high edge triggered
+        2 = NOT SUPPORTED (high-to-low edge triggered)
+        4 = active high level-sensitive <<< DEFAULT
+        8 = NOT SUPPORTED (active low level-sensitive)
+    When no second cell is specified, the interrupt is assumed to be level
+    sensitive.
+
+  The interrupt controller is accessed via the special ARC AUX register
+  interface, hence "reg" property is not specified.
 
 Example:
 	core_intc: core-interrupt-controller {
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 2d41fb96ce0a..5b88fae0307d 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7,18 +7,6 @@ Required properties:
 - phy-mode : See ethernet.txt file in the same directory
 
 Optional properties:
-- phy-reset-gpios : Should specify the gpio for phy reset
-- phy-reset-duration : Reset duration in milliseconds. Should present
-  only if property "phy-reset-gpios" is available. Missing the property
-  will have the duration be 1 millisecond. Numbers greater than 1000 are
-  invalid and 1 millisecond will be used instead.
-- phy-reset-active-high : If present then the reset sequence using the GPIO
-  specified in the "phy-reset-gpios" property is reversed (H=reset state,
-  L=operation state).
-- phy-reset-post-delay : Post reset delay in milliseconds. If present then
-  a delay of phy-reset-post-delay milliseconds will be observed after the
-  phy-reset-gpios has been toggled. Can be omitted thus no delay is
-  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -47,11 +35,27 @@ Optional properties:
   For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
   per second interrupt associated with 1588 precision time protocol(PTP).
 
-
 Optional subnodes:
 - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes
   according to phy.txt in the same directory
 
+Deprecated optional properties:
+	To avoid these, create a phy node according to phy.txt in the same
+	directory, and point the fec's "phy-handle" property to it. Then use
+	the phy's reset binding, again described by phy.txt.
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds. Should present
+  only if property "phy-reset-gpios" is available. Missing the property
+  will have the duration be 1 millisecond. Numbers greater than 1000 are
+  invalid and 1 millisecond will be used instead.
+- phy-reset-active-high : If present then the reset sequence using the GPIO
+  specified in the "phy-reset-gpios" property is reversed (H=reset state,
+  L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
+
 Example:
 
 ethernet@83fec000 {
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 91d3e78b3395..400df2da018a 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -37,7 +37,8 @@ properties:
   hwlocks: true
 
   st,syscfg:
-    $ref: "/schemas/types.yaml#/definitions/phandle-array"
+    allOf:
+      - $ref: "/schemas/types.yaml#/definitions/phandle-array"
     description: Should be phandle/offset/mask
     items:
       - description: Phandle to the syscon node which includes IRQ mux selection.
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index b70b70dc4524..0dd3f748239f 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -506,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags.
 These flags will be acted upon accordingly by the core ``ktls`` code.
 TLS device feature flags only control adding of new TLS connection
 offloads, old connections will remain active after flags are cleared.
-
-Known bugs
-==========
-
-skb_orphan() leaks clear text
------------------------------
-
-Currently drivers depend on the :c:member:`sk` member of
-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
-encryption. Any operation which removes or does not preserve the socket
-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
-will cause the driver to miss the packets and lead to clear text leaks.
-
-Redirects leak clear text
--------------------------
-
-In the RX direction, if segment has already been decrypted by the device
-and it gets redirected or mirrored - clear text will be transmitted out.
diff --git a/Documentation/networking/tuntap.txt b/Documentation/networking/tuntap.txt
index 949d5dcdd9a3..0104830d5075 100644
--- a/Documentation/networking/tuntap.txt
+++ b/Documentation/networking/tuntap.txt
@@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical
 media, receives them from user space program and instead of sending
 packets via physical media sends them to the user space program.
 
-Let's say that you configured IPX on the tap0, then whenever
-the kernel sends an IPX packet to tap0, it is passed to the application
+Let's say that you configured IPv6 on the tap0, then whenever
+the kernel sends an IPv6 packet to tap0, it is passed to the application
 (VTun for example). The application encrypts, compresses and sends it to
 the other side over TCP or UDP. The application on the other side decompresses
 and decrypts the data received and writes the packet to the TAP device,
diff --git a/MAINTAINERS b/MAINTAINERS
index f3a78403b47f..028bc47526c0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	drivers/net/ethernet/realtek/r8169.c
+F:	drivers/net/ethernet/realtek/r8169*
 
 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -683,7 +683,7 @@ S: Maintained
 F:	drivers/crypto/sunxi-ss/
 
 ALLWINNER VPU DRIVER
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@ -1408,7 +1408,7 @@ S: Maintained
 F:	drivers/clk/sunxi/
 
 ARM/Allwinner sunXi SoC support
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Chen-Yu Tsai <wens@csie.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@ -3577,7 +3577,7 @@ F: Documentation/filesystems/caching/cachefiles.txt
 F:	fs/cachefiles/
 
 CADENCE MIPI-CSI2 BRIDGES
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/cdns,*.txt
@@ -5295,7 +5295,7 @@ F: include/linux/vga*
 
 DRM DRIVERS AND MISC GPU PATCHES
 M:	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Sean Paul <sean@poorly.run>
 W:	https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 S:	Maintained
@@ -5308,7 +5308,7 @@ F: include/uapi/drm/drm*
 F:	include/linux/vga*
 
 DRM DRIVERS FOR ALLWINNER A10
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
 F:	drivers/gpu/drm/sun4i/
@@ -6065,7 +6065,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	Documentation/ABI/testing/sysfs-bus-mdio
+F:	Documentation/ABI/testing/sysfs-class-net-phydev
 F:	Documentation/devicetree/bindings/net/ethernet-phy.yaml
 F:	Documentation/devicetree/bindings/net/mdio*
 F:	Documentation/networking/phy.rst
@@ -6441,6 +6441,14 @@ S: Maintained
 F:	drivers/perf/fsl_imx8_ddr_perf.c
 F:	Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
 
+FREESCALE IMX I2C DRIVER
+M:	Oleksij Rempel <o.rempel@pengutronix.de>
+R:	Pengutronix Kernel Team <kernel@pengutronix.de>
+L:	linux-i2c@vger.kernel.org
+S:	Maintained
+F:	drivers/i2c/busses/i2c-imx.c
+F:	Documentation/devicetree/bindings/i2c/i2c-imx.txt
+
 FREESCALE IMX LPI2C DRIVER
 M:	Dong Aisheng <aisheng.dong@nxp.com>
 L:	linux-i2c@vger.kernel.org
@@ -7452,7 +7460,7 @@ F: drivers/net/hyperv/
 F:	drivers/scsi/storvsc_drv.c
 F:	drivers/uio/uio_hv_generic.c
 F:	drivers/video/fbdev/hyperv_fb.c
-F:	drivers/iommu/hyperv_iommu.c
+F:	drivers/iommu/hyperv-iommu.c
 F:	net/vmw_vsock/hyperv_transport.c
 F:	include/clocksource/hyperv_timer.h
 F:	include/linux/hyperv.h
@@ -7505,7 +7513,7 @@ I2C MV64XXX MARVELL AND ALLWINNER DRIVER
 M:	Gregory CLEMENT <gregory.clement@bootlin.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+F:	Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
 F:	drivers/i2c/busses/i2c-mv64xxx.c
 
 I2C OVER PARALLEL PORT
@@ -8422,7 +8430,6 @@ L: linux-xfs@vger.kernel.org
 L:	linux-fsdevel@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S:	Supported
-F:	fs/iomap.c
 F:	fs/iomap/
 F:	include/linux/iomap.h
 
@@ -8447,11 +8454,6 @@ S: Maintained
 F:	fs/io_uring.c
 F:	include/uapi/linux/io_uring.h
 
-IP MASQUERADING
-M:	Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
-S:	Maintained
-F:	net/ipv4/netfilter/ipt_MASQUERADE.c
-
 IPMI SUBSYSTEM
 M:	Corey Minyard <minyard@acm.org>
 L:	openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -8825,14 +8827,6 @@ F: virt/kvm/*
 F:	tools/kvm/
 F:	tools/testing/selftests/kvm/
 
-KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
-M:	Joerg Roedel <joro@8bytes.org>
-L:	kvm@vger.kernel.org
-W:	http://www.linux-kvm.org/
-S:	Maintained
-F:	arch/x86/include/asm/svm.h
-F:	arch/x86/kvm/svm.c
-
 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M:	Marc Zyngier <maz@kernel.org>
 R:	James Morse <james.morse@arm.com>
@@ -8875,7 +8869,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
 M:	Janosch Frank <frankja@linux.ibm.com>
 R:	David Hildenbrand <david@redhat.com>
 R:	Cornelia Huck <cohuck@redhat.com>
-L:	linux-s390@vger.kernel.org
+L:	kvm@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 S:	Supported
@@ -8890,6 +8884,11 @@ F: tools/testing/selftests/kvm/*/s390x/
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:	Paolo Bonzini <pbonzini@redhat.com>
 M:	Radim Krčmář <rkrcmar@redhat.com>
+R:	Sean Christopherson <sean.j.christopherson@intel.com>
+R:	Vitaly Kuznetsov <vkuznets@redhat.com>
+R:	Wanpeng Li <wanpengli@tencent.com>
+R:	Jim Mattson <jmattson@google.com>
+R:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
 W:	http://www.linux-kvm.org
 T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8897,8 +8896,12 @@ S: Supported
 F:	arch/x86/kvm/
 F:	arch/x86/kvm/*/
 F:	arch/x86/include/uapi/asm/kvm*
+F:	arch/x86/include/uapi/asm/vmx.h
+F:	arch/x86/include/uapi/asm/svm.h
 F:	arch/x86/include/asm/kvm*
 F:	arch/x86/include/asm/pvclock-abi.h
+F:	arch/x86/include/asm/svm.h
+F:	arch/x86/include/asm/vmx.h
 F:	arch/x86/kernel/kvm.c
 F:	arch/x86/kernel/kvmclock.c
 
@@ -11078,7 +11081,7 @@ NET_FAILOVER MODULE
 M:	Sridhar Samudrala <sridhar.samudrala@intel.com>
 L:	netdev@vger.kernel.org
 S:	Supported
-F:	driver/net/net_failover.c
+F:	drivers/net/net_failover.c
 F:	include/net/net_failover.h
 F:	Documentation/networking/net_failover.rst
 
@@ -14470,6 +14473,7 @@ F: drivers/net/phy/phylink.c
 F:	drivers/net/phy/sfp*
 F:	include/linux/phylink.h
 F:	include/linux/sfp.h
+K:	phylink
 
 SGI GRU DRIVER
 M:	Dimitri Sivanich <sivanich@sgi.com>
@@ -14875,9 +14879,9 @@ F: include/linux/arm_sdei.h
 F:	include/uapi/linux/arm_sdei.h
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
-M:	Shaohua Li <shli@kernel.org>
+M:	Song Liu <song@kernel.org>
 L:	linux-raid@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
 S:	Supported
 F:	drivers/md/Makefile
 F:	drivers/md/Kconfig
diff --git a/Makefile b/Makefile
index 1b23f95db176..f125625efd60 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index a83c4f5e928b..8483a86c743d 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -12,3 +12,6 @@ dtb-y := $(builtindtb-y).dtb
 # for CONFIG_OF_ALL_DTBS test
 dtstree	:= $(srctree)/$(src)
 dtb-	:= $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
+
+# board-specific dtc flags
+DTC_FLAGS_hsdk += --pad 20
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index f5ae394ebe06..41b16f21beec 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -256,7 +256,7 @@
 
 .macro FAKE_RET_FROM_EXCPN
 	lr	r9, [status32]
-	bic	r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+	bic	r9, r9, STATUS_AE_MASK
 	or	r9, r9, STATUS_IE_MASK
 	kflag	r9
 .endm
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
index a0eeb9f8f0a9..d9ee43c6b7db 100644
--- a/arch/arc/include/asm/linkage.h
+++ b/arch/arc/include/asm/linkage.h
@@ -62,15 +62,15 @@
 #else	/* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARC_HAS_ICCM
-#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#define __arcfp_code __section(.text.arcfp)
 #else
-#define __arcfp_code __attribute__((__section__(".text")))
+#define __arcfp_code __section(.text)
 #endif
 
 #ifdef CONFIG_ARC_HAS_DCCM
-#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#define __arcfp_data __section(.data.arcfp)
 #else
-#define __arcfp_data __attribute__((__section__(".data")))
+#define __arcfp_data __section(.data)
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 8ac0e2ac3e70..73746ed5b834 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -53,8 +53,7 @@ extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
  */
 #define MACHINE_START(_type, _name)			\
 static const struct machine_desc __mach_desc_##_type	\
-__used							\
-__attribute__((__section__(".arch.info.init"))) = {	\
+__used __section(.arch.info.init) = {			\
 	.name		= _name,
 
 #define MACHINE_END				\
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 18b493dfb3a8..abf9398cc333 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -202,8 +202,8 @@ static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
202 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask); 202 __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
203} 203}
204 204
205static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl, 205static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
206 unsigned int distr) 206 bool set_distr, unsigned int distr)
207{ 207{
208 union { 208 union {
209 unsigned int word; 209 unsigned int word;
@@ -212,8 +212,11 @@ static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
212 }; 212 };
213 } data; 213 } data;
214 214
215 data.distr = distr; 215 data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
216 data.lvl = lvl; 216 if (set_distr)
217 data.distr = distr;
218 if (set_lvl)
219 data.lvl = lvl;
217 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word); 220 __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
218} 221}
219 222
@@ -240,6 +243,25 @@ static void idu_irq_unmask(struct irq_data *data)
240 raw_spin_unlock_irqrestore(&mcip_lock, flags); 243 raw_spin_unlock_irqrestore(&mcip_lock, flags);
241} 244}
242 245
246static void idu_irq_ack(struct irq_data *data)
247{
248 unsigned long flags;
249
250 raw_spin_lock_irqsave(&mcip_lock, flags);
251 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
252 raw_spin_unlock_irqrestore(&mcip_lock, flags);
253}
254
255static void idu_irq_mask_ack(struct irq_data *data)
256{
257 unsigned long flags;
258
259 raw_spin_lock_irqsave(&mcip_lock, flags);
260 __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
261 __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
262 raw_spin_unlock_irqrestore(&mcip_lock, flags);
263}
264
243static int 265static int
244idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask, 266idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
245 bool force) 267 bool force)
@@ -263,13 +285,36 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
263 else 285 else
264 distribution_mode = IDU_M_DISTRI_RR; 286 distribution_mode = IDU_M_DISTRI_RR;
265 287
266 idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, distribution_mode); 288 idu_set_mode(data->hwirq, false, 0, true, distribution_mode);
267 289
268 raw_spin_unlock_irqrestore(&mcip_lock, flags); 290 raw_spin_unlock_irqrestore(&mcip_lock, flags);
269 291
270 return IRQ_SET_MASK_OK; 292 return IRQ_SET_MASK_OK;
271} 293}
272 294
295static int idu_irq_set_type(struct irq_data *data, u32 type)
296{
297 unsigned long flags;
298
299 /*
300 * ARCv2 IDU HW does not support inverse polarity, so these are the
301 * only interrupt types supported.
302 */
303 if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
304 return -EINVAL;
305
306 raw_spin_lock_irqsave(&mcip_lock, flags);
307
308 idu_set_mode(data->hwirq, true,
309 type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
310 IDU_M_TRIG_LEVEL,
311 false, 0);
312
313 raw_spin_unlock_irqrestore(&mcip_lock, flags);
314
315 return 0;
316}
317
273static void idu_irq_enable(struct irq_data *data) 318static void idu_irq_enable(struct irq_data *data)
274{ 319{
275 /* 320 /*
@@ -289,7 +334,10 @@ static struct irq_chip idu_irq_chip = {
289 .name = "MCIP IDU Intc", 334 .name = "MCIP IDU Intc",
290 .irq_mask = idu_irq_mask, 335 .irq_mask = idu_irq_mask,
291 .irq_unmask = idu_irq_unmask, 336 .irq_unmask = idu_irq_unmask,
337 .irq_ack = idu_irq_ack,
338 .irq_mask_ack = idu_irq_mask_ack,
292 .irq_enable = idu_irq_enable, 339 .irq_enable = idu_irq_enable,
340 .irq_set_type = idu_irq_set_type,
293#ifdef CONFIG_SMP 341#ifdef CONFIG_SMP
294 .irq_set_affinity = idu_irq_set_affinity, 342 .irq_set_affinity = idu_irq_set_affinity,
295#endif 343#endif
@@ -317,7 +365,7 @@ static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t
317} 365}
318 366
319static const struct irq_domain_ops idu_irq_ops = { 367static const struct irq_domain_ops idu_irq_ops = {
320 .xlate = irq_domain_xlate_onecell, 368 .xlate = irq_domain_xlate_onetwocell,
321 .map = idu_irq_map, 369 .map = idu_irq_map,
322}; 370};
323 371
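Note: the idu_set_mode() rework above is a read-modify-write conversion. Its two callers each know only one field (.irq_set_affinity the distribution mode, the new .irq_set_type the trigger), so rebuilding the whole mode word would zero whichever field the caller did not pass; instead the current word is fetched with CMD_IDU_READ_MODE and only the requested field is touched. The generic shape of that fix, sketched with linux/bitfield.h helpers (FIELD_MASK, read_mode_reg() and write_mode_reg() are placeholders, not MCIP names):

	#include <linux/bitfield.h>

	u32 word = read_mode_reg();			/* fetch current value    */
	word &= ~FIELD_MASK;				/* clear this field only  */
	word |= FIELD_PREP(FIELD_MASK, new_val);	/* insert the new value   */
	write_mode_reg(word);				/* unrelated bits survive */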
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index c2663fce7f6c..dc05a63516f5 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -572,6 +572,7 @@ static unsigned long read_pointer(const u8 **pLoc, const void *end,
572#else 572#else
573 BUILD_BUG_ON(sizeof(u32) != sizeof(value)); 573 BUILD_BUG_ON(sizeof(u32) != sizeof(value));
574#endif 574#endif
575 /* Fall through */
575 case DW_EH_PE_native: 576 case DW_EH_PE_native:
576 if (end < (const void *)(ptr.pul + 1)) 577 if (end < (const void *)(ptr.pul + 1))
577 return 0; 578 return 0;
@@ -826,7 +827,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
826 case DW_CFA_def_cfa: 827 case DW_CFA_def_cfa:
827 state->cfa.reg = get_uleb128(&ptr.p8, end); 828 state->cfa.reg = get_uleb128(&ptr.p8, end);
828 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg); 829 unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
829 /*nobreak*/ 830 /* fall through */
830 case DW_CFA_def_cfa_offset: 831 case DW_CFA_def_cfa_offset:
831 state->cfa.offs = get_uleb128(&ptr.p8, end); 832 state->cfa.offs = get_uleb128(&ptr.p8, end);
832 unw_debug("cfa_def_cfa_offset: 0x%lx ", 833 unw_debug("cfa_def_cfa_offset: 0x%lx ",
@@ -834,7 +835,7 @@ static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
834 break; 835 break;
835 case DW_CFA_def_cfa_sf: 836 case DW_CFA_def_cfa_sf:
836 state->cfa.reg = get_uleb128(&ptr.p8, end); 837 state->cfa.reg = get_uleb128(&ptr.p8, end);
837 /*nobreak */ 838 /* fall through */
838 case DW_CFA_def_cfa_offset_sf: 839 case DW_CFA_def_cfa_offset_sf:
839 state->cfa.offs = get_sleb128(&ptr.p8, end) 840 state->cfa.offs = get_sleb128(&ptr.p8, end)
840 * state->dataAlign; 841 * state->dataAlign;
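Note: this unwind.c hunk, like the later one-liners in riscpc.c, cvmx-sli-defs.h and nds32's signal.c, is part of the tree-wide effort to annotate deliberate switch fall-throughs for GCC 7+'s -Wimplicit-fallthrough. The warning is silenced only by comments matching its built-in patterns; "fall through" matches, the old "/*nobreak*/" spelling does not. A minimal illustration (step_one() and step_two() are hypothetical):

	switch (n) {
	case 1:
		step_one();
		/* fall through */	/* matched, so no warning here    */
	case 2:
		step_two();		/* reached for both n==1 and n==2 */
		break;
	}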
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 62c210e7ee4c..70a3fbe79fba 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -101,7 +101,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
101 if (is_isa_arcv2() && ioc_enable && coherent) 101 if (is_isa_arcv2() && ioc_enable && coherent)
102 dev->dma_coherent = true; 102 dev->dma_coherent = true;
103 103
104 dev_info(dev, "use %sncoherent DMA ops\n", 104 dev_info(dev, "use %scoherent DMA ops\n",
105 dev->dma_coherent ? "" : "non"); 105 dev->dma_coherent ? "" : "non");
106} 106}
107 107
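Note: the one-character format fix above is easiest to see with both expansions written out, since the old string duplicated the 'n' into the prefix (sketched with plain printf; coherent stands in for dev->dma_coherent):

	printf("use %sncoherent DMA ops\n", coherent ? "" : "non");
	/* "use ncoherent DMA ops" or "use nonncoherent DMA ops"  (broken) */

	printf("use %scoherent DMA ops\n", coherent ? "" : "non");
	/* "use coherent DMA ops"  or "use noncoherent DMA ops"   (fixed)  */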
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 7dd2dd335cf6..0b961a2a10b8 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -6,11 +6,15 @@
6 */ 6 */
7 7
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/of_fdt.h>
10#include <linux/libfdt.h>
9#include <linux/smp.h> 11#include <linux/smp.h>
10#include <asm/arcregs.h> 12#include <asm/arcregs.h>
11#include <asm/io.h> 13#include <asm/io.h>
12#include <asm/mach_desc.h> 14#include <asm/mach_desc.h>
13 15
16int arc_hsdk_axi_dmac_coherent __section(.data) = 0;
17
14#define ARC_CCM_UNUSED_ADDR 0x60000000 18#define ARC_CCM_UNUSED_ADDR 0x60000000
15 19
16static void __init hsdk_init_per_cpu(unsigned int cpu) 20static void __init hsdk_init_per_cpu(unsigned int cpu)
@@ -97,6 +101,42 @@ static void __init hsdk_enable_gpio_intc_wire(void)
97 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN); 101 iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
98} 102}
99 103
104static int __init hsdk_tweak_node_coherency(const char *path, bool coherent)
105{
106 void *fdt = initial_boot_params;
107 const void *prop;
108 int node, ret;
109 bool dt_coh_set;
110
111 node = fdt_path_offset(fdt, path);
112 if (node < 0)
113 goto tweak_fail;
114
115 prop = fdt_getprop(fdt, node, "dma-coherent", &ret);
116 if (!prop && ret != -FDT_ERR_NOTFOUND)
117 goto tweak_fail;
118
119 dt_coh_set = ret != -FDT_ERR_NOTFOUND;
120 ret = 0;
121
122 /* need to remove "dma-coherent" property */
123 if (dt_coh_set && !coherent)
124 ret = fdt_delprop(fdt, node, "dma-coherent");
125
126 /* need to set "dma-coherent" property */
127 if (!dt_coh_set && coherent)
128 ret = fdt_setprop(fdt, node, "dma-coherent", NULL, 0);
129
130 if (ret < 0)
131 goto tweak_fail;
132
133 return 0;
134
135tweak_fail:
136 pr_err("failed to tweak %s to %scoherent\n", path, coherent ? "" : "non");
137 return -EFAULT;
138}
139
100enum hsdk_axi_masters { 140enum hsdk_axi_masters {
101 M_HS_CORE = 0, 141 M_HS_CORE = 0,
102 M_HS_RTT, 142 M_HS_RTT,
@@ -162,6 +202,39 @@ enum hsdk_axi_masters {
162#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180)) 202#define CREG_PAE ((void __iomem *)(CREG_BASE + 0x180))
163#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194)) 203#define CREG_PAE_UPDT ((void __iomem *)(CREG_BASE + 0x194))
164 204
205static void __init hsdk_init_memory_bridge_axi_dmac(void)
206{
207 bool coherent = !!arc_hsdk_axi_dmac_coherent;
208 u32 axi_m_slv1, axi_m_oft1;
209
210 /*
211 * Don't tweak memory bridge configuration if we failed to tweak DTB
212 * as we will end up in an inconsistent state.
213 */
214 if (hsdk_tweak_node_coherency("/soc/dmac@80000", coherent))
215 return;
216
217 if (coherent) {
218 axi_m_slv1 = 0x77999999;
219 axi_m_oft1 = 0x76DCBA98;
220 } else {
221 axi_m_slv1 = 0x77777777;
222 axi_m_oft1 = 0x76543210;
223 }
224
225 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
226 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
227 writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_0));
228 writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_0));
229 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
230
231 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
233 writel(axi_m_slv1, CREG_AXI_M_SLV1(M_DMAC_1));
234 writel(axi_m_oft1, CREG_AXI_M_OFT1(M_DMAC_1));
235 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
236}
237
165static void __init hsdk_init_memory_bridge(void) 238static void __init hsdk_init_memory_bridge(void)
166{ 239{
167 u32 reg; 240 u32 reg;
@@ -227,24 +300,14 @@ static void __init hsdk_init_memory_bridge(void)
227 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU)); 300 writel(0x76543210, CREG_AXI_M_OFT1(M_GPU));
228 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU)); 301 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_GPU));
229 302
230 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_0));
231 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_0));
232 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_0));
233 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_0));
234 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_0));
235
236 writel(0x77777777, CREG_AXI_M_SLV0(M_DMAC_1));
237 writel(0x77777777, CREG_AXI_M_SLV1(M_DMAC_1));
238 writel(0xFEDCBA98, CREG_AXI_M_OFT0(M_DMAC_1));
239 writel(0x76543210, CREG_AXI_M_OFT1(M_DMAC_1));
240 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DMAC_1));
241
242 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS)); 303 writel(0x00000000, CREG_AXI_M_SLV0(M_DVFS));
243 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS)); 304 writel(0x60000000, CREG_AXI_M_SLV1(M_DVFS));
244 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS)); 305 writel(0x00000000, CREG_AXI_M_OFT0(M_DVFS));
245 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS)); 306 writel(0x00000000, CREG_AXI_M_OFT1(M_DVFS));
246 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS)); 307 writel(UPDATE_VAL, CREG_AXI_M_UPDT(M_DVFS));
247 308
309 hsdk_init_memory_bridge_axi_dmac();
310
248 /* 311 /*
249 * PAE remapping for DMA clients does not work due to an RTL bug, so 312 * PAE remapping for DMA clients does not work due to an RTL bug, so
250 * CREG_PAE register must be programmed to all zeroes, otherwise it 313 * CREG_PAE register must be programmed to all zeroes, otherwise it
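Note: hsdk_tweak_node_coherency() above is the standard libfdt idiom for toggling a boolean (zero-length) devicetree property in the flattened blob before it is unflattened, keeping the dmac node's "dma-coherent" state in step with the AXI bridge programming. The idiom in isolation, a sketch with placeholder names and trimmed error reporting:

	static int fdt_bool_prop_sync(void *fdt, const char *path,
				      const char *name, bool want)
	{
		const void *prop;
		int node, len;

		node = fdt_path_offset(fdt, path);
		if (node < 0)
			return node;

		prop = fdt_getprop(fdt, node, name, &len);
		if (!prop && len != -FDT_ERR_NOTFOUND)
			return len;			/* real libfdt error    */

		if (prop && !want)			/* present but unwanted */
			return fdt_delprop(fdt, node, name);
		if (!prop && want)			/* absent but wanted    */
			return fdt_setprop(fdt, node, name, NULL, 0);
		return 0;				/* already in sync      */
	}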
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 33b00579beff..24360211534a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,6 +7,8 @@ config ARM
7 select ARCH_HAS_BINFMT_FLAT 7 select ARCH_HAS_BINFMT_FLAT
8 select ARCH_HAS_DEBUG_VIRTUAL if MMU 8 select ARCH_HAS_DEBUG_VIRTUAL if MMU
9 select ARCH_HAS_DEVMEM_IS_ALLOWED 9 select ARCH_HAS_DEVMEM_IS_ALLOWED
10 select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
11 select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
10 select ARCH_HAS_ELF_RANDOMIZE 12 select ARCH_HAS_ELF_RANDOMIZE
11 select ARCH_HAS_FORTIFY_SOURCE 13 select ARCH_HAS_FORTIFY_SOURCE
12 select ARCH_HAS_KEEPINITRD 14 select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
18 select ARCH_HAS_SET_MEMORY 20 select ARCH_HAS_SET_MEMORY
19 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL 21 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
20 select ARCH_HAS_STRICT_MODULE_RWX if MMU 22 select ARCH_HAS_STRICT_MODULE_RWX if MMU
23 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
24 select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
21 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU 25 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
22 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 26 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
23 select ARCH_HAVE_CUSTOM_GPIO_H 27 select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index ced1a19d5f89..46849d6ecb3e 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -185,7 +185,7 @@
185 uart0: serial@0 { 185 uart0: serial@0 {
186 compatible = "ti,am3352-uart", "ti,omap3-uart"; 186 compatible = "ti,am3352-uart", "ti,omap3-uart";
187 clock-frequency = <48000000>; 187 clock-frequency = <48000000>;
188 reg = <0x0 0x2000>; 188 reg = <0x0 0x1000>;
189 interrupts = <72>; 189 interrupts = <72>;
190 status = "disabled"; 190 status = "disabled";
191 dmas = <&edma 26 0>, <&edma 27 0>; 191 dmas = <&edma 26 0>, <&edma 27 0>;
@@ -934,7 +934,7 @@
934 uart1: serial@0 { 934 uart1: serial@0 {
935 compatible = "ti,am3352-uart", "ti,omap3-uart"; 935 compatible = "ti,am3352-uart", "ti,omap3-uart";
936 clock-frequency = <48000000>; 936 clock-frequency = <48000000>;
937 reg = <0x0 0x2000>; 937 reg = <0x0 0x1000>;
938 interrupts = <73>; 938 interrupts = <73>;
939 status = "disabled"; 939 status = "disabled";
940 dmas = <&edma 28 0>, <&edma 29 0>; 940 dmas = <&edma 28 0>, <&edma 29 0>;
@@ -966,7 +966,7 @@
966 uart2: serial@0 { 966 uart2: serial@0 {
967 compatible = "ti,am3352-uart", "ti,omap3-uart"; 967 compatible = "ti,am3352-uart", "ti,omap3-uart";
968 clock-frequency = <48000000>; 968 clock-frequency = <48000000>;
969 reg = <0x0 0x2000>; 969 reg = <0x0 0x1000>;
970 interrupts = <74>; 970 interrupts = <74>;
971 status = "disabled"; 971 status = "disabled";
972 dmas = <&edma 30 0>, <&edma 31 0>; 972 dmas = <&edma 30 0>, <&edma 31 0>;
@@ -1614,7 +1614,7 @@
1614 uart3: serial@0 { 1614 uart3: serial@0 {
1615 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1615 compatible = "ti,am3352-uart", "ti,omap3-uart";
1616 clock-frequency = <48000000>; 1616 clock-frequency = <48000000>;
1617 reg = <0x0 0x2000>; 1617 reg = <0x0 0x1000>;
1618 interrupts = <44>; 1618 interrupts = <44>;
1619 status = "disabled"; 1619 status = "disabled";
1620 }; 1620 };
@@ -1644,7 +1644,7 @@
1644 uart4: serial@0 { 1644 uart4: serial@0 {
1645 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1645 compatible = "ti,am3352-uart", "ti,omap3-uart";
1646 clock-frequency = <48000000>; 1646 clock-frequency = <48000000>;
1647 reg = <0x0 0x2000>; 1647 reg = <0x0 0x1000>;
1648 interrupts = <45>; 1648 interrupts = <45>;
1649 status = "disabled"; 1649 status = "disabled";
1650 }; 1650 };
@@ -1674,7 +1674,7 @@
1674 uart5: serial@0 { 1674 uart5: serial@0 {
1675 compatible = "ti,am3352-uart", "ti,omap3-uart"; 1675 compatible = "ti,am3352-uart", "ti,omap3-uart";
1676 clock-frequency = <48000000>; 1676 clock-frequency = <48000000>;
1677 reg = <0x0 0x2000>; 1677 reg = <0x0 0x1000>;
1678 interrupts = <46>; 1678 interrupts = <46>;
1679 status = "disabled"; 1679 status = "disabled";
1680 }; 1680 };
@@ -1758,6 +1758,8 @@
1758 1758
1759 target-module@cc000 { /* 0x481cc000, ap 60 46.0 */ 1759 target-module@cc000 { /* 0x481cc000, ap 60 46.0 */
1760 compatible = "ti,sysc-omap4", "ti,sysc"; 1760 compatible = "ti,sysc-omap4", "ti,sysc";
1761 reg = <0xcc020 0x4>;
1762 reg-names = "rev";
1761 ti,hwmods = "d_can0"; 1763 ti,hwmods = "d_can0";
1762 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1764 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1763 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>, 1765 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN0_CLKCTRL 0>,
@@ -1780,6 +1782,8 @@
1780 1782
1781 target-module@d0000 { /* 0x481d0000, ap 62 42.0 */ 1783 target-module@d0000 { /* 0x481d0000, ap 62 42.0 */
1782 compatible = "ti,sysc-omap4", "ti,sysc"; 1784 compatible = "ti,sysc-omap4", "ti,sysc";
1785 reg = <0xd0020 0x4>;
1786 reg-names = "rev";
1783 ti,hwmods = "d_can1"; 1787 ti,hwmods = "d_can1";
1784 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1788 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1785 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>, 1789 clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>,
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index e5c2f71a7c77..fb6b8aa12cc5 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -234,13 +234,33 @@
234 interrupt-names = "edma3_tcerrint"; 234 interrupt-names = "edma3_tcerrint";
235 }; 235 };
236 236
237 mmc3: mmc@47810000 { 237 target-module@47810000 {
238 compatible = "ti,omap4-hsmmc"; 238 compatible = "ti,sysc-omap2", "ti,sysc";
239 ti,hwmods = "mmc3"; 239 ti,hwmods = "mmc3";
240 ti,needs-special-reset; 240 reg = <0x478102fc 0x4>,
241 interrupts = <29>; 241 <0x47810110 0x4>,
242 reg = <0x47810000 0x1000>; 242 <0x47810114 0x4>;
243 status = "disabled"; 243 reg-names = "rev", "sysc", "syss";
244 ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
245 SYSC_OMAP2_ENAWAKEUP |
246 SYSC_OMAP2_SOFTRESET |
247 SYSC_OMAP2_AUTOIDLE)>;
248 ti,sysc-sidle = <SYSC_IDLE_FORCE>,
249 <SYSC_IDLE_NO>,
250 <SYSC_IDLE_SMART>;
251 ti,syss-mask = <1>;
252 clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>;
253 clock-names = "fck";
254 #address-cells = <1>;
255 #size-cells = <1>;
256 ranges = <0x0 0x47810000 0x1000>;
257
258 mmc3: mmc@0 {
259 compatible = "ti,omap4-hsmmc";
260 ti,needs-special-reset;
261 interrupts = <29>;
262 reg = <0x0 0x1000>;
263 };
244 }; 264 };
245 265
246 usb: usb@47400000 { 266 usb: usb@47400000 {
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 55aff4db9c7c..848e2a8884e2 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -228,13 +228,33 @@
228 interrupt-names = "edma3_tcerrint"; 228 interrupt-names = "edma3_tcerrint";
229 }; 229 };
230 230
231 mmc3: mmc@47810000 { 231 target-module@47810000 {
232 compatible = "ti,omap4-hsmmc"; 232 compatible = "ti,sysc-omap2", "ti,sysc";
233 reg = <0x47810000 0x1000>;
234 ti,hwmods = "mmc3"; 233 ti,hwmods = "mmc3";
235 ti,needs-special-reset; 234 reg = <0x478102fc 0x4>,
236 interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; 235 <0x47810110 0x4>,
237 status = "disabled"; 236 <0x47810114 0x4>;
237 reg-names = "rev", "sysc", "syss";
238 ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY |
239 SYSC_OMAP2_ENAWAKEUP |
240 SYSC_OMAP2_SOFTRESET |
241 SYSC_OMAP2_AUTOIDLE)>;
242 ti,sysc-sidle = <SYSC_IDLE_FORCE>,
243 <SYSC_IDLE_NO>,
244 <SYSC_IDLE_SMART>;
245 ti,syss-mask = <1>;
246 clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>;
247 clock-names = "fck";
248 #address-cells = <1>;
249 #size-cells = <1>;
250 ranges = <0x0 0x47810000 0x1000>;
251
252 mmc3: mmc@0 {
253 compatible = "ti,omap4-hsmmc";
254 ti,needs-special-reset;
255 interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
256 reg = <0x0 0x1000>;
257 };
238 }; 258 };
239 259
240 sham: sham@53100000 { 260 sham: sham@53100000 {
diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi
index 989cb60b9029..04bee4ff9dcb 100644
--- a/arch/arm/boot/dts/am437x-l4.dtsi
+++ b/arch/arm/boot/dts/am437x-l4.dtsi
@@ -1574,6 +1574,8 @@
1574 1574
1575 target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ 1575 target-module@cc000 { /* 0x481cc000, ap 50 46.0 */
1576 compatible = "ti,sysc-omap4", "ti,sysc"; 1576 compatible = "ti,sysc-omap4", "ti,sysc";
1577 reg = <0xcc020 0x4>;
1578 reg-names = "rev";
1577 ti,hwmods = "d_can0"; 1579 ti,hwmods = "d_can0";
1578 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1580 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1579 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; 1581 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>;
@@ -1593,6 +1595,8 @@
1593 1595
1594 target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ 1596 target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */
1595 compatible = "ti,sysc-omap4", "ti,sysc"; 1597 compatible = "ti,sysc-omap4", "ti,sysc";
1598 reg = <0xd0020 0x4>;
1599 reg-names = "rev";
1596 ti,hwmods = "d_can1"; 1600 ti,hwmods = "d_can1";
1597 /* Domains (P, C): per_pwrdm, l4ls_clkdm */ 1601 /* Domains (P, C): per_pwrdm, l4ls_clkdm */
1598 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; 1602 clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>;
diff --git a/arch/arm/boot/dts/am571x-idk.dts b/arch/arm/boot/dts/am571x-idk.dts
index 1d5e99964bbf..0aaacea1d887 100644
--- a/arch/arm/boot/dts/am571x-idk.dts
+++ b/arch/arm/boot/dts/am571x-idk.dts
@@ -175,14 +175,9 @@
175}; 175};
176 176
177&mmc1 { 177&mmc1 {
178 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 178 pinctrl-names = "default", "hs";
179 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 179 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
180 pinctrl-1 = <&mmc1_pins_hs>; 180 pinctrl-1 = <&mmc1_pins_hs>;
181 pinctrl-2 = <&mmc1_pins_sdr12>;
182 pinctrl-3 = <&mmc1_pins_sdr25>;
183 pinctrl-4 = <&mmc1_pins_sdr50>;
184 pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>;
185 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
186}; 181};
187 182
188&mmc2 { 183&mmc2 {
diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts
index c65d7f6d3b5a..ea1c119feaa5 100644
--- a/arch/arm/boot/dts/am572x-idk.dts
+++ b/arch/arm/boot/dts/am572x-idk.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 20 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
27}; 22};
28 23
29&mmc2 { 24&mmc2 {
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index dc5141c35610..7935d70874ce 100644
--- a/arch/arm/boot/dts/am574x-idk.dts
+++ b/arch/arm/boot/dts/am574x-idk.dts
@@ -24,14 +24,9 @@
24}; 24};
25 25
26&mmc1 { 26&mmc1 {
27 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 27 pinctrl-names = "default", "hs";
28 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; 28 pinctrl-0 = <&mmc1_pins_default_no_clk_pu>;
29 pinctrl-1 = <&mmc1_pins_hs>; 29 pinctrl-1 = <&mmc1_pins_hs>;
30 pinctrl-2 = <&mmc1_pins_default>;
31 pinctrl-3 = <&mmc1_pins_hs>;
32 pinctrl-4 = <&mmc1_pins_sdr50>;
33 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>;
34 pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>;
35}; 30};
36 31
37&mmc2 { 32&mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
index d02f5fa61e5f..bc76f1705c0f 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi
@@ -379,7 +379,7 @@
379 }; 379 };
380}; 380};
381 381
382&gpio7 { 382&gpio7_target {
383 ti,no-reset-on-init; 383 ti,no-reset-on-init;
384 ti,no-idle-on-init; 384 ti,no-idle-on-init;
385}; 385};
@@ -430,6 +430,7 @@
430 430
431 bus-width = <4>; 431 bus-width = <4>;
432 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ 432 cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */
433 no-1-8-v;
433}; 434};
434 435
435&mmc2 { 436&mmc2 {
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
index a374b5cd6db0..7b113b52c3fb 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default>; 20 pinctrl-0 = <&mmc1_pins_default>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>;
27 vmmc-supply = <&vdd_3v3>; 22 vmmc-supply = <&vdd_3v3>;
28 vqmmc-supply = <&ldo1_reg>; 23 vqmmc-supply = <&ldo1_reg>;
29}; 24};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
index 4badd2144db9..30c500b15b21 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts
@@ -16,14 +16,9 @@
16}; 16};
17 17
18&mmc1 { 18&mmc1 {
19 pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; 19 pinctrl-names = "default", "hs";
20 pinctrl-0 = <&mmc1_pins_default>; 20 pinctrl-0 = <&mmc1_pins_default>;
21 pinctrl-1 = <&mmc1_pins_hs>; 21 pinctrl-1 = <&mmc1_pins_hs>;
22 pinctrl-2 = <&mmc1_pins_sdr12>;
23 pinctrl-3 = <&mmc1_pins_sdr25>;
24 pinctrl-4 = <&mmc1_pins_sdr50>;
25 pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>;
26 pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>;
27 vmmc-supply = <&vdd_3v3>; 22 vmmc-supply = <&vdd_3v3>;
28 vqmmc-supply = <&ldo1_reg>; 23 vqmmc-supply = <&ldo1_reg>;
29}; 24};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 714e971b912a..de7f85efaa51 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -498,7 +498,7 @@
498 phy-supply = <&ldousb_reg>; 498 phy-supply = <&ldousb_reg>;
499}; 499};
500 500
501&gpio7 { 501&gpio7_target {
502 ti,no-reset-on-init; 502 ti,no-reset-on-init;
503 ti,no-idle-on-init; 503 ti,no-idle-on-init;
504}; 504};
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 23faedec08ab..21e5914fdd62 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1261,7 +1261,7 @@
1261 }; 1261 };
1262 }; 1262 };
1263 1263
1264 target-module@51000 { /* 0x48051000, ap 45 2e.0 */ 1264 gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */
1265 compatible = "ti,sysc-omap2", "ti,sysc"; 1265 compatible = "ti,sysc-omap2", "ti,sysc";
1266 ti,hwmods = "gpio7"; 1266 ti,hwmods = "gpio7";
1267 reg = <0x51000 0x4>, 1267 reg = <0x51000 0x4>,
@@ -3025,7 +3025,7 @@
3025 3025
3026 target-module@80000 { /* 0x48480000, ap 31 16.0 */ 3026 target-module@80000 { /* 0x48480000, ap 31 16.0 */
3027 compatible = "ti,sysc-omap4", "ti,sysc"; 3027 compatible = "ti,sysc-omap4", "ti,sysc";
3028 reg = <0x80000 0x4>; 3028 reg = <0x80020 0x4>;
3029 reg-names = "rev"; 3029 reg-names = "rev";
3030 clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>; 3030 clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>;
3031 clock-names = "fck"; 3031 clock-names = "fck";
@@ -4577,7 +4577,7 @@
4577 4577
4578 target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */ 4578 target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */
4579 compatible = "ti,sysc-omap4", "ti,sysc"; 4579 compatible = "ti,sysc-omap4", "ti,sysc";
4580 reg = <0xc000 0x4>; 4580 reg = <0xc020 0x4>;
4581 reg-names = "rev"; 4581 reg-names = "rev";
4582 clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>; 4582 clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>;
4583 clock-names = "fck"; 4583 clock-names = "fck";
diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
index 28ebb4eb884a..214b9e6de2c3 100644
--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
+++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi
@@ -32,7 +32,7 @@
32 * 32 *
33 * Datamanual Revisions: 33 * Datamanual Revisions:
34 * 34 *
35 * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 35 * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019
36 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 36 * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016
37 * 37 *
38 */ 38 */
@@ -229,45 +229,45 @@
229 229
230 mmc3_pins_default: mmc3_pins_default { 230 mmc3_pins_default: mmc3_pins_default {
231 pinctrl-single,pins = < 231 pinctrl-single,pins = <
232 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 232 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
233 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 233 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
234 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 234 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
235 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 235 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
236 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 236 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
237 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 237 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
238 >; 238 >;
239 }; 239 };
240 240
241 mmc3_pins_hs: mmc3_pins_hs { 241 mmc3_pins_hs: mmc3_pins_hs {
242 pinctrl-single,pins = < 242 pinctrl-single,pins = <
243 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 243 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
244 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 244 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
245 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 245 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
246 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 246 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
247 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 247 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
248 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 248 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
249 >; 249 >;
250 }; 250 };
251 251
252 mmc3_pins_sdr12: mmc3_pins_sdr12 { 252 mmc3_pins_sdr12: mmc3_pins_sdr12 {
253 pinctrl-single,pins = < 253 pinctrl-single,pins = <
254 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 254 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
255 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 255 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
256 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 256 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
257 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 257 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
258 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 258 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
259 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 259 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
260 >; 260 >;
261 }; 261 };
262 262
263 mmc3_pins_sdr25: mmc3_pins_sdr25 { 263 mmc3_pins_sdr25: mmc3_pins_sdr25 {
264 pinctrl-single,pins = < 264 pinctrl-single,pins = <
265 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ 265 DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */
266 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ 266 DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */
267 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ 267 DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */
268 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ 268 DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */
269 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ 269 DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */
270 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ 270 DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */
271 >; 271 >;
272 }; 272 };
273 273
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts
index 3fa0cbe456db..0f3870d3b099 100644
--- a/arch/arm/boot/dts/vf610-bk4.dts
+++ b/arch/arm/boot/dts/vf610-bk4.dts
@@ -246,13 +246,13 @@
246 reg = <0>; 246 reg = <0>;
247 }; 247 };
248 248
249 n25q128a13_2: flash@1 { 249 n25q128a13_2: flash@2 {
250 compatible = "n25q128a13", "jedec,spi-nor"; 250 compatible = "n25q128a13", "jedec,spi-nor";
251 #address-cells = <1>; 251 #address-cells = <1>;
252 #size-cells = <1>; 252 #size-cells = <1>;
253 spi-max-frequency = <66000000>; 253 spi-max-frequency = <66000000>;
254 spi-rx-bus-width = <2>; 254 spi-rx-bus-width = <2>;
255 reg = <1>; 255 reg = <2>;
256 }; 256 };
257}; 257};
258 258
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 1d5210eb4776..582925238d65 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -66,7 +66,7 @@ for_each_frame: tst frame, mask @ Check for address exceptions
66 66
671003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists, 671003: ldr r2, [sv_pc, #-4] @ if stmfd sp!, {args} exists,
68 ldr r3, .Ldsi+4 @ adjust saved 'pc' back one 68 ldr r3, .Ldsi+4 @ adjust saved 'pc' back one
69 teq r3, r2, lsr #10 @ instruction 69 teq r3, r2, lsr #11 @ instruction
70 subne r0, sv_pc, #4 @ allow for mov 70 subne r0, sv_pc, #4 @ allow for mov
71 subeq r0, sv_pc, #8 @ allow for mov + stmia 71 subeq r0, sv_pc, #8 @ allow for mov + stmia
72 72
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 81159af44862..14a6c3eb3298 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -126,6 +126,8 @@ restart:
126 orr r11, r11, r13 @ mask all requested interrupts 126 orr r11, r11, r13 @ mask all requested interrupts
127 str r11, [r12, #OMAP1510_GPIO_INT_MASK] 127 str r11, [r12, #OMAP1510_GPIO_INT_MASK]
128 128
129 str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts
130
129 ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set? 131 ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set?
130 beq hksw @ no - try next source 132 beq hksw @ no - try next source
131 133
@@ -133,7 +135,6 @@ restart:
133 @@@@@@@@@@@@@@@@@@@@@@ 135 @@@@@@@@@@@@@@@@@@@@@@
134 @ Keyboard clock FIQ mode interrupt handler 136 @ Keyboard clock FIQ mode interrupt handler
135 @ r10 now contains KEYBRD_CLK_MASK, use it 137 @ r10 now contains KEYBRD_CLK_MASK, use it
136 str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt
137 bic r11, r11, r10 @ unmask it 138 bic r11, r11, r10 @ unmask it
138 str r11, [r12, #OMAP1510_GPIO_INT_MASK] 139 str r11, [r12, #OMAP1510_GPIO_INT_MASK]
139 140
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c
index 43899fa56674..0254eb9cf8c6 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq.c
+++ b/arch/arm/mach-omap1/ams-delta-fiq.c
@@ -70,9 +70,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
70 * interrupts default to since commit 80ac93c27441 70 * interrupts default to since commit 80ac93c27441
71 * requires interrupt already acked and unmasked. 71 * requires interrupt already acked and unmasked.
72 */ 72 */
73 if (irq_chip->irq_ack) 73 if (!WARN_ON_ONCE(!irq_chip->irq_unmask))
74 irq_chip->irq_ack(d);
75 if (irq_chip->irq_unmask)
76 irq_chip->irq_unmask(d); 74 irq_chip->irq_unmask(d);
77 } 75 }
78 for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) 76 for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++)
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index f9c02f9f1c92..5c3845730dbf 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void)
127 struct device_node *np; 127 struct device_node *np;
128 struct gen_pool *sram_pool; 128 struct gen_pool *sram_pool;
129 129
130 if (!soc_is_omap44xx() && !soc_is_omap54xx())
131 return 0;
132
130 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); 133 np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
131 if (!np) 134 if (!np)
132 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", 135 pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 4a5b4aee6615..1ec21e9ba1e9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = {
379static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { 379static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = {
380 .rev_offs = 0x0, 380 .rev_offs = 0x0,
381 .sysc_offs = 0x4, 381 .sysc_offs = 0x4,
382 .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, 382 .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
383 SYSC_HAS_RESET_STATUS,
383 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), 384 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
384 .sysc_fields = &omap_hwmod_sysc_type2, 385 .sysc_fields = &omap_hwmod_sysc_type2,
385}; 386};
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index 0ce56ad754ce..ea2c84214bac 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
46 switch (tag->u.acorn.vram_pages) { 46 switch (tag->u.acorn.vram_pages) {
47 case 512: 47 case 512:
48 vram_size += PAGE_SIZE * 256; 48 vram_size += PAGE_SIZE * 256;
49 /* Fall through - ??? */
49 case 256: 50 case 256:
50 vram_size += PAGE_SIZE * 256; 51 vram_size += PAGE_SIZE * 256;
51 default: 52 default:
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54cd7ed90ba..c1222c0e9fd3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -664,10 +664,6 @@ config ARM_LPAE
664 !CPU_32v4 && !CPU_32v3 664 !CPU_32v4 && !CPU_32v3
665 select PHYS_ADDR_T_64BIT 665 select PHYS_ADDR_T_64BIT
666 select SWIOTLB 666 select SWIOTLB
667 select ARCH_HAS_DMA_COHERENT_TO_PFN
668 select ARCH_HAS_DMA_MMAP_PGPROT
669 select ARCH_HAS_SYNC_DMA_FOR_DEVICE
670 select ARCH_HAS_SYNC_DMA_FOR_CPU
671 help 667 help
672 Say Y if you have an ARMv7 processor supporting the LPAE page 668 Say Y if you have an ARMv7 processor supporting the LPAE page
673 table format and you would like to access memory beyond the 669 table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6774b03aa405..d42557ee69c2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
2405pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 2405pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
2406 unsigned long attrs) 2406 unsigned long attrs)
2407{ 2407{
2408 if (!dev_is_dma_coherent(dev)) 2408 return __get_dma_pgprot(attrs, prot);
2409 return __get_dma_pgprot(attrs, prot);
2410 return prot;
2411} 2409}
2412 2410
2413void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 2411void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 16d373d587c4..b4be3baa83d4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -175,6 +175,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
175#ifdef CONFIG_HAVE_ARCH_PFN_VALID 175#ifdef CONFIG_HAVE_ARCH_PFN_VALID
176int pfn_valid(unsigned long pfn) 176int pfn_valid(unsigned long pfn)
177{ 177{
178 phys_addr_t addr = __pfn_to_phys(pfn);
179
180 if (__phys_to_pfn(addr) != pfn)
181 return 0;
182
178 return memblock_is_map_memory(__pfn_to_phys(pfn)); 183 return memblock_is_map_memory(__pfn_to_phys(pfn));
179} 184}
180EXPORT_SYMBOL(pfn_valid); 185EXPORT_SYMBOL(pfn_valid);
@@ -628,7 +633,8 @@ static void update_sections_early(struct section_perm perms[], int n)
628 if (t->flags & PF_KTHREAD) 633 if (t->flags & PF_KTHREAD)
629 continue; 634 continue;
630 for_each_thread(t, s) 635 for_each_thread(t, s)
631 set_section_perms(perms, n, true, s->mm); 636 if (s->mm)
637 set_section_perms(perms, n, true, s->mm);
632 } 638 }
633 set_section_perms(perms, n, true, current->active_mm); 639 set_section_perms(perms, n, true, current->active_mm);
634 set_section_perms(perms, n, true, &init_mm); 640 set_section_perms(perms, n, true, &init_mm);
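Note: the pfn_valid() hunk above rejects pfns whose address does not survive a round trip through __pfn_to_phys()/__phys_to_pfn(). With a 32-bit phys_addr_t (non-LPAE) the shift in __pfn_to_phys() silently truncates an oversized pfn, and memblock_is_map_memory() would then be consulted about an unrelated low address. Numerically, assuming PAGE_SHIFT == 12 and a 32-bit phys_addr_t:

	unsigned long pfn = 0x100001;			/* needs 33 physical bits */
	phys_addr_t addr = (phys_addr_t)pfn << 12;	/* truncates to 0x1000    */
	/* __phys_to_pfn(addr) == 0x1 != pfn, so pfn_valid() now returns 0 */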
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index c7a87368850b..12aa7eaeaf68 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -339,6 +339,12 @@
339 pinctrl-names = "default"; 339 pinctrl-names = "default";
340}; 340};
341 341
342&ir {
343 status = "okay";
344 pinctrl-0 = <&remote_input_ao_pins>;
345 pinctrl-names = "default";
346};
347
342&pwm_ef { 348&pwm_ef {
343 status = "okay"; 349 status = "okay";
344 pinctrl-0 = <&pwm_e_pins>; 350 pinctrl-0 = <&pwm_e_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
index f8d43e3dcf20..1785552d450c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi
@@ -2386,6 +2386,7 @@
2386 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; 2386 clocks = <&clkc CLKID_USB1_DDR_BRIDGE>;
2387 clock-names = "ddr"; 2387 clock-names = "ddr";
2388 phys = <&usb2_phy1>; 2388 phys = <&usb2_phy1>;
2389 phy-names = "usb2-phy";
2389 dr_mode = "peripheral"; 2390 dr_mode = "peripheral";
2390 g-rx-fifo-size = <192>; 2391 g-rx-fifo-size = <192>;
2391 g-np-tx-fifo-size = <128>; 2392 g-np-tx-fifo-size = <128>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
index 81780ffcc7f0..4e916e1f71f7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dts
@@ -53,6 +53,7 @@
53 53
54 gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>; 54 gpio = <&gpio_ao GPIOAO_8 GPIO_ACTIVE_HIGH>;
55 enable-active-high; 55 enable-active-high;
56 regulator-always-on;
56 }; 57 };
57 58
58 tf_io: gpio-regulator-tf_io { 59 tf_io: gpio-regulator-tf_io {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d19d14ba9ae4..b1fdc486aed8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -184,9 +184,17 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
184}; 184};
185 185
186static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = { 186static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
187 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI), 187 /*
188 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI), 188 * We already refuse to boot CPUs that don't support our configured
189 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI), 189 * page size, so we can only detect mismatches for a page size other
190 * than the one we're currently using. Unfortunately, SoCs like this
191 * exist in the wild so, even though we don't like it, we'll have to go
192 * along with it and treat them as non-strict.
193 */
194 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
195 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
196 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
197
190 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0), 198 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
191 /* Linux shouldn't care about secure memory */ 199 /* Linux shouldn't care about secure memory */
192 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0), 200 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 1285c7b2947f..171773257974 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -73,7 +73,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
73 73
74 if (offset < -SZ_128M || offset >= SZ_128M) { 74 if (offset < -SZ_128M || offset >= SZ_128M) {
75#ifdef CONFIG_ARM64_MODULE_PLTS 75#ifdef CONFIG_ARM64_MODULE_PLTS
76 struct plt_entry trampoline; 76 struct plt_entry trampoline, *dst;
77 struct module *mod; 77 struct module *mod;
78 78
79 /* 79 /*
@@ -106,23 +106,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
106 * to check if the actual opcodes are in fact identical, 106 * to check if the actual opcodes are in fact identical,
107 * regardless of the offset in memory so use memcmp() instead. 107 * regardless of the offset in memory so use memcmp() instead.
108 */ 108 */
109 trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline); 109 dst = mod->arch.ftrace_trampoline;
110 if (memcmp(mod->arch.ftrace_trampoline, &trampoline, 110 trampoline = get_plt_entry(addr, dst);
111 sizeof(trampoline))) { 111 if (memcmp(dst, &trampoline, sizeof(trampoline))) {
112 if (plt_entry_is_initialized(mod->arch.ftrace_trampoline)) { 112 if (plt_entry_is_initialized(dst)) {
113 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n"); 113 pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 /* point the trampoline to our ftrace entry point */ 117 /* point the trampoline to our ftrace entry point */
118 module_disable_ro(mod); 118 module_disable_ro(mod);
119 *mod->arch.ftrace_trampoline = trampoline; 119 *dst = trampoline;
120 module_enable_ro(mod, true); 120 module_enable_ro(mod, true);
121 121
122 /* update trampoline before patching in the branch */ 122 /*
123 smp_wmb(); 123 * Ensure updated trampoline is visible to instruction
124 * fetch before we patch in the branch.
125 */
126 __flush_icache_range((unsigned long)&dst[0],
127 (unsigned long)&dst[1]);
124 } 128 }
125 addr = (unsigned long)(void *)mod->arch.ftrace_trampoline; 129 addr = (unsigned long)dst;
126#else /* CONFIG_ARM64_MODULE_PLTS */ 130#else /* CONFIG_ARM64_MODULE_PLTS */
127 return -EINVAL; 131 return -EINVAL;
128#endif /* CONFIG_ARM64_MODULE_PLTS */ 132#endif /* CONFIG_ARM64_MODULE_PLTS */
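Note: the smp_wmb() in the ftrace.c hunk is replaced because a write barrier only orders the data-side stores; the trampoline is consumed by instruction fetch, which on arm64 also needs the D-cache cleaned to the point of unification and the I-cache invalidated over the written range before the branch to it is patched in. That is what __flush_icache_range() provides, with &dst[1] marking one plt_entry past the start:

	module_disable_ro(mod);
	*dst = trampoline;				/* data-side store      */
	module_enable_ro(mod, true);
	__flush_icache_range((unsigned long)&dst[0],	/* D-clean + I-inval... */
			     (unsigned long)&dst[1]);	/* ...over sizeof(*dst) */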
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 1d3f0b5a9940..bd2b039f43a6 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
14pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 14pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
15 unsigned long attrs) 15 unsigned long attrs)
16{ 16{
17 if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE)) 17 return pgprot_writecombine(prot);
18 return pgprot_writecombine(prot);
19 return prot;
20} 18}
21 19
22void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 20void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
diff --git a/arch/mips/include/asm/octeon/cvmx-sli-defs.h b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
index 52cf96ea43e5..cbc7cdae1c6a 100644
--- a/arch/mips/include/asm/octeon/cvmx-sli-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sli-defs.h
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
46 case OCTEON_CN78XX & OCTEON_FAMILY_MASK: 46 case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
47 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) 47 if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
48 return 0x0000000000003CB0ull; 48 return 0x0000000000003CB0ull;
49 /* Else, fall through */
49 default: 50 default:
50 return 0x0000000000023CB0ull; 51 return 0x0000000000023CB0ull;
51 } 52 }
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
index fe61513982b4..330b19fcd990 100644
--- a/arch/nds32/kernel/signal.c
+++ b/arch/nds32/kernel/signal.c
@@ -316,6 +316,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
316 regs->uregs[0] = -EINTR; 316 regs->uregs[0] = -EINTR;
317 break; 317 break;
318 } 318 }
319 /* Else, fall through */
319 case -ERESTARTNOINTR: 320 case -ERESTARTNOINTR:
320 regs->uregs[0] = regs->orig_r0; 321 regs->uregs[0] = regs->orig_r0;
321 regs->ipc -= 4; 322 regs->ipc -= 4;
@@ -360,6 +361,7 @@ static void do_signal(struct pt_regs *regs)
360 switch (regs->uregs[0]) { 361 switch (regs->uregs[0]) {
361 case -ERESTART_RESTARTBLOCK: 362 case -ERESTART_RESTARTBLOCK:
362 regs->uregs[15] = __NR_restart_syscall; 363 regs->uregs[15] = __NR_restart_syscall;
364 /* Fall through */
363 case -ERESTARTNOHAND: 365 case -ERESTARTNOHAND:
364 case -ERESTARTSYS: 366 case -ERESTARTSYS:
365 case -ERESTARTNOINTR: 367 case -ERESTARTNOINTR:
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index a39b079e73f2..6d58c1739b42 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -2,6 +2,7 @@
2#ifndef _PARISC_PGTABLE_H 2#ifndef _PARISC_PGTABLE_H
3#define _PARISC_PGTABLE_H 3#define _PARISC_PGTABLE_H
4 4
5#include <asm/page.h>
5#include <asm-generic/4level-fixup.h> 6#include <asm-generic/4level-fixup.h>
6 7
7#include <asm/fixmap.h> 8#include <asm/fixmap.h>
@@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
98 99
99#endif /* !__ASSEMBLY__ */ 100#endif /* !__ASSEMBLY__ */
100 101
101#include <asm/page.h>
102
103#define pte_ERROR(e) \ 102#define pte_ERROR(e) \
104 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 103 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
105#define pmd_ERROR(e) \ 104#define pmd_ERROR(e) \
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 77f6ebf97113..d8dcd8820369 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,7 +121,6 @@ config PPC
121 select ARCH_32BIT_OFF_T if PPC32 121 select ARCH_32BIT_OFF_T if PPC32
122 select ARCH_HAS_DEBUG_VIRTUAL 122 select ARCH_HAS_DEBUG_VIRTUAL
123 select ARCH_HAS_DEVMEM_IS_ALLOWED 123 select ARCH_HAS_DEVMEM_IS_ALLOWED
124 select ARCH_HAS_DMA_MMAP_PGPROT
125 select ARCH_HAS_ELF_RANDOMIZE 124 select ARCH_HAS_ELF_RANDOMIZE
126 select ARCH_HAS_FORTIFY_SOURCE 125 select ARCH_HAS_FORTIFY_SOURCE
127 select ARCH_HAS_GCOV_PROFILE_ALL 126 select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ea0c69236789..56dfa7a2a6f2 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,8 +49,7 @@ obj-y := cputable.o ptrace.o syscalls.o \
49 signal.o sysfs.o cacheinfo.o time.o \ 49 signal.o sysfs.o cacheinfo.o time.o \
50 prom.o traps.o setup-common.o \ 50 prom.o traps.o setup-common.o \
51 udbg.o misc.o io.o misc_$(BITS).o \ 51 udbg.o misc.o io.o misc_$(BITS).o \
52 of_platform.o prom_parse.o \ 52 of_platform.o prom_parse.o
53 dma-common.o
54obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 53obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
55 signal_64.o ptrace32.o \ 54 signal_64.o ptrace32.o \
56 paca.o nvram_64.o firmware.o 55 paca.o nvram_64.o firmware.o
diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
deleted file mode 100644
index dc7ef6b17b69..000000000000
--- a/arch/powerpc/kernel/dma-common.c
+++ /dev/null
@@ -1,17 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Contains common dma routines for all powerpc platforms.
4 *
5 * Copyright (C) 2019 Shawn Anastasio.
6 */
7
8#include <linux/mm.h>
9#include <linux/dma-noncoherent.h>
10
11pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
12 unsigned long attrs)
13{
14 if (!dev_is_dma_coherent(dev))
15 return pgprot_noncached(prot);
16 return prot;
17}
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index e99a14798ab0..c4b606fe73eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -660,8 +660,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
660 } 660 }
661 tce = be64_to_cpu(tce); 661 tce = be64_to_cpu(tce);
662 662
663 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) 663 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
664 return H_PARAMETER; 664 ret = H_PARAMETER;
665 goto unlock_exit;
666 }
665 667
666 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 668 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
667 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, 669 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index f50bbeedfc66..b4f20f13b860 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -556,8 +556,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
556 unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); 556 unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
557 557
558 ua = 0; 558 ua = 0;
559 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) 559 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
560 return H_PARAMETER; 560 ret = H_PARAMETER;
561 goto unlock_exit;
562 }
561 563
562 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { 564 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
563 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt, 565 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
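Note: both book3s_64_vio hunks fix the same defect: an early return of H_PARAMETER from inside a region that had already taken a lock, so the error path leaked it. Routing the failure through the existing unlock_exit label is the kernel's usual single-exit cleanup idiom; a self-contained sketch (tce_is_valid() and map_one_tce() are hypothetical helpers):

	static DEFINE_MUTEX(example_lock);

	static long example_put_indirect(const u64 *tces, unsigned long npages)
	{
		unsigned long i;
		long ret = 0;

		mutex_lock(&example_lock);
		for (i = 0; i < npages; i++) {
			if (!tce_is_valid(tces[i])) {
				ret = -EINVAL;		/* was: return -EINVAL, */
				goto unlock_exit;	/* leaking the lock     */
			}
			map_one_tce(tces[i]);
		}
	unlock_exit:
		mutex_unlock(&example_lock);
		return ret;
	}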
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 93205c0bf71d..3efff552a261 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -54,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
54CONFIG_SERIAL_OF_PLATFORM=y 54CONFIG_SERIAL_OF_PLATFORM=y
55CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 55CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
56CONFIG_HVC_RISCV_SBI=y 56CONFIG_HVC_RISCV_SBI=y
57CONFIG_HW_RANDOM=y
58CONFIG_HW_RANDOM_VIRTIO=y
57CONFIG_SPI=y 59CONFIG_SPI=y
58CONFIG_SPI_SIFIVE=y 60CONFIG_SPI_SIFIVE=y
59# CONFIG_PTP_1588_CLOCK is not set 61# CONFIG_PTP_1588_CLOCK is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index d5449ef805a3..7da93e494445 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
34CONFIG_PCI_HOST_GENERIC=y 34CONFIG_PCI_HOST_GENERIC=y
35CONFIG_PCIE_XILINX=y 35CONFIG_PCIE_XILINX=y
36CONFIG_DEVTMPFS=y 36CONFIG_DEVTMPFS=y
37CONFIG_DEVTMPFS_MOUNT=y
37CONFIG_BLK_DEV_LOOP=y 38CONFIG_BLK_DEV_LOOP=y
38CONFIG_VIRTIO_BLK=y 39CONFIG_VIRTIO_BLK=y
39CONFIG_BLK_DEV_SD=y 40CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
53CONFIG_SERIAL_OF_PLATFORM=y 54CONFIG_SERIAL_OF_PLATFORM=y
54CONFIG_SERIAL_EARLYCON_RISCV_SBI=y 55CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
55CONFIG_HVC_RISCV_SBI=y 56CONFIG_HVC_RISCV_SBI=y
57CONFIG_HW_RANDOM=y
58CONFIG_HW_RANDOM_VIRTIO=y
56# CONFIG_PTP_1588_CLOCK is not set 59# CONFIG_PTP_1588_CLOCK is not set
57CONFIG_DRM=y 60CONFIG_DRM=y
58CONFIG_DRM_RADEON=y 61CONFIG_DRM_RADEON=y
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 9c66033c3a54..161f28d04a07 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -30,10 +30,6 @@ enum fixed_addresses {
30 __end_of_fixed_addresses 30 __end_of_fixed_addresses
31}; 31};
32 32
33#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
34#define FIXADDR_TOP (VMALLOC_START)
35#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
36
37#define FIXMAP_PAGE_IO PAGE_KERNEL 33#define FIXMAP_PAGE_IO PAGE_KERNEL
38 34
39#define __early_set_fixmap __set_fixmap 35#define __early_set_fixmap __set_fixmap
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index a364aba23d55..c24a083b3e12 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -420,14 +420,22 @@ static inline void pgtable_cache_init(void)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
+#define FIXADDR_TOP      VMALLOC_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+
 /*
- * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
+ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
  */
 #ifdef CONFIG_64BIT
 #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
 #else
-#define TASK_SIZE VMALLOC_START
+#define TASK_SIZE FIXADDR_START
 #endif
 
 #include <asm-generic/pgtable.h>
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 853b65ef656d..f0227bdce0f0 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -16,7 +16,13 @@ extern void __fstate_restore(struct task_struct *restore_from);
 
 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+			      struct pt_regs *regs)
+{
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
 }
 
 static inline void fstate_save(struct task_struct *task,
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 687dd19735a7..4d9bbe8438bf 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -53,10 +53,17 @@ static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
 }
 
 #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
+
 #define flush_tlb_range(vma, start, end) \
 	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-#define flush_tlb_mm(mm) \
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long addr)
+{
+	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
+}
+
+#define flush_tlb_mm(mm) \
 	remote_sfence_vma(mm_cpumask(mm), 0, -1)
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index f23794bd1e90..fb3a082362eb 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -64,8 +64,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
 		  unsigned long sp)
 {
 	regs->sstatus = SR_SPIE;
-	if (has_fpu)
+	if (has_fpu) {
 		regs->sstatus |= SR_FS_INITIAL;
+		/*
+		 * Restore the initial value to the FP register
+		 * before starting the user program.
+		 */
+		fstate_restore(current, regs);
+	}
 	regs->sepc = pc;
 	regs->sp = sp;
 	set_fs(USER_DS);
@@ -75,10 +81,11 @@ void flush_thread(void)
 {
 #ifdef CONFIG_FPU
 	/*
-	 * Reset FPU context
+	 * Reset FPU state and context
 	 * frm: round to nearest, ties to even (IEEE default)
 	 * fflags: accrued exceptions cleared
 	 */
+	fstate_off(current, task_pt_regs(current));
 	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
 #endif
 }
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e636728ab452..955eb355c2fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		break;
 	case BPF_ALU64 | BPF_NEG: /* dst = -dst */
 		/* lcgr %dst,%dst */
-		EMIT4(0xb9130000, dst_reg, dst_reg);
+		EMIT4(0xb9030000, dst_reg, dst_reg);
 		break;
 	/*
 	 * BPF_FROM_BE/LE
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		/* llgf %w1,map.max_entries(%b2) */
 		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
 			      offsetof(struct bpf_array, map.max_entries));
-		/* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-		EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+		/* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+		EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
 				  REG_W1, 0, 0xa);
 
 		/*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
 		 *         goto out;
 		 */
 
-		/* sllg %r1,%b3,3: %r1 = index * 8 */
-		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+		/* llgfr %r1,%b3: %r1 = (u32) index */
+		EMIT4(0xb9160000, REG_1, BPF_REG_3);
+		/* sllg %r1,%r1,3: %r1 *= 8 */
+		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
 		/* lg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
 			      REG_1, offsetof(struct bpf_array, ptrs));
diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c
index defebf1a9c8a..845543780cc5 100644
--- a/arch/sh/kernel/disassemble.c
+++ b/arch/sh/kernel/disassemble.c
@@ -475,8 +475,6 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 			printk("dbr");
 			break;
 		case FD_REG_N:
-			if (0)
-				goto d_reg_n;
 		case F_REG_N:
 			printk("fr%d", rn);
 			break;
@@ -488,7 +486,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 				printk("xd%d", rn & ~1);
 				break;
 			}
-		d_reg_n:
+			/* else, fall through */
 		case D_REG_N:
 			printk("dr%d", rn);
 			break;
@@ -497,6 +495,7 @@ static void print_sh_insn(u32 memaddr, u16 insn)
 				printk("xd%d", rm & ~1);
 				break;
 			}
+			/* else, fall through */
 		case D_REG_M:
 			printk("dr%d", rm);
 			break;
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index 3bd010b4c55f..f10d64311127 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -157,6 +157,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
 	switch (sh_type) {
 	case SH_BREAKPOINT_READ:
 		*gen_type = HW_BREAKPOINT_R;
+		break;
 	case SH_BREAKPOINT_WRITE:
 		*gen_type = HW_BREAKPOINT_W;
 		break;
diff --git a/arch/um/include/shared/timer-internal.h b/arch/um/include/shared/timer-internal.h
index 8574338bf23b..9991ec2371e4 100644
--- a/arch/um/include/shared/timer-internal.h
+++ b/arch/um/include/shared/timer-internal.h
@@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns)
 	time_travel_time = ns;
 }
 
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
 {
 	time_travel_timer_mode = mode;
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
+{
 	time_travel_timer_expiry = expiry;
 }
 #else
@@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns)
 {
 }
 
-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
+{
+}
+
+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
 {
 }
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 67c0d1a860e9..6bede7888fc2 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration)
 	if (time_travel_timer_mode != TT_TMR_DISABLED ||
 	    time_travel_timer_expiry < next) {
 		if (time_travel_timer_mode == TT_TMR_ONESHOT)
-			time_travel_set_timer(TT_TMR_DISABLED, 0);
+			time_travel_set_timer_mode(TT_TMR_DISABLED);
 		/*
 		 * time_travel_time will be adjusted in the timer
 		 * IRQ handler so it works even when the signal
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 6a051b078359..234757233355 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 static int itimer_shutdown(struct clock_event_device *evt)
 {
 	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_DISABLED, 0);
+		time_travel_set_timer_mode(TT_TMR_DISABLED);
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		os_timer_disable();
@@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt)
 {
 	unsigned long long interval = NSEC_PER_SEC / HZ;
 
-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_PERIODIC,
-				      time_travel_time + interval);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_PERIODIC);
+		time_travel_set_timer_expiry(time_travel_time + interval);
+	}
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		os_timer_set_interval(interval);
@@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta,
 {
 	delta += 1;
 
-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_ONESHOT,
-				      time_travel_time + delta);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_ONESHOT);
+		time_travel_set_timer_expiry(time_travel_time + delta);
+	}
 
 	if (time_travel_mode != TT_MODE_INFCPU)
 		return os_timer_one_shot(delta);
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 62f317c9113a..5b35b7ea5d72 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -661,10 +661,17 @@ fail:
 
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
-	if (throttle)
+	if (throttle) {
 		perf_ibs_stop(event, 0);
-	else
-		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+	} else {
+		period >>= 4;
+
+		if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+		    (*config & IBS_OP_CNT_CTL))
+			period |= *config & IBS_OP_CUR_CNT_RAND;
+
+		perf_ibs_enable_event(perf_ibs, hwc, period);
+	}
 
 	perf_event_update_userpage(event);
 
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 81b005e4c7d9..325959d19d9a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event)
  * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
- * but only if it can be scehduled with existing events.
+ * but only if it can be scheduled with existing events.
  */
 static int x86_pmu_add(struct perf_event *event, int flags)
 {
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 648260b5f367..e4c2cb65ea50 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
 	return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+	return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -4606,6 +4611,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
+		x86_pmu.limit_period = nhm_limit_period;
 
 		mem_attr = nhm_mem_events_attrs;
 
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 287f1f7b2e52..c38a66661576 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -16,7 +16,6 @@
 #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 
 #ifndef __ASSEMBLY__
-extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
 extern void __fentry__(void);
 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 1392d5e6e8d6..ee26e9215f18 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -252,16 +252,20 @@ struct pebs_lbr {
 #define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK		0x0F
 
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
 #define IBS_FETCH_ENABLE	(1ULL<<48)
 #define IBS_FETCH_CNT		0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT	0x0000FFFFULL
 
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
 #define IBS_OP_CNT_CTL		(1ULL<<19)
 #define IBS_OP_VAL		(1ULL<<18)
 #define IBS_OP_ENABLE		(1ULL<<17)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c10a8b10b203..fff790a3f4ee 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 				struct kvm_cpuid_entry2 __user *entries)
 {
-	uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+	uint16_t evmcs_ver = 0;
 	struct kvm_cpuid_entry2 cpuid_entries[] = {
 		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
 		{ .function = HYPERV_CPUID_INTERFACE },
@@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 	};
 	int i, nent = ARRAY_SIZE(cpuid_entries);
 
+	if (kvm_x86_ops->nested_get_evmcs_version)
+		evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+
 	/* Skip NESTED_FEATURES if eVMCS is not supported */
 	if (!evmcs_ver)
 		--nent;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 685d17c11461..e904ff06a83d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
 		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
 			new->phys_map[xapic_id] = apic;
 
+		if (!kvm_apic_sw_enabled(apic))
+			continue;
+
 		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
 		if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 			static_key_slow_dec_deferred(&apic_sw_disabled);
 		else
 			static_key_slow_inc(&apic_sw_disabled.key);
+
+		recalculate_apic_map(apic->vcpu->kvm);
 	}
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 24843cf49579..218b277bfda3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
 					struct kvm_page_track_notifier_node *node)
 {
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	unsigned long i;
-	bool flush;
-	gfn_t gfn;
-
-	spin_lock(&kvm->mmu_lock);
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		goto out_unlock;
-
-	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-	for (i = 0; i < slot->npages; i++) {
-		gfn = slot->base_gfn + i;
-
-		for_each_valid_sp(kvm, sp, gfn) {
-			if (sp->gfn != gfn)
-				continue;
-
-			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		}
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-			flush = false;
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d685491fce4d..e0368076a1ef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	if (!entry)
 		return -EINVAL;
 
-	new_entry = READ_ONCE(*entry);
 	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
 			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
@@ -7129,12 +7128,6 @@ failed:
 	return ret;
 }
 
-static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
-{
-	/* Not supported */
-	return 0;
-}
-
 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			       uint16_t *vmcs_version)
 {
@@ -7333,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.mem_enc_unreg_region = svm_unregister_enc_region,
 
 	.nested_enable_evmcs = nested_enable_evmcs,
-	.nested_get_evmcs_version = nested_get_evmcs_version,
+	.nested_get_evmcs_version = NULL,
 
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 42ed3faa6af8..c030c96fc81a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7797,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.set_nested_state = NULL,
 	.get_vmcs12_pages = NULL,
 	.nested_enable_evmcs = NULL,
+	.nested_get_evmcs_version = NULL,
 	.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 93b0bd45ac73..290c3c3efb87 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6594,12 +6594,13 @@ restart:
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE && ctxt->tf)
-			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
-		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+			kvm_rip_write(vcpu, ctxt->eip);
+			if (r == EMULATE_DONE && ctxt->tf)
+				kvm_vcpu_do_singlestep(vcpu, &r);
 			__kvm_set_rflags(vcpu, ctxt->eflags);
+		}
 
 		/*
 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eaaed5bfc4a4..991549a1c5f3 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 
 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
 		      bpf_prog_was_classic(bpf_prog));
+	addrs[0] = prog - temp;
 
-	for (i = 0; i < insn_cnt; i++, insn++) {
+	for (i = 1; i <= insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
 		u32 dst_reg = insn->dst_reg;
 		u32 src_reg = insn->src_reg;
@@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		extra_pass = true;
 		goto skip_init_addrs;
 	}
-	addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
+	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
 		goto out_addrs;
@@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	 * Before first pass, make a rough estimation of addrs[]
 	 * each BPF instruction is translated to less than 64 bytes
 	 */
-	for (proglen = 0, i = 0; i < prog->len; i++) {
+	for (proglen = 0, i = 0; i <= prog->len; i++) {
 		proglen += 64;
 		addrs[i] = proglen;
 	}
@@ -1180,7 +1181,7 @@ out_image:
 
 	if (!image || !prog->is_func || extra_pass) {
 		if (image)
-			bpf_prog_fill_jited_linfo(prog, addrs);
+			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
 		kfree(addrs);
 		kfree(jit_data);
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 5cb8a62e091c..7c3106093c75 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -511,6 +511,7 @@ void cpu_reset(void)
511 "add %2, %2, %7\n\t" 511 "add %2, %2, %7\n\t"
512 "addi %0, %0, -1\n\t" 512 "addi %0, %0, -1\n\t"
513 "bnez %0, 1b\n\t" 513 "bnez %0, 1b\n\t"
514 "isync\n\t"
514 /* Jump to identity mapping */ 515 /* Jump to identity mapping */
515 "jx %3\n" 516 "jx %3\n"
516 "2:\n\t" 517 "2:\n\t"
diff --git a/block/blk-mq.c b/block/blk-mq.c
index f78d3287dd82..0835f4d8d42e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
@@ -2666,8 +2662,6 @@ void blk_mq_release(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx, *next;
 	int i;
 
-	cancel_delayed_work_sync(&q->requeue_work);
-
 	queue_for_each_hw_ctx(q, hctx, i)
 		WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 977c659dcd18..9bfa3ea4ed63 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -892,6 +892,9 @@ static void __blk_release_queue(struct work_struct *work)
 
 	blk_free_queue_stats(q->stats);
 
+	if (queue_is_mq(q))
+		cancel_delayed_work_sync(&q->requeue_work);
+
 	blk_exit_queue(q);
 
 	blk_queue_free_zone_bitmaps(q);
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig
index 2e2efa577437..8c37294f1d1e 100644
--- a/drivers/atm/Kconfig
+++ b/drivers/atm/Kconfig
@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI
 	  make the card work).
 
 config ATM_NICSTAR_USE_IDT77105
-	bool "Use IDT77015 PHY driver (25Mbps)"
+	bool "Use IDT77105 PHY driver (25Mbps)"
 	depends on ATM_NICSTAR
 	help
 	  Support for the PHYsical layer chip in ForeRunner LE25 cards. In
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index dd61fdd400f0..68489d1f00bb 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -448,6 +448,11 @@ config PANEL_BOOT_MESSAGE
 choice
 	prompt "Backlight initial state"
 	default CHARLCD_BL_FLASH
+	---help---
+	  Select the initial backlight state on boot or module load.
+
+	  Previously, there was no option for this: the backlight flashed
+	  briefly on init. Now you can also turn it off/on.
 
 	config CHARLCD_BL_OFF
 		bool "Off"
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 92745efefb54..bef6b85778b6 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -20,7 +20,7 @@
 
 #include <generated/utsrelease.h>
 
-#include <misc/charlcd.h>
+#include "charlcd.h"
 
 #define LCD_MINOR		156
 
diff --git a/include/misc/charlcd.h b/drivers/auxdisplay/charlcd.h
index 8cf6c18b0adb..00911ad0f3de 100644
--- a/include/misc/charlcd.h
+++ b/drivers/auxdisplay/charlcd.h
@@ -6,6 +6,9 @@
  * Copyright (C) 2016-2017 Glider bvba
  */
 
+#ifndef _CHARLCD_H
+#define _CHARLCD_H
+
 struct charlcd {
 	const struct charlcd_ops *ops;
 	const unsigned char *char_conv;	/* Optional */
@@ -37,3 +40,5 @@ int charlcd_register(struct charlcd *lcd);
 int charlcd_unregister(struct charlcd *lcd);
 
 void charlcd_poke(struct charlcd *lcd);
+
+#endif /* CHARLCD_H */
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index ab15b64707ad..bcbe13092327 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -14,8 +14,7 @@
 #include <linux/property.h>
 #include <linux/slab.h>
 
-#include <misc/charlcd.h>
-
+#include "charlcd.h"
 
 enum hd44780_pin {
 	/* Order does matter due to writing to GPIO array subsets! */
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index 9c0bb771751d..a2fcde582e2a 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -74,7 +74,7 @@ struct ht16k33_priv {
 	struct ht16k33_fbdev fbdev;
 };
 
-static struct fb_fix_screeninfo ht16k33_fb_fix = {
+static const struct fb_fix_screeninfo ht16k33_fb_fix = {
 	.id		= DRIVER_NAME,
 	.type		= FB_TYPE_PACKED_PIXELS,
 	.visual		= FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static struct fb_fix_screeninfo ht16k33_fb_fix = {
 	.accel	= FB_ACCEL_NONE,
 };
 
-static struct fb_var_screeninfo ht16k33_fb_var = {
+static const struct fb_var_screeninfo ht16k33_fb_var = {
 	.xres = HT16K33_MATRIX_LED_MAX_ROWS,
 	.yres = HT16K33_MATRIX_LED_MAX_COLS,
 	.xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index e06de63497cf..85965953683e 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -55,7 +55,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 
-#include <misc/charlcd.h>
+#include "charlcd.h"
 
 #define KEYPAD_MINOR		185
 
@@ -1617,6 +1617,8 @@ static void panel_attach(struct parport *port)
 		return;
 
 err_lcd_unreg:
+	if (scan_timer.function)
+		del_timer_sync(&scan_timer);
 	if (lcd.enabled)
 		charlcd_unregister(lcd.charlcd);
 err_unreg_device:
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index a4984136c19d..0fd6f97ee523 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -44,7 +44,7 @@ config REGMAP_IRQ
 
 config REGMAP_SOUNDWIRE
 	tristate
-	depends on SOUNDWIRE_BUS
+	depends on SOUNDWIRE
 
 config REGMAP_SCCB
 	tristate
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9bd4ddd12b25..5b248763a672 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
 		 thi->name[0],
 		 resource->name);
 
+	allow_kernel_signal(DRBD_SIGKILL);
+	allow_kernel_signal(SIGXCPU);
 restart:
 	retval = thi->function(thi);
 
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3327192bb71f..c8fb886aebd4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3038,6 +3038,17 @@ again:
 		}
 		return true;
 	case RBD_OBJ_READ_PARENT:
+		/*
+		 * The parent image is read only up to the overlap -- zero-fill
+		 * from the overlap to the end of the request.
+		 */
+		if (!*result) {
+			u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
+
+			if (obj_overlap < obj_req->ex.oe_len)
+				rbd_obj_zero_range(obj_req, obj_overlap,
+					    obj_req->ex.oe_len - obj_overlap);
+		}
 		return true;
 	default:
 		BUG();
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 3ac6a5d18071..b90dbcd99c03 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -965,6 +965,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 		}
 	}
 
+	err = -ENOMEM;
 	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
 		req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
@@ -987,7 +988,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
 	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
 	if (err) {
 		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
-		return err;
+		goto fail;
 	}
 
 	return 0;
@@ -1007,8 +1008,7 @@ fail:
 		}
 		kfree(req);
 	}
-	return -ENOMEM;
-
+	return err;
 }
 
 static int connect_ring(struct backend_info *be)
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 8b33128dccee..0875470a7806 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
 	return 0;
 }
 
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	int err;
+
+	bt_dev_dbg(hdev, "QCA pre shutdown cmd");
+
+	skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
+			     NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
 static void qca_tlv_check_data(struct rome_config *config,
 				const struct firmware *fw)
 {
@@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
 	BT_DBG("Length\t\t : %d bytes", length);
 
 	config->dnld_mode = ROME_SKIP_EVT_NONE;
+	config->dnld_type = ROME_SKIP_EVT_NONE;
 
 	switch (config->type) {
 	case TLV_TYPE_PATCH:
@@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)
 
 	evt = skb_put(skb, sizeof(*evt));
 	evt->ncmd = 1;
-	evt->opcode = QCA_HCI_CC_OPCODE;
+	evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);
 
 	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);
 
@@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
 	 */
 	if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
 	    config->dnld_type == ROME_SKIP_EVT_VSE)
-		return qca_inject_cmd_complete_event(hdev);
+		ret = qca_inject_cmd_complete_event(hdev);
 
 out:
 	release_firmware(fw);
@@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
 		return err;
 	}
 
+	/* Give the controller some time to get ready to receive the NVM */
+	msleep(10);
+
 	/* Download NVM configuration */
 	config.type = TLV_TYPE_NVM;
 	if (firmware_name)
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index 6a291a7a5d96..69c5315a65fd 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -13,6 +13,7 @@
 #define EDL_PATCH_TLV_REQ_CMD		(0x1E)
 #define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
 #define MAX_SIZE_PER_TLV_SEGMENT	(243)
+#define QCA_PRE_SHUTDOWN_CMD		(0xFC08)
 
 #define EDL_CMD_REQ_RES_EVT		(0x00)
 #define EDL_PATCH_VER_RES_EVT		(0x19)
@@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
 			 const char *firmware_name);
 int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
 int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
 static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
 {
 	return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
 {
 	return false;
 }
+
+static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+	return -EOPNOTSUPP;
+}
 #endif
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3876fee6ad13..5cf0734eb31b 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
 	fw_size = fw->size;
 
 	/* The size of patch header is 30 bytes, should be skip */
-	if (fw_size < 30)
+	if (fw_size < 30) {
+		err = -EINVAL;
 		goto err_release_fw;
+	}
 
 	fw_size -= 30;
 	fw_ptr += 30;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 82a0a3691a63..9a970fd1975a 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
 	unsigned long flags;
 	struct qca_data *qca = hu->priv;
 
-	BT_DBG("hu %p want to sleep", hu);
+	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
 
 	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
 
@@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
 		break;
 
 	case HCI_IBS_RX_ASLEEP:
-		/* Fall through */
+		break;
 
 	default:
 		/* Any other state is illegal */
@@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
 		if (hdr->evt == HCI_EV_VENDOR)
 			complete(&qca->drop_ev_comp);
 
-		kfree(skb);
+		kfree_skb(skb);
 
 		return 0;
 	}
@@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev)
 {
 	struct hci_uart *hu = hci_get_drvdata(hdev);
 
+	/* Perform pre shutdown command */
+	qca_send_pre_shutdown_cmd(hdev);
+
 	qca_power_shutdown(hu);
 	return 0;
 }
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 19d7b6ff2f17..20c957185af2 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
 	size_t pdata_size;
 };
 
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+	struct acpi_device *adev = ACPI_COMPANION(hostdev);
+	struct acpi_device *child;
+
+	device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+	list_for_each_entry(child, &adev->children, node)
+		acpi_device_clear_enumerated(child);
+}
+
 /*
  * hisi_lpc_acpi_probe - probe children for ACPI FW
  * @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
 	return 0;
 
 fail:
-	device_for_each_child(hostdev, NULL,
-			      hisi_lpc_acpi_remove_subdev);
+	hisi_lpc_acpi_remove(hostdev);
 	return ret;
 }
 
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
 {
 	return -ENODEV;
 }
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
 #endif // CONFIG_ACPI
 
 /*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
 	range->fwnode = dev->fwnode;
 	range->flags = LOGIC_PIO_INDIRECT;
 	range->size = PIO_INDIRECT_SIZE;
+	range->hostdata = lpcdev;
+	range->ops = &hisi_lpc_ops;
+	lpcdev->io_host = range;
 
 	ret = logic_pio_register_range(range);
 	if (ret) {
 		dev_err(dev, "register IO range failed (%d)!\n", ret);
 		return ret;
 	}
-	lpcdev->io_host = range;
 
 	/* register the LPC host PIO resources */
 	if (acpi_device)
 		ret = hisi_lpc_acpi_probe(dev);
 	else
 		ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
-	if (ret)
+	if (ret) {
+		logic_pio_unregister_range(range);
 		return ret;
+	}
 
-	lpcdev->io_host->hostdata = lpcdev;
-	lpcdev->io_host->ops = &hisi_lpc_ops;
+	dev_set_drvdata(dev, lpcdev);
 
 	io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
 	dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
 	return ret;
 }
 
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+	struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+	struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+	if (acpi_device)
+		hisi_lpc_acpi_remove(dev);
+	else
+		of_platform_depopulate(dev);
+
+	logic_pio_unregister_range(range);
+
+	return 0;
+}
+
 static const struct of_device_id hisi_lpc_of_match[] = {
 	{ .compatible = "hisilicon,hip06-lpc", },
 	{ .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
 		.acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
 	},
 	.probe = hisi_lpc_probe,
+	.remove = hisi_lpc_remove,
 };
 builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e6deabd8305d..2db474ab4c6b 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -949,7 +949,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
 		*best_mode = SYSC_IDLE_SMART_WKUP;
 	else if (idlemodes & BIT(SYSC_IDLE_SMART))
 		*best_mode = SYSC_IDLE_SMART;
-	else if (idlemodes & SYSC_IDLE_FORCE)
+	else if (idlemodes & BIT(SYSC_IDLE_FORCE))
 		*best_mode = SYSC_IDLE_FORCE;
 	else
 		return -EINVAL;
@@ -1267,7 +1267,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
 	SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
 		   0xffff00f0, 0),
-	SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
+	SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
+	SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
 	SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
 	SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
 	SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
@@ -1692,10 +1693,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
 	if (error)
 		return 0;
 
-	if (val)
-		ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
-	else
-		ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+	ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
 
 	return 0;
 }
@@ -2385,27 +2383,27 @@ static int sysc_probe(struct platform_device *pdev)
 
 	error = sysc_init_dts_quirks(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_map_and_check_registers(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_sysc_mask(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_idlemodes(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_syss_mask(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	error = sysc_init_pdata(ddata);
 	if (error)
-		goto unprepare;
+		return error;
 
 	sysc_init_early_quirks(ddata);
 
@@ -2415,7 +2413,7 @@ static int sysc_probe(struct platform_device *pdev)
 
 	error = sysc_init_resets(ddata);
 	if (error)
-		return error;
+		goto unprepare;
 
 	error = sysc_init_module(ddata);
 	if (error)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c0990703ce54..1c46babeb093 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
 	return NULL;
 }
 
+#ifdef CONFIG_OF
+static int of_parse_clkspec(const struct device_node *np, int index,
+			    const char *name, struct of_phandle_args *out_args);
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
+#else
+static inline int of_parse_clkspec(const struct device_node *np, int index,
+				   const char *name,
+				   struct of_phandle_args *out_args)
+{
+	return -ENOENT;
+}
+static inline struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
+{
+	return ERR_PTR(-ENOENT);
+}
+#endif
+
 /**
  * clk_core_get - Find the clk_core parent of a clk
  * @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
  * };
  *
  * Returns: -ENOENT when the provider can't be found or the clk doesn't
- * exist in the provider. -EINVAL when the name can't be found. NULL when the
- * provider knows about the clk but it isn't provided on this system.
+ * exist in the provider or the name can't be found in the DT node or
+ * in a clkdev lookup. NULL when the provider knows about the clk but it
+ * isn't provided on this system.
  * A valid clk_core pointer when the clk can be found in the provider.
  */
 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
 	struct device *dev = core->dev;
 	const char *dev_id = dev ? dev_name(dev) : NULL;
 	struct device_node *np = core->of_node;
+	struct of_phandle_args clkspec;
 
-	if (np && (name || index >= 0))
-		hw = of_clk_get_hw(np, index, name);
-
-	/*
-	 * If the DT search above couldn't find the provider or the provider
-	 * didn't know about this clk, fallback to looking up via clkdev based
-	 * clk_lookups
-	 */
-	if (PTR_ERR(hw) == -ENOENT && name)
-		hw = clk_find_hw(dev_id, name);
+	if (np && (name || index >= 0) &&
+	    !of_parse_clkspec(np, index, name, &clkspec)) {
+		hw = of_clk_get_hw_from_clkspec(&clkspec);
+		of_node_put(clkspec.np);
+	} else if (name) {
+		/*
+		 * If the DT search above couldn't find the provider fallback to
+		 * looking up via clkdev based clk_lookups.
+		 */
+		hw = clk_find_hw(dev_id, name);
+	}
 
 	if (IS_ERR(hw))
 		return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
 		parent = ERR_PTR(-EPROBE_DEFER);
 	} else {
 		parent = clk_core_get(core, index);
-		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
+		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
 			parent = clk_core_lookup(entry->name);
 	}
 
@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
 			break;
 
 		/* Fallback to comparing globally unique names */
-		if (!strcmp(parent->name, core->parents[i].name))
+		if (core->parents[i].name &&
+		    !strcmp(parent->name, core->parents[i].name))
 			break;
 	}
 
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.c b/drivers/clk/samsung/clk-exynos5-subcmu.c
index 91db7894125d..65c82d922b05 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.c
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.c
@@ -14,7 +14,7 @@
 #include "clk-exynos5-subcmu.h"
 
 static struct samsung_clk_provider *ctx;
-static const struct exynos5_subcmu_info *cmu;
+static const struct exynos5_subcmu_info **cmu;
 static int nr_cmus;
 
 static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
  * when OF-core populates all device-tree nodes.
  */
 void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
-			  const struct exynos5_subcmu_info *_cmu)
+			  const struct exynos5_subcmu_info **_cmu)
 {
 	ctx = _ctx;
 	cmu = _cmu;
 	nr_cmus = _nr_cmus;
 
 	for (; _nr_cmus--; _cmu++) {
-		exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
-					  _cmu->nr_gate_clks);
-		exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
-					_cmu->nr_suspend_regs);
+		exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
+					  (*_cmu)->nr_gate_clks);
+		exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
+					(*_cmu)->nr_suspend_regs);
 	}
 }
 
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
 		if (of_property_read_string(np, "label", &name) < 0)
 			continue;
 		for (i = 0; i < nr_cmus; i++)
-			if (strcmp(cmu[i].pd_name, name) == 0)
+			if (strcmp(cmu[i]->pd_name, name) == 0)
 				exynos5_clk_register_subcmu(&pdev->dev,
-							    &cmu[i], np);
+							    cmu[i], np);
 	}
 	return 0;
 }
diff --git a/drivers/clk/samsung/clk-exynos5-subcmu.h b/drivers/clk/samsung/clk-exynos5-subcmu.h
index 755ee8aaa3de..9ae5356f25aa 100644
--- a/drivers/clk/samsung/clk-exynos5-subcmu.h
+++ b/drivers/clk/samsung/clk-exynos5-subcmu.h
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
 };
 
 void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
-			  const struct exynos5_subcmu_info *cmu);
+			  const struct exynos5_subcmu_info **cmu);
 
 #endif
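Changing exynos5_subcmus_init() to take const struct exynos5_subcmu_info ** means each SoC now passes an array of pointers, so one sub-CMU descriptor can appear in several per-SoC tables without being duplicated. A compilable sketch of the aliasing idea, with descriptor fields trimmed down and names invented for illustration:

    #include <stdio.h>

    struct subcmu_info { const char *pd_name; };

    static const struct subcmu_info disp = { .pd_name = "DISP" };
    static const struct subcmu_info mau  = { .pd_name = "MAU" };

    /* Both tables alias the same 'disp' descriptor; only the
     * pointer arrays differ between the two SoC variants. */
    static const struct subcmu_info *soc_a[] = { &disp };
    static const struct subcmu_info *soc_b[] = { &disp, &mau };

    static void init(const struct subcmu_info **cmu, int n)
    {
        while (n--)
            printf("registering %s\n", (*cmu++)->pd_name);
    }

    int main(void)
    {
        init(soc_a, 1);
        init(soc_b, 2);
        return 0;
    }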
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index f2b896881768..931c70a4da19 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
 	.pd_name = "DISP1",
 };
 
+static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
+	&exynos5250_disp_subcmu,
+};
+
 static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
 	/* sorted in descending order */
 	/* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
 
 	samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
 			       ARRAY_SIZE(exynos5250_clk_regs));
-	exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
+	exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
+			     exynos5250_subcmus);
 
 	samsung_clk_of_add_provider(np, ctx);
 
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 01bca5a498b2..7670cc596c74 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
 		GATE_BUS_TOP, 24, 0, 0),
 	GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
 		GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
-	GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
-		SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
 
 static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
 	GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+	/* Maudio Block */
 	GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
 		SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+		GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+		GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
 };
 
 static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
 	/* GSCL Block */
 	DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),
 
-	/* MSCL Block */
-	DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
-
 	/* PSGEN */
 	DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
 	DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
 		GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),
 
-	/* Maudio Block */
-	GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
-		GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
-	GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
-		GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
-
 	/* FSYS Block */
 	GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
 	GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
 	GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
 		GATE_IP_GSCL1, 17, 0, 0),
 
-	/* MSCL Block */
-	GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
-	GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
-	GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
-	GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
-		GATE_IP_MSCL, 8, 0, 0),
-	GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
-		GATE_IP_MSCL, 9, 0, 0),
-	GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
-		GATE_IP_MSCL, 10, 0, 0),
-
 	/* ISP */
 	GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
 		GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
 	{ DIV4_RATIO, 0, 0x3 },			/* DIV dout_mfc_blk */
 };
 
-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
-	{
-		.div_clks	= exynos5x_disp_div_clks,
-		.nr_div_clks	= ARRAY_SIZE(exynos5x_disp_div_clks),
-		.gate_clks	= exynos5x_disp_gate_clks,
-		.nr_gate_clks	= ARRAY_SIZE(exynos5x_disp_gate_clks),
-		.suspend_regs	= exynos5x_disp_suspend_regs,
-		.nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
-		.pd_name	= "DISP",
-	}, {
-		.div_clks	= exynos5x_gsc_div_clks,
-		.nr_div_clks	= ARRAY_SIZE(exynos5x_gsc_div_clks),
-		.gate_clks	= exynos5x_gsc_gate_clks,
-		.nr_gate_clks	= ARRAY_SIZE(exynos5x_gsc_gate_clks),
-		.suspend_regs	= exynos5x_gsc_suspend_regs,
-		.nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
-		.pd_name	= "GSC",
-	}, {
-		.div_clks	= exynos5x_mfc_div_clks,
-		.nr_div_clks	= ARRAY_SIZE(exynos5x_mfc_div_clks),
-		.gate_clks	= exynos5x_mfc_gate_clks,
-		.nr_gate_clks	= ARRAY_SIZE(exynos5x_mfc_gate_clks),
-		.suspend_regs	= exynos5x_mfc_suspend_regs,
-		.nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
-		.pd_name	= "MFC",
-	},
+static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
+	/* MSCL Block */
+	GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+	GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+	GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+	GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+		GATE_IP_MSCL, 8, 0, 0),
+	GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+		GATE_IP_MSCL, 9, 0, 0),
+	GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+		GATE_IP_MSCL, 10, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
+	DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
+	{ GATE_IP_MSCL, 0xffffffff, 0xffffffff },	/* MSCL gates */
+	{ SRC_TOP3, 0, BIT(4) },		/* MUX mout_user_aclk400_mscl */
+	{ DIV2_RATIO0, 0, 0x30000000 },		/* DIV dout_mscl_blk */
+};
+
+static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
+	GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+		SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+		GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+		GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
+	{ SRC_TOP9, 0, BIT(8) },	/* MUX mout_user_mau_epll */
+};
+
+static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
+	.div_clks	= exynos5x_disp_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_disp_div_clks),
+	.gate_clks	= exynos5x_disp_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_disp_gate_clks),
+	.suspend_regs	= exynos5x_disp_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+	.pd_name	= "DISP",
+};
+
+static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
+	.div_clks	= exynos5x_gsc_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_gsc_div_clks),
+	.gate_clks	= exynos5x_gsc_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_gsc_gate_clks),
+	.suspend_regs	= exynos5x_gsc_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+	.pd_name	= "GSC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
+	.div_clks	= exynos5x_mfc_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_mfc_div_clks),
+	.gate_clks	= exynos5x_mfc_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_mfc_gate_clks),
+	.suspend_regs	= exynos5x_mfc_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+	.pd_name	= "MFC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
+	.div_clks	= exynos5x_mscl_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_mscl_div_clks),
+	.gate_clks	= exynos5x_mscl_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_mscl_gate_clks),
+	.suspend_regs	= exynos5x_mscl_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
+	.pd_name	= "MSC",
+};
+
+static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
+	.gate_clks	= exynos5800_mau_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5800_mau_gate_clks),
+	.suspend_regs	= exynos5800_mau_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
+	.pd_name	= "MAU",
+};
+
+static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
+	&exynos5x_disp_subcmu,
+	&exynos5x_gsc_subcmu,
+	&exynos5x_mfc_subcmu,
+	&exynos5x_mscl_subcmu,
+};
+
+static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
+	&exynos5x_disp_subcmu,
+	&exynos5x_gsc_subcmu,
+	&exynos5x_mfc_subcmu,
+	&exynos5x_mscl_subcmu,
+	&exynos5800_mau_subcmu,
 };
 
 static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
 	samsung_clk_extended_sleep_init(reg_base,
 		exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
 		exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
-	if (soc == EXYNOS5800)
+
+	if (soc == EXYNOS5800) {
 		samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
 				       ARRAY_SIZE(exynos5800_clk_regs));
-	exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
-			     exynos5x_subcmus);
+
+		exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
+				     exynos5800_subcmus);
+	} else {
+		exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+				     exynos5x_subcmus);
+	}
 
 	samsung_clk_of_add_provider(np, ctx);
 }
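Both the exynos5250 and exynos5420 call sites also stop hard-coding the descriptor count in favour of ARRAY_SIZE over the table actually passed, so adding an entry cannot desynchronize the two arguments. The idiom in isolation:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *subcmus[] = { "DISP", "GSC", "MFC", "MSC" };

    int main(void)
    {
        /* The count tracks the initializer list automatically. */
        printf("%zu sub-CMUs\n", ARRAY_SIZE(subcmus));
        return 0;
    }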
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 5c50e723ecae..1a191eeeebba 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
 	if (socfpgaclk->fixed_div) {
 		div = socfpgaclk->fixed_div;
 	} else {
-		if (!socfpgaclk->bypass_reg)
+		if (socfpgaclk->hw.reg)
 			div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
 	}
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8dda62367816..c28ebf2810f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2528,7 +2528,7 @@ static int cpufreq_boost_set_sw(int state)
 		}
 
 		ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
-		if (ret)
+		if (ret < 0)
 			break;
 	}
 
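dev_pm_qos_update_request() returns 1 when the effective constraint changed, 0 when it did not, and a negative errno on failure, so `if (ret)` treated a successful update as an error and aborted the loop. Checking `ret < 0` is the usual pattern for such tri-state returns; a toy model (update_request() is a stand-in, not a kernel function):

    #include <stdio.h>

    /* Models an API returning 1 (changed), 0 (unchanged), or a
     * negative errno; only the last case is a real failure. */
    static int update_request(int new_value, int *cur)
    {
        if (new_value < 0)
            return -22;		/* -EINVAL */
        if (*cur == new_value)
            return 0;
        *cur = new_value;
        return 1;
    }

    int main(void)
    {
        int cur = 0, ret;

        ret = update_request(100, &cur);
        if (ret < 0)	/* 'if (ret)' would wrongly treat 1 as failure */
            printf("error %d\n", ret);
        else
            printf("ok, changed=%d\n", ret);
        return 0;
    }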
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index f79eede71c62..edefa669153f 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -540,6 +540,10 @@ int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
 	unsigned long flags;
 	unsigned int i;
 
+	/* If there's no device there's nothing to do */
+	if (!ccp)
+		return 0;
+
 	spin_lock_irqsave(&ccp->cmd_lock, flags);
 
 	ccp->suspending = 1;
@@ -564,6 +568,10 @@ int ccp_dev_resume(struct sp_device *sp)
 	unsigned long flags;
 	unsigned int i;
 
+	/* If there's no device there's nothing to do */
+	if (!ccp)
+		return 0;
+
 	spin_lock_irqsave(&ccp->cmd_lock, flags);
 
 	ccp->suspending = 0;
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index b6cc90cbc9dc..4e5f9f6e901b 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -50,7 +50,7 @@ struct dw_edma_burst {
 
 struct dw_edma_region {
 	phys_addr_t paddr;
-	dma_addr_t vaddr;
+	void __iomem *vaddr;
 	size_t sz;
 };
 
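The dw-edma region change replaces a dma_addr_t (a bus-address integer) with void __iomem *, the sparse-annotated pointer type that readl()/writel() expect, which is what removes the casts in the hunks that follow. Outside the kernel __iomem is only an annotation; a compilable approximation (fake_bar and the empty macro are illustrative assumptions):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define __iomem	/* sparse address-space tag; empty in user space */

    struct region {
        uint64_t      paddr;	/* bus/physical address, an integer */
        void __iomem *vaddr;	/* CPU-side mapped pointer, used for MMIO */
        size_t        sz;
    };

    int main(void)
    {
        static uint32_t fake_bar[4];	/* stands in for pcim_iomap_table()[] */
        struct region rg = { .paddr = 0xfd000000u,
                             .vaddr = fake_bar, .sz = sizeof(fake_bar) };

        /* Pointer arithmetic and %p formatting now work without casts. */
        printf("v=%p p=0x%llx\n", rg.vaddr, (unsigned long long)rg.paddr);
        return 0;
    }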
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 4c96e1c948f2..dc85f55e1bb8 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -130,19 +130,19 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	chip->id = pdev->devfn;
 	chip->irq = pdev->irq;
 
-	dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
+	dw->rg_region.vaddr = pcim_iomap_table(pdev)[pdata->rg_bar];
 	dw->rg_region.vaddr += pdata->rg_off;
 	dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
 	dw->rg_region.paddr += pdata->rg_off;
 	dw->rg_region.sz = pdata->rg_sz;
 
-	dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
+	dw->ll_region.vaddr = pcim_iomap_table(pdev)[pdata->ll_bar];
 	dw->ll_region.vaddr += pdata->ll_off;
 	dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
 	dw->ll_region.paddr += pdata->ll_off;
 	dw->ll_region.sz = pdata->ll_sz;
 
-	dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
+	dw->dt_region.vaddr = pcim_iomap_table(pdev)[pdata->dt_bar];
 	dw->dt_region.vaddr += pdata->dt_off;
 	dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
 	dw->dt_region.paddr += pdata->dt_off;
@@ -158,17 +158,17 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
 	pci_dbg(pdev, "Mode:\t%s\n",
 		dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
 
-	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
-		&dw->rg_region.vaddr, &dw->rg_region.paddr);
+		dw->rg_region.vaddr, &dw->rg_region.paddr);
 
-	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
-		&dw->ll_region.vaddr, &dw->ll_region.paddr);
+		dw->ll_region.vaddr, &dw->ll_region.paddr);
 
-	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
+	pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
 		pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
-		&dw->dt_region.vaddr, &dw->dt_region.paddr);
+		dw->dt_region.vaddr, &dw->dt_region.paddr);
 
 	pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
 
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 8a3180ed49a6..692de47b1670 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -25,7 +25,7 @@ enum dw_edma_control {
 
 static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
 {
-	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
+	return dw->rg_region.vaddr;
 }
 
 #define SET(dw, name, value) \
@@ -192,13 +192,12 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
 static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 {
 	struct dw_edma_burst *child;
-	struct dw_edma_v0_lli *lli;
-	struct dw_edma_v0_llp *llp;
+	struct dw_edma_v0_lli __iomem *lli;
+	struct dw_edma_v0_llp __iomem *llp;
 	u32 control = 0, i = 0;
-	u64 sar, dar, addr;
 	int j;
 
-	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
+	lli = chunk->ll_region.vaddr;
 
 	if (chunk->cb)
 		control = DW_EDMA_V0_CB;
@@ -214,17 +213,15 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 		/* Transfer size */
 		SET_LL(&lli[i].transfer_size, child->sz);
 		/* SAR - low, high */
-		sar = cpu_to_le64(child->sar);
-		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
-		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
+		SET_LL(&lli[i].sar_low, lower_32_bits(child->sar));
+		SET_LL(&lli[i].sar_high, upper_32_bits(child->sar));
 		/* DAR - low, high */
-		dar = cpu_to_le64(child->dar);
-		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
-		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
+		SET_LL(&lli[i].dar_low, lower_32_bits(child->dar));
+		SET_LL(&lli[i].dar_high, upper_32_bits(child->dar));
 		i++;
 	}
 
-	llp = (struct dw_edma_v0_llp *)&lli[i];
+	llp = (void __iomem *)&lli[i];
 	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
 	if (!chunk->cb)
 		control |= DW_EDMA_V0_CB;
@@ -232,9 +229,8 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
 	/* Channel control */
 	SET_LL(&llp->control, control);
 	/* Linked list - low, high */
-	addr = cpu_to_le64(chunk->ll_region.paddr);
-	SET_LL(&llp->llp_low, lower_32_bits(addr));
-	SET_LL(&llp->llp_high, upper_32_bits(addr));
+	SET_LL(&llp->llp_low, lower_32_bits(chunk->ll_region.paddr));
+	SET_LL(&llp->llp_high, upper_32_bits(chunk->ll_region.paddr));
 }
 
 void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
240void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first) 236void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -242,7 +238,6 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
242 struct dw_edma_chan *chan = chunk->chan; 238 struct dw_edma_chan *chan = chunk->chan;
243 struct dw_edma *dw = chan->chip->dw; 239 struct dw_edma *dw = chan->chip->dw;
244 u32 tmp; 240 u32 tmp;
245 u64 llp;
246 241
247 dw_edma_v0_core_write_chunk(chunk); 242 dw_edma_v0_core_write_chunk(chunk);
248 243
@@ -262,9 +257,10 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
 		SET_CH(dw, chan->dir, chan->id, ch_control1,
 		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
 		/* Linked list - low, high */
-		llp = cpu_to_le64(chunk->ll_region.paddr);
-		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
-		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
+		SET_CH(dw, chan->dir, chan->id, llp_low,
+		       lower_32_bits(chunk->ll_region.paddr));
+		SET_CH(dw, chan->dir, chan->id, llp_high,
+		       upper_32_bits(chunk->ll_region.paddr));
 	}
 	/* Doorbell */
 	SET_RW(dw, chan->dir, doorbell,
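Dropping the cpu_to_le64() calls matters because the register writers behind SET_LL()/SET_CH() already perform any CPU-to-device byte swap in writel(); pre-swapping would hand the wrong half to each 32-bit register on big-endian hosts. The low/high split must operate on the native-endian value and is just shifting and masking:

    #include <stdint.h>
    #include <stdio.h>

    #define lower_32_bits(n) ((uint32_t)(n))
    #define upper_32_bits(n) ((uint32_t)((n) >> 32))

    int main(void)
    {
        uint64_t paddr = 0x000000fdc0001000ull;	/* made-up LL address */

        /* Program a 64-bit address into two 32-bit registers. */
        printf("low=0x%08x high=0x%08x\n",
               lower_32_bits(paddr), upper_32_bits(paddr));
        return 0;
    }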
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 3226f528cc11..42739508c0d8 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -14,7 +14,7 @@
14#include "dw-edma-core.h" 14#include "dw-edma-core.h"
15 15
16#define REGS_ADDR(name) \ 16#define REGS_ADDR(name) \
17 ((dma_addr_t *)&regs->name) 17 ((void __force *)&regs->name)
18#define REGISTER(name) \ 18#define REGISTER(name) \
19 { #name, REGS_ADDR(name) } 19 { #name, REGS_ADDR(name) }
20 20
@@ -40,36 +40,37 @@
 
 static struct dentry *base_dir;
 static struct dw_edma *dw;
-static struct dw_edma_v0_regs *regs;
+static struct dw_edma_v0_regs __iomem *regs;
 
 static struct {
-	void *start;
-	void *end;
+	void __iomem *start;
+	void __iomem *end;
 } lim[2][EDMA_V0_MAX_NR_CH];
 
 struct debugfs_entries {
-	char name[24];
+	const char *name;
 	dma_addr_t *reg;
 };
 
 static int dw_edma_debugfs_u32_get(void *data, u64 *val)
 {
+	void __iomem *reg = (void __force __iomem *)data;
 	if (dw->mode == EDMA_MODE_LEGACY &&
-	    data >= (void *)&regs->type.legacy.ch) {
-		void *ptr = (void *)&regs->type.legacy.ch;
+	    reg >= (void __iomem *)&regs->type.legacy.ch) {
+		void __iomem *ptr = &regs->type.legacy.ch;
 		u32 viewport_sel = 0;
 		unsigned long flags;
 		u16 ch;
 
 		for (ch = 0; ch < dw->wr_ch_cnt; ch++)
-			if (lim[0][ch].start >= data && data < lim[0][ch].end) {
-				ptr += (data - lim[0][ch].start);
+			if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
+				ptr += (reg - lim[0][ch].start);
 				goto legacy_sel_wr;
 			}
 
 		for (ch = 0; ch < dw->rd_ch_cnt; ch++)
-			if (lim[1][ch].start >= data && data < lim[1][ch].end) {
-				ptr += (data - lim[1][ch].start);
+			if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
+				ptr += (reg - lim[1][ch].start);
 				goto legacy_sel_rd;
 			}
 
@@ -86,7 +87,7 @@ legacy_sel_wr:
 
 		raw_spin_unlock_irqrestore(&dw->lock, flags);
 	} else {
-		*val = readl(data);
+		*val = readl(reg);
 	}
 
 	return 0;
@@ -105,7 +106,7 @@ static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
 	}
 }
 
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
+static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
 				    struct dentry *dir)
 {
 	int nr_entries;
@@ -288,7 +289,7 @@ void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
 	if (!dw)
 		return;
 
-	regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
+	regs = dw->rg_region.vaddr;
 	if (!regs)
 		return;
 
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 23e0a356f167..ad72b3f42ffa 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
 	switch (chan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
 		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+		/* Fall through */
 	case FSL_DMA_IP_83XX:
 		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
 		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
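The `/* Fall through */` comment documents that the FSL_DMA_IP_85XX case deliberately continues into the 83XX case, and it is one of the comment forms GCC's -Wimplicit-fallthrough recognizes, silencing the warning without changing behaviour. The shape of the pattern:

    #include <stdio.h>

    static void setup(int ip_block)
    {
        switch (ip_block) {
        case 85:
            puts("85xx: external pause");
            /* Fall through - 85xx also needs everything 83xx sets up */
        case 83:
            puts("83xx: external start, loop sizes");
            break;
        default:
            break;
        }
    }

    int main(void)
    {
        setup(85);
        return 0;
    }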
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 89d710899010..de8bfd9a76e9 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -142,7 +142,7 @@ enum d40_events {
  * when the DMA hw is powered off.
  * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
  */
-static u32 d40_backup_regs[] = {
+static __maybe_unused u32 d40_backup_regs[] = {
 	D40_DREG_LCPA,
 	D40_DREG_LCLA,
 	D40_DREG_PRMSE,
@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
 
 #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
 
-static u32 d40_backup_regs_chan[] = {
+static __maybe_unused u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SSCFG,
 	D40_CHAN_REG_SSELT,
 	D40_CHAN_REG_SSPTR,
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d6e919d3936a..1311de74bfdd 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1366,7 +1366,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
 
 	chan = &dmadev->chan[id];
 	if (!chan) {
-		dev_err(chan2dev(chan), "MDMA channel not initialized\n");
+		dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
 		goto exit;
 	}
 
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 2805853e963f..b33cf6e8ab8e 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -712,7 +712,7 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 	return chan;
 }
 
-static int tegra_adma_runtime_suspend(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
@@ -744,7 +744,7 @@ clk_disable:
 	return 0;
 }
 
-static int tegra_adma_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
 {
 	struct tegra_adma *tdma = dev_get_drvdata(dev);
 	struct tegra_adma_chan_regs *ch_reg;
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba2489d4ea24..ba27802efcd0 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1234,7 +1234,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (src_icg) {
 		d->ccr |= CCR_SRC_AMODE_DBLIDX;
 		d->ei = 1;
-		d->fi = src_icg;
+		d->fi = src_icg + 1;
 	} else if (xt->src_inc) {
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		d->fi = 0;
@@ -1249,7 +1249,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
 	if (dst_icg) {
 		d->ccr |= CCR_DST_AMODE_DBLIDX;
 		sg->ei = 1;
-		sg->fi = dst_icg;
+		sg->fi = dst_icg + 1;
 	} else if (xt->dst_inc) {
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		sg->fi = 0;
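The off-by-one here comes from how the sDMA double-index mode the driver models applies its frame index: the hardware advances by FI - 1 bytes at a frame boundary, so an inter-chunk gap of icg bytes must be encoded as FI = icg + 1. A toy model of that addressing rule (the helper and its exact semantics are an illustration, not the TRM definition):

    #include <stdio.h>

    /* Toy model: the engine advances by (fi - 1) bytes at each frame
     * boundary, so a gap of icg bytes needs fi = icg + 1. */
    static unsigned next_frame_addr(unsigned addr, unsigned frame_len,
                                    unsigned fi)
    {
        return addr + frame_len + (fi - 1);
    }

    int main(void)
    {
        unsigned icg = 16, fi = icg + 1;

        printf("frame 1 at %u\n", next_frame_addr(0, 64, fi)); /* 64 + 16 */
        return 0;
    }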
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 1db780c0f07b..3caae7f2cf56 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -927,17 +927,33 @@ fail:
 	return status;
 }
 
+#define GET_EFI_CONFIG_TABLE(bits)					\
+static void *get_efi_config_table##bits(efi_system_table_t *_sys_table,\
+					efi_guid_t guid)		\
+{									\
+	efi_system_table_##bits##_t *sys_table;				\
+	efi_config_table_##bits##_t *tables;				\
+	int i;								\
+									\
+	sys_table = (typeof(sys_table))_sys_table;			\
+	tables = (typeof(tables))(unsigned long)sys_table->tables;	\
+									\
+	for (i = 0; i < sys_table->nr_tables; i++) {			\
+		if (efi_guidcmp(tables[i].guid, guid) != 0)		\
+			continue;					\
+									\
+		return (void *)(unsigned long)tables[i].table;		\
+	}								\
+									\
+	return NULL;							\
+}
+GET_EFI_CONFIG_TABLE(32)
+GET_EFI_CONFIG_TABLE(64)
+
 void *get_efi_config_table(efi_system_table_t *sys_table, efi_guid_t guid)
 {
-	efi_config_table_t *tables = (efi_config_table_t *)sys_table->tables;
-	int i;
-
-	for (i = 0; i < sys_table->nr_tables; i++) {
-		if (efi_guidcmp(tables[i].guid, guid) != 0)
-			continue;
-
-		return (void *)tables[i].table;
-	}
-
-	return NULL;
+	if (efi_is_64bit())
+		return get_efi_config_table64(sys_table, guid);
+	else
+		return get_efi_config_table32(sys_table, guid);
 }
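The stub fix matters when kernel and firmware bitness differ: the config-table array in memory has the firmware's layout, not the kernel's, so walking it with the native struct over- or under-reads each entry. Generating both accessors from one token-pasting macro keeps the two layouts in sync; the mechanics, compressed into a standalone example (struct names here are simplified stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    struct table_32 { uint32_t guid, table; };
    struct table_64 { uint64_t guid, table; };

    /* One macro body, two concrete functions via token pasting. */
    #define GET_TABLE(bits)						\
    static unsigned long get_table##bits(const void *raw, int i)	\
    {								\
        const struct table_##bits *t = raw;				\
        return (unsigned long)t[i].table;				\
    }
    GET_TABLE(32)
    GET_TABLE(64)

    int main(void)
    {
        struct table_32 t32[] = { { 1, 0x1000 } };
        struct table_64 t64[] = { { 1, 0x2000 } };

        printf("%lx %lx\n", get_table32(t32, 0), get_table64(t64, 0));
        return 0;
    }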
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 567fb98c0892..9762dd6d99fa 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 	/* Special handling for SPI GPIOs if used */
 	if (IS_ERR(desc))
 		desc = of_find_spi_gpio(dev, con_id, &of_flags);
-	if (IS_ERR(desc)) {
+	if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
 		/* This quirk looks up flags and all */
 		desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
 		if (!IS_ERR(desc))
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f497003f119c..cca749010cd0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) 1091 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; 1092 lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) 1093 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
1094 lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN; 1094 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
1095 GPIOLINE_FLAG_IS_OUT);
1095 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) 1096 if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
1096 lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE; 1097 lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
1098 GPIOLINE_FLAG_IS_OUT);
1097 1099
1098 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) 1100 if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
1099 return -EFAULT; 1101 return -EFAULT;
@@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (status)
 		goto err_remove_from_list;
 
-	status = gpiochip_irqchip_init_valid_mask(chip);
-	if (status)
-		goto err_remove_from_list;
-
 	status = gpiochip_alloc_valid_mask(chip);
 	if (status)
-		goto err_remove_irqchip_mask;
-
-	status = gpiochip_add_irqchip(chip, lock_key, request_key);
-	if (status)
-		goto err_free_gpiochip_mask;
+		goto err_remove_from_list;
 
 	status = of_gpiochip_add(chip);
 	if (status)
-		goto err_remove_chip;
+		goto err_free_gpiochip_mask;
 
 	status = gpiochip_init_valid_mask(chip);
 	if (status)
@@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
 	machine_gpiochip_add(chip);
 
+	status = gpiochip_irqchip_init_valid_mask(chip);
+	if (status)
+		goto err_remove_acpi_chip;
+
+	status = gpiochip_add_irqchip(chip, lock_key, request_key);
+	if (status)
+		goto err_remove_irqchip_mask;
+
 	/*
 	 * By first adding the chardev, and then adding the device,
 	 * we get a device node entry in sysfs under
@@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	if (gpiolib_initialized) {
 		status = gpiochip_setup_dev(gdev);
 		if (status)
-			goto err_remove_acpi_chip;
+			goto err_remove_irqchip;
 	}
 	return 0;
 
+err_remove_irqchip:
+	gpiochip_irqchip_remove(chip);
+err_remove_irqchip_mask:
+	gpiochip_irqchip_free_valid_mask(chip);
 err_remove_acpi_chip:
 	acpi_gpiochip_remove(chip);
 err_remove_of_chip:
 	gpiochip_free_hogs(chip);
 	of_gpiochip_remove(chip);
-err_remove_chip:
-	gpiochip_irqchip_remove(chip);
 err_free_gpiochip_mask:
 	gpiochip_free_valid_mask(chip);
-err_remove_irqchip_mask:
-	gpiochip_irqchip_free_valid_mask(chip);
 err_remove_from_list:
 	spin_lock_irqsave(&gpio_lock, flags);
 	list_del(&gdev->list);
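The gpiolib reordering works because a goto-unwind ladder must stay the exact mirror of the init sequence: moving the irqchip setup later in the function means its teardown labels move earlier in the ladder, and each label undoes only what had succeeded before the jump. A compact template of the invariant:

    #include <stdio.h>

    static int step(const char *n, int fail) { printf("init %s\n", n); return fail; }
    static void undo(const char *n)          { printf("undo %s\n", n); }

    static int setup(void)
    {
        if (step("mask", 0))
            goto err;
        if (step("irqchip", 1))	/* fails: only 'mask' is rolled back */
            goto err_mask;
        return 0;

        /* labels appear in reverse order of the inits they undo */
    err_mask:
        undo("mask");
    err:
        return -1;
    }

    int main(void) { return setup() ? 1 : 0; }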
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 9b384a94d2f3..3e35a8f2c5e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -574,6 +574,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e4094f842e7..8b26c970a3cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_sem);
 
+	if (p->post_deps)
+		return -EINVAL;
+
 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 				     GFP_KERNEL);
 	p->num_post_deps = 0;
@@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
 
 
 static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
-						      struct amdgpu_cs_chunk
-						      *chunk)
+						      struct amdgpu_cs_chunk *chunk)
 {
 	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
 	unsigned num_deps;
@@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
 	num_deps = chunk->length_dw * 4 /
 		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
 
+	if (p->post_deps)
+		return -EINVAL;
+
 	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
 				     GFP_KERNEL);
 	p->num_post_deps = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f539a2a92774..7398b4850649 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -534,21 +534,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
534 struct drm_sched_entity *entity) 534 struct drm_sched_entity *entity)
535{ 535{
536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); 536 struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
537 unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); 537 struct dma_fence *other;
538 struct dma_fence *other = centity->fences[idx]; 538 unsigned idx;
539 long r;
539 540
540 if (other) { 541 spin_lock(&ctx->ring_lock);
541 signed long r; 542 idx = centity->sequence & (amdgpu_sched_jobs - 1);
542 r = dma_fence_wait(other, true); 543 other = dma_fence_get(centity->fences[idx]);
543 if (r < 0) { 544 spin_unlock(&ctx->ring_lock);
544 if (r != -ERESTARTSYS)
545 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
546 545
547 return r; 546 if (!other)
548 } 547 return 0;
549 }
550 548
551 return 0; 549 r = dma_fence_wait(other, true);
550 if (r < 0 && r != -ERESTARTSYS)
551 DRM_ERROR("Error (%ld) waiting for fence!\n", r);
552
553 dma_fence_put(other);
554 return r;
552} 555}
553 556
554void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) 557void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
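The amdgpu_ctx fix is the classic take-a-reference-under-the-lock pattern: the fence slot in the ring buffer can be replaced concurrently, so the waiter snapshots the pointer and bumps its refcount while holding ring_lock, waits without the lock, then drops the reference. A standalone skeleton of the ordering (single-threaded here, so the counter is just printed; build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    struct fence { int refs; };

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fence slot = { .refs = 1 };	/* fence in the ring */

    int main(void)
    {
        struct fence *other;

        /* Snapshot + reference under the lock ... */
        pthread_mutex_lock(&ring_lock);
        other = &slot;
        other->refs++;
        pthread_mutex_unlock(&ring_lock);

        /* ... wait without holding the lock ... */
        printf("waiting on fence with %d refs\n", other->refs);

        /* ... and drop the reference when done. */
        other->refs--;
        return 0;
    }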
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1cf639a51178..c066e1d3f981 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -596,14 +596,18 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-			break;
-		if ((adev->gfx.rlc_fw_version != 106 &&
-		     adev->gfx.rlc_fw_version < 531) ||
-		    (adev->gfx.rlc_fw_version == 53815) ||
-		    (adev->gfx.rlc_feature_version < 1) ||
-		    !adev->gfx.rlc.is_rlc_v2_1)
+		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+			&&((adev->gfx.rlc_fw_version != 106 &&
+			     adev->gfx.rlc_fw_version < 531) ||
+			    (adev->gfx.rlc_fw_version == 53815) ||
+			    (adev->gfx.rlc_feature_version < 1) ||
+			    !adev->gfx.rlc.is_rlc_v2_1))
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+				AMD_PG_SUPPORT_CP |
+				AMD_PG_SUPPORT_RLC_SMU_HS;
 		break;
 	default:
 		break;
@@ -4869,7 +4873,7 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-	WREG32(mmSQ_CMD, value);
+	WREG32_SOC15(GC, 0, mmSQ_CMD, value);
 }
 
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 662612f89c70..9922bce3fd89 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
552 AMD_CG_SUPPORT_BIF_LS; 552 AMD_CG_SUPPORT_BIF_LS;
553 adev->pg_flags = AMD_PG_SUPPORT_VCN | 553 adev->pg_flags = AMD_PG_SUPPORT_VCN |
554 AMD_PG_SUPPORT_VCN_DPG | 554 AMD_PG_SUPPORT_VCN_DPG |
555 AMD_PG_SUPPORT_MMHUB |
556 AMD_PG_SUPPORT_ATHUB; 555 AMD_PG_SUPPORT_ATHUB;
557 adev->external_rev_id = adev->rev_id + 0x1; 556 adev->external_rev_id = adev->rev_id + 0x1;
558 break; 557 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 23265414d448..04fbf05d7176 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)
992 992
993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; 993 adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
994 } 994 }
995
996 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
997 adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
998 AMD_PG_SUPPORT_CP |
999 AMD_PG_SUPPORT_RLC_SMU_HS;
1000 break; 995 break;
1001 default: 996 default:
1002 /* FIXME: not supported yet */ 997 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4a29f72334d0..45be7a2132bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
 				      const struct drm_connector_state *state)
 {
-	uint32_t bpc = connector->display_info.bpc;
+	uint8_t bpc = (uint8_t)connector->display_info.bpc;
+
+	/* Assume 8 bpc by default if no bpc is specified. */
+	bpc = bpc ? bpc : 8;
 
 	if (!state)
 		state = connector->state;
 
 	if (state) {
-		bpc = state->max_bpc;
+		/*
+		 * Cap display bpc based on the user requested value.
+		 *
+		 * The value for state->max_bpc may not correctly updated
+		 * depending on when the connector gets added to the state
+		 * or if this was called outside of atomic check, so it
+		 * can't be used directly.
+		 */
+		bpc = min(bpc, state->max_requested_bpc);
+
 		/* Round down to the nearest even number. */
 		bpc = bpc - (bpc & 1);
 	}
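With a default of 8 and a min() against the property the user actually requested, the helper degrades gracefully whether or not the connector state carries a usable value, and the final round-down matches the even bpc steps (6/8/10/12) typical of HDMI/DP sinks. The arithmetic in isolation:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned char bpc = 0;            /* EDID reported nothing */
        unsigned char max_requested = 11; /* from the connector property */

        bpc = bpc ? bpc : 8;              /* assume 8 when unspecified */
        bpc = min(bpc, max_requested);    /* cap at the user request */
        bpc = bpc - (bpc & 1);            /* round down to even */
        printf("bpc=%u\n", bpc);          /* -> 8 */
        return 0;
    }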
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index fa20201eef3a..cbc480a33376 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/mm.h>
 
 #include "dm_services.h"
 
@@ -1171,8 +1172,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 struct dc_state *dc_create_state(struct dc *dc)
 {
-	struct dc_state *context = kzalloc(sizeof(struct dc_state),
-					   GFP_KERNEL);
+	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
+					    GFP_KERNEL);
 
 	if (!context)
 		return NULL;
@@ -1192,11 +1193,11 @@ struct dc_state *dc_create_state(struct dc *dc)
 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
 {
 	int i, j;
-	struct dc_state *new_ctx = kmemdup(src_ctx,
-			sizeof(struct dc_state), GFP_KERNEL);
+	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
 
 	if (!new_ctx)
 		return NULL;
+	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
 
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -1230,7 +1231,7 @@ static void dc_state_free(struct kref *kref)
 {
 	struct dc_state *context = container_of(kref, struct dc_state, refcount);
 	dc_resource_state_destruct(context);
-	kfree(context);
+	kvfree(context);
 }
 
 void dc_release_state(struct dc_state *context)
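struct dc_state is large enough that a kzalloc() can fail on a fragmented system even with plenty of memory free; kvzalloc()/kvmalloc() fall back to vmalloc space when contiguous pages are unavailable, and kvfree() releases either kind. A toy model of the try-then-fall-back shape (user-space stand-ins, not the kernel allocators):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Try the 'contiguous' allocator, fall back to one that tolerates
     * fragmentation, free through a single helper (like kvfree()). */
    static void *kv_zalloc(size_t sz)
    {
        void *p = malloc(sz);		/* stands in for kzalloc() */

        if (!p)
            return calloc(1, sz);	/* stands in for vzalloc() */
        memset(p, 0, sz);
        return p;
    }

    static void kv_free(void *p)
    {
        free(p);			/* one release path for both */
    }

    int main(void)
    {
        void *state = kv_zalloc(1 << 20);	/* large context object */

        printf("state at %p\n", state);
        kv_free(state);
        return 0;
    }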
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index f27c6fbb192e..90c4e87ac5ad 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -2101,7 +2101,11 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
 	if (ret)
 		return ret;
 
-	*query = metrics_table.CurrSocketPower << 8;
+	/* For the 40.46 release, they changed the value name */
+	if (hwmgr->smu_version == 0x282e00)
+		*query = metrics_table.AverageSocketPower << 8;
+	else
+		*query = metrics_table.CurrSocketPower << 8;
 
 	return ret;
 }
@@ -2349,12 +2353,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
 	data->dpm_table.soc_table.dpm_state.soft_max_level =
 		data->dpm_table.soc_table.dpm_levels[soft_level].value;
 
-	ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+					  FEATURE_DPM_UCLK_MASK |
+					  FEATURE_DPM_SOCCLK_MASK);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload boot level to highest!",
 			return ret);
 
-	ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+					  FEATURE_DPM_UCLK_MASK |
+					  FEATURE_DPM_SOCCLK_MASK);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload dpm max level to highest!",
 			return ret);
@@ -2387,12 +2395,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 	data->dpm_table.soc_table.dpm_state.soft_max_level =
 		data->dpm_table.soc_table.dpm_levels[soft_level].value;
 
-	ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+					  FEATURE_DPM_UCLK_MASK |
+					  FEATURE_DPM_SOCCLK_MASK);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload boot level to highest!",
 			return ret);
 
-	ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+					  FEATURE_DPM_UCLK_MASK |
+					  FEATURE_DPM_SOCCLK_MASK);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload dpm max level to highest!",
 			return ret);
@@ -2403,14 +2415,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 
 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
+	struct vega20_hwmgr *data =
+			(struct vega20_hwmgr *)(hwmgr->backend);
+	uint32_t soft_min_level, soft_max_level;
 	int ret = 0;
 
-	ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
+	/* gfxclk soft min/max settings */
+	soft_min_level =
+		vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
+	soft_max_level =
+		vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+	data->dpm_table.gfx_table.dpm_state.soft_min_level =
+		data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+	data->dpm_table.gfx_table.dpm_state.soft_max_level =
+		data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
+
+	/* uclk soft min/max settings */
+	soft_min_level =
+		vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+	soft_max_level =
+		vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+	data->dpm_table.mem_table.dpm_state.soft_min_level =
+		data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+	data->dpm_table.mem_table.dpm_state.soft_max_level =
+		data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+	/* socclk soft min/max settings */
+	soft_min_level =
+		vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
+	soft_max_level =
+		vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
+
+	data->dpm_table.soc_table.dpm_state.soft_min_level =
+		data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
+	data->dpm_table.soc_table.dpm_state.soft_max_level =
+		data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
+
+	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+					  FEATURE_DPM_UCLK_MASK |
+					  FEATURE_DPM_SOCCLK_MASK);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload DPM Bootup Levels!",
 			return ret);
 
-	ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
+	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
+					  FEATURE_DPM_UCLK_MASK |
+					  FEATURE_DPM_SOCCLK_MASK);
 	PP_ASSERT_WITH_CODE(!ret,
 			"Failed to upload DPM Max Levels!",
 			return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index a0f52c86d8c7..a78b2e295895 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -907,8 +907,6 @@ struct smu_funcs
907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) 907 ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
908#define smu_set_azalia_d3_pme(smu) \ 908#define smu_set_azalia_d3_pme(smu) \
909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) 909 ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
910#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
911 ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
912#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ 910#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
913 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) 911 ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
914#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ 912#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 5fde5cf65b42..53097961bf2b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
326 struct amdgpu_device *adev = smu->adev; 326 struct amdgpu_device *adev = smu->adev;
327 const struct smc_firmware_header_v1_0 *hdr; 327 const struct smc_firmware_header_v1_0 *hdr;
328 int ret, index; 328 int ret, index;
329 uint32_t size; 329 uint32_t size = 0;
330 uint16_t atom_table_size;
330 uint8_t frev, crev; 331 uint8_t frev, crev;
331 void *table; 332 void *table;
332 uint16_t version_major, version_minor; 333 uint16_t version_major, version_minor;
@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
 		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
 						    powerplayinfo);
 
-		ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
+		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
 					      (uint8_t **)&table);
 		if (ret)
 			return ret;
+		size = atom_table_size;
 	}
 
 	if (!smu->smu_table.power_play_table)
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index dd6fd1c8bf24..6a14497257e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -3050,6 +3050,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
3050 3050
3051static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value) 3051static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
3052{ 3052{
3053 uint32_t smu_version;
3053 int ret = 0; 3054 int ret = 0;
3054 SmuMetrics_t metrics; 3055 SmuMetrics_t metrics;
3055 3056
@@ -3060,7 +3061,15 @@ static int vega20_get_gpu_power(struct smu_context *smu, uint32_t *value)
3060 if (ret) 3061 if (ret)
3061 return ret; 3062 return ret;
3062 3063
3063 *value = metrics.CurrSocketPower << 8; 3064 ret = smu_get_smc_version(smu, NULL, &smu_version);
3065 if (ret)
3066 return ret;
3067
3068 /* For the 40.46 release, they changed the value name */
3069 if (smu_version == 0x282e00)
3070 *value = metrics.AverageSocketPower << 8;
3071 else
3072 *value = metrics.CurrSocketPower << 8;
3064 3073
3065 return 0; 3074 return 0;
3066} 3075}
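
The power-readout change above gates on the SMC firmware version; 0x282e00 appears to pack the 40.46.00 release one byte per component (0x28 = 40, 0x2e = 46). A sketch of that packing, with the helper name invented for illustration:

#include <stdint.h>

static inline uint32_t smu_ver(uint8_t maj, uint8_t min, uint8_t rev)
{
        return ((uint32_t)maj << 16) | ((uint32_t)min << 8) | rev;
}
/* smu_ver(40, 46, 0) == 0x282e00, the release gated in the hunk above. */
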
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..9d4d5075cc64 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
8#include <linux/iommu.h> 8#include <linux/iommu.h>
9#include <linux/of_device.h> 9#include <linux/of_device.h>
10#include <linux/of_graph.h> 10#include <linux/of_graph.h>
11#include <linux/of_reserved_mem.h>
11#include <linux/platform_device.h> 12#include <linux/platform_device.h>
12#include <linux/dma-mapping.h> 13#include <linux/dma-mapping.h>
13#ifdef CONFIG_DEBUG_FS 14#ifdef CONFIG_DEBUG_FS
@@ -126,7 +127,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
126 pipe->of_output_port = 127 pipe->of_output_port =
127 of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); 128 of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
128 129
129 pipe->of_node = np; 130 pipe->of_node = of_node_get(np);
130 131
131 return 0; 132 return 0;
132} 133}
@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
143 return mdev->irq; 144 return mdev->irq;
144 } 145 }
145 146
147 /* Get the optional framebuffer memory resource */
148 ret = of_reserved_mem_device_init(dev);
149 if (ret && ret != -ENODEV)
150 return ret;
151 ret = 0;
152
146 for_each_available_child_of_node(np, child) { 153 for_each_available_child_of_node(np, child) {
147 if (of_node_cmp(child->name, "pipeline") == 0) { 154 if (of_node_cmp(child->name, "pipeline") == 0) {
148 ret = komeda_parse_pipe_dt(mdev, child); 155 ret = komeda_parse_pipe_dt(mdev, child);
@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
289 296
290 mdev->n_pipelines = 0; 297 mdev->n_pipelines = 0;
291 298
299 of_reserved_mem_device_release(dev);
300
292 if (funcs && funcs->cleanup) 301 if (funcs && funcs->cleanup)
293 funcs->cleanup(mdev); 302 funcs->cleanup(mdev);
294 303
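
Two reference-counting details in the komeda_dev.c hunk are worth noting: of_reserved_mem_device_init() is treated as optional (-ENODEV just means no reserved region was declared), and the pipeline keeps the child node past the iteration, so it must take its own reference. A sketch of the latter rule, with want() standing in for the driver's real matching logic:

struct device_node *kept = NULL;

for_each_available_child_of_node(np, child) {
        if (want(child))
                kept = of_node_get(child);   /* own reference survives loop */
}
/* ... later, on teardown ... */
of_node_put(kept);                           /* balanced release */
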
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
index cd4d9f53ddef..c9a1edb9a000 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c
@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
35 return NULL; 35 return NULL;
36} 36}
37 37
38u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
39{
40 u32 bpp;
41
42 switch (info->format) {
43 case DRM_FORMAT_YUV420_8BIT:
44 bpp = 12;
45 break;
46 case DRM_FORMAT_YUV420_10BIT:
47 bpp = 15;
48 break;
49 default:
50 bpp = info->cpp[0] * 8;
51 break;
52 }
53
54 return bpp;
55}
56
38/* Two assumptions 57/* Two assumptions
39 * 1. RGB always has YTR 58 * 1. RGB always has YTR
40 * 2. Tiled RGB always has SC 59 * 2. Tiled RGB always has SC
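
The new helper exists because AFBC packs 4:2:0 YUV with chroma at one quarter of the luma density, so bits per pixel is depth + 2*(depth/4): 8-bit gives 8 + 4 = 12 and 10-bit gives 10 + 5 = 15, which info->cpp[0] (bytes per sample of plane 0) cannot express. The arithmetic as a one-liner:

/* 4:2:0: one luma sample plus two chroma samples at 1/4 density. */
static unsigned int yuv420_bpp(unsigned int bit_depth)
{
        return bit_depth + 2 * bit_depth / 4;   /* 8 -> 12, 10 -> 15 */
}
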
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 3631910d33b5..32273cf18f7c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -97,6 +97,9 @@ const struct komeda_format_caps *
97komeda_get_format_caps(struct komeda_format_caps_table *table, 97komeda_get_format_caps(struct komeda_format_caps_table *table,
98 u32 fourcc, u64 modifier); 98 u32 fourcc, u64 modifier);
99 99
100u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
101 u64 modifier);
102
100u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, 103u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
101 u32 layer_type, u32 *n_fmts); 104 u32 layer_type, u32 *n_fmts);
102 105
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 3b0a70ed6aa0..1b01a625f40e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
43 struct drm_framebuffer *fb = &kfb->base; 43 struct drm_framebuffer *fb = &kfb->base;
44 const struct drm_format_info *info = fb->format; 44 const struct drm_format_info *info = fb->format;
45 struct drm_gem_object *obj; 45 struct drm_gem_object *obj;
46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks; 46 u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
47 u64 min_size; 47 u64 min_size;
48 48
49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]); 49 obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
@@ -88,8 +88,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE, 88 kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
89 alignment_header); 89 alignment_header);
90 90
91 bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
91 kfb->afbc_size = kfb->offset_payload + n_blocks * 92 kfb->afbc_size = kfb->offset_payload + n_blocks *
92 ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS, 93 ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
93 AFBC_SUPERBLK_ALIGNMENT); 94 AFBC_SUPERBLK_ALIGNMENT);
94 min_size = kfb->afbc_size + fb->offsets[0]; 95 min_size = kfb->afbc_size + fb->offsets[0];
95 if (min_size > obj->size) { 96 if (min_size > obj->size) {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..69d9e26c60c8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
14#include <drm/drm_gem_cma_helper.h> 14#include <drm/drm_gem_cma_helper.h>
15#include <drm/drm_gem_framebuffer_helper.h> 15#include <drm/drm_gem_framebuffer_helper.h>
16#include <drm/drm_irq.h> 16#include <drm/drm_irq.h>
17#include <drm/drm_probe_helper.h>
17#include <drm/drm_vblank.h> 18#include <drm/drm_vblank.h>
18 19
19#include "komeda_dev.h" 20#include "komeda_dev.h"
@@ -146,7 +147,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
146 struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st); 147 struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
147 struct komeda_plane_state *kplane_st; 148 struct komeda_plane_state *kplane_st;
148 struct drm_plane_state *plane_st; 149 struct drm_plane_state *plane_st;
149 struct drm_framebuffer *fb;
150 struct drm_plane *plane; 150 struct drm_plane *plane;
151 struct list_head zorder_list; 151 struct list_head zorder_list;
152 int order = 0, err; 152 int order = 0, err;
@@ -172,7 +172,6 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
172 172
173 list_for_each_entry(kplane_st, &zorder_list, zlist_node) { 173 list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
174 plane_st = &kplane_st->base; 174 plane_st = &kplane_st->base;
175 fb = plane_st->fb;
176 plane = plane_st->plane; 175 plane = plane_st->plane;
177 176
178 plane_st->normalized_zpos = order++; 177 plane_st->normalized_zpos = order++;
@@ -205,7 +204,7 @@ static int komeda_kms_check(struct drm_device *dev,
205 struct drm_atomic_state *state) 204 struct drm_atomic_state *state)
206{ 205{
207 struct drm_crtc *crtc; 206 struct drm_crtc *crtc;
208 struct drm_crtc_state *old_crtc_st, *new_crtc_st; 207 struct drm_crtc_state *new_crtc_st;
209 int i, err; 208 int i, err;
210 209
211 err = drm_atomic_helper_check_modeset(dev, state); 210 err = drm_atomic_helper_check_modeset(dev, state);
@@ -216,7 +215,7 @@ static int komeda_kms_check(struct drm_device *dev,
216 * so need to add all affected_planes (even unchanged) to 215 * so need to add all affected_planes (even unchanged) to
217 * drm_atomic_state. 216 * drm_atomic_state.
218 */ 217 */
219 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) { 218 for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
220 err = drm_atomic_add_affected_planes(state, crtc); 219 err = drm_atomic_add_affected_planes(state, crtc);
221 if (err) 220 if (err)
222 return err; 221 return err;
@@ -307,24 +306,33 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
307 komeda_kms_irq_handler, IRQF_SHARED, 306 komeda_kms_irq_handler, IRQF_SHARED,
308 drm->driver->name, drm); 307 drm->driver->name, drm);
309 if (err) 308 if (err)
310 goto cleanup_mode_config; 309 goto free_component_binding;
311 310
312 err = mdev->funcs->enable_irq(mdev); 311 err = mdev->funcs->enable_irq(mdev);
313 if (err) 312 if (err)
314 goto cleanup_mode_config; 313 goto free_component_binding;
315 314
316 drm->irq_enabled = true; 315 drm->irq_enabled = true;
317 316
317 drm_kms_helper_poll_init(drm);
318
318 err = drm_dev_register(drm, 0); 319 err = drm_dev_register(drm, 0);
319 if (err) 320 if (err)
320 goto cleanup_mode_config; 321 goto free_interrupts;
321 322
322 return kms; 323 return kms;
323 324
324cleanup_mode_config: 325free_interrupts:
326 drm_kms_helper_poll_fini(drm);
325 drm->irq_enabled = false; 327 drm->irq_enabled = false;
328 mdev->funcs->disable_irq(mdev);
329free_component_binding:
330 component_unbind_all(mdev->dev, drm);
331cleanup_mode_config:
326 drm_mode_config_cleanup(drm); 332 drm_mode_config_cleanup(drm);
327 komeda_kms_cleanup_private_objs(kms); 333 komeda_kms_cleanup_private_objs(kms);
334 drm->dev_private = NULL;
335 drm_dev_put(drm);
328free_kms: 336free_kms:
329 kfree(kms); 337 kfree(kms);
330 return ERR_PTR(err); 338 return ERR_PTR(err);
@@ -335,12 +343,14 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
335 struct drm_device *drm = &kms->base; 343 struct drm_device *drm = &kms->base;
336 struct komeda_dev *mdev = drm->dev_private; 344 struct komeda_dev *mdev = drm->dev_private;
337 345
346 drm_dev_unregister(drm);
347 drm_kms_helper_poll_fini(drm);
348 drm_atomic_helper_shutdown(drm);
338 drm->irq_enabled = false; 349 drm->irq_enabled = false;
339 mdev->funcs->disable_irq(mdev); 350 mdev->funcs->disable_irq(mdev);
340 drm_dev_unregister(drm);
341 component_unbind_all(mdev->dev, drm); 351 component_unbind_all(mdev->dev, drm);
342 komeda_kms_cleanup_private_objs(kms);
343 drm_mode_config_cleanup(drm); 352 drm_mode_config_cleanup(drm);
353 komeda_kms_cleanup_private_objs(kms);
344 drm->dev_private = NULL; 354 drm->dev_private = NULL;
345 drm_dev_put(drm); 355 drm_dev_put(drm);
346} 356}
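
The komeda_kms_attach() rework above converts a single catch-all label into the kernel's usual unwind ladder, where each label undoes exactly the steps that had completed, in reverse order. The shape, with hypothetical step/unwind names:

int setup(void)
{
        int err;

        err = step_a();
        if (err)
                return err;          /* nothing to undo yet */

        err = step_b();
        if (err)
                goto undo_a;

        err = step_c();
        if (err)
                goto undo_b;

        return 0;

undo_b:
        unwind_b();                  /* reverse order of setup */
undo_a:
        unwind_a();
        return err;
}
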
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..14b683164544 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -480,6 +480,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
480 struct seq_file *sf); 480 struct seq_file *sf);
481 481
482/* component APIs */ 482/* component APIs */
483extern __printf(10, 11)
483struct komeda_component * 484struct komeda_component *
484komeda_component_add(struct komeda_pipeline *pipe, 485komeda_component_add(struct komeda_pipeline *pipe,
485 size_t comp_sz, u32 id, u32 hw_id, 486 size_t comp_sz, u32 id, u32 hw_id,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index 617e1f7b8472..2851cac94d86 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -148,7 +148,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
148 if (!kcrtc->master->wb_layer) 148 if (!kcrtc->master->wb_layer)
149 return 0; 149 return 0;
150 150
151 kwb_conn = kzalloc(sizeof(*wb_conn), GFP_KERNEL); 151 kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
152 if (!kwb_conn) 152 if (!kwb_conn)
153 return -ENOMEM; 153 return -ENOMEM;
154 154
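
The one-liner above fixes an undersized allocation: sizeof(*wb_conn) measured the embedded drm_writeback_connector rather than the containing struct. The p = kzalloc(sizeof(*p), ...) idiom ties the size to the pointer being assigned, so the two cannot drift apart:

kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);   /* container size */
if (!kwb_conn)
        return -ENOMEM;
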
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..a5d1494a3dc4 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -131,8 +131,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
131 131
132 132
133 /* Enable extended register access */ 133 /* Enable extended register access */
134 ast_enable_mmio(dev);
135 ast_open_key(ast); 134 ast_open_key(ast);
135 ast_enable_mmio(dev);
136 136
137 /* Find out whether P2A works or whether to use device-tree */ 137 /* Find out whether P2A works or whether to use device-tree */
138 ast_detect_config_mode(dev, &scu_rev); 138 ast_detect_config_mode(dev, &scu_rev);
@@ -576,6 +576,9 @@ void ast_driver_unload(struct drm_device *dev)
576{ 576{
577 struct ast_private *ast = dev->dev_private; 577 struct ast_private *ast = dev->dev_private;
578 578
579 /* enable standard VGA decode */
580 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
581
579 ast_release_firmware(dev); 582 ast_release_firmware(dev);
580 kfree(ast->dp501_fw_addr); 583 kfree(ast->dp501_fw_addr);
581 ast_mode_fini(dev); 584 ast_mode_fini(dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..a1cb020e07e5 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -604,7 +604,7 @@ static int ast_crtc_mode_set(struct drm_crtc *crtc,
604 return -EINVAL; 604 return -EINVAL;
605 ast_open_key(ast); 605 ast_open_key(ast);
606 606
607 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); 607 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
608 608
609 ast_set_std_reg(crtc, adjusted_mode, &vbios_mode); 609 ast_set_std_reg(crtc, adjusted_mode, &vbios_mode);
610 ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode); 610 ast_set_crtc_reg(crtc, adjusted_mode, &vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..c1d1ac51d1c2 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -46,7 +46,7 @@ void ast_enable_mmio(struct drm_device *dev)
46{ 46{
47 struct ast_private *ast = dev->dev_private; 47 struct ast_private *ast = dev->dev_private;
48 48
49 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x04); 49 ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
50} 50}
51 51
52 52
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..1cb1fa74cfbc 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
1465 else if (intel_crtc_has_dp_encoder(pipe_config)) 1465 else if (intel_crtc_has_dp_encoder(pipe_config))
1466 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 1466 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
1467 &pipe_config->dp_m_n); 1467 &pipe_config->dp_m_n);
1468 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) 1468 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
1469 dotclock = pipe_config->port_clock * 2 / 3; 1469 dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
1470 else 1470 else
1471 dotclock = pipe_config->port_clock; 1471 dotclock = pipe_config->port_clock;
1472 1472
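
The ddi_dotclock_get() change generalizes the deep-color case: an HDMI link carrying pipe_bpp bits per pixel runs the port clock at pipe_bpp/24 times the dotclock, so inverting gives dotclock = port_clock * 24 / pipe_bpp. At 12 bpc (pipe_bpp 36) that is the old hard-coded 2/3 factor; at 10 bpc (pipe_bpp 30) it yields the 4/5 factor the old test missed. As a sketch:

static int hdmi_dotclock(int port_clock, int pipe_bpp)
{
        /* 36 bpp: *2/3 (old special case); 30 bpp: *4/5 (newly handled). */
        return pipe_bpp > 24 ? port_clock * 24 / pipe_bpp : port_clock;
}
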
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 60652ebbdf61..18e4cba76720 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -539,7 +539,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
539 539
540 intel_attach_force_audio_property(connector); 540 intel_attach_force_audio_property(connector);
541 intel_attach_broadcast_rgb_property(connector); 541 intel_attach_broadcast_rgb_property(connector);
542 drm_connector_attach_max_bpc_property(connector, 6, 12); 542
543 /*
544 * Reuse the prop from the SST connector because we're
545 * not allowed to create new props after device registration.
546 */
547 connector->max_bpc_property =
548 intel_dp->attached_connector->base.max_bpc_property;
549 if (connector->max_bpc_property)
550 drm_connector_attach_max_bpc_property(connector, 6, 12);
543 551
544 return connector; 552 return connector;
545 553
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..f413904a3e96 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -541,7 +541,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
541 pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | 541 pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
542 DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); 542 DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
543 DRM_INFO("PPS2 = 0x%08x\n", pps_val); 543 DRM_INFO("PPS2 = 0x%08x\n", pps_val);
544 if (encoder->type == INTEL_OUTPUT_EDP) { 544 if (cpu_transcoder == TRANSCODER_EDP) {
545 I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); 545 I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
546 /* 546 /*
547 * If 2 VDSC instances are needed, configure PPS for second 547 * If 2 VDSC instances are needed, configure PPS for second
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9f3fd7d96a69..75baff657e43 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1528,9 +1528,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1528 if (!intel_gvt_ggtt_validate_range(vgpu, 1528 if (!intel_gvt_ggtt_validate_range(vgpu,
1529 workload->wa_ctx.indirect_ctx.guest_gma, 1529 workload->wa_ctx.indirect_ctx.guest_gma,
1530 workload->wa_ctx.indirect_ctx.size)) { 1530 workload->wa_ctx.indirect_ctx.size)) {
1531 kmem_cache_free(s->workloads, workload);
1532 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", 1531 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1533 workload->wa_ctx.indirect_ctx.guest_gma); 1532 workload->wa_ctx.indirect_ctx.guest_gma);
1533 kmem_cache_free(s->workloads, workload);
1534 return ERR_PTR(-EINVAL); 1534 return ERR_PTR(-EINVAL);
1535 } 1535 }
1536 } 1536 }
@@ -1542,9 +1542,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1542 if (!intel_gvt_ggtt_validate_range(vgpu, 1542 if (!intel_gvt_ggtt_validate_range(vgpu,
1543 workload->wa_ctx.per_ctx.guest_gma, 1543 workload->wa_ctx.per_ctx.guest_gma,
1544 CACHELINE_BYTES)) { 1544 CACHELINE_BYTES)) {
1545 kmem_cache_free(s->workloads, workload);
1546 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", 1545 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1547 workload->wa_ctx.per_ctx.guest_gma); 1546 workload->wa_ctx.per_ctx.guest_gma);
1547 kmem_cache_free(s->workloads, workload);
1548 return ERR_PTR(-EINVAL); 1548 return ERR_PTR(-EINVAL);
1549 } 1549 }
1550 } 1550 }
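
Both gvt hunks reorder a use-after-free: gvt_vgpu_err() formatted fields of the workload after kmem_cache_free() had already released it. The general rule, sketched with an invented invalid() predicate:

if (invalid(obj->field)) {
        pr_err("bad field: 0x%lx\n", obj->field);  /* last use of obj...   */
        kmem_cache_free(cache, obj);               /* ...then release it   */
        return ERR_PTR(-EINVAL);
}
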
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..bac1ee94f63f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1598,6 +1598,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1598 1598
1599 pci_set_master(pdev); 1599 pci_set_master(pdev);
1600 1600
1601 /*
1602 * We don't have a max segment size, so set it to the max so sg's
1603 * debugging layer doesn't complain
1604 */
1605 dma_set_max_seg_size(&pdev->dev, UINT_MAX);
1606
1601 /* overlay on gen2 is broken and can't address above 1G */ 1607 /* overlay on gen2 is broken and can't address above 1G */
1602 if (IS_GEN(dev_priv, 2)) { 1608 if (IS_GEN(dev_priv, 2)) {
1603 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); 1609 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..724627afdedc 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -101,6 +101,9 @@ static struct _balloon_info_ bl_info;
101static void vgt_deballoon_space(struct i915_ggtt *ggtt, 101static void vgt_deballoon_space(struct i915_ggtt *ggtt,
102 struct drm_mm_node *node) 102 struct drm_mm_node *node)
103{ 103{
104 if (!drm_mm_node_allocated(node))
105 return;
106
104 DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n", 107 DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
105 node->start, 108 node->start,
106 node->start + node->size, 109 node->start + node->size,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..f11979879e7b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -829,7 +829,7 @@ struct intel_crtc_state {
829 829
830 /* 830 /*
831 * Frequence the dpll for the port should run at. Differs from the 831 * Frequence the dpll for the port should run at. Differs from the
832 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also 832 * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
833 * already multiplied by pixel_multiplier. 833 * already multiplied by pixel_multiplier.
834 */ 834 */
835 int port_clock; 835 int port_clock;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..945bc20f1d33 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -17,6 +17,7 @@
17#include <linux/of_address.h> 17#include <linux/of_address.h>
18#include <linux/of_platform.h> 18#include <linux/of_platform.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/dma-mapping.h>
20 21
21#include "mtk_drm_crtc.h" 22#include "mtk_drm_crtc.h"
22#include "mtk_drm_ddp.h" 23#include "mtk_drm_ddp.h"
@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
213 struct mtk_drm_private *private = drm->dev_private; 214 struct mtk_drm_private *private = drm->dev_private;
214 struct platform_device *pdev; 215 struct platform_device *pdev;
215 struct device_node *np; 216 struct device_node *np;
217 struct device *dma_dev;
216 int ret; 218 int ret;
217 219
218 if (!iommu_present(&platform_bus_type)) 220 if (!iommu_present(&platform_bus_type))
@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
275 goto err_component_unbind; 277 goto err_component_unbind;
276 } 278 }
277 279
278 private->dma_dev = &pdev->dev; 280 dma_dev = &pdev->dev;
281 private->dma_dev = dma_dev;
282
283 /*
284 * Configure the DMA segment size to make sure we get contiguous IOVA
285 * when importing PRIME buffers.
286 */
287 if (!dma_dev->dma_parms) {
288 private->dma_parms_allocated = true;
289 dma_dev->dma_parms =
290 devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
291 GFP_KERNEL);
292 }
293 if (!dma_dev->dma_parms) {
294 ret = -ENOMEM;
295 goto err_component_unbind;
296 }
297
298 ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
299 if (ret) {
300 dev_err(dma_dev, "Failed to set DMA segment size\n");
301 goto err_unset_dma_parms;
302 }
279 303
280 /* 304 /*
281 * We don't use the drm_irq_install() helpers provided by the DRM 305 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
285 drm->irq_enabled = true; 309 drm->irq_enabled = true;
286 ret = drm_vblank_init(drm, MAX_CRTC); 310 ret = drm_vblank_init(drm, MAX_CRTC);
287 if (ret < 0) 311 if (ret < 0)
288 goto err_component_unbind; 312 goto err_unset_dma_parms;
289 313
290 drm_kms_helper_poll_init(drm); 314 drm_kms_helper_poll_init(drm);
291 drm_mode_config_reset(drm); 315 drm_mode_config_reset(drm);
292 316
293 return 0; 317 return 0;
294 318
319err_unset_dma_parms:
320 if (private->dma_parms_allocated)
321 dma_dev->dma_parms = NULL;
295err_component_unbind: 322err_component_unbind:
296 component_unbind_all(drm->dev, drm); 323 component_unbind_all(drm->dev, drm);
297err_config_cleanup: 324err_config_cleanup:
@@ -302,9 +329,14 @@ err_config_cleanup:
302 329
303static void mtk_drm_kms_deinit(struct drm_device *drm) 330static void mtk_drm_kms_deinit(struct drm_device *drm)
304{ 331{
332 struct mtk_drm_private *private = drm->dev_private;
333
305 drm_kms_helper_poll_fini(drm); 334 drm_kms_helper_poll_fini(drm);
306 drm_atomic_helper_shutdown(drm); 335 drm_atomic_helper_shutdown(drm);
307 336
337 if (private->dma_parms_allocated)
338 private->dma_dev->dma_parms = NULL;
339
308 component_unbind_all(drm->dev, drm); 340 component_unbind_all(drm->dev, drm);
309 drm_mode_config_cleanup(drm); 341 drm_mode_config_cleanup(drm);
310} 342}
@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
320 .compat_ioctl = drm_compat_ioctl, 352 .compat_ioctl = drm_compat_ioctl,
321}; 353};
322 354
355/*
356 * We need to override this because the device used to import the memory is
357 * not dev->dev, as drm_gem_prime_import() expects.
358 */
359struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
360 struct dma_buf *dma_buf)
361{
362 struct mtk_drm_private *private = dev->dev_private;
363
364 return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
365}
366
323static struct drm_driver mtk_drm_driver = { 367static struct drm_driver mtk_drm_driver = {
324 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 368 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
325 DRIVER_ATOMIC, 369 DRIVER_ATOMIC,
@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
331 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 375 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
332 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 376 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
333 .gem_prime_export = drm_gem_prime_export, 377 .gem_prime_export = drm_gem_prime_export,
334 .gem_prime_import = drm_gem_prime_import, 378 .gem_prime_import = mtk_drm_gem_prime_import,
335 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table, 379 .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
336 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table, 380 .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
337 .gem_prime_mmap = mtk_drm_gem_mmap_buf, 381 .gem_prime_mmap = mtk_drm_gem_mmap_buf,
@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
524 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); 568 comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
525 if (!comp) { 569 if (!comp) {
526 ret = -ENOMEM; 570 ret = -ENOMEM;
571 of_node_put(node);
527 goto err_node; 572 goto err_node;
528 } 573 }
529 574
530 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL); 575 ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
531 if (ret) 576 if (ret) {
577 of_node_put(node);
532 goto err_node; 578 goto err_node;
579 }
533 580
534 private->ddp_comp[comp_id] = comp; 581 private->ddp_comp[comp_id] = comp;
535 } 582 }
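
Besides the DMA-segment plumbing, the mtk probe fix addresses leaked node references: for_each_available_child_of_node() only drops the child's refcount when the loop advances, so bailing out with goto must put it explicitly. The shape of the fix, with use() standing in for the per-node work:

for_each_available_child_of_node(parent, node) {
        err = use(node);
        if (err) {
                of_node_put(node);       /* iterator won't put it on goto */
                goto err_out;
        }
}
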
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 598ff3e70446..e03fea12ff59 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -51,6 +51,8 @@ struct mtk_drm_private {
51 } commit; 51 } commit;
52 52
53 struct drm_atomic_state *suspend_state; 53 struct drm_atomic_state *suspend_state;
54
55 bool dma_parms_allocated;
54}; 56};
55 57
56extern struct platform_driver mtk_ddp_driver; 58extern struct platform_driver mtk_ddp_driver;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 126703816794..5c36c75232e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -771,16 +771,20 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
771 struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); 771 struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
772 int slots; 772 int slots;
773 773
774 /* When restoring duplicated states, we need to make sure that the 774 if (crtc_state->mode_changed || crtc_state->connectors_changed) {
775 * bw remains the same and avoid recalculating it, as the connector's 775 /*
776 * bpc may have changed after the state was duplicated 776 * When restoring duplicated states, we need to make sure that
777 */ 777 * the bw remains the same and avoid recalculating it, as the
778 if (!state->duplicated) 778 * connector's bpc may have changed after the state was
779 asyh->dp.pbn = 779 * duplicated
780 drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 780 */
781 connector->display_info.bpc * 3); 781 if (!state->duplicated) {
782 const int bpp = connector->display_info.bpc * 3;
783 const int clock = crtc_state->adjusted_mode.clock;
784
785 asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, bpp);
786 }
782 787
783 if (crtc_state->mode_changed) {
784 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, 788 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
785 mstc->port, 789 mstc->port,
786 asyh->dp.pbn); 790 asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404fe660..a11637b0f6cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
40 u8 *ptr = msg->buf; 40 u8 *ptr = msg->buf;
41 41
42 while (remaining) { 42 while (remaining) {
43 u8 cnt = (remaining > 16) ? 16 : remaining; 43 u8 cnt, retries, cmd;
44 u8 cmd;
45 44
46 if (msg->flags & I2C_M_RD) 45 if (msg->flags & I2C_M_RD)
47 cmd = 1; 46 cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
51 if (mcnt || remaining > 16) 50 if (mcnt || remaining > 16)
52 cmd |= 4; /* MOT */ 51 cmd |= 4; /* MOT */
53 52
54 ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt); 53 for (retries = 0, cnt = 0;
55 if (ret < 0) { 54 retries < 32 && !cnt;
56 nvkm_i2c_aux_release(aux); 55 retries++) {
57 return ret; 56 cnt = min_t(u8, remaining, 16);
57 ret = aux->func->xfer(aux, true, cmd,
58 msg->addr, ptr, &cnt);
59 if (ret < 0)
60 goto out;
61 }
62 if (!cnt) {
63 AUX_TRACE(aux, "no data after 32 retries");
64 ret = -EIO;
65 goto out;
58 } 66 }
59 67
60 ptr += cnt; 68 ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
64 msg++; 72 msg++;
65 } 73 }
66 74
75 ret = num;
76out:
67 nvkm_i2c_aux_release(aux); 77 nvkm_i2c_aux_release(aux);
68 return num; 78 return ret;
69} 79}
70 80
71static u32 81static u32
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index de0f882f0f7b..14b41de44ebc 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,6 +4,7 @@
4 * Author: Archit Taneja <archit@ti.com> 4 * Author: Archit Taneja <archit@ti.com>
5 */ 5 */
6 6
7#include <linux/bitops.h>
7#include <linux/kernel.h> 8#include <linux/kernel.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/platform_device.h> 10#include <linux/platform_device.h>
@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out)
20{ 21{
21 struct device_node *remote_node; 22 struct device_node *remote_node;
22 23
23 remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); 24 remote_node = of_graph_get_remote_node(out->dev->of_node,
25 ffs(out->of_ports) - 1, 0);
24 if (!remote_node) { 26 if (!remote_node) {
25 dev_dbg(out->dev, "failed to find video sink\n"); 27 dev_dbg(out->dev, "failed to find video sink\n");
26 return 0; 28 return 0;
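
The omapdss fix stops hard-coding port 0: of_graph_get_remote_node() wants a zero-based port index, while out->of_ports is a bitmask of the ports the device drives. ffs() returns the one-based position of the lowest set bit, so the first driven port is:

unsigned int port = ffs(out->of_ports) - 1;   /* of_ports == 0x4 -> port 2 */
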
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..1bad0a2cc5c6 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
669 if (omapdss_is_initialized() == false) 669 if (omapdss_is_initialized() == false)
670 return -EPROBE_DEFER; 670 return -EPROBE_DEFER;
671 671
672 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 672 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
673 if (ret) { 673 if (ret) {
674 dev_err(&pdev->dev, "Failed to set the DMA mask\n"); 674 dev_err(&pdev->dev, "Failed to set the DMA mask\n");
675 return ret; 675 return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..952201c6d821 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -59,6 +59,11 @@ module_param_named(num_heads, qxl_num_crtc, int, 0400);
59static struct drm_driver qxl_driver; 59static struct drm_driver qxl_driver;
60static struct pci_driver qxl_pci_driver; 60static struct pci_driver qxl_pci_driver;
61 61
62static bool is_vga(struct pci_dev *pdev)
63{
64 return pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
65}
66
62static int 67static int
63qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 68qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64{ 69{
@@ -83,9 +88,17 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
83 if (ret) 88 if (ret)
84 goto disable_pci; 89 goto disable_pci;
85 90
91 if (is_vga(pdev)) {
92 ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
93 if (ret) {
94 DRM_ERROR("can't get legacy vga ioports\n");
95 goto disable_pci;
96 }
97 }
98
86 ret = qxl_device_init(qdev, &qxl_driver, pdev); 99 ret = qxl_device_init(qdev, &qxl_driver, pdev);
87 if (ret) 100 if (ret)
88 goto disable_pci; 101 goto put_vga;
89 102
90 ret = qxl_modeset_init(qdev); 103 ret = qxl_modeset_init(qdev);
91 if (ret) 104 if (ret)
@@ -105,6 +118,9 @@ modeset_cleanup:
105 qxl_modeset_fini(qdev); 118 qxl_modeset_fini(qdev);
106unload: 119unload:
107 qxl_device_fini(qdev); 120 qxl_device_fini(qdev);
121put_vga:
122 if (is_vga(pdev))
123 vga_put(pdev, VGA_RSRC_LEGACY_IO);
108disable_pci: 124disable_pci:
109 pci_disable_device(pdev); 125 pci_disable_device(pdev);
110free_dev: 126free_dev:
@@ -122,6 +138,8 @@ qxl_pci_remove(struct pci_dev *pdev)
122 138
123 qxl_modeset_fini(qdev); 139 qxl_modeset_fini(qdev);
124 qxl_device_fini(qdev); 140 qxl_device_fini(qdev);
141 if (is_vga(pdev))
142 vga_put(pdev, VGA_RSRC_LEGACY_IO);
125 143
126 dev->dev_private = NULL; 144 dev->dev_private = NULL;
127 kfree(qdev); 145 kfree(qdev);
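
The qxl change claims legacy VGA ioports only for actual VGA-class functions. pdev->class holds the 24-bit class code packed as base(8) | subclass(8) | prog-if(8); PCI_CLASS_DISPLAY_VGA is the 16-bit base/subclass pair 0x0300, so shifting it left by 8 yields the full code 0x030000 with prog-if 0:

/* 0x0300 << 8 == 0x030000: display controller, VGA-compatible, prog-if 0 */
bool vga = pdev->class == PCI_CLASS_DISPLAY_VGA << 8;
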
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 1c62578590f4..082d02c84024 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
673 673
674 /* Locate the companion LVDS encoder for dual-link operation, if any. */ 674 /* Locate the companion LVDS encoder for dual-link operation, if any. */
675 companion = of_parse_phandle(dev->of_node, "renesas,companion", 0); 675 companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
676 if (!companion) { 676 if (!companion)
677 dev_err(dev, "Companion LVDS encoder not found\n"); 677 return 0;
678 return -ENXIO;
679 }
680 678
681 /* 679 /*
682 * Sanity check: the companion encoder must have the same compatible 680 * Sanity check: the companion encoder must have the same compatible
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..671c90f34ede 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -95,7 +95,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
95 rmb(); /* for list_empty to work without lock */ 95 rmb(); /* for list_empty to work without lock */
96 96
97 if (list_empty(&entity->list) || 97 if (list_empty(&entity->list) ||
98 spsc_queue_peek(&entity->job_queue) == NULL) 98 spsc_queue_count(&entity->job_queue) == 0)
99 return true; 99 return true;
100 100
101 return false; 101 return false;
@@ -281,7 +281,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
281 /* Consumption of existing IBs wasn't completed. Forcefully 281 /* Consumption of existing IBs wasn't completed. Forcefully
282 * remove them here. 282 * remove them here.
283 */ 283 */
284 if (spsc_queue_peek(&entity->job_queue)) { 284 if (spsc_queue_count(&entity->job_queue)) {
285 if (sched) { 285 if (sched) {
286 /* Park the kernel for a moment to make sure it isn't processing 286 /* Park the kernel for a moment to make sure it isn't processing
287 * our entity. 287 * our entity.
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..df0cc8f46d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
314 /* R and B components are only 5 bits deep */ 314 /* R and B components are only 5 bits deep */
315 val |= SUN4I_TCON0_FRM_CTL_MODE_R; 315 val |= SUN4I_TCON0_FRM_CTL_MODE_R;
316 val |= SUN4I_TCON0_FRM_CTL_MODE_B; 316 val |= SUN4I_TCON0_FRM_CTL_MODE_B;
317 /* Fall through */
317 case MEDIA_BUS_FMT_RGB666_1X18: 318 case MEDIA_BUS_FMT_RGB666_1X18:
318 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: 319 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
319 /* Fall through: enable dithering */ 320 /* Fall through: enable dithering */
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..b889ad3e86e1 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
993 ret = sun6i_dsi_dcs_read(dsi, msg); 993 ret = sun6i_dsi_dcs_read(dsi, msg);
994 break; 994 break;
995 } 995 }
996 /* Else, fall through */
996 997
997 default: 998 default:
998 ret = -EINVAL; 999 ret = -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index b2da31310d24..09b526518f5a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
204 .interruptible = false, 204 .interruptible = false,
205 .no_wait_gpu = false 205 .no_wait_gpu = false
206 }; 206 };
207 size_t max_segment;
207 208
208 /* wtf swapping */ 209 /* wtf swapping */
209 if (bo->pages) 210 if (bo->pages)
@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
215 if (!bo->pages) 216 if (!bo->pages)
216 goto out; 217 goto out;
217 218
218 ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, 219 max_segment = virtio_max_dma_size(qdev->vdev);
219 nr_pages << PAGE_SHIFT, GFP_KERNEL); 220 max_segment &= PAGE_MASK;
221 if (max_segment > SCATTERLIST_MAX_SEGMENT)
222 max_segment = SCATTERLIST_MAX_SEGMENT;
223 ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0,
224 nr_pages << PAGE_SHIFT,
225 max_segment, GFP_KERNEL);
220 if (ret) 226 if (ret)
221 goto out; 227 goto out;
222 return 0; 228 return 0;
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 2310c96ccf4a..db1b55df0d13 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)
1153 1153
1154 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); 1154 INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);
1155 1155
1156 cp2112_gpio_direction_input(gc, d->hwirq);
1157
1158 if (!dev->gpio_poll) { 1156 if (!dev->gpio_poll) {
1159 dev->gpio_poll = true; 1157 dev->gpio_poll = true;
1160 schedule_delayed_work(&dev->gpio_poll_worker, 0); 1158 schedule_delayed_work(&dev->gpio_poll_worker, 0);
@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
1204 return PTR_ERR(dev->desc[pin]); 1202 return PTR_ERR(dev->desc[pin]);
1205 } 1203 }
1206 1204
1205 ret = cp2112_gpio_direction_input(&dev->gc, pin);
1206 if (ret < 0) {
1207 dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
1208 goto err_desc;
1209 }
1210
1207 ret = gpiochip_lock_as_irq(&dev->gc, pin); 1211 ret = gpiochip_lock_as_irq(&dev->gc, pin);
1208 if (ret) { 1212 if (ret) {
1209 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n"); 1213 dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 21268c9fa71a..0179f7ed77e5 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = {
3749 3749
3750 { L27MHZ_DEVICE(HID_ANY_ID) }, 3750 { L27MHZ_DEVICE(HID_ANY_ID) },
3751 3751
3752 { /* Logitech G203/Prodigy Gaming Mouse */
3753 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
3754 { /* Logitech G302 Gaming Mouse */
3755 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
3756 { /* Logitech G303 Gaming Mouse */
3757 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
3758 { /* Logitech G400 Gaming Mouse */
3759 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
3760 { /* Logitech G403 Wireless Gaming Mouse over USB */ 3752 { /* Logitech G403 Wireless Gaming Mouse over USB */
3761 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) }, 3753 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
3762 { /* Logitech G403 Gaming Mouse */
3763 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
3764 { /* Logitech G403 Hero Gaming Mouse over USB */
3765 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
3766 { /* Logitech G502 Proteus Core Gaming Mouse */
3767 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
3768 { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
3769 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
3770 { /* Logitech G502 Hero Gaming Mouse over USB */
3771 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
3772 { /* Logitech G700 Gaming Mouse over USB */
3773 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
3774 { /* Logitech G700s Gaming Mouse over USB */
3775 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
3776 { /* Logitech G703 Gaming Mouse over USB */ 3754 { /* Logitech G703 Gaming Mouse over USB */
3777 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) }, 3755 HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
3778 { /* Logitech G703 Hero Gaming Mouse over USB */ 3756 { /* Logitech G703 Hero Gaming Mouse over USB */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 1065692f90e2..5792a104000a 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -24,6 +24,7 @@
24#define ICL_MOBILE_DEVICE_ID 0x34FC 24#define ICL_MOBILE_DEVICE_ID 0x34FC
25#define SPT_H_DEVICE_ID 0xA135 25#define SPT_H_DEVICE_ID 0xA135
26#define CML_LP_DEVICE_ID 0x02FC 26#define CML_LP_DEVICE_ID 0x02FC
27#define EHL_Ax_DEVICE_ID 0x4BB3
27 28
28#define REVISION_ID_CHT_A0 0x6 29#define REVISION_ID_CHT_A0 0x6
29#define REVISION_ID_CHT_Ax_SI 0x0 30#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index aa80b4d3b740..279567baca3d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
33 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)}, 33 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 34 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)}, 35 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
36 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
36 {0, } 37 {0, }
37}; 38};
38MODULE_DEVICE_TABLE(pci, ish_pci_tbl); 39MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 7a8ddc999a8e..1713235d28cb 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
846 y >>= 1; 846 y >>= 1;
847 distance >>= 1; 847 distance >>= 1;
848 } 848 }
849 if (features->type == INTUOSHT2)
850 distance = features->distance_max - distance;
849 input_report_abs(input, ABS_X, x); 851 input_report_abs(input, ABS_X, x);
850 input_report_abs(input, ABS_Y, y); 852 input_report_abs(input, ABS_Y, y);
851 input_report_abs(input, ABS_DISTANCE, distance); 853 input_report_abs(input, ABS_DISTANCE, distance);
@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1059 input_report_key(input, BTN_BASE2, (data[11] & 0x02)); 1061 input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1060 1062
1061 if (data[12] & 0x80) 1063 if (data[12] & 0x80)
1062 input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f)); 1064 input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1063 else 1065 else
1064 input_report_abs(input, ABS_WHEEL, 0); 1066 input_report_abs(input, ABS_WHEEL, 0);
1065 1067
@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1290 } 1292 }
1291 if (wacom->tool[0]) { 1293 if (wacom->tool[0]) {
1292 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); 1294 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
1293 if (wacom->features.type == INTUOSP2_BT) { 1295 if (wacom->features.type == INTUOSP2_BT ||
1296 wacom->features.type == INTUOSP2S_BT) {
1294 input_report_abs(pen_input, ABS_DISTANCE, 1297 input_report_abs(pen_input, ABS_DISTANCE,
1295 range ? frame[13] : wacom->features.distance_max); 1298 range ? frame[13] : wacom->features.distance_max);
1296 } else { 1299 } else {
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 5f9505a087f6..23f358cb7f49 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -26,7 +26,7 @@
26 26
27static unsigned long virt_to_hvpfn(void *addr) 27static unsigned long virt_to_hvpfn(void *addr)
28{ 28{
29 unsigned long paddr; 29 phys_addr_t paddr;
30 30
31 if (is_vmalloc_addr(addr)) 31 if (is_vmalloc_addr(addr))
32 paddr = page_to_phys(vmalloc_to_page(addr)) + 32 paddr = page_to_phys(vmalloc_to_page(addr)) +
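
The hv channel fix matters on 32-bit kernels with PAE, where physical addresses are wider than unsigned long: storing a page_to_phys() result in a 32-bit variable truncates the high bits before the PFN shift. A sketch of the truncation (the address is made up):

phys_addr_t pa = 0x100100000ULL;        /* > 4 GiB, reachable under PAE  */
unsigned long lost = pa;                /* 0x00100000 on a 32-bit build  */
unsigned long pfn = pa >> PAGE_SHIFT;   /* shift the wide type instead   */
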
diff --git a/drivers/hv/hv_trace.h b/drivers/hv/hv_trace.h
index 999f80a63bff..e70783e33680 100644
--- a/drivers/hv/hv_trace.h
+++ b/drivers/hv/hv_trace.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2 2
3#undef TRACE_SYSTEM 3#undef TRACE_SYSTEM
4#define TRACE_SYSTEM hyperv 4#define TRACE_SYSTEM hyperv
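
This and the two intel_th hunks below apply the kernel's SPDX placement rule: C sources use the // form, but headers use a /* */ comment, since headers may be included in contexts (assembly, linker scripts) where // comments are not valid. Side by side:

/* SPDX-License-Identifier: GPL-2.0 */   <- header (.h) style
// SPDX-License-Identifier: GPL-2.0      <- C source (.c) style
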
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 362e70e9d145..fb16a622e8ab 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -146,8 +146,6 @@ struct hv_context {
146 */ 146 */
147 u64 guestid; 147 u64 guestid;
148 148
149 void *tsc_page;
150
151 struct hv_per_cpu_context __percpu *cpu_context; 149 struct hv_per_cpu_context __percpu *cpu_context;
152 150
153 /* 151 /*
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 574c16004cb2..13d9b141daaa 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures 3 * Intel(R) Trace Hub Memory Storage Unit (MSU) data structures
4 * 4 *
diff --git a/drivers/hwtracing/intel_th/pti.h b/drivers/hwtracing/intel_th/pti.h
index e9381babc84c..7dfc0431333b 100644
--- a/drivers/hwtracing/intel_th/pti.h
+++ b/drivers/hwtracing/intel_th/pti.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * Intel(R) Trace Hub PTI output data structures 3 * Intel(R) Trace Hub PTI output data structures
4 * 4 *
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index d7fd76baec92..19ef2b0c682a 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -790,7 +790,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
790 790
791static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) 791static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
792{ 792{
793 u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 793 u32 val;
794
795 /* We do not support the SMBUS Quick command */
796 val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
794 797
795 if (adap->algo->reg_slave) 798 if (adap->algo->reg_slave)
796 val |= I2C_FUNC_SLAVE; 799 val |= I2C_FUNC_SLAVE;
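
I2C_FUNC_SMBUS_QUICK advertises zero-length transfers, which i2cdetect uses for its default probe; a controller that cannot emit a zero-byte message must mask it out of the emulation bits rather than advertise it and fail. The same pattern reappears in the i2c-mt65xx hunk further down, keyed off an I2C_AQ_NO_ZERO_LEN quirk:

return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
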
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index e7f9305b2dd9..f5f001738df5 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
94 94
95 dev->disable_int(dev); 95 dev->disable_int(dev);
96 dev->disable(dev); 96 dev->disable(dev);
97 synchronize_irq(dev->irq);
97 dev->slave = NULL; 98 dev->slave = NULL;
98 pm_runtime_put(dev->dev); 99 pm_runtime_put(dev->dev);
99 100
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 35b302d983e0..959d4912ec0d 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -69,6 +69,7 @@ struct em_i2c_device {
69 struct completion msg_done; 69 struct completion msg_done;
70 struct clk *sclk; 70 struct clk *sclk;
71 struct i2c_client *slave; 71 struct i2c_client *slave;
72 int irq;
72}; 73};
73 74
74static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg) 75static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
@@ -339,6 +340,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
339 340
340 writeb(0, priv->base + I2C_OFS_SVA0); 341 writeb(0, priv->base + I2C_OFS_SVA0);
341 342
343 /*
344 * Wait for interrupt to finish. New slave irqs cannot happen because we
345 * cleared the slave address and, thus, only extension codes will be
346 * detected which do not use the slave ptr.
347 */
348 synchronize_irq(priv->irq);
342 priv->slave = NULL; 349 priv->slave = NULL;
343 350
344 return 0; 351 return 0;
@@ -355,7 +362,7 @@ static int em_i2c_probe(struct platform_device *pdev)
355{ 362{
356 struct em_i2c_device *priv; 363 struct em_i2c_device *priv;
357 struct resource *r; 364 struct resource *r;
358 int irq, ret; 365 int ret;
359 366
360 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 367 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
361 if (!priv) 368 if (!priv)
@@ -390,8 +397,8 @@ static int em_i2c_probe(struct platform_device *pdev)
390 397
391 em_i2c_reset(&priv->adap); 398 em_i2c_reset(&priv->adap);
392 399
393 irq = platform_get_irq(pdev, 0); 400 priv->irq = platform_get_irq(pdev, 0);
394 ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0, 401 ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
395 "em_i2c", priv); 402 "em_i2c", priv);
396 if (ret) 403 if (ret)
397 goto err_clk; 404 goto err_clk;
@@ -401,7 +408,8 @@ static int em_i2c_probe(struct platform_device *pdev)
401 if (ret) 408 if (ret)
402 goto err_clk; 409 goto err_clk;
403 410
404 dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq); 411 dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
412 priv->irq);
405 413
406 return 0; 414 return 0;
407 415
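
Both the designware and em-i2c slave hunks close the same race: after the hardware is quiesced, an interrupt handler may still be mid-flight and dereferencing dev->slave, so synchronize_irq() must complete before the pointer is cleared. The teardown ordering, with hypothetical helper names:

disable_hw_irqs(dev);          /* stop new interrupts             */
synchronize_irq(dev->irq);     /* wait out any running handler    */
dev->slave = NULL;             /* now safe: no handler can use it */
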
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f2956936c3f2..2e08b4722dc4 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1194,19 +1194,28 @@ static acpi_status check_acpi_smo88xx_device(acpi_handle obj_handle,
1194 int i; 1194 int i;
1195 1195
1196 status = acpi_get_object_info(obj_handle, &info); 1196 status = acpi_get_object_info(obj_handle, &info);
1197 if (!ACPI_SUCCESS(status) || !(info->valid & ACPI_VALID_HID)) 1197 if (ACPI_FAILURE(status))
1198 return AE_OK; 1198 return AE_OK;
1199 1199
1200 if (!(info->valid & ACPI_VALID_HID))
1201 goto smo88xx_not_found;
1202
1200 hid = info->hardware_id.string; 1203 hid = info->hardware_id.string;
1201 if (!hid) 1204 if (!hid)
1202 return AE_OK; 1205 goto smo88xx_not_found;
1203 1206
1204 i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid); 1207 i = match_string(acpi_smo8800_ids, ARRAY_SIZE(acpi_smo8800_ids), hid);
1205 if (i < 0) 1208 if (i < 0)
1206 return AE_OK; 1209 goto smo88xx_not_found;
1210
1211 kfree(info);
1207 1212
1208 *((bool *)return_value) = true; 1213 *((bool *)return_value) = true;
1209 return AE_CTRL_TERMINATE; 1214 return AE_CTRL_TERMINATE;
1215
1216smo88xx_not_found:
1217 kfree(info);
1218 return AE_OK;
1210} 1219}
1211 1220
1212static bool is_dell_system_with_lis3lv02d(void) 1221static bool is_dell_system_with_lis3lv02d(void)
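
The i801 hunk plugs a leak: acpi_get_object_info() hands back a kmalloc'd buffer that the caller owns, and every early return skipped freeing it. Funneling all exits through one release point keeps the ownership obvious:

status = acpi_get_object_info(handle, &info);
if (ACPI_FAILURE(status))
        return AE_OK;              /* nothing was allocated */
/* ... checks that may bail jump to the label below ... */
not_found:
        kfree(info);               /* single owner-release point */
        return AE_OK;
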
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index b1b8b938d7f4..15f6cde6452f 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -273,8 +273,8 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
273} 273}
274 274
275/* Functions for DMA support */ 275/* Functions for DMA support */
276static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, 276static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
277 dma_addr_t phy_addr) 277 dma_addr_t phy_addr)
278{ 278{
279 struct imx_i2c_dma *dma; 279 struct imx_i2c_dma *dma;
280 struct dma_slave_config dma_sconfig; 280 struct dma_slave_config dma_sconfig;
@@ -283,7 +283,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
283 283
284 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); 284 dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
285 if (!dma) 285 if (!dma)
286 return -ENOMEM; 286 return;
287 287
288 dma->chan_tx = dma_request_chan(dev, "tx"); 288 dma->chan_tx = dma_request_chan(dev, "tx");
289 if (IS_ERR(dma->chan_tx)) { 289 if (IS_ERR(dma->chan_tx)) {
@@ -328,7 +328,7 @@ static int i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
328 dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", 328 dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n",
329 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); 329 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
330 330
331 return 0; 331 return;
332 332
333fail_rx: 333fail_rx:
334 dma_release_channel(dma->chan_rx); 334 dma_release_channel(dma->chan_rx);
@@ -336,8 +336,6 @@ fail_tx:
336 dma_release_channel(dma->chan_tx); 336 dma_release_channel(dma->chan_tx);
337fail_al: 337fail_al:
338 devm_kfree(dev, dma); 338 devm_kfree(dev, dma);
339 /* return successfully if there is no dma support */
340 return ret == -ENODEV ? 0 : ret;
341} 339}
342 340
343static void i2c_imx_dma_callback(void *arg) 341static void i2c_imx_dma_callback(void *arg)
@@ -1165,17 +1163,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
1165 dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res); 1163 dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
1166 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", 1164 dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
1167 i2c_imx->adapter.name); 1165 i2c_imx->adapter.name);
1166 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
1168 1167
1169 /* Init DMA config if supported */ 1168 /* Init DMA config if supported */
1170 ret = i2c_imx_dma_request(i2c_imx, phy_addr); 1169 i2c_imx_dma_request(i2c_imx, phy_addr);
1171 if (ret < 0)
1172 goto del_adapter;
1173 1170
1174 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
1175 return 0; /* Return OK */ 1171 return 0; /* Return OK */
1176 1172
1177del_adapter:
1178 i2c_del_adapter(&i2c_imx->adapter);
1179clk_notifier_unregister: 1173clk_notifier_unregister:
1180 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); 1174 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
1181rpm_disable: 1175rpm_disable:
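The i2c-imx change demotes DMA setup from a hard requirement to an optional acceleration: the request function now returns void, and probe no longer unwinds when channels are unavailable, since the driver can always fall back to PIO. A compilable sketch of that shape, with the acquisition simulated (names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dev {
	void *dma;	/* NULL means "use PIO" */
};

/* Best effort: failure leaves dev->dma NULL and is not an error. */
static void dma_request_optional(struct dev *d, bool dma_available)
{
	if (!dma_available)
		return;		/* silently fall back to PIO */
	d->dma = malloc(16);
	if (d->dma)
		printf("using DMA for transfers\n");
}

static int probe(struct dev *d, bool dma_available)
{
	printf("adapter registered\n");
	dma_request_optional(d, dma_available);	/* no error to check */
	return 0;
}

int main(void)
{
	struct dev d = { 0 };

	probe(&d, false);	/* a PIO-only system still probes fine */
	free(d.dma);
	return 0;
}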
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 252edb433fdf..29eae1bf4f86 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = {
234 .max_num_msgs = 255, 234 .max_num_msgs = 255,
235}; 235};
236 236
237static const struct i2c_adapter_quirks mt8183_i2c_quirks = {
238 .flags = I2C_AQ_NO_ZERO_LEN,
239};
240
237static const struct mtk_i2c_compatible mt2712_compat = { 241static const struct mtk_i2c_compatible mt2712_compat = {
238 .regs = mt_i2c_regs_v1, 242 .regs = mt_i2c_regs_v1,
239 .pmic_i2c = 0, 243 .pmic_i2c = 0,
@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = {
298}; 302};
299 303
300static const struct mtk_i2c_compatible mt8183_compat = { 304static const struct mtk_i2c_compatible mt8183_compat = {
305 .quirks = &mt8183_i2c_quirks,
301 .regs = mt_i2c_regs_v2, 306 .regs = mt_i2c_regs_v2,
302 .pmic_i2c = 0, 307 .pmic_i2c = 0,
303 .dcm = 0, 308 .dcm = 0,
@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
870 875
871static u32 mtk_i2c_functionality(struct i2c_adapter *adap) 876static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
872{ 877{
873 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 878 if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
879 return I2C_FUNC_I2C |
880 (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
881 else
882 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
874} 883}
875 884
876static const struct i2c_algorithm mtk_i2c_algorithm = { 885static const struct i2c_algorithm mtk_i2c_algorithm = {
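The mt65xx hunk attaches an I2C_AQ_NO_ZERO_LEN quirk to MT8183 and masks SMBus Quick out of the advertised functionality, because a Quick command is a zero-length transfer. A standalone sketch of deriving a capability mask from a quirk flag (the flag values are illustrative, not the kernel's):

#include <stdio.h>

#define FUNC_I2C		0x1u
#define FUNC_SMBUS_QUICK	0x2u
#define FUNC_SMBUS_BYTE		0x4u
#define FUNC_SMBUS_EMUL		(FUNC_SMBUS_QUICK | FUNC_SMBUS_BYTE)

#define AQ_NO_ZERO_LEN		0x1u

static unsigned int functionality(unsigned int quirks)
{
	/* Quick is a zero-length transfer; drop it if those are banned. */
	if (quirks & AQ_NO_ZERO_LEN)
		return FUNC_I2C | (FUNC_SMBUS_EMUL & ~FUNC_SMBUS_QUICK);
	return FUNC_I2C | FUNC_SMBUS_EMUL;
}

int main(void)
{
	printf("plain:  %#x\n", functionality(0));
	printf("mt8183: %#x\n", functionality(AQ_NO_ZERO_LEN));
	return 0;
}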
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c46c4bddc7ca..cba325eb852f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -91,7 +91,7 @@
91#define SB800_PIIX4_PORT_IDX_MASK 0x06 91#define SB800_PIIX4_PORT_IDX_MASK 0x06
92#define SB800_PIIX4_PORT_IDX_SHIFT 1 92#define SB800_PIIX4_PORT_IDX_SHIFT 1
93 93
94/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ 94/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
95#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 95#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
96#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 96#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
97#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 97#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
@@ -358,18 +358,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
358 /* Find which register is used for port selection */ 358 /* Find which register is used for port selection */
359 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD || 359 if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD ||
360 PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) { 360 PIIX4_dev->vendor == PCI_VENDOR_ID_HYGON) {
361 switch (PIIX4_dev->device) { 361 if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
362 case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: 362 (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
363 PIIX4_dev->revision >= 0x1F)) {
363 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; 364 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
364 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; 365 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
365 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; 366 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
366 break; 367 } else {
367 case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
368 default:
369 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; 368 piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
370 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; 369 piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
371 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; 370 piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
372 break;
373 } 371 }
374 } else { 372 } else {
375 if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2, 373 if (!request_muxed_region(SB800_PIIX4_SMB_IDX, 2,
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index d39a4606f72d..531c01100b56 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -139,6 +139,7 @@ struct rcar_i2c_priv {
139 enum dma_data_direction dma_direction; 139 enum dma_data_direction dma_direction;
140 140
141 struct reset_control *rstc; 141 struct reset_control *rstc;
142 int irq;
142}; 143};
143 144
144#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) 145#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -861,9 +862,11 @@ static int rcar_unreg_slave(struct i2c_client *slave)
861 862
862 WARN_ON(!priv->slave); 863 WARN_ON(!priv->slave);
863 864
865 /* disable irqs and ensure none is running before clearing ptr */
864 rcar_i2c_write(priv, ICSIER, 0); 866 rcar_i2c_write(priv, ICSIER, 0);
865 rcar_i2c_write(priv, ICSCR, 0); 867 rcar_i2c_write(priv, ICSCR, 0);
866 868
869 synchronize_irq(priv->irq);
867 priv->slave = NULL; 870 priv->slave = NULL;
868 871
869 pm_runtime_put(rcar_i2c_priv_to_dev(priv)); 872 pm_runtime_put(rcar_i2c_priv_to_dev(priv));
@@ -918,7 +921,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
918 struct i2c_adapter *adap; 921 struct i2c_adapter *adap;
919 struct device *dev = &pdev->dev; 922 struct device *dev = &pdev->dev;
920 struct i2c_timings i2c_t; 923 struct i2c_timings i2c_t;
921 int irq, ret; 924 int ret;
922 925
923 /* Otherwise logic will break because some bytes must always use PIO */ 926 /* Otherwise logic will break because some bytes must always use PIO */
924 BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length"); 927 BUILD_BUG_ON_MSG(RCAR_MIN_DMA_LEN < 3, "Invalid min DMA length");
@@ -984,10 +987,10 @@ static int rcar_i2c_probe(struct platform_device *pdev)
984 pm_runtime_put(dev); 987 pm_runtime_put(dev);
985 988
986 989
987 irq = platform_get_irq(pdev, 0); 990 priv->irq = platform_get_irq(pdev, 0);
988 ret = devm_request_irq(dev, irq, rcar_i2c_irq, 0, dev_name(dev), priv); 991 ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
989 if (ret < 0) { 992 if (ret < 0) {
990 dev_err(dev, "cannot get irq %d\n", irq); 993 dev_err(dev, "cannot get irq %d\n", priv->irq);
991 goto out_pm_disable; 994 goto out_pm_disable;
992 } 995 }
993 996
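The R-Car slave-unregister fix stores the IRQ number so the handler can be quiesced with synchronize_irq() before priv->slave is cleared; otherwise a handler already in flight could dereference the pointer mid-teardown. A runnable pthread analogue of the ordering — stop new work, wait out in-flight work, only then clear the shared pointer:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool enabled = true;	/* stands in for the ICSIER enable bits */
static int *slave;		/* shared state the handler uses */

static void *handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (enabled && slave)		/* handler never runs once disabled */
		printf("handled event for slave %d\n", *slave);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;
	int dev = 42;

	slave = &dev;
	pthread_create(&irq, NULL, handler, NULL);

	/* Teardown: disable the source first ... */
	pthread_mutex_lock(&lock);
	enabled = false;
	pthread_mutex_unlock(&lock);

	/* ... wait for any in-flight handler (synchronize_irq analogue) ... */
	pthread_join(irq, NULL);

	/* ... and only then drop the pointer. */
	slave = NULL;
	return 0;
}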
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
index 868755f82f88..2c21893905a3 100644
--- a/drivers/i2c/busses/i2c-stm32.h
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1/* SPDX-License-Identifier: GPL-2.0 */
2/* 2/*
3 * i2c-stm32.h 3 * i2c-stm32.h
4 * 4 *
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index f26ed495d384..9c440fa6a3dd 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -832,7 +832,7 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
832 */ 832 */
833void i2c_unregister_device(struct i2c_client *client) 833void i2c_unregister_device(struct i2c_client *client)
834{ 834{
835 if (!client) 835 if (IS_ERR_OR_NULL(client))
836 return; 836 return;
837 837
838 if (client->dev.of_node) { 838 if (client->dev.of_node) {
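The core change makes i2c_unregister_device() tolerate ERR_PTR values as well as NULL, so callers can pass through whatever a failed creation returned without their own checks. A userspace re-creation of the ERR_PTR encoding — small negative errnos stored at the top of the pointer range, mirroring the kernel's scheme:

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void unregister_device(void *client)
{
	if (IS_ERR_OR_NULL(client))	/* NULL and ERR_PTR both no-ops */
		return;
	printf("unregistering %p\n", client);
}

int main(void)
{
	int real_device;

	unregister_device(NULL);		/* ignored */
	unregister_device(ERR_PTR(-19));	/* -ENODEV, also ignored */
	unregister_device(&real_device);	/* actually unregisters */
	return 0;
}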
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 0e3c6529fc4c..da073d72f649 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -480,7 +480,7 @@ static int max9611_init(struct max9611_dev *max9611)
480 if (ret) 480 if (ret)
481 return ret; 481 return ret;
482 482
483 regval = ret & MAX9611_TEMP_MASK; 483 regval &= MAX9611_TEMP_MASK;
484 484
485 if ((regval > MAX9611_TEMP_MAX_POS && 485 if ((regval > MAX9611_TEMP_MAX_POS &&
486 regval < MAX9611_TEMP_MIN_NEG) || 486 regval < MAX9611_TEMP_MIN_NEG) ||
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index e48f15cc9ab5..ff82863cbf42 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -276,11 +276,11 @@ static int adf4371_set_freq(struct adf4371_state *st, unsigned long long freq,
276 st->buf[0] = st->integer >> 8; 276 st->buf[0] = st->integer >> 8;
277 st->buf[1] = 0x40; /* REG12 default */ 277 st->buf[1] = 0x40; /* REG12 default */
278 st->buf[2] = 0x00; 278 st->buf[2] = 0x00;
279 st->buf[3] = st->fract2 & 0xFF; 279 st->buf[3] = st->fract1 & 0xFF;
280 st->buf[4] = st->fract2 >> 7; 280 st->buf[4] = st->fract1 >> 8;
281 st->buf[5] = st->fract2 >> 15; 281 st->buf[5] = st->fract1 >> 16;
282 st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) | 282 st->buf[6] = ADF4371_FRAC2WORD_L(st->fract2 & 0x7F) |
283 ADF4371_FRAC1WORD(st->fract1 >> 23); 283 ADF4371_FRAC1WORD(st->fract1 >> 24);
284 st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7); 284 st->buf[7] = ADF4371_FRAC2WORD_H(st->fract2 >> 7);
285 st->buf[8] = st->mod2 & 0xFF; 285 st->buf[8] = st->mod2 & 0xFF;
286 st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8); 286 st->buf[9] = ADF4371_MOD2WORD(st->mod2 >> 8);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 19f1730a4f24..a68d0ccf67a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
4724 if (ret) 4724 if (ret)
4725 goto err; 4725 goto err;
4726 4726
4727 cma_configfs_init(); 4727 ret = cma_configfs_init();
4728 if (ret)
4729 goto err_ib;
4728 4730
4729 return 0; 4731 return 0;
4730 4732
4733err_ib:
4734 ib_unregister_client(&cma_client);
4731err: 4735err:
4732 unregister_netdevice_notifier(&cma_nb); 4736 unregister_netdevice_notifier(&cma_nb);
4733 ib_sa_unregister_client(&sa_client); 4737 ib_sa_unregister_client(&sa_client);
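cma_init() previously ignored the return value of cma_configfs_init(); the fix checks it and unwinds the already-registered ib client before falling through to the pre-existing error labels. A compact sketch of that reverse-order unwinding (the step names are hypothetical):

#include <stdio.h>

static int step_a(void) { printf("A up\n"); return 0; }
static int step_b(void) { printf("B up\n"); return 0; }
static int step_c(void) { printf("C failed\n"); return -1; }

static void undo_a(void) { printf("A down\n"); }
static void undo_b(void) { printf("B down\n"); }

static int init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err;
	ret = step_b();
	if (ret)
		goto err_a;
	ret = step_c();		/* now checked, unlike before the fix */
	if (ret)
		goto err_b;
	return 0;

err_b:
	undo_b();		/* unwind strictly in reverse order */
err_a:
	undo_a();
err:
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;
}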
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 45d5164e9574..af8c85d18e62 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -38,6 +38,9 @@ int rdma_counter_set_auto_mode(struct ib_device *dev, u8 port,
38 int ret; 38 int ret;
39 39
40 port_counter = &dev->port_data[port].port_counter; 40 port_counter = &dev->port_data[port].port_counter;
41 if (!port_counter->hstats)
42 return -EOPNOTSUPP;
43
41 mutex_lock(&port_counter->lock); 44 mutex_lock(&port_counter->lock);
42 if (on) { 45 if (on) {
43 ret = __counter_set_mode(&port_counter->mode, 46 ret = __counter_set_mode(&port_counter->mode,
@@ -146,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
146 struct auto_mode_param *param = &counter->mode.param; 149 struct auto_mode_param *param = &counter->mode.param;
147 bool match = true; 150 bool match = true;
148 151
149 if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res)) 152 if (!rdma_is_visible_in_pid_ns(&qp->res))
150 return false; 153 return false;
151 154
152 /* Ensure that counter belong to right PID */ 155 /* Ensure that counter belongs to the right PID */
153 if (!rdma_is_kernel_res(&counter->res) && 156 if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
154 !rdma_is_kernel_res(&qp->res) &&
155 (task_pid_vnr(counter->res.task) != current->pid))
156 return false; 157 return false;
157 158
158 if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE) 159 if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)
@@ -421,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
421 return qp; 422 return qp;
422 423
423err: 424err:
424 rdma_restrack_put(&qp->res); 425 rdma_restrack_put(res);
425 return NULL; 426 return NULL;
426} 427}
427 428
@@ -509,6 +510,9 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
509 if (!rdma_is_port_valid(dev, port)) 510 if (!rdma_is_port_valid(dev, port))
510 return -EINVAL; 511 return -EINVAL;
511 512
513 if (!dev->port_data[port].port_counter.hstats)
514 return -EOPNOTSUPP;
515
512 qp = rdma_counter_get_qp(dev, qp_num); 516 qp = rdma_counter_get_qp(dev, qp_num);
513 if (!qp) 517 if (!qp)
514 return -ENOENT; 518 return -ENOENT;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 783e465e7c41..020c26976558 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) { 382 for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
383 if (!names[i]) 383 if (!names[i])
384 continue; 384 continue;
385 curr = rdma_restrack_count(device, i, 385 curr = rdma_restrack_count(device, i);
386 task_active_pid_ns(current));
387 ret = fill_res_info_entry(msg, names[i], curr); 386 ret = fill_res_info_entry(msg, names[i], curr);
388 if (ret) 387 if (ret)
389 goto err; 388 goto err;
@@ -1952,12 +1951,16 @@ static int stat_get_doit_qp(struct sk_buff *skb, struct nlmsghdr *nlh,
1952 1951
1953 if (fill_nldev_handle(msg, device) || 1952 if (fill_nldev_handle(msg, device) ||
1954 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || 1953 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
1955 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) 1954 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) {
1955 ret = -EMSGSIZE;
1956 goto err_msg; 1956 goto err_msg;
1957 }
1957 1958
1958 if ((mode == RDMA_COUNTER_MODE_AUTO) && 1959 if ((mode == RDMA_COUNTER_MODE_AUTO) &&
1959 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) 1960 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) {
1961 ret = -EMSGSIZE;
1960 goto err_msg; 1962 goto err_msg;
1963 }
1961 1964
1962 nlmsg_end(msg, nlh); 1965 nlmsg_end(msg, nlh);
1963 ib_device_put(device); 1966 ib_device_put(device);
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index bddff426ee0f..a07665f7ef8c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
107 * rdma_restrack_count() - the current usage of specific object 107 * rdma_restrack_count() - the current usage of specific object
108 * @dev: IB device 108 * @dev: IB device
109 * @type: actual type of object to operate 109 * @type: actual type of object to operate
110 * @ns: PID namespace
111 */ 110 */
112int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type, 111int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
113 struct pid_namespace *ns)
114{ 112{
115 struct rdma_restrack_root *rt = &dev->res[type]; 113 struct rdma_restrack_root *rt = &dev->res[type];
116 struct rdma_restrack_entry *e; 114 struct rdma_restrack_entry *e;
@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
119 117
120 xa_lock(&rt->xa); 118 xa_lock(&rt->xa);
121 xas_for_each(&xas, e, U32_MAX) { 119 xas_for_each(&xas, e, U32_MAX) {
122 if (ns == &init_pid_ns || 120 if (!rdma_is_visible_in_pid_ns(e))
123 (!rdma_is_kernel_res(e) && 121 continue;
124 ns == task_active_pid_ns(e->task))) 122 cnt++;
125 cnt++;
126 } 123 }
127 xa_unlock(&rt->xa); 124 xa_unlock(&rt->xa);
128 return cnt; 125 return cnt;
@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
360 */ 357 */
361 if (rdma_is_kernel_res(res)) 358 if (rdma_is_kernel_res(res))
362 return task_active_pid_ns(current) == &init_pid_ns; 359 return task_active_pid_ns(current) == &init_pid_ns;
363 return task_active_pid_ns(current) == task_active_pid_ns(res->task); 360
 361 /* A PID of 0 means the resource is not visible in the current namespace */
362 return task_pid_vnr(res->task);
364} 363}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 08da840ed7ee..56553668256f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);
379 379
380int ib_umem_page_count(struct ib_umem *umem) 380int ib_umem_page_count(struct ib_umem *umem)
381{ 381{
382 int i; 382 int i, n = 0;
383 int n;
384 struct scatterlist *sg; 383 struct scatterlist *sg;
385 384
386 if (umem->is_odp)
387 return ib_umem_num_pages(umem);
388
389 n = 0;
390 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) 385 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
391 n += sg_dma_len(sg) >> PAGE_SHIFT; 386 n += sg_dma_len(sg) >> PAGE_SHIFT;
392 387
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2a75c6f8d827..c0e15db34680 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -112,10 +112,6 @@ static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
112 * prevent any further fault handling on this MR. 112 * prevent any further fault handling on this MR.
113 */ 113 */
114 ib_umem_notifier_start_account(umem_odp); 114 ib_umem_notifier_start_account(umem_odp);
115 umem_odp->dying = 1;
116 /* Make sure that the fact the umem is dying is out before we release
117 * all pending page faults. */
118 smp_wmb();
119 complete_all(&umem_odp->notifier_completion); 115 complete_all(&umem_odp->notifier_completion);
120 umem_odp->umem.context->invalidate_range( 116 umem_odp->umem.context->invalidate_range(
121 umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp)); 117 umem_odp, ib_umem_start(umem_odp), ib_umem_end(umem_odp));
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 48b04d2f175f..60c8f76aab33 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
136 spin_unlock_irqrestore(&cmdq->lock, flags); 136 spin_unlock_irqrestore(&cmdq->lock, flags);
137 return -EBUSY; 137 return -EBUSY;
138 } 138 }
139
140 size = req->cmd_size;
141 /* change the cmd_size to the number of 16byte cmdq unit.
142 * req->cmd_size is modified here
143 */
144 bnxt_qplib_set_cmd_slots(req);
145
139 memset(resp, 0, sizeof(*resp)); 146 memset(resp, 0, sizeof(*resp));
140 crsqe->resp = (struct creq_qp_event *)resp; 147 crsqe->resp = (struct creq_qp_event *)resp;
141 crsqe->resp->cookie = req->cookie; 148 crsqe->resp->cookie = req->cookie;
@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
150 157
151 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 158 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
152 preq = (u8 *)req; 159 preq = (u8 *)req;
153 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
154 do { 160 do {
155 /* Locate the next cmdq slot */ 161 /* Locate the next cmdq slot */
156 sw_prod = HWQ_CMP(cmdq->prod, cmdq); 162 sw_prod = HWQ_CMP(cmdq->prod, cmdq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 2138533bb642..dfeadc192e17 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -55,9 +55,7 @@
55 do { \ 55 do { \
56 memset(&(req), 0, sizeof((req))); \ 56 memset(&(req), 0, sizeof((req))); \
57 (req).opcode = CMDQ_BASE_OPCODE_##CMD; \ 57 (req).opcode = CMDQ_BASE_OPCODE_##CMD; \
58 (req).cmd_size = (sizeof((req)) + \ 58 (req).cmd_size = sizeof((req)); \
59 BNXT_QPLIB_CMDQE_UNITS - 1) / \
60 BNXT_QPLIB_CMDQE_UNITS; \
61 (req).flags = cpu_to_le16(cmd_flags); \ 59 (req).flags = cpu_to_le16(cmd_flags); \
62 } while (0) 60 } while (0)
63 61
@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
95 BNXT_QPLIB_CMDQE_UNITS); 93 BNXT_QPLIB_CMDQE_UNITS);
96} 94}
97 95
 96/* Round the cmd_size up to a whole number of CMDQE units */
97static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
98{
99 req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
100 BNXT_QPLIB_CMDQE_UNITS;
101}
102
98#define MAX_CMDQ_IDX(depth) ((depth) - 1) 103#define MAX_CMDQ_IDX(depth) ((depth) - 1)
99 104
100static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth) 105static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
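The bnxt_re change moves the unit conversion out of the request-init macro into bnxt_qplib_set_cmd_slots(), so req->cmd_size first carries the byte size (captured into `size`) and is only then rounded up to 16-byte cmdq slots. The rounding itself is ordinary ceiling division; a minimal check, assuming 16-byte units as the driver's comment indicates:

#include <stdio.h>

#define CMDQE_UNITS	16u	/* assumed slot size in bytes */

/* Ceiling division: how many whole slots cover `bytes`? */
static unsigned int cmd_slots(unsigned int bytes)
{
	return (bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
}

int main(void)
{
	printf("%u -> %u slots\n", 1u, cmd_slots(1));	/* 1 */
	printf("%u -> %u slots\n", 16u, cmd_slots(16));	/* 1 */
	printf("%u -> %u slots\n", 17u, cmd_slots(17));	/* 2 */
	return 0;
}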
diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c
index 93613e5def9b..986c12153e62 100644
--- a/drivers/infiniband/hw/hfi1/fault.c
+++ b/drivers/infiniband/hw/hfi1/fault.c
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
141 if (!data) 141 if (!data)
142 return -ENOMEM; 142 return -ENOMEM;
143 copy = min(len, datalen - 1); 143 copy = min(len, datalen - 1);
144 if (copy_from_user(data, buf, copy)) 144 if (copy_from_user(data, buf, copy)) {
145 return -EFAULT; 145 ret = -EFAULT;
146 goto free_data;
147 }
146 148
147 ret = debugfs_file_get(file->f_path.dentry); 149 ret = debugfs_file_get(file->f_path.dentry);
148 if (unlikely(ret)) 150 if (unlikely(ret))
149 return ret; 151 goto free_data;
150 ptr = data; 152 ptr = data;
151 token = ptr; 153 token = ptr;
152 for (ptr = data; *ptr; ptr = end + 1, token = ptr) { 154 for (ptr = data; *ptr; ptr = end + 1, token = ptr) {
@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
195 ret = len; 197 ret = len;
196 198
197 debugfs_file_put(file->f_path.dentry); 199 debugfs_file_put(file->f_path.dentry);
200free_data:
198 kfree(data); 201 kfree(data);
199 return ret; 202 return ret;
200} 203}
@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
214 return -ENOMEM; 217 return -ENOMEM;
215 ret = debugfs_file_get(file->f_path.dentry); 218 ret = debugfs_file_get(file->f_path.dentry);
216 if (unlikely(ret)) 219 if (unlikely(ret))
217 return ret; 220 goto free_data;
218 bit = find_first_bit(fault->opcodes, bitsize); 221 bit = find_first_bit(fault->opcodes, bitsize);
219 while (bit < bitsize) { 222 while (bit < bitsize) {
220 zero = find_next_zero_bit(fault->opcodes, bitsize, bit); 223 zero = find_next_zero_bit(fault->opcodes, bitsize, bit);
@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
232 data[size - 1] = '\n'; 235 data[size - 1] = '\n';
233 data[size] = '\0'; 236 data[size] = '\0';
234 ret = simple_read_from_buffer(buf, len, pos, data, size); 237 ret = simple_read_from_buffer(buf, len, pos, data, size);
238free_data:
235 kfree(data); 239 kfree(data);
236 return ret; 240 return ret;
237} 241}
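Both hfi1 fault debugfs paths leaked `data` on early returns; the fix routes every failure through a free_data label. A self-contained illustration of that single-exit cleanup style, with the failing copy simulated (the helper is a hypothetical stand-in for copy_from_user):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simulated user copy that can fail (hypothetical stand-in). */
static int copy_in(char *dst, const char *src, size_t n, int fail)
{
	if (fail)
		return -1;
	memcpy(dst, src, n);
	return 0;
}

static long opcodes_write(const char *buf, size_t len, int fail)
{
	long ret;
	char *data = calloc(1, len + 1);

	if (!data)
		return -12;		/* -ENOMEM */

	if (copy_in(data, buf, len, fail)) {
		ret = -14;		/* -EFAULT */
		goto free_data;		/* no longer leaks `data` */
	}

	printf("parsed: %s\n", data);
	ret = (long)len;

free_data:
	free(data);
	return ret;
}

int main(void)
{
	opcodes_write("1,2,3", 5, 0);
	opcodes_write("1,2,3", 5, 1);	/* failure path also frees */
	return 0;
}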
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 996fc298207e..6141f4edc6bf 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
2574 hfi1_kern_clear_hw_flow(priv->rcd, qp); 2574 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2575} 2575}
2576 2576
2577static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd, 2577static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
2578 struct hfi1_packet *packet, u8 rcv_type,
2579 u8 opcode)
2580{ 2578{
2581 struct rvt_qp *qp = packet->qp; 2579 struct rvt_qp *qp = packet->qp;
2582 struct hfi1_qp_priv *qpriv = qp->priv;
2583 u32 ipsn;
2584 struct ib_other_headers *ohdr = packet->ohdr;
2585 struct rvt_ack_entry *e;
2586 struct tid_rdma_request *req;
2587 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2588 u32 i;
2589 2580
2590 if (rcv_type >= RHF_RCV_TYPE_IB) 2581 if (rcv_type >= RHF_RCV_TYPE_IB)
2591 goto done; 2582 goto done;
@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
2602 if (rcv_type == RHF_RCV_TYPE_EAGER) { 2593 if (rcv_type == RHF_RCV_TYPE_EAGER) {
2603 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1); 2594 hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
2604 hfi1_schedule_send(qp); 2595 hfi1_schedule_send(qp);
2605 goto done_unlock;
2606 }
2607
2608 /*
2609 * For TID READ response, error out QP after freeing the tid
2610 * resources.
2611 */
2612 if (opcode == TID_OP(READ_RESP)) {
2613 ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
2614 if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
2615 cmp_psn(ipsn, qp->s_psn) < 0) {
2616 hfi1_kern_read_tid_flow_free(qp);
2617 spin_unlock(&qp->s_lock);
2618 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2619 goto done;
2620 }
2621 goto done_unlock;
2622 }
2623
2624 /*
2625 * Error out the qp for TID RDMA WRITE
2626 */
2627 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
2628 for (i = 0; i < rvt_max_atomic(rdi); i++) {
2629 e = &qp->s_ack_queue[i];
2630 if (e->opcode == TID_OP(WRITE_REQ)) {
2631 req = ack_to_tid_req(e);
2632 hfi1_kern_exp_rcv_clear_all(req);
2633 }
2634 } 2596 }
2635 spin_unlock(&qp->s_lock);
2636 rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
2637 goto done;
2638 2597
2639done_unlock: 2598 /* Since no payload is delivered, just drop the packet */
2640 spin_unlock(&qp->s_lock); 2599 spin_unlock(&qp->s_lock);
2641done: 2600done:
2642 return true; 2601 return true;
@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2687 u32 fpsn; 2646 u32 fpsn;
2688 2647
2689 lockdep_assert_held(&qp->r_lock); 2648 lockdep_assert_held(&qp->r_lock);
2649 spin_lock(&qp->s_lock);
2690 /* If the psn is out of valid range, drop the packet */ 2650 /* If the psn is out of valid range, drop the packet */
2691 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 || 2651 if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
2692 cmp_psn(ibpsn, qp->s_psn) > 0) 2652 cmp_psn(ibpsn, qp->s_psn) > 0)
2693 return ret; 2653 goto s_unlock;
2694 2654
2695 spin_lock(&qp->s_lock);
2696 /* 2655 /*
2697 * Note that NAKs implicitly ACK outstanding SEND and RDMA write 2656 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
2698 * requests and implicitly NAK RDMA read and atomic requests issued 2657 * requests and implicitly NAK RDMA read and atomic requests issued
@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2740 2699
2741 wqe = do_rc_completion(qp, wqe, ibp); 2700 wqe = do_rc_completion(qp, wqe, ibp);
2742 if (qp->s_acked == qp->s_tail) 2701 if (qp->s_acked == qp->s_tail)
2743 break; 2702 goto s_unlock;
2744 } 2703 }
2745 2704
2705 if (qp->s_acked == qp->s_tail)
2706 goto s_unlock;
2707
2746 /* Handle the eflags for the request */ 2708 /* Handle the eflags for the request */
2747 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ) 2709 if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
2748 goto s_unlock; 2710 goto s_unlock;
@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2922 if (lnh == HFI1_LRH_GRH) 2884 if (lnh == HFI1_LRH_GRH)
2923 goto r_unlock; 2885 goto r_unlock;
2924 2886
2925 if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode)) 2887 if (tid_rdma_tid_err(packet, rcv_type))
2926 goto r_unlock; 2888 goto r_unlock;
2927 } 2889 }
2928 2890
@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2942 */ 2904 */
2943 spin_lock(&qp->s_lock); 2905 spin_lock(&qp->s_lock);
2944 qpriv = qp->priv; 2906 qpriv = qp->priv;
2907 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
2908 qpriv->r_tid_tail == qpriv->r_tid_head)
2909 goto unlock;
2945 e = &qp->s_ack_queue[qpriv->r_tid_tail]; 2910 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2911 if (e->opcode != TID_OP(WRITE_REQ))
2912 goto unlock;
2946 req = ack_to_tid_req(e); 2913 req = ack_to_tid_req(e);
2914 if (req->comp_seg == req->cur_seg)
2915 goto unlock;
2947 flow = &req->flows[req->clear_tail]; 2916 flow = &req->flows[req->clear_tail];
2948 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn); 2917 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2949 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn); 2918 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4509 struct rvt_swqe *wqe; 4478 struct rvt_swqe *wqe;
4510 struct tid_rdma_request *req; 4479 struct tid_rdma_request *req;
4511 struct tid_rdma_flow *flow; 4480 struct tid_rdma_flow *flow;
4512 u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn; 4481 u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
4513 unsigned long flags; 4482 unsigned long flags;
4514 u16 fidx; 4483 u16 fidx;
4515 4484
@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4538 ack_kpsn--; 4507 ack_kpsn--;
4539 } 4508 }
4540 4509
4510 if (unlikely(qp->s_acked == qp->s_tail))
4511 goto ack_op_err;
4512
4541 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); 4513 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
4542 4514
4543 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE) 4515 if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)
@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
4550 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow); 4522 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4551 4523
4552 /* Drop stale ACK/NAK */ 4524 /* Drop stale ACK/NAK */
4553 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0) 4525 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4526 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4554 goto ack_op_err; 4527 goto ack_op_err;
4555 4528
4556 while (cmp_psn(ack_kpsn, 4529 while (cmp_psn(ack_kpsn,
@@ -4712,7 +4685,12 @@ done:
4712 switch ((aeth >> IB_AETH_CREDIT_SHIFT) & 4685 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
4713 IB_AETH_CREDIT_MASK) { 4686 IB_AETH_CREDIT_MASK) {
4714 case 0: /* PSN sequence error */ 4687 case 0: /* PSN sequence error */
4688 if (!req->flows)
4689 break;
4715 flow = &req->flows[req->acked_tail]; 4690 flow = &req->flows[req->acked_tail];
4691 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4692 if (cmp_psn(psn, flpsn) > 0)
4693 break;
4716 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, 4694 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4717 flow); 4695 flow);
4718 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2])); 4696 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 68c951491a08..57079110af9b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1677,8 +1677,6 @@ tx_err:
1677 tx_buf_size, DMA_TO_DEVICE); 1677 tx_buf_size, DMA_TO_DEVICE);
1678 kfree(tun_qp->tx_ring[i].buf.addr); 1678 kfree(tun_qp->tx_ring[i].buf.addr);
1679 } 1679 }
1680 kfree(tun_qp->tx_ring);
1681 tun_qp->tx_ring = NULL;
1682 i = MLX4_NUM_TUNNEL_BUFS; 1680 i = MLX4_NUM_TUNNEL_BUFS;
1683err: 1681err:
1684 while (i > 0) { 1682 while (i > 0) {
@@ -1687,6 +1685,8 @@ err:
1687 rx_buf_size, DMA_FROM_DEVICE); 1685 rx_buf_size, DMA_FROM_DEVICE);
1688 kfree(tun_qp->ring[i].addr); 1686 kfree(tun_qp->ring[i].addr);
1689 } 1687 }
1688 kfree(tun_qp->tx_ring);
1689 tun_qp->tx_ring = NULL;
1690 kfree(tun_qp->ring); 1690 kfree(tun_qp->ring);
1691 tun_qp->ring = NULL; 1691 tun_qp->ring = NULL;
1692 return -ENOMEM; 1692 return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ec4370f99381..af5bbb35c058 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2026,7 +2026,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
2026 event_sub->eventfd = 2026 event_sub->eventfd =
2027 eventfd_ctx_fdget(redirect_fd); 2027 eventfd_ctx_fdget(redirect_fd);
2028 2028
2029 if (IS_ERR(event_sub)) { 2029 if (IS_ERR(event_sub->eventfd)) {
2030 err = PTR_ERR(event_sub->eventfd); 2030 err = PTR_ERR(event_sub->eventfd);
2031 event_sub->eventfd = NULL; 2031 event_sub->eventfd = NULL;
2032 goto err; 2032 goto err;
@@ -2644,12 +2644,13 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
2644 struct devx_async_event_file *ev_file = filp->private_data; 2644 struct devx_async_event_file *ev_file = filp->private_data;
2645 struct devx_event_subscription *event_sub, *event_sub_tmp; 2645 struct devx_event_subscription *event_sub, *event_sub_tmp;
2646 struct devx_async_event_data *entry, *tmp; 2646 struct devx_async_event_data *entry, *tmp;
2647 struct mlx5_ib_dev *dev = ev_file->dev;
2647 2648
2648 mutex_lock(&ev_file->dev->devx_event_table.event_xa_lock); 2649 mutex_lock(&dev->devx_event_table.event_xa_lock);
2649 /* delete the subscriptions which are related to this FD */ 2650 /* delete the subscriptions which are related to this FD */
2650 list_for_each_entry_safe(event_sub, event_sub_tmp, 2651 list_for_each_entry_safe(event_sub, event_sub_tmp,
2651 &ev_file->subscribed_events_list, file_list) { 2652 &ev_file->subscribed_events_list, file_list) {
2652 devx_cleanup_subscription(ev_file->dev, event_sub); 2653 devx_cleanup_subscription(dev, event_sub);
2653 if (event_sub->eventfd) 2654 if (event_sub->eventfd)
2654 eventfd_ctx_put(event_sub->eventfd); 2655 eventfd_ctx_put(event_sub->eventfd);
2655 2656
@@ -2658,7 +2659,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
2658 kfree_rcu(event_sub, rcu); 2659 kfree_rcu(event_sub, rcu);
2659 } 2660 }
2660 2661
2661 mutex_unlock(&ev_file->dev->devx_event_table.event_xa_lock); 2662 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2662 2663
2663 /* free the pending events allocation */ 2664 /* free the pending events allocation */
2664 if (!ev_file->omit_data) { 2665 if (!ev_file->omit_data) {
@@ -2670,7 +2671,7 @@ static int devx_async_event_close(struct inode *inode, struct file *filp)
2670 } 2671 }
2671 2672
2672 uverbs_close_fd(filp); 2673 uverbs_close_fd(filp);
2673 put_device(&ev_file->dev->ib_dev.dev); 2674 put_device(&dev->ib_dev.dev);
2674 return 0; 2675 return 0;
2675} 2676}
2676 2677
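The devx close path dereferenced ev_file->dev after uverbs_close_fd() may have dropped the last reference to ev_file; caching the device pointer in a local first keeps every later use safe. A small runnable model of that rule — copy out what you need before the container can be freed:

#include <stdio.h>
#include <stdlib.h>

struct device { int refs; };
struct event_file {
	struct device *dev;
};

static void close_fd(struct event_file *f)
{
	free(f);	/* may be the last reference to f */
}

static void event_close(struct event_file *f)
{
	struct device *dev = f->dev;	/* cache before f can go away */

	close_fd(f);
	dev->refs--;			/* safe: uses the local copy */
	printf("device refs now %d\n", dev->refs);
}

int main(void)
{
	struct device d = { .refs = 1 };
	struct event_file *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->dev = &d;
	event_close(f);
	return 0;
}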
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e12a4404096b..0569bcab02d4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 1023 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
1024 1024
1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 1025 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
1026 if (MLX5_CAP_GEN(mdev, pg)) 1026 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 1027 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
1028 props->odp_caps = dev->odp_caps; 1028 props->odp_caps = dev->odp_caps;
1029 } 1029 }
@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN; 6139 dev->port[i].roce.last_port_state = IB_PORT_DOWN;
6140 } 6140 }
6141 6141
6142 mlx5_ib_internal_fill_odp_caps(dev);
6143
6142 err = mlx5_ib_init_multiport_master(dev); 6144 err = mlx5_ib_init_multiport_master(dev);
6143 if (err) 6145 if (err)
6144 return err; 6146 return err;
@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6563 6565
6564static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev) 6566static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6565{ 6567{
6566 mlx5_ib_internal_fill_odp_caps(dev);
6567
6568 return mlx5_ib_odp_init_one(dev); 6568 return mlx5_ib_odp_init_one(dev);
6569} 6569}
6570 6570
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index fe1a76d8531c..a40e0abf2338 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
57 int entry; 57 int entry;
58 58
59 if (umem->is_odp) { 59 if (umem->is_odp) {
60 unsigned int page_shift = to_ib_umem_odp(umem)->page_shift; 60 struct ib_umem_odp *odp = to_ib_umem_odp(umem);
61 unsigned int page_shift = odp->page_shift;
61 62
62 *ncont = ib_umem_page_count(umem); 63 *ncont = ib_umem_odp_num_pages(odp);
63 *count = *ncont << (page_shift - PAGE_SHIFT); 64 *count = *ncont << (page_shift - PAGE_SHIFT);
64 *shift = page_shift; 65 *shift = page_shift;
65 if (order) 66 if (order)
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f6a53455bf8b..9ae587b74b12 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
1475 bool dyn_bfreg); 1475 bool dyn_bfreg);
1476 1476
1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); 1477int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
1478
1479static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
1480 bool do_modify_atomic)
1481{
1482 if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
1483 return false;
1484
1485 if (do_modify_atomic &&
1486 MLX5_CAP_GEN(dev->mdev, atomic) &&
1487 MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
1488 return false;
1489
1490 return true;
1491}
1478#endif /* MLX5_IB_H */ 1492#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index b74fad08412f..3401f5f6792e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1293 if (err < 0) 1293 if (err < 0)
1294 return ERR_PTR(err); 1294 return ERR_PTR(err);
1295 1295
1296 use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) && 1296 use_umr = mlx5_ib_can_use_umr(dev, true);
1297 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
1298 !MLX5_CAP_GEN(dev->mdev, atomic));
1299 1297
1300 if (order <= mr_cache_max_order(dev) && use_umr) { 1298 if (order <= mr_cache_max_order(dev) && use_umr) {
1301 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, 1299 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1448 goto err; 1446 goto err;
1449 } 1447 }
1450 1448
1451 if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { 1449 if (!mlx5_ib_can_use_umr(dev, true) ||
1450 (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
1452 /* 1451 /*
1453 * UMR can't be used - MKey needs to be replaced. 1452 * UMR can't be used - MKey needs to be replaced.
1454 */ 1453 */
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 81da82050d05..0a59912a4cef 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
301 301
302 memset(caps, 0, sizeof(*caps)); 302 memset(caps, 0, sizeof(*caps));
303 303
304 if (!MLX5_CAP_GEN(dev->mdev, pg)) 304 if (!MLX5_CAP_GEN(dev->mdev, pg) ||
305 !mlx5_ib_can_use_umr(dev, true))
305 return; 306 return;
306 307
307 caps->general_caps = IB_ODP_SUPPORT; 308 caps->general_caps = IB_ODP_SUPPORT;
@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
355 356
356 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) && 357 if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
357 MLX5_CAP_GEN(dev->mdev, null_mkey) && 358 MLX5_CAP_GEN(dev->mdev, null_mkey) &&
358 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) 359 MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
360 !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
359 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT; 361 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
360 362
361 return; 363 return;
@@ -579,7 +581,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
579 u32 flags) 581 u32 flags)
580{ 582{
581 int npages = 0, current_seq, page_shift, ret, np; 583 int npages = 0, current_seq, page_shift, ret, np;
582 bool implicit = false;
583 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem); 584 struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
584 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; 585 bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
585 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH; 586 bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
@@ -594,7 +595,6 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
594 if (IS_ERR(odp)) 595 if (IS_ERR(odp))
595 return PTR_ERR(odp); 596 return PTR_ERR(odp);
596 mr = odp->private; 597 mr = odp->private;
597 implicit = true;
598 } else { 598 } else {
599 odp = odp_mr; 599 odp = odp_mr;
600 } 600 }
@@ -682,19 +682,15 @@ next_mr:
682 682
683out: 683out:
684 if (ret == -EAGAIN) { 684 if (ret == -EAGAIN) {
685 if (implicit || !odp->dying) { 685 unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
686 unsigned long timeout = 686
687 msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT); 687 if (!wait_for_completion_timeout(&odp->notifier_completion,
688 688 timeout)) {
689 if (!wait_for_completion_timeout( 689 mlx5_ib_warn(
690 &odp->notifier_completion, 690 dev,
691 timeout)) { 691 "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
692 mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n", 692 current_seq, odp->notifiers_seq,
693 current_seq, odp->notifiers_seq, odp->notifiers_count); 693 odp->notifiers_count);
694 }
695 } else {
696 /* The MR is being killed, kill the QP as well. */
697 ret = -EFAULT;
698 } 694 }
699 } 695 }
700 696
@@ -1628,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1628{ 1624{
1629 int ret = 0; 1625 int ret = 0;
1630 1626
1631 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) 1627 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1632 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops); 1628 return ret;
1629
1630 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1633 1631
1634 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) { 1632 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1635 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey); 1633 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1639,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1639 } 1637 }
1640 } 1638 }
1641 1639
1642 if (!MLX5_CAP_GEN(dev->mdev, pg))
1643 return ret;
1644
1645 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq); 1640 ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1646 1641
1647 return ret; 1642 return ret;
@@ -1649,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1649 1644
1650void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev) 1645void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1651{ 1646{
1652 if (!MLX5_CAP_GEN(dev->mdev, pg)) 1647 if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
1653 return; 1648 return;
1654 1649
1655 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq); 1650 mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 379328b2598f..72869ff4a334 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
4162 MLX5_IB_UMR_OCTOWORD; 4162 MLX5_IB_UMR_OCTOWORD;
4163} 4163}
4164 4164
4165static __be64 frwr_mkey_mask(void) 4165static __be64 frwr_mkey_mask(bool atomic)
4166{ 4166{
4167 u64 result; 4167 u64 result;
4168 4168
@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
4175 MLX5_MKEY_MASK_LW | 4175 MLX5_MKEY_MASK_LW |
4176 MLX5_MKEY_MASK_RR | 4176 MLX5_MKEY_MASK_RR |
4177 MLX5_MKEY_MASK_RW | 4177 MLX5_MKEY_MASK_RW |
4178 MLX5_MKEY_MASK_A |
4179 MLX5_MKEY_MASK_SMALL_FENCE | 4178 MLX5_MKEY_MASK_SMALL_FENCE |
4180 MLX5_MKEY_MASK_FREE; 4179 MLX5_MKEY_MASK_FREE;
4181 4180
4181 if (atomic)
4182 result |= MLX5_MKEY_MASK_A;
4183
4182 return cpu_to_be64(result); 4184 return cpu_to_be64(result);
4183} 4185}
4184 4186
@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
4204} 4206}
4205 4207
4206static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, 4208static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4207 struct mlx5_ib_mr *mr, u8 flags) 4209 struct mlx5_ib_mr *mr, u8 flags, bool atomic)
4208{ 4210{
4209 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4211 int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4210 4212
@@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
4212 4214
4213 umr->flags = flags; 4215 umr->flags = flags;
4214 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size)); 4216 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
4215 umr->mkey_mask = frwr_mkey_mask(); 4217 umr->mkey_mask = frwr_mkey_mask(atomic);
4216} 4218}
4217 4219
4218static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr) 4220static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4811{ 4813{
4812 struct mlx5_ib_mr *mr = to_mmr(wr->mr); 4814 struct mlx5_ib_mr *mr = to_mmr(wr->mr);
4813 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); 4815 struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
4816 struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
4814 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; 4817 int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
4815 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; 4818 bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
4819 bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
4816 u8 flags = 0; 4820 u8 flags = 0;
4817 4821
4822 if (!mlx5_ib_can_use_umr(dev, atomic)) {
4823 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4824 "Fast update of %s for MR is disabled\n",
4825 (MLX5_CAP_GEN(dev->mdev,
4826 umr_modify_entity_size_disabled)) ?
4827 "entity size" :
4828 "atomic access");
4829 return -EINVAL;
4830 }
4831
4818 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { 4832 if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
4819 mlx5_ib_warn(to_mdev(qp->ibqp.device), 4833 mlx5_ib_warn(to_mdev(qp->ibqp.device),
4820 "Invalid IB_SEND_INLINE send flag\n"); 4834 "Invalid IB_SEND_INLINE send flag\n");
@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
4826 if (umr_inline) 4840 if (umr_inline)
4827 flags |= MLX5_UMR_INLINE; 4841 flags |= MLX5_UMR_INLINE;
4828 4842
4829 set_reg_umr_seg(*seg, mr, flags); 4843 set_reg_umr_seg(*seg, mr, flags, atomic);
4830 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); 4844 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
4831 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; 4845 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
4832 handle_post_send_edge(&qp->sq, seg, *size, cur_edge); 4846 handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index dace276aea14..b622fc62f2cd 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -1,6 +1,6 @@
1config RDMA_SIW 1config RDMA_SIW
2 tristate "Software RDMA over TCP/IP (iWARP) driver" 2 tristate "Software RDMA over TCP/IP (iWARP) driver"
3 depends on INET && INFINIBAND && LIBCRC32C && 64BIT 3 depends on INET && INFINIBAND && LIBCRC32C
4 select DMA_VIRT_OPS 4 select DMA_VIRT_OPS
5 help 5 help
6 This driver implements the iWARP RDMA transport over 6 This driver implements the iWARP RDMA transport over
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 03fd7b2f595f..dba4535494ab 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -138,9 +138,9 @@ struct siw_umem {
138}; 138};
139 139
140struct siw_pble { 140struct siw_pble {
141 u64 addr; /* Address of assigned user buffer */ 141 dma_addr_t addr; /* Address of assigned buffer */
142 u64 size; /* Size of this entry */ 142 unsigned int size; /* Size of this entry */
143 u64 pbl_off; /* Total offset from start of PBL */ 143 unsigned long pbl_off; /* Total offset from start of PBL */
144}; 144};
145 145
146struct siw_pbl { 146struct siw_pbl {
@@ -214,7 +214,7 @@ struct siw_wqe {
214struct siw_cq { 214struct siw_cq {
215 struct ib_cq base_cq; 215 struct ib_cq base_cq;
216 spinlock_t lock; 216 spinlock_t lock;
217 u64 *notify; 217 struct siw_cq_ctrl *notify;
218 struct siw_cqe *queue; 218 struct siw_cqe *queue;
219 u32 cq_put; 219 u32 cq_put;
220 u32 cq_get; 220 u32 cq_get;
@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__) 734 "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
735 735
736#define siw_dbg_cep(cep, fmt, ...) \ 736#define siw_dbg_cep(cep, fmt, ...) \
737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt, \ 737 ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt, \
738 cep, __func__, ##__VA_ARGS__) 738 cep, __func__, ##__VA_ARGS__)
739 739
740void siw_cq_flush(struct siw_cq *cq); 740void siw_cq_flush(struct siw_cq *cq);
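Several siw debug strings printed raw kernel pointers; the series drops them or switches to %pK, which the kernel hashes or blanks depending on kptr_restrict. A userspace approximation of the idea — log a stable opaque token derived from the pointer rather than the address itself (the mixing function is illustrative; the kernel uses a boot-time keyed hash):

#include <stdint.h>
#include <stdio.h>

/* Illustrative mixer, not the kernel's algorithm. */
static uint32_t ptr_token(const void *p)
{
	uintptr_t v = (uintptr_t)p;

	v ^= v >> 33;
	v *= (uintptr_t)0x9e3779b97f4a7c15ull;
	return (uint32_t)(v >> 16);
}

struct cep { int state; };

#define dbg_cep(cep, fmt, ...) \
	printf("CEP[0x%08x] " fmt, ptr_token(cep), ##__VA_ARGS__)

int main(void)
{
	struct cep c = { .state = 3 };

	dbg_cep(&c, "drop cep, state %d\n", c.state);	/* no raw address */
	return 0;
}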
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 9ce8a1b925d2..8c1931a57f4a 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
355 getname_local(cep->sock, &event.local_addr); 355 getname_local(cep->sock, &event.local_addr);
356 getname_peer(cep->sock, &event.remote_addr); 356 getname_peer(cep->sock, &event.remote_addr);
357 } 357 }
358 siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n", 358 siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
359 cep->qp ? qp_id(cep->qp) : -1, id, reason, status); 359 cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);
360 360
361 return id->event_handler(id, &event); 361 return id->event_handler(id, &event);
362} 362}
@@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
947 siw_cep_get(new_cep); 947 siw_cep_get(new_cep);
948 new_s->sk->sk_user_data = new_cep; 948 new_s->sk->sk_user_data = new_cep;
949 949
950 siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);
951
952 if (siw_tcp_nagle == false) { 950 if (siw_tcp_nagle == false) {
953 int val = 1; 951 int val = 1;
954 952
@@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
1011 cep = work->cep; 1009 cep = work->cep;
1012 1010
1013 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n", 1011 siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
1014 cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state); 1012 cep->qp ? qp_id(cep->qp) : UINT_MAX,
1013 work->type, cep->state);
1015 1014
1016 siw_cep_set_inuse(cep); 1015 siw_cep_set_inuse(cep);
1017 1016
@@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
1145 } 1144 }
1146 if (release_cep) { 1145 if (release_cep) {
1147 siw_dbg_cep(cep, 1146 siw_dbg_cep(cep,
1148 "release: timer=%s, QP[%u], id 0x%p\n", 1147 "release: timer=%s, QP[%u]\n",
1149 cep->mpa_timer ? "y" : "n", 1148 cep->mpa_timer ? "y" : "n",
1150 cep->qp ? qp_id(cep->qp) : -1, cep->cm_id); 1149 cep->qp ? qp_id(cep->qp) : UINT_MAX);
1151 1150
1152 siw_cancel_mpatimer(cep); 1151 siw_cancel_mpatimer(cep);
1153 1152
@@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
1211 else 1210 else
1212 delay = MPAREP_TIMEOUT; 1211 delay = MPAREP_TIMEOUT;
1213 } 1212 }
1214 siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n", 1213 siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
1215 cep->qp ? qp_id(cep->qp) : -1, type, work, delay); 1214 cep->qp ? qp_id(cep->qp) : -1, type, delay);
1216 1215
1217 queue_delayed_work(siw_cm_wq, &work->work, delay); 1216 queue_delayed_work(siw_cm_wq, &work->work, delay);
1218 1217
@@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1376 } 1375 }
1377 if (v4) 1376 if (v4)
1378 siw_dbg_qp(qp, 1377 siw_dbg_qp(qp,
1379 "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n", 1378 "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
1380 id, pd_len, 1379 pd_len,
1381 &((struct sockaddr_in *)(laddr))->sin_addr, 1380 &((struct sockaddr_in *)(laddr))->sin_addr,
1382 ntohs(((struct sockaddr_in *)(laddr))->sin_port), 1381 ntohs(((struct sockaddr_in *)(laddr))->sin_port),
1383 &((struct sockaddr_in *)(raddr))->sin_addr, 1382 &((struct sockaddr_in *)(raddr))->sin_addr,
1384 ntohs(((struct sockaddr_in *)(raddr))->sin_port)); 1383 ntohs(((struct sockaddr_in *)(raddr))->sin_port));
1385 else 1384 else
1386 siw_dbg_qp(qp, 1385 siw_dbg_qp(qp,
1387 "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n", 1386 "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
1388 id, pd_len, 1387 pd_len,
1389 &((struct sockaddr_in6 *)(laddr))->sin6_addr, 1388 &((struct sockaddr_in6 *)(laddr))->sin6_addr,
1390 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port), 1389 ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
1391 &((struct sockaddr_in6 *)(raddr))->sin6_addr, 1390 &((struct sockaddr_in6 *)(raddr))->sin6_addr,
@@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1508 if (rv >= 0) { 1507 if (rv >= 0) {
1509 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT); 1508 rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
1510 if (!rv) { 1509 if (!rv) {
1511 siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id, 1510 siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
1512 qp_id(qp));
1513 siw_cep_set_free(cep); 1511 siw_cep_set_free(cep);
1514 return 0; 1512 return 0;
1515 } 1513 }
1516 } 1514 }
1517error: 1515error:
1518 siw_dbg_qp(qp, "failed: %d\n", rv); 1516 siw_dbg(id->device, "failed: %d\n", rv);
1519 1517
1520 if (cep) { 1518 if (cep) {
1521 siw_socket_disassoc(s); 1519 siw_socket_disassoc(s);
@@ -1540,7 +1538,8 @@ error:
1540 } else if (s) { 1538 } else if (s) {
1541 sock_release(s); 1539 sock_release(s);
1542 } 1540 }
1543 siw_qp_put(qp); 1541 if (qp)
1542 siw_qp_put(qp);
1544 1543
1545 return rv; 1544 return rv;
1546} 1545}
@@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1580 siw_cancel_mpatimer(cep); 1579 siw_cancel_mpatimer(cep);
1581 1580
1582 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1581 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1583 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1582 siw_dbg_cep(cep, "out of state\n");
1584 1583
1585 siw_cep_set_free(cep); 1584 siw_cep_set_free(cep);
1586 siw_cep_put(cep); 1585 siw_cep_put(cep);
@@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1601 up_write(&qp->state_lock); 1600 up_write(&qp->state_lock);
1602 goto error; 1601 goto error;
1603 } 1602 }
1604 siw_dbg_cep(cep, "id 0x%p\n", id); 1603 siw_dbg_cep(cep, "[QP %d]\n", params->qpn);
1605 1604
1606 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) { 1605 if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
1607 siw_dbg_cep(cep, "peer allows GSO on TX\n"); 1606 siw_dbg_cep(cep, "peer allows GSO on TX\n");
@@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1611 params->ird > sdev->attrs.max_ird) { 1610 params->ird > sdev->attrs.max_ird) {
1612 siw_dbg_cep( 1611 siw_dbg_cep(
1613 cep, 1612 cep,
1614 "id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n", 1613 "[QP %u]: ord %d (max %d), ird %d (max %d)\n",
1615 id, qp_id(qp), params->ord, sdev->attrs.max_ord, 1614 qp_id(qp), params->ord, sdev->attrs.max_ord,
1616 params->ird, sdev->attrs.max_ird); 1615 params->ird, sdev->attrs.max_ird);
1617 rv = -EINVAL; 1616 rv = -EINVAL;
1618 up_write(&qp->state_lock); 1617 up_write(&qp->state_lock);
@@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1624 if (params->private_data_len > max_priv_data) { 1623 if (params->private_data_len > max_priv_data) {
1625 siw_dbg_cep( 1624 siw_dbg_cep(
1626 cep, 1625 cep,
1627 "id 0x%p, [QP %u]: private data length: %d (max %d)\n", 1626 "[QP %u]: private data length: %d (max %d)\n",
1628 id, qp_id(qp), params->private_data_len, max_priv_data); 1627 qp_id(qp), params->private_data_len, max_priv_data);
1629 rv = -EINVAL; 1628 rv = -EINVAL;
1630 up_write(&qp->state_lock); 1629 up_write(&qp->state_lock);
1631 goto error; 1630 goto error;
@@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1679 qp_attrs.flags = SIW_MPA_CRC; 1678 qp_attrs.flags = SIW_MPA_CRC;
1680 qp_attrs.state = SIW_QP_STATE_RTS; 1679 qp_attrs.state = SIW_QP_STATE_RTS;
1681 1680
1682 siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp)); 1681 siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));
1683 1682
1684 /* Associate QP with CEP */ 1683 /* Associate QP with CEP */
1685 siw_cep_get(cep); 1684 siw_cep_get(cep);
@@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
1700 if (rv) 1699 if (rv)
1701 goto error; 1700 goto error;
1702 1701
1703 siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n", 1702 siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
1704 id, qp_id(qp), params->private_data_len); 1703 qp_id(qp), params->private_data_len);
1705 1704
1706 rv = siw_send_mpareqrep(cep, params->private_data, 1705 rv = siw_send_mpareqrep(cep, params->private_data,
1707 params->private_data_len); 1706 params->private_data_len);
@@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
1759 siw_cancel_mpatimer(cep); 1758 siw_cancel_mpatimer(cep);
1760 1759
1761 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) { 1760 if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
1762 siw_dbg_cep(cep, "id 0x%p: out of state\n", id); 1761 siw_dbg_cep(cep, "out of state\n");
1763 1762
1764 siw_cep_set_free(cep); 1763 siw_cep_set_free(cep);
1765 siw_cep_put(cep); /* put last reference */ 1764 siw_cep_put(cep); /* put last reference */
1766 1765
1767 return -ECONNRESET; 1766 return -ECONNRESET;
1768 } 1767 }
1769 siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state, 1768 siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
1770 pd_len); 1769 pd_len);
1771 1770
1772 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) { 1771 if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
@@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1804 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val, 1803 rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
1805 sizeof(s_val)); 1804 sizeof(s_val));
1806 if (rv) { 1805 if (rv) {
1807 siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv); 1806 siw_dbg(id->device, "setsockopt error: %d\n", rv);
1808 goto error; 1807 goto error;
1809 } 1808 }
1810 rv = s->ops->bind(s, laddr, addr_family == AF_INET ? 1809 rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
1811 sizeof(struct sockaddr_in) : 1810 sizeof(struct sockaddr_in) :
1812 sizeof(struct sockaddr_in6)); 1811 sizeof(struct sockaddr_in6));
1813 if (rv) { 1812 if (rv) {
1814 siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv); 1813 siw_dbg(id->device, "socket bind error: %d\n", rv);
1815 goto error; 1814 goto error;
1816 } 1815 }
1817 cep = siw_cep_alloc(sdev); 1816 cep = siw_cep_alloc(sdev);
@@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
1824 rv = siw_cm_alloc_work(cep, backlog); 1823 rv = siw_cm_alloc_work(cep, backlog);
1825 if (rv) { 1824 if (rv) {
1826 siw_dbg(id->device, 1825 siw_dbg(id->device,
1827 "id 0x%p: alloc_work error %d, backlog %d\n", id, 1826 "alloc_work error %d, backlog %d\n",
1828 rv, backlog); 1827 rv, backlog);
1829 goto error; 1828 goto error;
1830 } 1829 }
1831 rv = s->ops->listen(s, backlog); 1830 rv = s->ops->listen(s, backlog);
1832 if (rv) { 1831 if (rv) {
1833 siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv); 1832 siw_dbg(id->device, "listen error %d\n", rv);
1834 goto error; 1833 goto error;
1835 } 1834 }
1836 cep->cm_id = id; 1835 cep->cm_id = id;
@@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)
1914 1913
1915 list_del(p); 1914 list_del(p);
1916 1915
1917 siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id, 1916 siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);
1918 cep->state);
1919 1917
1920 siw_cep_set_inuse(cep); 1918 siw_cep_set_inuse(cep);
1921 1919
@@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1952 struct net_device *dev = to_siw_dev(id->device)->netdev; 1950 struct net_device *dev = to_siw_dev(id->device)->netdev;
1953 int rv = 0, listeners = 0; 1951 int rv = 0, listeners = 0;
1954 1952
1955 siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog); 1953 siw_dbg(id->device, "backlog %d\n", backlog);
1956 1954
1957 /* 1955 /*
1958 * For each attached address of the interface, create a 1956 * For each attached address of the interface, create a
@@ -1964,12 +1962,16 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1964 struct sockaddr_in s_laddr, *s_raddr; 1962 struct sockaddr_in s_laddr, *s_raddr;
1965 const struct in_ifaddr *ifa; 1963 const struct in_ifaddr *ifa;
1966 1964
1965 if (!in_dev) {
1966 rv = -ENODEV;
1967 goto out;
1968 }
1967 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr)); 1969 memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
1968 s_raddr = (struct sockaddr_in *)&id->remote_addr; 1970 s_raddr = (struct sockaddr_in *)&id->remote_addr;
1969 1971
1970 siw_dbg(id->device, 1972 siw_dbg(id->device,
1971 "id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n", 1973 "laddr %pI4:%d, raddr %pI4:%d\n",
1972 id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port), 1974 &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
1973 &s_raddr->sin_addr, ntohs(s_raddr->sin_port)); 1975 &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
1974 1976
1975 rtnl_lock(); 1977 rtnl_lock();
@@ -1993,22 +1995,27 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
1993 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr), 1995 struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
1994 *s_raddr = &to_sockaddr_in6(id->remote_addr); 1996 *s_raddr = &to_sockaddr_in6(id->remote_addr);
1995 1997
1998 if (!in6_dev) {
1999 rv = -ENODEV;
2000 goto out;
2001 }
1996 siw_dbg(id->device, 2002 siw_dbg(id->device,
1997 "id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n", 2003 "laddr %pI6:%d, raddr %pI6:%d\n",
1998 id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port), 2004 &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
1999 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port)); 2005 &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
2000 2006
2001 read_lock_bh(&in6_dev->lock); 2007 rtnl_lock();
2002 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { 2008 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
2003 struct sockaddr_in6 bind_addr; 2009 if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
2004 2010 continue;
2005 if (ipv6_addr_any(&s_laddr->sin6_addr) || 2011 if (ipv6_addr_any(&s_laddr->sin6_addr) ||
2006 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) { 2012 ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
2007 bind_addr.sin6_family = AF_INET6; 2013 struct sockaddr_in6 bind_addr = {
2008 bind_addr.sin6_port = s_laddr->sin6_port; 2014 .sin6_family = AF_INET6,
2009 bind_addr.sin6_flowinfo = 0; 2015 .sin6_port = s_laddr->sin6_port,
2010 bind_addr.sin6_addr = ifp->addr; 2016 .sin6_flowinfo = 0,
2011 bind_addr.sin6_scope_id = dev->ifindex; 2017 .sin6_addr = ifp->addr,
2018 .sin6_scope_id = dev->ifindex };
2012 2019
2013 rv = siw_listen_address(id, backlog, 2020 rv = siw_listen_address(id, backlog,
2014 (struct sockaddr *)&bind_addr, 2021 (struct sockaddr *)&bind_addr,
@@ -2017,28 +2024,26 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
2017 listeners++; 2024 listeners++;
2018 } 2025 }
2019 } 2026 }
2020 read_unlock_bh(&in6_dev->lock); 2027 rtnl_unlock();
2021
2022 in6_dev_put(in6_dev); 2028 in6_dev_put(in6_dev);
2023 } else { 2029 } else {
2024 return -EAFNOSUPPORT; 2030 rv = -EAFNOSUPPORT;
2025 } 2031 }
2032out:
2026 if (listeners) 2033 if (listeners)
2027 rv = 0; 2034 rv = 0;
2028 else if (!rv) 2035 else if (!rv)
2029 rv = -EINVAL; 2036 rv = -EINVAL;
2030 2037
2031 siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK"); 2038 siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
2032 2039
2033 return rv; 2040 return rv;
2034} 2041}
2035 2042
2036int siw_destroy_listen(struct iw_cm_id *id) 2043int siw_destroy_listen(struct iw_cm_id *id)
2037{ 2044{
2038 siw_dbg(id->device, "id 0x%p\n", id);
2039
2040 if (!id->provider_data) { 2045 if (!id->provider_data) {
2041 siw_dbg(id->device, "id 0x%p: no cep(s)\n", id); 2046 siw_dbg(id->device, "no cep(s)\n");
2042 return 0; 2047 return 0;
2043 } 2048 }
2044 siw_drop_listeners(id); 2049 siw_drop_listeners(id);
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index e381ae9b7d62..d8db3bee9da7 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
71 wc->wc_flags = IB_WC_WITH_INVALIDATE; 71 wc->wc_flags = IB_WC_WITH_INVALIDATE;
72 } 72 }
73 wc->qp = cqe->base_qp; 73 wc->qp = cqe->base_qp;
74 siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n", 74 siw_dbg_cq(cq,
75 "idx %u, type %d, flags %2x, id 0x%pK\n",
75 cq->cq_get % cq->num_cqe, cqe->opcode, 76 cq->cq_get % cq->num_cqe, cqe->opcode,
76 cqe->flags, (void *)cqe->id); 77 cqe->flags, (void *)(uintptr_t)cqe->id);
77 } 78 }
78 WRITE_ONCE(cqe->flags, 0); 79 WRITE_ONCE(cqe->flags, 0);
79 cq->cq_get++; 80 cq->cq_get++;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index d0f140daf659..05a92f997f60 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -160,10 +160,8 @@ static int siw_init_cpulist(void)
160 160
161out_err: 161out_err:
162 siw_cpu_info.num_nodes = 0; 162 siw_cpu_info.num_nodes = 0;
163 while (i) { 163 while (--i >= 0)
164 kfree(siw_cpu_info.tx_valid_cpus[i]); 164 kfree(siw_cpu_info.tx_valid_cpus[i]);
165 siw_cpu_info.tx_valid_cpus[i--] = NULL;
166 }
167 kfree(siw_cpu_info.tx_valid_cpus); 165 kfree(siw_cpu_info.tx_valid_cpus);
168 siw_cpu_info.tx_valid_cpus = NULL; 166 siw_cpu_info.tx_valid_cpus = NULL;
169 167
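
The siw_init_cpulist() unwind above is the conventional reverse-order cleanup for a partially built pointer table: on failure at index i, exactly entries 0..i-1 were allocated, and "while (--i >= 0)" frees precisely those. A minimal userspace sketch of the same pattern, with invented names:

    #include <stdlib.h>

    /* Build a table of n sub-arrays; on failure, free only the
     * entries that were actually allocated, in reverse order.
     */
    static int **alloc_table(int n, int per_entry)
    {
        int **tbl = calloc(n, sizeof(*tbl));
        int i;

        if (!tbl)
            return NULL;

        for (i = 0; i < n; i++) {
            tbl[i] = calloc(per_entry, sizeof(**tbl));
            if (!tbl[i])
                goto out_err;
        }
        return tbl;

    out_err:
        while (--i >= 0)    /* entries 0..i-1 exist; i itself failed */
            free(tbl[i]);
        free(tbl);
        return NULL;
    }
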
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index 67171c82b0c4..87a56039f0ef 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
197 */ 197 */
198 if (addr < mem->va || addr + len > mem->va + mem->len) { 198 if (addr < mem->va || addr + len > mem->va + mem->len) {
199 siw_dbg_pd(pd, "MEM interval len %d\n", len); 199 siw_dbg_pd(pd, "MEM interval len %d\n", len);
200 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n", 200 siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
201 (unsigned long long)addr, 201 (void *)(uintptr_t)addr,
202 (unsigned long long)(addr + len)); 202 (void *)(uintptr_t)(addr + len));
203 siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n", 203 siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
204 (unsigned long long)mem->va, 204 (void *)(uintptr_t)mem->va,
205 (unsigned long long)(mem->va + mem->len), 205 (void *)(uintptr_t)(mem->va + mem->len),
206 mem->stag); 206 mem->stag);
207 207
208 return -E_BASE_BOUNDS; 208 return -E_BASE_BOUNDS;
@@ -330,7 +330,7 @@ out:
330 * Optionally, provides remaining len within current element, and 330 * Optionally, provides remaining len within current element, and
331 * current PBL index for later resume at same element. 331 * current PBL index for later resume at same element.
332 */ 332 */
333u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx) 333dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
334{ 334{
335 int i = idx ? *idx : 0; 335 int i = idx ? *idx : 0;
336 336
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index f43daf280891..db138c8423da 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -9,7 +9,7 @@
9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable); 9struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
10void siw_umem_release(struct siw_umem *umem, bool dirty); 10void siw_umem_release(struct siw_umem *umem, bool dirty);
11struct siw_pbl *siw_pbl_alloc(u32 num_buf); 11struct siw_pbl *siw_pbl_alloc(u32 num_buf);
12u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); 12dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); 13struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m); 14int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
15int siw_invalidate_stag(struct ib_pd *pd, u32 stag); 15int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index e27bd5b35b96..430314c8abd9 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -949,7 +949,7 @@ skip_irq:
949 rv = -EINVAL; 949 rv = -EINVAL;
950 goto out; 950 goto out;
951 } 951 }
952 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 952 wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
953 wqe->sqe.sge[0].lkey = 0; 953 wqe->sqe.sge[0].lkey = 0;
954 wqe->sqe.num_sge = 1; 954 wqe->sqe.num_sge = 1;
955 } 955 }
@@ -1013,18 +1013,24 @@ out:
1013 */ 1013 */
1014static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) 1014static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags)
1015{ 1015{
1016 u64 cq_notify; 1016 u32 cq_notify;
1017 1017
1018 if (!cq->base_cq.comp_handler) 1018 if (!cq->base_cq.comp_handler)
1019 return false; 1019 return false;
1020 1020
1021 cq_notify = READ_ONCE(*cq->notify); 1021 /* Read application shared notification state */
1022 cq_notify = READ_ONCE(cq->notify->flags);
1022 1023
1023 if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) || 1024 if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) ||
1024 ((cq_notify & SIW_NOTIFY_SOLICITED) && 1025 ((cq_notify & SIW_NOTIFY_SOLICITED) &&
1025 (flags & SIW_WQE_SOLICITED))) { 1026 (flags & SIW_WQE_SOLICITED))) {
1026 /* dis-arm CQ */ 1027 /*
1027 smp_store_mb(*cq->notify, SIW_NOTIFY_NOT); 1028 * CQ notification is one-shot: Since the
1029 * current CQE causes user notification,
1030 * the CQ gets dis-armed and must be re-armed
1031 * by the user for a new notification.
1032 */
1033 WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT);
1028 1034
1029 return true; 1035 return true;
1030 } 1036 }
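
The siw_cq_notify_now() rework reads the armed state once with READ_ONCE() and clears it with WRITE_ONCE(), keeping notification strictly one-shot. A stand-alone sketch of that test-and-disarm logic, with C11 atomics standing in for the kernel accessors and flag values invented for illustration:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NOTIFY_NOT       0u          /* illustrative values */
    #define NOTIFY_SOLICITED (1u << 0)
    #define NOTIFY_NEXT      (1u << 1)

    struct cq_ctrl {
        _Atomic uint32_t flags;          /* shared with the re-arming consumer */
    };

    /* Consumer: arm the CQ for the next event of the given kind. */
    static void cq_arm(struct cq_ctrl *ctrl, uint32_t mode)
    {
        atomic_store(&ctrl->flags, mode);
    }

    /* Producer: decide whether this completion raises an event.
     * Arming is one-shot: a successful check disarms the CQ, so the
     * consumer must call cq_arm() again before the next event fires.
     */
    static bool cq_notify_now(struct cq_ctrl *ctrl, bool solicited_wqe)
    {
        uint32_t f = atomic_load(&ctrl->flags);

        if ((f & NOTIFY_NEXT) ||
            ((f & NOTIFY_SOLICITED) && solicited_wqe)) {
            atomic_store(&ctrl->flags, NOTIFY_NOT);   /* disarm */
            return true;
        }
        return false;
    }
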
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index f87657a11657..c0a887240325 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
38 38
39 p = siw_get_upage(umem, dest_addr); 39 p = siw_get_upage(umem, dest_addr);
40 if (unlikely(!p)) { 40 if (unlikely(!p)) {
41 pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n", 41 pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
42 __func__, qp_id(rx_qp(srx)), 42 __func__, qp_id(rx_qp(srx)),
43 (void *)dest_addr, (void *)umem->fp_addr); 43 (void *)(uintptr_t)dest_addr,
44 (void *)(uintptr_t)umem->fp_addr);
44 /* siw internal error */ 45 /* siw internal error */
45 srx->skb_copied += copied; 46 srx->skb_copied += copied;
46 srx->skb_new -= copied; 47 srx->skb_new -= copied;
@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
50 pg_off = dest_addr & ~PAGE_MASK; 51 pg_off = dest_addr & ~PAGE_MASK;
51 bytes = min(len, (int)PAGE_SIZE - pg_off); 52 bytes = min(len, (int)PAGE_SIZE - pg_off);
52 53
53 siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes); 54 siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
54 55
55 dest = kmap_atomic(p); 56 dest = kmap_atomic(p);
56 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off, 57 rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
104{ 105{
105 int rv; 106 int rv;
106 107
107 siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len); 108 siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
108 109
109 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len); 110 rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
110 if (unlikely(rv)) { 111 if (unlikely(rv)) {
111 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n", 112 pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
112 qp_id(rx_qp(srx)), __func__, len, kva, rv); 113 qp_id(rx_qp(srx)), __func__, len, kva, rv);
113 114
114 return rv; 115 return rv;
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
132 133
133 while (len) { 134 while (len) {
134 int bytes; 135 int bytes;
135 u64 buf_addr = 136 dma_addr_t buf_addr =
136 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx); 137 siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
137 if (!buf_addr) 138 if (!buf_addr)
138 break; 139 break;
@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
485 mem_p = *mem; 486 mem_p = *mem;
486 if (mem_p->mem_obj == NULL) 487 if (mem_p->mem_obj == NULL)
487 rv = siw_rx_kva(srx, 488 rv = siw_rx_kva(srx,
488 (void *)(sge->laddr + frx->sge_off), 489 (void *)(uintptr_t)(sge->laddr + frx->sge_off),
489 sge_bytes); 490 sge_bytes);
490 else if (!mem_p->is_pbl) 491 else if (!mem_p->is_pbl)
491 rv = siw_rx_umem(srx, mem_p->umem, 492 rv = siw_rx_umem(srx, mem_p->umem,
492 sge->laddr + frx->sge_off, sge_bytes); 493 sge->laddr + frx->sge_off, sge_bytes);
@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
598 599
599 if (mem->mem_obj == NULL) 600 if (mem->mem_obj == NULL)
600 rv = siw_rx_kva(srx, 601 rv = siw_rx_kva(srx,
601 (void *)(srx->ddp_to + srx->fpdu_part_rcvd), 602 (void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
602 bytes); 603 bytes);
603 else if (!mem->is_pbl) 604 else if (!mem->is_pbl)
604 rv = siw_rx_umem(srx, mem->umem, 605 rv = siw_rx_umem(srx, mem->umem,
605 srx->ddp_to + srx->fpdu_part_rcvd, bytes); 606 srx->ddp_to + srx->fpdu_part_rcvd, bytes);
@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
841 bytes = min(srx->fpdu_part_rem, srx->skb_new); 842 bytes = min(srx->fpdu_part_rem, srx->skb_new);
842 843
843 if (mem_p->mem_obj == NULL) 844 if (mem_p->mem_obj == NULL)
844 rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed), 845 rv = siw_rx_kva(srx,
845 bytes); 846 (void *)(uintptr_t)(sge->laddr + wqe->processed),
847 bytes);
846 else if (!mem_p->is_pbl) 848 else if (!mem_p->is_pbl)
847 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed, 849 rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
848 bytes); 850 bytes);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 43020d2040fc..438a2917a47c 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
26{ 26{
27 struct siw_pbl *pbl = mem->pbl; 27 struct siw_pbl *pbl = mem->pbl;
28 u64 offset = addr - mem->va; 28 u64 offset = addr - mem->va;
29 u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx); 29 dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
30 30
31 if (paddr) 31 if (paddr)
32 return virt_to_page(paddr); 32 return virt_to_page(paddr);
@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
37/* 37/*
38 * Copy short payload at provided destination payload address 38 * Copy short payload at provided destination payload address
39 */ 39 */
40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr) 40static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
41{ 41{
42 struct siw_wqe *wqe = &c_tx->wqe_active; 42 struct siw_wqe *wqe = &c_tx->wqe_active;
43 struct siw_sge *sge = &wqe->sqe.sge[0]; 43 struct siw_sge *sge = &wqe->sqe.sge[0];
@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
50 return 0; 50 return 0;
51 51
52 if (tx_flags(wqe) & SIW_WQE_INLINE) { 52 if (tx_flags(wqe) & SIW_WQE_INLINE) {
53 memcpy((void *)paddr, &wqe->sqe.sge[1], bytes); 53 memcpy(paddr, &wqe->sqe.sge[1], bytes);
54 } else { 54 } else {
55 struct siw_mem *mem = wqe->mem[0]; 55 struct siw_mem *mem = wqe->mem[0];
56 56
57 if (!mem->mem_obj) { 57 if (!mem->mem_obj) {
58 /* Kernel client using kva */ 58 /* Kernel client using kva */
59 memcpy((void *)paddr, (void *)sge->laddr, bytes); 59 memcpy(paddr,
60 (const void *)(uintptr_t)sge->laddr, bytes);
60 } else if (c_tx->in_syscall) { 61 } else if (c_tx->in_syscall) {
61 if (copy_from_user((void *)paddr, 62 if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
62 (const void __user *)sge->laddr,
63 bytes)) 63 bytes))
64 return -EFAULT; 64 return -EFAULT;
65 } else { 65 } else {
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
79 buffer = kmap_atomic(p); 79 buffer = kmap_atomic(p);
80 80
81 if (likely(PAGE_SIZE - off >= bytes)) { 81 if (likely(PAGE_SIZE - off >= bytes)) {
82 memcpy((void *)paddr, buffer + off, bytes); 82 memcpy(paddr, buffer + off, bytes);
83 kunmap_atomic(buffer); 83 kunmap_atomic(buffer);
84 } else { 84 } else {
85 unsigned long part = bytes - (PAGE_SIZE - off); 85 unsigned long part = bytes - (PAGE_SIZE - off);
86 86
87 memcpy((void *)paddr, buffer + off, part); 87 memcpy(paddr, buffer + off, part);
88 kunmap_atomic(buffer); 88 kunmap_atomic(buffer);
89 89
90 if (!mem->is_pbl) 90 if (!mem->is_pbl)
@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
98 return -EFAULT; 98 return -EFAULT;
99 99
100 buffer = kmap_atomic(p); 100 buffer = kmap_atomic(p);
101 memcpy((void *)(paddr + part), buffer, 101 memcpy(paddr + part, buffer,
102 bytes - part); 102 bytes - part);
103 kunmap_atomic(buffer); 103 kunmap_atomic(buffer);
104 } 104 }
@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
166 c_tx->ctrl_len = sizeof(struct iwarp_send); 166 c_tx->ctrl_len = sizeof(struct iwarp_send);
167 167
168 crc = (char *)&c_tx->pkt.send_pkt.crc; 168 crc = (char *)&c_tx->pkt.send_pkt.crc;
169 data = siw_try_1seg(c_tx, (u64)crc); 169 data = siw_try_1seg(c_tx, crc);
170 break; 170 break;
171 171
172 case SIW_OP_SEND_REMOTE_INV: 172 case SIW_OP_SEND_REMOTE_INV:
@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv); 189 c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
190 190
191 crc = (char *)&c_tx->pkt.send_pkt.crc; 191 crc = (char *)&c_tx->pkt.send_pkt.crc;
192 data = siw_try_1seg(c_tx, (u64)crc); 192 data = siw_try_1seg(c_tx, crc);
193 break; 193 break;
194 194
195 case SIW_OP_WRITE: 195 case SIW_OP_WRITE:
@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write); 201 c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
202 202
203 crc = (char *)&c_tx->pkt.write_pkt.crc; 203 crc = (char *)&c_tx->pkt.write_pkt.crc;
204 data = siw_try_1seg(c_tx, (u64)crc); 204 data = siw_try_1seg(c_tx, crc);
205 break; 205 break;
206 206
207 case SIW_OP_READ_RESPONSE: 207 case SIW_OP_READ_RESPONSE:
@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp); 216 c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
217 217
218 crc = (char *)&c_tx->pkt.write_pkt.crc; 218 crc = (char *)&c_tx->pkt.write_pkt.crc;
219 data = siw_try_1seg(c_tx, (u64)crc); 219 data = siw_try_1seg(c_tx, crc);
220 break; 220 break;
221 221
222 default: 222 default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
398 398
399#define MAX_TRAILER (MPA_CRC_SIZE + 4) 399#define MAX_TRAILER (MPA_CRC_SIZE + 4)
400 400
401static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps) 401static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
402{ 402{
403 if (hdr_len) { 403 while (kmap_mask) {
404 ++pages; 404 if (kmap_mask & BIT(0))
405 --num_maps; 405 kunmap(*pp);
406 } 406 pp++;
407 while (num_maps-- > 0) { 407 kmap_mask >>= 1;
408 kunmap(*pages);
409 pages++;
410 } 408 }
411} 409}
412 410
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
437 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0, 435 unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
438 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx, 436 sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
439 pbl_idx = c_tx->pbl_idx; 437 pbl_idx = c_tx->pbl_idx;
438 unsigned long kmap_mask = 0L;
440 439
441 if (c_tx->state == SIW_SEND_HDR) { 440 if (c_tx->state == SIW_SEND_HDR) {
442 if (c_tx->use_sendpage) { 441 if (c_tx->use_sendpage) {
@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
463 462
464 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) { 463 if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
465 mem = wqe->mem[sge_idx]; 464 mem = wqe->mem[sge_idx];
466 if (!mem->mem_obj) 465 is_kva = mem->mem_obj == NULL ? 1 : 0;
467 is_kva = 1;
468 } else { 466 } else {
469 is_kva = 1; 467 is_kva = 1;
470 } 468 }
@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
473 * tx from kernel virtual address: either inline data 471 * tx from kernel virtual address: either inline data
474 * or memory region with assigned kernel buffer 472 * or memory region with assigned kernel buffer
475 */ 473 */
476 iov[seg].iov_base = (void *)(sge->laddr + sge_off); 474 iov[seg].iov_base =
475 (void *)(uintptr_t)(sge->laddr + sge_off);
477 iov[seg].iov_len = sge_len; 476 iov[seg].iov_len = sge_len;
478 477
479 if (do_crc) 478 if (do_crc)
@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
500 p = siw_get_upage(mem->umem, 499 p = siw_get_upage(mem->umem,
501 sge->laddr + sge_off); 500 sge->laddr + sge_off);
502 if (unlikely(!p)) { 501 if (unlikely(!p)) {
503 if (hdr_len) 502 siw_unmap_pages(page_array, kmap_mask);
504 seg--;
505 if (!c_tx->use_sendpage && seg) {
506 siw_unmap_pages(page_array,
507 hdr_len, seg);
508 }
509 wqe->processed -= c_tx->bytes_unsent; 503 wqe->processed -= c_tx->bytes_unsent;
510 rv = -EFAULT; 504 rv = -EFAULT;
511 goto done_crc; 505 goto done_crc;
@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
515 if (!c_tx->use_sendpage) { 509 if (!c_tx->use_sendpage) {
516 iov[seg].iov_base = kmap(p) + fp_off; 510 iov[seg].iov_base = kmap(p) + fp_off;
517 iov[seg].iov_len = plen; 511 iov[seg].iov_len = plen;
512
513 /* Remember for later kunmap() */
514 kmap_mask |= BIT(seg);
515
518 if (do_crc) 516 if (do_crc)
519 crypto_shash_update( 517 crypto_shash_update(
520 c_tx->mpa_crc_hd, 518 c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
526 page_address(p) + fp_off, 524 page_address(p) + fp_off,
527 plen); 525 plen);
528 } else { 526 } else {
529 u64 pa = ((sge->laddr + sge_off) & PAGE_MASK); 527 u64 va = sge->laddr + sge_off;
530 528
531 page_array[seg] = virt_to_page(pa); 529 page_array[seg] = virt_to_page(va & PAGE_MASK);
532 if (do_crc) 530 if (do_crc)
533 crypto_shash_update( 531 crypto_shash_update(
534 c_tx->mpa_crc_hd, 532 c_tx->mpa_crc_hd,
535 (void *)(sge->laddr + sge_off), 533 (void *)(uintptr_t)va,
536 plen); 534 plen);
537 } 535 }
538 536
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
543 541
544 if (++seg > (int)MAX_ARRAY) { 542 if (++seg > (int)MAX_ARRAY) {
545 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n"); 543 siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
546 if (!is_kva && !c_tx->use_sendpage) { 544 siw_unmap_pages(page_array, kmap_mask);
547 siw_unmap_pages(page_array, hdr_len,
548 seg - 1);
549 }
550 wqe->processed -= c_tx->bytes_unsent; 545 wqe->processed -= c_tx->bytes_unsent;
551 rv = -EMSGSIZE; 546 rv = -EMSGSIZE;
552 goto done_crc; 547 goto done_crc;
@@ -597,8 +592,7 @@ sge_done:
597 } else { 592 } else {
598 rv = kernel_sendmsg(s, &msg, iov, seg + 1, 593 rv = kernel_sendmsg(s, &msg, iov, seg + 1,
599 hdr_len + data_len + trl_len); 594 hdr_len + data_len + trl_len);
600 if (!is_kva) 595 siw_unmap_pages(page_array, kmap_mask);
601 siw_unmap_pages(page_array, hdr_len, seg);
602 } 596 }
603 if (rv < (int)hdr_len) { 597 if (rv < (int)hdr_len) {
604 /* Not even complete hdr pushed or negative rv */ 598 /* Not even complete hdr pushed or negative rv */
@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
829 rv = -EINVAL; 823 rv = -EINVAL;
830 goto tx_error; 824 goto tx_error;
831 } 825 }
832 wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1]; 826 wqe->sqe.sge[0].laddr =
827 (u64)(uintptr_t)&wqe->sqe.sge[1];
833 } 828 }
834 } 829 }
835 wqe->wr_status = SIW_WR_INPROGRESS; 830 wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
924 919
925static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe) 920static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
926{ 921{
927 struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr; 922 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
928 struct siw_device *sdev = to_siw_dev(pd->device); 923 struct siw_device *sdev = to_siw_dev(pd->device);
929 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 924 struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
930 int rv = 0; 925 int rv = 0;
@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
954 mem->stag = sqe->rkey; 949 mem->stag = sqe->rkey;
955 mem->perms = sqe->access; 950 mem->perms = sqe->access;
956 951
957 siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n", 952 siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
958 mem->va, base_mr->iova);
959 mem->va = base_mr->iova; 953 mem->va = base_mr->iova;
960 mem->stag_valid = 1; 954 mem->stag_valid = 1;
961out: 955out:
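
The siw_tx_hdt() changes above replace positional bookkeeping (hdr_len plus a segment count) with a kmap_mask whose bit n records that page_array[n] was kmap()ed, so every error path can issue one siw_unmap_pages(page_array, kmap_mask) call and unmap exactly the mapped slots. A compact userspace model of that bitmask bookkeeping (printf calls stand in for kmap()/kunmap()):

    #include <stdio.h>

    #define MAX_SEGS 8                 /* stands in for siw's MAX_ARRAY */

    struct page { int id; };

    static void unmap_pages(struct page **pp, unsigned long kmap_mask)
    {
        while (kmap_mask) {            /* visit only slots with a bit set */
            if (kmap_mask & 1UL)
                printf("kunmap page %d\n", (*pp)->id);
            pp++;
            kmap_mask >>= 1;
        }
    }

    int main(void)
    {
        struct page pages[MAX_SEGS] = { {0}, {1}, {2}, {3} };
        struct page *array[MAX_SEGS];
        unsigned long kmap_mask = 0;
        int seg;

        for (seg = 0; seg < 4; seg++) {
            array[seg] = &pages[seg];
            if (seg != 1) {                  /* pretend slot 1 needs no map */
                kmap_mask |= 1UL << seg;     /* remember for cleanup */
                printf("kmap page %d\n", seg);
            }
        }
        unmap_pages(array, kmap_mask);       /* slot 1 skipped automatically */
        return 0;
    }
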
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 32dc79d0e898..da52c90e06d4 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
424 */ 424 */
425 qp->srq = to_siw_srq(attrs->srq); 425 qp->srq = to_siw_srq(attrs->srq);
426 qp->attrs.rq_size = 0; 426 qp->attrs.rq_size = 0;
427 siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n", 427 siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
428 qp->qp_num, qp->srq);
429 } else if (num_rqe) { 428 } else if (num_rqe) {
430 if (qp->kernel_verbs) 429 if (qp->kernel_verbs)
431 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe)); 430 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
610 base_ucontext); 609 base_ucontext);
611 struct siw_qp_attrs qp_attrs; 610 struct siw_qp_attrs qp_attrs;
612 611
613 siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep); 612 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
614 613
615 /* 614 /*
616 * Mark QP as in process of destruction to prevent from 615 * Mark QP as in process of destruction to prevent from
@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
662 void *kbuf = &sqe->sge[1]; 661 void *kbuf = &sqe->sge[1];
663 int num_sge = core_wr->num_sge, bytes = 0; 662 int num_sge = core_wr->num_sge, bytes = 0;
664 663
665 sqe->sge[0].laddr = (u64)kbuf; 664 sqe->sge[0].laddr = (uintptr_t)kbuf;
666 sqe->sge[0].lkey = 0; 665 sqe->sge[0].lkey = 0;
667 666
668 while (num_sge--) { 667 while (num_sge--) {
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
825 break; 824 break;
826 825
827 case IB_WR_REG_MR: 826 case IB_WR_REG_MR:
828 sqe->base_mr = (uint64_t)reg_wr(wr)->mr; 827 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
829 sqe->rkey = reg_wr(wr)->key; 828 sqe->rkey = reg_wr(wr)->key;
830 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK; 829 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
831 sqe->opcode = SIW_OP_REG_MR; 830 sqe->opcode = SIW_OP_REG_MR;
@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
842 rv = -EINVAL; 841 rv = -EINVAL;
843 break; 842 break;
844 } 843 }
845 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n", 844 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
846 sqe->opcode, sqe->flags, (void *)sqe->id); 845 sqe->opcode, sqe->flags,
846 (void *)(uintptr_t)sqe->id);
847 847
848 if (unlikely(rv < 0)) 848 if (unlikely(rv < 0))
849 break; 849 break;
@@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
1049 1049
1050 spin_lock_init(&cq->lock); 1050 spin_lock_init(&cq->lock);
1051 1051
1052 cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify; 1052 cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
1053 1053
1054 if (udata) { 1054 if (udata) {
1055 struct siw_uresp_create_cq uresp = {}; 1055 struct siw_uresp_create_cq uresp = {};
@@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
1141 siw_dbg_cq(cq, "flags: 0x%02x\n", flags); 1141 siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
1142 1142
1143 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) 1143 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
1144 /* CQ event for next solicited completion */ 1144 /*
1145 smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED); 1145 * Enable CQ event for next solicited completion,
1146 * and make it visible to all associated producers.
1147 */
1148 smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
1146 else 1149 else
1147 /* CQ event for any signalled completion */ 1150 /*
1148 smp_store_mb(*cq->notify, SIW_NOTIFY_ALL); 1151 * Enable CQ event for any signalled completion,
1152 * and make it visible to all associated producers.
1153 */
1154 smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
1149 1155
1150 if (flags & IB_CQ_REPORT_MISSED_EVENTS) 1156 if (flags & IB_CQ_REPORT_MISSED_EVENTS)
1151 return cq->cq_put - cq->cq_get; 1157 return cq->cq_put - cq->cq_get;
@@ -1199,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1199 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK); 1205 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1200 int rv; 1206 int rv;
1201 1207
1202 siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n", 1208 siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1203 (unsigned long long)start, (unsigned long long)rnic_va, 1209 (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
1204 (unsigned long long)len); 1210 (unsigned long long)len);
1205 1211
1206 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) { 1212 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
@@ -1357,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1357 struct siw_mem *mem = mr->mem; 1363 struct siw_mem *mem = mr->mem;
1358 struct siw_pbl *pbl = mem->pbl; 1364 struct siw_pbl *pbl = mem->pbl;
1359 struct siw_pble *pble; 1365 struct siw_pble *pble;
1360 u64 pbl_size; 1366 unsigned long pbl_size;
1361 int i, rv; 1367 int i, rv;
1362 1368
1363 if (!pbl) { 1369 if (!pbl) {
@@ -1396,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1396 pbl_size += sg_dma_len(slp); 1402 pbl_size += sg_dma_len(slp);
1397 } 1403 }
1398 siw_dbg_mem(mem, 1404 siw_dbg_mem(mem,
1399 "sge[%d], size %llu, addr 0x%016llx, total %llu\n", 1405 "sge[%d], size %u, addr 0x%p, total %lu\n",
1400 i, pble->size, pble->addr, pbl_size); 1406 i, pble->size, (void *)(uintptr_t)pble->addr,
1407 pbl_size);
1401 } 1408 }
1402 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page); 1409 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1403 if (rv > 0) { 1410 if (rv > 0) {
1404 mem->len = base_mr->length; 1411 mem->len = base_mr->length;
1405 mem->va = base_mr->iova; 1412 mem->va = base_mr->iova;
1406 siw_dbg_mem(mem, 1413 siw_dbg_mem(mem,
1407 "%llu bytes, start 0x%016llx, %u SLE to %u entries\n", 1414 "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1408 mem->len, mem->va, num_sle, pbl->num_buf); 1415 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1416 pbl->num_buf);
1409 } 1417 }
1410 return rv; 1418 return rv;
1411} 1419}
@@ -1523,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
1523 } 1531 }
1524 spin_lock_init(&srq->lock); 1532 spin_lock_init(&srq->lock);
1525 1533
1526 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq); 1534 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
1527 1535
1528 return 0; 1536 return 0;
1529 1537
@@ -1644,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1644 1652
1645 if (unlikely(!srq->kernel_verbs)) { 1653 if (unlikely(!srq->kernel_verbs)) {
1646 siw_dbg_pd(base_srq->pd, 1654 siw_dbg_pd(base_srq->pd,
1647 "[SRQ 0x%p]: no kernel post_recv for mapped srq\n", 1655 "[SRQ]: no kernel post_recv for mapped srq\n");
1648 srq);
1649 rv = -EINVAL; 1656 rv = -EINVAL;
1650 goto out; 1657 goto out;
1651 } 1658 }
@@ -1667,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1667 } 1674 }
1668 if (unlikely(wr->num_sge > srq->max_sge)) { 1675 if (unlikely(wr->num_sge > srq->max_sge)) {
1669 siw_dbg_pd(base_srq->pd, 1676 siw_dbg_pd(base_srq->pd,
1670 "[SRQ 0x%p]: too many sge's: %d\n", srq, 1677 "[SRQ]: too many sge's: %d\n", wr->num_sge);
1671 wr->num_sge);
1672 rv = -EINVAL; 1678 rv = -EINVAL;
1673 break; 1679 break;
1674 } 1680 }
@@ -1687,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1687 spin_unlock_irqrestore(&srq->lock, flags); 1693 spin_unlock_irqrestore(&srq->lock, flags);
1688out: 1694out:
1689 if (unlikely(rv < 0)) { 1695 if (unlikely(rv < 0)) {
1690 siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv); 1696 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
1691 *bad_wr = wr; 1697 *bad_wr = wr;
1692 } 1698 }
1693 return rv; 1699 return rv;
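
A pattern repeated throughout the siw hunks above is the pointer/u64 round-trip: kernel pointers carried in u64 SQE fields are stored via (uintptr_t) and restored via (void *)(uintptr_t), avoiding truncation warnings on 32-bit builds, while debug output switches to %pK so addresses are hashed for unprivileged readers. A small userspace sketch of the round-trip (%pK itself is a printk extension, so plain printf is used here):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sqe { uint64_t laddr; };    /* u64 field shared with user space */

    int main(void)
    {
        int payload = 42;
        struct sqe sqe;

        /* Store: pointer -> uintptr_t -> u64, no truncation warning. */
        sqe.laddr = (uintptr_t)&payload;

        /* Restore: u64 -> uintptr_t -> pointer. Casting the u64
         * straight to a pointer warns on 32-bit builds; uintptr_t is
         * the portable intermediate the diff adds at each cast site.
         */
        int *p = (int *)(uintptr_t)sqe.laddr;

        printf("value %d at 0x%" PRIxPTR "\n", *p, (uintptr_t)p);
        return 0;
    }
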
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 88ae7c2ac3c8..e486a8a74c40 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
237 237
238static void hv_kbd_on_channel_callback(void *context) 238static void hv_kbd_on_channel_callback(void *context)
239{ 239{
240 struct vmpacket_descriptor *desc;
240 struct hv_device *hv_dev = context; 241 struct hv_device *hv_dev = context;
241 void *buffer;
242 int bufferlen = 0x100; /* Start with sensible size */
243 u32 bytes_recvd; 242 u32 bytes_recvd;
244 u64 req_id; 243 u64 req_id;
245 int error;
246 244
247 buffer = kmalloc(bufferlen, GFP_ATOMIC); 245 foreach_vmbus_pkt(desc, hv_dev->channel) {
248 if (!buffer) 246 bytes_recvd = desc->len8 * 8;
249 return; 247 req_id = desc->trans_id;
250
251 while (1) {
252 error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
253 &bytes_recvd, &req_id);
254 switch (error) {
255 case 0:
256 if (bytes_recvd == 0) {
257 kfree(buffer);
258 return;
259 }
260
261 hv_kbd_handle_received_packet(hv_dev, buffer,
262 bytes_recvd, req_id);
263 break;
264 248
265 case -ENOBUFS: 249 hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
266 kfree(buffer); 250 req_id);
267 /* Handle large packet */
268 bufferlen = bytes_recvd;
269 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
270 if (!buffer)
271 return;
272 break;
273 }
274 } 251 }
275} 252}
276 253
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index a9a9fabd3968..c5c93e48b4db 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1186,8 +1186,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
1186 ste_live = true; 1186 ste_live = true;
1187 break; 1187 break;
1188 case STRTAB_STE_0_CFG_ABORT: 1188 case STRTAB_STE_0_CFG_ABORT:
1189 if (disable_bypass) 1189 BUG_ON(!disable_bypass);
1190 break; 1190 break;
1191 default: 1191 default:
1192 BUG(); /* STE corruption */ 1192 BUG(); /* STE corruption */
1193 } 1193 }
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a7f9c3edbcb2..f68a62c3c32b 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -459,13 +459,11 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
459{ 459{
460 struct iommu_domain *domain = iommu_get_dma_domain(dev); 460 struct iommu_domain *domain = iommu_get_dma_domain(dev);
461 struct iommu_dma_cookie *cookie = domain->iova_cookie; 461 struct iommu_dma_cookie *cookie = domain->iova_cookie;
462 size_t iova_off = 0; 462 struct iova_domain *iovad = &cookie->iovad;
463 size_t iova_off = iova_offset(iovad, phys);
463 dma_addr_t iova; 464 dma_addr_t iova;
464 465
465 if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { 466 size = iova_align(iovad, size + iova_off);
466 iova_off = iova_offset(&cookie->iovad, phys);
467 size = iova_align(&cookie->iovad, size + iova_off);
468 }
469 467
470 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); 468 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
471 if (!iova) 469 if (!iova)
@@ -574,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
574 struct iova_domain *iovad = &cookie->iovad; 572 struct iova_domain *iovad = &cookie->iovad;
575 bool coherent = dev_is_dma_coherent(dev); 573 bool coherent = dev_is_dma_coherent(dev);
576 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); 574 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
577 pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 575 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
578 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; 576 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
579 struct page **pages; 577 struct page **pages;
580 struct sg_table sgt; 578 struct sg_table sgt;
@@ -764,7 +762,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
764 * - and wouldn't make the resulting output segment too long 762 * - and wouldn't make the resulting output segment too long
765 */ 763 */
766 if (cur_len && !s_iova_off && (dma_addr & seg_mask) && 764 if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
767 (cur_len + s_length <= max_len)) { 765 (max_len - cur_len >= s_length)) {
768 /* ...then concatenate it with the previous one */ 766 /* ...then concatenate it with the previous one */
769 cur_len += s_length; 767 cur_len += s_length;
770 } else { 768 } else {
@@ -967,15 +965,18 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
967{ 965{
968 bool coherent = dev_is_dma_coherent(dev); 966 bool coherent = dev_is_dma_coherent(dev);
969 size_t alloc_size = PAGE_ALIGN(size); 967 size_t alloc_size = PAGE_ALIGN(size);
968 int node = dev_to_node(dev);
970 struct page *page = NULL; 969 struct page *page = NULL;
971 void *cpu_addr; 970 void *cpu_addr;
972 971
973 page = dma_alloc_contiguous(dev, alloc_size, gfp); 972 page = dma_alloc_contiguous(dev, alloc_size, gfp);
974 if (!page) 973 if (!page)
974 page = alloc_pages_node(node, gfp, get_order(alloc_size));
975 if (!page)
975 return NULL; 976 return NULL;
976 977
977 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 978 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
978 pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 979 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
979 980
980 cpu_addr = dma_common_contiguous_remap(page, alloc_size, 981 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
981 VM_USERMAP, prot, __builtin_return_address(0)); 982 VM_USERMAP, prot, __builtin_return_address(0));
@@ -1035,7 +1036,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1035 unsigned long pfn, off = vma->vm_pgoff; 1036 unsigned long pfn, off = vma->vm_pgoff;
1036 int ret; 1037 int ret;
1037 1038
1038 vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 1039 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
1039 1040
1040 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 1041 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
1041 return ret; 1042 return ret;
@@ -1147,16 +1148,21 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
1147 if (!msi_page) 1148 if (!msi_page)
1148 return NULL; 1149 return NULL;
1149 1150
1150 iova = __iommu_dma_map(dev, msi_addr, size, prot); 1151 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
1151 if (iova == DMA_MAPPING_ERROR) 1152 if (!iova)
1152 goto out_free_page; 1153 goto out_free_page;
1153 1154
1155 if (iommu_map(domain, iova, msi_addr, size, prot))
1156 goto out_free_iova;
1157
1154 INIT_LIST_HEAD(&msi_page->list); 1158 INIT_LIST_HEAD(&msi_page->list);
1155 msi_page->phys = msi_addr; 1159 msi_page->phys = msi_addr;
1156 msi_page->iova = iova; 1160 msi_page->iova = iova;
1157 list_add(&msi_page->list, &cookie->msi_page_list); 1161 list_add(&msi_page->list, &cookie->msi_page_list);
1158 return msi_page; 1162 return msi_page;
1159 1163
1164out_free_iova:
1165 iommu_dma_free_iova(cookie, iova, size);
1160out_free_page: 1166out_free_page:
1161 kfree(msi_page); 1167 kfree(msi_page);
1162 return NULL; 1168 return NULL;
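
Splitting iommu_dma_get_msi_page() into an explicit IOVA allocation followed by iommu_map() lets a failed map release the IOVA it no longer owns, the standard goto-unwind ladder where each failure label undoes only the steps that already succeeded. A generic sketch of that shape, under invented resource names:

    #include <stdlib.h>

    struct res { int token; };

    static struct res *acquire_a(void) { return malloc(sizeof(struct res)); }
    static int acquire_b(struct res *a) { a->token = 1; return 0; /* 0 = ok */ }
    static void release_a(struct res *a) { free(a); }

    /* Two-step setup: each failure label unwinds, in reverse order,
     * exactly the acquisitions that preceded it.
     */
    static struct res *setup(void)
    {
        struct res *a = acquire_a();

        if (!a)
            goto out_fail;
        if (acquire_b(a))
            goto out_release_a;
        return a;

    out_release_a:
        release_a(a);
    out_fail:
        return NULL;
    }

    int main(void)
    {
        struct res *r = setup();

        if (r)
            release_a(r);
        return 0;
    }
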
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 2b25d9c59336..471f05d452e0 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -235,7 +235,7 @@ static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
235 tbl_wlk.ctx_entry = context; 235 tbl_wlk.ctx_entry = context;
236 m->private = &tbl_wlk; 236 m->private = &tbl_wlk;
237 237
238 if (pasid_supported(iommu) && is_pasid_enabled(context)) { 238 if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
239 pasid_dir_ptr = context->lo & VTD_PAGE_MASK; 239 pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
240 pasid_dir_size = get_pasid_dir_size(context); 240 pasid_dir_size = get_pasid_dir_size(context);
241 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size); 241 pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdaed2da8a55..12d094d08c0a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3449,6 +3449,7 @@ static bool iommu_need_mapping(struct device *dev)
3449 dmar_domain = to_dmar_domain(domain); 3449 dmar_domain = to_dmar_domain(domain);
3450 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 3450 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
3451 } 3451 }
3452 dmar_remove_one_dev_info(dev);
3452 get_private_domain_for_dev(dev); 3453 get_private_domain_for_dev(dev);
3453 } 3454 }
3454 3455
@@ -4790,7 +4791,8 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4790 4791
4791 /* free the private domain */ 4792 /* free the private domain */
4792 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN && 4793 if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
4793 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) 4794 !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
4795 list_empty(&domain->devices))
4794 domain_exit(info->domain); 4796 domain_exit(info->domain);
4795 4797
4796 free_devinfo_mem(info); 4798 free_devinfo_mem(info);
@@ -4803,7 +4805,8 @@ static void dmar_remove_one_dev_info(struct device *dev)
4803 4805
4804 spin_lock_irqsave(&device_domain_lock, flags); 4806 spin_lock_irqsave(&device_domain_lock, flags);
4805 info = dev->archdata.iommu; 4807 info = dev->archdata.iommu;
4806 __dmar_remove_one_dev_info(info); 4808 if (info)
4809 __dmar_remove_one_dev_info(info);
4807 spin_unlock_irqrestore(&device_domain_lock, flags); 4810 spin_unlock_irqrestore(&device_domain_lock, flags);
4808} 4811}
4809 4812
@@ -5281,6 +5284,7 @@ static int intel_iommu_add_device(struct device *dev)
5281 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) { 5284 if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
5282 ret = iommu_request_dm_for_dev(dev); 5285 ret = iommu_request_dm_for_dev(dev);
5283 if (ret) { 5286 if (ret) {
5287 dmar_remove_one_dev_info(dev);
5284 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 5288 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5285 domain_add_dev_info(si_domain, dev); 5289 domain_add_dev_info(si_domain, dev);
5286 dev_info(dev, 5290 dev_info(dev,
@@ -5291,6 +5295,7 @@ static int intel_iommu_add_device(struct device *dev)
5291 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) { 5295 if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
5292 ret = iommu_request_dma_domain_for_dev(dev); 5296 ret = iommu_request_dma_domain_for_dev(dev);
5293 if (ret) { 5297 if (ret) {
5298 dmar_remove_one_dev_info(dev);
5294 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; 5299 dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
5295 if (!get_private_domain_for_dev(dev)) { 5300 if (!get_private_domain_for_dev(dev)) {
5296 dev_warn(dev, 5301 dev_warn(dev,
@@ -5316,6 +5321,8 @@ static void intel_iommu_remove_device(struct device *dev)
5316 if (!iommu) 5321 if (!iommu)
5317 return; 5322 return;
5318 5323
5324 dmar_remove_one_dev_info(dev);
5325
5319 iommu_group_remove_device(dev); 5326 iommu_group_remove_device(dev);
5320 5327
5321 iommu_device_unlink(&iommu->iommu, dev); 5328 iommu_device_unlink(&iommu->iommu, dev);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b6b5acc92ca2..2a48ea3f1b30 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1599 unsigned long freed; 1599 unsigned long freed;
1600 1600
1601 c = container_of(shrink, struct dm_bufio_client, shrinker); 1601 c = container_of(shrink, struct dm_bufio_client, shrinker);
1602 if (!dm_bufio_trylock(c)) 1602 if (sc->gfp_mask & __GFP_FS)
1603 dm_bufio_lock(c);
1604 else if (!dm_bufio_trylock(c))
1603 return SHRINK_STOP; 1605 return SHRINK_STOP;
1604 1606
1605 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask); 1607 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
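
The dm-bufio shrinker fix above chooses its locking primitive from the reclaim context: with __GFP_FS set the caller may sleep, so a blocking lock is safe and avoids needless SHRINK_STOP returns; without it, only a trylock is permitted. A sketch of that decision with a pthread mutex standing in for the dm_bufio client lock (the flag value is illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    #define GFP_FS (1u << 0)   /* illustrative stand-in for __GFP_FS */

    static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns false when the scan must be skipped (SHRINK_STOP). */
    static bool shrink_lock(unsigned int gfp_mask)
    {
        if (gfp_mask & GFP_FS) {
            /* Caller may block: always succeeds, work never skipped. */
            pthread_mutex_lock(&client_lock);
            return true;
        }
        /* Caller must not sleep: take the lock only if it is free. */
        return pthread_mutex_trylock(&client_lock) == 0;
    }
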
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 845f376a72d9..8288887b7f94 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -25,6 +25,7 @@ struct dust_device {
25 unsigned long long badblock_count; 25 unsigned long long badblock_count;
26 spinlock_t dust_lock; 26 spinlock_t dust_lock;
27 unsigned int blksz; 27 unsigned int blksz;
28 int sect_per_block_shift;
28 unsigned int sect_per_block; 29 unsigned int sect_per_block;
29 sector_t start; 30 sector_t start;
30 bool fail_read_on_bb:1; 31 bool fail_read_on_bb:1;
@@ -79,7 +80,7 @@ static int dust_remove_block(struct dust_device *dd, unsigned long long block)
79 unsigned long flags; 80 unsigned long flags;
80 81
81 spin_lock_irqsave(&dd->dust_lock, flags); 82 spin_lock_irqsave(&dd->dust_lock, flags);
82 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 83 bblock = dust_rb_search(&dd->badblocklist, block);
83 84
84 if (bblock == NULL) { 85 if (bblock == NULL) {
85 if (!dd->quiet_mode) { 86 if (!dd->quiet_mode) {
@@ -113,7 +114,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block)
113 } 114 }
114 115
115 spin_lock_irqsave(&dd->dust_lock, flags); 116 spin_lock_irqsave(&dd->dust_lock, flags);
116 bblock->bb = block * dd->sect_per_block; 117 bblock->bb = block;
117 if (!dust_rb_insert(&dd->badblocklist, bblock)) { 118 if (!dust_rb_insert(&dd->badblocklist, bblock)) {
118 if (!dd->quiet_mode) { 119 if (!dd->quiet_mode) {
119 DMERR("%s: block %llu already in badblocklist", 120 DMERR("%s: block %llu already in badblocklist",
@@ -138,7 +139,7 @@ static int dust_query_block(struct dust_device *dd, unsigned long long block)
138 unsigned long flags; 139 unsigned long flags;
139 140
140 spin_lock_irqsave(&dd->dust_lock, flags); 141 spin_lock_irqsave(&dd->dust_lock, flags);
141 bblock = dust_rb_search(&dd->badblocklist, block * dd->sect_per_block); 142 bblock = dust_rb_search(&dd->badblocklist, block);
142 if (bblock != NULL) 143 if (bblock != NULL)
143 DMINFO("%s: block %llu found in badblocklist", __func__, block); 144 DMINFO("%s: block %llu found in badblocklist", __func__, block);
144 else 145 else
@@ -165,6 +166,7 @@ static int dust_map_read(struct dust_device *dd, sector_t thisblock,
165 int ret = DM_MAPIO_REMAPPED; 166 int ret = DM_MAPIO_REMAPPED;
166 167
167 if (fail_read_on_bb) { 168 if (fail_read_on_bb) {
169 thisblock >>= dd->sect_per_block_shift;
168 spin_lock_irqsave(&dd->dust_lock, flags); 170 spin_lock_irqsave(&dd->dust_lock, flags);
169 ret = __dust_map_read(dd, thisblock); 171 ret = __dust_map_read(dd, thisblock);
170 spin_unlock_irqrestore(&dd->dust_lock, flags); 172 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -195,6 +197,7 @@ static int dust_map_write(struct dust_device *dd, sector_t thisblock,
195 unsigned long flags; 197 unsigned long flags;
196 198
197 if (fail_read_on_bb) { 199 if (fail_read_on_bb) {
200 thisblock >>= dd->sect_per_block_shift;
198 spin_lock_irqsave(&dd->dust_lock, flags); 201 spin_lock_irqsave(&dd->dust_lock, flags);
199 __dust_map_write(dd, thisblock); 202 __dust_map_write(dd, thisblock);
200 spin_unlock_irqrestore(&dd->dust_lock, flags); 203 spin_unlock_irqrestore(&dd->dust_lock, flags);
@@ -331,6 +334,8 @@ static int dust_ctr(struct dm_target *ti, unsigned int argc, char **argv)
331 dd->blksz = blksz; 334 dd->blksz = blksz;
332 dd->start = tmp; 335 dd->start = tmp;
333 336
337 dd->sect_per_block_shift = __ffs(sect_per_block);
338
334 /* 339 /*
335 * Whether to fail a read on a "bad" block. 340 * Whether to fail a read on a "bad" block.
336 * Defaults to false; enabled later by message. 341 * Defaults to false; enabled later by message.
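
With the dm-dust change, the bad-block tree is keyed by block number and incoming sector numbers are converted with a right shift; since sect_per_block is a power of two, __ffs() of it is exactly the shift count and the shift equals a division. A short sketch of that conversion, with an invented helper in place of __ffs():

    #include <assert.h>
    #include <stdio.h>

    /* Index of the lowest set bit; log2 of a power-of-two value. */
    static unsigned int lowest_set_bit(unsigned long v)
    {
        unsigned int s = 0;

        while (!(v & 1UL)) {
            v >>= 1;
            s++;
        }
        return s;
    }

    int main(void)
    {
        unsigned long sect_per_block = 8;   /* 4 KiB blocks, 512 B sectors */
        unsigned int shift = lowest_set_bit(sect_per_block);
        unsigned long long sector = 12345;

        /* The shift replaces a division because sect_per_block is 2^n. */
        assert(sector >> shift == sector / sect_per_block);
        printf("sector %llu -> block %llu\n", sector, sector >> shift);
        return 0;
    }
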
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b1b0de402dfc..9118ab85cb3a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1943,7 +1943,22 @@ offload_to_thread:
1943 queue_work(ic->wait_wq, &dio->work); 1943 queue_work(ic->wait_wq, &dio->work);
1944 return; 1944 return;
1945 } 1945 }
1946 if (journal_read_pos != NOT_FOUND)
1947 dio->range.n_sectors = ic->sectors_per_block;
1946 wait_and_add_new_range(ic, &dio->range); 1948 wait_and_add_new_range(ic, &dio->range);
1949 /*
1950 * wait_and_add_new_range drops the spinlock, so the journal
1951 * may have been changed arbitrarily. We need to recheck.
1952 * To simplify the code, we restrict I/O size to just one block.
1953 */
1954 if (journal_read_pos != NOT_FOUND) {
1955 sector_t next_sector;
1956 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1957 if (unlikely(new_pos != journal_read_pos)) {
1958 remove_range_unlocked(ic, &dio->range);
1959 goto retry;
1960 }
1961 }
1947 } 1962 }
1948 spin_unlock_irq(&ic->endio_wait.lock); 1963 spin_unlock_irq(&ic->endio_wait.lock);
1949 1964
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index df2011de7be2..1bbe4a34ef4c 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -566,8 +566,10 @@ static int run_io_job(struct kcopyd_job *job)
566 * no point in continuing. 566 * no point in continuing.
567 */ 567 */
568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) && 568 if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
569 job->master_job->write_err) 569 job->master_job->write_err) {
570 job->write_err = job->master_job->write_err;
570 return -EIO; 571 return -EIO;
572 }
571 573
572 io_job_start(job->kc->throttle); 574 io_job_start(job->kc->throttle);
573 575
@@ -619,6 +621,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
619 else 621 else
620 job->read_err = 1; 622 job->read_err = 1;
621 push(&kc->complete_jobs, job); 623 push(&kc->complete_jobs, job);
624 wake(kc);
622 break; 625 break;
623 } 626 }
624 627
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8a60a4a070ac..1f933dd197cd 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3194,7 +3194,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3194 */ 3194 */
3195 r = rs_prepare_reshape(rs); 3195 r = rs_prepare_reshape(rs);
3196 if (r) 3196 if (r)
3197 return r; 3197 goto bad;
3198 3198
3199 /* Reshaping ain't recovery, so disable recovery */ 3199 /* Reshaping ain't recovery, so disable recovery */
3200 rs_setup_recovery(rs, MaxSector); 3200 rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 7b6c3ee9e755..8820931ec7d2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1342,7 +1342,7 @@ void dm_table_event(struct dm_table *t)
1342} 1342}
1343EXPORT_SYMBOL(dm_table_event); 1343EXPORT_SYMBOL(dm_table_event);
1344 1344
1345sector_t dm_table_get_size(struct dm_table *t) 1345inline sector_t dm_table_get_size(struct dm_table *t)
1346{ 1346{
1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0; 1347 return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1348} 1348}
@@ -1367,6 +1367,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1367 unsigned int l, n = 0, k = 0; 1367 unsigned int l, n = 0, k = 0;
1368 sector_t *node; 1368 sector_t *node;
1369 1369
1370 if (unlikely(sector >= dm_table_get_size(t)))
1371 return &t->targets[t->num_targets];
1372
1370 for (l = 0; l < t->depth; l++) { 1373 for (l = 0; l < t->depth; l++) {
1371 n = get_child(n, k); 1374 n = get_child(n, k);
1372 node = get_node(t, l, n); 1375 node = get_node(t, l, n);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 8545dcee9fd0..595a73110e17 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -34,7 +35,7 @@
34 * (1) Super block (1 block) 35 * (1) Super block (1 block)
35 * (2) Chunk mapping table (nr_map_blocks) 36 * (2) Chunk mapping table (nr_map_blocks)
36 * (3) Bitmap blocks (nr_bitmap_blocks) 37 * (3) Bitmap blocks (nr_bitmap_blocks)
37 * All metadata blocks are stored in conventional zones, starting from the 38 * All metadata blocks are stored in conventional zones, starting from
38 * the first conventional zone found on disk. 39 * the first conventional zone found on disk.
39 */ 40 */
40struct dmz_super { 41struct dmz_super {
@@ -233,7 +234,7 @@ void dmz_unlock_map(struct dmz_metadata *zmd)
233 * Lock/unlock metadata access. This is a "read" lock on a semaphore 234 * Lock/unlock metadata access. This is a "read" lock on a semaphore
234 * that prevents metadata flush from running while metadata are being 235 * that prevents metadata flush from running while metadata are being
235 * modified. The actual metadata write mutual exclusion is achieved with 236 * modified. The actual metadata write mutual exclusion is achieved with
236 * the map lock and zone styate management (active and reclaim state are 237 * the map lock and zone state management (active and reclaim state are
237 * mutually exclusive). 238 * mutually exclusive).
238 */ 239 */
239void dmz_lock_metadata(struct dmz_metadata *zmd) 240void dmz_lock_metadata(struct dmz_metadata *zmd)
@@ -402,15 +403,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
402 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no; 403 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
403 struct bio *bio; 404 struct bio *bio;
404 405
406 if (dmz_bdev_is_dying(zmd->dev))
407 return ERR_PTR(-EIO);
408
405 /* Get a new block and a BIO to read it */ 409 /* Get a new block and a BIO to read it */
406 mblk = dmz_alloc_mblock(zmd, mblk_no); 410 mblk = dmz_alloc_mblock(zmd, mblk_no);
407 if (!mblk) 411 if (!mblk)
408 return NULL; 412 return ERR_PTR(-ENOMEM);
409 413
410 bio = bio_alloc(GFP_NOIO, 1); 414 bio = bio_alloc(GFP_NOIO, 1);
411 if (!bio) { 415 if (!bio) {
412 dmz_free_mblock(zmd, mblk); 416 dmz_free_mblock(zmd, mblk);
413 return NULL; 417 return ERR_PTR(-ENOMEM);
414 } 418 }
415 419
416 spin_lock(&zmd->mblk_lock); 420 spin_lock(&zmd->mblk_lock);
@@ -541,8 +545,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
541 if (!mblk) { 545 if (!mblk) {
542 /* Cache miss: read the block from disk */ 546 /* Cache miss: read the block from disk */
543 mblk = dmz_get_mblock_slow(zmd, mblk_no); 547 mblk = dmz_get_mblock_slow(zmd, mblk_no);
544 if (!mblk) 548 if (IS_ERR(mblk))
545 return ERR_PTR(-ENOMEM); 549 return mblk;
546 } 550 }
547 551
548 /* Wait for on-going read I/O and check for error */ 552 /* Wait for on-going read I/O and check for error */
@@ -570,16 +574,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
570/* 574/*
571 * Issue a metadata block write BIO. 575 * Issue a metadata block write BIO.
572 */ 576 */
573static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk, 577static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
574 unsigned int set) 578 unsigned int set)
575{ 579{
576 sector_t block = zmd->sb[set].block + mblk->no; 580 sector_t block = zmd->sb[set].block + mblk->no;
577 struct bio *bio; 581 struct bio *bio;
578 582
583 if (dmz_bdev_is_dying(zmd->dev))
584 return -EIO;
585
579 bio = bio_alloc(GFP_NOIO, 1); 586 bio = bio_alloc(GFP_NOIO, 1);
580 if (!bio) { 587 if (!bio) {
581 set_bit(DMZ_META_ERROR, &mblk->state); 588 set_bit(DMZ_META_ERROR, &mblk->state);
582 return; 589 return -ENOMEM;
583 } 590 }
584 591
585 set_bit(DMZ_META_WRITING, &mblk->state); 592 set_bit(DMZ_META_WRITING, &mblk->state);
@@ -591,6 +598,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
591 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); 598 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
592 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0); 599 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
593 submit_bio(bio); 600 submit_bio(bio);
601
602 return 0;
594} 603}
595 604
596/* 605/*
@@ -602,6 +611,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
602 struct bio *bio; 611 struct bio *bio;
603 int ret; 612 int ret;
604 613
614 if (dmz_bdev_is_dying(zmd->dev))
615 return -EIO;
616
605 bio = bio_alloc(GFP_NOIO, 1); 617 bio = bio_alloc(GFP_NOIO, 1);
606 if (!bio) 618 if (!bio)
607 return -ENOMEM; 619 return -ENOMEM;
@@ -659,22 +671,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
659{ 671{
660 struct dmz_mblock *mblk; 672 struct dmz_mblock *mblk;
661 struct blk_plug plug; 673 struct blk_plug plug;
662 int ret = 0; 674 int ret = 0, nr_mblks_submitted = 0;
663 675
664 /* Issue writes */ 676 /* Issue writes */
665 blk_start_plug(&plug); 677 blk_start_plug(&plug);
666 list_for_each_entry(mblk, write_list, link) 678 list_for_each_entry(mblk, write_list, link) {
667 dmz_write_mblock(zmd, mblk, set); 679 ret = dmz_write_mblock(zmd, mblk, set);
680 if (ret)
681 break;
682 nr_mblks_submitted++;
683 }
668 blk_finish_plug(&plug); 684 blk_finish_plug(&plug);
669 685
670 /* Wait for completion */ 686 /* Wait for completion */
671 list_for_each_entry(mblk, write_list, link) { 687 list_for_each_entry(mblk, write_list, link) {
688 if (!nr_mblks_submitted)
689 break;
672 wait_on_bit_io(&mblk->state, DMZ_META_WRITING, 690 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
673 TASK_UNINTERRUPTIBLE); 691 TASK_UNINTERRUPTIBLE);
674 if (test_bit(DMZ_META_ERROR, &mblk->state)) { 692 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
675 clear_bit(DMZ_META_ERROR, &mblk->state); 693 clear_bit(DMZ_META_ERROR, &mblk->state);
676 ret = -EIO; 694 ret = -EIO;
677 } 695 }
696 nr_mblks_submitted--;
678 } 697 }
679 698
680 /* Flush drive cache (this will also sync data) */ 699 /* Flush drive cache (this will also sync data) */
@@ -736,6 +755,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
736 */ 755 */
737 dmz_lock_flush(zmd); 756 dmz_lock_flush(zmd);
738 757
758 if (dmz_bdev_is_dying(zmd->dev)) {
759 ret = -EIO;
760 goto out;
761 }
762
739 /* Get dirty blocks */ 763 /* Get dirty blocks */
740 spin_lock(&zmd->mblk_lock); 764 spin_lock(&zmd->mblk_lock);
741 list_splice_init(&zmd->mblk_dirty_list, &write_list); 765 list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1542,7 +1566,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1542 struct dm_zone *zone; 1566 struct dm_zone *zone;
1543 1567
1544 if (list_empty(&zmd->map_rnd_list)) 1568 if (list_empty(&zmd->map_rnd_list))
1545 return NULL; 1569 return ERR_PTR(-EBUSY);
1546 1570
1547 list_for_each_entry(zone, &zmd->map_rnd_list, link) { 1571 list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1548 if (dmz_is_buf(zone)) 1572 if (dmz_is_buf(zone))
@@ -1553,7 +1577,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1553 return dzone; 1577 return dzone;
1554 } 1578 }
1555 1579
1556 return NULL; 1580 return ERR_PTR(-EBUSY);
1557} 1581}
1558 1582
1559/* 1583/*
@@ -1564,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1564 struct dm_zone *zone; 1588 struct dm_zone *zone;
1565 1589
1566 if (list_empty(&zmd->map_seq_list)) 1590 if (list_empty(&zmd->map_seq_list))
1567 return NULL; 1591 return ERR_PTR(-EBUSY);
1568 1592
1569 list_for_each_entry(zone, &zmd->map_seq_list, link) { 1593 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1570 if (!zone->bzone) 1594 if (!zone->bzone)
@@ -1573,7 +1597,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1573 return zone; 1597 return zone;
1574 } 1598 }
1575 1599
1576 return NULL; 1600 return ERR_PTR(-EBUSY);
1577} 1601}
1578 1602
1579/* 1603/*
@@ -1628,9 +1652,13 @@ again:
1628 if (op != REQ_OP_WRITE) 1652 if (op != REQ_OP_WRITE)
1629 goto out; 1653 goto out;
1630 1654
1631 /* Alloate a random zone */ 1655 /* Allocate a random zone */
1632 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1656 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1633 if (!dzone) { 1657 if (!dzone) {
1658 if (dmz_bdev_is_dying(zmd->dev)) {
1659 dzone = ERR_PTR(-EIO);
1660 goto out;
1661 }
1634 dmz_wait_for_free_zones(zmd); 1662 dmz_wait_for_free_zones(zmd);
1635 goto again; 1663 goto again;
1636 } 1664 }
@@ -1725,9 +1753,13 @@ again:
1725 if (bzone) 1753 if (bzone)
1726 goto out; 1754 goto out;
1727 1755
1728 /* Alloate a random zone */ 1756 /* Allocate a random zone */
1729 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND); 1757 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1730 if (!bzone) { 1758 if (!bzone) {
1759 if (dmz_bdev_is_dying(zmd->dev)) {
1760 bzone = ERR_PTR(-EIO);
1761 goto out;
1762 }
1731 dmz_wait_for_free_zones(zmd); 1763 dmz_wait_for_free_zones(zmd);
1732 goto again; 1764 goto again;
1733 } 1765 }
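
Nearly every dm-zoned-metadata hunk above replaces a NULL return with ERR_PTR() so callers can tell -EIO (device dying) from -ENOMEM or -EBUSY. The kernel's encoding packs a small negative errno into the top page of the address space; a userspace re-creation of the convention, close to the include/linux/err.h definitions:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            /* errnos occupy the last 4095 values of the address space */
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *get_mblock(int dying)
    {
            if (dying)
                    return ERR_PTR(-EIO);   /* backing device going away */
            return ERR_PTR(-ENOMEM);        /* allocation failed (sketch) */
    }

    int main(void)
    {
            void *mblk = get_mblock(1);

            if (IS_ERR(mblk))
                    printf("read failed: %ld\n", PTR_ERR(mblk)); /* -5 */
            return 0;
    }
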
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95eb075..d240d7ca8a8a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -37,7 +38,7 @@ enum {
37/* 38/*
38 * Number of seconds of target BIO inactivity to consider the target idle. 39 * Number of seconds of target BIO inactivity to consider the target idle.
39 */ 40 */
40#define DMZ_IDLE_PERIOD (10UL * HZ) 41#define DMZ_IDLE_PERIOD (10UL * HZ)
41 42
42/* 43/*
43 * Percentage of unmapped (free) random zones below which reclaim starts 44 * Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +135,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
134 set_bit(DM_KCOPYD_WRITE_SEQ, &flags); 135 set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
135 136
136 while (block < end_block) { 137 while (block < end_block) {
138 if (dev->flags & DMZ_BDEV_DYING)
139 return -EIO;
140
137 /* Get a valid region from the source zone */ 141 /* Get a valid region from the source zone */
138 ret = dmz_first_valid_block(zmd, src_zone, &block); 142 ret = dmz_first_valid_block(zmd, src_zone, &block);
139 if (ret <= 0) 143 if (ret <= 0)
@@ -215,7 +219,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
215 219
216 dmz_unlock_flush(zmd); 220 dmz_unlock_flush(zmd);
217 221
218 return 0; 222 return ret;
219} 223}
220 224
221/* 225/*
@@ -259,7 +263,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
259 263
260 dmz_unlock_flush(zmd); 264 dmz_unlock_flush(zmd);
261 265
262 return 0; 266 return ret;
263} 267}
264 268
265/* 269/*
@@ -312,7 +316,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
312 316
313 dmz_unlock_flush(zmd); 317 dmz_unlock_flush(zmd);
314 318
315 return 0; 319 return ret;
316} 320}
317 321
318/* 322/*
@@ -334,7 +338,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
334/* 338/*
335 * Find a candidate zone for reclaim and process it. 339 * Find a candidate zone for reclaim and process it.
336 */ 340 */
337static void dmz_reclaim(struct dmz_reclaim *zrc) 341static int dmz_do_reclaim(struct dmz_reclaim *zrc)
338{ 342{
339 struct dmz_metadata *zmd = zrc->metadata; 343 struct dmz_metadata *zmd = zrc->metadata;
340 struct dm_zone *dzone; 344 struct dm_zone *dzone;
@@ -344,8 +348,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
344 348
345 /* Get a data zone */ 349 /* Get a data zone */
346 dzone = dmz_get_zone_for_reclaim(zmd); 350 dzone = dmz_get_zone_for_reclaim(zmd);
347 if (!dzone) 351 if (IS_ERR(dzone))
348 return; 352 return PTR_ERR(dzone);
349 353
350 start = jiffies; 354 start = jiffies;
351 355
@@ -391,13 +395,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
391out: 395out:
392 if (ret) { 396 if (ret) {
393 dmz_unlock_zone_reclaim(dzone); 397 dmz_unlock_zone_reclaim(dzone);
394 return; 398 return ret;
395 } 399 }
396 400
397 (void) dmz_flush_metadata(zrc->metadata); 401 ret = dmz_flush_metadata(zrc->metadata);
402 if (ret) {
403 dmz_dev_debug(zrc->dev,
404 "Metadata flush for zone %u failed, err %d\n",
405 dmz_id(zmd, rzone), ret);
406 return ret;
407 }
398 408
399 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms", 409 dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
400 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start)); 410 dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
411 return 0;
401} 412}
402 413
403/* 414/*
@@ -427,7 +438,7 @@ static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
427 return false; 438 return false;
428 439
429 /* 440 /*
430 * If the percentage of unmappped random zones is low, 441 * If the percentage of unmapped random zones is low,
431 * reclaim even if the target is busy. 442 * reclaim even if the target is busy.
432 */ 443 */
433 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND; 444 return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
@@ -442,6 +453,10 @@ static void dmz_reclaim_work(struct work_struct *work)
442 struct dmz_metadata *zmd = zrc->metadata; 453 struct dmz_metadata *zmd = zrc->metadata;
443 unsigned int nr_rnd, nr_unmap_rnd; 454 unsigned int nr_rnd, nr_unmap_rnd;
444 unsigned int p_unmap_rnd; 455 unsigned int p_unmap_rnd;
456 int ret;
457
458 if (dmz_bdev_is_dying(zrc->dev))
459 return;
445 460
446 if (!dmz_should_reclaim(zrc)) { 461 if (!dmz_should_reclaim(zrc)) {
447 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD); 462 mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +486,17 @@ static void dmz_reclaim_work(struct work_struct *work)
471 (dmz_target_idle(zrc) ? "Idle" : "Busy"), 486 (dmz_target_idle(zrc) ? "Idle" : "Busy"),
472 p_unmap_rnd, nr_unmap_rnd, nr_rnd); 487 p_unmap_rnd, nr_unmap_rnd, nr_rnd);
473 488
474 dmz_reclaim(zrc); 489 ret = dmz_do_reclaim(zrc);
490 if (ret) {
491 dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
492 if (ret == -EIO)
493 /*
494 * LLD might be performing some error handling sequence
495 * at the underlying device. To not interfere, do not
496 * attempt to schedule the next reclaim run immediately.
497 */
498 return;
499 }
475 500
476 dmz_schedule_reclaim(zrc); 501 dmz_schedule_reclaim(zrc);
477} 502}
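
The reworked dmz_reclaim_work() now distinguishes a fatal -EIO, after which the work item is deliberately not rearmed, from other outcomes that reschedule as before. A compact sketch of that policy, with stubbed-out helpers:

    #include <errno.h>
    #include <stdio.h>

    static int do_reclaim(void)        { return -EIO; }    /* stub result */
    static void schedule_reclaim(void) { puts("rearmed"); }

    static void reclaim_work(void)
    {
            int ret = do_reclaim();

            if (ret) {
                    fprintf(stderr, "reclaim error %d\n", ret);
                    if (ret == -EIO)
                            return;  /* back off: let device recovery run */
            }
            schedule_reclaim();      /* success or transient error */
    }
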
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 51d029bbb740..31478fef6032 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -133,6 +134,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
133 134
134 refcount_inc(&bioctx->ref); 135 refcount_inc(&bioctx->ref);
135 generic_make_request(clone); 136 generic_make_request(clone);
137 if (clone->bi_status == BLK_STS_IOERR)
138 return -EIO;
136 139
137 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone)) 140 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
138 zone->wp_block += nr_blocks; 141 zone->wp_block += nr_blocks;
@@ -277,8 +280,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
277 280
278 /* Get the buffer zone. One will be allocated if needed */ 281 /* Get the buffer zone. One will be allocated if needed */
279 bzone = dmz_get_chunk_buffer(zmd, zone); 282 bzone = dmz_get_chunk_buffer(zmd, zone);
280 if (!bzone) 283 if (IS_ERR(bzone))
281 return -ENOSPC; 284 return PTR_ERR(bzone);
282 285
283 if (dmz_is_readonly(bzone)) 286 if (dmz_is_readonly(bzone))
284 return -EROFS; 287 return -EROFS;
@@ -389,6 +392,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
389 392
390 dmz_lock_metadata(zmd); 393 dmz_lock_metadata(zmd);
391 394
395 if (dmz->dev->flags & DMZ_BDEV_DYING) {
396 ret = -EIO;
397 goto out;
398 }
399
392 /* 400 /*
393 * Get the data zone mapping the chunk. There may be no 401 * Get the data zone mapping the chunk. There may be no
394 * mapping for read and discard. If a mapping is obtained, 402 * mapping for read and discard. If a mapping is obtained,
@@ -493,6 +501,8 @@ static void dmz_flush_work(struct work_struct *work)
493 501
494 /* Flush dirty metadata blocks */ 502 /* Flush dirty metadata blocks */
495 ret = dmz_flush_metadata(dmz->metadata); 503 ret = dmz_flush_metadata(dmz->metadata);
504 if (ret)
505 dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
496 506
497 /* Process queued flush requests */ 507 /* Process queued flush requests */
498 while (1) { 508 while (1) {
@@ -513,22 +523,24 @@ static void dmz_flush_work(struct work_struct *work)
513 * Get a chunk work and start it to process a new BIO. 523 * Get a chunk work and start it to process a new BIO.
514 * If the BIO chunk has no work yet, create one. 524 * If the BIO chunk has no work yet, create one.
515 */ 525 */
516static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio) 526static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
517{ 527{
518 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio); 528 unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
519 struct dm_chunk_work *cw; 529 struct dm_chunk_work *cw;
530 int ret = 0;
520 531
521 mutex_lock(&dmz->chunk_lock); 532 mutex_lock(&dmz->chunk_lock);
522 533
523 /* Get the BIO chunk work. If one is not active yet, create one */ 534 /* Get the BIO chunk work. If one is not active yet, create one */
524 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk); 535 cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
525 if (!cw) { 536 if (!cw) {
526 int ret;
527 537
528 /* Create a new chunk work */ 538 /* Create a new chunk work */
529 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO); 539 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
530 if (!cw) 540 if (unlikely(!cw)) {
541 ret = -ENOMEM;
531 goto out; 542 goto out;
543 }
532 544
533 INIT_WORK(&cw->work, dmz_chunk_work); 545 INIT_WORK(&cw->work, dmz_chunk_work);
534 refcount_set(&cw->refcount, 0); 546 refcount_set(&cw->refcount, 0);
@@ -539,7 +551,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
539 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw); 551 ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
540 if (unlikely(ret)) { 552 if (unlikely(ret)) {
541 kfree(cw); 553 kfree(cw);
542 cw = NULL;
543 goto out; 554 goto out;
544 } 555 }
545 } 556 }
@@ -547,10 +558,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
547 bio_list_add(&cw->bio_list, bio); 558 bio_list_add(&cw->bio_list, bio);
548 dmz_get_chunk_work(cw); 559 dmz_get_chunk_work(cw);
549 560
561 dmz_reclaim_bio_acc(dmz->reclaim);
550 if (queue_work(dmz->chunk_wq, &cw->work)) 562 if (queue_work(dmz->chunk_wq, &cw->work))
551 dmz_get_chunk_work(cw); 563 dmz_get_chunk_work(cw);
552out: 564out:
553 mutex_unlock(&dmz->chunk_lock); 565 mutex_unlock(&dmz->chunk_lock);
566 return ret;
567}
568
569/*
570 * Check the backing device availability. If it's on the way out,
571 * start failing I/O. Reclaim and metadata components also call this
572 * function to cleanly abort operation in the event of such failure.
573 */
574bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
575{
576 struct gendisk *disk;
577
578 if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
579 disk = dmz_dev->bdev->bd_disk;
580 if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
581 dmz_dev_warn(dmz_dev, "Backing device queue dying");
582 dmz_dev->flags |= DMZ_BDEV_DYING;
583 } else if (disk->fops->check_events) {
584 if (disk->fops->check_events(disk, 0) &
585 DISK_EVENT_MEDIA_CHANGE) {
586 dmz_dev_warn(dmz_dev, "Backing device offline");
587 dmz_dev->flags |= DMZ_BDEV_DYING;
588 }
589 }
590 }
591
592 return dmz_dev->flags & DMZ_BDEV_DYING;
554} 593}
555 594
556/* 595/*
@@ -564,6 +603,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
564 sector_t sector = bio->bi_iter.bi_sector; 603 sector_t sector = bio->bi_iter.bi_sector;
565 unsigned int nr_sectors = bio_sectors(bio); 604 unsigned int nr_sectors = bio_sectors(bio);
566 sector_t chunk_sector; 605 sector_t chunk_sector;
606 int ret;
607
608 if (dmz_bdev_is_dying(dmz->dev))
609 return DM_MAPIO_KILL;
567 610
568 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks", 611 dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
569 bio_op(bio), (unsigned long long)sector, nr_sectors, 612 bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +644,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
601 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector); 644 dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
602 645
603 /* Now ready to handle this BIO */ 646 /* Now ready to handle this BIO */
604 dmz_reclaim_bio_acc(dmz->reclaim); 647 ret = dmz_queue_chunk_work(dmz, bio);
605 dmz_queue_chunk_work(dmz, bio); 648 if (ret) {
649 dmz_dev_debug(dmz->dev,
650 "BIO op %d, can't process chunk %llu, err %i\n",
651 bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
652 ret);
653 return DM_MAPIO_REQUEUE;
654 }
606 655
607 return DM_MAPIO_SUBMITTED; 656 return DM_MAPIO_SUBMITTED;
608} 657}
@@ -855,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
855{ 904{
856 struct dmz_target *dmz = ti->private; 905 struct dmz_target *dmz = ti->private;
857 906
907 if (dmz_bdev_is_dying(dmz->dev))
908 return -ENODEV;
909
858 *bdev = dmz->dev->bdev; 910 *bdev = dmz->dev->bdev;
859 911
860 return 0; 912 return 0;
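
dmz_bdev_is_dying() above probes the block layer only while DMZ_BDEV_DYING is clear and then latches the verdict in the flags word, so the dying state is sticky and every later call is a cheap bit test. The same latch, reduced to standalone C (the probe callback stands in for blk_queue_dying() and the media-change check):

    #include <stdbool.h>

    #define BDEV_DYING (1u << 0)

    struct zdev {
            unsigned int flags;
            bool (*probe_dying)(void);  /* stand-in for blk_queue_dying() */
    };

    static bool bdev_is_dying(struct zdev *d)
    {
            if (!(d->flags & BDEV_DYING)) {
                    if (d->probe_dying())
                            d->flags |= BDEV_DYING; /* sticky, never cleared */
            }
            return d->flags & BDEV_DYING;
    }
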
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49c9a08..d8e70b0ade35 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 * 4 *
@@ -56,6 +57,8 @@ struct dmz_dev {
56 57
57 unsigned int nr_zones; 58 unsigned int nr_zones;
58 59
60 unsigned int flags;
61
59 sector_t zone_nr_sectors; 62 sector_t zone_nr_sectors;
60 unsigned int zone_nr_sectors_shift; 63 unsigned int zone_nr_sectors_shift;
61 64
@@ -67,6 +70,9 @@ struct dmz_dev {
67 (dev)->zone_nr_sectors_shift) 70 (dev)->zone_nr_sectors_shift)
68#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1)) 71#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
69 72
73/* Device flags. */
74#define DMZ_BDEV_DYING (1 << 0)
75
70/* 76/*
71 * Zone descriptor. 77 * Zone descriptor.
72 */ 78 */
@@ -245,4 +251,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
245void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc); 251void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
246void dmz_schedule_reclaim(struct dmz_reclaim *zrc); 252void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
247 253
254/*
255 * Functions defined in dm-zoned-target.c
256 */
257bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
258
248#endif /* DM_ZONED_H */ 259#endif /* DM_ZONED_H */
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b319757b1e..8aae0624a297 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
628 628
629 new_parent = shadow_current(s); 629 new_parent = shadow_current(s);
630 630
631 pn = dm_block_data(new_parent);
632 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
633 sizeof(__le64) : s->info->value_type.size;
634
635 /* create & init the left block */
631 r = new_block(s->info, &left); 636 r = new_block(s->info, &left);
632 if (r < 0) 637 if (r < 0)
633 return r; 638 return r;
634 639
640 ln = dm_block_data(left);
641 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
642
643 ln->header.flags = pn->header.flags;
644 ln->header.nr_entries = cpu_to_le32(nr_left);
645 ln->header.max_entries = pn->header.max_entries;
646 ln->header.value_size = pn->header.value_size;
647 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
648 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
649
650 /* create & init the right block */
635 r = new_block(s->info, &right); 651 r = new_block(s->info, &right);
636 if (r < 0) { 652 if (r < 0) {
637 unlock_block(s->info, left); 653 unlock_block(s->info, left);
638 return r; 654 return r;
639 } 655 }
640 656
641 pn = dm_block_data(new_parent);
642 ln = dm_block_data(left);
643 rn = dm_block_data(right); 657 rn = dm_block_data(right);
644
645 nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
646 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left; 658 nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
647 659
648 ln->header.flags = pn->header.flags;
649 ln->header.nr_entries = cpu_to_le32(nr_left);
650 ln->header.max_entries = pn->header.max_entries;
651 ln->header.value_size = pn->header.value_size;
652
653 rn->header.flags = pn->header.flags; 660 rn->header.flags = pn->header.flags;
654 rn->header.nr_entries = cpu_to_le32(nr_right); 661 rn->header.nr_entries = cpu_to_le32(nr_right);
655 rn->header.max_entries = pn->header.max_entries; 662 rn->header.max_entries = pn->header.max_entries;
656 rn->header.value_size = pn->header.value_size; 663 rn->header.value_size = pn->header.value_size;
657
658 memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
659 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0])); 664 memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
660
661 size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
662 sizeof(__le64) : s->info->value_type.size;
663 memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
664 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left), 665 memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
665 nr_right * size); 666 nr_right * size);
666 667
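
The dm-btree hunk reorders btree_split_beneath() so the left child is fully populated before the second block is allocated: if allocating the right block fails, the already-created left block must not be left half-initialised. The shape of the reordering, as a standalone sketch with calloc() standing in for new_block():

    #include <stdlib.h>
    #include <string.h>

    struct node { unsigned nr; long keys[8]; };

    static int split(struct node *parent, struct node **lp, struct node **rp)
    {
            unsigned nr_left  = parent->nr / 2;
            unsigned nr_right = parent->nr - nr_left;
            struct node *l, *r;

            l = calloc(1, sizeof(*l));
            if (!l)
                    return -1;
            l->nr = nr_left;                  /* fully init left first ... */
            memcpy(l->keys, parent->keys, nr_left * sizeof(long));

            r = calloc(1, sizeof(*r));        /* ... then allocate right */
            if (!r) {
                    free(l);
                    return -1;                /* left is complete: easy undo */
            }
            r->nr = nr_right;
            memcpy(r->keys, parent->keys + nr_left, nr_right * sizeof(long));

            *lp = l;
            *rp = r;
            return 0;
    }
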
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec449243966..25328582cc48 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
249 } 249 }
250 250
251 if (smm->recursion_count == 1) 251 if (smm->recursion_count == 1)
252 apply_bops(smm); 252 r = apply_bops(smm);
253 253
254 smm->recursion_count--; 254 smm->recursion_count--;
255 255
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 29e3f5da59c1..11ec048929e8 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -253,8 +253,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
253 */ 253 */
254 254
255 pixsize = vout->bpp * vout->vrfb_bpp; 255 pixsize = vout->bpp * vout->vrfb_bpp;
256 dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) - 256 dst_icg = MAX_PIXELS_PER_LINE * pixsize - vout->pix.width * vout->bpp;
257 (vout->pix.width * vout->bpp)) + 1;
258 257
259 xt->src_start = vout->buf_phy_addr[vb->i]; 258 xt->src_start = vout->buf_phy_addr[vb->i];
260 xt->dst_start = vout->vrfb_context[vb->i].paddr[0]; 259 xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 601cefb5c9d8..050478cabc95 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -729,7 +729,7 @@ static int rk808_remove(struct i2c_client *client)
729 return 0; 729 return 0;
730} 730}
731 731
732static int rk8xx_suspend(struct device *dev) 732static int __maybe_unused rk8xx_suspend(struct device *dev)
733{ 733{
734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 734 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
735 int ret = 0; 735 int ret = 0;
@@ -749,7 +749,7 @@ static int rk8xx_suspend(struct device *dev)
749 return ret; 749 return ret;
750} 750}
751 751
752static int rk8xx_resume(struct device *dev) 752static int __maybe_unused rk8xx_resume(struct device *dev)
753{ 753{
754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); 754 struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
755 int ret = 0; 755 int ret = 0;
@@ -768,7 +768,7 @@ static int rk8xx_resume(struct device *dev)
768 768
769 return ret; 769 return ret;
770} 770}
771SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume); 771static SIMPLE_DEV_PM_OPS(rk8xx_pm_ops, rk8xx_suspend, rk8xx_resume);
772 772
773static struct i2c_driver rk808_i2c_driver = { 773static struct i2c_driver rk808_i2c_driver = {
774 .driver = { 774 .driver = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 6abfc8e92fcc..16900357afc2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -465,6 +465,7 @@ config PCI_ENDPOINT_TEST
465 465
466config XILINX_SDFEC 466config XILINX_SDFEC
467 tristate "Xilinx SDFEC 16" 467 tristate "Xilinx SDFEC 16"
468 depends on HAS_IOMEM
468 help 469 help
469 This option enables support for the Xilinx SDFEC (Soft Decision 470 This option enables support for the Xilinx SDFEC (Soft Decision
470 Forward Error Correction) driver. This enables a char driver 471 Forward Error Correction) driver. This enables a char driver
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 0c4894dd9c02..7a8f9d0b71b5 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -970,7 +970,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
970 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); 970 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
971 if (rc) { 971 if (rc) {
972 dev_err(hdev->dev, "failed to initialize kernel context\n"); 972 dev_err(hdev->dev, "failed to initialize kernel context\n");
973 goto free_ctx; 973 kfree(hdev->kernel_ctx);
974 goto mmu_fini;
974 } 975 }
975 976
976 rc = hl_cb_pool_init(hdev); 977 rc = hl_cb_pool_init(hdev);
@@ -1053,8 +1054,6 @@ release_ctx:
1053 if (hl_ctx_put(hdev->kernel_ctx) != 1) 1054 if (hl_ctx_put(hdev->kernel_ctx) != 1)
1054 dev_err(hdev->dev, 1055 dev_err(hdev->dev,
1055 "kernel ctx is still alive on initialization failure\n"); 1056 "kernel ctx is still alive on initialization failure\n");
1056free_ctx:
1057 kfree(hdev->kernel_ctx);
1058mmu_fini: 1057mmu_fini:
1059 hl_mmu_fini(hdev); 1058 hl_mmu_fini(hdev);
1060eq_fini: 1059eq_fini:
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index a0e181714891..271c5c8f53b4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -2729,9 +2729,10 @@ void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2729 GOYA_ASYNC_EVENT_ID_PI_UPDATE); 2729 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2730} 2730}
2731 2731
2732void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val) 2732void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2733{ 2733{
2734 /* Not needed in Goya */ 2734 /* The QMANs are on the SRAM so need to copy to IO space */
2735 memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2735} 2736}
2736 2737
2737static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, 2738static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
@@ -3313,9 +3314,11 @@ static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3313 int rc; 3314 int rc;
3314 3315
3315 dev_dbg(hdev->dev, "DMA packet details:\n"); 3316 dev_dbg(hdev->dev, "DMA packet details:\n");
3316 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); 3317 dev_dbg(hdev->dev, "source == 0x%llx\n",
3317 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); 3318 le64_to_cpu(user_dma_pkt->src_addr));
3318 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); 3319 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3320 le64_to_cpu(user_dma_pkt->dst_addr));
3321 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3319 3322
3320 ctl = le32_to_cpu(user_dma_pkt->ctl); 3323 ctl = le32_to_cpu(user_dma_pkt->ctl);
3321 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> 3324 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
@@ -3344,9 +3347,11 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3344 struct packet_lin_dma *user_dma_pkt) 3347 struct packet_lin_dma *user_dma_pkt)
3345{ 3348{
3346 dev_dbg(hdev->dev, "DMA packet details:\n"); 3349 dev_dbg(hdev->dev, "DMA packet details:\n");
3347 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); 3350 dev_dbg(hdev->dev, "source == 0x%llx\n",
3348 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); 3351 le64_to_cpu(user_dma_pkt->src_addr));
3349 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); 3352 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3353 le64_to_cpu(user_dma_pkt->dst_addr));
3354 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3350 3355
3351 /* 3356 /*
3352 * WA for HW-23. 3357 * WA for HW-23.
@@ -3386,7 +3391,8 @@ static int goya_validate_wreg32(struct hl_device *hdev,
3386 3391
3387 dev_dbg(hdev->dev, "WREG32 packet details:\n"); 3392 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3388 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset); 3393 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3389 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value); 3394 dev_dbg(hdev->dev, "value == 0x%x\n",
3395 le32_to_cpu(wreg_pkt->value));
3390 3396
3391 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) { 3397 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3392 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n", 3398 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
@@ -3428,12 +3434,13 @@ static int goya_validate_cb(struct hl_device *hdev,
3428 while (cb_parsed_length < parser->user_cb_size) { 3434 while (cb_parsed_length < parser->user_cb_size) {
3429 enum packet_id pkt_id; 3435 enum packet_id pkt_id;
3430 u16 pkt_size; 3436 u16 pkt_size;
3431 void *user_pkt; 3437 struct goya_packet *user_pkt;
3432 3438
3433 user_pkt = (void *) (uintptr_t) 3439 user_pkt = (struct goya_packet *) (uintptr_t)
3434 (parser->user_cb->kernel_address + cb_parsed_length); 3440 (parser->user_cb->kernel_address + cb_parsed_length);
3435 3441
3436 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & 3442 pkt_id = (enum packet_id) (
3443 (le64_to_cpu(user_pkt->header) &
3437 PACKET_HEADER_PACKET_ID_MASK) >> 3444 PACKET_HEADER_PACKET_ID_MASK) >>
3438 PACKET_HEADER_PACKET_ID_SHIFT); 3445 PACKET_HEADER_PACKET_ID_SHIFT);
3439 3446
@@ -3453,7 +3460,8 @@ static int goya_validate_cb(struct hl_device *hdev,
3453 * need to validate here as well because patch_cb() is 3460 * need to validate here as well because patch_cb() is
3454 * not called in MMU path while this function is called 3461 * not called in MMU path while this function is called
3455 */ 3462 */
3456 rc = goya_validate_wreg32(hdev, parser, user_pkt); 3463 rc = goya_validate_wreg32(hdev,
3464 parser, (struct packet_wreg32 *) user_pkt);
3457 break; 3465 break;
3458 3466
3459 case PACKET_WREG_BULK: 3467 case PACKET_WREG_BULK:
@@ -3481,10 +3489,10 @@ static int goya_validate_cb(struct hl_device *hdev,
3481 case PACKET_LIN_DMA: 3489 case PACKET_LIN_DMA:
3482 if (is_mmu) 3490 if (is_mmu)
3483 rc = goya_validate_dma_pkt_mmu(hdev, parser, 3491 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3484 user_pkt); 3492 (struct packet_lin_dma *) user_pkt);
3485 else 3493 else
3486 rc = goya_validate_dma_pkt_no_mmu(hdev, parser, 3494 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3487 user_pkt); 3495 (struct packet_lin_dma *) user_pkt);
3488 break; 3496 break;
3489 3497
3490 case PACKET_MSG_LONG: 3498 case PACKET_MSG_LONG:
@@ -3657,15 +3665,16 @@ static int goya_patch_cb(struct hl_device *hdev,
3657 enum packet_id pkt_id; 3665 enum packet_id pkt_id;
3658 u16 pkt_size; 3666 u16 pkt_size;
3659 u32 new_pkt_size = 0; 3667 u32 new_pkt_size = 0;
3660 void *user_pkt, *kernel_pkt; 3668 struct goya_packet *user_pkt, *kernel_pkt;
3661 3669
3662 user_pkt = (void *) (uintptr_t) 3670 user_pkt = (struct goya_packet *) (uintptr_t)
3663 (parser->user_cb->kernel_address + cb_parsed_length); 3671 (parser->user_cb->kernel_address + cb_parsed_length);
3664 kernel_pkt = (void *) (uintptr_t) 3672 kernel_pkt = (struct goya_packet *) (uintptr_t)
3665 (parser->patched_cb->kernel_address + 3673 (parser->patched_cb->kernel_address +
3666 cb_patched_cur_length); 3674 cb_patched_cur_length);
3667 3675
3668 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & 3676 pkt_id = (enum packet_id) (
3677 (le64_to_cpu(user_pkt->header) &
3669 PACKET_HEADER_PACKET_ID_MASK) >> 3678 PACKET_HEADER_PACKET_ID_MASK) >>
3670 PACKET_HEADER_PACKET_ID_SHIFT); 3679 PACKET_HEADER_PACKET_ID_SHIFT);
3671 3680
@@ -3680,15 +3689,18 @@ static int goya_patch_cb(struct hl_device *hdev,
3680 3689
3681 switch (pkt_id) { 3690 switch (pkt_id) {
3682 case PACKET_LIN_DMA: 3691 case PACKET_LIN_DMA:
3683 rc = goya_patch_dma_packet(hdev, parser, user_pkt, 3692 rc = goya_patch_dma_packet(hdev, parser,
3684 kernel_pkt, &new_pkt_size); 3693 (struct packet_lin_dma *) user_pkt,
3694 (struct packet_lin_dma *) kernel_pkt,
3695 &new_pkt_size);
3685 cb_patched_cur_length += new_pkt_size; 3696 cb_patched_cur_length += new_pkt_size;
3686 break; 3697 break;
3687 3698
3688 case PACKET_WREG_32: 3699 case PACKET_WREG_32:
3689 memcpy(kernel_pkt, user_pkt, pkt_size); 3700 memcpy(kernel_pkt, user_pkt, pkt_size);
3690 cb_patched_cur_length += pkt_size; 3701 cb_patched_cur_length += pkt_size;
3691 rc = goya_validate_wreg32(hdev, parser, kernel_pkt); 3702 rc = goya_validate_wreg32(hdev, parser,
3703 (struct packet_wreg32 *) kernel_pkt);
3692 break; 3704 break;
3693 3705
3694 case PACKET_WREG_BULK: 3706 case PACKET_WREG_BULK:
@@ -4352,6 +4364,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4352 size_t total_pkt_size; 4364 size_t total_pkt_size;
4353 long result; 4365 long result;
4354 int rc; 4366 int rc;
4367 int irq_num_entries, irq_arr_index;
4368 __le32 *goya_irq_arr;
4355 4369
4356 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) + 4370 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4357 irq_arr_size; 4371 irq_arr_size;
@@ -4369,8 +4383,16 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4369 if (!pkt) 4383 if (!pkt)
4370 return -ENOMEM; 4384 return -ENOMEM;
4371 4385
4372 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); 4386 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4373 memcpy(&pkt->irqs, irq_arr, irq_arr_size); 4387 pkt->length = cpu_to_le32(irq_num_entries);
4388
4389 /* We must perform any necessary endianness conversion on the irq
4390 * array being passed to the goya hardware
4391 */
4392 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4393 irq_arr_index < irq_num_entries ; irq_arr_index++)
4394 goya_irq_arr[irq_arr_index] =
4395 cpu_to_le32(irq_arr[irq_arr_index]);
4374 4396
4375 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << 4397 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4376 ARMCP_PKT_CTL_OPCODE_SHIFT); 4398 ARMCP_PKT_CTL_OPCODE_SHIFT);
@@ -5042,7 +5064,7 @@ static const struct hl_asic_funcs goya_funcs = {
5042 .resume = goya_resume, 5064 .resume = goya_resume,
5043 .cb_mmap = goya_cb_mmap, 5065 .cb_mmap = goya_cb_mmap,
5044 .ring_doorbell = goya_ring_doorbell, 5066 .ring_doorbell = goya_ring_doorbell,
5045 .flush_pq_write = goya_flush_pq_write, 5067 .pqe_write = goya_pqe_write,
5046 .asic_dma_alloc_coherent = goya_dma_alloc_coherent, 5068 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5047 .asic_dma_free_coherent = goya_dma_free_coherent, 5069 .asic_dma_free_coherent = goya_dma_free_coherent,
5048 .get_int_queue_base = goya_get_int_queue_base, 5070 .get_int_queue_base = goya_get_int_queue_base,
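
The goya_unmask_irq_arr() hunk replaces a raw memcpy() of the IRQ array with a per-entry cpu_to_le32() loop, since the device consumes little-endian words regardless of host byte order. A portable userspace equivalent of that conversion (the bswap builtin assumes GCC or Clang):

    #include <stdint.h>

    static inline uint32_t my_cpu_to_le32(uint32_t v)
    {
            const union { uint16_t u; uint8_t b; } probe = { .u = 1 };

            if (probe.b)                    /* little-endian host: no-op */
                    return v;
            return __builtin_bswap32(v);    /* big-endian host: swap */
    }

    static void pack_irqs(uint32_t *le_out, const uint32_t *irq_arr, int n)
    {
            int i;

            for (i = 0; i < n; i++)
                    le_out[i] = my_cpu_to_le32(irq_arr[i]);
    }
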
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index f8c611883dc1..d7f48c9c41cd 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -177,7 +177,7 @@ int goya_late_init(struct hl_device *hdev);
177void goya_late_fini(struct hl_device *hdev); 177void goya_late_fini(struct hl_device *hdev);
178 178
179void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 179void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
180void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); 180void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd);
181void goya_update_eq_ci(struct hl_device *hdev, u32 val); 181void goya_update_eq_ci(struct hl_device *hdev, u32 val);
182void goya_restore_phase_topology(struct hl_device *hdev); 182void goya_restore_phase_topology(struct hl_device *hdev);
183int goya_context_switch(struct hl_device *hdev, u32 asid); 183int goya_context_switch(struct hl_device *hdev, u32 asid);
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 6a4c64b97f38..ce83adafcf2d 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -441,7 +441,11 @@ enum hl_pll_frequency {
441 * @resume: handles IP specific H/W or SW changes for resume. 441 * @resume: handles IP specific H/W or SW changes for resume.
442 * @cb_mmap: maps a CB. 442 * @cb_mmap: maps a CB.
443 * @ring_doorbell: increment PI on a given QMAN. 443 * @ring_doorbell: increment PI on a given QMAN.
444 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. 444 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
445 * function because the PQs are located in different memory areas
446 * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
447 * writing the PQE must match the destination memory area
448 * properties.
445 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling 449 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
446 * dma_alloc_coherent(). This is ASIC function because 450 * dma_alloc_coherent(). This is ASIC function because
447 * its implementation is not trivial when the driver 451 * its implementation is not trivial when the driver
@@ -510,7 +514,8 @@ struct hl_asic_funcs {
510 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, 514 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
511 u64 kaddress, phys_addr_t paddress, u32 size); 515 u64 kaddress, phys_addr_t paddress, u32 size);
512 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); 516 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
513 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); 517 void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
518 struct hl_bd *bd);
514 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size, 519 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
515 dma_addr_t *dma_handle, gfp_t flag); 520 dma_addr_t *dma_handle, gfp_t flag);
516 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size, 521 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index e3b5517897ea..5f5673b74985 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -290,23 +290,19 @@ static void int_hw_queue_schedule_job(struct hl_cs_job *job)
290 struct hl_device *hdev = job->cs->ctx->hdev; 290 struct hl_device *hdev = job->cs->ctx->hdev;
291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; 291 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
292 struct hl_bd bd; 292 struct hl_bd bd;
293 u64 *pi, *pbd = (u64 *) &bd; 293 __le64 *pi;
294 294
295 bd.ctl = 0; 295 bd.ctl = 0;
296 bd.len = __cpu_to_le32(job->job_cb_size); 296 bd.len = cpu_to_le32(job->job_cb_size);
297 bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb); 297 bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
298 298
299 pi = (u64 *) (uintptr_t) (q->kernel_address + 299 pi = (__le64 *) (uintptr_t) (q->kernel_address +
300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); 300 ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
301 301
302 pi[0] = pbd[0];
303 pi[1] = pbd[1];
304
305 q->pi++; 302 q->pi++;
306 q->pi &= ((q->int_queue_len << 1) - 1); 303 q->pi &= ((q->int_queue_len << 1) - 1);
307 304
308 /* Flush PQ entry write. Relevant only for specific ASICs */ 305 hdev->asic_funcs->pqe_write(hdev, pi, &bd);
309 hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]);
310 306
311 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); 307 hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
312} 308}
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h
index a14407b975e4..ef54bad20509 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/misc/habanalabs/include/goya/goya_packets.h
@@ -52,6 +52,19 @@ enum goya_dma_direction {
52#define GOYA_PKT_CTL_MB_SHIFT 31 52#define GOYA_PKT_CTL_MB_SHIFT 31
53#define GOYA_PKT_CTL_MB_MASK 0x80000000 53#define GOYA_PKT_CTL_MB_MASK 0x80000000
54 54
55/* All packets have, at least, an 8-byte header, which contains
56 * the packet type. The kernel driver uses the packet header for packet
57 * validation and to perform any necessary preparation before
58 * sending them off to the hardware.
59 */
60struct goya_packet {
61 __le64 header;
62 /* The rest of the packet data follows. Use the corresponding
63 * packet_XXX struct to deference the data, based on packet type
64 */
65 u8 contents[0];
66};
67
55struct packet_nop { 68struct packet_nop {
56 __le32 reserved; 69 __le32 reserved;
57 __le32 ctl; 70 __le32 ctl;
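
The new struct goya_packet gives every packet a typed 8-byte header followed by a flexible payload, so the parser can extract the packet id before deciding which packet_XXX struct applies. A sketch of that decode step; the mask and shift values here are illustrative, not Goya's real field layout:

    #include <stdint.h>

    #define PKT_ID_MASK  0x1F00000000000000ull   /* hypothetical field */
    #define PKT_ID_SHIFT 56

    struct pkt {
            uint64_t header;        /* little-endian on the wire */
            uint8_t contents[];     /* per-type payload follows */
    };

    static unsigned pkt_id(const struct pkt *p)
    {
            /* assumes a little-endian host; otherwise byte-swap first */
            return (unsigned)((p->header & PKT_ID_MASK) >> PKT_ID_SHIFT);
    }
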
diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c
index ea9f72ff456c..199791b57caf 100644
--- a/drivers/misc/habanalabs/irq.c
+++ b/drivers/misc/habanalabs/irq.c
@@ -80,8 +80,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
80 struct hl_cs_job *job; 80 struct hl_cs_job *job;
81 bool shadow_index_valid; 81 bool shadow_index_valid;
82 u16 shadow_index; 82 u16 shadow_index;
83 u32 *cq_entry; 83 struct hl_cq_entry *cq_entry, *cq_base;
84 u32 *cq_base;
85 84
86 if (hdev->disabled) { 85 if (hdev->disabled) {
87 dev_dbg(hdev->dev, 86 dev_dbg(hdev->dev,
@@ -90,29 +89,29 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
90 return IRQ_HANDLED; 89 return IRQ_HANDLED;
91 } 90 }
92 91
93 cq_base = (u32 *) (uintptr_t) cq->kernel_address; 92 cq_base = (struct hl_cq_entry *) (uintptr_t) cq->kernel_address;
94 93
95 while (1) { 94 while (1) {
96 bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) 95 bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
96 CQ_ENTRY_READY_MASK)
97 >> CQ_ENTRY_READY_SHIFT); 97 >> CQ_ENTRY_READY_SHIFT);
98 98
99 if (!entry_ready) 99 if (!entry_ready)
100 break; 100 break;
101 101
102 cq_entry = (u32 *) &cq_base[cq->ci]; 102 cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
103 103
104 /* 104 /* Make sure we read CQ entry contents after we've
105 * Make sure we read CQ entry contents after we've
106 * checked the ownership bit. 105 * checked the ownership bit.
107 */ 106 */
108 dma_rmb(); 107 dma_rmb();
109 108
110 shadow_index_valid = 109 shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
111 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) 110 CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
112 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); 111 >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
113 112
114 shadow_index = (u16) 113 shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
115 ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) 114 CQ_ENTRY_SHADOW_INDEX_MASK)
116 >> CQ_ENTRY_SHADOW_INDEX_SHIFT); 115 >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
117 116
118 queue = &hdev->kernel_queues[cq->hw_queue_id]; 117 queue = &hdev->kernel_queues[cq->hw_queue_id];
@@ -122,8 +121,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
122 queue_work(hdev->cq_wq, &job->finish_work); 121 queue_work(hdev->cq_wq, &job->finish_work);
123 } 122 }
124 123
125 /* 124 /* Update ci of the context's queue. There is no
126 * Update ci of the context's queue. There is no
127 * need to protect it with spinlock because this update is 125 * need to protect it with spinlock because this update is
128 * done only inside IRQ and there is a different IRQ per 126 * done only inside IRQ and there is a different IRQ per
129 * queue 127 * queue
@@ -131,7 +129,8 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
131 queue->ci = hl_queue_inc_ptr(queue->ci); 129 queue->ci = hl_queue_inc_ptr(queue->ci);
132 130
133 /* Clear CQ entry ready bit */ 131 /* Clear CQ entry ready bit */
134 cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; 132 cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
133 ~CQ_ENTRY_READY_MASK);
135 134
136 cq->ci = hl_cq_inc_ptr(cq->ci); 135 cq->ci = hl_cq_inc_ptr(cq->ci);
137 136
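
The CQ handler above keeps a strict order: test the ready/ownership bit, then dma_rmb(), then read the entry payload, and finally clear the ready bit to hand the slot back. In portable C11 the same one-way ordering can be expressed with acquire/release atomics, as in this sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    struct cq_entry {
            _Atomic uint32_t ready;  /* ownership bit, set by the device */
            uint32_t data;
    };

    static int consume(struct cq_entry *e, uint32_t *out)
    {
            /* Acquire load: payload reads cannot move above this check,
             * which is the role dma_rmb() plays in the handler above. */
            if (!atomic_load_explicit(&e->ready, memory_order_acquire))
                    return 0;                /* entry not ours yet */

            *out = e->data;
            atomic_store_explicit(&e->ready, 0, memory_order_release);
            return 1;                        /* slot returned to producer */
    }
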
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 42d237cae1dc..365fb0cb8dff 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -1629,6 +1629,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
1629 dev_dbg(hdev->dev, 1629 dev_dbg(hdev->dev,
1630 "page list 0x%p of asid %d is still alive\n", 1630 "page list 0x%p of asid %d is still alive\n",
1631 phys_pg_list, ctx->asid); 1631 phys_pg_list, ctx->asid);
1632 atomic64_sub(phys_pg_list->total_size,
1633 &hdev->dram_used_mem);
1632 free_phys_pg_pack(hdev, phys_pg_list); 1634 free_phys_pg_pack(hdev, phys_pg_list);
1633 idr_remove(&vm->phys_pg_pack_handles, i); 1635 idr_remove(&vm->phys_pg_pack_handles, i);
1634 } 1636 }
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index d681e8aaca83..fe914ff5f5d6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1292,6 +1292,12 @@ int mmc_attach_sd(struct mmc_host *host)
1292 goto err; 1292 goto err;
1293 } 1293 }
1294 1294
1295 /*
 1296 * Some SD cards claim an out-of-spec VDD voltage range. Let's treat
 1297 * these bits as invalid, especially bit 7.
1298 */
1299 ocr &= ~0x7FFF;
1300
1295 rocr = mmc_select_voltage(host, ocr); 1301 rocr = mmc_select_voltage(host, ocr);
1296 1302
1297 /* 1303 /*
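
The sd.c fix clears OCR bits 0..14 before voltage selection, since a spec-conforming SD card may only report the 2.7-3.6 V window in bits 15..23. The arithmetic, runnable:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ocr = 0x00FF80FF;  /* card claiming out-of-spec bits */

            ocr &= ~0x7FFFu;            /* drop bits 0..14, incl. bit 7 */
            printf("0x%08X\n", ocr);    /* 0x00FF8000: 2.7-3.6 V window */
            return 0;
    }
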
diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c
index 163d1cf4367e..44139fceac24 100644
--- a/drivers/mmc/host/sdhci-cadence.c
+++ b/drivers/mmc/host/sdhci-cadence.c
@@ -369,6 +369,7 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning; 369 host->mmc_host_ops.execute_tuning = sdhci_cdns_execute_tuning;
370 host->mmc_host_ops.hs400_enhanced_strobe = 370 host->mmc_host_ops.hs400_enhanced_strobe =
371 sdhci_cdns_hs400_enhanced_strobe; 371 sdhci_cdns_hs400_enhanced_strobe;
372 sdhci_enable_v4_mode(host);
372 373
373 sdhci_get_of_property(pdev); 374 sdhci_get_of_property(pdev);
374 375
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index d4e7e8b7be77..e7d1920729fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -357,6 +357,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50); 357 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
358 pm_runtime_use_autosuspend(&pdev->dev); 358 pm_runtime_use_autosuspend(&pdev->dev);
359 359
360 /* HS200 is broken at this moment */
361 host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
362
360 ret = sdhci_add_host(host); 363 ret = sdhci_add_host(host);
361 if (ret) 364 if (ret)
362 goto pm_runtime_disable; 365 goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 83a4767ca680..d07b9793380f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -217,10 +217,11 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host,
217 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host); 217 struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
218 u32 div, val, mask; 218 u32 div, val, mask;
219 219
220 div = sdhci_sprd_calc_div(sprd_host->base_rate, clk); 220 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
221 221
222 clk |= ((div & 0x300) >> 2) | ((div & 0xFF) << 8); 222 div = sdhci_sprd_calc_div(sprd_host->base_rate, clk);
223 sdhci_enable_clk(host, clk); 223 div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8);
224 sdhci_enable_clk(host, div);
224 225
225 /* enable auto gate sdhc_enable_auto_gate */ 226 /* enable auto gate sdhc_enable_auto_gate */
226 val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); 227 val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI);
@@ -373,6 +374,11 @@ static unsigned int sdhci_sprd_get_max_timeout_count(struct sdhci_host *host)
373 return 1 << 31; 374 return 1 << 31;
374} 375}
375 376
377static unsigned int sdhci_sprd_get_ro(struct sdhci_host *host)
378{
379 return 0;
380}
381
376static struct sdhci_ops sdhci_sprd_ops = { 382static struct sdhci_ops sdhci_sprd_ops = {
377 .read_l = sdhci_sprd_readl, 383 .read_l = sdhci_sprd_readl,
378 .write_l = sdhci_sprd_writel, 384 .write_l = sdhci_sprd_writel,
@@ -385,6 +391,7 @@ static struct sdhci_ops sdhci_sprd_ops = {
385 .set_uhs_signaling = sdhci_sprd_set_uhs_signaling, 391 .set_uhs_signaling = sdhci_sprd_set_uhs_signaling,
386 .hw_reset = sdhci_sprd_hw_reset, 392 .hw_reset = sdhci_sprd_hw_reset,
387 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count, 393 .get_max_timeout_count = sdhci_sprd_get_max_timeout_count,
394 .get_ro = sdhci_sprd_get_ro,
388}; 395};
389 396
390static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq) 397static void sdhci_sprd_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -501,9 +508,12 @@ static void sdhci_sprd_phy_param_parse(struct sdhci_sprd_host *sprd_host,
501} 508}
502 509
503static const struct sdhci_pltfm_data sdhci_sprd_pdata = { 510static const struct sdhci_pltfm_data sdhci_sprd_pdata = {
504 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, 511 .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
512 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
513 SDHCI_QUIRK_MISSING_CAPS,
505 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 | 514 .quirks2 = SDHCI_QUIRK2_BROKEN_HS200 |
506 SDHCI_QUIRK2_USE_32BIT_BLK_CNT, 515 SDHCI_QUIRK2_USE_32BIT_BLK_CNT |
516 SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
507 .ops = &sdhci_sprd_ops, 517 .ops = &sdhci_sprd_ops,
508}; 518};
509 519
@@ -605,6 +615,16 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
605 615
606 sdhci_enable_v4_mode(host); 616 sdhci_enable_v4_mode(host);
607 617
618 /*
619 * Supply the existing CAPS, but clear the UHS-I modes. This
620 * will allow these modes to be specified only by device
621 * tree properties through mmc_of_parse().
622 */
623 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
624 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
625 host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
626 SDHCI_SUPPORT_DDR50);
627
608 ret = sdhci_setup_host(host); 628 ret = sdhci_setup_host(host);
609 if (ret) 629 if (ret)
610 goto pm_runtime_disable; 630 goto pm_runtime_disable;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index f4d4761cf20a..02d8f524bb9e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -258,6 +258,16 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
258 } 258 }
259} 259}
260 260
261static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
262{
263 /*
264 * Write-enable shall be assumed if GPIO is missing in a board's
265 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
266 * Tegra.
267 */
268 return mmc_gpio_get_ro(host->mmc);
269}
270
261static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) 271static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
262{ 272{
263 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 273 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -1224,6 +1234,7 @@ static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1224}; 1234};
1225 1235
1226static const struct sdhci_ops tegra_sdhci_ops = { 1236static const struct sdhci_ops tegra_sdhci_ops = {
1237 .get_ro = tegra_sdhci_get_ro,
1227 .read_w = tegra_sdhci_readw, 1238 .read_w = tegra_sdhci_readw,
1228 .write_l = tegra_sdhci_writel, 1239 .write_l = tegra_sdhci_writel,
1229 .set_clock = tegra_sdhci_set_clock, 1240 .set_clock = tegra_sdhci_set_clock,
@@ -1279,6 +1290,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1279}; 1290};
1280 1291
1281static const struct sdhci_ops tegra114_sdhci_ops = { 1292static const struct sdhci_ops tegra114_sdhci_ops = {
1293 .get_ro = tegra_sdhci_get_ro,
1282 .read_w = tegra_sdhci_readw, 1294 .read_w = tegra_sdhci_readw,
1283 .write_w = tegra_sdhci_writew, 1295 .write_w = tegra_sdhci_writew,
1284 .write_l = tegra_sdhci_writel, 1296 .write_l = tegra_sdhci_writel,
@@ -1332,6 +1344,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1332}; 1344};
1333 1345
1334static const struct sdhci_ops tegra210_sdhci_ops = { 1346static const struct sdhci_ops tegra210_sdhci_ops = {
1347 .get_ro = tegra_sdhci_get_ro,
1335 .read_w = tegra_sdhci_readw, 1348 .read_w = tegra_sdhci_readw,
1336 .write_w = tegra210_sdhci_writew, 1349 .write_w = tegra210_sdhci_writew,
1337 .write_l = tegra_sdhci_writel, 1350 .write_l = tegra_sdhci_writel,
@@ -1366,6 +1379,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1366}; 1379};
1367 1380
1368static const struct sdhci_ops tegra186_sdhci_ops = { 1381static const struct sdhci_ops tegra186_sdhci_ops = {
1382 .get_ro = tegra_sdhci_get_ro,
1369 .read_w = tegra_sdhci_readw, 1383 .read_w = tegra_sdhci_readw,
1370 .write_l = tegra_sdhci_writel, 1384 .write_l = tegra_sdhci_writel,
1371 .set_clock = tegra_sdhci_set_clock, 1385 .set_clock = tegra_sdhci_set_clock,
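The Tegra hunks above wire the same .get_ro callback into every SoC's ops table because, per the added comment, the controller's WRITE_PROTECT bit doesn't work on Tegra: the write-protect state comes from a GPIO, and a missing GPIO means "assume writable". A hedged sketch of that fallback shape, where gpio_ro_state() stands in for mmc_gpio_get_ro() (negative return meaning "no WP GPIO wired up"):

	#include <stdio.h>

	/* Stand-in for mmc_gpio_get_ro(): <0 = no WP GPIO present,
	 * 0 = writable, >0 = read-only. */
	static int gpio_ro_state(int have_gpio, int ro)
	{
		return have_gpio ? ro : -1;
	}

	/* Mirror of the Tegra policy: with no GPIO, assume write-enabled
	 * instead of trusting a broken controller bit. */
	static int get_ro(int have_gpio, int ro)
	{
		int v = gpio_ro_state(have_gpio, ro);

		return v < 0 ? 0 : v;
	}

	int main(void)
	{
		printf("no gpio      -> ro=%d\n", get_ro(0, 1));
		printf("gpio says RO -> ro=%d\n", get_ro(1, 1));
		return 0;
	}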
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index b4e3caf7d799..a4d8968d133d 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -1,5 +1,6 @@
1menuconfig MTD_HYPERBUS 1menuconfig MTD_HYPERBUS
2 tristate "HyperBus support" 2 tristate "HyperBus support"
3 depends on HAS_IOMEM
3 select MTD_CFI 4 select MTD_CFI
4 select MTD_MAP_BANK_WIDTH_2 5 select MTD_MAP_BANK_WIDTH_2
5 select MTD_CFI_AMDSTD 6 select MTD_CFI_AMDSTD
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 895510d40ce4..47602af4ee34 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -81,6 +81,7 @@ static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *r
81 default: 81 default:
82 printk(KERN_WARNING "SA1100 flash: unknown base address " 82 printk(KERN_WARNING "SA1100 flash: unknown base address "
83 "0x%08lx, assuming CS0\n", phys); 83 "0x%08lx, assuming CS0\n", phys);
84 /* Fall through */
84 85
85 case SA1100_CS0_PHYS: 86 case SA1100_CS0_PHYS:
86 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; 87 subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
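The sa1100-flash hunk only adds a /* Fall through */ annotation: the drop from the default: case into SA1100_CS0_PHYS is intentional, and the comment is what lets -Wimplicit-fallthrough accept it. A compilable illustration of the idiom (GCC and Clang recognize the comment form; newer code can use the fallthrough attribute instead):

	#include <stdio.h>

	static int bank_width(unsigned long phys)
	{
		int width = 0;

		switch (phys) {
		default:
			printf("unknown base 0x%lx, assuming CS0\n", phys);
			/* Fall through */

		case 0x00000000: /* stand-in for SA1100_CS0_PHYS */
			width = 4;
			break;
		}
		return width;
	}

	int main(void)
	{
		printf("width=%d\n", bank_width(0xdeadbeefUL));
		return 0;
	}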
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 03cc788511d5..654bdc41fc99 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -3780,8 +3780,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
3780 default: 3780 default:
3781 /* Kept only for backward compatibility purpose. */ 3781 /* Kept only for backward compatibility purpose. */
3782 params->quad_enable = spansion_quad_enable; 3782 params->quad_enable = spansion_quad_enable;
3783 if (nor->clear_sr_bp)
3784 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
3785 break; 3783 break;
3786 } 3784 }
3787 3785
@@ -4035,6 +4033,9 @@ static int spi_nor_init(struct spi_nor *nor)
4035 int err; 4033 int err;
4036 4034
4037 if (nor->clear_sr_bp) { 4035 if (nor->clear_sr_bp) {
4036 if (nor->quad_enable == spansion_quad_enable)
4037 nor->clear_sr_bp = spi_nor_spansion_clear_sr_bp;
4038
4038 err = nor->clear_sr_bp(nor); 4039 err = nor->clear_sr_bp(nor);
4039 if (err) { 4040 if (err) {
4040 dev_err(nor->dev, 4041 dev_err(nor->dev,
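The spi-nor hunks move the clear_sr_bp override out of parameter parsing and into spi_nor_init(), where the driver checks whether quad_enable resolved to spansion_quad_enable before substituting the Spansion-specific unlock helper; comparing the function pointer ties the decision to the final quad-enable method rather than to an earlier guess. A small sketch of dispatch-by-function-pointer comparison, a minimal model of the change rather than the driver itself:

	#include <stdio.h>

	typedef int (*hook_t)(void);

	static int generic_unlock(void)  { puts("generic unlock");  return 0; }
	static int spansion_unlock(void) { puts("spansion unlock"); return 0; }
	static int spansion_qe(void)     { return 0; }

	struct flash {
		hook_t quad_enable;
		hook_t clear_sr_bp;
	};

	/* Late fixup: only swap in the vendor helper when the quad-enable
	 * hook tells us we really are on that flavor of flash. */
	static int flash_init(struct flash *f)
	{
		if (f->clear_sr_bp) {
			if (f->quad_enable == spansion_qe)
				f->clear_sr_bp = spansion_unlock;
			return f->clear_sr_bp();
		}
		return 0;
	}

	int main(void)
	{
		struct flash f = { .quad_enable = spansion_qe,
				   .clear_sr_bp = generic_unlock };

		return flash_init(&f);
	}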
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02fd7822c14a..931d9d935686 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1126,6 +1126,8 @@ static void bond_compute_features(struct bonding *bond)
1126done: 1126done:
1127 bond_dev->vlan_features = vlan_features; 1127 bond_dev->vlan_features = vlan_features;
1128 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | 1128 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1129 NETIF_F_HW_VLAN_CTAG_TX |
1130 NETIF_F_HW_VLAN_STAG_TX |
1129 NETIF_F_GSO_UDP_L4; 1131 NETIF_F_GSO_UDP_L4;
1130 bond_dev->mpls_features = mpls_features; 1132 bond_dev->mpls_features = mpls_features;
1131 bond_dev->gso_max_segs = gso_max_segs; 1133 bond_dev->gso_max_segs = gso_max_segs;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 3811fdbda13e..28c963a21dac 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -478,6 +478,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
478 unsigned long *supported, 478 unsigned long *supported,
479 struct phylink_link_state *state) 479 struct phylink_link_state *state)
480{ 480{
481 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
481 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 482 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
482 483
483 if (!phy_interface_mode_is_rgmii(state->interface) && 484 if (!phy_interface_mode_is_rgmii(state->interface) &&
@@ -487,8 +488,10 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
487 state->interface != PHY_INTERFACE_MODE_INTERNAL && 488 state->interface != PHY_INTERFACE_MODE_INTERNAL &&
488 state->interface != PHY_INTERFACE_MODE_MOCA) { 489 state->interface != PHY_INTERFACE_MODE_MOCA) {
489 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); 490 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
490 dev_err(ds->dev, 491 if (port != core_readl(priv, CORE_IMP0_PRT_ID))
491 "Unsupported interface: %d\n", state->interface); 492 dev_err(ds->dev,
493 "Unsupported interface: %d for port %d\n",
494 state->interface, port);
492 return; 495 return;
493 } 496 }
494 497
@@ -526,6 +529,9 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
526 u32 id_mode_dis = 0, port_mode; 529 u32 id_mode_dis = 0, port_mode;
527 u32 reg, offset; 530 u32 reg, offset;
528 531
532 if (port == core_readl(priv, CORE_IMP0_PRT_ID))
533 return;
534
529 if (priv->type == BCM7445_DEVICE_ID) 535 if (priv->type == BCM7445_DEVICE_ID)
530 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); 536 offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
531 else 537 else
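In the bcm_sf2 hunks above the IMP (CPU-facing) port is now left alone: mac_config returns early for it, and validate no longer prints the "unsupported interface" error for that port, since its link is fixed by the switch itself. A minimal guard-clause sketch, where imp_port() stands in for core_readl(priv, CORE_IMP0_PRT_ID):

	#include <stdio.h>

	/* Stand-in for reading CORE_IMP0_PRT_ID from the switch core. */
	static int imp_port(void) { return 8; }

	static void mac_config(int port)
	{
		/* The IMP port's MAC is managed by the switch; skip it. */
		if (port == imp_port())
			return;

		printf("configuring user port %d\n", port);
	}

	int main(void)
	{
		mac_config(8); /* silently skipped */
		mac_config(1); /* configured */
		return 0;
	}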
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index d073baffc20b..df976b259e43 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1223,12 +1223,8 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1223{ 1223{
1224 struct sja1105_private *priv = ds->priv; 1224 struct sja1105_private *priv = ds->priv;
1225 struct device *dev = ds->dev; 1225 struct device *dev = ds->dev;
1226 u16 rx_vid, tx_vid;
1227 int i; 1226 int i;
1228 1227
1229 rx_vid = dsa_8021q_rx_vid(ds, port);
1230 tx_vid = dsa_8021q_tx_vid(ds, port);
1231
1232 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) { 1228 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1233 struct sja1105_l2_lookup_entry l2_lookup = {0}; 1229 struct sja1105_l2_lookup_entry l2_lookup = {0};
1234 u8 macaddr[ETH_ALEN]; 1230 u8 macaddr[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e47ea92e2ae3..d10b421ed1f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3057,12 +3057,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
3057 /* if VF indicate to PF this function is going down (PF will delete sp 3057 /* if VF indicate to PF this function is going down (PF will delete sp
3058 * elements and clear initializations 3058 * elements and clear initializations
3059 */ 3059 */
3060 if (IS_VF(bp)) 3060 if (IS_VF(bp)) {
3061 bnx2x_clear_vlan_info(bp);
3061 bnx2x_vfpf_close_vf(bp); 3062 bnx2x_vfpf_close_vf(bp);
3062 else if (unload_mode != UNLOAD_RECOVERY) 3063 } else if (unload_mode != UNLOAD_RECOVERY) {
3063 /* if this is a normal/close unload need to clean up chip*/ 3064 /* if this is a normal/close unload need to clean up chip*/
3064 bnx2x_chip_cleanup(bp, unload_mode, keep_link); 3065 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3065 else { 3066 } else {
3066 /* Send the UNLOAD_REQUEST to the MCP */ 3067 /* Send the UNLOAD_REQUEST to the MCP */
3067 bnx2x_send_unload_req(bp, unload_mode); 3068 bnx2x_send_unload_req(bp, unload_mode);
3068 3069
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c2f6e44e9a3f..8b08cb18e363 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
425void bnx2x_disable_close_the_gate(struct bnx2x *bp); 425void bnx2x_disable_close_the_gate(struct bnx2x *bp);
426int bnx2x_init_hw_func_cnic(struct bnx2x *bp); 426int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
427 427
428void bnx2x_clear_vlan_info(struct bnx2x *bp);
429
428/** 430/**
429 * bnx2x_sp_event - handle ramrods completion. 431 * bnx2x_sp_event - handle ramrods completion.
430 * 432 *
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2cc14db8f0ec..192ff8d5da32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8482,11 +8482,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8482 return rc; 8482 return rc;
8483} 8483}
8484 8484
8485void bnx2x_clear_vlan_info(struct bnx2x *bp)
8486{
8487 struct bnx2x_vlan_entry *vlan;
8488
8489 /* Mark that hw forgot all entries */
8490 list_for_each_entry(vlan, &bp->vlan_reg, link)
8491 vlan->hw = false;
8492
8493 bp->vlan_cnt = 0;
8494}
8495
8485static int bnx2x_del_all_vlans(struct bnx2x *bp) 8496static int bnx2x_del_all_vlans(struct bnx2x *bp)
8486{ 8497{
8487 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj; 8498 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8488 unsigned long ramrod_flags = 0, vlan_flags = 0; 8499 unsigned long ramrod_flags = 0, vlan_flags = 0;
8489 struct bnx2x_vlan_entry *vlan;
8490 int rc; 8500 int rc;
8491 8501
8492 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 8502 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8495,10 +8505,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
8495 if (rc) 8505 if (rc)
8496 return rc; 8506 return rc;
8497 8507
8498 /* Mark that hw forgot all entries */ 8508 bnx2x_clear_vlan_info(bp);
8499 list_for_each_entry(vlan, &bp->vlan_reg, link)
8500 vlan->hw = false;
8501 bp->vlan_cnt = 0;
8502 8509
8503 return 0; 8510 return 0;
8504} 8511}
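The bnx2x hunks extract the bookkeeping from bnx2x_del_all_vlans() (marking every entry's hw flag false and zeroing vlan_cnt) into a new bnx2x_clear_vlan_info() so the VF unload path can reuse it without issuing the PF-only hardware commands. A sketch of the same extract-helper shape over a plain linked list:

	#include <stdbool.h>
	#include <stdio.h>

	struct vlan_entry {
		bool hw;
		struct vlan_entry *next;
	};

	struct dev {
		struct vlan_entry *vlans;
		int vlan_cnt;
	};

	/* Shared bookkeeping: mark that hardware forgot every entry. */
	static void clear_vlan_info(struct dev *d)
	{
		for (struct vlan_entry *v = d->vlans; v; v = v->next)
			v->hw = false;
		d->vlan_cnt = 0;
	}

	/* PF path: talk to hardware, then reuse the helper. */
	static int del_all_vlans(struct dev *d)
	{
		/* ... issue hardware delete commands here ... */
		clear_vlan_info(d);
		return 0;
	}

	int main(void)
	{
		struct vlan_entry b = { .hw = true, .next = NULL };
		struct vlan_entry a = { .hw = true, .next = &b };
		struct dev d = { .vlans = &a, .vlan_cnt = 2 };

		del_all_vlans(&d);
		printf("cnt=%d a.hw=%d b.hw=%d\n", d.vlan_cnt, a.hw, b.hw);
		return 0;
	}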
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7070349915bc..8dce4069472b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2021,9 +2021,9 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2021 if (bnapi->events & BNXT_RX_EVENT) { 2021 if (bnapi->events & BNXT_RX_EVENT) {
2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; 2022 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2023 2023
2024 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2025 if (bnapi->events & BNXT_AGG_EVENT) 2024 if (bnapi->events & BNXT_AGG_EVENT)
2026 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 2025 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2026 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2027 } 2027 }
2028 bnapi->events = 0; 2028 bnapi->events = 0;
2029} 2029}
@@ -5064,6 +5064,7 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5064 5064
5065static int bnxt_hwrm_ring_alloc(struct bnxt *bp) 5065static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5066{ 5066{
5067 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5067 int i, rc = 0; 5068 int i, rc = 0;
5068 u32 type; 5069 u32 type;
5069 5070
@@ -5139,7 +5140,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5139 if (rc) 5140 if (rc)
5140 goto err_out; 5141 goto err_out;
5141 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); 5142 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5142 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); 5143 /* If we have agg rings, post agg buffers first. */
5144 if (!agg_rings)
5145 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5143 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; 5146 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5144 if (bp->flags & BNXT_FLAG_CHIP_P5) { 5147 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5145 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 5148 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -5158,7 +5161,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5158 } 5161 }
5159 } 5162 }
5160 5163
5161 if (bp->flags & BNXT_FLAG_AGG_RINGS) { 5164 if (agg_rings) {
5162 type = HWRM_RING_ALLOC_AGG; 5165 type = HWRM_RING_ALLOC_AGG;
5163 for (i = 0; i < bp->rx_nr_rings; i++) { 5166 for (i = 0; i < bp->rx_nr_rings; i++) {
5164 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; 5167 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -5174,6 +5177,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5174 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, 5177 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5175 ring->fw_ring_id); 5178 ring->fw_ring_id);
5176 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); 5179 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5180 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5177 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; 5181 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5178 } 5182 }
5179 } 5183 }
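The bnxt doorbell hunks above change publication order: when aggregation rings exist, the agg ring's producer doorbell is written before (or the rx doorbell deferred until after) the rx ring's, per the added "post agg buffers first" comment, so the device never consumes an rx descriptor whose aggregation buffers are still unposted. A toy sketch of publishing the dependency first:

	#include <stdio.h>

	struct rings {
		unsigned rx_prod, agg_prod;   /* host-side producers */
		unsigned rx_db, agg_db;       /* what hardware has been told */
	};

	/* Publish producers in dependency order: aggregation buffers
	 * before the rx descriptors that may point at them. */
	static void ring_doorbells(struct rings *r, int have_agg)
	{
		if (have_agg)
			r->agg_db = r->agg_prod;
		r->rx_db = r->rx_prod;
	}

	int main(void)
	{
		struct rings r = { .rx_prod = 16, .agg_prod = 64 };

		ring_doorbells(&r, 1);
		printf("agg_db=%u rx_db=%u\n", r.agg_db, r.rx_db);
		return 0;
	}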
@@ -7016,19 +7020,29 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7016 bnxt_hwrm_vnic_set_rss(bp, i, false); 7020 bnxt_hwrm_vnic_set_rss(bp, i, false);
7017} 7021}
7018 7022
7019static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, 7023static void bnxt_clear_vnic(struct bnxt *bp)
7020 bool irq_re_init)
7021{ 7024{
7022 if (bp->vnic_info) { 7025 if (!bp->vnic_info)
7023 bnxt_hwrm_clear_vnic_filter(bp); 7026 return;
7027
7028 bnxt_hwrm_clear_vnic_filter(bp);
7029 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7024 /* clear all RSS setting before free vnic ctx */ 7030 /* clear all RSS setting before free vnic ctx */
7025 bnxt_hwrm_clear_vnic_rss(bp); 7031 bnxt_hwrm_clear_vnic_rss(bp);
7026 bnxt_hwrm_vnic_ctx_free(bp); 7032 bnxt_hwrm_vnic_ctx_free(bp);
7027 /* before free the vnic, undo the vnic tpa settings */
7028 if (bp->flags & BNXT_FLAG_TPA)
7029 bnxt_set_tpa(bp, false);
7030 bnxt_hwrm_vnic_free(bp);
7031 } 7033 }
7034 /* before free the vnic, undo the vnic tpa settings */
7035 if (bp->flags & BNXT_FLAG_TPA)
7036 bnxt_set_tpa(bp, false);
7037 bnxt_hwrm_vnic_free(bp);
7038 if (bp->flags & BNXT_FLAG_CHIP_P5)
7039 bnxt_hwrm_vnic_ctx_free(bp);
7040}
7041
7042static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7043 bool irq_re_init)
7044{
7045 bnxt_clear_vnic(bp);
7032 bnxt_hwrm_ring_free(bp, close_path); 7046 bnxt_hwrm_ring_free(bp, close_path);
7033 bnxt_hwrm_ring_grp_free(bp); 7047 bnxt_hwrm_ring_grp_free(bp);
7034 if (irq_re_init) { 7048 if (irq_re_init) {
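The bnxt_clear_vnic() refactor above also encodes a chip-specific teardown order: on older chips the RSS context is freed before the vnic, while on P5 parts it is freed after bnxt_hwrm_vnic_free(). A sketch of flag-driven teardown ordering:

	#include <stdbool.h>
	#include <stdio.h>

	struct nic { bool is_p5; };

	static void free_rss_ctx(void) { puts("free rss ctx"); }
	static void free_vnic(void)    { puts("free vnic"); }

	/* Teardown order depends on the chip generation. */
	static void clear_vnic(const struct nic *n)
	{
		if (!n->is_p5)
			free_rss_ctx();  /* older chips: ctx before vnic */
		free_vnic();
		if (n->is_p5)
			free_rss_ctx();  /* P5: ctx after vnic */
	}

	int main(void)
	{
		struct nic old = { .is_p5 = false }, p5 = { .is_p5 = true };

		clear_vnic(&old);
		clear_vnic(&p5);
		return 0;
	}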
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 549c90d3e465..c05d663212b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -98,10 +98,13 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
98 if (idx) 98 if (idx)
99 req->dimensions = cpu_to_le16(1); 99 req->dimensions = cpu_to_le16(1);
100 100
101 if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) 101 if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
102 memcpy(data_addr, buf, bytesize); 102 memcpy(data_addr, buf, bytesize);
103 103 rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
104 rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); 104 } else {
105 rc = hwrm_send_message_silent(bp, msg, msg_len,
106 HWRM_CMD_TIMEOUT);
107 }
105 if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE)) 108 if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
106 memcpy(buf, data_addr, bytesize); 109 memcpy(buf, data_addr, bytesize);
107 110
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index c7ee63d69679..8445a0cce849 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2016,21 +2016,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
2016 mutex_lock(&bp->hwrm_cmd_lock); 2016 mutex_lock(&bp->hwrm_cmd_lock);
2017 hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), 2017 hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
2018 INSTALL_PACKAGE_TIMEOUT); 2018 INSTALL_PACKAGE_TIMEOUT);
2019 if (hwrm_err) 2019 if (hwrm_err) {
2020 goto flash_pkg_exit;
2021
2022 if (resp->error_code) {
2023 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; 2020 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
2024 2021
2025 if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { 2022 if (resp->error_code && error_code ==
2023 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
2026 install.flags |= cpu_to_le16( 2024 install.flags |= cpu_to_le16(
2027 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); 2025 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
2028 hwrm_err = _hwrm_send_message(bp, &install, 2026 hwrm_err = _hwrm_send_message(bp, &install,
2029 sizeof(install), 2027 sizeof(install),
2030 INSTALL_PACKAGE_TIMEOUT); 2028 INSTALL_PACKAGE_TIMEOUT);
2031 if (hwrm_err)
2032 goto flash_pkg_exit;
2033 } 2029 }
2030 if (hwrm_err)
2031 goto flash_pkg_exit;
2034 } 2032 }
2035 2033
2036 if (resp->result) { 2034 if (resp->result) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 6fe4a7174271..dd621f6bd127 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1236,7 +1236,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
1236static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, 1236static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
1237 u16 src_fid) 1237 u16 src_fid)
1238{ 1238{
1239 flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; 1239 flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
1240} 1240}
1241 1241
1242static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, 1242static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
@@ -1285,9 +1285,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1285 goto free_node; 1285 goto free_node;
1286 1286
1287 bnxt_tc_set_src_fid(bp, flow, src_fid); 1287 bnxt_tc_set_src_fid(bp, flow, src_fid);
1288 1288 bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
1289 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
1290 bnxt_tc_set_flow_dir(bp, flow, src_fid);
1291 1289
1292 if (!bnxt_tc_can_offload(bp, flow)) { 1290 if (!bnxt_tc_can_offload(bp, flow)) {
1293 rc = -EOPNOTSUPP; 1291 rc = -EOPNOTSUPP;
@@ -1407,7 +1405,7 @@ static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
1407 * 2. 15th bit of flow_handle must specify the flow 1405 * 2. 15th bit of flow_handle must specify the flow
1408 * direction (TX/RX). 1406 * direction (TX/RX).
1409 */ 1407 */
1410 if (flow_node->flow.dir == BNXT_DIR_RX) 1408 if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
1411 handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | 1409 handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
1412 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; 1410 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1413 else 1411 else
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index ffec57d1a5ec..4f05305052f2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -23,6 +23,9 @@ struct bnxt_tc_l2_key {
23 __be16 inner_vlan_tci; 23 __be16 inner_vlan_tci;
24 __be16 ether_type; 24 __be16 ether_type;
25 u8 num_vlans; 25 u8 num_vlans;
26 u8 dir;
27#define BNXT_DIR_RX 1
28#define BNXT_DIR_TX 0
26}; 29};
27 30
28struct bnxt_tc_l3_key { 31struct bnxt_tc_l3_key {
@@ -98,9 +101,6 @@ struct bnxt_tc_flow {
98 101
99 /* flow applicable to pkts ingressing on this fid */ 102 /* flow applicable to pkts ingressing on this fid */
100 u16 src_fid; 103 u16 src_fid;
101 u8 dir;
102#define BNXT_DIR_RX 1
103#define BNXT_DIR_TX 0
104 struct bnxt_tc_l2_key l2_key; 104 struct bnxt_tc_l2_key l2_key;
105 struct bnxt_tc_l2_key l2_mask; 105 struct bnxt_tc_l2_key l2_mask;
106 struct bnxt_tc_l3_key l3_key; 106 struct bnxt_tc_l3_key l3_key;
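Moving dir from struct bnxt_tc_flow into bnxt_tc_l2_key, as the hunks above do, makes the direction part of the L2 key that flows are hashed and compared on, so otherwise-identical rx and tx flows no longer alias each other. A sketch of widening a memcmp-comparable lookup key with a direction byte (field names illustrative):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Direction is part of the key, so RX and TX flows with the same
	 * MAC tuple hash and compare as distinct entries. */
	struct l2_key {
		uint8_t dmac[6];
		uint8_t smac[6];
		uint8_t dir;        /* 0 = TX, 1 = RX */
	} __attribute__((packed));

	static int key_equal(const struct l2_key *a, const struct l2_key *b)
	{
		return memcmp(a, b, sizeof(*a)) == 0;
	}

	int main(void)
	{
		struct l2_key rx = { .dmac = {1, 2, 3, 4, 5, 6}, .dir = 1 };
		struct l2_key tx = rx;

		tx.dir = 0;
		printf("same tuple, different dir -> equal=%d\n",
		       key_equal(&rx, &tx));
		return 0;
	}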
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 73632b843749..b821c9e1604c 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -10,7 +10,7 @@
10 10
11#include "cavium_ptp.h" 11#include "cavium_ptp.h"
12 12
13#define DRV_NAME "Cavium PTP Driver" 13#define DRV_NAME "cavium_ptp"
14 14
15#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C 15#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
16#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E 16#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 032224178b64..6dd65f9b347c 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -237,8 +237,10 @@ int octeon_setup_iq(struct octeon_device *oct,
237 } 237 }
238 238
239 oct->num_iqs++; 239 oct->num_iqs++;
240 if (oct->fn_list.enable_io_queues(oct)) 240 if (oct->fn_list.enable_io_queues(oct)) {
241 octeon_delete_instr_queue(oct, iq_no);
241 return 1; 242 return 1;
243 }
242 244
243 return 0; 245 return 0;
244} 246}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 02959035ed3f..d692251ee252 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3236,8 +3236,10 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
3236 return -ENOMEM; 3236 return -ENOMEM;
3237 3237
3238 err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz); 3238 err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
3239 if (err) 3239 if (err) {
3240 kvfree(t);
3240 return err; 3241 return err;
3242 }
3241 3243
3242 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz); 3244 bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
3243 kvfree(t); 3245 kvfree(t);
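The cxgb4 hunk plugs a leak: the temporary bitmap allocated for bitmap_parse_user() was not freed when parsing failed, only on success. A sketch of the allocate/parse/free-on-both-paths pattern:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int parse_into(const char *src, char *dst, size_t n)
	{
		if (!src || strlen(src) >= n)
			return -1;          /* parse failure */
		strcpy(dst, src);
		return 0;
	}

	static int store_setting(const char *src, char *out, size_t n)
	{
		char *tmp = malloc(n);

		if (!tmp)
			return -1;

		if (parse_into(src, tmp, n)) {
			free(tmp);          /* the missing free in the original bug */
			return -1;
		}

		memcpy(out, tmp, n);
		free(tmp);
		return 0;
	}

	int main(void)
	{
		char out[8];

		printf("ok=%d\n", store_setting("abc", out, sizeof(out)));
		printf("fail=%d\n", store_setting("way too long", out, sizeof(out)));
		return 0;
	}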
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index 133acca0bf31..092da2d90026 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -167,7 +167,7 @@ struct nps_enet_priv {
167}; 167};
168 168
169/** 169/**
170 * nps_reg_set - Sets ENET register with provided value. 170 * nps_enet_reg_set - Sets ENET register with provided value.
171 * @priv: Pointer to EZchip ENET private data structure. 171 * @priv: Pointer to EZchip ENET private data structure.
172 * @reg: Register offset from base address. 172 * @reg: Register offset from base address.
173 * @value: Value to set in register. 173 * @value: Value to set in register.
@@ -179,7 +179,7 @@ static inline void nps_enet_reg_set(struct nps_enet_priv *priv,
179} 179}
180 180
181/** 181/**
182 * nps_reg_get - Gets value of specified ENET register. 182 * nps_enet_reg_get - Gets value of specified ENET register.
183 * @priv: Pointer to EZchip ENET private data structure. 183 * @priv: Pointer to EZchip ENET private data structure.
184 * @reg: Register offset from base address. 184 * @reg: Register offset from base address.
185 * 185 *
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 497298752381..aca95f64bde8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -50,7 +50,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
50 u64_stats_fetch_begin(&priv->tx[ring].statss); 50 u64_stats_fetch_begin(&priv->tx[ring].statss);
51 s->tx_packets += priv->tx[ring].pkt_done; 51 s->tx_packets += priv->tx[ring].pkt_done;
52 s->tx_bytes += priv->tx[ring].bytes_done; 52 s->tx_bytes += priv->tx[ring].bytes_done;
53 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 53 } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
54 start)); 54 start));
55 } 55 }
56 } 56 }
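The one-character gve fix above matters because u64_stats_fetch_begin() and u64_stats_fetch_retry() must operate on the same syncp: beginning on the tx ring but retrying against the rx ring can loop forever or accept torn 64-bit values. A sketch of the correctly paired read loop using a plain sequence counter:

	#include <stdint.h>
	#include <stdio.h>

	struct stats {
		unsigned seq;       /* even = stable, odd = writer active */
		uint64_t packets, bytes;
	};

	static unsigned fetch_begin(const struct stats *s) { return s->seq; }

	static int fetch_retry(const struct stats *s, unsigned start)
	{
		/* Retry if the writer was mid-update or has updated since. */
		return (start & 1) || s->seq != start;
	}

	int main(void)
	{
		struct stats tx = { .seq = 2, .packets = 10, .bytes = 1500 };
		uint64_t p, b;
		unsigned start;

		do {
			start = fetch_begin(&tx);      /* begin ... */
			p = tx.packets;
			b = tx.bytes;
		} while (fetch_retry(&tx, start)); /* ... retry on the SAME object */

		printf("packets=%llu bytes=%llu\n",
		       (unsigned long long)p, (unsigned long long)b);
		return 0;
	}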
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index d654c234aaf7..c5be4ebd8437 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1605,7 +1605,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1605 struct net_device *netdev; 1605 struct net_device *netdev;
1606 struct ibmveth_adapter *adapter; 1606 struct ibmveth_adapter *adapter;
1607 unsigned char *mac_addr_p; 1607 unsigned char *mac_addr_p;
1608 unsigned int *mcastFilterSize_p; 1608 __be32 *mcastFilterSize_p;
1609 long ret; 1609 long ret;
1610 unsigned long ret_attr; 1610 unsigned long ret_attr;
1611 1611
@@ -1627,8 +1627,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1627 return -EINVAL; 1627 return -EINVAL;
1628 } 1628 }
1629 1629
1630 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, 1630 mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1631 VETH_MCAST_FILTER_SIZE, NULL); 1631 VETH_MCAST_FILTER_SIZE,
1632 NULL);
1632 if (!mcastFilterSize_p) { 1633 if (!mcastFilterSize_p) {
1633 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE " 1634 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1634 "attribute\n"); 1635 "attribute\n");
@@ -1645,7 +1646,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1645 1646
1646 adapter->vdev = dev; 1647 adapter->vdev = dev;
1647 adapter->netdev = netdev; 1648 adapter->netdev = netdev;
1648 adapter->mcastFilterSize = *mcastFilterSize_p; 1649 adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1649 adapter->pool_config = 0; 1650 adapter->pool_config = 0;
1650 1651
1651 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); 1652 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
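The ibmveth hunks fix an endianness bug: the driver dereferenced a device-tree attribute as a native unsigned int, but OF properties are stored big-endian, so on little-endian hosts the multicast filter size came out byte-swapped. The fix types the pointer as __be32 and converts with be32_to_cpu(). A userspace sketch of the same conversion, with be32toh standing in for be32_to_cpu():

	#define _DEFAULT_SOURCE
	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* A 4-byte big-endian property as it sits in the device tree. */
		uint8_t prop[4] = { 0x00, 0x00, 0x01, 0x00 };  /* 256 */
		uint32_t raw;

		memcpy(&raw, prop, sizeof(raw));

		/* Wrong on little-endian hosts: */
		printf("naive:     %u\n", (unsigned)raw);

		/* Right everywhere: convert from big-endian explicitly. */
		printf("converted: %u\n", (unsigned)be32toh(raw));
		return 0;
	}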
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3da680073265..cebd20f3128d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1568,6 +1568,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], 1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1569 (u64)tx_buff->indir_dma, 1569 (u64)tx_buff->indir_dma,
1570 (u64)num_entries); 1570 (u64)num_entries);
1571 dma_unmap_single(dev, tx_buff->indir_dma,
1572 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1571 } else { 1573 } else {
1572 tx_buff->num_entries = num_entries; 1574 tx_buff->num_entries = num_entries;
1573 lpar_rc = send_subcrq(adapter, handle_array[queue_num], 1575 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
@@ -2788,7 +2790,6 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2788 union sub_crq *next; 2790 union sub_crq *next;
2789 int index; 2791 int index;
2790 int i, j; 2792 int i, j;
2791 u8 *first;
2792 2793
2793restart_loop: 2794restart_loop:
2794 while (pending_scrq(adapter, scrq)) { 2795 while (pending_scrq(adapter, scrq)) {
@@ -2818,14 +2819,6 @@ restart_loop:
2818 2819
2819 txbuff->data_dma[j] = 0; 2820 txbuff->data_dma[j] = 0;
2820 } 2821 }
2821 /* if sub_crq was sent indirectly */
2822 first = &txbuff->indir_arr[0].generic.first;
2823 if (*first == IBMVNIC_CRQ_CMD) {
2824 dma_unmap_single(dev, txbuff->indir_dma,
2825 sizeof(txbuff->indir_arr),
2826 DMA_TO_DEVICE);
2827 *first = 0;
2828 }
2829 2822
2830 if (txbuff->last_frag) { 2823 if (txbuff->last_frag) {
2831 dev_kfree_skb_any(txbuff->skb); 2824 dev_kfree_skb_any(txbuff->skb);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index cbaf712d6529..7882148abb43 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7897,11 +7897,8 @@ static void ixgbe_service_task(struct work_struct *work)
7897 return; 7897 return;
7898 } 7898 }
7899 if (ixgbe_check_fw_error(adapter)) { 7899 if (ixgbe_check_fw_error(adapter)) {
7900 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 7900 if (!test_bit(__IXGBE_DOWN, &adapter->state))
7901 rtnl_lock();
7902 unregister_netdev(adapter->netdev); 7901 unregister_netdev(adapter->netdev);
7903 rtnl_unlock();
7904 }
7905 ixgbe_service_event_complete(adapter); 7902 ixgbe_service_event_complete(adapter);
7906 return; 7903 return;
7907 } 7904 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6c01314e87b0..db3552f2d087 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1187,7 +1187,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1187 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); 1187 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1188 if (err) { 1188 if (err) {
1189 en_err(priv, "Failed to allocate RSS indirection QP\n"); 1189 en_err(priv, "Failed to allocate RSS indirection QP\n");
1190 goto rss_err; 1190 goto qp_alloc_err;
1191 } 1191 }
1192 1192
1193 rss_map->indir_qp->event = mlx4_en_sqp_event; 1193 rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1241,6 +1241,7 @@ indir_err:
1241 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp); 1241 MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
1242 mlx4_qp_remove(mdev->dev, rss_map->indir_qp); 1242 mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1243 mlx4_qp_free(mdev->dev, rss_map->indir_qp); 1243 mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1244qp_alloc_err:
1244 kfree(rss_map->indir_qp); 1245 kfree(rss_map->indir_qp);
1245 rss_map->indir_qp = NULL; 1246 rss_map->indir_qp = NULL;
1246rss_err: 1247rss_err:
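The mlx4 hunk adds a qp_alloc_err label so a failed mlx4_qp_alloc() frees only the indir_qp allocation and skips the QP teardown that was never set up: error labels must unwind exactly the setup that succeeded, in reverse order. A compact sketch of the pattern:

	#include <stdio.h>
	#include <stdlib.h>

	/* Each error label undoes exactly the setup that succeeded before it. */
	static int setup(int fail_at)
	{
		int *a, *b;

		a = malloc(sizeof(*a));
		if (!a)
			return -1;

		if (fail_at == 1)
			goto err_free_a; /* b was never allocated: skip its teardown */

		b = malloc(sizeof(*b));
		if (!b)
			goto err_free_a;

		puts("setup ok");
		free(b);
		free(a);
		return 0;

	err_free_a:
		free(a);
		return -1;
	}

	int main(void)
	{
		printf("fail path: %d\n", setup(1));
		printf("ok path:   %d\n", setup(0));
		return 0;
	}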
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index ce1be2a84231..65bec19a438f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -184,8 +184,13 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
184 184
185struct mlx5e_tx_wqe { 185struct mlx5e_tx_wqe {
186 struct mlx5_wqe_ctrl_seg ctrl; 186 struct mlx5_wqe_ctrl_seg ctrl;
187 struct mlx5_wqe_eth_seg eth; 187 union {
188 struct mlx5_wqe_data_seg data[0]; 188 struct {
189 struct mlx5_wqe_eth_seg eth;
190 struct mlx5_wqe_data_seg data[0];
191 };
192 u8 tls_progress_params_ctx[0];
193 };
189}; 194};
190 195
191struct mlx5e_rx_wqe_ll { 196struct mlx5e_rx_wqe_ll {
@@ -1100,6 +1105,8 @@ u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
1100u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); 1105u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
1101int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, 1106int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1102 struct ethtool_ts_info *info); 1107 struct ethtool_ts_info *info);
1108int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1109 struct ethtool_flash *flash);
1103void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, 1110void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
1104 struct ethtool_pauseparam *pauseparam); 1111 struct ethtool_pauseparam *pauseparam);
1105int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, 1112int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index f3d98748b211..c7f86453c638 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -76,26 +76,21 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
76 u8 state; 76 u8 state;
77 int err; 77 int err;
78 78
79 if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
80 return 0;
81
82 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); 79 err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
83 if (err) { 80 if (err) {
84 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", 81 netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
85 sq->sqn, err); 82 sq->sqn, err);
86 return err; 83 goto out;
87 } 84 }
88 85
89 if (state != MLX5_SQC_STATE_ERR) { 86 if (state != MLX5_SQC_STATE_ERR)
90 netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); 87 goto out;
91 return -EINVAL;
92 }
93 88
94 mlx5e_tx_disable_queue(sq->txq); 89 mlx5e_tx_disable_queue(sq->txq);
95 90
96 err = mlx5e_wait_for_sq_flush(sq); 91 err = mlx5e_wait_for_sq_flush(sq);
97 if (err) 92 if (err)
98 return err; 93 goto out;
99 94
100 /* At this point, no new packets will arrive from the stack as TXQ is 95 /* At this point, no new packets will arrive from the stack as TXQ is
101 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all 96 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@ -104,13 +99,17 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
104 99
105 err = mlx5e_sq_to_ready(sq, state); 100 err = mlx5e_sq_to_ready(sq, state);
106 if (err) 101 if (err)
107 return err; 102 goto out;
108 103
109 mlx5e_reset_txqsq_cc_pc(sq); 104 mlx5e_reset_txqsq_cc_pc(sq);
110 sq->stats->recover++; 105 sq->stats->recover++;
106 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
111 mlx5e_activate_txqsq(sq); 107 mlx5e_activate_txqsq(sq);
112 108
113 return 0; 109 return 0;
110out:
111 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
112 return err;
114} 113}
115 114
116static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, 115static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index aaffa6f68dc0..7f78c004d12f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -143,7 +143,10 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
143{ 143{
144 set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); 144 set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
145 /* TX queue is created active. */ 145 /* TX queue is created active. */
146
147 spin_lock(&c->xskicosq_lock);
146 mlx5e_trigger_irq(&c->xskicosq); 148 mlx5e_trigger_irq(&c->xskicosq);
149 spin_unlock(&c->xskicosq_lock);
147} 150}
148 151
149void mlx5e_deactivate_xsk(struct mlx5e_channel *c) 152void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 407da83474ef..b7298f9ee3d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -11,12 +11,14 @@
11#include "accel/tls.h" 11#include "accel/tls.h"
12 12
13#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \ 13#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
14 (sizeof(struct mlx5e_umr_wqe) + MLX5_ST_SZ_BYTES(tls_static_params)) 14 (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
15 MLX5_ST_SZ_BYTES(tls_static_params))
15#define MLX5E_KTLS_STATIC_WQEBBS \ 16#define MLX5E_KTLS_STATIC_WQEBBS \
16 (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB)) 17 (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
17 18
18#define MLX5E_KTLS_PROGRESS_WQE_SZ \ 19#define MLX5E_KTLS_PROGRESS_WQE_SZ \
19 (sizeof(struct mlx5e_tx_wqe) + MLX5_ST_SZ_BYTES(tls_progress_params)) 20 (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
21 MLX5_ST_SZ_BYTES(tls_progress_params))
20#define MLX5E_KTLS_PROGRESS_WQEBBS \ 22#define MLX5E_KTLS_PROGRESS_WQEBBS \
21 (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) 23 (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
22#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2 24#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
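With tls_progress_params_ctx now a union member overlaying the tail of struct mlx5e_tx_wqe (see the en.h hunk earlier), the WQE size must be computed as offsetof(..., member) plus the params size rather than sizeof(whole struct) plus the params size: the old formula counted the overlaid bytes twice and produced oversized WQEs. A sketch showing the difference (layout simplified; compiles with GCC/Clang, which accept the kernel-style zero-length array):

	#include <stddef.h>
	#include <stdio.h>

	struct ctrl { unsigned a, b; };

	struct wqe {
		struct ctrl ctrl;
		union {
			struct { unsigned eth[4]; };
			unsigned char params_ctx[0]; /* overlays the tail */
		};
	};

	#define PARAMS_BYTES 8

	int main(void)
	{
		/* Wrong: counts the union's other member AND the params. */
		size_t wrong = sizeof(struct wqe) + PARAMS_BYTES;

		/* Right: the params start where the overlay starts. */
		size_t right = offsetof(struct wqe, params_ctx) + PARAMS_BYTES;

		printf("wrong=%zu right=%zu\n", wrong, right);
		return 0;
	}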
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 3766545ce259..7833ddef0427 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -69,7 +69,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | 69 cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
70 STATIC_PARAMS_DS_CNT); 70 STATIC_PARAMS_DS_CNT);
71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 71 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
72 cseg->imm = cpu_to_be32(priv_tx->tisn); 72 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
73 73
74 ucseg->flags = MLX5_UMR_INLINE; 74 ucseg->flags = MLX5_UMR_INLINE;
75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); 75 ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
@@ -80,7 +80,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
80static void 80static void
81fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) 81fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
82{ 82{
83 MLX5_SET(tls_progress_params, ctx, pd, priv_tx->tisn); 83 MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
84 MLX5_SET(tls_progress_params, ctx, record_tracker_state, 84 MLX5_SET(tls_progress_params, ctx, record_tracker_state,
85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); 85 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
86 MLX5_SET(tls_progress_params, ctx, auth_state, 86 MLX5_SET(tls_progress_params, ctx, auth_state,
@@ -104,18 +104,20 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
104 PROGRESS_PARAMS_DS_CNT); 104 PROGRESS_PARAMS_DS_CNT);
105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 105 cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
106 106
107 fill_progress_params_ctx(wqe->data, priv_tx); 107 fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
108} 108}
109 109
110static void tx_fill_wi(struct mlx5e_txqsq *sq, 110static void tx_fill_wi(struct mlx5e_txqsq *sq,
111 u16 pi, u8 num_wqebbs, 111 u16 pi, u8 num_wqebbs,
112 skb_frag_t *resync_dump_frag) 112 skb_frag_t *resync_dump_frag,
113 u32 num_bytes)
113{ 114{
114 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; 115 struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
115 116
116 wi->skb = NULL; 117 wi->skb = NULL;
117 wi->num_wqebbs = num_wqebbs; 118 wi->num_wqebbs = num_wqebbs;
118 wi->resync_dump_frag = resync_dump_frag; 119 wi->resync_dump_frag = resync_dump_frag;
120 wi->num_bytes = num_bytes;
119} 121}
120 122
121void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) 123void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -143,7 +145,7 @@ post_static_params(struct mlx5e_txqsq *sq,
143 145
144 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi); 146 umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
145 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); 147 build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
146 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL); 148 tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
147 sq->pc += MLX5E_KTLS_STATIC_WQEBBS; 149 sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
148} 150}
149 151
@@ -157,7 +159,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
157 159
158 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi); 160 wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
159 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); 161 build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
160 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL); 162 tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
161 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS; 163 sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
162} 164}
163 165
@@ -248,43 +250,37 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
248 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true); 250 mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
249} 251}
250 252
253struct mlx5e_dump_wqe {
254 struct mlx5_wqe_ctrl_seg ctrl;
255 struct mlx5_wqe_data_seg data;
256};
257
251static int 258static int
252tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb, 259tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
253 skb_frag_t *frag, u32 tisn, bool first) 260 skb_frag_t *frag, u32 tisn, bool first)
254{ 261{
255 struct mlx5_wqe_ctrl_seg *cseg; 262 struct mlx5_wqe_ctrl_seg *cseg;
256 struct mlx5_wqe_eth_seg *eseg;
257 struct mlx5_wqe_data_seg *dseg; 263 struct mlx5_wqe_data_seg *dseg;
258 struct mlx5e_tx_wqe *wqe; 264 struct mlx5e_dump_wqe *wqe;
259 dma_addr_t dma_addr = 0; 265 dma_addr_t dma_addr = 0;
260 u16 ds_cnt, ds_cnt_inl;
261 u8 num_wqebbs; 266 u8 num_wqebbs;
262 u16 pi, ihs; 267 u16 ds_cnt;
263 int fsz; 268 int fsz;
264 269 u16 pi;
265 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
266 ihs = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
267 ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
268 ds_cnt += ds_cnt_inl;
269 ds_cnt += 1; /* one frag */
270 270
271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi); 271 wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
272 272
273 ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
273 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); 274 num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
274 275
275 cseg = &wqe->ctrl; 276 cseg = &wqe->ctrl;
276 eseg = &wqe->eth; 277 dseg = &wqe->data;
277 dseg = wqe->data;
278 278
279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); 279 cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); 280 cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
281 cseg->imm = cpu_to_be32(tisn); 281 cseg->tisn = cpu_to_be32(tisn << 8);
282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; 282 cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
283 283
284 eseg->inline_hdr.sz = cpu_to_be16(ihs);
285 memcpy(eseg->inline_hdr.start, skb->data, ihs);
286 dseg += ds_cnt_inl;
287
288 fsz = skb_frag_size(frag); 284 fsz = skb_frag_size(frag);
289 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, 285 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
290 DMA_TO_DEVICE); 286 DMA_TO_DEVICE);
@@ -296,7 +292,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, struct sk_buff *skb,
296 dseg->byte_count = cpu_to_be32(fsz); 292 dseg->byte_count = cpu_to_be32(fsz);
297 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE); 293 mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
298 294
299 tx_fill_wi(sq, pi, num_wqebbs, frag); 295 tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
300 sq->pc += num_wqebbs; 296 sq->pc += num_wqebbs;
301 297
302 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS, 298 WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
@@ -323,7 +319,7 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
323 struct mlx5_wq_cyc *wq = &sq->wq; 319 struct mlx5_wq_cyc *wq = &sq->wq;
324 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 320 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
325 321
326 tx_fill_wi(sq, pi, 1, NULL); 322 tx_fill_wi(sq, pi, 1, NULL, 0);
327 323
328 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); 324 mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
329} 325}
@@ -434,7 +430,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
434 priv_tx->expected_seq = seq + datalen; 430 priv_tx->expected_seq = seq + datalen;
435 431
436 cseg = &(*wqe)->ctrl; 432 cseg = &(*wqe)->ctrl;
437 cseg->imm = cpu_to_be32(priv_tx->tisn); 433 cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);
438 434
439 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; 435 stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
440 stats->tls_encrypted_bytes += datalen; 436 stats->tls_encrypted_bytes += datalen;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 8657e0f26995..2c75b2752f58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
437 return &arfs_t->rules_hash[bucket_idx]; 437 return &arfs_t->rules_hash[bucket_idx];
438} 438}
439 439
440static u8 arfs_get_ip_proto(const struct sk_buff *skb)
441{
442 return (skb->protocol == htons(ETH_P_IP)) ?
443 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
444}
445
446static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, 440static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
447 u8 ip_proto, __be16 etype) 441 u8 ip_proto, __be16 etype)
448{ 442{
@@ -602,31 +596,9 @@ out:
602 arfs_may_expire_flow(priv); 596 arfs_may_expire_flow(priv);
603} 597}
604 598
605/* return L4 destination port from ip4/6 packets */
606static __be16 arfs_get_dst_port(const struct sk_buff *skb)
607{
608 char *transport_header;
609
610 transport_header = skb_transport_header(skb);
611 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
612 return ((struct tcphdr *)transport_header)->dest;
613 return ((struct udphdr *)transport_header)->dest;
614}
615
616/* return L4 source port from ip4/6 packets */
617static __be16 arfs_get_src_port(const struct sk_buff *skb)
618{
619 char *transport_header;
620
621 transport_header = skb_transport_header(skb);
622 if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
623 return ((struct tcphdr *)transport_header)->source;
624 return ((struct udphdr *)transport_header)->source;
625}
626
627static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, 599static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
628 struct arfs_table *arfs_t, 600 struct arfs_table *arfs_t,
629 const struct sk_buff *skb, 601 const struct flow_keys *fk,
630 u16 rxq, u32 flow_id) 602 u16 rxq, u32 flow_id)
631{ 603{
632 struct arfs_rule *rule; 604 struct arfs_rule *rule;
@@ -641,19 +613,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
641 INIT_WORK(&rule->arfs_work, arfs_handle_work); 613 INIT_WORK(&rule->arfs_work, arfs_handle_work);
642 614
643 tuple = &rule->tuple; 615 tuple = &rule->tuple;
644 tuple->etype = skb->protocol; 616 tuple->etype = fk->basic.n_proto;
617 tuple->ip_proto = fk->basic.ip_proto;
645 if (tuple->etype == htons(ETH_P_IP)) { 618 if (tuple->etype == htons(ETH_P_IP)) {
646 tuple->src_ipv4 = ip_hdr(skb)->saddr; 619 tuple->src_ipv4 = fk->addrs.v4addrs.src;
647 tuple->dst_ipv4 = ip_hdr(skb)->daddr; 620 tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
648 } else { 621 } else {
649 memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 622 memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
650 sizeof(struct in6_addr)); 623 sizeof(struct in6_addr));
651 memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 624 memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr)); 625 sizeof(struct in6_addr));
653 } 626 }
654 tuple->ip_proto = arfs_get_ip_proto(skb); 627 tuple->src_port = fk->ports.src;
655 tuple->src_port = arfs_get_src_port(skb); 628 tuple->dst_port = fk->ports.dst;
656 tuple->dst_port = arfs_get_dst_port(skb);
657 629
658 rule->flow_id = flow_id; 630 rule->flow_id = flow_id;
659 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; 631 rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -664,37 +636,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
664 return rule; 636 return rule;
665} 637}
666 638
667static bool arfs_cmp_ips(struct arfs_tuple *tuple, 639static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
668 const struct sk_buff *skb)
669{ 640{
670 if (tuple->etype == htons(ETH_P_IP) && 641 if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
671 tuple->src_ipv4 == ip_hdr(skb)->saddr && 642 return false;
672 tuple->dst_ipv4 == ip_hdr(skb)->daddr) 643 if (tuple->etype != fk->basic.n_proto)
673 return true; 644 return false;
674 if (tuple->etype == htons(ETH_P_IPV6) && 645 if (tuple->etype == htons(ETH_P_IP))
675 (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, 646 return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
676 sizeof(struct in6_addr))) && 647 tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
677 (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, 648 if (tuple->etype == htons(ETH_P_IPV6))
678 sizeof(struct in6_addr)))) 649 return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
679 return true; 650 sizeof(struct in6_addr)) &&
651 !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
652 sizeof(struct in6_addr));
680 return false; 653 return false;
681} 654}
682 655
683static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, 656static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
684 const struct sk_buff *skb) 657 const struct flow_keys *fk)
685{ 658{
686 struct arfs_rule *arfs_rule; 659 struct arfs_rule *arfs_rule;
687 struct hlist_head *head; 660 struct hlist_head *head;
688 __be16 src_port = arfs_get_src_port(skb);
689 __be16 dst_port = arfs_get_dst_port(skb);
690 661
691 head = arfs_hash_bucket(arfs_t, src_port, dst_port); 662 head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
692 hlist_for_each_entry(arfs_rule, head, hlist) { 663 hlist_for_each_entry(arfs_rule, head, hlist) {
693 if (arfs_rule->tuple.src_port == src_port && 664 if (arfs_cmp(&arfs_rule->tuple, fk))
694 arfs_rule->tuple.dst_port == dst_port &&
695 arfs_cmp_ips(&arfs_rule->tuple, skb)) {
696 return arfs_rule; 665 return arfs_rule;
697 }
698 } 666 }
699 667
700 return NULL; 668 return NULL;
@@ -707,20 +675,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
707 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; 675 struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
708 struct arfs_table *arfs_t; 676 struct arfs_table *arfs_t;
709 struct arfs_rule *arfs_rule; 677 struct arfs_rule *arfs_rule;
678 struct flow_keys fk;
679
680 if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
681 return -EPROTONOSUPPORT;
710 682
711 if (skb->protocol != htons(ETH_P_IP) && 683 if (fk.basic.n_proto != htons(ETH_P_IP) &&
712 skb->protocol != htons(ETH_P_IPV6)) 684 fk.basic.n_proto != htons(ETH_P_IPV6))
713 return -EPROTONOSUPPORT; 685 return -EPROTONOSUPPORT;
714 686
715 if (skb->encapsulation) 687 if (skb->encapsulation)
716 return -EPROTONOSUPPORT; 688 return -EPROTONOSUPPORT;
717 689
718 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); 690 arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
719 if (!arfs_t) 691 if (!arfs_t)
720 return -EPROTONOSUPPORT; 692 return -EPROTONOSUPPORT;
721 693
722 spin_lock_bh(&arfs->arfs_lock); 694 spin_lock_bh(&arfs->arfs_lock);
723 arfs_rule = arfs_find_rule(arfs_t, skb); 695 arfs_rule = arfs_find_rule(arfs_t, &fk);
724 if (arfs_rule) { 696 if (arfs_rule) {
725 if (arfs_rule->rxq == rxq_index) { 697 if (arfs_rule->rxq == rxq_index) {
726 spin_unlock_bh(&arfs->arfs_lock); 698 spin_unlock_bh(&arfs->arfs_lock);
@@ -728,8 +700,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
728 } 700 }
729 arfs_rule->rxq = rxq_index; 701 arfs_rule->rxq = rxq_index;
730 } else { 702 } else {
731 arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, 703 arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
732 rxq_index, flow_id);
733 if (!arfs_rule) { 704 if (!arfs_rule) {
734 spin_unlock_bh(&arfs->arfs_lock); 705 spin_unlock_bh(&arfs->arfs_lock);
735 return -ENOMEM; 706 return -ENOMEM;
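The en_arfs rework above replaces four hand-rolled header parsers with a single skb_flow_dissect_flow_keys() call and keys everything (ethertype, ip_proto, addresses, ports) off the returned struct flow_keys; the dissector also validates that the headers are actually present, which the old direct-pointer reads did not. A hedged sketch of comparing a stored tuple against dissected keys, with simplified stand-in types for struct flow_keys and ordering the cheapest checks first like the reworked arfs_cmp():

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins for the dissector output and stored tuple. */
	struct keys  { uint16_t n_proto; uint8_t ip_proto;
		       uint32_t src, dst; uint16_t sport, dport; };
	struct tuple { uint16_t etype;   uint8_t ip_proto;
		       uint32_t src, dst; uint16_t sport, dport; };

	static int tuple_matches(const struct tuple *t, const struct keys *k)
	{
		if (t->sport != k->sport || t->dport != k->dport)
			return 0;
		if (t->etype != k->n_proto || t->ip_proto != k->ip_proto)
			return 0;
		return t->src == k->src && t->dst == k->dst;
	}

	int main(void)
	{
		struct keys  k = { 0x0800, 6, 0x0a000001, 0x0a000002, 1234, 80 };
		struct tuple t = { 0x0800, 6, 0x0a000001, 0x0a000002, 1234, 80 };

		printf("match=%d\n", tuple_matches(&t, &k));
		return 0;
	}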
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 03bed714bac3..20e628c907e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1081,6 +1081,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1081 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : 1081 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
1082 mlx5e_port_speed2linkmodes(mdev, speed, !ext); 1082 mlx5e_port_speed2linkmodes(mdev, speed, !ext);
1083 1083
1084 if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
1085 autoneg != AUTONEG_ENABLE) {
1086 netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
1087 __func__);
1088 err = -EINVAL;
1089 goto out;
1090 }
1091
1084 link_modes = link_modes & eproto.cap; 1092 link_modes = link_modes & eproto.cap;
1085 if (!link_modes) { 1093 if (!link_modes) {
1086 netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", 1094 netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
@@ -1338,6 +1346,9 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
1338 struct mlx5_core_dev *mdev = priv->mdev; 1346 struct mlx5_core_dev *mdev = priv->mdev;
1339 int err; 1347 int err;
1340 1348
1349 if (!MLX5_CAP_GEN(mdev, vport_group_manager))
1350 return -EOPNOTSUPP;
1351
1341 if (pauseparam->autoneg) 1352 if (pauseparam->autoneg)
1342 return -EINVAL; 1353 return -EINVAL;
1343 1354
@@ -1679,6 +1690,40 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
1679 return 0; 1690 return 0;
1680} 1691}
1681 1692
1693int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1694 struct ethtool_flash *flash)
1695{
1696 struct mlx5_core_dev *mdev = priv->mdev;
1697 struct net_device *dev = priv->netdev;
1698 const struct firmware *fw;
1699 int err;
1700
1701 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
1702 return -EOPNOTSUPP;
1703
1704 err = request_firmware_direct(&fw, flash->data, &dev->dev);
1705 if (err)
1706 return err;
1707
1708 dev_hold(dev);
1709 rtnl_unlock();
1710
1711 err = mlx5_firmware_flash(mdev, fw, NULL);
1712 release_firmware(fw);
1713
1714 rtnl_lock();
1715 dev_put(dev);
1716 return err;
1717}
1718
1719static int mlx5e_flash_device(struct net_device *dev,
1720 struct ethtool_flash *flash)
1721{
1722 struct mlx5e_priv *priv = netdev_priv(dev);
1723
1724 return mlx5e_ethtool_flash_device(priv, flash);
1725}
1726
1682static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, 1727static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
1683 bool is_rx_cq) 1728 bool is_rx_cq)
1684{ 1729{
@@ -1961,6 +2006,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
1961 .set_wol = mlx5e_set_wol, 2006 .set_wol = mlx5e_set_wol,
1962 .get_module_info = mlx5e_get_module_info, 2007 .get_module_info = mlx5e_get_module_info,
1963 .get_module_eeprom = mlx5e_get_module_eeprom, 2008 .get_module_eeprom = mlx5e_get_module_eeprom,
2009 .flash_device = mlx5e_flash_device,
1964 .get_priv_flags = mlx5e_get_priv_flags, 2010 .get_priv_flags = mlx5e_get_priv_flags,
1965 .set_priv_flags = mlx5e_set_priv_flags, 2011 .set_priv_flags = mlx5e_set_priv_flags,
1966 .self_test = mlx5e_self_test, 2012 .self_test = mlx5e_self_test,
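The new mlx5e flash_device handler above runs a slow firmware flash from an ethtool callback that is entered under rtnl_lock: it takes a reference on the netdev, drops rtnl for the duration of the flash, and reacquires it before returning so the caller sees the locking state it expects. A sketch of the hold/unlock/relock bracket, with the lock and refcount primitives as illustrative stand-ins:

	#include <stdio.h>

	static int refs;
	static int rtnl_held = 1; /* ethtool callbacks arrive with it held */

	static void dev_hold_(void)    { refs++; }
	static void dev_put_(void)     { refs--; }
	static void rtnl_unlock_(void) { rtnl_held = 0; }
	static void rtnl_lock_(void)   { rtnl_held = 1; }

	static int slow_flash(void)
	{
		printf("flashing... rtnl held? %d (should be 0)\n", rtnl_held);
		return 0;
	}

	static int flash_device(void)
	{
		int err;

		dev_hold_();    /* keep the device alive while unlocked */
		rtnl_unlock_(); /* don't stall the whole stack for the flash */

		err = slow_flash();

		rtnl_lock_();   /* restore the caller's locking expectations */
		dev_put_();
		return err;
	}

	int main(void)
	{
		return flash_device();
	}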
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6c712c5be4d8..9d5f6e56188f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1321,7 +1321,6 @@ err_free_txqsq:
1321void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) 1321void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
1322{ 1322{
1323 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); 1323 sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
1324 clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
1325 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); 1324 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
1326 netdev_tx_reset_queue(sq->txq); 1325 netdev_tx_reset_queue(sq->txq);
1327 netif_tx_start_queue(sq->txq); 1326 netif_tx_start_queue(sq->txq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7ecfc53cf5f6..00b2d4a86159 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1480,7 +1480,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			      struct mlx5_flow_spec *spec,
 			      struct flow_cls_offload *f,
 			      struct net_device *filter_dev,
-			      u8 *match_level, u8 *tunnel_match_level)
+			      u8 *inner_match_level, u8 *outer_match_level)
 {
 	struct netlink_ext_ack *extack = f->common.extack;
 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1495,8 +1495,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	struct flow_dissector *dissector = rule->match.dissector;
 	u16 addr_type = 0;
 	u8 ip_proto = 0;
+	u8 *match_level;
 
-	*match_level = MLX5_MATCH_NONE;
+	match_level = outer_match_level;
 
 	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
@@ -1524,12 +1525,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 	}
 
 	if (mlx5e_get_tc_tun(filter_dev)) {
-		if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
+		if (parse_tunnel_attr(priv, spec, f, filter_dev,
+				      outer_match_level))
 			return -EOPNOTSUPP;
 
-		/* In decap flow, header pointers should point to the inner
+		/* At this point, header pointers should point to the inner
 		 * headers, outer header were already set by parse_tunnel_attr
 		 */
+		match_level = inner_match_level;
 		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
 						       spec);
 		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
@@ -1831,35 +1834,41 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
 			    struct flow_cls_offload *f,
 			    struct net_device *filter_dev)
 {
+	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
 	struct netlink_ext_ack *extack = f->common.extack;
 	struct mlx5_core_dev *dev = priv->mdev;
 	struct mlx5_eswitch *esw = dev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
 	struct mlx5_eswitch_rep *rep;
 	int err;
 
-	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);
+	inner_match_level = MLX5_MATCH_NONE;
+	outer_match_level = MLX5_MATCH_NONE;
+
+	err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
+				 &outer_match_level);
+	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
+				 outer_match_level : inner_match_level;
 
 	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
 		rep = rpriv->rep;
 		if (rep->vport != MLX5_VPORT_UPLINK &&
 		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
-		     esw->offloads.inline_mode < match_level)) {
+		     esw->offloads.inline_mode < non_tunnel_match_level)) {
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Flow is not offloaded due to min inline setting");
 			netdev_warn(priv->netdev,
 				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    match_level, esw->offloads.inline_mode);
+				    non_tunnel_match_level, esw->offloads.inline_mode);
 			return -EOPNOTSUPP;
 		}
 	}
 
 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-		flow->esw_attr->match_level = match_level;
-		flow->esw_attr->tunnel_match_level = tunnel_match_level;
+		flow->esw_attr->inner_match_level = inner_match_level;
+		flow->esw_attr->outer_match_level = outer_match_level;
 	} else {
-		flow->nic_attr->match_level = match_level;
+		flow->nic_attr->match_level = non_tunnel_match_level;
 	}
 
 	return err;
@@ -3158,7 +3167,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
 
 	esw_attr->parse_attr = parse_attr;
 	esw_attr->chain = f->common.chain_index;
-	esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+	esw_attr->prio = f->common.prio;
 
 	esw_attr->in_rep = in_rep;
 	esw_attr->in_mdev = in_mdev;
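
The esw_attr->prio hunk above belongs to a tree-wide tc change: flow_cls_common_offload::prio now carries the major priority directly, so classifier drivers drop their own TC_H_MAJ()/shift handling. Illustrative before/after, not taken from the patch:

    u32 old_way = TC_H_MAJ(f->common.prio) >> 16;   /* before the series */
    u32 new_way = f->common.prio;                   /* after: core pre-shifts */
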
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a38e8a3c7c9a..04685dbb280c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -377,8 +377,8 @@ struct mlx5_esw_flow_attr {
 		struct mlx5_termtbl_handle *termtbl;
 	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
 	u32 mod_hdr_id;
-	u8 match_level;
-	u8 tunnel_match_level;
+	u8 inner_match_level;
+	u8 outer_match_level;
 	struct mlx5_fc *counter;
 	u32 chain;
 	u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 089ae4d48a82..0323fd078271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -207,14 +207,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
-		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-		if (attr->match_level != MLX5_MATCH_NONE)
-			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
-	} else if (attr->match_level != MLX5_MATCH_NONE) {
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
-	}
+	if (attr->inner_match_level != MLX5_MATCH_NONE)
+		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
 
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_id = attr->mod_hdr_id;
@@ -290,7 +286,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
 	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
-	if (attr->match_level != MLX5_MATCH_NONE)
+	if (attr->outer_match_level != MLX5_MATCH_NONE)
 		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
 	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 9314777d99e3..d685122d9ff7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -590,7 +590,8 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
 			data_size = crdump_size - offset;
 		else
 			data_size = MLX5_CR_DUMP_CHUNK_SIZE;
-		err = devlink_fmsg_binary_put(fmsg, cr_data, data_size);
+		err = devlink_fmsg_binary_put(fmsg, (char *)cr_data + offset,
+					      data_size);
 		if (err)
 			goto free_data;
 	}
@@ -700,6 +701,16 @@ static void poll_health(struct timer_list *t)
 	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
 		goto out;
 
+	fatal_error = check_fatal_sensors(dev);
+
+	if (fatal_error && !health->fatal_error) {
+		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
+		dev->priv.health.fatal_error = fatal_error;
+		print_health_info(dev);
+		mlx5_trigger_health_work(dev);
+		goto out;
+	}
+
 	count = ioread32be(health->health_counter);
 	if (count == health->prev)
 		++health->miss_counter;
@@ -718,15 +729,6 @@ static void poll_health(struct timer_list *t)
 	if (health->synd && health->synd != prev_synd)
 		queue_work(health->wq, &health->report_work);
 
-	fatal_error = check_fatal_sensors(dev);
-
-	if (fatal_error && !health->fatal_error) {
-		mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
-		dev->priv.health.fatal_error = fatal_error;
-		print_health_info(dev);
-		mlx5_trigger_health_work(dev);
-	}
-
 out:
 	mod_timer(&health->timer, get_next_poll_jiffies());
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index ebd81f6b556e..90cb50fe17fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -122,6 +122,14 @@ static int mlx5i_get_ts_info(struct net_device *netdev,
 	return mlx5e_ethtool_get_ts_info(priv, info);
 }
 
+static int mlx5i_flash_device(struct net_device *netdev,
+			      struct ethtool_flash *flash)
+{
+	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+	return mlx5e_ethtool_flash_device(priv, flash);
+}
+
 enum mlx5_ptys_width {
 	MLX5_PTYS_WIDTH_1X = 1 << 0,
 	MLX5_PTYS_WIDTH_2X = 1 << 1,
@@ -233,6 +241,7 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
 	.get_ethtool_stats = mlx5i_get_ethtool_stats,
 	.get_ringparam = mlx5i_get_ringparam,
 	.set_ringparam = mlx5i_set_ringparam,
+	.flash_device = mlx5i_flash_device,
 	.get_channels = mlx5i_get_channels,
 	.set_channels = mlx5i_set_channels,
 	.get_coalesce = mlx5i_get_coalesce,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea9ee88491e5..ea1d4d26ece0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -27,6 +27,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
 	case 128:
 		general_obj_key_size =
 			MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
+		key_p += sz_bytes;
 		break;
 	case 256:
 		general_obj_key_size =
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index e8ac90564dbe..84a87d059333 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -471,7 +471,7 @@ int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
 void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
 				 unsigned int priority)
 {
-	rulei->priority = priority >> 16;
+	rulei->priority = priority;
 }
 
 void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 63b07edd9d81..38bb1cfe4e8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -29,7 +29,7 @@
 
 struct mlxsw_sp_ptp_state {
 	struct mlxsw_sp *mlxsw_sp;
-	struct rhashtable unmatched_ht;
+	struct rhltable unmatched_ht;
 	spinlock_t unmatched_lock; /* protects the HT */
 	struct delayed_work ht_gc_dw;
 	u32 gc_cycle;
@@ -45,7 +45,7 @@ struct mlxsw_sp1_ptp_key {
 
 struct mlxsw_sp1_ptp_unmatched {
 	struct mlxsw_sp1_ptp_key key;
-	struct rhash_head ht_node;
+	struct rhlist_head ht_node;
 	struct rcu_head rcu;
 	struct sk_buff *skb;
 	u64 timestamp;
@@ -359,7 +359,7 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
 /* Returns NULL on successful insertion, a pointer on conflict, or an ERR_PTR on
  * error.
  */
-static struct mlxsw_sp1_ptp_unmatched *
+static int
 mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
 			     struct mlxsw_sp1_ptp_key key,
 			     struct sk_buff *skb,
@@ -368,41 +368,51 @@ mlxsw_sp1_ptp_unmatched_save(struct mlxsw_sp *mlxsw_sp,
 	int cycles = MLXSW_SP1_PTP_HT_GC_TIMEOUT / MLXSW_SP1_PTP_HT_GC_INTERVAL;
 	struct mlxsw_sp_ptp_state *ptp_state = mlxsw_sp->ptp_state;
 	struct mlxsw_sp1_ptp_unmatched *unmatched;
-	struct mlxsw_sp1_ptp_unmatched *conflict;
+	int err;
 
 	unmatched = kzalloc(sizeof(*unmatched), GFP_ATOMIC);
 	if (!unmatched)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	unmatched->key = key;
 	unmatched->skb = skb;
 	unmatched->timestamp = timestamp;
 	unmatched->gc_cycle = mlxsw_sp->ptp_state->gc_cycle + cycles;
 
-	conflict = rhashtable_lookup_get_insert_fast(&ptp_state->unmatched_ht,
-						     &unmatched->ht_node,
-						     mlxsw_sp1_ptp_unmatched_ht_params);
-	if (conflict)
+	err = rhltable_insert(&ptp_state->unmatched_ht, &unmatched->ht_node,
+			      mlxsw_sp1_ptp_unmatched_ht_params);
+	if (err)
 		kfree(unmatched);
 
-	return conflict;
+	return err;
 }
 
 static struct mlxsw_sp1_ptp_unmatched *
 mlxsw_sp1_ptp_unmatched_lookup(struct mlxsw_sp *mlxsw_sp,
-			       struct mlxsw_sp1_ptp_key key)
+			       struct mlxsw_sp1_ptp_key key, int *p_length)
 {
-	return rhashtable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
-				 mlxsw_sp1_ptp_unmatched_ht_params);
+	struct mlxsw_sp1_ptp_unmatched *unmatched, *last = NULL;
+	struct rhlist_head *tmp, *list;
+	int length = 0;
+
+	list = rhltable_lookup(&mlxsw_sp->ptp_state->unmatched_ht, &key,
+			       mlxsw_sp1_ptp_unmatched_ht_params);
+	rhl_for_each_entry_rcu(unmatched, tmp, list, ht_node) {
+		last = unmatched;
+		length++;
+	}
+
+	*p_length = length;
+	return last;
 }
 
 static int
 mlxsw_sp1_ptp_unmatched_remove(struct mlxsw_sp *mlxsw_sp,
 			       struct mlxsw_sp1_ptp_unmatched *unmatched)
 {
-	return rhashtable_remove_fast(&mlxsw_sp->ptp_state->unmatched_ht,
-				      &unmatched->ht_node,
-				      mlxsw_sp1_ptp_unmatched_ht_params);
+	return rhltable_remove(&mlxsw_sp->ptp_state->unmatched_ht,
+			       &unmatched->ht_node,
+			       mlxsw_sp1_ptp_unmatched_ht_params);
 }
 
 /* This function is called in the following scenarios:
@@ -489,75 +499,38 @@ static void mlxsw_sp1_ptp_got_piece(struct mlxsw_sp *mlxsw_sp,
 			    struct mlxsw_sp1_ptp_key key,
 			    struct sk_buff *skb, u64 timestamp)
 {
-	struct mlxsw_sp1_ptp_unmatched *unmatched, *conflict;
+	struct mlxsw_sp1_ptp_unmatched *unmatched;
+	int length;
 	int err;
 
 	rcu_read_lock();
 
-	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key);
-
 	spin_lock(&mlxsw_sp->ptp_state->unmatched_lock);
 
-	if (unmatched) {
-		/* There was an unmatched entry when we looked, but it may have
-		 * been removed before we took the lock.
-		 */
-		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
-		if (err)
-			unmatched = NULL;
-	}
-
-	if (!unmatched) {
-		/* We have no unmatched entry, but one may have been added after
-		 * we looked, but before we took the lock.
-		 */
-		unmatched = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
-							 skb, timestamp);
-		if (IS_ERR(unmatched)) {
-			if (skb)
-				mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
-							    key.local_port,
-							    key.ingress, NULL);
-			unmatched = NULL;
-		} else if (unmatched) {
-			/* Save just told us, under lock, that the entry is
-			 * there, so this has to work.
-			 */
-			err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp,
-							     unmatched);
-			WARN_ON_ONCE(err);
-		}
-	}
-
-	/* If unmatched is non-NULL here, it comes either from the lookup, or
-	 * from the save attempt above. In either case the entry was removed
-	 * from the hash table. If unmatched is NULL, a new unmatched entry was
-	 * added to the hash table, and there was no conflict.
-	 */
-
+	unmatched = mlxsw_sp1_ptp_unmatched_lookup(mlxsw_sp, key, &length);
 	if (skb && unmatched && unmatched->timestamp) {
 		unmatched->skb = skb;
 	} else if (timestamp && unmatched && unmatched->skb) {
 		unmatched->timestamp = timestamp;
-	} else if (unmatched) {
-		/* unmatched holds an older entry of the same type: either an
-		 * skb if we are handling skb, or a timestamp if we are handling
-		 * timestamp. We can't match that up, so save what we have.
+	} else {
+		/* Either there is no entry to match, or one that is there is
		 * incompatible.
 		 */
-		conflict = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
-							skb, timestamp);
-		if (IS_ERR(conflict)) {
-			if (skb)
-				mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
-							    key.local_port,
-							    key.ingress, NULL);
-		} else {
-			/* Above, we removed an object with this key from the
-			 * hash table, under lock, so conflict can not be a
-			 * valid pointer.
-			 */
-			WARN_ON_ONCE(conflict);
-		}
+		if (length < 100)
+			err = mlxsw_sp1_ptp_unmatched_save(mlxsw_sp, key,
+							   skb, timestamp);
+		else
+			err = -E2BIG;
+		if (err && skb)
+			mlxsw_sp1_ptp_packet_finish(mlxsw_sp, skb,
						    key.local_port,
						    key.ingress, NULL);
+		unmatched = NULL;
+	}
+
+	if (unmatched) {
+		err = mlxsw_sp1_ptp_unmatched_remove(mlxsw_sp, unmatched);
+		WARN_ON_ONCE(err);
 	}
 
 	spin_unlock(&mlxsw_sp->ptp_state->unmatched_lock);
@@ -669,9 +642,8 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
 	local_bh_disable();
 
 	spin_lock(&ptp_state->unmatched_lock);
-	err = rhashtable_remove_fast(&ptp_state->unmatched_ht,
-				     &unmatched->ht_node,
-				     mlxsw_sp1_ptp_unmatched_ht_params);
+	err = rhltable_remove(&ptp_state->unmatched_ht, &unmatched->ht_node,
+			      mlxsw_sp1_ptp_unmatched_ht_params);
 	spin_unlock(&ptp_state->unmatched_lock);
 
 	if (err)
@@ -702,7 +674,7 @@ static void mlxsw_sp1_ptp_ht_gc(struct work_struct *work)
 	ptp_state = container_of(dwork, struct mlxsw_sp_ptp_state, ht_gc_dw);
 	gc_cycle = ptp_state->gc_cycle++;
 
-	rhashtable_walk_enter(&ptp_state->unmatched_ht, &iter);
+	rhltable_walk_enter(&ptp_state->unmatched_ht, &iter);
 	rhashtable_walk_start(&iter);
 	while ((obj = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(obj))
@@ -855,8 +827,8 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
 
 	spin_lock_init(&ptp_state->unmatched_lock);
 
-	err = rhashtable_init(&ptp_state->unmatched_ht,
-			      &mlxsw_sp1_ptp_unmatched_ht_params);
+	err = rhltable_init(&ptp_state->unmatched_ht,
+			    &mlxsw_sp1_ptp_unmatched_ht_params);
 	if (err)
 		goto err_hashtable_init;
 
@@ -891,7 +863,7 @@ err_fifo_clr:
 err_mtptpt1_set:
 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
 err_mtptpt_set:
-	rhashtable_destroy(&ptp_state->unmatched_ht);
+	rhltable_destroy(&ptp_state->unmatched_ht);
 err_hashtable_init:
 	kfree(ptp_state);
 	return ERR_PTR(err);
@@ -906,8 +878,8 @@ void mlxsw_sp1_ptp_fini(struct mlxsw_sp_ptp_state *ptp_state)
 	mlxsw_sp1_ptp_set_fifo_clr_on_trap(mlxsw_sp, false);
 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP1, 0);
 	mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0, 0);
-	rhashtable_free_and_destroy(&ptp_state->unmatched_ht,
-				    &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
+	rhltable_free_and_destroy(&ptp_state->unmatched_ht,
				  &mlxsw_sp1_ptp_unmatched_free_fn, NULL);
 	kfree(ptp_state);
 }
 
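The conversion above swaps rhashtable (one entry per key) for rhltable, which chains duplicate keys, so an insert never reports a conflict and a lookup yields a list to walk. A minimal sketch of the rhltable idiom, assuming a made-up struct item (illustrative only, not from the commit):

    #include <linux/rhashtable.h>

    struct item {
            u32 key;
            struct rhlist_head node;        /* rhlist_head, not rhash_head */
    };

    static const struct rhashtable_params item_ht_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct item, key),
            .head_offset = offsetof(struct item, node),
    };

    /* Caller must hold rcu_read_lock(); returns the last duplicate. */
    static struct item *last_dup(struct rhltable *ht, u32 *key)
    {
            struct rhlist_head *tmp, *list;
            struct item *it, *last = NULL;

            list = rhltable_lookup(ht, key, item_ht_params);
            rhl_for_each_entry_rcu(it, tmp, list, node)     /* walk duplicates */
                    last = it;
            return last;
    }
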
diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c
index 39aca1ab4687..86fc6e6b46dd 100644
--- a/drivers/net/ethernet/mscc/ocelot_ace.c
+++ b/drivers/net/ethernet/mscc/ocelot_ace.c
@@ -317,7 +317,7 @@ static void is2_action_set(struct vcap_data *data,
 		break;
 	case OCELOT_ACL_ACTION_TRAP:
 		VCAP_ACT_SET(PORT_MASK, 0x0);
-		VCAP_ACT_SET(MASK_MODE, 0x0);
+		VCAP_ACT_SET(MASK_MODE, 0x1);
 		VCAP_ACT_SET(POLICE_ENA, 0x0);
 		VCAP_ACT_SET(POLICE_IDX, 0x0);
 		VCAP_ACT_SET(CPU_QU_NUM, 0x0);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 59487d446a09..b894bc0c9c16 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -13,12 +13,6 @@ struct ocelot_port_block {
 	struct ocelot_port *port;
 };
 
-static u16 get_prio(u32 prio)
-{
-	/* prio starts from 0x1000 while the ids starts from 0 */
-	return prio >> 16;
-}
-
 static int ocelot_flower_parse_action(struct flow_cls_offload *f,
 				      struct ocelot_ace_rule *rule)
 {
@@ -168,7 +162,7 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
 	}
 
 finished_key_parsing:
-	ocelot_rule->prio = get_prio(f->common.prio);
+	ocelot_rule->prio = f->common.prio;
 	ocelot_rule->id = f->cookie;
 	return ocelot_flower_parse_action(f, ocelot_rule);
 }
@@ -218,7 +212,7 @@ static int ocelot_flower_destroy(struct flow_cls_offload *f,
 	struct ocelot_ace_rule rule;
 	int ret;
 
-	rule.prio = get_prio(f->common.prio);
+	rule.prio = f->common.prio;
 	rule.port = port_block->port;
 	rule.id = f->cookie;
 
@@ -236,7 +230,7 @@ static int ocelot_flower_stats_update(struct flow_cls_offload *f,
 	struct ocelot_ace_rule rule;
 	int ret;
 
-	rule.prio = get_prio(f->common.prio);
+	rule.prio = f->common.prio;
 	rule.port = port_block->port;
 	rule.id = f->cookie;
 	ret = ocelot_ace_rule_stats_update(&rule);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index d8b7fba96d58..337b0cbfd153 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3919,7 +3919,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * setup (if available). */
 	status = myri10ge_request_irq(mgp);
 	if (status != 0)
-		goto abort_with_firmware;
+		goto abort_with_slices;
 	myri10ge_free_irq(mgp);
 
 	/* Save configuration space to be restored if the
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index e209f150c5f2..9917d64694c6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1416,6 +1416,13 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 
 	switch (f->command) {
 	case FLOW_BLOCK_BIND:
+		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+		if (cb_priv &&
+		    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
+					  cb_priv,
+					  &nfp_block_cb_list))
+			return -EBUSY;
+
 		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
 		if (!cb_priv)
 			return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 86e968cd5ffd..124a43dc136a 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -93,7 +93,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
 		return -EOPNOTSUPP;
 	}
 
-	if (flow->common.prio != (1 << 16)) {
+	if (flow->common.prio != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
 		return -EOPNOTSUPP;
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 829dd60ab937..1efff7f68ef6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1325,7 +1325,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 					      &drv_version);
 		if (rc) {
 			DP_NOTICE(cdev, "Failed sending drv version command\n");
-			return rc;
+			goto err4;
 		}
 	}
 
@@ -1333,6 +1333,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
 
 	return 0;
 
+err4:
+	qed_ll2_dealloc_if(cdev);
 err3:
 	qed_hw_stop(cdev);
 err2:
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index e1dd6ea60d67..bae0074ab9aa 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5921,6 +5921,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
 	skb = napi_alloc_skb(&tp->napi, pkt_size);
 	if (skb)
 		skb_copy_to_linear_data(skb, data, pkt_size);
+	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
 
 	return skb;
 }
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ef8f08931fe8..6cacd5e893ac 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
- * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ * Copyright (C) 2014-2019 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
@@ -513,7 +513,10 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
 			kfree(ts_skb);
 			if (tag == tfa_tag) {
 				skb_tstamp_tx(skb, &shhwtstamps);
+				dev_consume_skb_any(skb);
 				break;
+			} else {
+				dev_kfree_skb_any(skb);
 			}
 		}
 		ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
@@ -1564,7 +1567,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 		goto unmap;
 	}
-	ts_skb->skb = skb;
+	ts_skb->skb = skb_get(skb);
 	ts_skb->tag = priv->ts_skb_tag++;
 	priv->ts_skb_tag &= 0x3ff;
 	list_add_tail(&ts_skb->list, &priv->ts_skb_list);
@@ -1693,6 +1696,7 @@ static int ravb_close(struct net_device *ndev)
 	/* Clear the timestamp list */
 	list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
 		list_del(&ts_skb->list);
+		kfree_skb(ts_skb->skb);
 		kfree(ts_skb);
 	}
 
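The ravb hunks above make the timestamp list hold its own reference: skb_get() pins the skb when it is queued, and every exit path (delivered, stale, or device close) drops exactly one reference. A minimal sketch of that refcount discipline, with a hypothetical struct pending (illustrative only):

    struct pending {
            struct list_head list;
            struct sk_buff *skb;
    };

    static void pend_add(struct list_head *head, struct pending *p,
                         struct sk_buff *skb)
    {
            p->skb = skb_get(skb);  /* +1 user: the list owns a reference */
            list_add_tail(&p->list, head);
    }

    static void pend_complete(struct pending *p, bool delivered)
    {
            list_del(&p->list);
            if (delivered)
                    dev_consume_skb_any(p->skb);    /* normal completion */
            else
                    dev_kfree_skb_any(p->skb);      /* counted as a drop */
            kfree(p);
    }
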
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 37c0bc699cd9..6c305b6ecad0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -94,7 +94,7 @@ static int tc_fill_entry(struct stmmac_priv *priv,
 	struct stmmac_tc_entry *entry, *frag = NULL;
 	struct tc_u32_sel *sel = cls->knode.sel;
 	u32 off, data, mask, real_off, rem;
-	u32 prio = cls->common.prio;
+	u32 prio = cls->common.prio << 16;
 	int ret;
 
 	/* Only 1 match per entry */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 32a89744972d..a46b8b2e44e1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2775,6 +2775,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	if (!cpsw)
 		return -ENOMEM;
 
+	platform_set_drvdata(pdev, cpsw);
 	cpsw->dev = dev;
 
 	mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
@@ -2879,7 +2880,6 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_cpts;
 	}
 
-	platform_set_drvdata(pdev, cpsw);
 	priv = netdev_priv(ndev);
 	priv->cpsw = cpsw;
 	priv->ndev = ndev;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 8479a440527b..12466a72cefc 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1504,7 +1504,7 @@ tc35815_rx(struct net_device *dev, int limit)
 			pci_unmap_single(lp->pci_dev,
 					 lp->rx_skbs[cur_bd].skb_dma,
 					 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
+			if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
 				memmove(skb->data, skb->data - NET_IP_ALIGN,
 					pkt_len);
 			data = skb_put(skb, pkt_len);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 78a7de3fb622..c62f474b6d08 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -371,9 +371,10 @@ tsi108_stat_carry_one(int carry, int carry_bit, int carry_shift,
 static void tsi108_stat_carry(struct net_device *dev)
 {
 	struct tsi108_prv_data *data = netdev_priv(dev);
+	unsigned long flags;
 	u32 carry1, carry2;
 
-	spin_lock_irq(&data->misclock);
+	spin_lock_irqsave(&data->misclock, flags);
 
 	carry1 = TSI_READ(TSI108_STAT_CARRY1);
 	carry2 = TSI_READ(TSI108_STAT_CARRY2);
@@ -441,7 +442,7 @@ static void tsi108_stat_carry(struct net_device *dev)
 			      TSI108_STAT_TXPAUSEDROP_CARRY,
 			      &data->tx_pause_drop);
 
-	spin_unlock_irq(&data->misclock);
+	spin_unlock_irqrestore(&data->misclock, flags);
 }
 
 /* Read a stat counter atomically with respect to carries.
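
The tsi108 fix matters because spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the function is reached with interrupts already disabled; the irqsave/irqrestore pair preserves whatever state the caller had. A generic sketch, with a hypothetical struct stats (illustrative only):

    static void update_counters(struct stats *s)
    {
            unsigned long flags;

            spin_lock_irqsave(&s->lock, flags);     /* records IRQ state */
            s->value++;
            spin_unlock_irqrestore(&s->lock, flags);/* restores it exactly */
    }
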
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 3544e1991579..e8fce6d715ef 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1239,12 +1239,15 @@ static void netvsc_get_stats64(struct net_device *net,
 			       struct rtnl_link_stats64 *t)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(net);
-	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
+	struct netvsc_device *nvdev;
 	struct netvsc_vf_pcpu_stats vf_tot;
 	int i;
 
+	rcu_read_lock();
+
+	nvdev = rcu_dereference(ndev_ctx->nvdev);
 	if (!nvdev)
-		return;
+		goto out;
 
 	netdev_stats_to_stats64(t, &net->stats);
 
@@ -1283,6 +1286,8 @@ static void netvsc_get_stats64(struct net_device *net,
 		t->rx_packets += packets;
 		t->multicast += multicast;
 	}
+out:
+	rcu_read_unlock();
 }
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
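
The netvsc change above is the standard RCU read-side pattern: a pointer obtained via rcu_dereference() is only valid between rcu_read_lock() and rcu_read_unlock(), so the whole use of nvdev now sits inside that window. A minimal sketch, with hypothetical my_priv/my_obj types (illustrative only):

    static void read_under_rcu(struct my_priv *priv)
    {
            struct my_obj *obj;

            rcu_read_lock();
            obj = rcu_dereference(priv->obj);   /* valid only inside the lock */
            if (obj)
                    use_obj(obj);               /* placeholder consumer */
            rcu_read_unlock();
    }
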
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b41696e16bdc..c20e7ef18bc9 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 		err = hwsim_subscribe_all_others(phy);
 		if (err < 0) {
 			mutex_unlock(&hwsim_phys_lock);
-			goto err_reg;
+			goto err_subscribe;
 		}
 	}
 	list_add_tail(&phy->list, &hwsim_phys);
@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
 
 	return idx;
 
+err_subscribe:
+	ieee802154_unregister_hw(phy->hw);
 err_reg:
 	kfree(pib);
 err_pib:
@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void)
 	return 0;
 
 platform_drv:
-	genl_unregister_family(&hwsim_genl_family);
-platform_dev:
 	platform_device_unregister(mac802154hwsim_dev);
+platform_dev:
+	genl_unregister_family(&hwsim_genl_family);
 	return rc;
 }
 
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index c5c417a3c0ce..bcc40a236624 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -73,46 +73,47 @@ static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
 	debugfs_remove_recursive(nsim_dev_port->ddir);
 }
 
+static struct net *nsim_devlink_net(struct devlink *devlink)
+{
+	return &init_net;
+}
+
 static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV4_FIB, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
 }
 
 static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV4_FIB_RULES, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
 }
 
 static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV6_FIB, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
 }
 
 static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
 {
-	struct nsim_dev *nsim_dev = priv;
+	struct net *net = priv;
 
-	return nsim_fib_get_val(nsim_dev->fib_data,
-				NSIM_RESOURCE_IPV6_FIB_RULES, false);
+	return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
 }
 
 static int nsim_dev_resources_register(struct devlink *devlink)
 {
-	struct nsim_dev *nsim_dev = devlink_priv(devlink);
 	struct devlink_resource_size_params params = {
 		.size_max = (u64)-1,
 		.size_granularity = 1,
 		.unit = DEVLINK_RESOURCE_UNIT_ENTRY
 	};
+	struct net *net = nsim_devlink_net(devlink);
 	int err;
 	u64 n;
 
@@ -126,8 +127,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		goto out;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV4_FIB, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
 	err = devlink_resource_register(devlink, "fib", n,
 					NSIM_RESOURCE_IPV4_FIB,
 					NSIM_RESOURCE_IPV4, &params);
@@ -136,8 +136,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		return err;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV4_FIB_RULES, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
 	err = devlink_resource_register(devlink, "fib-rules", n,
 					NSIM_RESOURCE_IPV4_FIB_RULES,
 					NSIM_RESOURCE_IPV4, &params);
@@ -156,8 +155,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		goto out;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV6_FIB, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
 	err = devlink_resource_register(devlink, "fib", n,
 					NSIM_RESOURCE_IPV6_FIB,
 					NSIM_RESOURCE_IPV6, &params);
@@ -166,8 +164,7 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 		return err;
 	}
 
-	n = nsim_fib_get_val(nsim_dev->fib_data,
-			     NSIM_RESOURCE_IPV6_FIB_RULES, true);
+	n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
 	err = devlink_resource_register(devlink, "fib-rules", n,
 					NSIM_RESOURCE_IPV6_FIB_RULES,
 					NSIM_RESOURCE_IPV6, &params);
@@ -179,19 +176,19 @@ static int nsim_dev_resources_register(struct devlink *devlink)
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV4_FIB,
 					  nsim_dev_ipv4_fib_resource_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV4_FIB_RULES,
 					  nsim_dev_ipv4_fib_rules_res_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV6_FIB,
 					  nsim_dev_ipv6_fib_resource_occ_get,
-					  nsim_dev);
+					  net);
 	devlink_resource_occ_get_register(devlink,
 					  NSIM_RESOURCE_IPV6_FIB_RULES,
 					  nsim_dev_ipv6_fib_rules_res_occ_get,
-					  nsim_dev);
+					  net);
 out:
 	return err;
 }
@@ -199,11 +196,11 @@ out:
 static int nsim_dev_reload(struct devlink *devlink,
 			   struct netlink_ext_ack *extack)
 {
-	struct nsim_dev *nsim_dev = devlink_priv(devlink);
 	enum nsim_resource_id res_ids[] = {
 		NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
 		NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
 	};
+	struct net *net = nsim_devlink_net(devlink);
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
@@ -212,8 +209,7 @@ static int nsim_dev_reload(struct devlink *devlink,
 
 		err = devlink_resource_size_get(devlink, res_ids[i], &val);
 		if (!err) {
-			err = nsim_fib_set_max(nsim_dev->fib_data,
-					       res_ids[i], val, extack);
+			err = nsim_fib_set_max(net, res_ids[i], val, extack);
 			if (err)
 				return err;
 		}
@@ -285,15 +281,9 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
 	mutex_init(&nsim_dev->port_list_lock);
 	nsim_dev->fw_update_status = true;
 
-	nsim_dev->fib_data = nsim_fib_create();
-	if (IS_ERR(nsim_dev->fib_data)) {
-		err = PTR_ERR(nsim_dev->fib_data);
-		goto err_devlink_free;
-	}
-
 	err = nsim_dev_resources_register(devlink);
 	if (err)
-		goto err_fib_destroy;
+		goto err_devlink_free;
 
 	err = devlink_register(devlink, &nsim_bus_dev->dev);
 	if (err)
@@ -315,8 +305,6 @@ err_dl_unregister:
 	devlink_unregister(devlink);
 err_resources_unregister:
 	devlink_resources_unregister(devlink, NULL);
-err_fib_destroy:
-	nsim_fib_destroy(nsim_dev->fib_data);
 err_devlink_free:
 	devlink_free(devlink);
 	return ERR_PTR(err);
@@ -330,7 +318,6 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
 	nsim_dev_debugfs_exit(nsim_dev);
 	devlink_unregister(devlink);
 	devlink_resources_unregister(devlink, NULL);
-	nsim_fib_destroy(nsim_dev->fib_data);
 	mutex_destroy(&nsim_dev->port_list_lock);
 	devlink_free(devlink);
 }
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 8c57ba747772..f61d094746c0 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,6 +18,7 @@
 #include <net/ip_fib.h>
 #include <net/ip6_fib.h>
 #include <net/fib_rules.h>
+#include <net/netns/generic.h>
 
 #include "netdevsim.h"
 
@@ -32,14 +33,15 @@ struct nsim_per_fib_data {
 };
 
 struct nsim_fib_data {
-	struct notifier_block fib_nb;
 	struct nsim_per_fib_data ipv4;
 	struct nsim_per_fib_data ipv6;
 };
 
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, bool max)
+static unsigned int nsim_fib_net_id;
+
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
 {
+	struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
 	struct nsim_fib_entry *entry;
 
 	switch (res_id) {
@@ -62,10 +64,10 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
 	return max ? entry->max : entry->num;
 }
 
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
 		     struct netlink_ext_ack *extack)
 {
+	struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
 	struct nsim_fib_entry *entry;
 	int err = 0;
 
@@ -118,9 +120,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
 	return err;
 }
 
-static int nsim_fib_rule_event(struct nsim_fib_data *data,
-			       struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
 {
+	struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
 	struct netlink_ext_ack *extack = info->extack;
 	int err = 0;
 
@@ -155,9 +157,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
 	return err;
 }
 
-static int nsim_fib_event(struct nsim_fib_data *data,
-			  struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct fib_notifier_info *info, bool add)
 {
+	struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
 	struct netlink_ext_ack *extack = info->extack;
 	int err = 0;
 
@@ -176,22 +178,18 @@ static int nsim_fib_event(struct nsim_fib_data *data,
 static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
 			     void *ptr)
 {
-	struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
-						  fib_nb);
 	struct fib_notifier_info *info = ptr;
 	int err = 0;
 
 	switch (event) {
 	case FIB_EVENT_RULE_ADD: /* fall through */
 	case FIB_EVENT_RULE_DEL:
-		err = nsim_fib_rule_event(data, info,
-					  event == FIB_EVENT_RULE_ADD);
+		err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
 		break;
 
 	case FIB_EVENT_ENTRY_ADD: /* fall through */
 	case FIB_EVENT_ENTRY_DEL:
-		err = nsim_fib_event(data, info,
-				     event == FIB_EVENT_ENTRY_ADD);
+		err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
 		break;
 	}
 
@@ -201,23 +199,30 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
 /* inconsistent dump, trying again */
 static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
 {
-	struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
-						  fib_nb);
+	struct nsim_fib_data *data;
+	struct net *net;
+
+	rcu_read_lock();
+	for_each_net_rcu(net) {
+		data = net_generic(net, nsim_fib_net_id);
+
+		data->ipv4.fib.num = 0ULL;
+		data->ipv4.rules.num = 0ULL;
 
-	data->ipv4.fib.num = 0ULL;
-	data->ipv4.rules.num = 0ULL;
-	data->ipv6.fib.num = 0ULL;
-	data->ipv6.rules.num = 0ULL;
+		data->ipv6.fib.num = 0ULL;
+		data->ipv6.rules.num = 0ULL;
+	}
+	rcu_read_unlock();
 }
 
-struct nsim_fib_data *nsim_fib_create(void)
-{
-	struct nsim_fib_data *data;
-	int err;
+static struct notifier_block nsim_fib_nb = {
+	.notifier_call = nsim_fib_event_nb,
+};
 
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return ERR_PTR(-ENOMEM);
+/* Initialize per network namespace state */
+static int __net_init nsim_fib_netns_init(struct net *net)
+{
+	struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
 
 	data->ipv4.fib.max = (u64)-1;
 	data->ipv4.rules.max = (u64)-1;
@@ -225,22 +230,37 @@ struct nsim_fib_data *nsim_fib_create(void)
 	data->ipv6.fib.max = (u64)-1;
 	data->ipv6.rules.max = (u64)-1;
 
-	data->fib_nb.notifier_call = nsim_fib_event_nb;
-	err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
-	if (err) {
-		pr_err("Failed to register fib notifier\n");
-		goto err_out;
-	}
+	return 0;
+}
 
-	return data;
+static struct pernet_operations nsim_fib_net_ops = {
+	.init = nsim_fib_netns_init,
+	.id = &nsim_fib_net_id,
+	.size = sizeof(struct nsim_fib_data),
+};
 
-err_out:
-	kfree(data);
-	return ERR_PTR(err);
+void nsim_fib_exit(void)
+{
+	unregister_pernet_subsys(&nsim_fib_net_ops);
+	unregister_fib_notifier(&nsim_fib_nb);
 }
 
-void nsim_fib_destroy(struct nsim_fib_data *data)
+int nsim_fib_init(void)
 {
-	unregister_fib_notifier(&data->fib_nb);
-	kfree(data);
+	int err;
+
+	err = register_pernet_subsys(&nsim_fib_net_ops);
+	if (err < 0) {
+		pr_err("Failed to register pernet subsystem\n");
+		goto err_out;
+	}
+
+	err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
+	if (err < 0) {
+		pr_err("Failed to register fib notifier\n");
+		goto err_out;
+	}
+
+err_out:
+	return err;
 }
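fib.c now uses the pernet machinery: with .id and .size set in pernet_operations, the core allocates a zeroed per-namespace blob for every struct net, retrievable via net_generic(). A minimal sketch of that pattern, with hypothetical demo_* names (illustrative only):

    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct demo_pernet {
            u64 counter;
    };

    static unsigned int demo_net_id;

    static int __net_init demo_netns_init(struct net *net)
    {
            struct demo_pernet *dp = net_generic(net, demo_net_id);

            dp->counter = 0;        /* blob arrives zeroed; shown for clarity */
            return 0;
    }

    static struct pernet_operations demo_net_ops = {
            .init = demo_netns_init,
            .id   = &demo_net_id,
            .size = sizeof(struct demo_pernet),
    };

    /* register_pernet_subsys(&demo_net_ops) at module init,
     * unregister_pernet_subsys(&demo_net_ops) at module exit.
     */
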
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 0740940f41b1..55f57f76d01b 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -357,12 +357,18 @@ static int __init nsim_module_init(void)
 	if (err)
 		goto err_dev_exit;
 
-	err = rtnl_link_register(&nsim_link_ops);
+	err = nsim_fib_init();
 	if (err)
 		goto err_bus_exit;
 
+	err = rtnl_link_register(&nsim_link_ops);
+	if (err)
+		goto err_fib_exit;
+
 	return 0;
 
+err_fib_exit:
+	nsim_fib_exit();
 err_bus_exit:
 	nsim_bus_exit();
 err_dev_exit:
@@ -373,6 +379,7 @@ err_dev_exit:
 static void __exit nsim_module_exit(void)
 {
 	rtnl_link_unregister(&nsim_link_ops);
+	nsim_fib_exit();
 	nsim_bus_exit();
 	nsim_dev_exit();
 }
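
The init path above follows the usual kernel unwind idiom: each failing step jumps to a label that undoes only what already succeeded, in reverse order, and the module exit mirrors that order. A generic sketch with placeholder step_a/step_b helpers (illustrative only):

    static int __init demo_init(void)
    {
            int err;

            err = step_a_register();
            if (err)
                    return err;

            err = step_b_register();
            if (err)
                    goto err_a;     /* undo only what already succeeded */

            return 0;

    err_a:
            step_a_unregister();
            return err;
    }

    static void __exit demo_exit(void)
    {
            step_b_unregister();    /* teardown mirrors init, reversed */
            step_a_unregister();
    }
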
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 79c05af2a7c0..9404637d34b7 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -169,12 +169,10 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
 int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
 		      unsigned int port_index);
 
-struct nsim_fib_data *nsim_fib_create(void);
-void nsim_fib_destroy(struct nsim_fib_data *fib_data);
-u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct nsim_fib_data *fib_data,
-		     enum nsim_resource_id res_id, u64 val,
+int nsim_fib_init(void);
+void nsim_fib_exit(void);
+u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
 		     struct netlink_ext_ack *extack);
 
 #if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 222ccd9ecfce..6ad8b1c63c34 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -257,36 +257,20 @@ static int at803x_config_init(struct phy_device *phydev)
      * after HW reset: RX delay enabled and TX delay disabled
      * after SW reset: RX delay enabled, while TX delay retains the
      * value before reset.
-     *
-     * So let's first disable the RX and TX delays in PHY and enable
-     * them based on the mode selected (this also takes care of RGMII
-     * mode where we expect delays to be disabled)
      */
-
-    ret = at803x_disable_rx_delay(phydev);
-    if (ret < 0)
-        return ret;
-    ret = at803x_disable_tx_delay(phydev);
-    if (ret < 0)
-        return ret;
-
     if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
-        phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
-        /* If RGMII_ID or RGMII_RXID are specified enable RX delay,
-         * otherwise keep it disabled
-         */
+        phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
         ret = at803x_enable_rx_delay(phydev);
-        if (ret < 0)
-            return ret;
-    }
+    else
+        ret = at803x_disable_rx_delay(phydev);
+    if (ret < 0)
+        return ret;
 
     if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
-        phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
-        /* If RGMII_ID or RGMII_TXID are specified enable TX delay,
-         * otherwise keep it disabled
-         */
+        phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
         ret = at803x_enable_tx_delay(phydev);
-    }
+    else
+        ret = at803x_disable_tx_delay(phydev);
 
     return ret;
 }
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index b9d4145781ca..58bb25e4af10 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -219,6 +219,20 @@ int genphy_c45_read_link(struct phy_device *phydev)
     int val, devad;
     bool link = true;
 
+    if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+        val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
+        if (val < 0)
+            return val;
+
+        /* Autoneg is being started, therefore disregard current
+         * link status and report link as down.
+         */
+        if (val & MDIO_AN_CTRL1_RESTART) {
+            phydev->link = 0;
+            return 0;
+        }
+    }
+
     while (mmd_mask && link) {
         devad = __ffs(mmd_mask);
         mmd_mask &= ~BIT(devad);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 7ddd91df99e3..27ebc2c6c2d0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1752,7 +1752,17 @@ EXPORT_SYMBOL(genphy_aneg_done);
  */
 int genphy_update_link(struct phy_device *phydev)
 {
-    int status;
+    int status = 0, bmcr;
+
+    bmcr = phy_read(phydev, MII_BMCR);
+    if (bmcr < 0)
+        return bmcr;
+
+    /* Autoneg is being started, therefore disregard BMSR value and
+     * report link as down.
+     */
+    if (bmcr & BMCR_ANRESTART)
+        goto done;
 
     /* The link state is latched low so that momentary link
      * drops can be detected. Do not double-read the status
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index abfa0da9bbd2..e8089def5a46 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1004,6 +1004,8 @@ static void __team_compute_features(struct team *team)
 
     team->dev->vlan_features = vlan_features;
     team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+                                 NETIF_F_HW_VLAN_CTAG_TX |
+                                 NETIF_F_HW_VLAN_STAG_TX |
                                  NETIF_F_GSO_UDP_L4;
     team->dev->hard_header_len = max_hard_header_len;
 
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 5519248a791e..32b08b18e120 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -163,7 +163,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
     }
     if (!timeout) {
         dev_err(&udev->dev, "firmware not ready in time\n");
-        return -ETIMEDOUT;
+        ret = -ETIMEDOUT;
+        goto err;
     }
 
     /* enable ethernet mode (?) */
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index d62b6706a537..fc5895f85cee 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -113,16 +113,16 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
     status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
                                      usb_buf, 24);
     if (status != 0)
-        return status;
+        goto out;
 
     memcpy(usb_buf, init_msg_2, 12);
     status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
                                      usb_buf, 28);
     if (status != 0)
-        return status;
+        goto out;
 
     memcpy(ethernet_addr, usb_buf + 10, ETH_ALEN);
-
+out:
     kfree(usb_buf);
     return status;
 }
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 3d92ea6fcc02..f033fee225a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3792,7 +3792,7 @@ static int lan78xx_probe(struct usb_interface *intf,
     ret = register_netdev(netdev);
     if (ret != 0) {
         netif_err(dev, probe, netdev, "couldn't register the device\n");
-        goto out3;
+        goto out4;
     }
 
     usb_set_intfdata(intf, dev);
@@ -3807,12 +3807,14 @@ static int lan78xx_probe(struct usb_interface *intf,
 
     ret = lan78xx_phy_init(dev);
     if (ret < 0)
-        goto out4;
+        goto out5;
 
     return 0;
 
-out4:
+out5:
     unregister_netdev(netdev);
+out4:
+    usb_free_urb(dev->urb_intr);
 out3:
     lan78xx_unbind(dev, intf);
 out2:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 0cc03a9ff545..eee0f5007ee3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -799,8 +799,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
     ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
                           RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
                           value, index, tmp, size, 500);
+    if (ret < 0)
+        memset(data, 0xff, size);
+    else
+        memcpy(data, tmp, size);
 
-    memcpy(data, tmp, size);
     kfree(tmp);
 
     return ret;
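The get_registers() change above closes a subtle bug: when the control transfer fails, the bounce buffer is uninitialized, so copying it out would hand the caller stale kernel memory. The defensive pattern, sketched generically (read_hw() is a hypothetical stand-in for the failing transfer, not a real API):

#include <linux/slab.h>
#include <linux/string.h>

static int read_hw(void *buf, size_t size)
{
    return -EIO;        /* placeholder for the real transfer call */
}

static int read_regs_safe(void *data, size_t size)
{
    void *tmp;
    int ret;

    tmp = kmalloc(size, GFP_KERNEL);
    if (!tmp)
        return -ENOMEM;

    ret = read_hw(tmp, size);
    if (ret < 0)
        memset(data, 0xff, size);    /* defined contents on error */
    else
        memcpy(data, tmp, size);     /* only copy validated data */

    kfree(tmp);
    return ret;
}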
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e9fc168bb734..489cba9b284d 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -351,13 +351,15 @@ int i2400m_barker_db_init(const char *_options)
             }
             result = i2400m_barker_db_add(barker);
             if (result < 0)
-                goto error_add;
+                goto error_parse_add;
         }
         kfree(options_orig);
     }
     return 0;
 
+error_parse_add:
 error_parse:
+    kfree(options_orig);
 error_add:
     kfree(i2400m_barker_db);
     return result;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index cb22d447fcb8..fe776e35b9d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -554,7 +554,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
         cpu_to_le32(vif->bss_conf.use_short_slot ?
                     MAC_FLG_SHORT_SLOT : 0);
 
-    cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+    cmd->filter_flags = 0;
 
     for (i = 0; i < IEEE80211_NUM_ACS; i++) {
         u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
@@ -623,6 +623,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
     /* We need the dtim_period to set the MAC as associated */
     if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
         !force_assoc_off) {
+        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+        u8 ap_sta_id = mvmvif->ap_sta_id;
         u32 dtim_offs;
 
         /*
@@ -658,6 +660,29 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
                        dtim_offs);
 
         ctxt_sta->is_assoc = cpu_to_le32(1);
+
+        /*
+         * allow multicast data frames only as long as the station is
+         * authorized, i.e., GTK keys are already installed (if needed)
+         */
+        if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+            struct ieee80211_sta *sta;
+
+            rcu_read_lock();
+
+            sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+            if (!IS_ERR_OR_NULL(sta)) {
+                struct iwl_mvm_sta *mvmsta =
+                    iwl_mvm_sta_from_mac80211(sta);
+
+                if (mvmsta->sta_state ==
+                    IEEE80211_STA_AUTHORIZED)
+                    cmd.filter_flags |=
+                        cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+            }
+
+            rcu_read_unlock();
+        }
     } else {
         ctxt_sta->is_assoc = cpu_to_le32(0);
 
@@ -703,7 +728,8 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
                                    MAC_FILTER_IN_CONTROL_AND_MGMT |
                                    MAC_FILTER_IN_BEACON |
                                    MAC_FILTER_IN_PROBE_REQUEST |
-                                   MAC_FILTER_IN_CRC32);
+                                   MAC_FILTER_IN_CRC32 |
+                                   MAC_FILTER_ACCEPT_GRP);
     ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
 
     /* Allocate sniffer station */
@@ -727,7 +753,8 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
     iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
 
     cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
-                                   MAC_FILTER_IN_PROBE_REQUEST);
+                                   MAC_FILTER_IN_PROBE_REQUEST |
+                                   MAC_FILTER_ACCEPT_GRP);
 
     /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
     cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1c904b5226aa..a7bc00d1296f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -3327,10 +3327,20 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
         /* enable beacon filtering */
         WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 
+        /*
+         * Now that the station is authorized, i.e., keys were already
+         * installed, need to indicate to the FW that
+         * multicast data frames can be forwarded to the driver
+         */
+        iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
         iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
                              true);
     } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                new_state == IEEE80211_STA_ASSOC) {
+        /* Multicast data frames are no longer allowed */
+        iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
         /* disable beacon filtering */
         ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
         WARN_ON(ret &&
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index de711c1160d3..7c5aaeaf7fe5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1063,6 +1063,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
             iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0;
     }
+
+    /* same thing for QuZ... */
+    if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
+        if (cfg == &iwl_ax101_cfg_qu_hr)
+            cfg = &iwl_ax101_cfg_quz_hr;
+        else if (cfg == &iwl_ax201_cfg_qu_hr)
+            cfg = &iwl_ax201_cfg_quz_hr;
+        else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
+            cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
+        else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
+            cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
+        else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
+            cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
+        else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
+            cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
+    }
+
 #endif
 
     pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f5df5b370d78..935e35dafce5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3603,6 +3603,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
     } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
                CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
                ((trans->cfg != &iwl_ax200_cfg_cc &&
+                 trans->cfg != &iwl_ax201_cfg_qu_hr &&
                  trans->cfg != &killer1650x_2ax_cfg &&
                  trans->cfg != &killer1650w_2ax_cfg &&
                  trans->cfg != &iwl_ax201_cfg_quz_hr) ||
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 38d110338987..9ef6b8fe03c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -99,10 +99,7 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
     u16 len = byte_cnt;
     __le16 bc_ent;
 
-    if (trans_pcie->bc_table_dword)
-        len = DIV_ROUND_UP(len, 4);
-
-    if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+    if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
         return;
 
     filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
@@ -117,11 +114,20 @@ void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
      */
     num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
 
-    bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+    if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+        /* Starting from 22560, the HW expects bytes */
+        WARN_ON(trans_pcie->bc_table_dword);
+        WARN_ON(len > 0x3FFF);
+        bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
         scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
-    else
+    } else {
+        /* Until 22560, the HW expects DW */
+        WARN_ON(!trans_pcie->bc_table_dword);
+        len = DIV_ROUND_UP(len, 4);
+        WARN_ON(len > 0xFFF);
+        bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
         scd_bc_tbl->tfd_offset[idx] = bc_ent;
+    }
 }
 
 /*
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 627ed1fc7b15..645f4d15fb61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -136,11 +136,11 @@ static const struct ieee80211_ops mt76x0u_ops = {
     .release_buffered_frames = mt76_release_buffered_frames,
 };
 
-static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
+static int mt76x0u_init_hardware(struct mt76x02_dev *dev, bool reset)
 {
     int err;
 
-    mt76x0_chip_onoff(dev, true, true);
+    mt76x0_chip_onoff(dev, true, reset);
 
     if (!mt76x02_wait_for_mac(&dev->mt76))
         return -ETIMEDOUT;
@@ -173,7 +173,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
     if (err < 0)
         goto out_err;
 
-    err = mt76x0u_init_hardware(dev);
+    err = mt76x0u_init_hardware(dev, true);
     if (err < 0)
         goto out_err;
 
@@ -309,7 +309,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
     if (ret < 0)
         goto err;
 
-    ret = mt76x0u_init_hardware(dev);
+    ret = mt76x0u_init_hardware(dev, false);
     if (ret)
         goto err;
 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index c9b957ac5733..ecbe78b8027b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -6095,6 +6095,15 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
     }
 
     /*
+     * Clear encryption initialization vectors on start, but keep them
+     * for watchdog reset. Otherwise we will have wrong IVs and not be
+     * able to keep connections after reset.
+     */
+    if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags))
+        for (i = 0; i < 256; i++)
+            rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
+
+    /*
      * Clear all beacons
      */
     for (i = 0; i < 8; i++)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 7e43690a861c..2b216edd0c7d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -658,6 +658,7 @@ enum rt2x00_state_flags {
     DEVICE_STATE_ENABLED_RADIO,
     DEVICE_STATE_SCANNING,
     DEVICE_STATE_FLUSHING,
+    DEVICE_STATE_RESET,
 
     /*
      * Driver configuration
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 35414f97a978..9d158237ac67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1256,13 +1256,14 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 
 int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
 {
-    int retval;
+    int retval = 0;
 
     if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) {
         /*
          * This is special case for ieee80211_restart_hw(), otherwise
          * mac80211 never call start() two times in row without stop();
          */
+        set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
         rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev);
         rt2x00lib_stop(rt2x00dev);
     }
@@ -1273,14 +1274,14 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
      */
     retval = rt2x00lib_load_firmware(rt2x00dev);
     if (retval)
-        return retval;
+        goto out;
 
     /*
      * Initialize the device.
      */
     retval = rt2x00lib_initialize(rt2x00dev);
     if (retval)
-        return retval;
+        goto out;
 
     rt2x00dev->intf_ap_count = 0;
     rt2x00dev->intf_sta_count = 0;
@@ -1289,11 +1290,13 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
     /* Enable the radio */
     retval = rt2x00lib_enable_radio(rt2x00dev);
     if (retval)
-        return retval;
+        goto out;
 
     set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags);
 
-    return 0;
+out:
+    clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags);
+    return retval;
 }
 
 void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1d9940d4e8c7..c9262ffeefe4 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
             skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
             nskb = xenvif_alloc_skb(0);
             if (unlikely(nskb == NULL)) {
+                skb_shinfo(skb)->nr_frags = 0;
                 kfree_skb(skb);
                 xenvif_tx_err(queue, &txreq, extra_count, idx);
                 if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
             if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                 /* Failure in xenvif_set_skb_gso is fatal. */
+                skb_shinfo(skb)->nr_frags = 0;
                 kfree_skb(skb);
                 kfree_skb(nskb);
                 break;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8f3fbe5ca937..d3d6b7bd6903 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1286,6 +1286,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
      */
     if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
         mutex_lock(&ctrl->scan_lock);
+        mutex_lock(&ctrl->subsys->lock);
+        nvme_mpath_start_freeze(ctrl->subsys);
+        nvme_mpath_wait_freeze(ctrl->subsys);
         nvme_start_freeze(ctrl);
         nvme_wait_freeze(ctrl);
     }
@@ -1316,6 +1319,8 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
     nvme_update_formats(ctrl);
     if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
         nvme_unfreeze(ctrl);
+        nvme_mpath_unfreeze(ctrl->subsys);
+        mutex_unlock(&ctrl->subsys->lock);
         mutex_unlock(&ctrl->scan_lock);
     }
     if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1715,6 +1720,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
     if (ns->head->disk) {
         nvme_update_disk_info(ns->head->disk, ns, id);
         blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+        revalidate_disk(ns->head->disk);
     }
 #endif
 }
@@ -2251,6 +2257,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
         .vid = 0x1179,
         .mn = "THNSF5256GPUK TOSHIBA",
         .quirks = NVME_QUIRK_NO_APST,
+    },
+    {
+        /*
+         * This LiteON CL1-3D*-Q11 firmware version has a race
+         * condition associated with actions related to suspend to idle
+         * LiteON has resolved the problem in future firmware
+         */
+        .vid = 0x14a4,
+        .fr = "22301111",
+        .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
     }
 };
 
@@ -2487,6 +2503,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
     if (ret) {
         dev_err(ctrl->device,
                 "failed to register subsystem device.\n");
+        put_device(&subsys->dev);
         goto out_unlock;
     }
     ida_init(&subsys->ns_ida);
@@ -2509,7 +2526,6 @@ out_put_subsystem:
     nvme_put_subsystem(subsys);
 out_unlock:
     mutex_unlock(&nvme_subsystems_lock);
-    put_device(&subsys->dev);
     return ret;
 }
 
@@ -2591,6 +2607,9 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         goto out_free;
     }
 
+    if (!(ctrl->ops->flags & NVME_F_FABRICS))
+        ctrl->cntlid = le16_to_cpu(id->cntlid);
+
     if (!ctrl->identified) {
         int i;
 
@@ -2691,7 +2710,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
             goto out_free;
         }
     } else {
-        ctrl->cntlid = le16_to_cpu(id->cntlid);
         ctrl->hmpre = le32_to_cpu(id->hmpre);
         ctrl->hmmin = le32_to_cpu(id->hmmin);
         ctrl->hmminds = le32_to_cpu(id->hmminds);
@@ -3571,6 +3589,13 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
     struct nvme_ns *ns, *next;
     LIST_HEAD(ns_list);
 
+    /*
+     * make sure to requeue I/O to all namespaces as these
+     * might result from the scan itself and must complete
+     * for the scan_work to make progress
+     */
+    nvme_mpath_clear_ctrl_paths(ctrl);
+
     /* prevent racing with ns scanning */
     flush_work(&ctrl->scan_work);
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 4f0d0d12744e..af831d3d15d0 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -12,6 +12,36 @@ module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
     "turn on native support for multiple controllers per subsystem");
 
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+    struct nvme_ns_head *h;
+
+    lockdep_assert_held(&subsys->lock);
+    list_for_each_entry(h, &subsys->nsheads, entry)
+        if (h->disk)
+            blk_mq_unfreeze_queue(h->disk->queue);
+}
+
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+    struct nvme_ns_head *h;
+
+    lockdep_assert_held(&subsys->lock);
+    list_for_each_entry(h, &subsys->nsheads, entry)
+        if (h->disk)
+            blk_mq_freeze_queue_wait(h->disk->queue);
+}
+
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+    struct nvme_ns_head *h;
+
+    lockdep_assert_held(&subsys->lock);
+    list_for_each_entry(h, &subsys->nsheads, entry)
+        if (h->disk)
+            blk_freeze_queue_start(h->disk->queue);
+}
+
 /*
  * If multipathing is enabled we need to always use the subsystem instance
  * number for numbering our devices to avoid conflicts between subsystems that
@@ -104,18 +134,34 @@ static const char *nvme_ana_state_names[] = {
     [NVME_ANA_CHANGE]       = "change",
 };
 
-void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
     struct nvme_ns_head *head = ns->head;
+    bool changed = false;
     int node;
 
     if (!head)
-        return;
+        goto out;
 
     for_each_node(node) {
-        if (ns == rcu_access_pointer(head->current_path[node]))
+        if (ns == rcu_access_pointer(head->current_path[node])) {
             rcu_assign_pointer(head->current_path[node], NULL);
+            changed = true;
+        }
     }
+out:
+    return changed;
+}
+
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
+{
+    struct nvme_ns *ns;
+
+    mutex_lock(&ctrl->scan_lock);
+    list_for_each_entry(ns, &ctrl->namespaces, list)
+        if (nvme_mpath_clear_current_path(ns))
+            kblockd_schedule_work(&ns->head->requeue_work);
+    mutex_unlock(&ctrl->scan_lock);
 }
 
 static bool nvme_path_is_disabled(struct nvme_ns *ns)
@@ -226,6 +272,24 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
     return ns;
 }
 
+static bool nvme_available_path(struct nvme_ns_head *head)
+{
+    struct nvme_ns *ns;
+
+    list_for_each_entry_rcu(ns, &head->list, siblings) {
+        switch (ns->ctrl->state) {
+        case NVME_CTRL_LIVE:
+        case NVME_CTRL_RESETTING:
+        case NVME_CTRL_CONNECTING:
+            /* fallthru */
+            return true;
+        default:
+            break;
+        }
+    }
+    return false;
+}
+
 static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
         struct bio *bio)
 {
@@ -252,14 +316,14 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
                               disk_devt(ns->head->disk),
                               bio->bi_iter.bi_sector);
         ret = direct_make_request(bio);
-    } else if (!list_empty_careful(&head->list)) {
-        dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
+    } else if (nvme_available_path(head)) {
+        dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
 
         spin_lock_irq(&head->requeue_lock);
         bio_list_add(&head->requeue_list, bio);
         spin_unlock_irq(&head->requeue_lock);
     } else {
-        dev_warn_ratelimited(dev, "no path - failing I/O\n");
+        dev_warn_ratelimited(dev, "no available path - failing I/O\n");
 
         bio->bi_status = BLK_STS_IOERR;
         bio_endio(bio);
@@ -364,6 +428,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
         srcu_read_unlock(&head->srcu, srcu_idx);
     }
 
+    synchronize_srcu(&ns->head->srcu);
     kblockd_schedule_work(&ns->head->requeue_work);
 }
 
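The three helpers added at the top of this file fan the standard blk-mq freeze protocol out across every namespace head in the subsystem. On a single queue the pairing looks like the sketch below (assuming q is a live blk-mq request_queue owned by the caller); splitting start and wait lets a caller kick off freezes on many queues before blocking on any of them, which is why the mpath code exposes the three phases separately:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void quiesce_and_update(struct request_queue *q)
{
    blk_freeze_queue_start(q);      /* new I/O now blocks at entry */
    blk_mq_freeze_queue_wait(q);    /* wait for in-flight I/O to drain */

    /* ... change state that must not race with I/O ... */

    blk_mq_unfreeze_queue(q);       /* re-admit I/O */
}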
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 26b563f9985b..2d678fb968c7 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -92,6 +92,11 @@ enum nvme_quirks {
      * Broken Write Zeroes.
      */
     NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
+
+    /*
+     * Force simple suspend/resume path.
+     */
+    NVME_QUIRK_SIMPLE_SUSPEND = (1 << 10),
 };
 
 /*
@@ -490,6 +495,9 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
     return ctrl->ana_log_buf != NULL;
 }
 
+void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
+void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
                         struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
@@ -500,7 +508,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
 int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-void nvme_mpath_clear_current_path(struct nvme_ns *ns);
+bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
+void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
 
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -548,7 +557,11 @@ static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
 static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 }
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+    return false;
+}
+static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 {
 }
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -568,6 +581,15 @@ static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 {
 }
+static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+{
+}
+static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index db160cee42ad..732d5b63ec05 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2695,7 +2695,7 @@ static void nvme_async_probe(void *data, async_cookie_t cookie)
 {
     struct nvme_dev *dev = data;
 
-    nvme_reset_ctrl_sync(&dev->ctrl);
+    flush_work(&dev->ctrl.reset_work);
     flush_work(&dev->ctrl.scan_work);
     nvme_put_ctrl(&dev->ctrl);
 }
@@ -2761,6 +2761,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
     dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
+    nvme_reset_ctrl(&dev->ctrl);
     nvme_get_ctrl(&dev->ctrl);
     async_schedule(nvme_async_probe, dev);
 
@@ -2846,7 +2847,7 @@ static int nvme_resume(struct device *dev)
     struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
     struct nvme_ctrl *ctrl = &ndev->ctrl;
 
-    if (pm_resume_via_firmware() || !ctrl->npss ||
+    if (ndev->last_ps == U32_MAX ||
         nvme_set_power_state(ctrl, ndev->last_ps) != 0)
         nvme_reset_ctrl(ctrl);
     return 0;
@@ -2859,6 +2860,8 @@ static int nvme_suspend(struct device *dev)
     struct nvme_ctrl *ctrl = &ndev->ctrl;
     int ret = -EBUSY;
 
+    ndev->last_ps = U32_MAX;
+
     /*
      * The platform does not remove power for a kernel managed suspend so
      * use host managed nvme power settings for lowest idle power if
@@ -2866,8 +2869,15 @@ static int nvme_suspend(struct device *dev)
      * shutdown. But if the firmware is involved after the suspend or the
      * device does not support any non-default power states, shut down the
      * device fully.
+     *
+     * If ASPM is not enabled for the device, shut down the device and allow
+     * the PCI bus layer to put it into D3 in order to take the PCIe link
+     * down, so as to allow the platform to achieve its minimum low-power
+     * state (which may not be possible if the link is up).
      */
-    if (pm_suspend_via_firmware() || !ctrl->npss) {
+    if (pm_suspend_via_firmware() || !ctrl->npss ||
+        !pcie_aspm_enabled(pdev) ||
+        (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
         nvme_dev_disable(ndev, true);
         return 0;
     }
@@ -2880,7 +2890,6 @@ static int nvme_suspend(struct device *dev)
         ctrl->state != NVME_CTRL_ADMIN_ONLY)
         goto unfreeze;
 
-    ndev->last_ps = 0;
     ret = nvme_get_power_state(ctrl, &ndev->last_ps);
     if (ret < 0)
         goto unfreeze;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a249db528d54..1a6449bc547b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -562,13 +562,17 @@ out_destroy_cm_id:
     return ret;
 }
 
+static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
+{
+    rdma_disconnect(queue->cm_id);
+    ib_drain_qp(queue->qp);
+}
+
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
     if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
         return;
-
-    rdma_disconnect(queue->cm_id);
-    ib_drain_qp(queue->qp);
+    __nvme_rdma_stop_queue(queue);
 }
 
 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -607,11 +611,13 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
     else
         ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 
-    if (!ret)
+    if (!ret) {
         set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
-    else
+    } else {
+        __nvme_rdma_stop_queue(queue);
         dev_info(ctrl->ctrl.device,
                  "failed to connect queue: %d ret=%d\n", idx, ret);
+    }
     return ret;
 }
 
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index cd52b9f15376..98613a45bd3b 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -675,6 +675,7 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
 
 found:
     list_del(&p->entry);
+    nvmet_port_del_ctrls(port, subsys);
     nvmet_port_disc_changed(port, subsys);
 
     if (list_empty(&port->subsystems))
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index dad0243c7c96..3a67e244e568 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -46,6 +46,9 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
     u16 status;
 
     switch (errno) {
+    case 0:
+        status = NVME_SC_SUCCESS;
+        break;
     case -ENOSPC:
         req->error_loc = offsetof(struct nvme_rw_command, length);
         status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
@@ -280,6 +283,18 @@ void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
 }
 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
+{
+    struct nvmet_ctrl *ctrl;
+
+    mutex_lock(&subsys->lock);
+    list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+        if (ctrl->port == port)
+            ctrl->ops->delete_ctrl(ctrl);
+    }
+    mutex_unlock(&subsys->lock);
+}
+
 int nvmet_enable_port(struct nvmet_port *port)
 {
     const struct nvmet_fabrics_ops *ops;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b16dc3981c69..0940c5024a34 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -654,6 +654,14 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
     mutex_lock(&nvme_loop_ports_mutex);
     list_del_init(&port->entry);
     mutex_unlock(&nvme_loop_ports_mutex);
+
+    /*
+     * Ensure any ctrls that are in the process of being
+     * deleted are in fact deleted before we return
+     * and free the port. This is to prevent active
+     * ctrls from using a port after it's freed.
+     */
+    flush_workqueue(nvme_delete_wq);
 }
 
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 6ee66c610739..c51f8dd01dc4 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -418,6 +418,9 @@ void nvmet_port_send_ana_event(struct nvmet_port *port);
 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
 
+void nvmet_port_del_ctrls(struct nvmet_port *port,
+                          struct nvmet_subsys *subsys);
+
 int nvmet_enable_port(struct nvmet_port *port);
 void nvmet_disable_port(struct nvmet_port *port);
 
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7f84bb4903ca..a296eaf52a5b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -277,7 +277,7 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw);
  * of_irq_parse_one - Resolve an interrupt for a device
  * @device: the device whose interrupt is to be resolved
  * @index: index of the interrupt to resolve
- * @out_irq: structure of_irq filled by this function
+ * @out_irq: structure of_phandle_args filled by this function
  *
  * This function resolves an interrupt for a node by walking the interrupt tree,
  * finding which interrupt controller node it is attached to, and returning the
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
index c1b67dd7cd6e..83c766233181 100644
--- a/drivers/of/resolver.c
+++ b/drivers/of/resolver.c
@@ -206,16 +206,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
     for_each_child_of_node(local_fixups, child) {
 
         for_each_child_of_node(overlay, overlay_child)
-            if (!node_name_cmp(child, overlay_child))
+            if (!node_name_cmp(child, overlay_child)) {
+                of_node_put(overlay_child);
                 break;
+            }
 
-        if (!overlay_child)
+        if (!overlay_child) {
+            of_node_put(child);
             return -EINVAL;
+        }
 
         err = adjust_local_phandle_references(child, overlay_child,
                                               phandle_delta);
-        if (err)
+        if (err) {
+            of_node_put(child);
             return err;
+        }
     }
 
     return 0;
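Both fixes above follow the device-tree refcounting rule: for_each_child_of_node() holds a reference on the current child and drops it when advancing, so any early exit from the loop (break or return) leaves one reference held that the exiting code must either drop with of_node_put() or deliberately hand to the caller. A sketch of the canonical find-style helper that transfers the reference (find_child_by_name is a hypothetical name):

#include <linux/of.h>

static struct device_node *find_child_by_name(struct device_node *parent,
                                              const char *name)
{
    struct device_node *child;

    for_each_child_of_node(parent, child) {
        if (of_node_name_eq(child, name))
            return child;   /* ref transferred to the caller,
                             * who must of_node_put() it */
    }
    return NULL;    /* loop ran to completion: no ref left held */
}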
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e44af7f4d37f..464f8f92653f 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -1170,6 +1170,26 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp)
 module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
     NULL, 0644);
 
+/**
+ * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
+ * @pdev: Target device.
+ */
+bool pcie_aspm_enabled(struct pci_dev *pdev)
+{
+    struct pci_dev *bridge = pci_upstream_bridge(pdev);
+    bool ret;
+
+    if (!bridge)
+        return false;
+
+    mutex_lock(&aspm_lock);
+    ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
+    mutex_unlock(&aspm_lock);
+
+    return ret;
+}
+EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
+
 #ifdef CONFIG_PCIEASPM_DEBUG
 static ssize_t link_state_show(struct device *dev,
         struct device_attribute *attr,
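Exporting pcie_aspm_enabled() lets drivers tailor their power management to whether the PCIe link can actually reach a low-power state; the NVMe suspend rework earlier in this series is the consumer this export was added for. A hedged sketch of such a consumer (full_shutdown() and light_suspend() are hypothetical helpers, not real APIs):

#include <linux/pci.h>

static int full_shutdown(struct pci_dev *pdev) { return 0; }  /* hypothetical */
static int light_suspend(struct pci_dev *pdev) { return 0; }  /* hypothetical */

static int my_suspend(struct pci_dev *pdev)
{
    /*
     * Without ASPM the link stays up, so a partial suspend would keep
     * the platform out of its deepest state; power down fully instead.
     */
    if (!pcie_aspm_enabled(pdev))
        return full_shutdown(pdev);

    return light_suspend(pdev);
}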
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 208aacf39329..44c4ae1abd00 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5256,7 +5256,7 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
      */
     if (ioread32(map + 0x2240c) & 0x2) {
         pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
-        ret = pci_reset_function(pdev);
+        ret = pci_reset_bus(pdev);
         if (ret < 0)
             pci_err(pdev, "Failed to reset GPU: %d\n", ret);
     }
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index e504d255d5ce..430731cdf827 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -707,7 +707,7 @@ static int cros_ec_ishtp_reset(struct ishtp_cl_device *cl_device)
  */
 static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
 {
-    struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+    struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
     struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
     struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
 
@@ -722,7 +722,7 @@ static int __maybe_unused cros_ec_ishtp_suspend(struct device *device)
  */
 static int __maybe_unused cros_ec_ishtp_resume(struct device *device)
 {
-    struct ishtp_cl_device *cl_device = dev_get_drvdata(device);
+    struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
     struct ishtp_cl *cros_ish_cl = ishtp_get_drvdata(cl_device);
     struct ishtp_cl_data *client_data = ishtp_get_client_data(cros_ish_cl);
 
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index 30de448de802..86d88aec94a1 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -742,6 +742,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di,
                 USB_CH_IP_CUR_LVL_1P5;
             break;
         }
+        /* Else, fall through */
     case USB_STAT_HM_IDGND:
         dev_err(di->dev, "USB Type - Charging not allowed\n");
         di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
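The one-line comment added above exists purely for GCC's -Wimplicit-fallthrough: the warning is silenced only when a fall-through comment (or, in later kernels, the fallthrough attribute) is the last thing before the next case label, with no intervening statement. A self-contained sketch of the shape, with made-up states and current values:

static int pick_current_ma(int usb_stat, bool vbus_present)
{
    int curr;

    switch (usb_stat) {
    case 1:                 /* hypothetical "host mode" state */
        if (vbus_present) {
            curr = 1500;
            break;
        }
        /* Else, fall through */
    case 2:                 /* hypothetical "ID grounded" state */
    default:
        curr = 50;          /* safe minimum */
        break;
    }
    return curr;
}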
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c7ee07ce3615..28db887d38ed 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -629,6 +629,7 @@ struct qeth_seqno {
 struct qeth_reply {
     struct list_head list;
     struct completion received;
+    spinlock_t lock;
     int (*callback)(struct qeth_card *, struct qeth_reply *,
             unsigned long);
     u32 seqno;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d0caeebc802..6502b148541e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -544,6 +544,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
     if (reply) {
         refcount_set(&reply->refcnt, 1);
         init_completion(&reply->received);
+        spin_lock_init(&reply->lock);
     }
     return reply;
 }
@@ -799,6 +800,13 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
 
     if (!reply->callback) {
         rc = 0;
+        goto no_callback;
+    }
+
+    spin_lock_irqsave(&reply->lock, flags);
+    if (reply->rc) {
+        /* Bail out when the requestor has already left: */
+        rc = reply->rc;
     } else {
         if (cmd) {
             reply->offset = (u16)((char *)cmd - (char *)iob->data);
@@ -807,7 +815,9 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
             rc = reply->callback(card, reply, (unsigned long)iob);
         }
     }
+    spin_unlock_irqrestore(&reply->lock, flags);
 
+no_callback:
     if (rc <= 0)
         qeth_notify_reply(reply, rc);
     qeth_put_reply(reply);
@@ -1749,6 +1759,16 @@ static int qeth_send_control_data(struct qeth_card *card,
         rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
 
     qeth_dequeue_reply(card, reply);
+
+    if (reply_cb) {
+        /* Wait until the callback for a late reply has completed: */
+        spin_lock_irq(&reply->lock);
+        if (rc)
+            /* Zap any callback that's still pending: */
+            reply->rc = rc;
+        spin_unlock_irq(&reply->lock);
+    }
+
     if (!rc)
         rc = reply->rc;
     qeth_put_reply(reply);
@@ -4354,6 +4374,10 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
         get_user(req_len, &ureq->hdr.req_len))
         return -EFAULT;
 
+    /* Sanitize user input, to avoid overflows in iob size calculation: */
+    if (req_len > QETH_BUFSIZE)
+        return -EINVAL;
+
     iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
     if (!iob)
         return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index abcad097ff2f..f47b4b281b14 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -459,6 +459,7 @@ static void sas_discover_domain(struct work_struct *work)
         pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
         /* Fall through */
 #endif
+        /* Fall through - only for the #else condition above. */
     default:
         error = -ENXIO;
         pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2c3bb8a966e5..bade2e025ecf 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -824,6 +824,7 @@ struct lpfc_hba {
 	uint32_t cfg_cq_poll_threshold;
 	uint32_t cfg_cq_max_proc_limit;
 	uint32_t cfg_fcp_cpu_map;
+	uint32_t cfg_fcp_mq_threshold;
 	uint32_t cfg_hdw_queue;
 	uint32_t cfg_irq_chann;
 	uint32_t cfg_suppress_rsp;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ea62322ffe2b..8d8c495b5b60 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5709,6 +5709,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5709 "Embed NVME Command in WQE"); 5709 "Embed NVME Command in WQE");
5710 5710
5711/* 5711/*
5712 * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
5713 * the driver will advertise it supports to the SCSI layer.
5714 *
5715 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5716 * 1,128 = Manually specify the maximum nr_hw_queue value to be set,
5717 *
5718 * Value range is [0,128]. Default value is 8.
5719 */
5720LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5721 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
5722 "Set the number of SCSI Queues advertised");
5723
5724/*
5712 * lpfc_hdw_queue: Set the number of Hardware Queues the driver 5725 * lpfc_hdw_queue: Set the number of Hardware Queues the driver
5713 * will advertise it supports to the NVME and SCSI layers. This also 5726 * will advertise it supports to the NVME and SCSI layers. This also
5714 * will map to the number of CQ/WQ pairs the driver will create. 5727 * will map to the number of CQ/WQ pairs the driver will create.
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
6030 &dev_attr_lpfc_cq_poll_threshold, 6043 &dev_attr_lpfc_cq_poll_threshold,
6031 &dev_attr_lpfc_cq_max_proc_limit, 6044 &dev_attr_lpfc_cq_max_proc_limit,
6032 &dev_attr_lpfc_fcp_cpu_map, 6045 &dev_attr_lpfc_fcp_cpu_map,
6046 &dev_attr_lpfc_fcp_mq_threshold,
6033 &dev_attr_lpfc_hdw_queue, 6047 &dev_attr_lpfc_hdw_queue,
6034 &dev_attr_lpfc_irq_chann, 6048 &dev_attr_lpfc_irq_chann,
6035 &dev_attr_lpfc_suppress_rsp, 6049 &dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
7112 /* Initialize first burst. Target vs Initiator are different. */ 7126 /* Initialize first burst. Target vs Initiator are different. */
7113 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); 7127 lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
7114 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); 7128 lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
7129 lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
7115 lpfc_hdw_queue_init(phba, lpfc_hdw_queue); 7130 lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
7116 lpfc_irq_chann_init(phba, lpfc_irq_chann); 7131 lpfc_irq_chann_init(phba, lpfc_irq_chann);
7117 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); 7132 lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index faf43b1d3dbe..1ac98becb5ba 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_cmd_len = 16;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
-			shost->nr_hw_queues = phba->cfg_hdw_queue;
-		else
-			shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
+		if (!phba->cfg_fcp_mq_threshold ||
+		    phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
+			phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
+
+		shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
+					    phba->cfg_fcp_mq_threshold);
 
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -10776,12 +10778,31 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
 	/* This loop sets up all CPUs that are affinitized with a
 	 * irq vector assigned to the driver. All affinitized CPUs
 	 * will get a link to that vectors IRQ and EQ.
+	 *
+	 * NULL affinity mask handling:
+	 * If irq count is greater than one, log an error message.
+	 * If the null mask is received for the first irq, find the
+	 * first present cpu, and assign the eq index to ensure at
+	 * least one EQ is assigned.
 	 */
 	for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
 		/* Get a CPU mask for all CPUs affinitized to this vector */
 		maskp = pci_irq_get_affinity(phba->pcidev, idx);
-		if (!maskp)
-			continue;
+		if (!maskp) {
+			if (phba->cfg_irq_chann > 1)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"3329 No affinity mask found "
+						"for vector %d (%d)\n",
+						idx, phba->cfg_irq_chann);
+			if (!idx) {
+				cpu = cpumask_first(cpu_present_mask);
+				cpup = &phba->sli4_hba.cpu_map[cpu];
+				cpup->eq = idx;
+				cpup->irq = pci_irq_vector(phba->pcidev, idx);
+				cpup->flag |= LPFC_CPU_FIRST_IRQ;
+			}
+			break;
+		}
 
 		i = 0;
 		/* Loop through all CPUs associated with vector idx */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3aeca387b22a..329f7aa7e169 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -44,6 +44,11 @@
 #define LPFC_HBA_HDWQ_MAX	128
 #define LPFC_HBA_HDWQ_DEF	0
 
+/* FCP MQ queue count limiting */
+#define LPFC_FCP_MQ_THRESHOLD_MIN	0
+#define LPFC_FCP_MQ_THRESHOLD_MAX	128
+#define LPFC_FCP_MQ_THRESHOLD_DEF	8
+
 /* Common buffer size to accomidate SCSI and NVME IO buffers */
 #define LPFC_COMMON_IO_BUF_SZ	768
 
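Taken together, the lpfc hunks add a bounded module parameter and clamp the advertised queue count in lpfc_create_port(). A sketch of the clamping logic in isolation (the helper name is made up; min_t() and num_possible_nodes() are the kernel interfaces used above):

#include <linux/kernel.h>	/* min_t() */
#include <linux/nodemask.h>	/* num_possible_nodes() */

/* A zero or out-of-range threshold falls back to the hardware queue
 * count; the advertised nr_hw_queues is then capped at twice the
 * number of possible NUMA nodes, as in lpfc_create_port() above.
 */
static unsigned int demo_clamp_mq_threshold(unsigned int threshold,
					    unsigned int hdw_queues)
{
	if (!threshold || threshold > hdw_queues)
		threshold = hdw_queues;

	return min_t(unsigned int, 2 * num_possible_nodes(), threshold);
}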
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8d560c562e9c..6b7b390b2e52 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
 	dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
 	    vha->gnl.ldma);
 
+	vha->gnl.l = NULL;
+
 	vfree(vha->scan.l);
 
 	if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2e58cff9d200..98e60a34afd9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3440,6 +3440,12 @@ skip_dpc:
 	return 0;
 
 probe_failed:
+	if (base_vha->gnl.l) {
+		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+				base_vha->gnl.l, base_vha->gnl.ldma);
+		base_vha->gnl.l = NULL;
+	}
+
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
 	base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	if (!atomic_read(&pdev->enable_cnt)) {
 		dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
 		    base_vha->gnl.l, base_vha->gnl.ldma);
-
+		base_vha->gnl.l = NULL;
 		scsi_host_put(base_vha->host);
 		kfree(ha);
 		pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	dma_free_coherent(&ha->pdev->dev,
 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
 
+	base_vha->gnl.l = NULL;
+
 	vfree(base_vha->scan.l);
 
 	if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 		    "Alloc failed for scan database.\n");
 		dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
 		    vha->gnl.l, vha->gnl.ldma);
+		vha->gnl.l = NULL;
 		scsi_remove_host(vha->host);
 		return NULL;
 	}
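All the qla2xxx hunks apply the same discipline: NULL the gnl.l pointer immediately after freeing it, so any later teardown path can test the pointer and skip a second dma_free_coherent(). A sketch of the idiom with a hypothetical helper:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Free a coherent buffer exactly once and leave no dangling pointer,
 * so probe-failure and remove paths may both call this safely.
 */
static void demo_free_gnl(struct device *dev, size_t size,
			  void **vaddr, dma_addr_t dma)
{
	if (!*vaddr)
		return;			/* already freed earlier */

	dma_free_coherent(dev, size, *vaddr, dma);
	*vaddr = NULL;			/* poison against double free */
}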
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e274053109d0..029da74bb2f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
 					 struct ufs_vreg *vreg)
 {
+	if (!vreg)
+		return 0;
+
 	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
 }
 
diff --git a/drivers/soc/ixp4xx/Kconfig b/drivers/soc/ixp4xx/Kconfig
index de2e62c3310a..e3eb19b85fa4 100644
--- a/drivers/soc/ixp4xx/Kconfig
+++ b/drivers/soc/ixp4xx/Kconfig
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
+if ARCH_IXP4XX || COMPILE_TEST
+
 menu "IXP4xx SoC drivers"
 
 config IXP4XX_QMGR
@@ -15,3 +17,5 @@ config IXP4XX_NPE
 	  and is automatically selected by Ethernet and HSS drivers.
 
 endmenu
+
+endif
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index bb77c220b6f8..ccc6d53fe788 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -141,7 +141,7 @@ static int __init am43xx_map_gic(void)
 }
 
 #ifdef CONFIG_SUSPEND
-struct wkup_m3_wakeup_src rtc_wake_src(void)
+static struct wkup_m3_wakeup_src rtc_wake_src(void)
 {
 	u32 i;
 
@@ -157,7 +157,7 @@ struct wkup_m3_wakeup_src rtc_wake_src(void)
 	return rtc_ext_wakeup;
 }
 
-int am33xx_rtc_only_idle(unsigned long wfi_flags)
+static int am33xx_rtc_only_idle(unsigned long wfi_flags)
 {
 	omap_rtc_power_off_program(&omap_rtc->dev);
 	am33xx_do_wfi_sram(wfi_flags);
@@ -252,7 +252,7 @@ static int am33xx_pm_begin(suspend_state_t state)
 	if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
 		nvmem = devm_nvmem_device_get(&omap_rtc->dev,
 					      "omap_rtc_scratch0");
-		if (nvmem)
+		if (!IS_ERR(nvmem))
 			nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
 					   (void *)&rtc_magic_val);
 		rtc_only_idle = 1;
@@ -278,9 +281,12 @@ static void am33xx_pm_end(void)
 	struct nvmem_device *nvmem;
 
 	nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
+	if (IS_ERR(nvmem))
+		return;
+
 	m3_ipc->ops->finish_low_power(m3_ipc);
 	if (rtc_only_idle) {
-		if (retrigger_irq)
+		if (retrigger_irq) {
 			/*
 			 * 32 bits of Interrupt Set-Pending correspond to 32
 			 * 32 interrupts. Compute the bit offset of the
@@ -291,8 +294,10 @@ static void am33xx_pm_end(void)
 			writel_relaxed(1 << (retrigger_irq & 31),
 				       gic_dist_base + GIC_INT_SET_PENDING_BASE
 				       + retrigger_irq / 32 * 4);
-		nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
-				   (void *)&val);
+		}
+
+		nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
+				   (void *)&val);
 	}
 
 	rtc_only_idle = 0;
@@ -415,7 +420,7 @@ static int am33xx_pm_rtc_setup(void)
 
 	nvmem = devm_nvmem_device_get(&omap_rtc->dev,
 				      "omap_rtc_scratch0");
-	if (nvmem) {
+	if (!IS_ERR(nvmem)) {
 		nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
 				  4, (void *)&rtc_magic_val);
 		if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
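The pm33xx fixes hinge on one API detail: devm_nvmem_device_get() returns an ERR_PTR() on failure, never NULL, so the old `if (nvmem)` tests always passed and a failure would be dereferenced later. A sketch of the corrected pattern (device and cell name are placeholders):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static u32 demo_read_scratch(struct device *dev)
{
	struct nvmem_device *nvmem;
	u32 val = 0;

	nvmem = devm_nvmem_device_get(dev, "scratch0");
	if (IS_ERR(nvmem))		/* not "if (!nvmem)" */
		return 0;

	nvmem_device_read(nvmem, 0, sizeof(val), &val);
	return val;
}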
diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig
index 3a01cfd70fdc..f518273cfbe3 100644
--- a/drivers/soundwire/Kconfig
+++ b/drivers/soundwire/Kconfig
@@ -4,7 +4,7 @@
 #
 
 menuconfig SOUNDWIRE
-	bool "SoundWire support"
+	tristate "SoundWire support"
 	help
 	  SoundWire is a 2-Pin interface with data and clock line ratified
 	  by the MIPI Alliance. SoundWire is used for transporting data
@@ -17,17 +17,12 @@ if SOUNDWIRE
 
 comment "SoundWire Devices"
 
-config SOUNDWIRE_BUS
-	tristate
-	select REGMAP_SOUNDWIRE
-
 config SOUNDWIRE_CADENCE
 	tristate
 
 config SOUNDWIRE_INTEL
 	tristate "Intel SoundWire Master driver"
 	select SOUNDWIRE_CADENCE
-	select SOUNDWIRE_BUS
 	depends on X86 && ACPI && SND_SOC
 	help
 	  SoundWire Intel Master driver.
diff --git a/drivers/soundwire/Makefile b/drivers/soundwire/Makefile
index fd99a831b92a..45b7e5001653 100644
--- a/drivers/soundwire/Makefile
+++ b/drivers/soundwire/Makefile
@@ -5,7 +5,7 @@
 
 #Bus Objs
 soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o
 
 #Cadence Objs
 soundwire-cadence-objs := cadence_master.o
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index ff4badc9b3de..60e8bdee5c75 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -81,8 +81,8 @@
 
 #define CDNS_MCP_INTSET				0x4C
 
-#define CDNS_SDW_SLAVE_STAT			0x50
-#define CDNS_MCP_SLAVE_STAT_MASK		BIT(1, 0)
+#define CDNS_MCP_SLAVE_STAT			0x50
+#define CDNS_MCP_SLAVE_STAT_MASK		GENMASK(1, 0)
 
 #define CDNS_MCP_SLAVE_INTSTAT0			0x54
 #define CDNS_MCP_SLAVE_INTSTAT1			0x58
@@ -96,8 +96,8 @@
 #define CDNS_MCP_SLAVE_INTMASK0			0x5C
 #define CDNS_MCP_SLAVE_INTMASK1			0x60
 
-#define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(30, 0)
-#define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(16, 0)
+#define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(31, 0)
+#define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(15, 0)
 
 #define CDNS_MCP_PORT_INTSTAT			0x64
 #define CDNS_MCP_PDI_STAT			0x6C
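The cadence_master.c fix is worth spelling out: BIT() takes a single bit index, so BIT(1, 0) was never a valid two-bit mask (it could only go unnoticed because the define was unused and thus never expanded), and the two interrupt masks were each one bit off. GENMASK(hi, lo) is the tool for contiguous fields:

#include <linux/bits.h>

#define DEMO_STAT_MASK		GENMASK(1, 0)	/* bits 1..0 -> 0x3 */
#define DEMO_INTMASK0_MASK	GENMASK(31, 0)	/* full 32-bit mask */
#define DEMO_READY		BIT(0)		/* single flag -> 0x1 */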
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee91300..caf4d4df4bd3 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
 static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			    unsigned int flags)
 {
-	int divider, base, prescale;
+	unsigned int divider, base, prescale;
 
-	/* This function needs improvment */
+	/* This function needs improvement */
 	/* Don't know if divider==0 works. */
 
 	for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 			divider = (*nanosec) / base;
 			break;
 		case CMDF_ROUND_UP:
-			divider = (*nanosec) / base;
+			divider = DIV_ROUND_UP(*nanosec, base);
 			break;
 		}
 		if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
 	}
 
 	prescale = 15;
-	base = timer_base * (1 << prescale);
+	base = timer_base * (prescale + 1);
 	divider = 65535;
 	*nanosec = divider * base;
 	return (prescale << 16) | (divider);
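The dt3000 change gives CMDF_ROUND_UP real round-up semantics; plain integer division rounds down, so the computed period could silently come out shorter than requested. A worked example of the difference:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/* For 1000 ns against a 300 ns timer base:
 *
 *   1000 / 300              = 3  ->  900 ns (shorter than asked)
 *   DIV_ROUND_UP(1000, 300) = 4  -> 1200 ns (never shorter)
 */
static unsigned int demo_ns_to_ticks_up(unsigned int ns, unsigned int base)
{
	return DIV_ROUND_UP(ns, base);	/* (ns + base - 1) / base */
}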
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 04eda111920e..661bb9358364 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 	struct se_cmd *se_cmd = cmd->se_cmd;
 	struct tcmu_dev *udev = cmd->tcmu_dev;
 	bool read_len_valid = false;
-	uint32_t read_len = se_cmd->data_length;
+	uint32_t read_len;
 
 	/*
 	 * cmd has been completed already from timeout, just reclaim
 	 * data area space and free cmd
 	 */
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		WARN_ON_ONCE(se_cmd);
 		goto out;
+	}
 
 	list_del_init(&cmd->queue_entry);
 
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 		goto done;
 	}
 
+	read_len = se_cmd->data_length;
 	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
 	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
 		read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
 		 */
 		scsi_status = SAM_STAT_CHECK_CONDITION;
 		list_del_init(&cmd->queue_entry);
+		cmd->se_cmd = NULL;
 	} else {
 		list_del_init(&cmd->queue_entry);
 		idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 
 		idr_remove(&udev->commands, i);
 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+			WARN_ON(!cmd->se_cmd);
 			list_del_init(&cmd->queue_entry);
 			if (err_level == 1) {
 				/*
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index b5abfe89190c..df8812c30640 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -454,9 +454,11 @@ err_clk:
 	imx_disable_unprepare_clks(dev);
 disable_hsic_regulator:
 	if (data->hsic_pad_regulator)
-		ret = regulator_disable(data->hsic_pad_regulator);
+		/* don't overwrite original ret (cf. EPROBE_DEFER) */
+		regulator_disable(data->hsic_pad_regulator);
 	if (pdata.flags & CI_HDRC_PMQOS)
 		pm_qos_remove_request(&data->pm_qos_req);
+	data->ci_pdev = NULL;
 	return ret;
 }
 
@@ -469,14 +471,17 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
 		pm_runtime_disable(&pdev->dev);
 		pm_runtime_put_noidle(&pdev->dev);
 	}
-	ci_hdrc_remove_device(data->ci_pdev);
+	if (data->ci_pdev)
+		ci_hdrc_remove_device(data->ci_pdev);
 	if (data->override_phy_control)
 		usb_phy_shutdown(data->phy);
-	imx_disable_unprepare_clks(&pdev->dev);
-	if (data->plat_data->flags & CI_HDRC_PMQOS)
-		pm_qos_remove_request(&data->pm_qos_req);
-	if (data->hsic_pad_regulator)
-		regulator_disable(data->hsic_pad_regulator);
+	if (data->ci_pdev) {
+		imx_disable_unprepare_clks(&pdev->dev);
+		if (data->plat_data->flags & CI_HDRC_PMQOS)
+			pm_qos_remove_request(&data->pm_qos_req);
+		if (data->hsic_pad_regulator)
+			regulator_disable(data->hsic_pad_regulator);
+	}
 
 	return 0;
 }
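The first ci_hdrc_imx hunk is about error-code hygiene: assigning the result of regulator_disable() to ret on the failure path would overwrite the original error, and in particular turn an -EPROBE_DEFER into something the driver core will not retry. A hypothetical probe tail showing the rule (demo_setup() and struct demo_data are stand-ins):

#include <linux/errno.h>
#include <linux/regulator/consumer.h>

struct demo_data {
	struct regulator *hsic_pad_regulator;
};

static int demo_setup(struct demo_data *data);	/* may defer probing */

static int demo_probe_tail(struct demo_data *data)
{
	int ret;

	ret = demo_setup(data);		/* may return -EPROBE_DEFER */
	if (ret) {
		/* discard the disable result; keep the original ret */
		regulator_disable(data->hsic_pad_regulator);
		return ret;
	}
	return 0;
}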
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 183b41753c98..62f4fb9b362f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1301,10 +1301,6 @@ made_compressed_probe:
 	tty_port_init(&acm->port);
 	acm->port.ops = &acm_port_ops;
 
-	minor = acm_alloc_minor(acm);
-	if (minor < 0)
-		goto alloc_fail1;
-
 	ctrlsize = usb_endpoint_maxp(epctrl);
 	readsize = usb_endpoint_maxp(epread) *
 				(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1312,6 +1308,13 @@ made_compressed_probe:
 	acm->writesize = usb_endpoint_maxp(epwrite) * 20;
 	acm->control = control_interface;
 	acm->data = data_interface;
+
+	usb_get_intf(acm->control); /* undone in destruct() */
+
+	minor = acm_alloc_minor(acm);
+	if (minor < 0)
+		goto alloc_fail1;
+
 	acm->minor = minor;
 	acm->dev = usb_dev;
 	if (h.usb_cdc_acm_descriptor)
@@ -1458,7 +1461,6 @@ skip_countries:
 	usb_driver_claim_interface(&acm_driver, data_interface, acm);
 	usb_set_intfdata(data_interface, acm);
 
-	usb_get_intf(control_interface);
 	tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
 			&control_interface->dev);
 	if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 1359b78a624e..6cf22c27f2d2 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -66,9 +66,7 @@ int hcd_buffer_create(struct usb_hcd *hcd)
 	char		name[16];
 	int		i, size;
 
-	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    (!is_device_dma_capable(hcd->self.sysdev) &&
-	     !hcd->localmem_pool))
+	if (hcd->localmem_pool || !hcd_uses_dma(hcd))
 		return 0;
 
 	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
@@ -129,8 +127,7 @@ void *hcd_buffer_alloc(
 		return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);
 
 	/* some USB hosts just use PIO */
-	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    !is_device_dma_capable(bus->sysdev)) {
+	if (!hcd_uses_dma(hcd)) {
 		*dma = ~(dma_addr_t) 0;
 		return kmalloc(size, mem_flags);
 	}
@@ -160,8 +157,7 @@ void hcd_buffer_free(
 		return;
 	}
 
-	if (!IS_ENABLED(CONFIG_HAS_DMA) ||
-	    !is_device_dma_capable(bus->sysdev)) {
+	if (!hcd_uses_dma(hcd)) {
 		kfree(addr);
 		return;
 	}
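The three buffer.c hunks (and the hcd.c and dwc2 ones below) replace open-coded CONFIG_HAS_DMA/uses_dma checks with one helper. Its definition in include/linux/usb/hcd.h at the time of this series is essentially the following, so PIO-only hosts and !CONFIG_HAS_DMA builds take the kmalloc() path while localmem-pool HCDs are still tested explicitly first:

/* Paraphrased from include/linux/usb/hcd.h; check the tree for the
 * authoritative definition.
 */
#define hcd_uses_dma(hcd) \
	(IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)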
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f73b672..558890ada0e5 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
 		intf->minor = minor;
 		break;
 	}
-	up_write(&minor_rwsem);
-	if (intf->minor < 0)
+	if (intf->minor < 0) {
+		up_write(&minor_rwsem);
 		return -EXFULL;
+	}
 
 	/* create a usb class device for this usb interface */
 	snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
 			MKDEV(USB_MAJOR, minor), class_driver,
 			"%s", kbasename(name));
 	if (IS_ERR(intf->usb_dev)) {
-		down_write(&minor_rwsem);
 		usb_minors[minor] = NULL;
 		intf->minor = -1;
-		up_write(&minor_rwsem);
 		retval = PTR_ERR(intf->usb_dev);
 	}
+	up_write(&minor_rwsem);
 	return retval;
 }
 EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
 		return;
 
 	dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 
 	down_write(&minor_rwsem);
 	usb_minors[intf->minor] = NULL;
 	up_write(&minor_rwsem);
 
-	device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
 	intf->usb_dev = NULL;
 	intf->minor = -1;
 	destroy_usb_class();
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2ccbc2f83570..8592c0344fe8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1412,7 +1412,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
 		if (hcd->self.uses_pio_for_control)
 			return ret;
-		if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+		if (hcd_uses_dma(hcd)) {
 			if (is_vmalloc_addr(urb->setup_packet)) {
 				WARN_ONCE(1, "setup packet is not dma capable\n");
 				return -EAGAIN;
@@ -1446,7 +1446,7 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	if (urb->transfer_buffer_length != 0
 	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-		if (IS_ENABLED(CONFIG_HAS_DMA) && hcd->self.uses_dma) {
+		if (hcd_uses_dma(hcd)) {
 			if (urb->num_sgs) {
 				int n;
 
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e844bb7b5676..5adf489428aa 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2218,14 +2218,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
 			(struct usb_cdc_dmm_desc *)buffer;
 		break;
 	case USB_CDC_MDLM_TYPE:
-		if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+		if (elength < sizeof(struct usb_cdc_mdlm_desc))
 			goto next_desc;
 		if (desc)
 			return -EINVAL;
 		desc = (struct usb_cdc_mdlm_desc *)buffer;
 		break;
 	case USB_CDC_MDLM_DETAIL_TYPE:
-		if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+		if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
 			goto next_desc;
 		if (detail)
 			return -EINVAL;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index ee144ff8af5b..111787a137ee 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4608,7 +4608,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 
 	buf = urb->transfer_buffer;
 
-	if (hcd->self.uses_dma) {
+	if (hcd_uses_dma(hcd)) {
 		if (!buf && (urb->transfer_dma & 3)) {
 			dev_err(hsotg->dev,
 				"%s: unaligned transfer with no transfer_buffer",
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 9118b42c70b6..76883ff4f5bb 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1976,6 +1976,7 @@ void composite_disconnect(struct usb_gadget *gadget)
 	 * disconnect callbacks?
 	 */
 	spin_lock_irqsave(&cdev->lock, flags);
+	cdev->suspended = 0;
 	if (cdev->config)
 		reset_config(cdev);
 	if (cdev->driver->disconnect)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 29cc5693e05c..7c96c4665178 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -261,7 +261,7 @@ struct fsg_common;
 struct fsg_common {
 	struct usb_gadget	*gadget;
 	struct usb_composite_dev *cdev;
-	struct fsg_dev		*fsg, *new_fsg;
+	struct fsg_dev		*fsg;
 	wait_queue_head_t	io_wait;
 	wait_queue_head_t	fsg_wait;
 
@@ -290,6 +290,7 @@ struct fsg_common {
 	unsigned int		bulk_out_maxpacket;
 	enum fsg_state		state;		/* For exception handling */
 	unsigned int		exception_req_tag;
+	void			*exception_arg;
 
 	enum data_direction	data_dir;
 	u32			data_size;
@@ -391,7 +392,8 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 
 /* These routines may be called in process context or in_irq */
 
-static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
+			      void *arg)
 {
 	unsigned long		flags;
 
@@ -404,6 +406,7 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 	if (common->state <= new_state) {
 		common->exception_req_tag = common->ep0_req_tag;
 		common->state = new_state;
+		common->exception_arg = arg;
 		if (common->thread_task)
 			send_sig_info(SIGUSR1, SEND_SIG_PRIV,
 				      common->thread_task);
@@ -411,6 +414,10 @@ static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
 	spin_unlock_irqrestore(&common->lock, flags);
 }
 
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+	__raise_exception(common, new_state, NULL);
+}
 
 /*-------------------------------------------------------------------------*/
 
@@ -2285,16 +2292,16 @@ reset:
 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
-	fsg->common->new_fsg = fsg;
-	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
 	return USB_GADGET_DELAYED_STATUS;
 }
 
 static void fsg_disable(struct usb_function *f)
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
-	fsg->common->new_fsg = NULL;
-	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+
+	__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 }
 
 
@@ -2307,6 +2314,7 @@ static void handle_exception(struct fsg_common *common)
 	enum fsg_state		old_state;
 	struct fsg_lun		*curlun;
 	unsigned int		exception_req_tag;
+	struct fsg_dev		*new_fsg;
 
 	/*
 	 * Clear the existing signals.  Anything but SIGUSR1 is converted
@@ -2360,6 +2368,7 @@ static void handle_exception(struct fsg_common *common)
 	common->next_buffhd_to_fill = &common->buffhds[0];
 	common->next_buffhd_to_drain = &common->buffhds[0];
 	exception_req_tag = common->exception_req_tag;
+	new_fsg = common->exception_arg;
 	old_state = common->state;
 	common->state = FSG_STATE_NORMAL;
 
@@ -2413,8 +2422,8 @@ static void handle_exception(struct fsg_common *common)
 		break;
 
 	case FSG_STATE_CONFIG_CHANGE:
-		do_set_interface(common, common->new_fsg);
-		if (common->new_fsg)
+		do_set_interface(common, new_fsg);
+		if (new_fsg)
 			usb_composite_setup_continue(common->cdev);
 		break;
 
@@ -2989,8 +2998,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
 
 	DBG(fsg, "unbind\n");
 	if (fsg->common->fsg == fsg) {
-		fsg->common->new_fsg = NULL;
-		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+		__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
 		/* FIXME: make interruptible or killable somehow? */
 		wait_event(common->fsg_wait, common->fsg != fsg);
 	}
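The f_mass_storage rework replaces the shared new_fsg field with an exception_arg that is latched under common->lock together with the state change, so the handler thread always sees a matching (state, argument) pair. The core of the pattern, with illustrative names:

#include <linux/spinlock.h>

struct demo_exception {
	spinlock_t lock;
	int state;
	void *arg;		/* consumed by the handler thread */
};

static void demo_raise(struct demo_exception *e, int new_state, void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&e->lock, flags);
	if (e->state <= new_state) {
		e->state = new_state;
		e->arg = arg;	/* travels atomically with the state */
	}
	spin_unlock_irqrestore(&e->lock, flags);
}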
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 87062d22134d..1f4c3fbd1df8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/sys_soc.h>
 #include <linux/uaccess.h>
 #include <linux/usb/ch9.h>
@@ -2450,9 +2451,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
 	if (usb3->forced_b_device)
 		return -EBUSY;
 
-	if (!strncmp(buf, "host", strlen("host")))
+	if (sysfs_streq(buf, "host"))
 		new_mode_is_host = true;
-	else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+	else if (sysfs_streq(buf, "peripheral"))
 		new_mode_is_host = false;
 	else
 		return -EINVAL;
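The renesas_usb3 change swaps strncmp() for sysfs_streq(): comparing only strlen("host") bytes accepts any input that merely begins with "host", while sysfs_streq() requires a full match but tolerates the trailing newline that `echo` appends. Sketch:

#include <linux/string.h>
#include <linux/types.h>

static bool demo_wants_host_mode(const char *buf)
{
	/* matches "host" and "host\n", but not "hostile" */
	return sysfs_streq(buf, "host");
}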
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 77cc36efae95..0dbfa5c10703 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1629,6 +1629,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 		/* see what we found out */
 		temp = check_reset_complete(fotg210, wIndex, status_reg,
 				fotg210_readl(fotg210, status_reg));
+
+		/* restart schedule */
+		fotg210->command |= CMD_RUN;
+		fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
 	}
 
 	if (!(temp & (PORT_RESUME|PORT_RESET))) {
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c1582fbd1150..38e920ac7f82 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
 	{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
+	/* Motorola devices */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) },	/* mdm6600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) },	/* mdm9600 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) },	/* mdm ram dl */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) },	/* mdm qc dl */
 
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
 	  .driver_info = RSVD(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) },	/* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),	/* D-Link DWM-222 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff),	/* D-Link DWM-222 A2 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),	/* Olicard 600 */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff),	/* BroadMobi BM818 */
+	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },		/* OLICARD300 - MT6225 */
 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/video/fbdev/acornfb.c b/drivers/video/fbdev/acornfb.c
index 92f23e3bc27a..7cacae5a8797 100644
--- a/drivers/video/fbdev/acornfb.c
+++ b/drivers/video/fbdev/acornfb.c
@@ -858,6 +858,7 @@ static void acornfb_parse_dram(char *opt)
 	case 'M':
 	case 'm':
 		size *= 1024;
+		/* Fall through */
 	case 'K':
 	case 'k':
 		size *= 1024;
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 4eacfb1ce1ac..eb729d704836 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
 		soft_margin = new_margin;
 		reload = soft_margin * (mem_fclk_21285 / 256);
 		watchdog_ping();
-		/* Fall */
+		/* Fall through */
 	case WDIOC_GETTIMEOUT:
 		ret = put_user(soft_margin, int_arg);
 		break;
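Both this hunk and the acornfb one above annotate deliberate switch fall-throughs; the comment form shown is what GCC's -Wimplicit-fallthrough heuristics recognize. The acornfb case as a standalone sketch:

static unsigned long demo_scale(unsigned long size, char unit)
{
	switch (unit) {
	case 'M':
	case 'm':
		size *= 1024;
		/* Fall through - 'M' is 1024 * 'K' */
	case 'K':
	case 'k':
		size *= 1024;
		break;
	}
	return size;
}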
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 		cell = rcu_dereference_raw(net->ws_cell);
 		if (cell) {
 			afs_get_cell(cell);
+			ret = 0;
 			break;
 		}
 		ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
 
 	done_seqretry(&net->cells_lock, seq);
 
+	if (ret != 0 && cell)
+		afs_put_cell(net, cell);
+
 	return ret == 0 ? cell : ERR_PTR(ret);
 }
 
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..b86195e4dc6c 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 	struct afs_call *call = container_of(work, struct afs_call, work);
 	struct afs_uuid *r = call->request;
 
-	struct {
-		__be32	match;
-	} reply;
-
 	_enter("");
 
 	if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
-		reply.match = htonl(0);
+		afs_send_empty_reply(call);
 	else
-		reply.match = htonl(1);
+		rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+					1, 1, "K-1");
 
-	afs_send_simple_reply(call, &reply, sizeof(reply));
 	afs_put_call(call);
 	_leave("");
 }
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..139b4e3cc946 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
440 * iterate through the data blob that lists the contents of an AFS directory 440 * iterate through the data blob that lists the contents of an AFS directory
441 */ 441 */
442static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, 442static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
443 struct key *key) 443 struct key *key, afs_dataversion_t *_dir_version)
444{ 444{
445 struct afs_vnode *dvnode = AFS_FS_I(dir); 445 struct afs_vnode *dvnode = AFS_FS_I(dir);
446 struct afs_xdr_dir_page *dbuf; 446 struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
460 req = afs_read_dir(dvnode, key); 460 req = afs_read_dir(dvnode, key);
461 if (IS_ERR(req)) 461 if (IS_ERR(req))
462 return PTR_ERR(req); 462 return PTR_ERR(req);
463 *_dir_version = req->data_version;
463 464
464 /* round the file position up to the next entry boundary */ 465 /* round the file position up to the next entry boundary */
465 ctx->pos += sizeof(union afs_xdr_dirent) - 1; 466 ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
514 */ 515 */
515static int afs_readdir(struct file *file, struct dir_context *ctx) 516static int afs_readdir(struct file *file, struct dir_context *ctx)
516{ 517{
517 return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file)); 518 afs_dataversion_t dir_version;
519
520 return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
521 &dir_version);
518} 522}
519 523
520/* 524/*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
555 * - just returns the FID the dentry name maps to if found 559 * - just returns the FID the dentry name maps to if found
556 */ 560 */
557static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, 561static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
558 struct afs_fid *fid, struct key *key) 562 struct afs_fid *fid, struct key *key,
563 afs_dataversion_t *_dir_version)
559{ 564{
560 struct afs_super_info *as = dir->i_sb->s_fs_info; 565 struct afs_super_info *as = dir->i_sb->s_fs_info;
561 struct afs_lookup_one_cookie cookie = { 566 struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
568 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); 573 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
569 574
570 /* search the directory */ 575 /* search the directory */
571 ret = afs_dir_iterate(dir, &cookie.ctx, key); 576 ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
572 if (ret < 0) { 577 if (ret < 0) {
573 _leave(" = %d [iter]", ret); 578 _leave(" = %d [iter]", ret);
574 return ret; 579 return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
642 struct afs_server *server; 647 struct afs_server *server;
643 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; 648 struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
644 struct inode *inode = NULL, *ti; 649 struct inode *inode = NULL, *ti;
650 afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
645 int ret, i; 651 int ret, i;
646 652
647 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); 653 _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
669 cookie->fids[i].vid = as->volume->vid; 675 cookie->fids[i].vid = as->volume->vid;
670 676
671 /* search the directory */ 677 /* search the directory */
672 ret = afs_dir_iterate(dir, &cookie->ctx, key); 678 ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
673 if (ret < 0) { 679 if (ret < 0) {
674 inode = ERR_PTR(ret); 680 inode = ERR_PTR(ret);
675 goto out; 681 goto out;
676 } 682 }
677 683
684 dentry->d_fsdata = (void *)(unsigned long)data_version;
685
678 inode = ERR_PTR(-ENOENT); 686 inode = ERR_PTR(-ENOENT);
679 if (!cookie->found) 687 if (!cookie->found)
680 goto out; 688 goto out;
@@ -951,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
951 inode ? AFS_FS_I(inode) : NULL); 959 inode ? AFS_FS_I(inode) : NULL);
952 } else { 960 } else {
953 trace_afs_lookup(dvnode, &dentry->d_name, 961 trace_afs_lookup(dvnode, &dentry->d_name,
954 inode ? AFS_FS_I(inode) : NULL); 962 IS_ERR_OR_NULL(inode) ? NULL
963 : AFS_FS_I(inode));
955 } 964 }
956 return d; 965 return d;
957} 966}
@@ -968,7 +977,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
968 struct dentry *parent; 977 struct dentry *parent;
969 struct inode *inode; 978 struct inode *inode;
970 struct key *key; 979 struct key *key;
971 long dir_version, de_version; 980 afs_dataversion_t dir_version;
981 long de_version;
972 int ret; 982 int ret;
973 983
974 if (flags & LOOKUP_RCU) 984 if (flags & LOOKUP_RCU)
@@ -1014,20 +1024,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
1014 * on a 32-bit system, we only have 32 bits in the dentry to store the 1024 * on a 32-bit system, we only have 32 bits in the dentry to store the
1015 * version. 1025 * version.
1016 */ 1026 */
1017 dir_version = (long)dir->status.data_version; 1027 dir_version = dir->status.data_version;
1018 de_version = (long)dentry->d_fsdata; 1028 de_version = (long)dentry->d_fsdata;
1019 if (de_version == dir_version) 1029 if (de_version == (long)dir_version)
1020 goto out_valid; 1030 goto out_valid_noupdate;
1021 1031
1022 dir_version = (long)dir->invalid_before; 1032 dir_version = dir->invalid_before;
1023 if (de_version - dir_version >= 0) 1033 if (de_version - (long)dir_version >= 0)
1024 goto out_valid; 1034 goto out_valid;
1025 1035
1026 _debug("dir modified"); 1036 _debug("dir modified");
1027 afs_stat_v(dir, n_reval); 1037 afs_stat_v(dir, n_reval);
1028 1038
1029 /* search the directory for this vnode */ 1039 /* search the directory for this vnode */
1030 ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key); 1040 ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
1031 switch (ret) { 1041 switch (ret) {
1032 case 0: 1042 case 0:
1033 /* the filename maps to something */ 1043 /* the filename maps to something */
@@ -1080,7 +1090,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
1080 } 1090 }
1081 1091
1082out_valid: 1092out_valid:
1083 dentry->d_fsdata = (void *)dir_version; 1093 dentry->d_fsdata = (void *)(unsigned long)dir_version;
1094out_valid_noupdate:
1084 dput(parent); 1095 dput(parent);
1085 key_put(key); 1096 key_put(key);
1086 _leave(" = 1 [valid]"); 1097 _leave(" = 1 [valid]");
@@ -1186,6 +1197,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
1186} 1197}
1187 1198
1188/* 1199/*
1200 * Note that a dentry got changed. We need to set d_fsdata to the data version
1201 * number derived from the result of the operation. It doesn't matter if
1202 * d_fsdata goes backwards as we'll just revalidate.
1203 */
1204static void afs_update_dentry_version(struct afs_fs_cursor *fc,
1205 struct dentry *dentry,
1206 struct afs_status_cb *scb)
1207{
1208 if (fc->ac.error == 0)
1209 dentry->d_fsdata =
1210 (void *)(unsigned long)scb->status.data_version;
1211}
1212
1213/*
1189 * create a directory on an AFS filesystem 1214 * create a directory on an AFS filesystem
1190 */ 1215 */
1191static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1216static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1252,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1227 afs_check_for_remote_deletion(&fc, dvnode); 1252 afs_check_for_remote_deletion(&fc, dvnode);
1228 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1253 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1229 &data_version, &scb[0]); 1254 &data_version, &scb[0]);
1255 afs_update_dentry_version(&fc, dentry, &scb[0]);
1230 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1256 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
1231 ret = afs_end_vnode_operation(&fc); 1257 ret = afs_end_vnode_operation(&fc);
1232 if (ret < 0) 1258 if (ret < 0)
@@ -1319,6 +1345,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
1319 1345
1320 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1346 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1321 &data_version, scb); 1347 &data_version, scb);
1348 afs_update_dentry_version(&fc, dentry, scb);
1322 ret = afs_end_vnode_operation(&fc); 1349 ret = afs_end_vnode_operation(&fc);
1323 if (ret == 0) { 1350 if (ret == 0) {
1324 afs_dir_remove_subdir(dentry); 1351 afs_dir_remove_subdir(dentry);
@@ -1458,6 +1485,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
1458 &data_version, &scb[0]); 1485 &data_version, &scb[0]);
1459 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, 1486 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
1460 &data_version_2, &scb[1]); 1487 &data_version_2, &scb[1]);
1488 afs_update_dentry_version(&fc, dentry, &scb[0]);
1461 ret = afs_end_vnode_operation(&fc); 1489 ret = afs_end_vnode_operation(&fc);
1462 if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) 1490 if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
1463 ret = afs_dir_remove_link(dvnode, dentry, key); 1491 ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1554,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
1526 afs_check_for_remote_deletion(&fc, dvnode); 1554 afs_check_for_remote_deletion(&fc, dvnode);
1527 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1555 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1528 &data_version, &scb[0]); 1556 &data_version, &scb[0]);
1557 afs_update_dentry_version(&fc, dentry, &scb[0]);
1529 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1558 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
1530 ret = afs_end_vnode_operation(&fc); 1559 ret = afs_end_vnode_operation(&fc);
1531 if (ret < 0) 1560 if (ret < 0)
@@ -1607,6 +1636,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
1607 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, 1636 afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
1608 NULL, &scb[1]); 1637 NULL, &scb[1]);
1609 ihold(&vnode->vfs_inode); 1638 ihold(&vnode->vfs_inode);
1639 afs_update_dentry_version(&fc, dentry, &scb[0]);
1610 d_instantiate(dentry, &vnode->vfs_inode); 1640 d_instantiate(dentry, &vnode->vfs_inode);
1611 1641
1612 mutex_unlock(&vnode->io_lock); 1642 mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1716,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
1686 afs_check_for_remote_deletion(&fc, dvnode); 1716 afs_check_for_remote_deletion(&fc, dvnode);
1687 afs_vnode_commit_status(&fc, dvnode, fc.cb_break, 1717 afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
1688 &data_version, &scb[0]); 1718 &data_version, &scb[0]);
1719 afs_update_dentry_version(&fc, dentry, &scb[0]);
1689 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); 1720 afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
1690 ret = afs_end_vnode_operation(&fc); 1721 ret = afs_end_vnode_operation(&fc);
1691 if (ret < 0) 1722 if (ret < 0)
@@ -1791,6 +1822,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1791 } 1822 }
1792 } 1823 }
1793 1824
 1825	/* This bit is potentially nasty as there's a race with
 1826	 * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
 1827	 * to reflect its new parent's new data_version after the op, but
1828 * d_revalidate may see old_dentry between the op having taken place
1829 * and the version being updated.
1830 *
1831 * So drop the old_dentry for now to make other threads go through
1832 * lookup instead - which we hold a lock against.
1833 */
1834 d_drop(old_dentry);
1835
1794 ret = -ERESTARTSYS; 1836 ret = -ERESTARTSYS;
1795 if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { 1837 if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
1796 afs_dataversion_t orig_data_version; 1838 afs_dataversion_t orig_data_version;
@@ -1802,9 +1844,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1802 if (orig_dvnode != new_dvnode) { 1844 if (orig_dvnode != new_dvnode) {
1803 if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) { 1845 if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
1804 afs_end_vnode_operation(&fc); 1846 afs_end_vnode_operation(&fc);
1805 goto error_rehash; 1847 goto error_rehash_old;
1806 } 1848 }
1807 new_data_version = new_dvnode->status.data_version; 1849 new_data_version = new_dvnode->status.data_version + 1;
1808 } else { 1850 } else {
1809 new_data_version = orig_data_version; 1851 new_data_version = orig_data_version;
1810 new_scb = &scb[0]; 1852 new_scb = &scb[0];
@@ -1827,7 +1869,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1827 } 1869 }
1828 ret = afs_end_vnode_operation(&fc); 1870 ret = afs_end_vnode_operation(&fc);
1829 if (ret < 0) 1871 if (ret < 0)
1830 goto error_rehash; 1872 goto error_rehash_old;
1831 } 1873 }
1832 1874
1833 if (ret == 0) { 1875 if (ret == 0) {
@@ -1853,10 +1895,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
1853 drop_nlink(new_inode); 1895 drop_nlink(new_inode);
1854 spin_unlock(&new_inode->i_lock); 1896 spin_unlock(&new_inode->i_lock);
1855 } 1897 }
1898
1899 /* Now we can update d_fsdata on the dentries to reflect their
1900 * new parent's data_version.
1901 *
1902 * Note that if we ever implement RENAME_EXCHANGE, we'll have
1903 * to update both dentries with opposing dir versions.
1904 */
1905 if (new_dvnode != orig_dvnode) {
1906 afs_update_dentry_version(&fc, old_dentry, &scb[1]);
1907 afs_update_dentry_version(&fc, new_dentry, &scb[1]);
1908 } else {
1909 afs_update_dentry_version(&fc, old_dentry, &scb[0]);
1910 afs_update_dentry_version(&fc, new_dentry, &scb[0]);
1911 }
1856 d_move(old_dentry, new_dentry); 1912 d_move(old_dentry, new_dentry);
1857 goto error_tmp; 1913 goto error_tmp;
1858 } 1914 }
1859 1915
1916error_rehash_old:
1917 d_rehash(new_dentry);
1860error_rehash: 1918error_rehash:
1861 if (rehash) 1919 if (rehash)
1862 d_rehash(rehash); 1920 d_rehash(rehash);
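
The afs_update_dentry_version() helper added above stores a 64-bit data
version in the pointer-sized d_fsdata slot via an unsigned long cast, and
afs_d_revalidate() reads it back the same way. A minimal userspace sketch
of that pointer-stuffing idiom, with hypothetical names (fake_dentry,
update_version), might look like this:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_dentry {
	void *d_fsdata;			/* opaque per-dentry cookie */
};

static void update_version(struct fake_dentry *d, uint64_t data_version)
{
	/* Same idiom as the patch: truncation on 32-bit is tolerated
	 * because a stale value only forces another revalidation. */
	d->d_fsdata = (void *)(unsigned long)data_version;
}

static unsigned long read_version(const struct fake_dentry *d)
{
	return (unsigned long)d->d_fsdata;
}

int main(void)
{
	struct fake_dentry d = { 0 };

	update_version(&d, 42);
	assert(read_version(&d) == 42);
	printf("stored dir version: %lu\n", read_version(&d));
	return 0;
}
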
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..dd3c55c9101c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
191 int i; 191 int i;
192 192
193 if (refcount_dec_and_test(&req->usage)) { 193 if (refcount_dec_and_test(&req->usage)) {
194 for (i = 0; i < req->nr_pages; i++) 194 if (req->pages) {
195 if (req->pages[i]) 195 for (i = 0; i < req->nr_pages; i++)
196 put_page(req->pages[i]); 196 if (req->pages[i])
197 if (req->pages != req->array) 197 put_page(req->pages[i]);
198 kfree(req->pages); 198 if (req->pages != req->array)
199 kfree(req->pages);
200 }
199 kfree(req); 201 kfree(req);
200 } 202 }
201} 203}
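
The afs_put_read() change above guards against a req->pages array that was
never allocated before walking and freeing its entries. A small userspace
sketch of the same NULL guard, with illustrative names (read_req,
put_read), is:

#include <stdlib.h>

struct read_req {
	int nr_pages;
	void **pages;			/* may be NULL after an early failure */
	void *array[4];			/* small inline array, like req->array */
};

static void put_read(struct read_req *req)
{
	int i;

	if (req->pages) {		/* the fix: don't dereference NULL */
		for (i = 0; i < req->nr_pages; i++)
			free(req->pages[i]);
		if (req->pages != req->array)
			free(req->pages);
	}
	free(req);
}

int main(void)
{
	struct read_req *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	/* Simulate an early failure where req->pages was never set. */
	put_read(req);			/* must not crash */
	return 0;
}
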
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..cfb0ac4bd039 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
56 struct afs_uuid__xdr *xdr; 56 struct afs_uuid__xdr *xdr;
57 struct afs_uuid *uuid; 57 struct afs_uuid *uuid;
58 int j; 58 int j;
59 int n = entry->nr_servers;
59 60
60 tmp = ntohl(uvldb->serverFlags[i]); 61 tmp = ntohl(uvldb->serverFlags[i]);
61 if (tmp & AFS_VLSF_DONTUSE || 62 if (tmp & AFS_VLSF_DONTUSE ||
62 (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) 63 (new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
63 continue; 64 continue;
64 if (tmp & AFS_VLSF_RWVOL) { 65 if (tmp & AFS_VLSF_RWVOL) {
65 entry->fs_mask[i] |= AFS_VOL_VTM_RW; 66 entry->fs_mask[n] |= AFS_VOL_VTM_RW;
66 if (vlflags & AFS_VLF_BACKEXISTS) 67 if (vlflags & AFS_VLF_BACKEXISTS)
67 entry->fs_mask[i] |= AFS_VOL_VTM_BAK; 68 entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
68 } 69 }
69 if (tmp & AFS_VLSF_ROVOL) 70 if (tmp & AFS_VLSF_ROVOL)
70 entry->fs_mask[i] |= AFS_VOL_VTM_RO; 71 entry->fs_mask[n] |= AFS_VOL_VTM_RO;
71 if (!entry->fs_mask[i]) 72 if (!entry->fs_mask[n])
72 continue; 73 continue;
73 74
74 xdr = &uvldb->serverNumber[i]; 75 xdr = &uvldb->serverNumber[i];
75 uuid = (struct afs_uuid *)&entry->fs_server[i]; 76 uuid = (struct afs_uuid *)&entry->fs_server[n];
76 uuid->time_low = xdr->time_low; 77 uuid->time_low = xdr->time_low;
77 uuid->time_mid = htons(ntohl(xdr->time_mid)); 78 uuid->time_mid = htons(ntohl(xdr->time_mid));
78 uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version)); 79 uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
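
The vlclient fix above writes surviving server records at a dense output
index n (entry->nr_servers) instead of the sparse input index i, so skipped
entries no longer leave holes in the result arrays. A sketch of that
compaction pattern on illustrative data:

#include <stdio.h>

#define NENTRIES 6

int main(void)
{
	int input[NENTRIES] = { 3, -1, 7, -1, -1, 9 };
	int output[NENTRIES];
	int i, n = 0;			/* n mirrors entry->nr_servers */

	for (i = 0; i < NENTRIES; i++) {
		if (input[i] < 0)	/* skip unusable entries */
			continue;
		output[n++] = input[i];	/* write at n, not at i */
	}

	for (i = 0; i < n; i++)
		printf("output[%d] = %d\n", i, output[i]);
	return 0;
}
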
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 2575503170fc..ca2452806ebf 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -2171,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
2171 key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode); 2171 key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
2172 2172
2173 size = round_up(acl->size, 4); 2173 size = round_up(acl->size, 4);
2174 call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus, 2174 call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
2175 sizeof(__be32) * 2 + 2175 sizeof(__be32) * 2 +
2176 sizeof(struct yfs_xdr_YFSFid) + 2176 sizeof(struct yfs_xdr_YFSFid) +
2177 sizeof(__be32) + size, 2177 sizeof(__be32) + size,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index eb657ab94060..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
345 struct bio *bio; 345 struct bio *bio;
346 bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; 346 bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
347 bool is_read = (iov_iter_rw(iter) == READ), is_sync; 347 bool is_read = (iov_iter_rw(iter) == READ), is_sync;
348 bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
349 loff_t pos = iocb->ki_pos; 348 loff_t pos = iocb->ki_pos;
350 blk_qc_t qc = BLK_QC_T_NONE; 349 blk_qc_t qc = BLK_QC_T_NONE;
351 gfp_t gfp; 350 int ret = 0;
352 int ret;
353 351
354 if ((pos | iov_iter_alignment(iter)) & 352 if ((pos | iov_iter_alignment(iter)) &
355 (bdev_logical_block_size(bdev) - 1)) 353 (bdev_logical_block_size(bdev) - 1))
356 return -EINVAL; 354 return -EINVAL;
357 355
358 if (nowait) 356 bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
359 gfp = GFP_NOWAIT;
360 else
361 gfp = GFP_KERNEL;
362
363 bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
364 if (!bio)
365 return -EAGAIN;
366 357
367 dio = container_of(bio, struct blkdev_dio, bio); 358 dio = container_of(bio, struct blkdev_dio, bio);
368 dio->is_sync = is_sync = is_sync_kiocb(iocb); 359 dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
384 if (!is_poll) 375 if (!is_poll)
385 blk_start_plug(&plug); 376 blk_start_plug(&plug);
386 377
387 ret = 0;
388 for (;;) { 378 for (;;) {
389 bio_set_dev(bio, bdev); 379 bio_set_dev(bio, bdev);
390 bio->bi_iter.bi_sector = pos >> 9; 380 bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
409 task_io_account_write(bio->bi_iter.bi_size); 399 task_io_account_write(bio->bi_iter.bi_size);
410 } 400 }
411 401
412 /* 402 dio->size += bio->bi_iter.bi_size;
413 * Tell underlying layer to not block for resource shortage.
414 * And if we would have blocked, return error inline instead
415 * of through the bio->bi_end_io() callback.
416 */
417 if (nowait)
418 bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
419
420 pos += bio->bi_iter.bi_size; 403 pos += bio->bi_iter.bi_size;
421 404
422 nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES); 405 nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
428 polled = true; 411 polled = true;
429 } 412 }
430 413
431 dio->size += bio->bi_iter.bi_size;
432 qc = submit_bio(bio); 414 qc = submit_bio(bio);
433 if (qc == BLK_QC_T_EAGAIN) {
434 dio->size -= bio->bi_iter.bi_size;
435 ret = -EAGAIN;
436 goto error;
437 }
438 415
439 if (polled) 416 if (polled)
440 WRITE_ONCE(iocb->ki_cookie, qc); 417 WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
455 atomic_inc(&dio->ref); 432 atomic_inc(&dio->ref);
456 } 433 }
457 434
458 dio->size += bio->bi_iter.bi_size; 435 submit_bio(bio);
459 qc = submit_bio(bio); 436 bio = bio_alloc(GFP_KERNEL, nr_pages);
460 if (qc == BLK_QC_T_EAGAIN) {
461 dio->size -= bio->bi_iter.bi_size;
462 ret = -EAGAIN;
463 goto error;
464 }
465
466 bio = bio_alloc(gfp, nr_pages);
467 if (!bio) {
468 ret = -EAGAIN;
469 goto error;
470 }
471 } 437 }
472 438
473 if (!is_poll) 439 if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
487 } 453 }
488 __set_current_state(TASK_RUNNING); 454 __set_current_state(TASK_RUNNING);
489 455
490out:
491 if (!ret) 456 if (!ret)
492 ret = blk_status_to_errno(dio->bio.bi_status); 457 ret = blk_status_to_errno(dio->bio.bi_status);
493 if (likely(!ret)) 458 if (likely(!ret))
@@ -495,10 +460,6 @@ out:
495 460
496 bio_put(&dio->bio); 461 bio_put(&dio->bio);
497 return ret; 462 return ret;
498error:
499 if (!is_poll)
500 blk_finish_plug(&plug);
501 goto out;
502} 463}
503 464
504static ssize_t 465static ssize_t
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..94660063a162 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
401struct raid_kobject { 401struct raid_kobject {
402 u64 flags; 402 u64 flags;
403 struct kobject kobj; 403 struct kobject kobj;
404 struct list_head list;
405}; 404};
406 405
407/* 406/*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
915 u32 thread_pool_size; 914 u32 thread_pool_size;
916 915
917 struct kobject *space_info_kobj; 916 struct kobject *space_info_kobj;
918 struct list_head pending_raid_kobjs;
919 spinlock_t pending_raid_kobjs_lock; /* uncontended */
920 917
921 u64 total_pinned; 918 u64 total_pinned;
922 919
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
2698int btrfs_make_block_group(struct btrfs_trans_handle *trans, 2695int btrfs_make_block_group(struct btrfs_trans_handle *trans,
2699 u64 bytes_used, u64 type, u64 chunk_offset, 2696 u64 bytes_used, u64 type, u64 chunk_offset,
2700 u64 size); 2697 u64 size);
2701void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
2702struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( 2698struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
2703 struct btrfs_fs_info *fs_info, 2699 struct btrfs_fs_info *fs_info,
2704 const u64 chunk_offset); 2700 const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f7ee70b3d1a..97beb351a10c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
2683 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2683 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2684 INIT_LIST_HEAD(&fs_info->delalloc_roots); 2684 INIT_LIST_HEAD(&fs_info->delalloc_roots);
2685 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2685 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2686 INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
2687 spin_lock_init(&fs_info->pending_raid_kobjs_lock);
2688 spin_lock_init(&fs_info->delalloc_root_lock); 2686 spin_lock_init(&fs_info->delalloc_root_lock);
2689 spin_lock_init(&fs_info->trans_lock); 2687 spin_lock_init(&fs_info->trans_lock);
2690 spin_lock_init(&fs_info->fs_roots_radix_lock); 2688 spin_lock_init(&fs_info->fs_roots_radix_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
4 */ 4 */
5 5
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/sched/mm.h>
7#include <linux/sched/signal.h> 8#include <linux/sched/signal.h>
8#include <linux/pagemap.h> 9#include <linux/pagemap.h>
9#include <linux/writeback.h> 10#include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
7888 return 0; 7889 return 0;
7889} 7890}
7890 7891
7891/* link_block_group will queue up kobjects to add when we're reclaim-safe */
7892void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
7893{
7894 struct btrfs_space_info *space_info;
7895 struct raid_kobject *rkobj;
7896 LIST_HEAD(list);
7897 int ret = 0;
7898
7899 spin_lock(&fs_info->pending_raid_kobjs_lock);
7900 list_splice_init(&fs_info->pending_raid_kobjs, &list);
7901 spin_unlock(&fs_info->pending_raid_kobjs_lock);
7902
7903 list_for_each_entry(rkobj, &list, list) {
7904 space_info = btrfs_find_space_info(fs_info, rkobj->flags);
7905
7906 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
7907 "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
7908 if (ret) {
7909 kobject_put(&rkobj->kobj);
7910 break;
7911 }
7912 }
7913 if (ret)
7914 btrfs_warn(fs_info,
7915 "failed to add kobject for block cache, ignoring");
7916}
7917
7918static void link_block_group(struct btrfs_block_group_cache *cache) 7892static void link_block_group(struct btrfs_block_group_cache *cache)
7919{ 7893{
7920 struct btrfs_space_info *space_info = cache->space_info; 7894 struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
7929 up_write(&space_info->groups_sem); 7903 up_write(&space_info->groups_sem);
7930 7904
7931 if (first) { 7905 if (first) {
7932 struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); 7906 struct raid_kobject *rkobj;
7907 unsigned int nofs_flag;
7908 int ret;
7909
7910 /*
 7911	 * Set up a NOFS context because kobject_add(), deep in its call
7912 * chain, does GFP_KERNEL allocations, and we are often called
7913 * in a context where if reclaim is triggered we can deadlock
7914 * (we are either holding a transaction handle or some lock
7915 * required for a transaction commit).
7916 */
7917 nofs_flag = memalloc_nofs_save();
7918 rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
7933 if (!rkobj) { 7919 if (!rkobj) {
7920 memalloc_nofs_restore(nofs_flag);
7934 btrfs_warn(cache->fs_info, 7921 btrfs_warn(cache->fs_info,
7935 "couldn't alloc memory for raid level kobject"); 7922 "couldn't alloc memory for raid level kobject");
7936 return; 7923 return;
7937 } 7924 }
7938 rkobj->flags = cache->flags; 7925 rkobj->flags = cache->flags;
7939 kobject_init(&rkobj->kobj, &btrfs_raid_ktype); 7926 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
7940 7927 ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
7941 spin_lock(&fs_info->pending_raid_kobjs_lock); 7928 btrfs_bg_type_to_raid_name(rkobj->flags));
7942 list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs); 7929 memalloc_nofs_restore(nofs_flag);
7943 spin_unlock(&fs_info->pending_raid_kobjs_lock); 7930 if (ret) {
7931 kobject_put(&rkobj->kobj);
7932 btrfs_warn(fs_info,
7933 "failed to add kobject for block cache, ignoring");
7934 return;
7935 }
7944 space_info->block_group_kobjs[index] = &rkobj->kobj; 7936 space_info->block_group_kobjs[index] = &rkobj->kobj;
7945 } 7937 }
7946} 7938}
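
The link_block_group() change above brackets kobject_add()'s GFP_KERNEL
allocations with memalloc_nofs_save()/memalloc_nofs_restore() so reclaim
cannot re-enter the filesystem while a transaction handle or commit lock is
held. A userspace analogy of that scoping, with every name illustrative and
assuming the gcc/clang __thread extension, is:

#include <stdio.h>
#include <stdlib.h>

static __thread int nofs_depth;	/* stand-in for the task's NOFS flag */

static int nofs_save(void) { return nofs_depth++; }
static void nofs_restore(int old) { nofs_depth = old; }

static void *alloc(size_t sz)
{
	if (nofs_depth == 0)
		puts("alloc: filesystem reclaim allowed");
	else
		puts("alloc: filesystem reclaim suppressed (NOFS)");
	return malloc(sz);
}

int main(void)
{
	int flag = nofs_save();		/* enter NOFS scope, like the patch */
	void *p = alloc(32);		/* a deep callee now allocates safely */

	nofs_restore(flag);		/* leave the scope */
	free(p);
	return 0;
}
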
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
8206 inc_block_group_ro(cache, 1); 8198 inc_block_group_ro(cache, 1);
8207 } 8199 }
8208 8200
8209 btrfs_add_raid_kobjects(info);
8210 btrfs_init_global_block_rsv(info); 8201 btrfs_init_global_block_rsv(info);
8211 ret = check_chunk_block_group_mappings(info); 8202 ret = check_chunk_block_group_mappings(info);
8212error: 8203error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
8975 struct btrfs_device *device; 8966 struct btrfs_device *device;
8976 struct list_head *devices; 8967 struct list_head *devices;
8977 u64 group_trimmed; 8968 u64 group_trimmed;
8969 u64 range_end = U64_MAX;
8978 u64 start; 8970 u64 start;
8979 u64 end; 8971 u64 end;
8980 u64 trimmed = 0; 8972 u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
8984 int dev_ret = 0; 8976 int dev_ret = 0;
8985 int ret = 0; 8977 int ret = 0;
8986 8978
8979 /*
8980 * Check range overflow if range->len is set.
8981 * The default range->len is U64_MAX.
8982 */
8983 if (range->len != U64_MAX &&
8984 check_add_overflow(range->start, range->len, &range_end))
8985 return -EINVAL;
8986
8987 cache = btrfs_lookup_first_block_group(fs_info, range->start); 8987 cache = btrfs_lookup_first_block_group(fs_info, range->start);
8988 for (; cache; cache = next_block_group(cache)) { 8988 for (; cache; cache = next_block_group(cache)) {
8989 if (cache->key.objectid >= (range->start + range->len)) { 8989 if (cache->key.objectid >= range_end) {
8990 btrfs_put_block_group(cache); 8990 btrfs_put_block_group(cache);
8991 break; 8991 break;
8992 } 8992 }
8993 8993
8994 start = max(range->start, cache->key.objectid); 8994 start = max(range->start, cache->key.objectid);
8995 end = min(range->start + range->len, 8995 end = min(range_end, cache->key.objectid + cache->key.offset);
8996 cache->key.objectid + cache->key.offset);
8997 8996
8998 if (end - start >= range->minlen) { 8997 if (end - start >= range->minlen) {
8999 if (!block_group_cache_done(cache)) { 8998 if (!block_group_cache_done(cache)) {
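
The btrfs_trim_fs() change above rejects a range whose start + len wraps
around instead of comparing block groups against an overflowed bound. The
kernel's check_add_overflow() is built on the same compiler builtin used in
this hedged sketch (assuming gcc or clang):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = UINT64_MAX - 10;
	uint64_t len = 100;
	uint64_t range_end = UINT64_MAX;	/* default, like the patch */

	if (len != UINT64_MAX &&
	    __builtin_add_overflow(start, len, &range_end)) {
		fprintf(stderr, "invalid range: start + len overflows\n");
		return 1;			/* -EINVAL in the kernel */
	}

	printf("range_end = %llu\n", (unsigned long long)range_end);
	return 0;
}
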
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d74b74ca07af..a447d3ec48d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3087 if (ret) 3087 if (ret)
3088 return ret; 3088 return ret;
3089 3089
3090 /*
3091 * We add the kobjects here (and after forcing data chunk creation)
3092 * since relocation is the only place we'll create chunks of a new
3093 * type at runtime. The only place where we'll remove the last
3094 * chunk of a type is the call immediately below this one. Even
3095 * so, we're protected against races with the cleaner thread since
3096 * we're covered by the delete_unused_bgs_mutex.
3097 */
3098 btrfs_add_raid_kobjects(fs_info);
3099
3100 trans = btrfs_start_trans_remove_block_group(root->fs_info, 3090 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3101 chunk_offset); 3091 chunk_offset);
3102 if (IS_ERR(trans)) { 3092 if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3223 btrfs_end_transaction(trans); 3213 btrfs_end_transaction(trans);
3224 if (ret < 0) 3214 if (ret < 0)
3225 return ret; 3215 return ret;
3226
3227 btrfs_add_raid_kobjects(fs_info);
3228
3229 return 1; 3216 return 1;
3230 } 3217 }
3231 } 3218 }
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..b3c8b886bf64 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ get_more_pages:
913 if (page_offset(page) >= ceph_wbc.i_size) { 913 if (page_offset(page) >= ceph_wbc.i_size) {
914 dout("%p page eof %llu\n", 914 dout("%p page eof %llu\n",
915 page, ceph_wbc.i_size); 915 page, ceph_wbc.i_size);
916 if (ceph_wbc.size_stable || 916 if ((ceph_wbc.size_stable ||
917 page_offset(page) >= i_size_read(inode)) 917 page_offset(page) >= i_size_read(inode)) &&
918 clear_page_dirty_for_io(page))
918 mapping->a_ops->invalidatepage(page, 919 mapping->a_ops->invalidatepage(page,
919 0, PAGE_SIZE); 920 0, PAGE_SIZE);
920 unlock_page(page); 921 unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..ce0f5658720a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1301{ 1301{
1302 struct ceph_inode_info *ci = cap->ci; 1302 struct ceph_inode_info *ci = cap->ci;
1303 struct inode *inode = &ci->vfs_inode; 1303 struct inode *inode = &ci->vfs_inode;
1304 struct ceph_buffer *old_blob = NULL;
1304 struct cap_msg_args arg; 1305 struct cap_msg_args arg;
1305 int held, revoking; 1306 int held, revoking;
1306 int wake = 0; 1307 int wake = 0;
@@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1365 ci->i_requested_max_size = arg.max_size; 1366 ci->i_requested_max_size = arg.max_size;
1366 1367
1367 if (flushing & CEPH_CAP_XATTR_EXCL) { 1368 if (flushing & CEPH_CAP_XATTR_EXCL) {
1368 __ceph_build_xattrs_blob(ci); 1369 old_blob = __ceph_build_xattrs_blob(ci);
1369 arg.xattr_version = ci->i_xattrs.version; 1370 arg.xattr_version = ci->i_xattrs.version;
1370 arg.xattr_buf = ci->i_xattrs.blob; 1371 arg.xattr_buf = ci->i_xattrs.blob;
1371 } else { 1372 } else {
@@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1409 1410
1410 spin_unlock(&ci->i_ceph_lock); 1411 spin_unlock(&ci->i_ceph_lock);
1411 1412
1413 ceph_buffer_put(old_blob);
1414
1412 ret = send_cap_msg(&arg); 1415 ret = send_cap_msg(&arg);
1413 if (ret < 0) { 1416 if (ret < 0) {
1414 dout("error sending cap msg, must requeue %p\n", inode); 1417 dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..18500edefc56 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
736 int issued, new_issued, info_caps; 736 int issued, new_issued, info_caps;
737 struct timespec64 mtime, atime, ctime; 737 struct timespec64 mtime, atime, ctime;
738 struct ceph_buffer *xattr_blob = NULL; 738 struct ceph_buffer *xattr_blob = NULL;
739 struct ceph_buffer *old_blob = NULL;
739 struct ceph_string *pool_ns = NULL; 740 struct ceph_string *pool_ns = NULL;
740 struct ceph_cap *new_cap = NULL; 741 struct ceph_cap *new_cap = NULL;
741 int err = 0; 742 int err = 0;
@@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
881 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && 882 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
882 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 883 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
883 if (ci->i_xattrs.blob) 884 if (ci->i_xattrs.blob)
884 ceph_buffer_put(ci->i_xattrs.blob); 885 old_blob = ci->i_xattrs.blob;
885 ci->i_xattrs.blob = xattr_blob; 886 ci->i_xattrs.blob = xattr_blob;
886 if (xattr_blob) 887 if (xattr_blob)
887 memcpy(ci->i_xattrs.blob->vec.iov_base, 888 memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
1022out: 1023out:
1023 if (new_cap) 1024 if (new_cap)
1024 ceph_put_cap(mdsc, new_cap); 1025 ceph_put_cap(mdsc, new_cap);
1025 if (xattr_blob) 1026 ceph_buffer_put(old_blob);
1026 ceph_buffer_put(xattr_blob); 1027 ceph_buffer_put(xattr_blob);
1027 ceph_put_string(pool_ns); 1028 ceph_put_string(pool_ns);
1028 return err; 1029 return err;
1029} 1030}
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..5083e238ad15 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
111 req->r_wait_for_completion = ceph_lock_wait_for_completion; 111 req->r_wait_for_completion = ceph_lock_wait_for_completion;
112 112
113 err = ceph_mdsc_do_request(mdsc, inode, req); 113 err = ceph_mdsc_do_request(mdsc, inode, req);
114 114 if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
115 if (operation == CEPH_MDS_OP_GETFILELOCK) {
116 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid); 115 fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
117 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) 116 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
118 fl->fl_type = F_RDLCK; 117 fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
465 struct inode *inode = &ci->vfs_inode; 465 struct inode *inode = &ci->vfs_inode;
466 struct ceph_cap_snap *capsnap; 466 struct ceph_cap_snap *capsnap;
467 struct ceph_snap_context *old_snapc, *new_snapc; 467 struct ceph_snap_context *old_snapc, *new_snapc;
468 struct ceph_buffer *old_blob = NULL;
468 int used, dirty; 469 int used, dirty;
469 470
470 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); 471 capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
541 capsnap->gid = inode->i_gid; 542 capsnap->gid = inode->i_gid;
542 543
543 if (dirty & CEPH_CAP_XATTR_EXCL) { 544 if (dirty & CEPH_CAP_XATTR_EXCL) {
544 __ceph_build_xattrs_blob(ci); 545 old_blob = __ceph_build_xattrs_blob(ci);
545 capsnap->xattr_blob = 546 capsnap->xattr_blob =
546 ceph_buffer_get(ci->i_xattrs.blob); 547 ceph_buffer_get(ci->i_xattrs.blob);
547 capsnap->xattr_version = ci->i_xattrs.version; 548 capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
584 } 585 }
585 spin_unlock(&ci->i_ceph_lock); 586 spin_unlock(&ci->i_ceph_lock);
586 587
588 ceph_buffer_put(old_blob);
587 kfree(capsnap); 589 kfree(capsnap);
588 ceph_put_snap_context(old_snapc); 590 ceph_put_snap_context(old_snapc);
589} 591}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..6b9f1ee7de85 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
926int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); 926int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
927ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); 927ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
928extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); 928extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
929extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci); 929extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
930extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); 930extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
931extern const struct xattr_handler *ceph_xattr_handlers[]; 931extern const struct xattr_handler *ceph_xattr_handlers[];
932 932
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..939eab7aa219 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
754 754
755/* 755/*
756 * If there are dirty xattrs, reencode xattrs into the prealloc_blob 756 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
757 * and swap into place. 757 * and swap into place. It returns the old i_xattrs.blob (or NULL) so
758 * that it can be freed by the caller as the i_ceph_lock is likely to be
759 * held.
758 */ 760 */
759void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) 761struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
760{ 762{
761 struct rb_node *p; 763 struct rb_node *p;
762 struct ceph_inode_xattr *xattr = NULL; 764 struct ceph_inode_xattr *xattr = NULL;
765 struct ceph_buffer *old_blob = NULL;
763 void *dest; 766 void *dest;
764 767
765 dout("__build_xattrs_blob %p\n", &ci->vfs_inode); 768 dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
790 dest - ci->i_xattrs.prealloc_blob->vec.iov_base; 793 dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
791 794
792 if (ci->i_xattrs.blob) 795 if (ci->i_xattrs.blob)
793 ceph_buffer_put(ci->i_xattrs.blob); 796 old_blob = ci->i_xattrs.blob;
794 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; 797 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
795 ci->i_xattrs.prealloc_blob = NULL; 798 ci->i_xattrs.prealloc_blob = NULL;
796 ci->i_xattrs.dirty = false; 799 ci->i_xattrs.dirty = false;
797 ci->i_xattrs.version++; 800 ci->i_xattrs.version++;
798 } 801 }
802
803 return old_blob;
799} 804}
800 805
801static inline int __get_request_mask(struct inode *in) { 806static inline int __get_request_mask(struct inode *in) {
@@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
1036 struct ceph_inode_info *ci = ceph_inode(inode); 1041 struct ceph_inode_info *ci = ceph_inode(inode);
1037 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1042 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1038 struct ceph_cap_flush *prealloc_cf = NULL; 1043 struct ceph_cap_flush *prealloc_cf = NULL;
1044 struct ceph_buffer *old_blob = NULL;
1039 int issued; 1045 int issued;
1040 int err; 1046 int err;
1041 int dirty = 0; 1047 int dirty = 0;
@@ -1109,13 +1115,15 @@ retry:
1109 struct ceph_buffer *blob; 1115 struct ceph_buffer *blob;
1110 1116
1111 spin_unlock(&ci->i_ceph_lock); 1117 spin_unlock(&ci->i_ceph_lock);
1112 dout(" preaallocating new blob size=%d\n", required_blob_size); 1118 ceph_buffer_put(old_blob); /* Shouldn't be required */
1119 dout(" pre-allocating new blob size=%d\n", required_blob_size);
1113 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 1120 blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
1114 if (!blob) 1121 if (!blob)
1115 goto do_sync_unlocked; 1122 goto do_sync_unlocked;
1116 spin_lock(&ci->i_ceph_lock); 1123 spin_lock(&ci->i_ceph_lock);
1124 /* prealloc_blob can't be released while holding i_ceph_lock */
1117 if (ci->i_xattrs.prealloc_blob) 1125 if (ci->i_xattrs.prealloc_blob)
1118 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 1126 old_blob = ci->i_xattrs.prealloc_blob;
1119 ci->i_xattrs.prealloc_blob = blob; 1127 ci->i_xattrs.prealloc_blob = blob;
1120 goto retry; 1128 goto retry;
1121 } 1129 }
@@ -1131,6 +1139,7 @@ retry:
1131 } 1139 }
1132 1140
1133 spin_unlock(&ci->i_ceph_lock); 1141 spin_unlock(&ci->i_ceph_lock);
1142 ceph_buffer_put(old_blob);
1134 if (lock_snap_rwsem) 1143 if (lock_snap_rwsem)
1135 up_read(&mdsc->snap_rwsem); 1144 up_read(&mdsc->snap_rwsem);
1136 if (dirty) 1145 if (dirty)
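
The ceph changes above (caps.c, inode.c, snap.c and xattr.c) all follow one
pattern: __ceph_build_xattrs_blob() and its callers now hand the displaced
blob back to be released only after i_ceph_lock is dropped, because
ceph_buffer_put() must not run under the spinlock. A userspace sketch of
that free-outside-the-lock pattern, with illustrative names (blob,
swap_blob), is:

#include <pthread.h>
#include <stdlib.h>

struct blob { char data[64]; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct blob *current_blob;

/* Swap in a new blob under the lock; hand the old one back so the caller
 * can free it without holding the lock, the way the patches defer
 * ceph_buffer_put() until after spin_unlock(). */
static struct blob *swap_blob(struct blob *new_blob)
{
	struct blob *old;

	pthread_mutex_lock(&lock);
	old = current_blob;
	current_blob = new_blob;
	pthread_mutex_unlock(&lock);
	return old;
}

int main(void)
{
	struct blob *old;

	free(swap_blob(calloc(1, sizeof(struct blob))));
	old = swap_blob(calloc(1, sizeof(struct blob)));
	free(old);			/* freed outside the critical section */
	free(swap_blob(NULL));
	return 0;
}
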
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4b21a90015a9..99caf77df4a2 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
152extern const struct export_operations cifs_export_ops; 152extern const struct export_operations cifs_export_ops;
153#endif /* CONFIG_CIFS_NFSD_EXPORT */ 153#endif /* CONFIG_CIFS_NFSD_EXPORT */
154 154
155#define CIFS_VERSION "2.21" 155#define CIFS_VERSION "2.22"
156#endif /* _CIFSFS_H */ 156#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e23234207fc2..592a6cea2b79 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
579 unsigned int *len, unsigned int *offset); 579 unsigned int *len, unsigned int *offset);
580 580
581void extract_unc_hostname(const char *unc, const char **h, size_t *len); 581void extract_unc_hostname(const char *unc, const char **h, size_t *len);
582int copy_path_name(char *dst, const char *src);
582 583
583#ifdef CONFIG_CIFS_DFS_UPCALL 584#ifdef CONFIG_CIFS_DFS_UPCALL
584static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, 585static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e2f95965065d..3907653e63c7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -942,10 +942,8 @@ PsxDelete:
942 PATH_MAX, nls_codepage, remap); 942 PATH_MAX, nls_codepage, remap);
943 name_len++; /* trailing null */ 943 name_len++; /* trailing null */
944 name_len *= 2; 944 name_len *= 2;
945 } else { /* BB add path length overrun check */ 945 } else {
946 name_len = strnlen(fileName, PATH_MAX); 946 name_len = copy_path_name(pSMB->FileName, fileName);
947 name_len++; /* trailing null */
948 strncpy(pSMB->FileName, fileName, name_len);
949 } 947 }
950 948
951 params = 6 + name_len; 949 params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
1015 remap); 1013 remap);
1016 name_len++; /* trailing null */ 1014 name_len++; /* trailing null */
1017 name_len *= 2; 1015 name_len *= 2;
1018 } else { /* BB improve check for buffer overruns BB */ 1016 } else {
1019 name_len = strnlen(name, PATH_MAX); 1017 name_len = copy_path_name(pSMB->fileName, name);
1020 name_len++; /* trailing null */
1021 strncpy(pSMB->fileName, name, name_len);
1022 } 1018 }
1023 pSMB->SearchAttributes = 1019 pSMB->SearchAttributes =
1024 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM); 1020 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
1062 remap); 1058 remap);
1063 name_len++; /* trailing null */ 1059 name_len++; /* trailing null */
1064 name_len *= 2; 1060 name_len *= 2;
1065 } else { /* BB improve check for buffer overruns BB */ 1061 } else {
1066 name_len = strnlen(name, PATH_MAX); 1062 name_len = copy_path_name(pSMB->DirName, name);
1067 name_len++; /* trailing null */
1068 strncpy(pSMB->DirName, name, name_len);
1069 } 1063 }
1070 1064
1071 pSMB->BufferFormat = 0x04; 1065 pSMB->BufferFormat = 0x04;
@@ -1107,10 +1101,8 @@ MkDirRetry:
1107 remap); 1101 remap);
1108 name_len++; /* trailing null */ 1102 name_len++; /* trailing null */
1109 name_len *= 2; 1103 name_len *= 2;
1110 } else { /* BB improve check for buffer overruns BB */ 1104 } else {
1111 name_len = strnlen(name, PATH_MAX); 1105 name_len = copy_path_name(pSMB->DirName, name);
1112 name_len++; /* trailing null */
1113 strncpy(pSMB->DirName, name, name_len);
1114 } 1106 }
1115 1107
1116 pSMB->BufferFormat = 0x04; 1108 pSMB->BufferFormat = 0x04;
@@ -1157,10 +1149,8 @@ PsxCreat:
1157 PATH_MAX, nls_codepage, remap); 1149 PATH_MAX, nls_codepage, remap);
1158 name_len++; /* trailing null */ 1150 name_len++; /* trailing null */
1159 name_len *= 2; 1151 name_len *= 2;
1160 } else { /* BB improve the check for buffer overruns BB */ 1152 } else {
1161 name_len = strnlen(name, PATH_MAX); 1153 name_len = copy_path_name(pSMB->FileName, name);
1162 name_len++; /* trailing null */
1163 strncpy(pSMB->FileName, name, name_len);
1164 } 1154 }
1165 1155
1166 params = 6 + name_len; 1156 params = 6 + name_len;
@@ -1324,11 +1314,9 @@ OldOpenRetry:
1324 fileName, PATH_MAX, nls_codepage, remap); 1314 fileName, PATH_MAX, nls_codepage, remap);
1325 name_len++; /* trailing null */ 1315 name_len++; /* trailing null */
1326 name_len *= 2; 1316 name_len *= 2;
1327 } else { /* BB improve check for buffer overruns BB */ 1317 } else {
1328 count = 0; /* no pad */ 1318 count = 0; /* no pad */
1329 name_len = strnlen(fileName, PATH_MAX); 1319 name_len = copy_path_name(pSMB->fileName, fileName);
1330 name_len++; /* trailing null */
1331 strncpy(pSMB->fileName, fileName, name_len);
1332 } 1320 }
1333 if (*pOplock & REQ_OPLOCK) 1321 if (*pOplock & REQ_OPLOCK)
1334 pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK); 1322 pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1442,11 +1430,8 @@ openRetry:
1442 /* BB improve check for buffer overruns BB */ 1430 /* BB improve check for buffer overruns BB */
1443 /* no pad */ 1431 /* no pad */
1444 count = 0; 1432 count = 0;
1445 name_len = strnlen(path, PATH_MAX); 1433 name_len = copy_path_name(req->fileName, path);
1446 /* trailing null */
1447 name_len++;
1448 req->NameLength = cpu_to_le16(name_len); 1434 req->NameLength = cpu_to_le16(name_len);
1449 strncpy(req->fileName, path, name_len);
1450 } 1435 }
1451 1436
1452 if (*oplock & REQ_OPLOCK) 1437 if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2797,10 @@ renameRetry:
2812 remap); 2797 remap);
2813 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2798 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2814 name_len2 *= 2; /* convert to bytes */ 2799 name_len2 *= 2; /* convert to bytes */
2815 } else { /* BB improve the check for buffer overruns BB */ 2800 } else {
2816 name_len = strnlen(from_name, PATH_MAX); 2801 name_len = copy_path_name(pSMB->OldFileName, from_name);
2817 name_len++; /* trailing null */ 2802 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
2818 strncpy(pSMB->OldFileName, from_name, name_len);
2819 name_len2 = strnlen(to_name, PATH_MAX);
2820 name_len2++; /* trailing null */
2821 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 2803 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
2822 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
2823 name_len2++; /* trailing null */
2824 name_len2++; /* signature byte */ 2804 name_len2++; /* signature byte */
2825 } 2805 }
2826 2806
@@ -2962,15 +2942,10 @@ copyRetry:
2962 toName, PATH_MAX, nls_codepage, remap); 2942 toName, PATH_MAX, nls_codepage, remap);
2963 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 2943 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
2964 name_len2 *= 2; /* convert to bytes */ 2944 name_len2 *= 2; /* convert to bytes */
2965 } else { /* BB improve the check for buffer overruns BB */ 2945 } else {
2966 name_len = strnlen(fromName, PATH_MAX); 2946 name_len = copy_path_name(pSMB->OldFileName, fromName);
2967 name_len++; /* trailing null */
2968 strncpy(pSMB->OldFileName, fromName, name_len);
2969 name_len2 = strnlen(toName, PATH_MAX);
2970 name_len2++; /* trailing null */
2971 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 2947 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
2972 strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2); 2948 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
2973 name_len2++; /* trailing null */
2974 name_len2++; /* signature byte */ 2949 name_len2++; /* signature byte */
2975 } 2950 }
2976 2951
@@ -3021,10 +2996,8 @@ createSymLinkRetry:
3021 name_len++; /* trailing null */ 2996 name_len++; /* trailing null */
3022 name_len *= 2; 2997 name_len *= 2;
3023 2998
3024 } else { /* BB improve the check for buffer overruns BB */ 2999 } else {
3025 name_len = strnlen(fromName, PATH_MAX); 3000 name_len = copy_path_name(pSMB->FileName, fromName);
3026 name_len++; /* trailing null */
3027 strncpy(pSMB->FileName, fromName, name_len);
3028 } 3001 }
3029 params = 6 + name_len; 3002 params = 6 + name_len;
3030 pSMB->MaxSetupCount = 0; 3003 pSMB->MaxSetupCount = 0;
@@ -3044,10 +3017,8 @@ createSymLinkRetry:
3044 PATH_MAX, nls_codepage, remap); 3017 PATH_MAX, nls_codepage, remap);
3045 name_len_target++; /* trailing null */ 3018 name_len_target++; /* trailing null */
3046 name_len_target *= 2; 3019 name_len_target *= 2;
3047 } else { /* BB improve the check for buffer overruns BB */ 3020 } else {
3048 name_len_target = strnlen(toName, PATH_MAX); 3021 name_len_target = copy_path_name(data_offset, toName);
3049 name_len_target++; /* trailing null */
3050 strncpy(data_offset, toName, name_len_target);
3051 } 3022 }
3052 3023
3053 pSMB->MaxParameterCount = cpu_to_le16(2); 3024 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3080,8 @@ createHardLinkRetry:
3109 name_len++; /* trailing null */ 3080 name_len++; /* trailing null */
3110 name_len *= 2; 3081 name_len *= 2;
3111 3082
3112 } else { /* BB improve the check for buffer overruns BB */ 3083 } else {
3113 name_len = strnlen(toName, PATH_MAX); 3084 name_len = copy_path_name(pSMB->FileName, toName);
3114 name_len++; /* trailing null */
3115 strncpy(pSMB->FileName, toName, name_len);
3116 } 3085 }
3117 params = 6 + name_len; 3086 params = 6 + name_len;
3118 pSMB->MaxSetupCount = 0; 3087 pSMB->MaxSetupCount = 0;
@@ -3131,10 +3100,8 @@ createHardLinkRetry:
3131 PATH_MAX, nls_codepage, remap); 3100 PATH_MAX, nls_codepage, remap);
3132 name_len_target++; /* trailing null */ 3101 name_len_target++; /* trailing null */
3133 name_len_target *= 2; 3102 name_len_target *= 2;
3134 } else { /* BB improve the check for buffer overruns BB */ 3103 } else {
3135 name_len_target = strnlen(fromName, PATH_MAX); 3104 name_len_target = copy_path_name(data_offset, fromName);
3136 name_len_target++; /* trailing null */
3137 strncpy(data_offset, fromName, name_len_target);
3138 } 3105 }
3139 3106
3140 pSMB->MaxParameterCount = cpu_to_le16(2); 3107 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3180,10 @@ winCreateHardLinkRetry:
3213 remap); 3180 remap);
3214 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ; 3181 name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
3215 name_len2 *= 2; /* convert to bytes */ 3182 name_len2 *= 2; /* convert to bytes */
3216 } else { /* BB improve the check for buffer overruns BB */ 3183 } else {
3217 name_len = strnlen(from_name, PATH_MAX); 3184 name_len = copy_path_name(pSMB->OldFileName, from_name);
3218 name_len++; /* trailing null */
3219 strncpy(pSMB->OldFileName, from_name, name_len);
3220 name_len2 = strnlen(to_name, PATH_MAX);
3221 name_len2++; /* trailing null */
3222 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */ 3185 pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
3223 strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2); 3186 name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
3224 name_len2++; /* trailing null */
3225 name_len2++; /* signature byte */ 3187 name_len2++; /* signature byte */
3226 } 3188 }
3227 3189
@@ -3271,10 +3233,8 @@ querySymLinkRetry:
3271 remap); 3233 remap);
3272 name_len++; /* trailing null */ 3234 name_len++; /* trailing null */
3273 name_len *= 2; 3235 name_len *= 2;
3274 } else { /* BB improve the check for buffer overruns BB */ 3236 } else {
3275 name_len = strnlen(searchName, PATH_MAX); 3237 name_len = copy_path_name(pSMB->FileName, searchName);
3276 name_len++; /* trailing null */
3277 strncpy(pSMB->FileName, searchName, name_len);
3278 } 3238 }
3279 3239
3280 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3240 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3691,10 +3651,8 @@ queryAclRetry:
3691 name_len *= 2; 3651 name_len *= 2;
3692 pSMB->FileName[name_len] = 0; 3652 pSMB->FileName[name_len] = 0;
3693 pSMB->FileName[name_len+1] = 0; 3653 pSMB->FileName[name_len+1] = 0;
3694 } else { /* BB improve the check for buffer overruns BB */ 3654 } else {
3695 name_len = strnlen(searchName, PATH_MAX); 3655 name_len = copy_path_name(pSMB->FileName, searchName);
3696 name_len++; /* trailing null */
3697 strncpy(pSMB->FileName, searchName, name_len);
3698 } 3656 }
3699 3657
3700 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 3658 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3776,10 +3734,8 @@ setAclRetry:
3776 PATH_MAX, nls_codepage, remap); 3734 PATH_MAX, nls_codepage, remap);
3777 name_len++; /* trailing null */ 3735 name_len++; /* trailing null */
3778 name_len *= 2; 3736 name_len *= 2;
3779 } else { /* BB improve the check for buffer overruns BB */ 3737 } else {
3780 name_len = strnlen(fileName, PATH_MAX); 3738 name_len = copy_path_name(pSMB->FileName, fileName);
3781 name_len++; /* trailing null */
3782 strncpy(pSMB->FileName, fileName, name_len);
3783 } 3739 }
3784 params = 6 + name_len; 3740 params = 6 + name_len;
3785 pSMB->MaxParameterCount = cpu_to_le16(2); 3741 pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4140,7 @@ QInfRetry:
4184 name_len++; /* trailing null */ 4140 name_len++; /* trailing null */
4185 name_len *= 2; 4141 name_len *= 2;
4186 } else { 4142 } else {
4187 name_len = strnlen(search_name, PATH_MAX); 4143 name_len = copy_path_name(pSMB->FileName, search_name);
4188 name_len++; /* trailing null */
4189 strncpy(pSMB->FileName, search_name, name_len);
4190 } 4144 }
4191 pSMB->BufferFormat = 0x04; 4145 pSMB->BufferFormat = 0x04;
4192 name_len++; /* account for buffer type byte */ 4146 name_len++; /* account for buffer type byte */
@@ -4321,10 +4275,8 @@ QPathInfoRetry:
4321 PATH_MAX, nls_codepage, remap); 4275 PATH_MAX, nls_codepage, remap);
4322 name_len++; /* trailing null */ 4276 name_len++; /* trailing null */
4323 name_len *= 2; 4277 name_len *= 2;
4324 } else { /* BB improve the check for buffer overruns BB */ 4278 } else {
4325 name_len = strnlen(search_name, PATH_MAX); 4279 name_len = copy_path_name(pSMB->FileName, search_name);
4326 name_len++; /* trailing null */
4327 strncpy(pSMB->FileName, search_name, name_len);
4328 } 4280 }
4329 4281
4330 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4282 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4442,8 @@ UnixQPathInfoRetry:
4490 PATH_MAX, nls_codepage, remap); 4442 PATH_MAX, nls_codepage, remap);
4491 name_len++; /* trailing null */ 4443 name_len++; /* trailing null */
4492 name_len *= 2; 4444 name_len *= 2;
4493 } else { /* BB improve the check for buffer overruns BB */ 4445 } else {
4494 name_len = strnlen(searchName, PATH_MAX); 4446 name_len = copy_path_name(pSMB->FileName, searchName);
4495 name_len++; /* trailing null */
4496 strncpy(pSMB->FileName, searchName, name_len);
4497 } 4447 }
4498 4448
4499 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */; 4449 params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4543,16 @@ findFirstRetry:
4593 pSMB->FileName[name_len+1] = 0; 4543 pSMB->FileName[name_len+1] = 0;
4594 name_len += 2; 4544 name_len += 2;
4595 } 4545 }
4596 } else { /* BB add check for overrun of SMB buf BB */ 4546 } else {
4597 name_len = strnlen(searchName, PATH_MAX); 4547 name_len = copy_path_name(pSMB->FileName, searchName);
4598/* BB fix here and in unicode clause above ie
4599 if (name_len > buffersize-header)
4600 free buffer exit; BB */
4601 strncpy(pSMB->FileName, searchName, name_len);
4602 if (msearch) { 4548 if (msearch) {
4603 pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb); 4549 if (WARN_ON_ONCE(name_len > PATH_MAX-2))
4604 pSMB->FileName[name_len+1] = '*'; 4550 name_len = PATH_MAX-2;
4605 pSMB->FileName[name_len+2] = 0; 4551 /* overwrite nul byte */
4606 name_len += 3; 4552 pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
4553 pSMB->FileName[name_len] = '*';
4554 pSMB->FileName[name_len+1] = 0;
4555 name_len += 2;
4607 } 4556 }
4608 } 4557 }
4609 4558
@@ -4898,10 +4847,8 @@ GetInodeNumberRetry:
4898 remap); 4847 remap);
4899 name_len++; /* trailing null */ 4848 name_len++; /* trailing null */
4900 name_len *= 2; 4849 name_len *= 2;
4901 } else { /* BB improve the check for buffer overruns BB */ 4850 } else {
4902 name_len = strnlen(search_name, PATH_MAX); 4851 name_len = copy_path_name(pSMB->FileName, search_name);
4903 name_len++; /* trailing null */
4904 strncpy(pSMB->FileName, search_name, name_len);
4905 } 4852 }
4906 4853
4907 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ; 4854 params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -5008,9 +4955,7 @@ getDFSRetry:
5008 name_len++; /* trailing null */ 4955 name_len++; /* trailing null */
5009 name_len *= 2; 4956 name_len *= 2;
5010 } else { /* BB improve the check for buffer overruns BB */ 4957 } else { /* BB improve the check for buffer overruns BB */
5011 name_len = strnlen(search_name, PATH_MAX); 4958 name_len = copy_path_name(pSMB->RequestFileName, search_name);
5012 name_len++; /* trailing null */
5013 strncpy(pSMB->RequestFileName, search_name, name_len);
5014 } 4959 }
5015 4960
5016 if (ses->server->sign) 4961 if (ses->server->sign)
@@ -5663,10 +5608,8 @@ SetEOFRetry:
5663 PATH_MAX, cifs_sb->local_nls, remap); 5608 PATH_MAX, cifs_sb->local_nls, remap);
5664 name_len++; /* trailing null */ 5609 name_len++; /* trailing null */
5665 name_len *= 2; 5610 name_len *= 2;
5666 } else { /* BB improve the check for buffer overruns BB */ 5611 } else {
5667 name_len = strnlen(file_name, PATH_MAX); 5612 name_len = copy_path_name(pSMB->FileName, file_name);
5668 name_len++; /* trailing null */
5669 strncpy(pSMB->FileName, file_name, name_len);
5670 } 5613 }
5671 params = 6 + name_len; 5614 params = 6 + name_len;
5672 data_count = sizeof(struct file_end_of_file_info); 5615 data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5902,8 @@ SetTimesRetry:
5959 PATH_MAX, nls_codepage, remap); 5902 PATH_MAX, nls_codepage, remap);
5960 name_len++; /* trailing null */ 5903 name_len++; /* trailing null */
5961 name_len *= 2; 5904 name_len *= 2;
5962 } else { /* BB improve the check for buffer overruns BB */ 5905 } else {
5963 name_len = strnlen(fileName, PATH_MAX); 5906 name_len = copy_path_name(pSMB->FileName, fileName);
5964 name_len++; /* trailing null */
5965 strncpy(pSMB->FileName, fileName, name_len);
5966 } 5907 }
5967 5908
5968 params = 6 + name_len; 5909 params = 6 + name_len;
@@ -6040,10 +5981,8 @@ SetAttrLgcyRetry:
6040 PATH_MAX, nls_codepage); 5981 PATH_MAX, nls_codepage);
6041 name_len++; /* trailing null */ 5982 name_len++; /* trailing null */
6042 name_len *= 2; 5983 name_len *= 2;
6043 } else { /* BB improve the check for buffer overruns BB */ 5984 } else {
6044 name_len = strnlen(fileName, PATH_MAX); 5985 name_len = copy_path_name(pSMB->fileName, fileName);
6045 name_len++; /* trailing null */
6046 strncpy(pSMB->fileName, fileName, name_len);
6047 } 5986 }
6048 pSMB->attr = cpu_to_le16(dos_attrs); 5987 pSMB->attr = cpu_to_le16(dos_attrs);
6049 pSMB->BufferFormat = 0x04; 5988 pSMB->BufferFormat = 0x04;
@@ -6203,10 +6142,8 @@ setPermsRetry:
6203 PATH_MAX, nls_codepage, remap); 6142 PATH_MAX, nls_codepage, remap);
6204 name_len++; /* trailing null */ 6143 name_len++; /* trailing null */
6205 name_len *= 2; 6144 name_len *= 2;
6206 } else { /* BB improve the check for buffer overruns BB */ 6145 } else {
6207 name_len = strnlen(file_name, PATH_MAX); 6146 name_len = copy_path_name(pSMB->FileName, file_name);
6208 name_len++; /* trailing null */
6209 strncpy(pSMB->FileName, file_name, name_len);
6210 } 6147 }
6211 6148
6212 params = 6 + name_len; 6149 params = 6 + name_len;
@@ -6298,10 +6235,8 @@ QAllEAsRetry:
6298 PATH_MAX, nls_codepage, remap); 6235 PATH_MAX, nls_codepage, remap);
6299 list_len++; /* trailing null */ 6236 list_len++; /* trailing null */
6300 list_len *= 2; 6237 list_len *= 2;
6301 } else { /* BB improve the check for buffer overruns BB */ 6238 } else {
6302 list_len = strnlen(searchName, PATH_MAX); 6239 list_len = copy_path_name(pSMB->FileName, searchName);
6303 list_len++; /* trailing null */
6304 strncpy(pSMB->FileName, searchName, list_len);
6305 } 6240 }
6306 6241
6307 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */; 6242 params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6415,8 @@ SetEARetry:
6480 PATH_MAX, nls_codepage, remap); 6415 PATH_MAX, nls_codepage, remap);
6481 name_len++; /* trailing null */ 6416 name_len++; /* trailing null */
6482 name_len *= 2; 6417 name_len *= 2;
6483 } else { /* BB improve the check for buffer overruns BB */ 6418 } else {
6484 name_len = strnlen(fileName, PATH_MAX); 6419 name_len = copy_path_name(pSMB->FileName, fileName);
6485 name_len++; /* trailing null */
6486 strncpy(pSMB->FileName, fileName, name_len);
6487 } 6420 }
6488 6421
6489 params = 6 + name_len; 6422 params = 6 + name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a15a6e738eb5..5299effa6f7d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,7 +1113,7 @@ cifs_demultiplex_thread(void *p)
1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv); 1113 mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
1114 1114
1115 set_freezable(); 1115 set_freezable();
1116 allow_signal(SIGKILL); 1116 allow_kernel_signal(SIGKILL);
1117 while (server->tcpStatus != CifsExiting) { 1117 while (server->tcpStatus != CifsExiting) {
1118 if (try_to_freeze()) 1118 if (try_to_freeze())
1119 continue; 1119 continue;
@@ -2981,6 +2981,7 @@ static int
2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) 2981cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
2982{ 2982{
2983 int rc = 0; 2983 int rc = 0;
2984 int is_domain = 0;
2984 const char *delim, *payload; 2985 const char *delim, *payload;
2985 char *desc; 2986 char *desc;
2986 ssize_t len; 2987 ssize_t len;
@@ -3028,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3028 rc = PTR_ERR(key); 3029 rc = PTR_ERR(key);
3029 goto out_err; 3030 goto out_err;
3030 } 3031 }
3032 is_domain = 1;
3031 } 3033 }
3032 3034
3033 down_read(&key->sem); 3035 down_read(&key->sem);
@@ -3085,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
3085 goto out_key_put; 3087 goto out_key_put;
3086 } 3088 }
3087 3089
3090 /*
3091 * If we have a domain key then we must set the domainName in the
3092 * for the request.
3093 */
3094 if (is_domain && ses->domainName) {
3095 vol->domainname = kstrndup(ses->domainName,
3096 strlen(ses->domainName),
3097 GFP_KERNEL);
3098 if (!vol->domainname) {
3099 cifs_dbg(FYI, "Unable to allocate %zd bytes for "
3100 "domain\n", len);
3101 rc = -ENOMEM;
3102 kfree(vol->username);
3103 vol->username = NULL;
3104 kzfree(vol->password);
3105 vol->password = NULL;
3106 goto out_key_put;
3107 }
3108 }
3109
3088out_key_put: 3110out_key_put:
3089 up_read(&key->sem); 3111 up_read(&key->sem);
3090 key_put(key); 3112 key_put(key);
@@ -4209,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
4209 strlen(vol->prepath) + 1 : 0; 4231 strlen(vol->prepath) + 1 : 0;
4210 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); 4232 unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
4211 4233
4234 if (unc_len > MAX_TREE_SIZE)
4235 return ERR_PTR(-EINVAL);
4236
4212 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); 4237 full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
4213 if (full_path == NULL) 4238 if (full_path == NULL)
4214 return ERR_PTR(-ENOMEM); 4239 return ERR_PTR(-ENOMEM);
4215 4240
4216 strncpy(full_path, vol->UNC, unc_len); 4241 memcpy(full_path, vol->UNC, unc_len);
4217 pos = full_path + unc_len; 4242 pos = full_path + unc_len;
4218 4243
4219 if (pplen) { 4244 if (pplen) {
4220 *pos = CIFS_DIR_SEP(cifs_sb); 4245 *pos = CIFS_DIR_SEP(cifs_sb);
4221 strncpy(pos + 1, vol->prepath, pplen); 4246 memcpy(pos + 1, vol->prepath, pplen);
4222 pos += pplen; 4247 pos += pplen;
4223 } 4248 }
4224 4249
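
The build_unc_path_to_root() hunk above shows a pattern repeated across this series: validate the input length before allocating, then assemble the string with memcpy() using lengths that were just computed, instead of strncpy(). A minimal userspace sketch of that shape, with MAX_TREE_SIZE as an illustrative stand-in for the kernel constant:

/* Sketch (userspace C) of the build_unc_path_to_root() pattern above:
 * reject oversized input first, then build the string with memcpy()
 * and one explicit nul terminator.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TREE_SIZE 64	/* illustrative bound, not the kernel value */

static char *build_path(const char *unc, const char *prepath, char sep)
{
	size_t unc_len = strnlen(unc, MAX_TREE_SIZE + 1);
	size_t pplen = prepath ? strlen(prepath) + 1 : 0; /* +1 for sep */
	char *full, *pos;

	if (unc_len > MAX_TREE_SIZE)	/* validate before allocating */
		return NULL;

	full = malloc(unc_len + pplen + 1);
	if (!full)
		return NULL;

	memcpy(full, unc, unc_len);	/* lengths are known: no strncpy */
	pos = full + unc_len;
	if (pplen) {
		*pos++ = sep;
		memcpy(pos, prepath, pplen - 1);
		pos += pplen - 1;
	}
	*pos = '\0';			/* single, explicit terminator */
	return full;
}

int main(void)
{
	char *p = build_path("\\\\server\\share", "dir/sub", '\\');

	if (p) {
		puts(p);
		free(p);
	}
	return 0;
}

memcpy() makes the "length already known" assumption explicit, and the single terminating write replaces strncpy()'s implicit, easily miscounted nul handling.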
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f26a48dd2e39..be424e81e3ad 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
69 return full_path; 69 return full_path;
70 70
71 if (dfsplen) 71 if (dfsplen)
72 strncpy(full_path, tcon->treeName, dfsplen); 72 memcpy(full_path, tcon->treeName, dfsplen);
73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb); 73 full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
74 strncpy(full_path + dfsplen + 1, vol->prepath, pplen); 74 memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); 75 convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
76 full_path[dfsplen + pplen] = 0; /* add trailing null */
77 return full_path; 76 return full_path;
78} 77}
79 78
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f383877a6511..5ad83bdb9bea 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1011 *h = unc; 1011 *h = unc;
1012 *len = end - unc; 1012 *len = end - unc;
1013} 1013}
1014
1015/**
1016 * copy_path_name - copy src path to dst, possibly truncating
1017 *
1018 * returns number of bytes written (including trailing nul)
1019 */
1020int copy_path_name(char *dst, const char *src)
1021{
1022 int name_len;
1023
1024 /*
1025 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1026 * will truncate and strlen(dst) will be PATH_MAX-1
1027 */
1028 name_len = strscpy(dst, src, PATH_MAX);
1029 if (WARN_ON_ONCE(name_len < 0))
1030 name_len = PATH_MAX-1;
1031
1032 /* we count the trailing nul */
1033 name_len++;
1034 return name_len;
1035}
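
copy_path_name() above leans on strscpy(), which returns the number of bytes copied (excluding the nul) or -E2BIG on truncation. Since strscpy() is kernel-only, here is a hedged userspace model of the contract — strscpy_demo() and the E2BIG/PATH_MAX stand-ins are inventions for illustration:

/* Userspace model of the copy_path_name() contract above, emulating
 * just enough of strscpy() to show why the helper can unconditionally
 * report "bytes written including the trailing nul".
 */
#include <stdio.h>
#include <string.h>

#define PATH_MAX_DEMO 16	/* tiny stand-in for PATH_MAX */
#define E2BIG_DEMO 7

static long strscpy_demo(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (len >= size) {		/* would not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG_DEMO;
	}
	memcpy(dst, src, len + 1);	/* fits, nul included */
	return (long)len;
}

static int copy_path_name_demo(char *dst, const char *src)
{
	long name_len = strscpy_demo(dst, src, PATH_MAX_DEMO);

	if (name_len < 0)		/* truncated: dst holds size-1 chars */
		name_len = PATH_MAX_DEMO - 1;
	return (int)name_len + 1;	/* count the trailing nul */
}

int main(void)
{
	char buf[PATH_MAX_DEMO];

	printf("%d '%s'\n", copy_path_name_demo(buf, "short"), buf);
	printf("%d '%s'\n",
	       copy_path_name_demo(buf, "a/very/long/path/that/truncates"),
	       buf);
	return 0;
}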
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dcd49ad60c83..4c764ff7edd2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
159 const struct nls_table *nls_cp) 159 const struct nls_table *nls_cp)
160{ 160{
161 char *bcc_ptr = *pbcc_area; 161 char *bcc_ptr = *pbcc_area;
162 int len;
162 163
163 /* copy user */ 164 /* copy user */
164 /* BB what about null user mounts - check that we do this BB */ 165 /* BB what about null user mounts - check that we do this BB */
165 /* copy user */ 166 /* copy user */
166 if (ses->user_name != NULL) { 167 if (ses->user_name != NULL) {
167 strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN); 168 len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
168 bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN); 169 if (WARN_ON_ONCE(len < 0))
170 len = CIFS_MAX_USERNAME_LEN - 1;
171 bcc_ptr += len;
169 } 172 }
170 /* else null user mount */ 173 /* else null user mount */
171 *bcc_ptr = 0; 174 *bcc_ptr = 0;
@@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
173 176
174 /* copy domain */ 177 /* copy domain */
175 if (ses->domainName != NULL) { 178 if (ses->domainName != NULL) {
176 strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 179 len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
177 bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); 180 if (WARN_ON_ONCE(len < 0))
181 len = CIFS_MAX_DOMAINNAME_LEN - 1;
182 bcc_ptr += len;
178 } /* else we will send a null domain name 183 } /* else we will send a null domain name
179 so the server will default to its own domain */ 184 so the server will default to its own domain */
180 *bcc_ptr = 0; 185 *bcc_ptr = 0;
@@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
242 247
243 kfree(ses->serverOS); 248 kfree(ses->serverOS);
244 249
245 ses->serverOS = kzalloc(len + 1, GFP_KERNEL); 250 ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
246 if (ses->serverOS) { 251 if (ses->serverOS) {
247 strncpy(ses->serverOS, bcc_ptr, len); 252 memcpy(ses->serverOS, bcc_ptr, len);
253 ses->serverOS[len] = 0;
248 if (strncmp(ses->serverOS, "OS/2", 4) == 0) 254 if (strncmp(ses->serverOS, "OS/2", 4) == 0)
249 cifs_dbg(FYI, "OS/2 server\n"); 255 cifs_dbg(FYI, "OS/2 server\n");
250 } 256 }
@@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
258 264
259 kfree(ses->serverNOS); 265 kfree(ses->serverNOS);
260 266
261 ses->serverNOS = kzalloc(len + 1, GFP_KERNEL); 267 ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
262 if (ses->serverNOS) 268 if (ses->serverNOS) {
263 strncpy(ses->serverNOS, bcc_ptr, len); 269 memcpy(ses->serverNOS, bcc_ptr, len);
270 ses->serverNOS[len] = 0;
271 }
264 272
265 bcc_ptr += len + 1; 273 bcc_ptr += len + 1;
266 bleft -= len + 1; 274 bleft -= len + 1;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d542f1cf4428..cfb48bd088e1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -679,6 +679,13 @@ static void io_put_req(struct io_kiocb *req)
679 io_free_req(req); 679 io_free_req(req);
680} 680}
681 681
682static unsigned io_cqring_events(struct io_cq_ring *ring)
683{
684 /* See comment at the top of this file */
685 smp_rmb();
686 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
687}
688
682/* 689/*
683 * Find and free completed poll iocbs 690 * Find and free completed poll iocbs
684 */ 691 */
@@ -771,7 +778,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
771static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, 778static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
772 long min) 779 long min)
773{ 780{
774 while (!list_empty(&ctx->poll_list)) { 781 while (!list_empty(&ctx->poll_list) && !need_resched()) {
775 int ret; 782 int ret;
776 783
777 ret = io_do_iopoll(ctx, nr_events, min); 784 ret = io_do_iopoll(ctx, nr_events, min);
@@ -798,6 +805,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
798 unsigned int nr_events = 0; 805 unsigned int nr_events = 0;
799 806
800 io_iopoll_getevents(ctx, &nr_events, 1); 807 io_iopoll_getevents(ctx, &nr_events, 1);
808
809 /*
810 * Ensure we allow local-to-the-cpu processing to take place,
811 * in this case we need to ensure that we reap all events.
812 */
813 cond_resched();
801 } 814 }
802 mutex_unlock(&ctx->uring_lock); 815 mutex_unlock(&ctx->uring_lock);
803} 816}
@@ -805,11 +818,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
805static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, 818static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
806 long min) 819 long min)
807{ 820{
808 int ret = 0; 821 int iters, ret = 0;
809 822
823 /*
824 * We disallow the app entering submit/complete with polling, but we
825 * still need to lock the ring to prevent racing with polled issue
826 * that got punted to a workqueue.
827 */
828 mutex_lock(&ctx->uring_lock);
829
830 iters = 0;
810 do { 831 do {
811 int tmin = 0; 832 int tmin = 0;
812 833
834 /*
835 * Don't enter poll loop if we already have events pending.
836 * If we do, we can potentially be spinning for commands that
837 * already triggered a CQE (eg in error).
838 */
839 if (io_cqring_events(ctx->cq_ring))
840 break;
841
842 /*
843 * If a submit got punted to a workqueue, we can have the
844 * application entering polling for a command before it gets
845 * issued. That app will hold the uring_lock for the duration
846 * of the poll right here, so we need to take a breather every
847 * now and then to ensure that the issue has a chance to add
848 * the poll to the issued list. Otherwise we can spin here
849 * forever, while the workqueue is stuck trying to acquire the
850 * very same mutex.
851 */
852 if (!(++iters & 7)) {
853 mutex_unlock(&ctx->uring_lock);
854 mutex_lock(&ctx->uring_lock);
855 }
856
813 if (*nr_events < min) 857 if (*nr_events < min)
814 tmin = min - *nr_events; 858 tmin = min - *nr_events;
815 859
@@ -819,6 +863,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
819 ret = 0; 863 ret = 0;
820 } while (min && !*nr_events && !need_resched()); 864 } while (min && !*nr_events && !need_resched());
821 865
866 mutex_unlock(&ctx->uring_lock);
822 return ret; 867 return ret;
823} 868}
824 869
@@ -1097,10 +1142,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1097 1142
1098 iter->bvec = bvec + seg_skip; 1143 iter->bvec = bvec + seg_skip;
1099 iter->nr_segs -= seg_skip; 1144 iter->nr_segs -= seg_skip;
1100 iter->count -= (seg_skip << PAGE_SHIFT); 1145 iter->count -= bvec->bv_len + offset;
1101 iter->iov_offset = offset & ~PAGE_MASK; 1146 iter->iov_offset = offset & ~PAGE_MASK;
1102 if (iter->iov_offset)
1103 iter->count -= iter->iov_offset;
1104 } 1147 }
1105 } 1148 }
1106 1149
@@ -2025,6 +2068,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2025{ 2068{
2026 int ret; 2069 int ret;
2027 2070
2071 ret = io_req_defer(ctx, req, s->sqe);
2072 if (ret) {
2073 if (ret != -EIOCBQUEUED) {
2074 io_free_req(req);
2075 io_cqring_add_event(ctx, s->sqe->user_data, ret);
2076 }
2077 return 0;
2078 }
2079
2028 ret = __io_submit_sqe(ctx, req, s, true); 2080 ret = __io_submit_sqe(ctx, req, s, true);
2029 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { 2081 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
2030 struct io_uring_sqe *sqe_copy; 2082 struct io_uring_sqe *sqe_copy;
@@ -2097,13 +2149,6 @@ err:
2097 return; 2149 return;
2098 } 2150 }
2099 2151
2100 ret = io_req_defer(ctx, req, s->sqe);
2101 if (ret) {
2102 if (ret != -EIOCBQUEUED)
2103 goto err_req;
2104 return;
2105 }
2106
2107 /* 2152 /*
2108 * If we already have a head request, queue this one for async 2153 * If we already have a head request, queue this one for async
2109 * submittal once the head completes. If we don't have a head but 2154 * submittal once the head completes. If we don't have a head but
@@ -2280,15 +2325,7 @@ static int io_sq_thread(void *data)
2280 unsigned nr_events = 0; 2325 unsigned nr_events = 0;
2281 2326
2282 if (ctx->flags & IORING_SETUP_IOPOLL) { 2327 if (ctx->flags & IORING_SETUP_IOPOLL) {
2283 /*
2284 * We disallow the app entering submit/complete
2285 * with polling, but we still need to lock the
2286 * ring to prevent racing with polled issue
2287 * that got punted to a workqueue.
2288 */
2289 mutex_lock(&ctx->uring_lock);
2290 io_iopoll_check(ctx, &nr_events, 0); 2328 io_iopoll_check(ctx, &nr_events, 0);
2291 mutex_unlock(&ctx->uring_lock);
2292 } else { 2329 } else {
2293 /* 2330 /*
2294 * Normal IO, just pretend everything completed. 2331 * Normal IO, just pretend everything completed.
@@ -2433,13 +2470,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2433 return submit; 2470 return submit;
2434} 2471}
2435 2472
2436static unsigned io_cqring_events(struct io_cq_ring *ring)
2437{
2438 /* See comment at the top of this file */
2439 smp_rmb();
2440 return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
2441}
2442
2443/* 2473/*
2444 * Wait until events become available, if we don't already have some. The 2474 * Wait until events become available, if we don't already have some. The
2445 * application must reap them itself, as they reside on the shared cq ring. 2475 * application must reap them itself, as they reside on the shared cq ring.
@@ -3190,9 +3220,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3190 min_complete = min(min_complete, ctx->cq_entries); 3220 min_complete = min(min_complete, ctx->cq_entries);
3191 3221
3192 if (ctx->flags & IORING_SETUP_IOPOLL) { 3222 if (ctx->flags & IORING_SETUP_IOPOLL) {
3193 mutex_lock(&ctx->uring_lock);
3194 ret = io_iopoll_check(ctx, &nr_events, min_complete); 3223 ret = io_iopoll_check(ctx, &nr_events, min_complete);
3195 mutex_unlock(&ctx->uring_lock);
3196 } else { 3224 } else {
3197 ret = io_cqring_wait(ctx, min_complete, sig, sigsz); 3225 ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3198 } 3226 }
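
The io_iopoll_check() change above introduces a "breather": the poller now owns uring_lock for the whole loop, so every eighth iteration it drops and retakes the mutex to let a punted worker acquire it. A pthread sketch of the same idea (illustrative only; the kernel path uses its own mutex and scheduling):

/* The poller holds the lock across its loop but releases and retakes
 * it every eighth iteration so a worker stuck on the same mutex can
 * get in; without the breather, 'issued' would never be set.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int issued;		/* set by the worker, read by the poller */

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* blocks until the poller breathes */
	issued = 1;
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int iters = 0;

	pthread_mutex_lock(&lock);
	pthread_create(&t, NULL, worker, NULL);

	while (!issued) {
		if (!(++iters & 7)) {	/* the breather */
			pthread_mutex_unlock(&lock);
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("polled %d iterations before the worker got the lock\n",
	       iters);
	return 0;
}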
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..0adfd8840110 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode)) 1487 if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
1488 nfs_file_set_open_context(file, ctx); 1488 nfs_file_set_open_context(file, ctx);
1489 else 1489 else
1490 err = -ESTALE; 1490 err = -EOPENSTALE;
1491out: 1491out:
1492 return err; 1492 return err;
1493} 1493}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..222d7115db71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
401 unsigned long bytes = 0; 401 unsigned long bytes = 0;
402 struct nfs_direct_req *dreq = hdr->dreq; 402 struct nfs_direct_req *dreq = hdr->dreq;
403 403
404 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
405 goto out_put;
406
407 spin_lock(&dreq->lock); 404 spin_lock(&dreq->lock);
408 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) 405 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
409 dreq->error = hdr->error; 406 dreq->error = hdr->error;
410 else 407
408 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
409 spin_unlock(&dreq->lock);
410 goto out_put;
411 }
412
413 if (hdr->good_bytes != 0)
411 nfs_direct_good_bytes(dreq, hdr); 414 nfs_direct_good_bytes(dreq, hdr);
412 415
416 if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
417 dreq->error = 0;
418
413 spin_unlock(&dreq->lock); 419 spin_unlock(&dreq->lock);
414 420
415 while (!list_empty(&hdr->pages)) { 421 while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
782 bool request_commit = false; 788 bool request_commit = false;
783 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 789 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
784 790
785 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
786 goto out_put;
787
788 nfs_init_cinfo_from_dreq(&cinfo, dreq); 791 nfs_init_cinfo_from_dreq(&cinfo, dreq);
789 792
790 spin_lock(&dreq->lock); 793 spin_lock(&dreq->lock);
791 794
792 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) 795 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
793 dreq->error = hdr->error; 796 dreq->error = hdr->error;
794 if (dreq->error == 0) { 797
798 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
799 spin_unlock(&dreq->lock);
800 goto out_put;
801 }
802
803 if (hdr->good_bytes != 0) {
795 nfs_direct_good_bytes(dreq, hdr); 804 nfs_direct_good_bytes(dreq, hdr);
796 if (nfs_write_need_commit(hdr)) { 805 if (nfs_write_need_commit(hdr)) {
797 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 806 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
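
The reordered completion above takes dreq->lock first, records hdr->error, and only then takes the REDO shortcut, so the flag tests and the error update form one atomic step. A small pthread sketch of that ordering, with a hypothetical dreq_demo in place of the kernel structures:

/* Record the error and test the requeue flag under the same lock, so
 * a requeued request cannot race with another completion reading the
 * error field.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dreq_demo {
	pthread_mutex_t lock;
	int error;
	long good_bytes;
};

static void complete(struct dreq_demo *d, int hdr_error, bool redo,
		     long good_bytes)
{
	pthread_mutex_lock(&d->lock);
	if (hdr_error)
		d->error = hdr_error;	/* record first, under the lock */
	if (redo) {			/* requeue path: error already kept */
		pthread_mutex_unlock(&d->lock);
		return;
	}
	if (good_bytes)
		d->good_bytes += good_bytes;
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dreq_demo d = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	complete(&d, -5, true, 0);	/* REDO no longer loses the error */
	printf("recorded error: %d\n", d.error);
	return 0;
}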
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/nfs_fs.h> 10#include <linux/nfs_fs.h>
11#include <linux/nfs_mount.h>
11#include <linux/nfs_page.h> 12#include <linux/nfs_page.h>
12#include <linux/module.h> 13#include <linux/module.h>
13#include <linux/sched/mm.h> 14#include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
928 pgm = &pgio->pg_mirrors[0]; 929 pgm = &pgio->pg_mirrors[0];
929 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 930 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
930 931
931 pgio->pg_maxretrans = io_maxretrans; 932 if (NFS_SERVER(pgio->pg_inode)->flags &
933 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
934 pgio->pg_maxretrans = io_maxretrans;
932 return; 935 return;
933out_nolseg: 936out_nolseg:
934 if (pgio->pg_error < 0) 937 if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
940 pgio->pg_lseg); 943 pgio->pg_lseg);
941 pnfs_put_lseg(pgio->pg_lseg); 944 pnfs_put_lseg(pgio->pg_lseg);
942 pgio->pg_lseg = NULL; 945 pgio->pg_lseg = NULL;
946 pgio->pg_maxretrans = 0;
943 nfs_pageio_reset_read_mds(pgio); 947 nfs_pageio_reset_read_mds(pgio);
944} 948}
945 949
@@ -1000,7 +1004,9 @@ retry:
1000 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; 1004 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
1001 } 1005 }
1002 1006
1003 pgio->pg_maxretrans = io_maxretrans; 1007 if (NFS_SERVER(pgio->pg_inode)->flags &
1008 (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
1009 pgio->pg_maxretrans = io_maxretrans;
1004 return; 1010 return;
1005 1011
1006out_mds: 1012out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
1010 pgio->pg_lseg); 1016 pgio->pg_lseg);
1011 pnfs_put_lseg(pgio->pg_lseg); 1017 pnfs_put_lseg(pgio->pg_lseg);
1012 pgio->pg_lseg = NULL; 1018 pgio->pg_lseg = NULL;
1019 pgio->pg_maxretrans = 0;
1013 nfs_pageio_reset_write_mds(pgio); 1020 nfs_pageio_reset_write_mds(pgio);
1014} 1021}
1015 1022
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
1148 break; 1155 break;
1149 case -NFS4ERR_RETRY_UNCACHED_REP: 1156 case -NFS4ERR_RETRY_UNCACHED_REP:
1150 break; 1157 break;
1151 case -EAGAIN:
1152 return -NFS4ERR_RESET_TO_PNFS;
1153 /* Invalidate Layout errors */ 1158 /* Invalidate Layout errors */
1154 case -NFS4ERR_PNFS_NO_LAYOUT: 1159 case -NFS4ERR_PNFS_NO_LAYOUT:
1155 case -ESTALE: /* mapped NFS4ERR_STALE */ 1160 case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
1210 case -EBADHANDLE: 1215 case -EBADHANDLE:
1211 case -ELOOP: 1216 case -ELOOP:
1212 case -ENOSPC: 1217 case -ENOSPC:
1213 case -EAGAIN:
1214 break; 1218 break;
1215 case -EJUKEBOX: 1219 case -EJUKEBOX:
1216 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1220 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
1445 ff_layout_read_prepare_common(task, hdr); 1449 ff_layout_read_prepare_common(task, hdr);
1446} 1450}
1447 1451
1448static void
1449ff_layout_io_prepare_transmit(struct rpc_task *task,
1450 void *data)
1451{
1452 struct nfs_pgio_header *hdr = data;
1453
1454 if (!pnfs_is_valid_lseg(hdr->lseg))
1455 rpc_exit(task, -EAGAIN);
1456}
1457
1458static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1452static void ff_layout_read_call_done(struct rpc_task *task, void *data)
1459{ 1453{
1460 struct nfs_pgio_header *hdr = data; 1454 struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
1740 1734
1741static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { 1735static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1742 .rpc_call_prepare = ff_layout_read_prepare_v3, 1736 .rpc_call_prepare = ff_layout_read_prepare_v3,
1743 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1744 .rpc_call_done = ff_layout_read_call_done, 1737 .rpc_call_done = ff_layout_read_call_done,
1745 .rpc_count_stats = ff_layout_read_count_stats, 1738 .rpc_count_stats = ff_layout_read_count_stats,
1746 .rpc_release = ff_layout_read_release, 1739 .rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1748 1741
1749static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { 1742static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1750 .rpc_call_prepare = ff_layout_read_prepare_v4, 1743 .rpc_call_prepare = ff_layout_read_prepare_v4,
1751 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1752 .rpc_call_done = ff_layout_read_call_done, 1744 .rpc_call_done = ff_layout_read_call_done,
1753 .rpc_count_stats = ff_layout_read_count_stats, 1745 .rpc_count_stats = ff_layout_read_count_stats,
1754 .rpc_release = ff_layout_read_release, 1746 .rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1756 1748
1757static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { 1749static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1758 .rpc_call_prepare = ff_layout_write_prepare_v3, 1750 .rpc_call_prepare = ff_layout_write_prepare_v3,
1759 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1760 .rpc_call_done = ff_layout_write_call_done, 1751 .rpc_call_done = ff_layout_write_call_done,
1761 .rpc_count_stats = ff_layout_write_count_stats, 1752 .rpc_count_stats = ff_layout_write_count_stats,
1762 .rpc_release = ff_layout_write_release, 1753 .rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1764 1755
1765static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { 1756static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1766 .rpc_call_prepare = ff_layout_write_prepare_v4, 1757 .rpc_call_prepare = ff_layout_write_prepare_v4,
1767 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
1768 .rpc_call_done = ff_layout_write_call_done, 1758 .rpc_call_done = ff_layout_write_call_done,
1769 .rpc_count_stats = ff_layout_write_count_stats, 1759 .rpc_count_stats = ff_layout_write_count_stats,
1770 .rpc_release = ff_layout_write_release, 1760 .rpc_release = ff_layout_write_release,
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..c764cfe456e5 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,21 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 1403 if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
1404 return 0; 1404 return 0;
1405 1405
1406 /* No fileid? Just exit */
1407 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
1408 return 0;
1406 /* Has the inode gone and changed behind our back? */ 1409 /* Has the inode gone and changed behind our back? */
1407 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) 1410 if (nfsi->fileid != fattr->fileid) {
1411 /* Is this perhaps the mounted-on fileid? */
1412 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1413 nfsi->fileid == fattr->mounted_on_fileid)
1414 return 0;
1408 return -ESTALE; 1415 return -ESTALE;
1416 }
1409 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1417 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
1410 return -ESTALE; 1418 return -ESTALE;
1411 1419
1420
1412 if (!nfs_file_has_buffered_writers(nfsi)) { 1421 if (!nfs_file_has_buffered_writers(nfsi)) {
1413 /* Verify a few of the more important attributes */ 1422 /* Verify a few of the more important attributes */
1414 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr)) 1423 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1777,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
1768EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc); 1777EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
1769 1778
1770 1779
1771static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
1772 struct nfs_fattr *fattr)
1773{
1774 bool ret1 = true, ret2 = true;
1775
1776 if (fattr->valid & NFS_ATTR_FATTR_FILEID)
1777 ret1 = (nfsi->fileid == fattr->fileid);
1778 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1779 ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
1780 return ret1 || ret2;
1781}
1782
1783/* 1780/*
1784 * Many nfs protocol calls return the new file attributes after 1781 * Many nfs protocol calls return the new file attributes after
1785 * an operation. Here we update the inode to reflect the state 1782 * an operation. Here we update the inode to reflect the state
@@ -1810,7 +1807,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1810 nfs_display_fhandle_hash(NFS_FH(inode)), 1807 nfs_display_fhandle_hash(NFS_FH(inode)),
1811 atomic_read(&inode->i_count), fattr->valid); 1808 atomic_read(&inode->i_count), fattr->valid);
1812 1809
1813 if (!nfs_fileid_valid(nfsi, fattr)) { 1810 /* No fileid? Just exit */
1811 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
1812 return 0;
1813 /* Has the inode gone and changed behind our back? */
1814 if (nfsi->fileid != fattr->fileid) {
1815 /* Is this perhaps the mounted-on fileid? */
1816 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1817 nfsi->fileid == fattr->mounted_on_fileid)
1818 return 0;
1814 printk(KERN_ERR "NFS: server %s error: fileid changed\n" 1819 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
1815 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n", 1820 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
1816 NFS_SERVER(inode)->nfs_client->cl_hostname, 1821 NFS_SERVER(inode)->nfs_client->cl_hostname,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..e64f810223be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
775 } 775 }
776} 776}
777 777
778static inline bool nfs_error_is_fatal_on_server(int err)
779{
780 switch (err) {
781 case 0:
782 case -ERESTARTSYS:
783 case -EINTR:
784 return false;
785 }
786 return nfs_error_is_fatal(err);
787}
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
73 if (IS_ERR(inode)) { 73 if (IS_ERR(inode)) {
74 err = PTR_ERR(inode); 74 err = PTR_ERR(inode);
75 switch (err) { 75 switch (err) {
76 case -EPERM:
77 case -EACCES:
78 case -EDQUOT:
79 case -ENOSPC:
80 case -EROFS:
81 goto out_put_ctx;
82 default: 76 default:
77 goto out_put_ctx;
78 case -ENOENT:
79 case -ESTALE:
80 case -EISDIR:
81 case -ENOTDIR:
82 case -ELOOP:
83 goto out_drop; 83 goto out_drop;
84 } 84 }
85 } 85 }
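
The nfs4_file_open() hunk relies on the fact that C switch labels are order-independent: default may appear first, letting the common exit sit ahead of the named errno cases that share the out_drop path. A compilable demonstration:

/* 'default' may precede the named cases; only the label matched for a
 * given value determines where execution lands.
 */
#include <errno.h>
#include <stdio.h>

static const char *classify(int err)
{
	switch (err) {
	default:
		return "put_ctx";	/* e.g. -EPERM, -EACCES, -ENOSPC */
	case -ENOENT:
	case -ESTALE:
	case -EISDIR:
	case -ENOTDIR:
	case -ELOOP:
		return "drop_dentry";	/* lookup is stale: drop it */
	}
}

int main(void)
{
	printf("-EPERM  -> %s\n", classify(-EPERM));
	printf("-ESTALE -> %s\n", classify(-ESTALE));
	return 0;
}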
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
590 } 590 }
591 591
592 hdr->res.fattr = &hdr->fattr; 592 hdr->res.fattr = &hdr->fattr;
593 hdr->res.count = count; 593 hdr->res.count = 0;
594 hdr->res.eof = 0; 594 hdr->res.eof = 0;
595 hdr->res.verf = &hdr->verf; 595 hdr->res.verf = &hdr->verf;
596 nfs_fattr_init(&hdr->fattr); 596 nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, 1251int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
1252 struct nfs_pgio_header *hdr) 1252 struct nfs_pgio_header *hdr)
1253{ 1253{
1254 LIST_HEAD(failed); 1254 LIST_HEAD(pages);
1255 1255
1256 desc->pg_io_completion = hdr->io_completion; 1256 desc->pg_io_completion = hdr->io_completion;
1257 desc->pg_dreq = hdr->dreq; 1257 desc->pg_dreq = hdr->dreq;
1258 while (!list_empty(&hdr->pages)) { 1258 list_splice_init(&hdr->pages, &pages);
1259 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 1259 while (!list_empty(&pages)) {
1260 struct nfs_page *req = nfs_list_entry(pages.next);
1260 1261
1261 if (!nfs_pageio_add_request(desc, req)) 1262 if (!nfs_pageio_add_request(desc, req))
1262 nfs_list_move_request(req, &failed); 1263 break;
1263 } 1264 }
1264 nfs_pageio_complete(desc); 1265 nfs_pageio_complete(desc);
1265 if (!list_empty(&failed)) { 1266 if (!list_empty(&pages)) {
1266 list_move(&failed, &hdr->pages); 1267 int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
1267 return desc->pg_error < 0 ? desc->pg_error : -EIO; 1268 hdr->completion_ops->error_cleanup(&pages, err);
1269 nfs_set_pgio_error(hdr, err, hdr->io_start);
1270 return err;
1268 } 1271 }
1269 return 0; 1272 return 0;
1270} 1273}
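
nfs_pageio_resend() above now splices the whole request list to a local head, stops at the first add failure, and error-cleans whatever is left, rather than collecting failures request by request. A sketch of that flow over a plain linked list (struct req and the -5/-EIO stand-in are hypothetical):

/* Splice the list, consume until the first failure, then run a single
 * cleanup pass over the remainder.
 */
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

static int add_request(struct req *r)
{
	return r->id != 3;	/* pretend request 3 fails to queue */
}

static int resend(struct req **hdr_pages)
{
	struct req *pages = *hdr_pages;	/* splice: take the whole list */
	int err = 0;

	*hdr_pages = NULL;
	while (pages) {
		struct req *r = pages;

		if (!add_request(r)) {
			err = -5;	/* -EIO stand-in */
			break;		/* r and its tail stay on 'pages' */
		}
		pages = r->next;
		free(r);		/* consumed by the descriptor */
	}
	while (pages) {			/* one cleanup pass for the rest */
		struct req *r = pages;

		pages = r->next;
		fprintf(stderr, "cleanup req %d\n", r->id);
		free(r);
	}
	return err;
}

int main(void)
{
	struct req *head = NULL;

	for (int i = 5; i >= 1; i--) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->id = i;
		r->next = head;
		head = r;
	}
	printf("resend: %d\n", resend(&head));
	return 0;
}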
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
627 /* Add this address as an alias */ 627 /* Add this address as an alias */
628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args, 628 rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
629 rpc_clnt_test_and_add_xprt, NULL); 629 rpc_clnt_test_and_add_xprt, NULL);
630 } else 630 continue;
631 clp = get_v3_ds_connect(mds_srv, 631 }
632 (struct sockaddr *)&da->da_addr, 632 clp = get_v3_ds_connect(mds_srv,
633 da->da_addrlen, IPPROTO_TCP, 633 (struct sockaddr *)&da->da_addr,
634 timeo, retrans); 634 da->da_addrlen, IPPROTO_TCP,
635 timeo, retrans);
636 if (IS_ERR(clp))
637 continue;
638 clp->cl_rpcclient->cl_softerr = 0;
639 clp->cl_rpcclient->cl_softrtry = 0;
635 } 640 }
636 641
637 if (IS_ERR(clp)) { 642 if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
594 /* Emulate the eof flag, which isn't normally needed in NFSv2 594 /* Emulate the eof flag, which isn't normally needed in NFSv2
595 * as it is guaranteed to always return the file attributes 595 * as it is guaranteed to always return the file attributes
596 */ 596 */
597 if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) 597 if ((hdr->res.count == 0 && hdr->args.count > 0) ||
598 hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
598 hdr->res.eof = 1; 599 hdr->res.eof = 1;
599 } 600 }
600 return 0; 601 return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
615 616
616static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 617static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
617{ 618{
618 if (task->tk_status >= 0) 619 if (task->tk_status >= 0) {
620 hdr->res.count = hdr->args.count;
619 nfs_writeback_update_inode(hdr); 621 nfs_writeback_update_inode(hdr);
622 }
620 return 0; 623 return 0;
621} 624}
622 625
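
The nfs_read_done() change widens the NFSv2 eof heuristic: a zero-byte reply to a nonzero request now also signals eof, which guards against stale file-size attributes. The predicate as a pure function:

/* eof if the server returned nothing for a nonzero request, or if the
 * read reaches the file size reported in the attributes.
 */
#include <stdio.h>

static int nfs2_eof(unsigned long long offset, unsigned int args_count,
		    unsigned int res_count, unsigned long long fsize)
{
	return (res_count == 0 && args_count > 0) ||
	       offset + res_count >= fsize;
}

int main(void)
{
	/* zero-byte reply to a 4k request: eof even if attrs are stale */
	printf("%d\n", nfs2_eof(8192, 4096, 0, 1048576));
	/* mid-file full reply: not eof */
	printf("%d\n", nfs2_eof(8192, 4096, 4096, 1048576));
	return 0;
}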
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
91} 91}
92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 92EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
93 93
94static void nfs_readpage_release(struct nfs_page *req) 94static void nfs_readpage_release(struct nfs_page *req, int error)
95{ 95{
96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); 96 struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
97 struct page *page = req->wb_page;
97 98
98 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, 99 dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
99 (unsigned long long)NFS_FILEID(inode), req->wb_bytes, 100 (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
100 (long long)req_offset(req)); 101 (long long)req_offset(req));
101 102
103 if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
104 SetPageError(page);
102 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { 105 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
103 if (PageUptodate(req->wb_page)) 106 struct address_space *mapping = page_file_mapping(page);
104 nfs_readpage_to_fscache(inode, req->wb_page, 0);
105 107
106 unlock_page(req->wb_page); 108 if (PageUptodate(page))
109 nfs_readpage_to_fscache(inode, page, 0);
110 else if (!PageError(page) && !PagePrivate(page))
111 generic_error_remove_page(mapping, page);
112 unlock_page(page);
107 } 113 }
108 nfs_release_request(req); 114 nfs_release_request(req);
109} 115}
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
131 &nfs_async_read_completion_ops); 137 &nfs_async_read_completion_ops);
132 if (!nfs_pageio_add_request(&pgio, new)) { 138 if (!nfs_pageio_add_request(&pgio, new)) {
133 nfs_list_remove_request(new); 139 nfs_list_remove_request(new);
134 nfs_readpage_release(new); 140 nfs_readpage_release(new, pgio.pg_error);
135 } 141 }
136 nfs_pageio_complete(&pgio); 142 nfs_pageio_complete(&pgio);
137 143
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
153static void nfs_read_completion(struct nfs_pgio_header *hdr) 159static void nfs_read_completion(struct nfs_pgio_header *hdr)
154{ 160{
155 unsigned long bytes = 0; 161 unsigned long bytes = 0;
162 int error;
156 163
157 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 164 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
158 goto out; 165 goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
179 zero_user_segment(page, start, end); 186 zero_user_segment(page, start, end);
180 } 187 }
181 } 188 }
189 error = 0;
182 bytes += req->wb_bytes; 190 bytes += req->wb_bytes;
183 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { 191 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
184 if (bytes <= hdr->good_bytes) 192 if (bytes <= hdr->good_bytes)
185 nfs_page_group_set_uptodate(req); 193 nfs_page_group_set_uptodate(req);
194 else {
195 error = hdr->error;
196 xchg(&nfs_req_openctx(req)->error, error);
197 }
186 } else 198 } else
187 nfs_page_group_set_uptodate(req); 199 nfs_page_group_set_uptodate(req);
188 nfs_list_remove_request(req); 200 nfs_list_remove_request(req);
189 nfs_readpage_release(req); 201 nfs_readpage_release(req, error);
190 } 202 }
191out: 203out:
192 hdr->release(hdr); 204 hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
213 while (!list_empty(head)) { 225 while (!list_empty(head)) {
214 req = nfs_list_entry(head->next); 226 req = nfs_list_entry(head->next);
215 nfs_list_remove_request(req); 227 nfs_list_remove_request(req);
216 nfs_readpage_release(req); 228 nfs_readpage_release(req, error);
217 } 229 }
218} 230}
219 231
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
337 goto out; 349 goto out;
338 } 350 }
339 351
352 xchg(&ctx->error, 0);
340 error = nfs_readpage_async(ctx, inode, page); 353 error = nfs_readpage_async(ctx, inode, page);
341 354 if (!error) {
355 error = wait_on_page_locked_killable(page);
356 if (!PageUptodate(page) && !error)
357 error = xchg(&ctx->error, 0);
358 }
342out: 359out:
343 put_nfs_open_context(ctx); 360 put_nfs_open_context(ctx);
344 return error; 361 return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
372 zero_user_segment(page, len, PAGE_SIZE); 389 zero_user_segment(page, len, PAGE_SIZE);
373 if (!nfs_pageio_add_request(desc->pgio, new)) { 390 if (!nfs_pageio_add_request(desc->pgio, new)) {
374 nfs_list_remove_request(new); 391 nfs_list_remove_request(new);
375 nfs_readpage_release(new);
376 error = desc->pgio->pg_error; 392 error = desc->pgio->pg_error;
393 nfs_readpage_release(new, error);
377 goto out; 394 goto out;
378 } 395 }
379 return 0; 396 return 0;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..85ca49549b39 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 57static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
58static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 58static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
59static const struct nfs_rw_ops nfs_rw_write_ops; 59static const struct nfs_rw_ops nfs_rw_write_ops;
60static void nfs_inode_remove_request(struct nfs_page *req);
60static void nfs_clear_request_commit(struct nfs_page *req); 61static void nfs_clear_request_commit(struct nfs_page *req);
61static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 62static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
62 struct inode *inode); 63 struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
591 592
592static void nfs_write_error(struct nfs_page *req, int error) 593static void nfs_write_error(struct nfs_page *req, int error)
593{ 594{
595 nfs_set_pageerror(page_file_mapping(req->wb_page));
594 nfs_mapping_set_error(req->wb_page, error); 596 nfs_mapping_set_error(req->wb_page, error);
597 nfs_inode_remove_request(req);
595 nfs_end_page_writeback(req); 598 nfs_end_page_writeback(req);
596 nfs_release_request(req); 599 nfs_release_request(req);
597} 600}
598 601
599static bool
600nfs_error_is_fatal_on_server(int err)
601{
602 switch (err) {
603 case 0:
604 case -ERESTARTSYS:
605 case -EINTR:
606 return false;
607 }
608 return nfs_error_is_fatal(err);
609}
610
611/* 602/*
612 * Find an associated nfs write request, and prepare to flush it out 603 * Find an associated nfs write request, and prepare to flush it out
613 * May return an error if the user signalled nfs_wait_on_request(). 604 * May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
615static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 606static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
616 struct page *page) 607 struct page *page)
617{ 608{
618 struct address_space *mapping;
619 struct nfs_page *req; 609 struct nfs_page *req;
620 int ret = 0; 610 int ret = 0;
621 611
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
630 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 620 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
631 621
632 /* If there is a fatal error that covers this write, just exit */ 622 /* If there is a fatal error that covers this write, just exit */
633 ret = 0; 623 ret = pgio->pg_error;
634 mapping = page_file_mapping(page); 624 if (nfs_error_is_fatal_on_server(ret))
635 if (test_bit(AS_ENOSPC, &mapping->flags) ||
636 test_bit(AS_EIO, &mapping->flags))
637 goto out_launder; 625 goto out_launder;
638 626
627 ret = 0;
639 if (!nfs_pageio_add_request(pgio, req)) { 628 if (!nfs_pageio_add_request(pgio, req)) {
640 ret = pgio->pg_error; 629 ret = pgio->pg_error;
641 /* 630 /*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
647 } else 636 } else
648 ret = -EAGAIN; 637 ret = -EAGAIN;
649 nfs_redirty_request(req); 638 nfs_redirty_request(req);
639 pgio->pg_error = 0;
650 } else 640 } else
651 nfs_add_stats(page_file_mapping(page)->host, 641 nfs_add_stats(page_file_mapping(page)->host,
652 NFSIOS_WRITEPAGES, 1); 642 NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
666 ret = nfs_page_async_flush(pgio, page); 656 ret = nfs_page_async_flush(pgio, page);
667 if (ret == -EAGAIN) { 657 if (ret == -EAGAIN) {
668 redirty_page_for_writepage(wbc, page); 658 redirty_page_for_writepage(wbc, page);
669 ret = 0; 659 ret = AOP_WRITEPAGE_ACTIVATE;
670 } 660 }
671 return ret; 661 return ret;
672} 662}
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
685 nfs_pageio_init_write(&pgio, inode, 0, 675 nfs_pageio_init_write(&pgio, inode, 0,
686 false, &nfs_async_write_completion_ops); 676 false, &nfs_async_write_completion_ops);
687 err = nfs_do_writepage(page, wbc, &pgio); 677 err = nfs_do_writepage(page, wbc, &pgio);
678 pgio.pg_error = 0;
688 nfs_pageio_complete(&pgio); 679 nfs_pageio_complete(&pgio);
689 if (err < 0) 680 if (err < 0)
690 return err; 681 return err;
691 if (pgio.pg_error < 0) 682 if (nfs_error_is_fatal(pgio.pg_error))
692 return pgio.pg_error; 683 return pgio.pg_error;
693 return 0; 684 return 0;
694} 685}
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
698 int ret; 689 int ret;
699 690
700 ret = nfs_writepage_locked(page, wbc); 691 ret = nfs_writepage_locked(page, wbc);
701 unlock_page(page); 692 if (ret != AOP_WRITEPAGE_ACTIVATE)
693 unlock_page(page);
702 return ret; 694 return ret;
703} 695}
704 696
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
707 int ret; 699 int ret;
708 700
709 ret = nfs_do_writepage(page, wbc, data); 701 ret = nfs_do_writepage(page, wbc, data);
710 unlock_page(page); 702 if (ret != AOP_WRITEPAGE_ACTIVATE)
703 unlock_page(page);
711 return ret; 704 return ret;
712} 705}
713 706
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
733 &nfs_async_write_completion_ops); 726 &nfs_async_write_completion_ops);
734 pgio.pg_io_completion = ioc; 727 pgio.pg_io_completion = ioc;
735 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 728 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
729 pgio.pg_error = 0;
736 nfs_pageio_complete(&pgio); 730 nfs_pageio_complete(&pgio);
737 nfs_io_completion_put(ioc); 731 nfs_io_completion_put(ioc);
738 732
739 if (err < 0) 733 if (err < 0)
740 goto out_err; 734 goto out_err;
741 err = pgio.pg_error; 735 err = pgio.pg_error;
742 if (err < 0) 736 if (nfs_error_is_fatal(err))
743 goto out_err; 737 goto out_err;
744 return 0; 738 return 0;
745out_err: 739out_err:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
571 */ 571 */
572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) 572static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
573{ 573{
574 struct nfsd_net *nn = v; 574 struct nfsd_net *nn = m->private;
575 575
576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries); 576 seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
577 seq_printf(m, "num entries: %u\n", 577 seq_printf(m, "num entries: %u\n",
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..3cf4f6aa48d6 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
1171 return inode; 1171 return inode;
1172} 1172}
1173 1173
1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1174static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
1175{ 1175{
1176 struct inode *inode; 1176 struct inode *inode;
1177 1177
1178 inode = nfsd_get_inode(dir->i_sb, mode); 1178 inode = nfsd_get_inode(dir->i_sb, mode);
1179 if (!inode) 1179 if (!inode)
1180 return -ENOMEM; 1180 return -ENOMEM;
1181 if (ncl) {
1182 inode->i_private = ncl;
1183 kref_get(&ncl->cl_ref);
1184 }
1181 d_add(dentry, inode); 1185 d_add(dentry, inode);
1182 inc_nlink(dir); 1186 inc_nlink(dir);
1183 fsnotify_mkdir(dir, dentry); 1187 fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
1194 dentry = d_alloc_name(parent, name); 1198 dentry = d_alloc_name(parent, name);
1195 if (!dentry) 1199 if (!dentry)
1196 goto out_err; 1200 goto out_err;
1197 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600); 1201 ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
1198 if (ret) 1202 if (ret)
1199 goto out_err; 1203 goto out_err;
1200 if (ncl) {
1201 d_inode(dentry)->i_private = ncl;
1202 kref_get(&ncl->cl_ref);
1203 }
1204out: 1204out:
1205 inode_unlock(dir); 1205 inode_unlock(dir);
1206 return dentry; 1206 return dentry;
1207out_err: 1207out_err:
1208 dput(dentry);
1208 dentry = ERR_PTR(ret); 1209 dentry = ERR_PTR(ret);
1209 goto out; 1210 goto out;
1210} 1211}
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
1214 struct nfsdfs_client *ncl = inode->i_private; 1215 struct nfsdfs_client *ncl = inode->i_private;
1215 1216
1216 inode->i_private = NULL; 1217 inode->i_private = NULL;
1217 synchronize_rcu();
1218 kref_put(&ncl->cl_ref, ncl->cl_release); 1218 kref_put(&ncl->cl_ref, ncl->cl_release);
1219} 1219}
1220 1220
1221
1222static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode) 1221static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
1223{ 1222{
1224 struct nfsdfs_client *nc = inode->i_private; 1223 struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
1232{ 1231{
1233 struct nfsdfs_client *nc; 1232 struct nfsdfs_client *nc;
1234 1233
1235 rcu_read_lock(); 1234 inode_lock_shared(inode);
1236 nc = __get_nfsdfs_client(inode); 1235 nc = __get_nfsdfs_client(inode);
1237 rcu_read_unlock(); 1236 inode_unlock_shared(inode);
1238 return nc; 1237 return nc;
1239} 1238}
1240/* from __rpc_unlink */ 1239/* from __rpc_unlink */
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; 1811 return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
1812} 1812}
1813 1813
1814/* 1814/* Read a page's worth of file data into the page cache. */
1815 * Read a page's worth of file data into the page cache. Return the page
1816 * locked.
1817 */
1818static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset) 1815static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1819{ 1816{
1820 struct page *page; 1817 struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
1826 put_page(page); 1823 put_page(page);
1827 return ERR_PTR(-EIO); 1824 return ERR_PTR(-EIO);
1828 } 1825 }
1829 lock_page(page);
1830 return page; 1826 return page;
1831} 1827}
1832 1828
1833/* 1829/*
1830 * Lock two pages, ensuring that we lock in offset order if the pages are from
1831 * the same file.
1832 */
1833static void vfs_lock_two_pages(struct page *page1, struct page *page2)
1834{
1835 /* Always lock in order of increasing index. */
1836 if (page1->index > page2->index)
1837 swap(page1, page2);
1838
1839 lock_page(page1);
1840 if (page1 != page2)
1841 lock_page(page2);
1842}
1843
1844/* Unlock two pages, being careful not to unlock the same page twice. */
1845static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
1846{
1847 unlock_page(page1);
1848 if (page1 != page2)
1849 unlock_page(page2);
1850}
1851
1852/*
1834 * Compare extents of two files to see if they are the same. 1853 * Compare extents of two files to see if they are the same.
1835 * Caller must have locked both inodes to prevent write races. 1854 * Caller must have locked both inodes to prevent write races.
1836 */ 1855 */
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1867 dest_page = vfs_dedupe_get_page(dest, destoff); 1886 dest_page = vfs_dedupe_get_page(dest, destoff);
1868 if (IS_ERR(dest_page)) { 1887 if (IS_ERR(dest_page)) {
1869 error = PTR_ERR(dest_page); 1888 error = PTR_ERR(dest_page);
1870 unlock_page(src_page);
1871 put_page(src_page); 1889 put_page(src_page);
1872 goto out_error; 1890 goto out_error;
1873 } 1891 }
1892
1893 vfs_lock_two_pages(src_page, dest_page);
1894
1895 /*
1896 * Now that we've locked both pages, make sure they're still
1897 * mapped to the file data we're interested in. If not,
1898 * someone is invalidating pages on us and we lose.
1899 */
1900 if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
1901 src_page->mapping != src->i_mapping ||
1902 dest_page->mapping != dest->i_mapping) {
1903 same = false;
1904 goto unlock;
1905 }
1906
1874 src_addr = kmap_atomic(src_page); 1907 src_addr = kmap_atomic(src_page);
1875 dest_addr = kmap_atomic(dest_page); 1908 dest_addr = kmap_atomic(dest_page);
1876 1909
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
1882 1915
1883 kunmap_atomic(dest_addr); 1916 kunmap_atomic(dest_addr);
1884 kunmap_atomic(src_addr); 1917 kunmap_atomic(src_addr);
1885 unlock_page(dest_page); 1918unlock:
1886 unlock_page(src_page); 1919 vfs_unlock_two_pages(src_page, dest_page);
1887 put_page(dest_page); 1920 put_page(dest_page);
1888 put_page(src_page); 1921 put_page(src_page);
1889 1922
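
vfs_lock_two_pages()/vfs_unlock_two_pages() above encode the classic deadlock-avoidance rule: impose one global order (here, page index) and collapse the same-object case so nothing is locked twice. A pthread sketch of the same helpers:

/* Always lock in a fixed order so two threads handling the same pair
 * cannot deadlock, and never lock or unlock one object twice.
 */
#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int index;
};

static void lock_two(struct obj *a, struct obj *b)
{
	if (a->index > b->index) {	/* canonical order, like page index */
		struct obj *t = a; a = b; b = t;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)			/* src and dest may be the same */
		pthread_mutex_lock(&b->lock);
}

static void unlock_two(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct obj y = { PTHREAD_MUTEX_INITIALIZER, 2 };

	lock_two(&y, &x);	/* argument order no longer matters */
	puts("both locked");
	unlock_two(&y, &x);
	return 0;
}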
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
119 } 119 }
120 if (seq_has_overflowed(m)) 120 if (seq_has_overflowed(m))
121 goto Eoverflow; 121 goto Eoverflow;
122 p = m->op->next(m, p, &m->index);
122 if (pos + m->count > offset) { 123 if (pos + m->count > offset) {
123 m->from = offset - pos; 124 m->from = offset - pos;
124 m->count -= m->from; 125 m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
126 } 127 }
127 pos += m->count; 128 pos += m->count;
128 m->count = 0; 129 m->count = 0;
129 p = m->op->next(m, p, &m->index);
130 if (pos == offset) 130 if (pos == offset)
131 break; 131 break;
132 } 132 }
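
The traverse() fix moves the op->next() call ahead of the mid-record early return, so the iterator already points past the buffered record when the seek target lands inside it. A hedged cursor model over a string array (seek_to() and its records are inventions for illustration):

/* Advance the cursor as soon as a record has been buffered, then
 * decide whether the seek target falls inside it; advancing after the
 * early return would leave the cursor on a record that was already
 * consumed, so it would be emitted twice.
 */
#include <stdio.h>
#include <string.h>

static const char *recs[] = { "alpha\n", "beta\n", "gamma\n" };
#define NRECS 3

static int seek_to(size_t offset, size_t *from, size_t *idx)
{
	size_t pos = 0, i = 0;

	while (i < NRECS) {
		size_t count = strlen(recs[i]);	/* "show" one record */

		i++;				/* advance FIRST (the fix) */
		if (pos + count > offset) {	/* target is in this record */
			*from = offset - pos;
			*idx = i - 1;
			return 0;
		}
		pos += count;
	}
	return -1;
}

int main(void)
{
	size_t from, idx;

	if (!seek_to(7, &from, &idx))	/* lands inside "beta\n" */
		printf("record %zu, offset %zu, next record %zu\n",
		       idx, from, idx + 1);
	return 0;
}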
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
51static void shrink_liability(struct ubifs_info *c, int nr_to_write) 51static void shrink_liability(struct ubifs_info *c, int nr_to_write)
52{ 52{
53 down_read(&c->vfs_sb->s_umount); 53 down_read(&c->vfs_sb->s_umount);
54 writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE); 54 writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
55 up_read(&c->vfs_sb->s_umount); 55 up_read(&c->vfs_sb->s_umount);
56} 56}
57 57
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..3b4b4114f208 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph) 129static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
130{ 130{
131 if (orph->del) { 131 if (orph->del) {
132 spin_unlock(&c->orphan_lock);
133 dbg_gen("deleted twice ino %lu", orph->inum); 132 dbg_gen("deleted twice ino %lu", orph->inum);
134 return; 133 return;
135 } 134 }
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
138 orph->del = 1; 137 orph->del = 1;
139 orph->dnext = c->orph_dnext; 138 orph->dnext = c->orph_dnext;
140 c->orph_dnext = orph; 139 c->orph_dnext = orph;
141 spin_unlock(&c->orphan_lock);
142 dbg_gen("delete later ino %lu", orph->inum); 140 dbg_gen("delete later ino %lu", orph->inum);
143 return; 141 return;
144 } 142 }
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 2c0803b0ac3a..8c1d571334bc 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -609,6 +609,10 @@ static int init_constants_early(struct ubifs_info *c)
609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 609 c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
610 if (c->max_bu_buf_len > c->leb_size) 610 if (c->max_bu_buf_len > c->leb_size)
611 c->max_bu_buf_len = c->leb_size; 611 c->max_bu_buf_len = c->leb_size;
612
613 /* Log is ready, preserve one LEB for commits. */
614 c->min_log_bytes = c->leb_size;
615
612 return 0; 616 return 0;
613} 617}
614 618
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..fe6d804a38dc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
880 /* len == 0 means wake all */ 880 /* len == 0 means wake all */
881 struct userfaultfd_wake_range range = { .len = 0, }; 881 struct userfaultfd_wake_range range = { .len = 0, };
882 unsigned long new_flags; 882 unsigned long new_flags;
883 bool still_valid;
883 884
884 WRITE_ONCE(ctx->released, true); 885 WRITE_ONCE(ctx->released, true);
885 886
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
895 * taking the mmap_sem for writing. 896 * taking the mmap_sem for writing.
896 */ 897 */
897 down_write(&mm->mmap_sem); 898 down_write(&mm->mmap_sem);
898 if (!mmget_still_valid(mm)) 899 still_valid = mmget_still_valid(mm);
899 goto skip_mm;
900 prev = NULL; 900 prev = NULL;
901 for (vma = mm->mmap; vma; vma = vma->vm_next) { 901 for (vma = mm->mmap; vma; vma = vma->vm_next) {
902 cond_resched(); 902 cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
907 continue; 907 continue;
908 } 908 }
909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP); 909 new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
910 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end, 910 if (still_valid) {
911 new_flags, vma->anon_vma, 911 prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
912 vma->vm_file, vma->vm_pgoff, 912 new_flags, vma->anon_vma,
913 vma_policy(vma), 913 vma->vm_file, vma->vm_pgoff,
914 NULL_VM_UFFD_CTX); 914 vma_policy(vma),
915 if (prev) 915 NULL_VM_UFFD_CTX);
916 vma = prev; 916 if (prev)
917 else 917 vma = prev;
918 prev = vma; 918 else
919 prev = vma;
920 }
919 vma->vm_flags = new_flags; 921 vma->vm_flags = new_flags;
920 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; 922 vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
921 } 923 }
922skip_mm:
923 up_write(&mm->mmap_sem); 924 up_write(&mm->mmap_sem);
924 mmput(mm); 925 mmput(mm);
925wakeup: 926wakeup:
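
The userfaultfd_release() change replaces a goto that skipped the whole vma loop with a still_valid flag, so the mandatory per-vma cleanup always runs and only the optional vma_merge() step is gated. The shape of that fix, with a hypothetical data model:

/* Keep the mandatory per-entry cleanup unconditional; gate only the
 * optional merge optimization behind the validity flag.
 */
#include <stdbool.h>
#include <stdio.h>

struct vma_demo {
	unsigned flags;
	void *ctx;
};

#define VM_UFFD_DEMO 0x1

static void release_all(struct vma_demo *v, int n, bool still_valid)
{
	for (int i = 0; i < n; i++) {
		if (still_valid) {
			/* optional: try to merge with the previous vma */
		}
		v[i].flags &= ~VM_UFFD_DEMO;	/* always clear state */
		v[i].ctx = NULL;		/* always detach the ctx */
	}
}

int main(void)
{
	struct vma_demo v[2] = { { VM_UFFD_DEMO, (void *)1 },
				 { VM_UFFD_DEMO, (void *)1 } };

	release_all(v, 2, false);	/* mm exiting: cleanup still runs */
	printf("flags %u %u\n", v[0].flags, v[1].flags);
	return 0;
}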
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..07aad70f3931 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
3835 XFS_STATS_INC(mp, xs_blk_mapr); 3835 XFS_STATS_INC(mp, xs_blk_mapr);
3836 3836
3837 ifp = XFS_IFORK_PTR(ip, whichfork); 3837 ifp = XFS_IFORK_PTR(ip, whichfork);
3838 if (!ifp) {
3839 /* No CoW fork? Return a hole. */
3840 if (whichfork == XFS_COW_FORK) {
3841 mval->br_startoff = bno;
3842 mval->br_startblock = HOLESTARTBLOCK;
3843 mval->br_blockcount = len;
3844 mval->br_state = XFS_EXT_NORM;
3845 *nmap = 1;
3846 return 0;
3847 }
3838 3848
3839 /* No CoW fork? Return a hole. */ 3849 /*
3840 if (whichfork == XFS_COW_FORK && !ifp) { 3850 * A missing attr ifork implies that the inode says we're in
3841 mval->br_startoff = bno; 3851 * extents or btree format but failed to pass the inode fork
3842 mval->br_startblock = HOLESTARTBLOCK; 3852 * verifier while trying to load it. Treat that as a file
3843 mval->br_blockcount = len; 3853 * corruption too.
3844 mval->br_state = XFS_EXT_NORM; 3854 */
3845 *nmap = 1; 3855#ifdef DEBUG
3846 return 0; 3856 xfs_alert(mp, "%s: inode %llu missing fork %d",
3857 __func__, ip->i_ino, whichfork);
3858#endif /* DEBUG */
3859 return -EFSCORRUPTED;
3847 } 3860 }
3848 3861
3849 if (!(ifp->if_flags & XFS_IFEXTENTS)) { 3862 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..0bf56e94bfe9 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -487,10 +487,8 @@ xfs_da3_split(
 	ASSERT(state->path.active == 0);
 	oldblk = &state->path.blk[0];
 	error = xfs_da3_root_split(state, oldblk, addblk);
-	if (error) {
-		addblk->bp = NULL;
-		return error;	/* GROT: dir is inconsistent */
-	}
+	if (error)
+		goto out;
 
 	/*
 	 * Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
 	 */
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.forw) {
-		ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+			error = -EFSCORRUPTED;
+			goto out;
+		}
 		node = addblk->bp->b_addr;
 		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
 		xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
 	}
 	node = oldblk->bp->b_addr;
 	if (node->hdr.info.back) {
-		ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+			error = -EFSCORRUPTED;
+			goto out;
+		}
 		node = addblk->bp->b_addr;
 		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
 		xfs_trans_log_buf(state->args->trans, addblk->bp,
 				XFS_DA_LOGRANGE(node, &node->hdr.info,
 				sizeof(node->hdr.info)));
 	}
+out:
 	addblk->bp = NULL;
-	return 0;
+	return error;
 }
 
 /*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..1fc44efc344d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
 	ents = dp->d_ops->leaf_ents_p(leaf);
 
 	xfs_dir3_leaf_check(dp, bp);
-	ASSERT(leafhdr.count > 0);
+	if (leafhdr.count <= 0)
+		return -EFSCORRUPTED;
 
 	/*
 	 * Look up the hash value in the leaf entries.
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..7bd7534f5051 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -547,63 +547,12 @@ xfs_file_compat_ioctl(
 	struct inode		*inode = file_inode(filp);
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	void			__user *arg = (void __user *)p;
+	void			__user *arg = compat_ptr(p);
 	int			error;
 
 	trace_xfs_file_compat_ioctl(ip);
 
 	switch (cmd) {
-	/* No size or alignment issues on any arch */
-	case XFS_IOC_DIOINFO:
-	case XFS_IOC_FSGEOMETRY_V4:
-	case XFS_IOC_FSGEOMETRY:
-	case XFS_IOC_AG_GEOMETRY:
-	case XFS_IOC_FSGETXATTR:
-	case XFS_IOC_FSSETXATTR:
-	case XFS_IOC_FSGETXATTRA:
-	case XFS_IOC_FSSETDM:
-	case XFS_IOC_GETBMAP:
-	case XFS_IOC_GETBMAPA:
-	case XFS_IOC_GETBMAPX:
-	case XFS_IOC_FSCOUNTS:
-	case XFS_IOC_SET_RESBLKS:
-	case XFS_IOC_GET_RESBLKS:
-	case XFS_IOC_FSGROWFSLOG:
-	case XFS_IOC_GOINGDOWN:
-	case XFS_IOC_ERROR_INJECTION:
-	case XFS_IOC_ERROR_CLEARALL:
-	case FS_IOC_GETFSMAP:
-	case XFS_IOC_SCRUB_METADATA:
-	case XFS_IOC_BULKSTAT:
-	case XFS_IOC_INUMBERS:
-		return xfs_file_ioctl(filp, cmd, p);
-#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
-	/*
-	 * These are handled fine if no alignment issues. To support x32
-	 * which uses native 64-bit alignment we must emit these cases in
-	 * addition to the ia-32 compat set below.
-	 */
-	case XFS_IOC_ALLOCSP:
-	case XFS_IOC_FREESP:
-	case XFS_IOC_RESVSP:
-	case XFS_IOC_UNRESVSP:
-	case XFS_IOC_ALLOCSP64:
-	case XFS_IOC_FREESP64:
-	case XFS_IOC_RESVSP64:
-	case XFS_IOC_UNRESVSP64:
-	case XFS_IOC_FSGEOMETRY_V1:
-	case XFS_IOC_FSGROWFSDATA:
-	case XFS_IOC_FSGROWFSRT:
-	case XFS_IOC_ZERO_RANGE:
-#ifdef CONFIG_X86_X32
-	/*
-	 * x32 special: this gets a different cmd number from the ia-32 compat
-	 * case below; the associated data will match native 64-bit alignment.
-	 */
-	case XFS_IOC_SWAPEXT:
-#endif
-		return xfs_file_ioctl(filp, cmd, p);
-#endif
 #if defined(BROKEN_X86_ALIGNMENT)
 	case XFS_IOC_ALLOCSP_32:
 	case XFS_IOC_FREESP_32:
@@ -705,6 +654,7 @@ xfs_file_compat_ioctl(
 	case XFS_IOC_FSSETDM_BY_HANDLE_32:
 		return xfs_compat_fssetdm_by_handle(filp, arg);
 	default:
-		return -ENOIOCTLCMD;
+		/* try the native version */
+		return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
 	}
 }
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..fe285d123d69 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -793,6 +793,7 @@ xfs_setattr_nonsize(
 
 out_cancel:
 	xfs_trans_cancel(tp);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 out_dqrele:
 	xfs_qm_dqrele(udqp);
 	xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..7fc3c1ad36bc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -429,10 +429,7 @@ xfs_log_reserve(
 
 	ASSERT(*ticp == NULL);
 	tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
-				KM_SLEEP | KM_MAYFAIL);
-	if (!tic)
-		return -ENOMEM;
-
+				KM_SLEEP);
 	*ticp = tic;
 
 	xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..a339bd5fa260 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,7 +32,7 @@ xfs_break_leased_layouts(
 	struct xfs_inode	*ip = XFS_I(inode);
 	int			error;
 
-	while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
+	while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
 		xfs_iunlock(ip, *iolock);
 		*did_unlock = true;
 		error = break_layout(inode, true);
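The hunk above fixes a classic C operator-precedence bug: == binds tighter than =, so the old condition stored the comparison result (0 or 1) in error rather than break_layout()'s return value. A minimal standalone sketch of the pitfall; poll_ready() is a hypothetical stand-in for break_layout(), not kernel code:

	#include <assert.h>

	static int poll_ready(void) { return -11; }	/* e.g. -EAGAIN */

	int main(void)
	{
		int error;

		/* Buggy: parsed as error = (poll_ready() == -11), so error is 1. */
		error = poll_ready() == -11;
		assert(error == 1);

		/* Fixed: parenthesize the assignment first, then compare. */
		if ((error = poll_ready()) == -11)
			assert(error == -11);
		return 0;
	}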
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..edbe37b7f636 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks(
 }
 
 /*
- * Grab the exclusive iolock for a data copy from src to dest, making
- * sure to abide vfs locking order (lowest pointer value goes first) and
- * breaking the pnfs layout leases on dest before proceeding. The loop
- * is needed because we cannot call the blocking break_layout() with the
- * src iolock held, and therefore have to back out both locks.
+ * Grab the exclusive iolock for a data copy from src to dest, making sure to
+ * abide vfs locking order (lowest pointer value goes first) and breaking the
+ * layout leases before proceeding. The loop is needed because we cannot call
+ * the blocking break_layout() with the iolocks held, and therefore have to
+ * back out both locks.
  */
 static int
 xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout(
 {
 	int			error;
 
-retry:
-	if (src < dest) {
-		inode_lock_shared(src);
-		inode_lock_nested(dest, I_MUTEX_NONDIR2);
-	} else {
-		/* src >= dest */
-		inode_lock(dest);
-	}
+	if (src > dest)
+		swap(src, dest);
 
-	error = break_layout(dest, false);
-	if (error == -EWOULDBLOCK) {
-		inode_unlock(dest);
-		if (src < dest)
-			inode_unlock_shared(src);
+retry:
+	/* Wait to break both inodes' layouts before we start locking. */
+	error = break_layout(src, true);
+	if (error)
+		return error;
+	if (src != dest) {
 		error = break_layout(dest, true);
 		if (error)
 			return error;
-		goto retry;
 	}
+
+	/* Lock one inode and make sure nobody got in and leased it. */
+	inode_lock(src);
+	error = break_layout(src, false);
 	if (error) {
+		inode_unlock(src);
+		if (error == -EWOULDBLOCK)
+			goto retry;
+		return error;
+	}
+
+	if (src == dest)
+		return 0;
+
+	/* Lock the other inode and make sure nobody got in and leased it. */
+	inode_lock_nested(dest, I_MUTEX_NONDIR2);
+	error = break_layout(dest, false);
+	if (error) {
+		inode_unlock(src);
 		inode_unlock(dest);
-		if (src < dest)
-			inode_unlock_shared(src);
+		if (error == -EWOULDBLOCK)
+			goto retry;
 		return error;
 	}
-	if (src > dest)
-		inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+
 	return 0;
 }
 
@@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock(
 
 	xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
 	if (!same_inode)
-		xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+		xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
 	inode_unlock(inode_out);
 	if (!same_inode)
-		inode_unlock_shared(inode_in);
+		inode_unlock(inode_in);
 }
 
 /*
@@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep(
 	if (same_inode)
 		xfs_ilock(src, XFS_MMAPLOCK_EXCL);
 	else
-		xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+		xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
 				    XFS_MMAPLOCK_EXCL);
 
 	/* Check file eligibility and prepare for block sharing. */
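The locking rework above is an instance of the standard ABBA-deadlock avoidance rule: take the two inode locks in a fixed address order, break leases before locking, and re-check after locking, retrying from scratch on -EWOULDBLOCK. A minimal userspace sketch of the ordering rule alone, with pthread mutexes standing in for the iolocks (lock_two() is illustrative, not a kernel API):

	#include <pthread.h>

	/* Lock two mutexes without deadlock: lowest address always first. */
	static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (a == b) {		/* same object: lock only once */
			pthread_mutex_lock(a);
			return;
		}
		if (a > b) {		/* normalize to the canonical order */
			pthread_mutex_t *tmp = a;

			a = b;
			b = tmp;
		}
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	}

Every caller that follows the same order cannot form an A-B/B-A cycle, which is why the new code swaps src and dest up front instead of branching on the order in every lock and unlock path.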
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index bb6cb347018c..f6947da70d71 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -19,9 +19,24 @@
 
 #define p4d_alloc(mm, pgd, address)	(pgd)
 #define p4d_offset(pgd, start)	(pgd)
-#define p4d_none(p4d)		0
-#define p4d_bad(p4d)		0
-#define p4d_present(p4d)	1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+	return 1;
+}
+#endif
+
 #define p4d_ERROR(p4d)		do { } while (0)
 #define p4d_clear(p4d)		pgd_clear(p4d)
 #define p4d_val(p4d)		pgd_val(p4d)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1b1fa1557e68..feff3fe4467e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,7 +311,6 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,		/* Don't wait if request will block */
-	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -346,7 +345,6 @@ enum req_flag_bits {
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 5e58bb29b1a3..11cdc7c60480 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -30,7 +30,8 @@ static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
 
 static inline void ceph_buffer_put(struct ceph_buffer *b)
 {
-	kref_put(&b->kref, ceph_buffer_release);
+	if (b)
+		kref_put(&b->kref, ceph_buffer_release);
 }
 
 extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
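Making the put side tolerate NULL lets error paths release a possibly-unset pointer unconditionally, the same convention kfree() follows. A sketch of the idiom with a plain reference count instead of a kref (illustrative only, not the kernel implementation):

	#include <stdlib.h>

	struct buf {
		int refcount;
	};

	/* NULL-safe put: cleanup paths may call this without checking. */
	static void buf_put(struct buf *b)
	{
		if (b && --b->refcount == 0)
			free(b);
	}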
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index c05d4e661489..03f8e98e3bcc 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -160,10 +160,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
 		gfp_t gfp)
 {
-	int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-	size_t align = get_order(PAGE_ALIGN(size));
-
-	return alloc_pages_node(node, gfp, align);
+	return NULL;
 }
 
 static inline void dma_free_contiguous(struct device *dev, struct page *page,
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 3813211a9aad..0bff3d7fac92 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
-
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs);
+
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fb07b503dc45..f33881688f42 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
-			int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-	alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
 	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)		\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)	\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 40915b461f18..f757a58191a6 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -241,30 +241,6 @@ static inline int irq_to_gpio(unsigned irq)
 	return -EINVAL;
 }
 
-static inline int
-gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
-		       unsigned int gpio_offset, unsigned int pin_offset,
-		       unsigned int npins)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline int
-gpiochip_add_pingroup_range(struct gpio_chip *chip,
-			   struct pinctrl_dev *pctldev,
-			   unsigned int gpio_offset, const char *pin_group)
-{
-	WARN_ON(1);
-	return -EINVAL;
-}
-
-static inline void
-gpiochip_remove_pin_ranges(struct gpio_chip *chip)
-{
-	WARN_ON(1);
-}
-
 static inline int devm_gpio_request(struct device *dev, unsigned gpio,
 				    const char *label)
 {
diff --git a/include/linux/key.h b/include/linux/key.h
index 91f391cd272e..50028338a4cc 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -94,11 +94,11 @@ struct keyring_index_key {
 	union {
 		struct {
 #ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
-			u8	desc_len;
-			char	desc[sizeof(long) - 1];	/* First few chars of description */
+			u16	desc_len;
+			char	desc[sizeof(long) - 2];	/* First few chars of description */
 #else
-			char	desc[sizeof(long) - 1];	/* First few chars of description */
-			u8	desc_len;
+			char	desc[sizeof(long) - 2];	/* First few chars of description */
+			u16	desc_len;
 #endif
 		};
 		unsigned long x;
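The widening matters because desc_len shares a machine word with the first description bytes via the union with x, and that word is compared in one go; an 8-bit length lets two descriptions whose lengths differ by a multiple of 256 alias to the same packed value. A userspace model of the packing (assumes an 8-byte long, little-endian layout, and C11 anonymous members; a sketch, not the kernel struct):

	#include <assert.h>
	#include <stdint.h>

	struct index_key_model {
		union {
			struct {
				uint16_t desc_len;
				char	 desc[sizeof(long) - 2];
			};
			unsigned long x;	/* compared as a single word */
		};
	};

	int main(void)
	{
		struct index_key_model k = { .desc_len = 300 };

		assert(sizeof(k) == sizeof(unsigned long));
		assert(k.desc_len == 300);	/* a u8 would wrap to 44 */
		return 0;
	}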
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index cbd9d8495690..88e1e6304a71 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -117,6 +117,7 @@ struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
 unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
 			resource_size_t hw_addr, resource_size_t size);
 int logic_pio_register_range(struct logic_pio_hwaddr *newrange);
+void logic_pio_unregister_range(struct logic_pio_hwaddr *range);
 resource_size_t logic_pio_to_hwaddr(unsigned long pio);
 unsigned long logic_pio_trans_cpuaddr(resource_size_t hw_addr);
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 44c41462be33..2cd4359cb38c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
 	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+					   int val)
+{
+	struct page *page = virt_to_head_page(p);
+
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, -1);
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void inc_memcg_state(struct mem_cgroup *memcg,
 				   int idx)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 5228c62af416..bac395f1d00a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 struct mempolicy *get_task_policy(struct task_struct *p);
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 		unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+		unsigned long addr);
 bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index ce9839c8bc1a..c2f056b5766d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -446,11 +446,11 @@ enum {
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
 };
 
 enum {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ec571fd7fcf8..b8b570c30b5e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10054,9 +10054,8 @@ struct mlx5_ifc_tls_static_params_bits {
 };
 
 struct mlx5_ifc_tls_progress_params_bits {
-	u8         valid[0x1];
-	u8         reserved_at_1[0x7];
-	u8         pd[0x18];
+	u8         reserved_at_0[0x8];
+	u8         tisn[0x18];
 
 	u8         next_record_tcp_sn[0x20];
 
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 3a37a89eb7a7..6a7a1083b6fb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -159,7 +159,16 @@ struct page {
 		/** @pgmap: Points to the hosting device page map. */
 		struct dev_pagemap *pgmap;
 		void *zone_device_data;
-		unsigned long _zd_pad_1;	/* uses mapping */
+		/*
+		 * ZONE_DEVICE private pages are counted as being
+		 * mapped so the next 3 words hold the mapping, index,
+		 * and private fields from the source anonymous or
+		 * page cache page while the page is migrated to device
+		 * private memory.
+		 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+		 * use the mapping, index, and private fields when
+		 * pmem backed DAX files are mapped.
+		 */
 	};
 
 	/** @rcu_head: You can use this to free a page by RCU. */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index d77d717c620c..3f38c30d2f13 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,8 +215,9 @@ enum node_stat_item {
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE,	/* Please do not reorder this item */
+	NR_SLAB_UNRECLAIMABLE,	/* and this one without looking at
+				 * memcg_flush_percpu_vmstats() first. */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_NODES,
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index 7a6871ac8784..74c6f9241944 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -4,6 +4,9 @@
  * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
  */
 
+#ifndef _NF_CONNTRACK_H323_TYPES_H
+#define _NF_CONNTRACK_H323_TYPES_H
+
 typedef struct TransportAddress_ipAddress {	/* SEQUENCE */
 	int options;		/* No use */
 	unsigned int ip;
@@ -931,3 +934,5 @@ typedef struct RasMessage {	/* CHOICE */
 		InfoRequestResponse infoRequestResponse;
 	};
 } RasMessage;
+
+#endif /* _NF_CONNTRACK_H323_TYPES_H */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 9e700d9f9f28..82e4cd1b7ac3 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
 
 #ifdef CONFIG_PCIEASPM
 bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
 #else
 static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
 #endif
 
 #ifdef CONFIG_PCIEAER
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b5d99482d3fe..1a5f88316b08 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -282,6 +282,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
 extern void exit_signals(struct task_struct *tsk);
 extern void kernel_sigaction(int, __sighandler_t);
 
+#define SIG_KTHREAD ((__force __sighandler_t)2)
+#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
+
 static inline void allow_signal(int sig)
 {
 	/*
@@ -289,7 +292,17 @@ static inline void allow_signal(int sig)
 	 * know it'll be handled, so that they don't get converted to
 	 * SIGKILL or just silently dropped.
 	 */
-	kernel_sigaction(sig, (__force __sighandler_t)2);
+	kernel_sigaction(sig, SIG_KTHREAD);
+}
+
+static inline void allow_kernel_signal(int sig)
+{
+	/*
+	 * Kernel threads handle their own signals. Let the signal code
+	 * know signals sent by the kernel will be handled, so that they
+	 * don't get silently dropped.
+	 */
+	kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
 }
 
 static inline void disallow_signal(int sig)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d8af86d995d6..ba5583522d24 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1374,6 +1374,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_copy_decrypted(struct sk_buff *to,
+				      const struct sk_buff *from)
+{
+#ifdef CONFIG_TLS_DEVICE
+	to->decrypted = from->decrypted;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 97523818cb14..fc0bed59fc84 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -292,6 +292,9 @@ struct ucred {
 #define MSG_BATCH	0x40000 /* sendmmsg(): more messages coming */
 #define MSG_EOF		MSG_FIN
 #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
+#define MSG_SENDPAGE_DECRYPTED	0x100000 /* sendpage() internal : page may carry
+					  * plain text and require encryption
+					  */
 
 #define MSG_ZEROCOPY	0x4000000	/* Use user data in kernel path */
 #define MSG_FASTOPEN	0x20000000	/* Send data in TCP SYN */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index baa3ecdb882f..27536b961552 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -98,7 +98,6 @@ typedef void (*rpc_action)(struct rpc_task *);
 
 struct rpc_call_ops {
 	void (*rpc_call_prepare)(struct rpc_task *, void *);
-	void (*rpc_call_prepare_transmit)(struct rpc_task *, void *);
 	void (*rpc_call_done)(struct rpc_task *, void *);
 	void (*rpc_count_stats)(struct rpc_task *, void *);
 	void (*rpc_release)(void *);
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 7acb953298a7..84ff2844df2a 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -57,6 +57,7 @@ struct tk_read_base {
  * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
  * @raw_sec:		CLOCK_MONOTONIC_RAW time in seconds
+ * @monotonic_to_boot:	CLOCK_MONOTONIC to CLOCK_BOOTTIME offset
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
  *			interval.
@@ -84,6 +85,9 @@ struct tk_read_base {
  *
  * wall_to_monotonic is no longer the boot time, getboottime must be
  * used instead.
+ *
+ * @monotonic_to_boottime is a timespec64 representation of @offs_boot to
+ * accelerate the VDSO update for CLOCK_BOOTTIME.
  */
 struct timekeeper {
 	struct tk_read_base	tkr_mono;
@@ -99,6 +103,7 @@ struct timekeeper {
 	u8			cs_was_changed_seq;
 	ktime_t			next_leap_ktime;
 	u64			raw_sec;
+	struct timespec64	monotonic_to_boot;
 
 	/* The following members are for timekeeping internal use */
 	u64			cycle_interval;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5150436783e8..30a8cdcfd4a4 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -548,6 +548,7 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
 
 #define is_signed_type(type)	(((type)(-1)) < (type)1)
 
+int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
 int trace_set_clr_event(const char *system, const char *event, int set);
 
 /*
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 83d35d993e8c..e87826e23d59 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1457,7 +1457,7 @@ typedef void (*usb_complete_t)(struct urb *);
  *	field rather than determining a dma address themselves.
  *
  * Note that transfer_buffer must still be set if the controller
- * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * does not support DMA (as indicated by hcd_uses_dma()) and when talking
  * to root hub. If you have to trasfer between highmem zone and the device
  * on such controller, create a bounce buffer or bail out with an error.
  * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index bab27ccc8ff5..a20e7815d814 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -422,6 +422,9 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
 	return hcd->high_prio_bh.completing_ep == ep;
 }
 
+#define hcd_uses_dma(hcd) \
+	(IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)
+
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
 extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
 		int status);
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index f37d12877754..adcc6a97db61 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -308,6 +308,7 @@ do { \
 								\
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO):		\
     R##_e = X##_e;						\
+    /* Fall through */						\
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_NORMAL):		\
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF):			\
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO):		\
@@ -318,6 +319,7 @@ do {						\
 								\
   case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NORMAL):		\
     R##_e = Y##_e;						\
+    /* Fall through */						\
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_NAN):		\
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN):			\
   case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN):		\
@@ -415,6 +417,7 @@ do {						\
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_INF):			\
   case _FP_CLS_COMBINE(FP_CLS_NAN,FP_CLS_ZERO):		\
     R##_s = X##_s;						\
+    /* Fall through */						\
 								\
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_INF):			\
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL):		\
@@ -428,6 +431,7 @@ do {						\
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NAN):			\
   case _FP_CLS_COMBINE(FP_CLS_ZERO,FP_CLS_NAN):		\
     R##_s = Y##_s;						\
+    /* Fall through */						\
 								\
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_INF):		\
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO):		\
@@ -493,6 +497,7 @@ do {						\
 								\
   case _FP_CLS_COMBINE(FP_CLS_NORMAL,FP_CLS_ZERO):		\
     FP_SET_EXCEPTION(FP_EX_DIVZERO);				\
+    /* Fall through */						\
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_ZERO):		\
   case _FP_CLS_COMBINE(FP_CLS_INF,FP_CLS_NORMAL):		\
     R##_c = FP_CLS_INF;						\
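These annotations document deliberate case fall-through inside the macro bodies, so builds using -Wimplicit-fallthrough do not flag them. The idiom in plain C looks like this (illustrative; at its default level GCC also treats comment forms like this one as suppression markers):

	/* build with: gcc -Wimplicit-fallthrough -c example.c */
	int classify(int c)
	{
		int r = 0;

		switch (c) {
		case 0:
			r += 1;
			/* fall through */
		case 1:
			r += 2;
			break;
		default:
			r = -1;
		}
		return r;
	}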
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index becdad576859..3f62b347b04a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -206,7 +206,7 @@ static inline int ipv6_mc_may_pull(struct sk_buff *skb,
 					  unsigned int len)
 {
 	if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
-		return -EINVAL;
+		return 0;
 
 	return pskb_may_pull(skb, len);
 }
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ded574b32c20..ffc95b382eb5 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -278,6 +278,7 @@ struct hci_dev {
 	__u16		conn_info_min_age;
 	__u16		conn_info_max_age;
 	__u16		auth_payload_timeout;
+	__u8		min_enc_key_size;
 	__u8		ssp_debug_mode;
 	__u8		hw_error_code;
 	__u32		clock;
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 010f26b31c89..bac79e817776 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -171,7 +171,7 @@ int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
 void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
 			      struct sk_buff *parent);
 void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-			    void *reasm_data);
+			    void *reasm_data, bool try_coalesce);
 struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
 
 #endif
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 4a9da951a794..ab40d7afdc54 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -52,7 +52,7 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
-	refcount_t		passive;	/* To decided when the network
+	refcount_t		passive;	/* To decide when the network
 						 * namespace should be freed.
 						 */
 	refcount_t		count;		/* To decided when the network
@@ -61,7 +61,6 @@ struct net {
 	spinlock_t		rules_mod_lock;
 
 	u32			hash_mix;
-	atomic64_t		cookie_gen;
 
 	struct list_head	list;		/* list of network namespaces */
 	struct list_head	exit_list;	/* To linked to call pernet exit
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 9b624566b82d..475d6f28ca67 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -421,8 +421,7 @@ struct nft_set {
 	unsigned char			*udata;
 	/* runtime data below here */
 	const struct nft_set_ops	*ops ____cacheline_aligned;
-	u16				flags:13,
-					bound:1,
+	u16				flags:14,
 					genmask:2;
 	u8				klen;
 	u8				dlen;
@@ -1348,12 +1347,15 @@ struct nft_trans_rule {
 struct nft_trans_set {
 	struct nft_set			*set;
 	u32				set_id;
+	bool				bound;
 };
 
 #define nft_trans_set(trans)	\
 	(((struct nft_trans_set *)trans->data)->set)
 #define nft_trans_set_id(trans)	\
 	(((struct nft_trans_set *)trans->data)->set_id)
+#define nft_trans_set_bound(trans)	\
+	(((struct nft_trans_set *)trans->data)->bound)
 
 struct nft_trans_chain {
 	bool				update;
@@ -1384,12 +1386,15 @@ struct nft_trans_table {
 struct nft_trans_elem {
 	struct nft_set			*set;
 	struct nft_set_elem		elem;
+	bool				bound;
 };
 
 #define nft_trans_elem_set(trans)	\
 	(((struct nft_trans_elem *)trans->data)->set)
 #define nft_trans_elem(trans)	\
 	(((struct nft_trans_elem *)trans->data)->elem)
+#define nft_trans_elem_set_bound(trans)	\
+	(((struct nft_trans_elem *)trans->data)->bound)
 
 struct nft_trans_obj {
 	struct nft_object		*obj;
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 3196663a10e3..c8b9dec376f5 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -73,4 +73,6 @@ int nft_flow_rule_offload_commit(struct net *net);
 	(__reg)->key		= __key;				\
 	memset(&(__reg)->mask, 0xff, (__reg)->len);
 
+int nft_chain_offload_priority(struct nft_base_chain *basechain);
+
 #endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index e4650e5b64a1..b140c8f1be22 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -684,9 +684,8 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
 			      const struct nla_policy *policy,
 			      struct netlink_ext_ack *extack)
 {
-	return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
-			   nlmsg_attrlen(nlh, hdrlen), policy,
-			   NL_VALIDATE_STRICT, extack);
+	return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+			     NL_VALIDATE_STRICT, extack);
 }
 
 /**
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 25f1f9a8419b..95f766c31c90 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -141,12 +141,6 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh)
 
 		nh_grp = rcu_dereference_rtnl(nh->nh_grp);
 		rc = nh_grp->num_nh;
-	} else {
-		const struct nh_info *nhi;
-
-		nhi = rcu_dereference_rtnl(nh->nh_info);
-		if (nhi->reject_nh)
-			rc = 0;
 	}
 
 	return rc;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e429809ca90d..98be18ef1ed3 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -646,7 +646,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
 {
 	cls_common->chain_index = tp->chain->index;
 	cls_common->protocol = tp->protocol;
-	cls_common->prio = tp->prio;
+	cls_common->prio = tp->prio >> 16;
 	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
 		cls_common->extack = extack;
 }
diff --git a/include/net/route.h b/include/net/route.h
index 630a0493f1f3..dfce19c9fa96 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -233,7 +233,7 @@ void rt_del_uncached_list(struct rtable *rt);
 
 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		       u32 table_id, struct fib_info *fi,
-		       int *fa_index, int fa_start);
+		       int *fa_index, int fa_start, unsigned int flags);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
diff --git a/include/net/sock.h b/include/net/sock.h
index 228db3998e46..2c53f1a1d905 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2482,6 +2482,7 @@ static inline bool sk_fullsock(const struct sock *sk)
 
 /* Checks if this SKB belongs to an HW offloaded socket
  * and whether any SW fallbacks are required based on dev.
+ * Check decrypted mark in case skb_orphan() cleared socket.
  */
 static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
 						   struct net_device *dev)
@@ -2489,8 +2490,15 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb,
 #ifdef CONFIG_SOCK_VALIDATE_XMIT
 	struct sock *sk = skb->sk;
 
-	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb)
+	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
 		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
+#ifdef CONFIG_TLS_DEVICE
+	} else if (unlikely(skb->decrypted)) {
+		pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n");
+		kfree_skb(skb);
+		skb = NULL;
+#endif
+	}
 #endif
 
 	return skb;
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index b0fc6b26bdf5..83df1ec6664e 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -105,8 +105,7 @@ struct rdma_restrack_entry {
 };
 
 int rdma_restrack_count(struct ib_device *dev,
-			enum rdma_restrack_type type,
-			struct pid_namespace *ns);
+			enum rdma_restrack_type type);
 
 void rdma_restrack_kadd(struct rdma_restrack_entry *res);
 void rdma_restrack_uadd(struct rdma_restrack_entry *res);
diff --git a/include/soc/arc/mcip.h b/include/soc/arc/mcip.h
index 50f49e043668..d1a93c73f006 100644
--- a/include/soc/arc/mcip.h
+++ b/include/soc/arc/mcip.h
@@ -46,7 +46,9 @@ struct mcip_cmd {
 #define CMD_IDU_ENABLE			0x71
 #define CMD_IDU_DISABLE			0x72
 #define CMD_IDU_SET_MODE		0x74
+#define CMD_IDU_READ_MODE		0x75
 #define CMD_IDU_SET_DEST		0x76
+#define CMD_IDU_ACK_CIRQ		0x79
 #define CMD_IDU_SET_MASK		0x7C
 
 #define IDU_M_TRIG_LEVEL		0x0
@@ -119,4 +121,13 @@ static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
 	__mcip_cmd(cmd, param);
 }
 
+/*
+ * Read MCIP register
+ */
+static inline unsigned int __mcip_cmd_read(unsigned int cmd, unsigned int param)
+{
+	__mcip_cmd(cmd, param);
+	return read_aux_reg(ARC_REG_MCIP_READBACK);
+}
+
 #endif
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index cc1d060cbf13..fa06b528c73c 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -498,10 +498,10 @@ rxrpc_tx_points;
 #define E_(a, b)	{ a, b }
 
 TRACE_EVENT(rxrpc_local,
-	    TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+	    TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
 		     int usage, const void *where),
 
-	    TP_ARGS(local, op, usage, where),
+	    TP_ARGS(local_debug_id, op, usage, where),
 
 	    TP_STRUCT__entry(
 		    __field(unsigned int,	local		)
@@ -511,7 +511,7 @@ TRACE_EVENT(rxrpc_local,
 	    ),
 
 	    TP_fast_assign(
-		    __entry->local = local->debug_id;
+		    __entry->local = local_debug_id;
 		    __entry->op = op;
 		    __entry->usage = usage;
 		    __entry->where = where;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index fa1c753dcdbc..a5aa7d3ac6a1 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
 	 *		If no cookie has been set yet, generate a new cookie. Once
 	 *		generated, the socket cookie remains stable for the life of the
 	 *		socket. This helper can be useful for monitoring per socket
-	 *		networking traffic statistics as it provides a unique socket
-	 *		identifier per namespace.
+	 *		networking traffic statistics as it provides a global socket
+	 *		identifier that can be assumed unique.
 	 *	Return
 	 *		A 8-byte long non-decreasing number on success, or 0 if the
 	 *		socket field is missing inside *skb*.
diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h
index a18b719f49d4..784ba0b9690a 100644
--- a/include/uapi/linux/jffs2.h
+++ b/include/uapi/linux/jffs2.h
@@ -77,11 +77,6 @@
 
 #define JFFS2_ACL_VERSION		0x0001
 
-// Maybe later...
-//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
-//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
-
-
 #define JFFS2_INO_FLAG_PREREAD	  1	/* Do read_inode() for this one at
 					   mount time, don't wait for it to
 					   happen later */
diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h
index 5c8a4d760ee3..b5123ab8d54a 100644
--- a/include/uapi/linux/netfilter/xt_nfacct.h
+++ b/include/uapi/linux/netfilter/xt_nfacct.h
@@ -11,4 +11,9 @@ struct xt_nfacct_match_info {
 	struct nf_acct	*nfacct;
 };
 
+struct xt_nfacct_match_info_v1 {
+	char		name[NFACCT_NAME_MAX];
+	struct nf_acct	*nfacct __attribute__((aligned(8)));
+};
+
 #endif /* _XT_NFACCT_MATCH_H */
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index fd6b5f66e2c5..cba368e55863 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -250,6 +250,7 @@ struct rds_info_rdma_connection {
 	__u32		rdma_mr_max;
 	__u32		rdma_mr_size;
 	__u8		tos;
+	__u8		sl;
 	__u32		cache_allocs;
 };
 
@@ -265,6 +266,7 @@ struct rds6_info_rdma_connection {
 	__u32		rdma_mr_max;
 	__u32		rdma_mr_size;
 	__u8		tos;
+	__u8		sl;
 	__u32		cache_allocs;
 };
 
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index 7de68f1dc707..af735f55b291 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -180,6 +180,7 @@ struct siw_cqe {
  * to control CQ arming.
  */
 struct siw_cq_ctrl {
-	__aligned_u64 notify;
+	__u32 flags;
+	__u32 pad;
 };
 #endif
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5d141f16f6fa..272071e9112f 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
1707 if (err) 1707 if (err)
1708 goto free_used_maps; 1708 goto free_used_maps;
1709 1709
1710 err = bpf_prog_new_fd(prog); 1710 /* Upon success of bpf_prog_alloc_id(), the BPF prog is
1711 if (err < 0) { 1711 * effectively publicly exposed. However, retrieving via
1712 /* failed to allocate fd. 1712 * bpf_prog_get_fd_by_id() will take another reference,
1713 * bpf_prog_put() is needed because the above 1713 * therefore it cannot be gone underneath us.
1714 * bpf_prog_alloc_id() has published the prog 1714 *
1715 * to the userspace and the userspace may 1715 * Only for the time /after/ successful bpf_prog_new_fd()
1716 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID. 1716 * and before returning to userspace, we might just hold
1717 */ 1717 * one reference and any parallel close on that fd could
1718 bpf_prog_put(prog); 1718 * rip everything out. Hence, below notifications must
1719 return err; 1719 * happen before bpf_prog_new_fd().
1720 } 1720 *
1721 1721 * Also, any failure handling from this point onwards must
1722 * be using bpf_prog_put() given the program is exposed.
1723 */
1722 bpf_prog_kallsyms_add(prog); 1724 bpf_prog_kallsyms_add(prog);
1723 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0); 1725 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
1726
1727 err = bpf_prog_new_fd(prog);
1728 if (err < 0)
1729 bpf_prog_put(prog);
1724 return err; 1730 return err;
1725 1731
1726free_used_maps: 1732free_used_maps:
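
The reordering above is worth internalizing: once bpf_prog_alloc_id() has run, the program is publicly reachable, so the fd, which a parallel close can drop at any moment, must be created only after the kallsyms/perf notifications, and every failure past that point must go through bpf_prog_put() rather than a plain free. A minimal userspace sketch of the same discipline; obj_put() and obj_install_fd() are hypothetical stand-ins, not kernel APIs:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcnt;
};

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                free(o);                /* last reference dropped */
}

static int obj_install_fd(struct obj *o)
{
        /* stand-in for bpf_prog_new_fd(): may fail, and on success the
         * object is immediately reachable (and closeable) by others */
        (void)o;
        return -1;                      /* simulate fd allocation failure */
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        atomic_init(&o->refcnt, 1);

        /* do every side effect while we still hold the only reference
         * and nobody else can reach the object yet */
        printf("notify: object loaded\n");

        if (obj_install_fd(o) < 0) {
                obj_put(o);             /* exposed: put, never plain free */
                return 1;
        }
        return 0;
}

The invariant is simple: no side effects after the point of publication, and no plain frees once an external reference can exist.
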
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c84d83f86141..b5c14c9d7b98 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
985 reg->smax_value = S64_MAX; 985 reg->smax_value = S64_MAX;
986 reg->umin_value = 0; 986 reg->umin_value = 0;
987 reg->umax_value = U64_MAX; 987 reg->umax_value = U64_MAX;
988
989 /* constant backtracking is enabled for root only for now */
990 reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
991} 988}
992 989
993/* Mark a register as having a completely unknown (scalar) value. */ 990/* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
1014 __mark_reg_not_init(regs + regno); 1011 __mark_reg_not_init(regs + regno);
1015 return; 1012 return;
1016 } 1013 }
1017 __mark_reg_unknown(regs + regno); 1014 regs += regno;
1015 __mark_reg_unknown(regs);
1016 /* constant backtracking is enabled for root without bpf2bpf calls */
1017 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1018 true : false;
1018} 1019}
1019 1020
1020static void __mark_reg_not_init(struct bpf_reg_state *reg) 1021static void __mark_reg_not_init(struct bpf_reg_state *reg)
diff --git a/kernel/configs.c b/kernel/configs.c
index b062425ccf8d..c09ea4c995e1 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
1/* 2/*
2 * kernel/configs.c 3 * kernel/configs.c
3 * Echo the kernel .config file used to build the kernel 4 * Echo the kernel .config file used to build the kernel
@@ -6,21 +7,6 @@
6 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net> 7 * Copyright (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
7 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com> 8 * Copyright (C) 2002 Al Stone <ahs3@fc.hp.com>
8 * Copyright (C) 2002 Hewlett-Packard Company 9 * Copyright (C) 2002 Hewlett-Packard Company
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or (at
13 * your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
18 * NON INFRINGEMENT. See the GNU General Public License for more
19 * details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */ 10 */
25 11
26#include <linux/kernel.h> 12#include <linux/kernel.h>
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 2bd410f934b3..69cfb4345388 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -230,9 +230,7 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
230 */ 230 */
231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) 231struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
232{ 232{
233 int node = dev ? dev_to_node(dev) : NUMA_NO_NODE; 233 size_t count = size >> PAGE_SHIFT;
234 size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
235 size_t align = get_order(PAGE_ALIGN(size));
236 struct page *page = NULL; 234 struct page *page = NULL;
237 struct cma *cma = NULL; 235 struct cma *cma = NULL;
238 236
@@ -243,14 +241,12 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
243 241
244 /* CMA can be used only in the context which permits sleeping */ 242 /* CMA can be used only in the context which permits sleeping */
245 if (cma && gfpflags_allow_blocking(gfp)) { 243 if (cma && gfpflags_allow_blocking(gfp)) {
244 size_t align = get_order(size);
246 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT); 245 size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
247 246
248 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN); 247 page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
249 } 248 }
250 249
251 /* Fallback allocation of normal pages */
252 if (!page)
253 page = alloc_pages_node(node, gfp, align);
254 return page; 250 return page;
255} 251}
256 252
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 59bdceea3737..706113c6bebc 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
47{ 47{
48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT); 48 u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
49 49
50 if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
51 max_dma = dev->bus_dma_mask;
52
53 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1; 50 return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
54} 51}
55 52
@@ -88,6 +85,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
88struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, 85struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
89 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 86 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
90{ 87{
88 size_t alloc_size = PAGE_ALIGN(size);
89 int node = dev_to_node(dev);
91 struct page *page = NULL; 90 struct page *page = NULL;
92 u64 phys_mask; 91 u64 phys_mask;
93 92
@@ -98,8 +97,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
98 gfp &= ~__GFP_ZERO; 97 gfp &= ~__GFP_ZERO;
99 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask, 98 gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
100 &phys_mask); 99 &phys_mask);
100 page = dma_alloc_contiguous(dev, alloc_size, gfp);
101 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
102 dma_free_contiguous(dev, page, alloc_size);
103 page = NULL;
104 }
101again: 105again:
102 page = dma_alloc_contiguous(dev, size, gfp); 106 if (!page)
107 page = alloc_pages_node(node, gfp, get_order(alloc_size));
103 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { 108 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
104 dma_free_contiguous(dev, page, size); 109 dma_free_contiguous(dev, page, size);
105 page = NULL; 110 page = NULL;
@@ -130,10 +135,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
130 if (!page) 135 if (!page)
131 return NULL; 136 return NULL;
132 137
133 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 138 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
139 !force_dma_unencrypted(dev)) {
134 /* remove any dirty cache lines on the kernel alias */ 140 /* remove any dirty cache lines on the kernel alias */
135 if (!PageHighMem(page)) 141 if (!PageHighMem(page))
136 arch_dma_prep_coherent(page, size); 142 arch_dma_prep_coherent(page, size);
143 *dma_handle = phys_to_dma(dev, page_to_phys(page));
137 /* return the page pointer as the opaque cookie */ 144 /* return the page pointer as the opaque cookie */
138 return page; 145 return page;
139 } 146 }
@@ -178,7 +185,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
178{ 185{
179 unsigned int page_order = get_order(size); 186 unsigned int page_order = get_order(size);
180 187
181 if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) { 188 if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
189 !force_dma_unencrypted(dev)) {
182 /* cpu_addr is a struct page cookie, not a kernel address */ 190 /* cpu_addr is a struct page cookie, not a kernel address */
183 __dma_direct_free_pages(dev, size, cpu_addr); 191 __dma_direct_free_pages(dev, size, cpu_addr);
184 return; 192 return;
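
Taken together with the contiguous.c hunk above, __dma_direct_alloc_pages() now owns the whole allocation policy: try CMA first, validate the result against the device's DMA limits, fall back to the page allocator, validate again. A rough userspace sketch of that try/validate/fallback shape, assuming malloc() as a stand-in for both allocators and a fake 47-bit limit in place of dma_coherent_ok():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int addr_ok(void *p)
{
        /* stand-in for dma_coherent_ok(): fake 47-bit address limit */
        return (uintptr_t)p < (1ull << 47);
}

static void *preferred_alloc(size_t size)
{
        return malloc(size);            /* stand-in for the CMA attempt */
}

static void *dma_alloc_sketch(size_t size)
{
        void *p = preferred_alloc(size);

        /* validate the preferred allocation; throw it back if the
         * device cannot address it */
        if (p && !addr_ok(p)) {
                free(p);
                p = NULL;
        }
        if (!p)                         /* generic fallback, same check */
                p = malloc(size);
        if (p && !addr_ok(p)) {
                free(p);
                p = NULL;               /* real code retries with GFP_DMA */
        }
        return p;
}

int main(void)
{
        void *p = dma_alloc_sketch(4096);

        printf("got %p\n", p);
        free(p);
        return 0;
}
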
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index b945239621d8..b0038ca3aa92 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
150} 150}
151EXPORT_SYMBOL(dma_get_sgtable_attrs); 151EXPORT_SYMBOL(dma_get_sgtable_attrs);
152 152
153#ifdef CONFIG_MMU
154/*
155 * Return the page attributes used for mapping dma_alloc_* memory, either in
156 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
157 */
158pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
159{
160 if (dev_is_dma_coherent(dev) ||
161 (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
162 (attrs & DMA_ATTR_NON_CONSISTENT)))
163 return prot;
164 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
165 return arch_dma_mmap_pgprot(dev, prot, attrs);
166 return pgprot_noncached(prot);
167}
168#endif /* CONFIG_MMU */
169
153/* 170/*
154 * Create userspace mapping for the DMA-coherent memory. 171 * Create userspace mapping for the DMA-coherent memory.
155 */ 172 */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
164 unsigned long pfn; 181 unsigned long pfn;
165 int ret = -ENXIO; 182 int ret = -ENXIO;
166 183
167 vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 184 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
168 185
169 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 186 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
170 return ret; 187 return ret;
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index a594aec07882..ffe78f0b2fe4 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
218 218
219 /* create a coherent mapping */ 219 /* create a coherent mapping */
220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP, 220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
221 arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs), 221 dma_pgprot(dev, PAGE_KERNEL, attrs),
222 __builtin_return_address(0)); 222 __builtin_return_address(0));
223 if (!ret) { 223 if (!ret) {
224 __dma_direct_free_pages(dev, size, page); 224 __dma_direct_free_pages(dev, size, page);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 9484e88dabc2..9be995fc3c5a 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -295,6 +295,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
295 } 295 }
296} 296}
297 297
298static void irq_sysfs_del(struct irq_desc *desc)
299{
300 /*
301 * If irq_sysfs_init() has not yet been invoked (early boot), then
302 * irq_kobj_base is NULL and the descriptor was never added.
303 * kobject_del() complains about an object with no parent, so make
304 * it conditional.
305 */
306 if (irq_kobj_base)
307 kobject_del(&desc->kobj);
308}
309
298static int __init irq_sysfs_init(void) 310static int __init irq_sysfs_init(void)
299{ 311{
300 struct irq_desc *desc; 312 struct irq_desc *desc;
@@ -325,6 +337,7 @@ static struct kobj_type irq_kobj_type = {
325}; 337};
326 338
327static void irq_sysfs_add(int irq, struct irq_desc *desc) {} 339static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
340static void irq_sysfs_del(struct irq_desc *desc) {}
328 341
329#endif /* CONFIG_SYSFS */ 342#endif /* CONFIG_SYSFS */
330 343
@@ -438,7 +451,7 @@ static void free_desc(unsigned int irq)
438 * The sysfs entry must be serialized against a concurrent 451 * The sysfs entry must be serialized against a concurrent
439 * irq_sysfs_init() as well. 452 * irq_sysfs_init() as well.
440 */ 453 */
441 kobject_del(&desc->kobj); 454 irq_sysfs_del(desc);
442 delete_irq_desc(irq); 455 delete_irq_desc(irq);
443 456
444 /* 457 /*
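
The new irq_sysfs_del() captures a small but recurring pattern: when registration is deferred to a late-init step, teardown must tolerate objects that were never registered. Reduced to a sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool sysfs_ready;        /* set once the late init has run */

static void obj_unregister(const char *name)
{
        /* deleting something that was never added would complain, so
         * make the delete conditional on the add having happened */
        if (sysfs_ready)
                printf("deleted %s\n", name);
}

int main(void)
{
        obj_unregister("irq42");        /* early boot: silently skip */
        sysfs_ready = true;
        obj_unregister("irq42");        /* normal path */
        return 0;
}
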
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 95a260f9214b..136ce049c4ad 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
263{ 263{
264 char namebuf[KSYM_NAME_LEN]; 264 char namebuf[KSYM_NAME_LEN];
265 265
266 if (is_ksym_addr(addr)) 266 if (is_ksym_addr(addr)) {
267 return !!get_symbol_pos(addr, symbolsize, offset); 267 get_symbol_pos(addr, symbolsize, offset);
268 return 1;
269 }
268 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || 270 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
269 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); 271 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
270} 272}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 9873fc627d61..d9770a5393c8 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -470,6 +470,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
470 */ 470 */
471static void do_optimize_kprobes(void) 471static void do_optimize_kprobes(void)
472{ 472{
473 lockdep_assert_held(&text_mutex);
473 /* 474 /*
474 * The optimization/unoptimization refers online_cpus via 475 * The optimization/unoptimization refers online_cpus via
475 * stop_machine() and cpu-hotplug modifies online_cpus. 476 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -487,9 +488,7 @@ static void do_optimize_kprobes(void)
487 list_empty(&optimizing_list)) 488 list_empty(&optimizing_list))
488 return; 489 return;
489 490
490 mutex_lock(&text_mutex);
491 arch_optimize_kprobes(&optimizing_list); 491 arch_optimize_kprobes(&optimizing_list);
492 mutex_unlock(&text_mutex);
493} 492}
494 493
495/* 494/*
@@ -500,6 +499,7 @@ static void do_unoptimize_kprobes(void)
500{ 499{
501 struct optimized_kprobe *op, *tmp; 500 struct optimized_kprobe *op, *tmp;
502 501
502 lockdep_assert_held(&text_mutex);
503 /* See comment in do_optimize_kprobes() */ 503 /* See comment in do_optimize_kprobes() */
504 lockdep_assert_cpus_held(); 504 lockdep_assert_cpus_held();
505 505
@@ -507,7 +507,6 @@ static void do_unoptimize_kprobes(void)
507 if (list_empty(&unoptimizing_list)) 507 if (list_empty(&unoptimizing_list))
508 return; 508 return;
509 509
510 mutex_lock(&text_mutex);
511 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 510 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
512 /* Loop free_list for disarming */ 511 /* Loop free_list for disarming */
513 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 512 list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -524,7 +523,6 @@ static void do_unoptimize_kprobes(void)
524 } else 523 } else
525 list_del_init(&op->list); 524 list_del_init(&op->list);
526 } 525 }
527 mutex_unlock(&text_mutex);
528} 526}
529 527
530/* Reclaim all kprobes on the free_list */ 528/* Reclaim all kprobes on the free_list */
@@ -556,6 +554,7 @@ static void kprobe_optimizer(struct work_struct *work)
556{ 554{
557 mutex_lock(&kprobe_mutex); 555 mutex_lock(&kprobe_mutex);
558 cpus_read_lock(); 556 cpus_read_lock();
557 mutex_lock(&text_mutex);
559 /* Lock modules while optimizing kprobes */ 558 /* Lock modules while optimizing kprobes */
560 mutex_lock(&module_mutex); 559 mutex_lock(&module_mutex);
561 560
@@ -583,6 +582,7 @@ static void kprobe_optimizer(struct work_struct *work)
583 do_free_cleaned_kprobes(); 582 do_free_cleaned_kprobes();
584 583
585 mutex_unlock(&module_mutex); 584 mutex_unlock(&module_mutex);
585 mutex_unlock(&text_mutex);
586 cpus_read_unlock(); 586 cpus_read_unlock();
587 mutex_unlock(&kprobe_mutex); 587 mutex_unlock(&kprobe_mutex);
588 588
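
The kprobes hunks hoist text_mutex from the two helpers into kprobe_optimizer(), so unoptimization and optimization happen inside one critical section, and each helper now documents its requirement with lockdep_assert_held(). A userspace sketch of the hoist, using a plain flag plus assert() where the kernel uses lockdep:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t text_lock = PTHREAD_MUTEX_INITIALIZER;
static bool text_lock_held;     /* poor man's lockdep for this sketch */

static void do_unoptimize(void)
{
        assert(text_lock_held);         /* mirrors lockdep_assert_held() */
        /* ... unpatch text ... */
}

static void do_optimize(void)
{
        assert(text_lock_held);
        /* ... patch text ... */
}

int main(void)
{
        pthread_mutex_lock(&text_lock);
        text_lock_held = true;
        /* both phases run inside one critical section, so no other
         * thread can observe the state between them */
        do_unoptimize();
        do_optimize();
        text_lock_held = false;
        pthread_mutex_unlock(&text_lock);
        return 0;
}
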
diff --git a/kernel/module.c b/kernel/module.c
index 5933395af9a0..9ee93421269c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -65,9 +65,9 @@
65/* 65/*
66 * Modules' sections will be aligned on page boundaries 66 * Modules' sections will be aligned on page boundaries
67 * to ensure complete separation of code and data, but 67 * to ensure complete separation of code and data, but
68 * only when CONFIG_STRICT_MODULE_RWX=y 68 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
69 */ 69 */
70#ifdef CONFIG_STRICT_MODULE_RWX 70#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
71# define debug_align(X) ALIGN(X, PAGE_SIZE) 71# define debug_align(X) ALIGN(X, PAGE_SIZE)
72#else 72#else
73# define debug_align(X) (X) 73# define debug_align(X) (X)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b037f195473..010d578118d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3904,7 +3904,7 @@ void __noreturn do_task_dead(void)
3904 3904
3905static inline void sched_submit_work(struct task_struct *tsk) 3905static inline void sched_submit_work(struct task_struct *tsk)
3906{ 3906{
3907 if (!tsk->state || tsk_is_pi_blocked(tsk)) 3907 if (!tsk->state)
3908 return; 3908 return;
3909 3909
3910 /* 3910 /*
@@ -3920,6 +3920,9 @@ static inline void sched_submit_work(struct task_struct *tsk)
3920 preempt_enable_no_resched(); 3920 preempt_enable_no_resched();
3921 } 3921 }
3922 3922
3923 if (tsk_is_pi_blocked(tsk))
3924 return;
3925
3923 /* 3926 /*
3924 * If we are going to sleep and we have plugged IO queued, 3927 * If we are going to sleep and we have plugged IO queued,
3925 * make sure to submit it to avoid deadlocks. 3928 * make sure to submit it to avoid deadlocks.
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 636ca6f88c8e..867b4bb6d4be 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -40,6 +40,7 @@ struct sugov_policy {
40 struct task_struct *thread; 40 struct task_struct *thread;
41 bool work_in_progress; 41 bool work_in_progress;
42 42
43 bool limits_changed;
43 bool need_freq_update; 44 bool need_freq_update;
44}; 45};
45 46
@@ -89,8 +90,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
89 !cpufreq_this_cpu_can_update(sg_policy->policy)) 90 !cpufreq_this_cpu_can_update(sg_policy->policy))
90 return false; 91 return false;
91 92
92 if (unlikely(sg_policy->need_freq_update)) 93 if (unlikely(sg_policy->limits_changed)) {
94 sg_policy->limits_changed = false;
95 sg_policy->need_freq_update = true;
93 return true; 96 return true;
97 }
94 98
95 delta_ns = time - sg_policy->last_freq_update_time; 99 delta_ns = time - sg_policy->last_freq_update_time;
96 100
@@ -437,7 +441,7 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
437static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) 441static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
438{ 442{
439 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) 443 if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
440 sg_policy->need_freq_update = true; 444 sg_policy->limits_changed = true;
441} 445}
442 446
443static void sugov_update_single(struct update_util_data *hook, u64 time, 447static void sugov_update_single(struct update_util_data *hook, u64 time,
@@ -457,7 +461,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
457 if (!sugov_should_update_freq(sg_policy, time)) 461 if (!sugov_should_update_freq(sg_policy, time))
458 return; 462 return;
459 463
460 busy = sugov_cpu_is_busy(sg_cpu); 464 /* Limits may have changed, don't skip frequency update */
465 busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
461 466
462 util = sugov_get_util(sg_cpu); 467 util = sugov_get_util(sg_cpu);
463 max = sg_cpu->max; 468 max = sg_cpu->max;
@@ -831,6 +836,7 @@ static int sugov_start(struct cpufreq_policy *policy)
831 sg_policy->last_freq_update_time = 0; 836 sg_policy->last_freq_update_time = 0;
832 sg_policy->next_freq = 0; 837 sg_policy->next_freq = 0;
833 sg_policy->work_in_progress = false; 838 sg_policy->work_in_progress = false;
839 sg_policy->limits_changed = false;
834 sg_policy->need_freq_update = false; 840 sg_policy->need_freq_update = false;
835 sg_policy->cached_raw_freq = 0; 841 sg_policy->cached_raw_freq = 0;
836 842
@@ -879,7 +885,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
879 mutex_unlock(&sg_policy->work_lock); 885 mutex_unlock(&sg_policy->work_lock);
880 } 886 }
881 887
882 sg_policy->need_freq_update = true; 888 sg_policy->limits_changed = true;
883} 889}
884 890
885struct cpufreq_governor schedutil_gov = { 891struct cpufreq_governor schedutil_gov = {
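
The schedutil fix splits one overloaded flag into two: sugov_limits() and the DL path only raise limits_changed, and sugov_should_update_freq() latches that into need_freq_update at the single decision point, so a limits change can no longer be lost when need_freq_update is cleared elsewhere. The handshake in isolation (single producer and consumer assumed, memory ordering elided):

#include <stdbool.h>
#include <stdio.h>

struct policy {
        bool limits_changed;    /* raised by the limits path */
        bool need_freq_update;  /* consumed by the update path */
};

static bool should_update(struct policy *p)
{
        if (p->limits_changed) {
                /* latch the external event into the local decision
                 * flag so a later clear cannot lose it */
                p->limits_changed = false;
                p->need_freq_update = true;
                return true;
        }
        return false;
}

int main(void)
{
        struct policy p = { .limits_changed = true };

        if (should_update(&p))
                printf("update forced, need_freq_update=%d\n",
                       p.need_freq_update);
        return 0;
}
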
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 23fbbcc414d5..6e52b67b420e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1131,7 +1131,15 @@ static void psi_trigger_destroy(struct kref *ref)
1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock 1131 * deadlock while waiting for psi_poll_work to acquire trigger_lock
1132 */ 1132 */
1133 if (kworker_to_destroy) { 1133 if (kworker_to_destroy) {
1134 /*
1135 * After the RCU grace period has expired, the worker
1136 * can no longer be found through group->poll_kworker.
1137 * But it might have been already scheduled before
1138 * that - deschedule it cleanly before destroying it.
1139 */
1134 kthread_cancel_delayed_work_sync(&group->poll_work); 1140 kthread_cancel_delayed_work_sync(&group->poll_work);
1141 atomic_set(&group->poll_scheduled, 0);
1142
1135 kthread_destroy_worker(kworker_to_destroy); 1143 kthread_destroy_worker(kworker_to_destroy);
1136 } 1144 }
1137 kfree(t); 1145 kfree(t);
diff --git a/kernel/signal.c b/kernel/signal.c
index e667be6907d7..534fec266a33 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -90,6 +90,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
90 handler == SIG_DFL && !(force && sig_kernel_only(sig))) 90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
91 return true; 91 return true;
92 92
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
96 return true;
97
93 return sig_handler_ignored(handler, sig); 98 return sig_handler_ignored(handler, sig);
94} 99}
95 100
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d911c8470149..ca69290bee2a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -146,6 +146,11 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 146static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
147{ 147{
148 tk->offs_boot = ktime_add(tk->offs_boot, delta); 148 tk->offs_boot = ktime_add(tk->offs_boot, delta);
149 /*
150 * Timespec representation for VDSO update to avoid 64bit division
151 * on every update.
152 */
153 tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
149} 154}
150 155
151/* 156/*
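
Caching a timespec64 form of offs_boot trades one division per sleep-time update, which is rare, for zero divisions on the hot VDSO update path. The same trade in a self-contained sketch:

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ts {
        uint64_t sec, nsec;
};

static uint64_t offs_boot_ns;
static struct ts monotonic_to_boot;     /* cached split form */

static void update_sleep_time(uint64_t delta_ns)
{
        offs_boot_ns += delta_ns;
        /* one division per (rare) offset update ... */
        monotonic_to_boot.sec = offs_boot_ns / NSEC_PER_SEC;
        monotonic_to_boot.nsec = offs_boot_ns % NSEC_PER_SEC;
}

int main(void)
{
        update_sleep_time(3400000000ULL);       /* slept 3.4s */
        /* ... so hot readers only add, never divide */
        printf("boot offset: %" PRIu64 ".%09" PRIu64 "\n",
               monotonic_to_boot.sec, monotonic_to_boot.nsec);
        return 0;
}
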
diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
index 8cf3596a4ce6..4bc37ac3bb05 100644
--- a/kernel/time/vsyscall.c
+++ b/kernel/time/vsyscall.c
@@ -17,7 +17,7 @@ static inline void update_vdso_data(struct vdso_data *vdata,
17 struct timekeeper *tk) 17 struct timekeeper *tk)
18{ 18{
19 struct vdso_timestamp *vdso_ts; 19 struct vdso_timestamp *vdso_ts;
20 u64 nsec; 20 u64 nsec, sec;
21 21
22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; 22 vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last;
23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; 23 vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask;
@@ -45,23 +45,27 @@ static inline void update_vdso_data(struct vdso_data *vdata,
45 } 45 }
46 vdso_ts->nsec = nsec; 46 vdso_ts->nsec = nsec;
47 47
48 /* CLOCK_MONOTONIC_RAW */ 48 /* Copy MONOTONIC time for BOOTTIME */
49 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; 49 sec = vdso_ts->sec;
50 vdso_ts->sec = tk->raw_sec; 50 /* Add the boot offset */
51 vdso_ts->nsec = tk->tkr_raw.xtime_nsec; 51 sec += tk->monotonic_to_boot.tv_sec;
52 nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
52 53
53 /* CLOCK_BOOTTIME */ 54 /* CLOCK_BOOTTIME */
54 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; 55 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
55 vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 56 vdso_ts->sec = sec;
56 nsec = tk->tkr_mono.xtime_nsec; 57
57 nsec += ((u64)(tk->wall_to_monotonic.tv_nsec +
58 ktime_to_ns(tk->offs_boot)) << tk->tkr_mono.shift);
59 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { 58 while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
60 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); 59 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
61 vdso_ts->sec++; 60 vdso_ts->sec++;
62 } 61 }
63 vdso_ts->nsec = nsec; 62 vdso_ts->nsec = nsec;
64 63
64 /* CLOCK_MONOTONIC_RAW */
65 vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
66 vdso_ts->sec = tk->raw_sec;
67 vdso_ts->nsec = tk->tkr_raw.xtime_nsec;
68
65 /* CLOCK_TAI */ 69 /* CLOCK_TAI */
66 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; 70 vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
67 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; 71 vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
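
The rewritten update_vdso_data() derives CLOCK_BOOTTIME from the already-computed MONOTONIC values plus the cached boot offset, then carries whole seconds out of the nanosecond field, which is kept left-shifted exactly as in the timekeeper. A standalone sketch of that carry normalization, with an arbitrary shift of 8:

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        unsigned int shift = 8;
        uint64_t sec = 100;                     /* MONOTONIC seconds */
        uint64_t nsec = 900000000ULL << shift;  /* shifted nanoseconds */

        /* add the boot offset (e.g. 3.4s spent suspended) */
        sec += 3;
        nsec += 400000000ULL << shift;

        /* normalize: carry whole seconds out of the shifted nsec */
        while (nsec >= (NSEC_PER_SEC << shift)) {
                nsec -= NSEC_PER_SEC << shift;
                sec++;
        }
        printf("BOOTTIME = %" PRIu64 ".%09" PRIu64 "\n",
               sec, nsec >> shift);
        return 0;
}

This prints 104.300000000: the 1.3 shifted seconds of accumulated nanoseconds yield one carry into the seconds field.
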
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca34503f178..f9821a3374e9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3095,6 +3095,14 @@ t_probe_next(struct seq_file *m, loff_t *pos)
3095 hnd = &iter->probe_entry->hlist; 3095 hnd = &iter->probe_entry->hlist;
3096 3096
3097 hash = iter->probe->ops.func_hash->filter_hash; 3097 hash = iter->probe->ops.func_hash->filter_hash;
3098
3099 /*
3100 * A probe being registered may temporarily have an empty hash
3101 * and it's at the end of the func_probes list.
3102 */
3103 if (!hash || hash == EMPTY_HASH)
3104 return NULL;
3105
3098 size = 1 << hash->size_bits; 3106 size = 1 << hash->size_bits;
3099 3107
3100 retry: 3108 retry:
@@ -4320,12 +4328,21 @@ register_ftrace_function_probe(char *glob, struct trace_array *tr,
4320 4328
4321 mutex_unlock(&ftrace_lock); 4329 mutex_unlock(&ftrace_lock);
4322 4330
4331 /*
4332 * Note, there's a small window here that the func_hash->filter_hash
4333 * may be NULL or empty. Need to be careful when reading the loop.
4334 */
4323 mutex_lock(&probe->ops.func_hash->regex_lock); 4335 mutex_lock(&probe->ops.func_hash->regex_lock);
4324 4336
4325 orig_hash = &probe->ops.func_hash->filter_hash; 4337 orig_hash = &probe->ops.func_hash->filter_hash;
4326 old_hash = *orig_hash; 4338 old_hash = *orig_hash;
4327 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4339 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4328 4340
4341 if (!hash) {
4342 ret = -ENOMEM;
4343 goto out;
4344 }
4345
4329 ret = ftrace_match_records(hash, glob, strlen(glob)); 4346 ret = ftrace_match_records(hash, glob, strlen(glob));
4330 4347
4331 /* Nothing found? */ 4348 /* Nothing found? */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 525a97fbbc60..563e80f9006a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1567,9 +1567,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1567 1567
1568/** 1568/**
1569 * update_max_tr_single - only copy one trace over, and reset the rest 1569 * update_max_tr_single - only copy one trace over, and reset the rest
1570 * @tr - tracer 1570 * @tr: tracer
1571 * @tsk - task with the latency 1571 * @tsk: task with the latency
1572 * @cpu - the cpu of the buffer to copy. 1572 * @cpu: the cpu of the buffer to copy.
1573 * 1573 *
1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr. 1574 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1575 */ 1575 */
@@ -1767,7 +1767,7 @@ static void __init apply_trace_boot_options(void);
1767 1767
1768/** 1768/**
1769 * register_tracer - register a tracer with the ftrace system. 1769 * register_tracer - register a tracer with the ftrace system.
1770 * @type - the plugin for the tracer 1770 * @type: the plugin for the tracer
1771 * 1771 *
1772 * Register a new plugin tracer. 1772 * Register a new plugin tracer.
1773 */ 1773 */
@@ -2230,9 +2230,9 @@ static bool tracing_record_taskinfo_skip(int flags)
2230/** 2230/**
2231 * tracing_record_taskinfo - record the task info of a task 2231 * tracing_record_taskinfo - record the task info of a task
2232 * 2232 *
2233 * @task - task to record 2233 * @task: task to record
2234 * @flags - TRACE_RECORD_CMDLINE for recording comm 2234 * @flags: TRACE_RECORD_CMDLINE for recording comm
2235 * - TRACE_RECORD_TGID for recording tgid 2235 * TRACE_RECORD_TGID for recording tgid
2236 */ 2236 */
2237void tracing_record_taskinfo(struct task_struct *task, int flags) 2237void tracing_record_taskinfo(struct task_struct *task, int flags)
2238{ 2238{
@@ -2258,10 +2258,10 @@ void tracing_record_taskinfo(struct task_struct *task, int flags)
2258/** 2258/**
2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch 2259 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2260 * 2260 *
2261 * @prev - previous task during sched_switch 2261 * @prev: previous task during sched_switch
2262 * @next - next task during sched_switch 2262 * @next: next task during sched_switch
2263 * @flags - TRACE_RECORD_CMDLINE for recording comm 2263 * @flags: TRACE_RECORD_CMDLINE for recording comm
2264 * TRACE_RECORD_TGID for recording tgid 2264 * TRACE_RECORD_TGID for recording tgid
2265 */ 2265 */
2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev, 2266void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2267 struct task_struct *next, int flags) 2267 struct task_struct *next, int flags)
@@ -3072,7 +3072,9 @@ static void trace_printk_start_stop_comm(int enabled)
3072 3072
3073/** 3073/**
3074 * trace_vbprintk - write binary msg to tracing buffer 3074 * trace_vbprintk - write binary msg to tracing buffer
3075 * 3075 * @ip: The address of the caller
3076 * @fmt: The string format to write to the buffer
3077 * @args: Arguments for @fmt
3076 */ 3078 */
3077int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 3079int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3078{ 3080{
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c7506bc81b75..648930823b57 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -787,7 +787,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
787 return ret; 787 return ret;
788} 788}
789 789
790static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set) 790int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
791{ 791{
792 char *event = NULL, *sub = NULL, *match; 792 char *event = NULL, *sub = NULL, *match;
793 int ret; 793 int ret;
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index dbef0d135075..fb6bfbc5bf86 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -895,7 +895,8 @@ void trace_probe_cleanup(struct trace_probe *tp)
895 for (i = 0; i < tp->nr_args; i++) 895 for (i = 0; i < tp->nr_args; i++)
896 traceprobe_free_probe_arg(&tp->args[i]); 896 traceprobe_free_probe_arg(&tp->args[i]);
897 897
898 kfree(call->class->system); 898 if (call->class)
899 kfree(call->class->system);
899 kfree(call->name); 900 kfree(call->name);
900 kfree(call->print_fmt); 901 kfree(call->print_fmt);
901} 902}
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 117ad0e7fbf4..70dab9ac7827 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -68,7 +68,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
68{ 68{
69 size /= esize; 69 size /= esize;
70 70
71 size = roundup_pow_of_two(size); 71 if (!is_power_of_2(size))
72 size = rounddown_pow_of_two(size);
72 73
73 fifo->in = 0; 74 fifo->in = 0;
74 fifo->out = 0; 75 fifo->out = 0;
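
The kfifo change is subtle but important: the ring masks its indices with in & (size - 1), so size must be a power of two no larger than the storage the caller actually provided. Rounding up, as before, would let the mask address slots past the end of the buffer; rounding down stays inside it. A small demonstration, with rounddown_pow_of_two() reimplemented locally for the sketch:

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int n)
{
        unsigned int p = 1;

        while (n >>= 1)         /* largest power of two <= n, n >= 1 */
                p <<= 1;
        return p;
}

int main(void)
{
        unsigned int have = 100;        /* caller handed us 100 slots */
        unsigned int size = rounddown_pow_of_two(have);

        /* mask-based wraparound now stays within the 100 slots */
        printf("usable size %u, index 130 wraps to %u\n",
               size, 130 & (size - 1));
        return 0;
}
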
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index feea48fd1a0d..905027574e5d 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -35,7 +35,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
35 struct logic_pio_hwaddr *range; 35 struct logic_pio_hwaddr *range;
36 resource_size_t start; 36 resource_size_t start;
37 resource_size_t end; 37 resource_size_t end;
38 resource_size_t mmio_sz = 0; 38 resource_size_t mmio_end = 0;
39 resource_size_t iio_sz = MMIO_UPPER_LIMIT; 39 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
40 int ret = 0; 40 int ret = 0;
41 41
@@ -46,7 +46,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
46 end = new_range->hw_start + new_range->size; 46 end = new_range->hw_start + new_range->size;
47 47
48 mutex_lock(&io_range_mutex); 48 mutex_lock(&io_range_mutex);
49 list_for_each_entry_rcu(range, &io_range_list, list) { 49 list_for_each_entry(range, &io_range_list, list) {
50 if (range->fwnode == new_range->fwnode) { 50 if (range->fwnode == new_range->fwnode) {
51 /* range already there */ 51 /* range already there */
52 goto end_register; 52 goto end_register;
@@ -56,7 +56,7 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
56 /* for MMIO ranges we need to check for overlap */ 56 /* for MMIO ranges we need to check for overlap */
57 if (start >= range->hw_start + range->size || 57 if (start >= range->hw_start + range->size ||
58 end < range->hw_start) { 58 end < range->hw_start) {
59 mmio_sz += range->size; 59 mmio_end = range->io_start + range->size;
60 } else { 60 } else {
61 ret = -EFAULT; 61 ret = -EFAULT;
62 goto end_register; 62 goto end_register;
@@ -69,16 +69,16 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
69 69
70 /* range not registered yet, check for available space */ 70 /* range not registered yet, check for available space */
71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) { 71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
72 if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) { 72 if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
73 /* if it's too big check if 64K space can be reserved */ 73 /* if it's too big check if 64K space can be reserved */
74 if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) { 74 if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
75 ret = -E2BIG; 75 ret = -E2BIG;
76 goto end_register; 76 goto end_register;
77 } 77 }
78 new_range->size = SZ_64K; 78 new_range->size = SZ_64K;
79 pr_warn("Requested IO range too big, new size set to 64K\n"); 79 pr_warn("Requested IO range too big, new size set to 64K\n");
80 } 80 }
81 new_range->io_start = mmio_sz; 81 new_range->io_start = mmio_end;
82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) { 82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) { 83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
84 ret = -E2BIG; 84 ret = -E2BIG;
@@ -99,6 +99,20 @@ end_register:
99} 99}
100 100
101/** 101/**
102 * logic_pio_unregister_range - unregister a logical PIO range for a host
103 * @range: pointer to the IO range which has been already registered.
104 *
105 * Unregister a previously-registered IO range node.
106 */
107void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
108{
109 mutex_lock(&io_range_mutex);
110 list_del_rcu(&range->list);
111 mutex_unlock(&io_range_mutex);
112 synchronize_rcu();
113}
114
115/**
102 * find_io_range_by_fwnode - find logical PIO range for given FW node 116 * find_io_range_by_fwnode - find logical PIO range for given FW node
103 * @fwnode: FW node handle associated with logical PIO range 117 * @fwnode: FW node handle associated with logical PIO range
104 * 118 *
@@ -108,26 +122,38 @@ end_register:
108 */ 122 */
109struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode) 123struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
110{ 124{
111 struct logic_pio_hwaddr *range; 125 struct logic_pio_hwaddr *range, *found_range = NULL;
112 126
127 rcu_read_lock();
113 list_for_each_entry_rcu(range, &io_range_list, list) { 128 list_for_each_entry_rcu(range, &io_range_list, list) {
114 if (range->fwnode == fwnode) 129 if (range->fwnode == fwnode) {
115 return range; 130 found_range = range;
131 break;
132 }
116 } 133 }
117 return NULL; 134 rcu_read_unlock();
135
136 return found_range;
118} 137}
119 138
120/* Return a registered range given an input PIO token */ 139/* Return a registered range given an input PIO token */
121static struct logic_pio_hwaddr *find_io_range(unsigned long pio) 140static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
122{ 141{
123 struct logic_pio_hwaddr *range; 142 struct logic_pio_hwaddr *range, *found_range = NULL;
124 143
144 rcu_read_lock();
125 list_for_each_entry_rcu(range, &io_range_list, list) { 145 list_for_each_entry_rcu(range, &io_range_list, list) {
126 if (in_range(pio, range->io_start, range->size)) 146 if (in_range(pio, range->io_start, range->size)) {
127 return range; 147 found_range = range;
148 break;
149 }
128 } 150 }
129 pr_err("PIO entry token %lx invalid\n", pio); 151 rcu_read_unlock();
130 return NULL; 152
153 if (!found_range)
154 pr_err("PIO entry token 0x%lx invalid\n", pio);
155
156 return found_range;
131} 157}
132 158
133/** 159/**
@@ -180,14 +206,23 @@ unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
180{ 206{
181 struct logic_pio_hwaddr *range; 207 struct logic_pio_hwaddr *range;
182 208
209 rcu_read_lock();
183 list_for_each_entry_rcu(range, &io_range_list, list) { 210 list_for_each_entry_rcu(range, &io_range_list, list) {
184 if (range->flags != LOGIC_PIO_CPU_MMIO) 211 if (range->flags != LOGIC_PIO_CPU_MMIO)
185 continue; 212 continue;
186 if (in_range(addr, range->hw_start, range->size)) 213 if (in_range(addr, range->hw_start, range->size)) {
187 return addr - range->hw_start + range->io_start; 214 unsigned long cpuaddr;
215
216 cpuaddr = addr - range->hw_start + range->io_start;
217
218 rcu_read_unlock();
219 return cpuaddr;
220 }
188 } 221 }
189 pr_err("addr %llx not registered in io_range_list\n", 222 rcu_read_unlock();
190 (unsigned long long) addr); 223
224 pr_err("addr %pa not registered in io_range_list\n", &addr);
225
191 return ~0UL; 226 return ~0UL;
192} 227}
193 228
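
All the logic_pio hunks enforce one rule: every walk of io_range_list happens between rcu_read_lock() and rcu_read_unlock(), and the new unregister path pairs list_del_rcu() with synchronize_rcu() so a range is only reclaimed after readers drain. Plain C has no portable RCU, so the sketch below models the same discipline with a rwlock, where taking it exclusively plays the role of synchronize_rcu():

#include <pthread.h>
#include <stdio.h>

struct range {
        unsigned long start, size;
        struct range *next;
};

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct range r1 = { 0x1000, 0x100, NULL };
static struct range *head = &r1;

static struct range *find_range(unsigned long addr)
{
        struct range *r, *found = NULL;

        pthread_rwlock_rdlock(&list_lock);      /* ~rcu_read_lock() */
        for (r = head; r; r = r->next) {
                if (addr - r->start < r->size) {
                        found = r;
                        break;
                }
        }
        pthread_rwlock_unlock(&list_lock);      /* ~rcu_read_unlock() */
        return found;
}

static void unregister_range(struct range *victim)
{
        struct range **pp;

        pthread_rwlock_wrlock(&list_lock);      /* ~synchronize_rcu() */
        for (pp = &head; *pp; pp = &(*pp)->next) {
                if (*pp == victim) {
                        *pp = victim->next;     /* ~list_del_rcu() */
                        break;
                }
        }
        pthread_rwlock_unlock(&list_lock);
}

int main(void)
{
        printf("found: %p\n", (void *)find_range(0x1010));
        unregister_range(&r1);
        printf("after unregister: %p\n", (void *)find_range(0x1010));
        return 0;
}
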
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1334ede667a8..de1f15969e27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -32,6 +32,7 @@
32#include <linux/shmem_fs.h> 32#include <linux/shmem_fs.h>
33#include <linux/oom.h> 33#include <linux/oom.h>
34#include <linux/numa.h> 34#include <linux/numa.h>
35#include <linux/page_owner.h>
35 36
36#include <asm/tlb.h> 37#include <asm/tlb.h>
37#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
@@ -644,30 +645,40 @@ release:
644 * available 645 * available
645 * never: never stall for any thp allocation 646 * never: never stall for any thp allocation
646 */ 647 */
647static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) 648static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
648{ 649{
649 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); 650 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
651 gfp_t this_node = 0;
652
653#ifdef CONFIG_NUMA
654 struct mempolicy *pol;
655 /*
656 * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
657 * specified, to express a general desire to stay on the current
658 * node for optimistic allocation attempts. If the defrag mode
659 * and/or madvise hint requires the direct reclaim then we prefer
660 * to fall back to another node rather than node reclaim because that
661 * can lead to excessive reclaim even though there is free memory
662 * on other nodes. We expect that NUMA preferences are specified
663 * by memory policies.
664 */
665 pol = get_vma_policy(vma, addr);
666 if (pol->mode != MPOL_BIND)
667 this_node = __GFP_THISNODE;
668 mpol_cond_put(pol);
669#endif
650 670
651 /* Always do synchronous compaction */
652 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags)) 671 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
653 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY); 672 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
654
655 /* Kick kcompactd and fail quickly */
656 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags)) 673 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
657 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM; 674 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
658
659 /* Synchronous compaction if madvised, otherwise kick kcompactd */
660 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags)) 675 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
661 return GFP_TRANSHUGE_LIGHT | 676 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
662 (vma_madvised ? __GFP_DIRECT_RECLAIM : 677 __GFP_KSWAPD_RECLAIM | this_node);
663 __GFP_KSWAPD_RECLAIM);
664
665 /* Only do synchronous compaction if madvised */
666 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags)) 678 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
667 return GFP_TRANSHUGE_LIGHT | 679 return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
668 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0); 680 this_node);
669 681 return GFP_TRANSHUGE_LIGHT | this_node;
670 return GFP_TRANSHUGE_LIGHT;
671} 682}
672 683
673/* Caller must hold page table lock. */ 684/* Caller must hold page table lock. */
@@ -739,8 +750,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
739 pte_free(vma->vm_mm, pgtable); 750 pte_free(vma->vm_mm, pgtable);
740 return ret; 751 return ret;
741 } 752 }
742 gfp = alloc_hugepage_direct_gfpmask(vma); 753 gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
743 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); 754 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
744 if (unlikely(!page)) { 755 if (unlikely(!page)) {
745 count_vm_event(THP_FAULT_FALLBACK); 756 count_vm_event(THP_FAULT_FALLBACK);
746 return VM_FAULT_FALLBACK; 757 return VM_FAULT_FALLBACK;
@@ -1347,8 +1358,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
1347alloc: 1358alloc:
1348 if (__transparent_hugepage_enabled(vma) && 1359 if (__transparent_hugepage_enabled(vma) &&
1349 !transparent_hugepage_debug_cow()) { 1360 !transparent_hugepage_debug_cow()) {
1350 huge_gfp = alloc_hugepage_direct_gfpmask(vma); 1361 huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
1351 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); 1362 new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
1363 haddr, numa_node_id());
1352 } else 1364 } else
1353 new_page = NULL; 1365 new_page = NULL;
1354 1366
@@ -2505,6 +2517,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2505 } 2517 }
2506 2518
2507 ClearPageCompound(head); 2519 ClearPageCompound(head);
2520
2521 split_page_owner(head, HPAGE_PMD_ORDER);
2522
2508 /* See comment in __split_huge_page_tail() */ 2523 /* See comment in __split_huge_page_tail() */
2509 if (PageAnon(head)) { 2524 if (PageAnon(head)) {
2510 /* Additional pin to swap cache */ 2525 /* Additional pin to swap cache */
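
The new gfp selection asks the allocator to stay on the local node (__GFP_THISNODE) only when the task's mempolicy is not MPOL_BIND, and, per the comment in the hunk, never combines that with direct reclaim so a madvised fault can still fall back to other nodes. A toy version of the flag logic, using made-up flag values rather than the real gfp.h bits:

#include <stdio.h>

#define GFP_THISNODE    0x1
#define GFP_DIRECT      0x2
#define GFP_KSWAPD      0x4

enum pol { POL_DEFAULT, POL_BIND };

static unsigned int hugepage_gfp(enum pol mode, int madvised)
{
        unsigned int this_node = (mode != POL_BIND) ? GFP_THISNODE : 0;

        /* madvised mappings may direct-reclaim, but must then be free
         * to fall back to other nodes, so THISNODE is dropped */
        if (madvised)
                return GFP_DIRECT;
        return GFP_KSWAPD | this_node;
}

int main(void)
{
        printf("default: %#x\n", hugepage_gfp(POL_DEFAULT, 0));
        printf("bound:   %#x\n", hugepage_gfp(POL_BIND, 0));
        printf("madvise: %#x\n", hugepage_gfp(POL_DEFAULT, 1));
        return 0;
}
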
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ede7e7f5d1ab..6d7296dd11b8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3856,6 +3856,25 @@ retry:
3856 3856
3857 page = alloc_huge_page(vma, haddr, 0); 3857 page = alloc_huge_page(vma, haddr, 0);
3858 if (IS_ERR(page)) { 3858 if (IS_ERR(page)) {
3859 /*
3860 * Returning error will result in faulting task being
3861 * sent SIGBUS. The hugetlb fault mutex prevents two
3862 * tasks from racing to fault in the same page which
3863 * could result in false unable to allocate errors.
3864 * Page migration does not take the fault mutex, but
3865 * does a clear then write of pte's under page table
3866 * lock. Page fault code could race with migration,
3867 * notice the clear pte and try to allocate a page
3868 * here. Before returning error, get ptl and make
3869 * sure there really is no pte entry.
3870 */
3871 ptl = huge_pte_lock(h, mm, ptep);
3872 if (!huge_pte_none(huge_ptep_get(ptep))) {
3873 ret = 0;
3874 spin_unlock(ptl);
3875 goto out;
3876 }
3877 spin_unlock(ptl);
3859 ret = vmf_error(PTR_ERR(page)); 3878 ret = vmf_error(PTR_ERR(page));
3860 goto out; 3879 goto out;
3861 } 3880 }
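
The hugetlb fix is a textbook re-check-under-lock: before converting an allocation failure into SIGBUS, take the page-table lock and see whether a racing path (page migration, which clears and rewrites ptes under that lock) already populated the entry. The pattern stripped to its skeleton, with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static void *entry;                     /* stand-in for the pte */

static void *try_alloc(void)
{
        return NULL;                    /* simulate allocation failure */
}

static int fault_in(void)
{
        void *page = try_alloc();

        if (!page) {
                int ret = -1;           /* would become SIGBUS */

                pthread_mutex_lock(&ptl);
                if (entry)              /* someone else faulted it in */
                        ret = 0;
                pthread_mutex_unlock(&ptl);
                return ret;
        }
        /* ... install the page ... */
        return 0;
}

int main(void)
{
        printf("first attempt: %d\n", fault_in());
        entry = (void *)1;              /* migration wrote the entry */
        printf("after race: %d\n", fault_in());
        return 0;
}
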
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 2277b82902d8..95d16a42db6b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -407,8 +407,14 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
407 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 407 if (IS_ENABLED(CONFIG_KASAN_GENERIC))
408 return shadow_byte < 0 || 408 return shadow_byte < 0 ||
409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE; 409 shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
410 else 410
411 return tag != (u8)shadow_byte; 411 /* else CONFIG_KASAN_SW_TAGS: */
412 if ((u8)shadow_byte == KASAN_TAG_INVALID)
413 return true;
414 if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
415 return true;
416
417 return false;
412} 418}
413 419
414static bool __kasan_slab_free(struct kmem_cache *cache, void *object, 420static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 6e9e8cca663e..f6e602918dac 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1966,6 +1966,7 @@ static void kmemleak_disable(void)
1966 1966
1967 /* stop any memory operation tracing */ 1967 /* stop any memory operation tracing */
1968 kmemleak_enabled = 0; 1968 kmemleak_enabled = 0;
1969 kmemleak_early_log = 0;
1969 1970
1970 /* check whether it is too early for a kernel thread */ 1971 /* check whether it is too early for a kernel thread */
1971 if (kmemleak_initialized) 1972 if (kmemleak_initialized)
@@ -2009,7 +2010,6 @@ void __init kmemleak_init(void)
2009 2010
2010#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF 2011#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2011 if (!kmemleak_skip_disable) { 2012 if (!kmemleak_skip_disable) {
2012 kmemleak_early_log = 0;
2013 kmemleak_disable(); 2013 kmemleak_disable();
2014 return; 2014 return;
2015 } 2015 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cdbb7a84cb6e..9ec5e12486a7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -752,15 +752,13 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
752 /* Update memcg */ 752 /* Update memcg */
753 __mod_memcg_state(memcg, idx, val); 753 __mod_memcg_state(memcg, idx, val);
754 754
755 /* Update lruvec */
756 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
757
755 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); 758 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
756 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { 759 if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
757 struct mem_cgroup_per_node *pi; 760 struct mem_cgroup_per_node *pi;
758 761
759 /*
760 * Batch local counters to keep them in sync with
761 * the hierarchical ones.
762 */
763 __this_cpu_add(pn->lruvec_stat_local->count[idx], x);
764 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id)) 762 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
765 atomic_long_add(x, &pi->lruvec_stat[idx]); 763 atomic_long_add(x, &pi->lruvec_stat[idx]);
766 x = 0; 764 x = 0;
@@ -768,6 +766,26 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
768 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); 766 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
769} 767}
770 768
769void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
770{
771 struct page *page = virt_to_head_page(p);
772 pg_data_t *pgdat = page_pgdat(page);
773 struct mem_cgroup *memcg;
774 struct lruvec *lruvec;
775
776 rcu_read_lock();
777 memcg = memcg_from_slab_page(page);
778
779 /* Untracked pages have no memcg, no lruvec. Update only the node */
780 if (!memcg || memcg == root_mem_cgroup) {
781 __mod_node_page_state(pgdat, idx, val);
782 } else {
783 lruvec = mem_cgroup_lruvec(pgdat, memcg);
784 __mod_lruvec_state(lruvec, idx, val);
785 }
786 rcu_read_unlock();
787}
788
771/** 789/**
772 * __count_memcg_events - account VM events in a cgroup 790 * __count_memcg_events - account VM events in a cgroup
773 * @memcg: the memory cgroup 791 * @memcg: the memory cgroup
@@ -1130,26 +1148,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
1130 css_put(&prev->css); 1148 css_put(&prev->css);
1131} 1149}
1132 1150
1133static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) 1151static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1152 struct mem_cgroup *dead_memcg)
1134{ 1153{
1135 struct mem_cgroup *memcg = dead_memcg;
1136 struct mem_cgroup_reclaim_iter *iter; 1154 struct mem_cgroup_reclaim_iter *iter;
1137 struct mem_cgroup_per_node *mz; 1155 struct mem_cgroup_per_node *mz;
1138 int nid; 1156 int nid;
1139 int i; 1157 int i;
1140 1158
1141 for (; memcg; memcg = parent_mem_cgroup(memcg)) { 1159 for_each_node(nid) {
1142 for_each_node(nid) { 1160 mz = mem_cgroup_nodeinfo(from, nid);
1143 mz = mem_cgroup_nodeinfo(memcg, nid); 1161 for (i = 0; i <= DEF_PRIORITY; i++) {
1144 for (i = 0; i <= DEF_PRIORITY; i++) { 1162 iter = &mz->iter[i];
1145 iter = &mz->iter[i]; 1163 cmpxchg(&iter->position,
1146 cmpxchg(&iter->position, 1164 dead_memcg, NULL);
1147 dead_memcg, NULL);
1148 }
1149 } 1165 }
1150 } 1166 }
1151} 1167}
1152 1168
1169static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1170{
1171 struct mem_cgroup *memcg = dead_memcg;
1172 struct mem_cgroup *last;
1173
1174 do {
1175 __invalidate_reclaim_iterators(memcg, dead_memcg);
1176 last = memcg;
1177 } while ((memcg = parent_mem_cgroup(memcg)));
1178
1179 /*
1180 * When cgroup1 non-hierarchy mode is used,
1181 * parent_mem_cgroup() does not walk all the way up to the
1182 * cgroup root (root_mem_cgroup). So we have to handle
1183 * dead_memcg from cgroup root separately.
1184 */
1185 if (last != root_mem_cgroup)
1186 __invalidate_reclaim_iterators(root_mem_cgroup,
1187 dead_memcg);
1188}
1189
1153/** 1190/**
1154 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy 1191 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1155 * @memcg: hierarchy root 1192 * @memcg: hierarchy root
@@ -3221,6 +3258,72 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3221 } 3258 }
3222} 3259}
3223 3260
3261static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg, bool slab_only)
3262{
3263 unsigned long stat[MEMCG_NR_STAT];
3264 struct mem_cgroup *mi;
3265 int node, cpu, i;
3266 int min_idx, max_idx;
3267
3268 if (slab_only) {
3269 min_idx = NR_SLAB_RECLAIMABLE;
3270 max_idx = NR_SLAB_UNRECLAIMABLE;
3271 } else {
3272 min_idx = 0;
3273 max_idx = MEMCG_NR_STAT;
3274 }
3275
3276 for (i = min_idx; i < max_idx; i++)
3277 stat[i] = 0;
3278
3279 for_each_online_cpu(cpu)
3280 for (i = min_idx; i < max_idx; i++)
3281 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu);
3282
3283 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3284 for (i = min_idx; i < max_idx; i++)
3285 atomic_long_add(stat[i], &mi->vmstats[i]);
3286
3287 if (!slab_only)
3288 max_idx = NR_VM_NODE_STAT_ITEMS;
3289
3290 for_each_node(node) {
3291 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3292 struct mem_cgroup_per_node *pi;
3293
3294 for (i = min_idx; i < max_idx; i++)
3295 stat[i] = 0;
3296
3297 for_each_online_cpu(cpu)
3298 for (i = min_idx; i < max_idx; i++)
3299 stat[i] += per_cpu(
3300 pn->lruvec_stat_cpu->count[i], cpu);
3301
3302 for (pi = pn; pi; pi = parent_nodeinfo(pi, node))
3303 for (i = min_idx; i < max_idx; i++)
3304 atomic_long_add(stat[i], &pi->lruvec_stat[i]);
3305 }
3306}
3307
3308static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg)
3309{
3310 unsigned long events[NR_VM_EVENT_ITEMS];
3311 struct mem_cgroup *mi;
3312 int cpu, i;
3313
3314 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3315 events[i] = 0;
3316
3317 for_each_online_cpu(cpu)
3318 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3319 events[i] += per_cpu(memcg->vmstats_percpu->events[i],
3320 cpu);
3321
3322 for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
3323 for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
3324 atomic_long_add(events[i], &mi->vmevents[i]);
3325}
3326
3224#ifdef CONFIG_MEMCG_KMEM 3327#ifdef CONFIG_MEMCG_KMEM
3225static int memcg_online_kmem(struct mem_cgroup *memcg) 3328static int memcg_online_kmem(struct mem_cgroup *memcg)
3226{ 3329{
@@ -3270,7 +3373,14 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
3270 if (!parent) 3373 if (!parent)
3271 parent = root_mem_cgroup; 3374 parent = root_mem_cgroup;
3272 3375
3376 /*
3377 * Deactivate and reparent kmem_caches. Then flush percpu
3378 * slab statistics to have precise values at the parent and
3379 * all ancestor levels. It's required to keep slab stats
3380 * accurate after the reparenting of kmem_caches.
3381 */
3273 memcg_deactivate_kmem_caches(memcg, parent); 3382 memcg_deactivate_kmem_caches(memcg, parent);
3383 memcg_flush_percpu_vmstats(memcg, true);
3274 3384
3275 kmemcg_id = memcg->kmemcg_id; 3385 kmemcg_id = memcg->kmemcg_id;
3276 BUG_ON(kmemcg_id < 0); 3386 BUG_ON(kmemcg_id < 0);
@@ -4643,6 +4753,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
4643{ 4753{
4644 int node; 4754 int node;
4645 4755
4756 /*
4757 * Flush percpu vmstats and vmevents to guarantee the value correctness
4758 * on parent's and all ancestor levels.
4759 */
4760 memcg_flush_percpu_vmstats(memcg, false);
4761 memcg_flush_percpu_vmevents(memcg);
4646 for_each_node(node) 4762 for_each_node(node)
4647 free_mem_cgroup_per_node_info(memcg, node); 4763 free_mem_cgroup_per_node_info(memcg, node);
4648 free_percpu(memcg->vmstats_percpu); 4764 free_percpu(memcg->vmstats_percpu);
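
Both new flush helpers follow the same shape: sum a per-CPU counter across CPUs, then add the sum at every level from the dying cgroup up to the root, so the data survives once the per-CPU storage is freed. The propagation loop reduced to its essentials:

#include <stdio.h>

#define NR_CPUS 4
#define NR_STAT 2

struct group {
        struct group *parent;
        long percpu[NR_CPUS][NR_STAT];
        long total[NR_STAT];
};

static void flush_percpu(struct group *g)
{
        long sum[NR_STAT] = { 0 };
        struct group *mi;
        int cpu, i;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                for (i = 0; i < NR_STAT; i++)
                        sum[i] += g->percpu[cpu][i];

        /* propagate to this group and every ancestor */
        for (mi = g; mi; mi = mi->parent)
                for (i = 0; i < NR_STAT; i++)
                        mi->total[i] += sum[i];
}

int main(void)
{
        struct group root = { 0 };
        struct group child = { .parent = &root };

        child.percpu[0][0] = 5;
        child.percpu[3][0] = 7;
        flush_percpu(&child);
        printf("child=%ld root=%ld\n", child.total[0], root.total[0]);
        return 0;
}
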
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f48693f75b37..65e0874fce17 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -403,7 +403,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
403 }, 403 },
404}; 404};
405 405
406static void migrate_page_add(struct page *page, struct list_head *pagelist, 406static int migrate_page_add(struct page *page, struct list_head *pagelist,
407 unsigned long flags); 407 unsigned long flags);
408 408
409struct queue_pages { 409struct queue_pages {
@@ -429,11 +429,14 @@ static inline bool queue_pages_required(struct page *page,
429} 429}
430 430
431/* 431/*
432 * queue_pages_pmd() has three possible return values: 432 * queue_pages_pmd() has four possible return values:
433 * 1 - pages are placed on the right node or queued successfully. 433 * 0 - pages are placed on the right node or queued successfully.
434 * 0 - THP was split. 434 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
435 * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing 435 * specified.
436 * page was already on a node that does not follow the policy. 436 * 2 - THP was split.
437 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
438 * existing page was already on a node that does not follow the
439 * policy.
437 */ 440 */
438static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr, 441static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
439 unsigned long end, struct mm_walk *walk) 442 unsigned long end, struct mm_walk *walk)
@@ -451,23 +454,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
451 if (is_huge_zero_page(page)) { 454 if (is_huge_zero_page(page)) {
452 spin_unlock(ptl); 455 spin_unlock(ptl);
453 __split_huge_pmd(walk->vma, pmd, addr, false, NULL); 456 __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
457 ret = 2;
454 goto out; 458 goto out;
455 } 459 }
456 if (!queue_pages_required(page, qp)) { 460 if (!queue_pages_required(page, qp))
457 ret = 1;
458 goto unlock; 461 goto unlock;
459 }
460 462
461 ret = 1;
462 flags = qp->flags; 463 flags = qp->flags;
463 /* go to thp migration */ 464 /* go to thp migration */
464 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 465 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
465 if (!vma_migratable(walk->vma)) { 466 if (!vma_migratable(walk->vma) ||
466 ret = -EIO; 467 migrate_page_add(page, qp->pagelist, flags)) {
468 ret = 1;
467 goto unlock; 469 goto unlock;
468 } 470 }
469
470 migrate_page_add(page, qp->pagelist, flags);
471 } else 471 } else
472 ret = -EIO; 472 ret = -EIO;
473unlock: 473unlock:
@@ -479,6 +479,13 @@ out:
479/* 479/*
480 * Scan through pages checking if pages follow certain conditions, 480 * Scan through pages checking if pages follow certain conditions,
481 * and move them to the pagelist if they do. 481 * and move them to the pagelist if they do.
482 *
483 * queue_pages_pte_range() has three possible return values:
484 * 0 - pages are placed on the right node or queued successfully.
485 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
486 * specified.
487 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
488 * on a node that does not follow the policy.
482 */ 489 */
483static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, 490static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
484 unsigned long end, struct mm_walk *walk) 491 unsigned long end, struct mm_walk *walk)
@@ -488,17 +495,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
488 struct queue_pages *qp = walk->private; 495 struct queue_pages *qp = walk->private;
489 unsigned long flags = qp->flags; 496 unsigned long flags = qp->flags;
490 int ret; 497 int ret;
498 bool has_unmovable = false;
491 pte_t *pte; 499 pte_t *pte;
492 spinlock_t *ptl; 500 spinlock_t *ptl;
493 501
494 ptl = pmd_trans_huge_lock(pmd, vma); 502 ptl = pmd_trans_huge_lock(pmd, vma);
495 if (ptl) { 503 if (ptl) {
496 ret = queue_pages_pmd(pmd, ptl, addr, end, walk); 504 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
497 if (ret > 0) 505 if (ret != 2)
498 return 0;
499 else if (ret < 0)
500 return ret; 506 return ret;
501 } 507 }
508 /* THP was split, fall through to pte walk */
502 509
503 if (pmd_trans_unstable(pmd)) 510 if (pmd_trans_unstable(pmd))
504 return 0; 511 return 0;
@@ -519,14 +526,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
519 if (!queue_pages_required(page, qp)) 526 if (!queue_pages_required(page, qp))
520 continue; 527 continue;
521 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 528 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
522 if (!vma_migratable(vma)) 529 /* MPOL_MF_STRICT must be specified if we get here */
530 if (!vma_migratable(vma)) {
531 has_unmovable = true;
523 break; 532 break;
524 migrate_page_add(page, qp->pagelist, flags); 533 }
534
535 /*
536 * Do not abort immediately since there may be
537 * temporary off LRU pages in the range. Still
 538 * need to migrate other LRU pages.
539 */
540 if (migrate_page_add(page, qp->pagelist, flags))
541 has_unmovable = true;
525 } else 542 } else
526 break; 543 break;
527 } 544 }
528 pte_unmap_unlock(pte - 1, ptl); 545 pte_unmap_unlock(pte - 1, ptl);
529 cond_resched(); 546 cond_resched();
547
548 if (has_unmovable)
549 return 1;
550
530 return addr != end ? -EIO : 0; 551 return addr != end ? -EIO : 0;
531} 552}
532 553
@@ -639,7 +660,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
639 * 660 *
640 * If pages found in a given range are on a set of nodes (determined by 661 * If pages found in a given range are on a set of nodes (determined by
641 * @nodes and @flags,) it's isolated and queued to the pagelist which is 662 * @nodes and @flags,) it's isolated and queued to the pagelist which is
642 * passed via @private.) 663 * passed via @private.
664 *
665 * queue_pages_range() has three possible return values:
666 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
667 * specified.
668 * 0 - queue pages successfully or no misplaced page.
669 * -EIO - there is misplaced page and only MPOL_MF_STRICT was specified.
643 */ 670 */
644static int 671static int
645queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 672queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -940,7 +967,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
940/* 967/*
941 * page migration, thp tail pages can be passed. 968 * page migration, thp tail pages can be passed.
942 */ 969 */
943static void migrate_page_add(struct page *page, struct list_head *pagelist, 970static int migrate_page_add(struct page *page, struct list_head *pagelist,
944 unsigned long flags) 971 unsigned long flags)
945{ 972{
946 struct page *head = compound_head(page); 973 struct page *head = compound_head(page);
@@ -953,8 +980,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
953 mod_node_page_state(page_pgdat(head), 980 mod_node_page_state(page_pgdat(head),
954 NR_ISOLATED_ANON + page_is_file_cache(head), 981 NR_ISOLATED_ANON + page_is_file_cache(head),
955 hpage_nr_pages(head)); 982 hpage_nr_pages(head));
983 } else if (flags & MPOL_MF_STRICT) {
984 /*
985 * Non-movable page may reach here. And, there may be
986 * temporary off LRU pages or non-LRU movable pages.
987 * Treat them as unmovable pages since they can't be
988 * isolated, so they can't be moved at the moment. It
989 * should return -EIO for this case too.
990 */
991 return -EIO;
956 } 992 }
957 } 993 }
994
995 return 0;
958} 996}
959 997
960/* page allocation callback for NUMA node migration */ 998/* page allocation callback for NUMA node migration */
@@ -1142,8 +1180,8 @@ static struct page *new_page(struct page *page, unsigned long start)
1142 } else if (PageTransHuge(page)) { 1180 } else if (PageTransHuge(page)) {
1143 struct page *thp; 1181 struct page *thp;
1144 1182
1145 thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address, 1183 thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
1146 HPAGE_PMD_ORDER); 1184 address, numa_node_id());
1147 if (!thp) 1185 if (!thp)
1148 return NULL; 1186 return NULL;
1149 prep_transhuge_page(thp); 1187 prep_transhuge_page(thp);
@@ -1157,9 +1195,10 @@ static struct page *new_page(struct page *page, unsigned long start)
1157} 1195}
1158#else 1196#else
1159 1197
1160static void migrate_page_add(struct page *page, struct list_head *pagelist, 1198static int migrate_page_add(struct page *page, struct list_head *pagelist,
1161 unsigned long flags) 1199 unsigned long flags)
1162{ 1200{
1201 return -EIO;
1163} 1202}
1164 1203
1165int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, 1204int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1182,6 +1221,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1182 struct mempolicy *new; 1221 struct mempolicy *new;
1183 unsigned long end; 1222 unsigned long end;
1184 int err; 1223 int err;
1224 int ret;
1185 LIST_HEAD(pagelist); 1225 LIST_HEAD(pagelist);
1186 1226
1187 if (flags & ~(unsigned long)MPOL_MF_VALID) 1227 if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1243,10 +1283,15 @@ static long do_mbind(unsigned long start, unsigned long len,
1243 if (err) 1283 if (err)
1244 goto mpol_out; 1284 goto mpol_out;
1245 1285
1246 err = queue_pages_range(mm, start, end, nmask, 1286 ret = queue_pages_range(mm, start, end, nmask,
1247 flags | MPOL_MF_INVERT, &pagelist); 1287 flags | MPOL_MF_INVERT, &pagelist);
1248 if (!err) 1288
1249 err = mbind_range(mm, start, end, new); 1289 if (ret < 0) {
1290 err = -EIO;
1291 goto up_out;
1292 }
1293
1294 err = mbind_range(mm, start, end, new);
1250 1295
1251 if (!err) { 1296 if (!err) {
1252 int nr_failed = 0; 1297 int nr_failed = 0;
@@ -1259,13 +1304,14 @@ static long do_mbind(unsigned long start, unsigned long len,
1259 putback_movable_pages(&pagelist); 1304 putback_movable_pages(&pagelist);
1260 } 1305 }
1261 1306
1262 if (nr_failed && (flags & MPOL_MF_STRICT)) 1307 if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1263 err = -EIO; 1308 err = -EIO;
1264 } else 1309 } else
1265 putback_movable_pages(&pagelist); 1310 putback_movable_pages(&pagelist);
1266 1311
1312up_out:
1267 up_write(&mm->mmap_sem); 1313 up_write(&mm->mmap_sem);
1268 mpol_out: 1314mpol_out:
1269 mpol_put(new); 1315 mpol_put(new);
1270 return err; 1316 return err;
1271} 1317}
@@ -1688,7 +1734,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1688 * freeing by another task. It is the caller's responsibility to free the 1734 * freeing by another task. It is the caller's responsibility to free the
1689 * extra reference for shared policies. 1735 * extra reference for shared policies.
1690 */ 1736 */
1691static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, 1737struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1692 unsigned long addr) 1738 unsigned long addr)
1693{ 1739{
1694 struct mempolicy *pol = __get_vma_policy(vma, addr); 1740 struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2037,7 +2083,6 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2037 * @vma: Pointer to VMA or NULL if not available. 2083 * @vma: Pointer to VMA or NULL if not available.
2038 * @addr: Virtual Address of the allocation. Must be inside the VMA. 2084 * @addr: Virtual Address of the allocation. Must be inside the VMA.
2039 * @node: Which node to prefer for allocation (modulo policy). 2085 * @node: Which node to prefer for allocation (modulo policy).
2040 * @hugepage: for hugepages try only the preferred node if possible
2041 * 2086 *
2042 * This function allocates a page from the kernel page pool and applies 2087 * This function allocates a page from the kernel page pool and applies
2043 * a NUMA policy associated with the VMA or the current process. 2088 * a NUMA policy associated with the VMA or the current process.
@@ -2048,7 +2093,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2048 */ 2093 */
2049struct page * 2094struct page *
2050alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 2095alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2051 unsigned long addr, int node, bool hugepage) 2096 unsigned long addr, int node)
2052{ 2097{
2053 struct mempolicy *pol; 2098 struct mempolicy *pol;
2054 struct page *page; 2099 struct page *page;
@@ -2066,31 +2111,6 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2066 goto out; 2111 goto out;
2067 } 2112 }
2068 2113
2069 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2070 int hpage_node = node;
2071
2072 /*
2073 * For hugepage allocation and non-interleave policy which
2074 * allows the current node (or other explicitly preferred
2075 * node) we only try to allocate from the current/preferred
2076 * node and don't fall back to other nodes, as the cost of
2077 * remote accesses would likely offset THP benefits.
2078 *
2079 * If the policy is interleave, or does not allow the current
2080 * node in its nodemask, we allocate the standard way.
2081 */
2082 if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2083 hpage_node = pol->v.preferred_node;
2084
2085 nmask = policy_nodemask(gfp, pol);
2086 if (!nmask || node_isset(hpage_node, *nmask)) {
2087 mpol_cond_put(pol);
2088 page = __alloc_pages_node(hpage_node,
2089 gfp | __GFP_THISNODE, order);
2090 goto out;
2091 }
2092 }
2093
2094 nmask = policy_nodemask(gfp, pol); 2114 nmask = policy_nodemask(gfp, pol);
2095 preferred_nid = policy_node(gfp, pol, node); 2115 preferred_nid = policy_node(gfp, pol, node);
2096 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask); 2116 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
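
The mempolicy hunks above rework an old contract in which queue_pages_range() could only report success or -EIO: an unmovable page found while MPOL_MF_MOVE* and MPOL_MF_STRICT are both set is now reported as a positive 1 rather than an error, so do_mbind() can still run mbind_range() and attempt migration before deciding to return -EIO. A minimal userspace model of that contract follows; the function and its parameters are illustrative only, not kernel symbols.

#include <errno.h>
#include <stdio.h>

/* 0: queued or already on the right node; 1: unmovable page with
 * MPOL_MF_MOVE* and MPOL_MF_STRICT both set; -EIO: misplaced page
 * with only MPOL_MF_STRICT. */
static int handle_queue_result(int ret, int nr_failed, int strict)
{
	if (ret < 0)
		return -EIO;	/* do_mbind(): goto up_out, skip mbind_range() */

	/* mbind_range() and the migration attempt run here ... */

	if (ret > 0 || (nr_failed && strict))
		return -EIO;	/* reported only after migration was tried */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       handle_queue_result(-EIO, 0, 1),	/* -5 */
	       handle_queue_result(1, 0, 1),	/* -5 */
	       handle_queue_result(0, 0, 1));	/* 0 */
	return 0;
}

The observable difference is timing: a hard -EIO aborts before mbind_range(), while the new ret == 1 case still applies the policy and migrates whatever it can before failing.
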
diff --git a/mm/memremap.c b/mm/memremap.c
index 86432650f829..ed70c4e8e52a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -403,6 +403,30 @@ void __put_devmap_managed_page(struct page *page)
403 403
404 mem_cgroup_uncharge(page); 404 mem_cgroup_uncharge(page);
405 405
406 /*
407 * When a device_private page is freed, the page->mapping field
408 * may still contain a (stale) mapping value. For example, the
409 * lower bits of page->mapping may still identify the page as
410 * an anonymous page. Ultimately, this entire field is just
411 * stale and wrong, and it will cause errors if not cleared.
412 * One example is:
413 *
414 * migrate_vma_pages()
415 * migrate_vma_insert_page()
416 * page_add_new_anon_rmap()
417 * __page_set_anon_rmap()
418 * ...checks page->mapping, via PageAnon(page) call,
419 * and incorrectly concludes that the page is an
420 * anonymous page. Therefore, it incorrectly,
421 * silently fails to set up the new anon rmap.
422 *
423 * For other types of ZONE_DEVICE pages, migration is either
424 * handled differently or not done at all, so there is no need
425 * to clear page->mapping.
426 */
427 if (is_device_private_page(page))
428 page->mapping = NULL;
429
406 page->pgmap->ops->page_free(page); 430 page->pgmap->ops->page_free(page);
407 } else if (!count) 431 } else if (!count)
408 __put_page(page); 432 __put_page(page);
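
The memremap.c hunk clears page->mapping on free because the anonymous-page test keys off a tag bit in that field. A hedged userspace model of the PageAnon() check: PAGE_MAPPING_ANON really is bit 0 of ->mapping in the kernel, but the struct and the stale value below are simplified stand-ins.

#include <stdio.h>

#define PAGE_MAPPING_ANON 0x1UL	/* real kernel value: bit 0 of ->mapping */

struct page_model { unsigned long mapping; };

static int page_anon(const struct page_model *p)
{
	return (p->mapping & PAGE_MAPPING_ANON) != 0;
}

int main(void)
{
	/* stale anon-tagged pointer left over from the page's previous life */
	struct page_model p = { .mapping = 0xdeadb001UL };

	printf("stale:   PageAnon=%d\n", page_anon(&p));	/* 1: wrong */
	p.mapping = 0;	/* what the hunk does for device-private pages */
	printf("cleared: PageAnon=%d\n", page_anon(&p));	/* 0 */
	return 0;
}
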
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 272c6de1bf4e..9c9194959271 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2238,27 +2238,12 @@ static int move_freepages(struct zone *zone,
2238 unsigned int order; 2238 unsigned int order;
2239 int pages_moved = 0; 2239 int pages_moved = 0;
2240 2240
2241#ifndef CONFIG_HOLES_IN_ZONE
2242 /*
2243 * page_zone is not safe to call in this context when
2244 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
2245 * anyway as we check zone boundaries in move_freepages_block().
2246 * Remove at a later date when no bug reports exist related to
2247 * grouping pages by mobility
2248 */
2249 VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
2250 pfn_valid(page_to_pfn(end_page)) &&
2251 page_zone(start_page) != page_zone(end_page));
2252#endif
2253 for (page = start_page; page <= end_page;) { 2241 for (page = start_page; page <= end_page;) {
2254 if (!pfn_valid_within(page_to_pfn(page))) { 2242 if (!pfn_valid_within(page_to_pfn(page))) {
2255 page++; 2243 page++;
2256 continue; 2244 continue;
2257 } 2245 }
2258 2246
2259 /* Make sure we are not inadvertently changing nodes */
2260 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2261
2262 if (!PageBuddy(page)) { 2247 if (!PageBuddy(page)) {
2263 /* 2248 /*
2264 * We assume that pages that could be isolated for 2249 * We assume that pages that could be isolated for
@@ -2273,6 +2258,10 @@ static int move_freepages(struct zone *zone,
2273 continue; 2258 continue;
2274 } 2259 }
2275 2260
2261 /* Make sure we are not inadvertently changing nodes */
2262 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2263 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2264
2276 order = page_order(page); 2265 order = page_order(page);
2277 move_to_free_area(page, &zone->free_area[order], migratetype); 2266 move_to_free_area(page, &zone->free_area[order], migratetype);
2278 page += 1 << order; 2267 page += 1 << order;
diff --git a/mm/rmap.c b/mm/rmap.c
index e5dfe2ae6b0d..003377e24232 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1475,7 +1475,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1475 /* 1475 /*
1476 * No need to invalidate here it will synchronize on 1476 * No need to invalidate here it will synchronize on
1477 * against the special swap migration pte. 1477 * against the special swap migration pte.
1478 *
1479 * The assignment to subpage above was computed from a
1480 * swap PTE which results in an invalid pointer.
1481 * Since only PAGE_SIZE pages can currently be
1482 * migrated, just set it to page. This will need to be
1483 * changed when hugepage migrations to device private
1484 * memory are supported.
1478 */ 1485 */
1486 subpage = page;
1479 goto discard; 1487 goto discard;
1480 } 1488 }
1481 1489
diff --git a/mm/shmem.c b/mm/shmem.c
index 626d8c74b973..2bed4761f279 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1466,7 +1466,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
1466 1466
1467 shmem_pseudo_vma_init(&pvma, info, hindex); 1467 shmem_pseudo_vma_init(&pvma, info, hindex);
1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1468 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1469 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1469 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
1470 shmem_pseudo_vma_destroy(&pvma); 1470 shmem_pseudo_vma_destroy(&pvma);
1471 if (page) 1471 if (page)
1472 prep_transhuge_page(page); 1472 prep_transhuge_page(page);
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2a09796edef8..98e924864554 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -147,7 +147,7 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
147 bool to_user) 147 bool to_user)
148{ 148{
149 /* Reject if object wraps past end of memory. */ 149 /* Reject if object wraps past end of memory. */
150 if (ptr + n < ptr) 150 if (ptr + (n - 1) < ptr)
151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); 151 usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
152 152
153 /* Reject if NULL or ZERO-allocation. */ 153 /* Reject if NULL or ZERO-allocation. */
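
The usercopy change is a classic inclusive-bound fix: an n-byte object occupies [ptr, ptr + n - 1], so an object ending exactly at the top of the address space is legal even though ptr + n wraps to zero. A small standalone demo of the two checks:

#include <stdio.h>

int main(void)
{
	unsigned long ptr = ~0UL - 7;	/* object in the last 8 bytes */
	unsigned long n = 8;

	/* old test: ptr + n wraps to 0, falsely flags a legal object */
	printf("old check rejects: %d\n", ptr + n < ptr);	/* 1 */
	/* new test: last byte is at ptr + n - 1 = ~0UL, no wrap */
	printf("new check rejects: %d\n", ptr + (n - 1) < ptr);	/* 0 */
	return 0;
}
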
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e0fc963acc41..7ba11e12a11f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3279,9 +3279,19 @@ retry:
3279 goto overflow; 3279 goto overflow;
3280 3280
3281 /* 3281 /*
 3282 * If required width exceeds current VA block, move
3283 * base downwards and then recheck.
3284 */
3285 if (base + end > va->va_end) {
3286 base = pvm_determine_end_from_reverse(&va, align) - end;
3287 term_area = area;
3288 continue;
3289 }
3290
3291 /*
3282 * If this VA does not fit, move base downwards and recheck. 3292 * If this VA does not fit, move base downwards and recheck.
3283 */ 3293 */
3284 if (base + start < va->va_start || base + end > va->va_end) { 3294 if (base + start < va->va_start) {
3285 va = node_to_va(rb_prev(&va->rb_node)); 3295 va = node_to_va(rb_prev(&va->rb_node));
3286 base = pvm_determine_end_from_reverse(&va, align) - end; 3296 base = pvm_determine_end_from_reverse(&va, align) - end;
3287 term_area = area; 3297 term_area = area;
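
The vmalloc hunk splits one combined bounds test in the reverse percpu-area search: overshooting the top of the current free block now slides the base down within the same block and rechecks, and only undershooting the bottom steps to the previous block. A simplified sketch of the new order of checks; alignment and the multi-area loop are ignored, and the addresses are made up.

#include <stdio.h>

int main(void)
{
	unsigned long va_start = 0x1000, va_end = 0x9000;	/* free block */
	unsigned long base = 0x8000, start = 0, end = 0x3000;	/* request */

	if (base + end > va_end) {
		/* new first check: clamp into the same block, recheck */
		base = va_end - end;
		printf("slide base down to 0x%lx, same block\n", base);
	}
	if (base + start < va_start)
		printf("only now step to the previous free block\n");
	else
		printf("fits: [0x%lx, 0x%lx)\n", base + start, base + end);
	return 0;
}

Under the old combined test this request would have skipped a block that could in fact hold the span once the base was clamped.
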
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dbdc46a84f63..a6c5d0b28321 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -88,9 +88,6 @@ struct scan_control {
88 /* Can pages be swapped as part of reclaim? */ 88 /* Can pages be swapped as part of reclaim? */
89 unsigned int may_swap:1; 89 unsigned int may_swap:1;
90 90
91 /* e.g. boosted watermark reclaim leaves slabs alone */
92 unsigned int may_shrinkslab:1;
93
94 /* 91 /*
95 * Cgroups are not reclaimed below their configured memory.low, 92 * Cgroups are not reclaimed below their configured memory.low,
96 * unless we threaten to OOM. If any cgroups are skipped due to 93 * unless we threaten to OOM. If any cgroups are skipped due to
@@ -2714,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
2714 shrink_node_memcg(pgdat, memcg, sc, &lru_pages); 2711 shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
2715 node_lru_pages += lru_pages; 2712 node_lru_pages += lru_pages;
2716 2713
2717 if (sc->may_shrinkslab) { 2714 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
2718 shrink_slab(sc->gfp_mask, pgdat->node_id, 2715 sc->priority);
2719 memcg, sc->priority);
2720 }
2721 2716
2722 /* Record the group's reclaim efficiency */ 2717 /* Record the group's reclaim efficiency */
2723 vmpressure(sc->gfp_mask, memcg, false, 2718 vmpressure(sc->gfp_mask, memcg, false,
@@ -3194,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3194 .may_writepage = !laptop_mode, 3189 .may_writepage = !laptop_mode,
3195 .may_unmap = 1, 3190 .may_unmap = 1,
3196 .may_swap = 1, 3191 .may_swap = 1,
3197 .may_shrinkslab = 1,
3198 }; 3192 };
3199 3193
3200 /* 3194 /*
@@ -3226,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
3226 3220
3227#ifdef CONFIG_MEMCG 3221#ifdef CONFIG_MEMCG
3228 3222
3223/* Only used by soft limit reclaim. Do not reuse for anything else. */
3229unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, 3224unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3230 gfp_t gfp_mask, bool noswap, 3225 gfp_t gfp_mask, bool noswap,
3231 pg_data_t *pgdat, 3226 pg_data_t *pgdat,
@@ -3238,11 +3233,11 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3238 .may_unmap = 1, 3233 .may_unmap = 1,
3239 .reclaim_idx = MAX_NR_ZONES - 1, 3234 .reclaim_idx = MAX_NR_ZONES - 1,
3240 .may_swap = !noswap, 3235 .may_swap = !noswap,
3241 .may_shrinkslab = 1,
3242 }; 3236 };
3243 unsigned long lru_pages; 3237 unsigned long lru_pages;
3244 3238
3245 set_task_reclaim_state(current, &sc.reclaim_state); 3239 WARN_ON_ONCE(!current->reclaim_state);
3240
3246 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 3241 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
3247 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 3242 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
3248 3243
@@ -3260,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
3260 3255
3261 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 3256 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
3262 3257
3263 set_task_reclaim_state(current, NULL);
3264 *nr_scanned = sc.nr_scanned; 3258 *nr_scanned = sc.nr_scanned;
3265 3259
3266 return sc.nr_reclaimed; 3260 return sc.nr_reclaimed;
@@ -3286,7 +3280,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
3286 .may_writepage = !laptop_mode, 3280 .may_writepage = !laptop_mode,
3287 .may_unmap = 1, 3281 .may_unmap = 1,
3288 .may_swap = may_swap, 3282 .may_swap = may_swap,
3289 .may_shrinkslab = 1,
3290 }; 3283 };
3291 3284
3292 set_task_reclaim_state(current, &sc.reclaim_state); 3285 set_task_reclaim_state(current, &sc.reclaim_state);
@@ -3598,7 +3591,6 @@ restart:
3598 */ 3591 */
3599 sc.may_writepage = !laptop_mode && !nr_boost_reclaim; 3592 sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
3600 sc.may_swap = !nr_boost_reclaim; 3593 sc.may_swap = !nr_boost_reclaim;
3601 sc.may_shrinkslab = !nr_boost_reclaim;
3602 3594
3603 /* 3595 /*
3604 * Do some background aging of the anon list, to give 3596 * Do some background aging of the anon list, to give
diff --git a/mm/workingset.c b/mm/workingset.c
index e0b4edcb88c8..c963831d354f 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -380,14 +380,12 @@ void workingset_update_node(struct xa_node *node)
380 if (node->count && node->count == node->nr_values) { 380 if (node->count && node->count == node->nr_values) {
381 if (list_empty(&node->private_list)) { 381 if (list_empty(&node->private_list)) {
382 list_lru_add(&shadow_nodes, &node->private_list); 382 list_lru_add(&shadow_nodes, &node->private_list);
383 __inc_lruvec_page_state(virt_to_page(node), 383 __inc_lruvec_slab_state(node, WORKINGSET_NODES);
384 WORKINGSET_NODES);
385 } 384 }
386 } else { 385 } else {
387 if (!list_empty(&node->private_list)) { 386 if (!list_empty(&node->private_list)) {
388 list_lru_del(&shadow_nodes, &node->private_list); 387 list_lru_del(&shadow_nodes, &node->private_list);
389 __dec_lruvec_page_state(virt_to_page(node), 388 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
390 WORKINGSET_NODES);
391 } 389 }
392 } 390 }
393} 391}
@@ -480,7 +478,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
480 } 478 }
481 479
482 list_lru_isolate(lru, item); 480 list_lru_isolate(lru, item);
483 __dec_lruvec_page_state(virt_to_page(node), WORKINGSET_NODES); 481 __dec_lruvec_slab_state(node, WORKINGSET_NODES);
484 482
485 spin_unlock(lru_lock); 483 spin_unlock(lru_lock);
486 484
@@ -503,7 +501,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
503 * shadow entries we were tracking ... 501 * shadow entries we were tracking ...
504 */ 502 */
505 xas_store(&xas, NULL); 503 xas_store(&xas, NULL);
506 __inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM); 504 __inc_lruvec_slab_state(node, WORKINGSET_NODERECLAIM);
507 505
508out_invalid: 506out_invalid:
509 xa_unlock_irq(&mapping->i_pages); 507 xa_unlock_irq(&mapping->i_pages);
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 1a029a7432ee..75b7962439ff 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,6 +41,7 @@
41#include <linux/workqueue.h> 41#include <linux/workqueue.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/spinlock.h> 43#include <linux/spinlock.h>
44#include <linux/wait.h>
44#include <linux/zpool.h> 45#include <linux/zpool.h>
45#include <linux/magic.h> 46#include <linux/magic.h>
46 47
@@ -145,6 +146,8 @@ struct z3fold_header {
145 * @release_wq: workqueue for safe page release 146 * @release_wq: workqueue for safe page release
146 * @work: work_struct for safe page release 147 * @work: work_struct for safe page release
147 * @inode: inode for z3fold pseudo filesystem 148 * @inode: inode for z3fold pseudo filesystem
149 * @destroying: bool to stop migration once we start destruction
150 * @isolated: int to count the number of pages currently in isolation
148 * 151 *
149 * This structure is allocated at pool creation time and maintains metadata 152 * This structure is allocated at pool creation time and maintains metadata
150 * pertaining to a particular z3fold pool. 153 * pertaining to a particular z3fold pool.
@@ -163,8 +166,11 @@ struct z3fold_pool {
163 const struct zpool_ops *zpool_ops; 166 const struct zpool_ops *zpool_ops;
164 struct workqueue_struct *compact_wq; 167 struct workqueue_struct *compact_wq;
165 struct workqueue_struct *release_wq; 168 struct workqueue_struct *release_wq;
169 struct wait_queue_head isolate_wait;
166 struct work_struct work; 170 struct work_struct work;
167 struct inode *inode; 171 struct inode *inode;
172 bool destroying;
173 int isolated;
168}; 174};
169 175
170/* 176/*
@@ -769,6 +775,7 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
769 goto out_c; 775 goto out_c;
770 spin_lock_init(&pool->lock); 776 spin_lock_init(&pool->lock);
771 spin_lock_init(&pool->stale_lock); 777 spin_lock_init(&pool->stale_lock);
778 init_waitqueue_head(&pool->isolate_wait);
772 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2); 779 pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
773 if (!pool->unbuddied) 780 if (!pool->unbuddied)
774 goto out_pool; 781 goto out_pool;
@@ -808,6 +815,15 @@ out:
808 return NULL; 815 return NULL;
809} 816}
810 817
818static bool pool_isolated_are_drained(struct z3fold_pool *pool)
819{
820 bool ret;
821
822 spin_lock(&pool->lock);
823 ret = pool->isolated == 0;
824 spin_unlock(&pool->lock);
825 return ret;
826}
811/** 827/**
812 * z3fold_destroy_pool() - destroys an existing z3fold pool 828 * z3fold_destroy_pool() - destroys an existing z3fold pool
813 * @pool: the z3fold pool to be destroyed 829 * @pool: the z3fold pool to be destroyed
@@ -817,9 +833,35 @@ out:
817static void z3fold_destroy_pool(struct z3fold_pool *pool) 833static void z3fold_destroy_pool(struct z3fold_pool *pool)
818{ 834{
819 kmem_cache_destroy(pool->c_handle); 835 kmem_cache_destroy(pool->c_handle);
820 z3fold_unregister_migration(pool); 836 /*
 821 destroy_workqueue(pool->release_wq); 837 * We set pool->destroying under lock to ensure that
838 * z3fold_page_isolate() sees any changes to destroying. This way we
839 * avoid the need for any memory barriers.
840 */
841
842 spin_lock(&pool->lock);
843 pool->destroying = true;
844 spin_unlock(&pool->lock);
845
846 /*
847 * We need to ensure that no pages are being migrated while we destroy
848 * these workqueues, as migration can queue work on either of the
849 * workqueues.
850 */
851 wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));
852
853 /*
854 * We need to destroy pool->compact_wq before pool->release_wq,
855 * as any pending work on pool->compact_wq will call
856 * queue_work(pool->release_wq, &pool->work).
857 *
858 * There are still outstanding pages until both workqueues are drained,
859 * so we cannot unregister migration until then.
860 */
861
822 destroy_workqueue(pool->compact_wq); 862 destroy_workqueue(pool->compact_wq);
863 destroy_workqueue(pool->release_wq);
864 z3fold_unregister_migration(pool);
823 kfree(pool); 865 kfree(pool);
824} 866}
825 867
@@ -1297,6 +1339,28 @@ static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
1297 return atomic64_read(&pool->pages_nr); 1339 return atomic64_read(&pool->pages_nr);
1298} 1340}
1299 1341
1342/*
1343 * z3fold_dec_isolated() expects to be called while pool->lock is held.
1344 */
1345static void z3fold_dec_isolated(struct z3fold_pool *pool)
1346{
1347 assert_spin_locked(&pool->lock);
1348 VM_BUG_ON(pool->isolated <= 0);
1349 pool->isolated--;
1350
1351 /*
1352 * If we have no more isolated pages, we have to see if
1353 * z3fold_destroy_pool() is waiting for a signal.
1354 */
1355 if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
1356 wake_up_all(&pool->isolate_wait);
1357}
1358
1359static void z3fold_inc_isolated(struct z3fold_pool *pool)
1360{
1361 pool->isolated++;
1362}
1363
1300static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) 1364static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1301{ 1365{
1302 struct z3fold_header *zhdr; 1366 struct z3fold_header *zhdr;
@@ -1323,6 +1387,34 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1323 spin_lock(&pool->lock); 1387 spin_lock(&pool->lock);
1324 if (!list_empty(&page->lru)) 1388 if (!list_empty(&page->lru))
1325 list_del(&page->lru); 1389 list_del(&page->lru);
1390 /*
1391 * We need to check for destruction while holding pool->lock, as
1392 * otherwise destruction could see 0 isolated pages, and
1393 * proceed.
1394 */
1395 if (unlikely(pool->destroying)) {
1396 spin_unlock(&pool->lock);
1397 /*
1398 * If this page isn't stale, somebody else holds a
 1399 * reference to it. Let's drop our refcount so that they
1400 * can call the release logic.
1401 */
1402 if (unlikely(kref_put(&zhdr->refcount,
1403 release_z3fold_page_locked))) {
1404 /*
1405 * If we get here we have kref problems, so we
1406 * should freak out.
1407 */
1408 WARN(1, "Z3fold is experiencing kref problems\n");
1409 z3fold_page_unlock(zhdr);
1410 return false;
1411 }
1412 z3fold_page_unlock(zhdr);
1413 return false;
1414 }
1415
1416
1417 z3fold_inc_isolated(pool);
1326 spin_unlock(&pool->lock); 1418 spin_unlock(&pool->lock);
1327 z3fold_page_unlock(zhdr); 1419 z3fold_page_unlock(zhdr);
1328 return true; 1420 return true;
@@ -1391,6 +1483,10 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
1391 1483
1392 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work); 1484 queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
1393 1485
1486 spin_lock(&pool->lock);
1487 z3fold_dec_isolated(pool);
1488 spin_unlock(&pool->lock);
1489
1394 page_mapcount_reset(page); 1490 page_mapcount_reset(page);
1395 put_page(page); 1491 put_page(page);
1396 return 0; 1492 return 0;
@@ -1410,10 +1506,14 @@ static void z3fold_page_putback(struct page *page)
1410 INIT_LIST_HEAD(&page->lru); 1506 INIT_LIST_HEAD(&page->lru);
1411 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) { 1507 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
1412 atomic64_dec(&pool->pages_nr); 1508 atomic64_dec(&pool->pages_nr);
1509 spin_lock(&pool->lock);
1510 z3fold_dec_isolated(pool);
1511 spin_unlock(&pool->lock);
1413 return; 1512 return;
1414 } 1513 }
1415 spin_lock(&pool->lock); 1514 spin_lock(&pool->lock);
1416 list_add(&page->lru, &pool->lru); 1515 list_add(&page->lru, &pool->lru);
1516 z3fold_dec_isolated(pool);
1417 spin_unlock(&pool->lock); 1517 spin_unlock(&pool->lock);
1418 z3fold_page_unlock(zhdr); 1518 z3fold_page_unlock(zhdr);
1419} 1519}
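
The z3fold changes implement a drain-before-destroy handshake: isolation bumps a counter under pool->lock and fails once destroying is set, while z3fold_destroy_pool() sets the flag and sleeps until the counter hits zero. A hedged pthreads model of the same pattern; condition variables stand in for the kernel's wait_event()/wake_up_all(), and none of this is z3fold code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int isolated;
static bool destroying;

static bool try_isolate(void)
{
	pthread_mutex_lock(&lock);
	if (destroying) {		/* checked under the same lock */
		pthread_mutex_unlock(&lock);
		return false;
	}
	isolated++;
	pthread_mutex_unlock(&lock);
	return true;
}

static void put_back(void)
{
	pthread_mutex_lock(&lock);
	if (--isolated == 0)
		pthread_cond_broadcast(&drained);	/* wake_up_all() */
	pthread_mutex_unlock(&lock);
}

static void destroy_pool(void)
{
	pthread_mutex_lock(&lock);
	destroying = true;		/* new isolations now fail */
	while (isolated > 0)		/* wait_event() analogue */
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);
	puts("safe to destroy workqueues");
}

int main(void)
{
	if (try_isolate())
		put_back();
	destroy_pool();
	return 0;
}

Because both the flag and the counter live under one lock, the destroyer can never observe a zero count while an isolation is still in flight.
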
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 57fbb7ced69f..e98bb6ab4f7e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -54,6 +54,7 @@
54#include <linux/mount.h> 54#include <linux/mount.h>
55#include <linux/pseudo_fs.h> 55#include <linux/pseudo_fs.h>
56#include <linux/migrate.h> 56#include <linux/migrate.h>
57#include <linux/wait.h>
57#include <linux/pagemap.h> 58#include <linux/pagemap.h>
58#include <linux/fs.h> 59#include <linux/fs.h>
59 60
@@ -268,6 +269,10 @@ struct zs_pool {
268#ifdef CONFIG_COMPACTION 269#ifdef CONFIG_COMPACTION
269 struct inode *inode; 270 struct inode *inode;
270 struct work_struct free_work; 271 struct work_struct free_work;
272 /* A wait queue for when migration races with async_free_zspage() */
273 struct wait_queue_head migration_wait;
274 atomic_long_t isolated_pages;
275 bool destroying;
271#endif 276#endif
272}; 277};
273 278
@@ -1862,6 +1867,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
1862 zspage->isolated--; 1867 zspage->isolated--;
1863} 1868}
1864 1869
1870static void putback_zspage_deferred(struct zs_pool *pool,
1871 struct size_class *class,
1872 struct zspage *zspage)
1873{
1874 enum fullness_group fg;
1875
1876 fg = putback_zspage(class, zspage);
1877 if (fg == ZS_EMPTY)
1878 schedule_work(&pool->free_work);
1879
1880}
1881
1882static inline void zs_pool_dec_isolated(struct zs_pool *pool)
1883{
1884 VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
1885 atomic_long_dec(&pool->isolated_pages);
1886 /*
1887 * There's no possibility of racing, since wait_for_isolated_drain()
1888 * checks the isolated count under &class->lock after enqueuing
1889 * on migration_wait.
1890 */
1891 if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
1892 wake_up_all(&pool->migration_wait);
1893}
1894
1865static void replace_sub_page(struct size_class *class, struct zspage *zspage, 1895static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1866 struct page *newpage, struct page *oldpage) 1896 struct page *newpage, struct page *oldpage)
1867{ 1897{
@@ -1931,6 +1961,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1931 */ 1961 */
1932 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { 1962 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1933 get_zspage_mapping(zspage, &class_idx, &fullness); 1963 get_zspage_mapping(zspage, &class_idx, &fullness);
1964 atomic_long_inc(&pool->isolated_pages);
1934 remove_zspage(class, zspage, fullness); 1965 remove_zspage(class, zspage, fullness);
1935 } 1966 }
1936 1967
@@ -2030,8 +2061,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2030 * Page migration is done so let's putback isolated zspage to 2061 * Page migration is done so let's putback isolated zspage to
2031 * the list if @page is final isolated subpage in the zspage. 2062 * the list if @page is final isolated subpage in the zspage.
2032 */ 2063 */
2033 if (!is_zspage_isolated(zspage)) 2064 if (!is_zspage_isolated(zspage)) {
2034 putback_zspage(class, zspage); 2065 /*
2066 * We cannot race with zs_destroy_pool() here because we wait
2067 * for isolation to hit zero before we start destroying.
2068 * Also, we ensure that everyone can see pool->destroying before
2069 * we start waiting.
2070 */
2071 putback_zspage_deferred(pool, class, zspage);
2072 zs_pool_dec_isolated(pool);
2073 }
2035 2074
2036 reset_page(page); 2075 reset_page(page);
2037 put_page(page); 2076 put_page(page);
@@ -2077,13 +2116,12 @@ static void zs_page_putback(struct page *page)
2077 spin_lock(&class->lock); 2116 spin_lock(&class->lock);
2078 dec_zspage_isolation(zspage); 2117 dec_zspage_isolation(zspage);
2079 if (!is_zspage_isolated(zspage)) { 2118 if (!is_zspage_isolated(zspage)) {
2080 fg = putback_zspage(class, zspage);
2081 /* 2119 /*
2082 * Due to page_lock, we cannot free zspage immediately 2120 * Due to page_lock, we cannot free zspage immediately
2083 * so let's defer. 2121 * so let's defer.
2084 */ 2122 */
2085 if (fg == ZS_EMPTY) 2123 putback_zspage_deferred(pool, class, zspage);
2086 schedule_work(&pool->free_work); 2124 zs_pool_dec_isolated(pool);
2087 } 2125 }
2088 spin_unlock(&class->lock); 2126 spin_unlock(&class->lock);
2089} 2127}
@@ -2107,8 +2145,36 @@ static int zs_register_migration(struct zs_pool *pool)
2107 return 0; 2145 return 0;
2108} 2146}
2109 2147
2148static bool pool_isolated_are_drained(struct zs_pool *pool)
2149{
2150 return atomic_long_read(&pool->isolated_pages) == 0;
2151}
2152
2153/* Function for resolving migration */
2154static void wait_for_isolated_drain(struct zs_pool *pool)
2155{
2156
2157 /*
2158 * We're in the process of destroying the pool, so there are no
2159 * active allocations. zs_page_isolate() fails for completely free
2160 * zspages, so we need only wait for the zs_pool's isolated
2161 * count to hit zero.
2162 */
2163 wait_event(pool->migration_wait,
2164 pool_isolated_are_drained(pool));
2165}
2166
2110static void zs_unregister_migration(struct zs_pool *pool) 2167static void zs_unregister_migration(struct zs_pool *pool)
2111{ 2168{
2169 pool->destroying = true;
2170 /*
2171 * We need a memory barrier here to ensure global visibility of
2172 * pool->destroying. Thus pool->isolated pages will either be 0 in which
2173 * case we don't care, or it will be > 0 and pool->destroying will
2174 * ensure that we wake up once isolation hits 0.
2175 */
2176 smp_mb();
2177 wait_for_isolated_drain(pool); /* This can block */
2112 flush_work(&pool->free_work); 2178 flush_work(&pool->free_work);
2113 iput(pool->inode); 2179 iput(pool->inode);
2114} 2180}
@@ -2346,6 +2412,10 @@ struct zs_pool *zs_create_pool(const char *name)
2346 if (!pool->name) 2412 if (!pool->name)
2347 goto err; 2413 goto err;
2348 2414
2415#ifdef CONFIG_COMPACTION
2416 init_waitqueue_head(&pool->migration_wait);
2417#endif
2418
2349 if (create_cache(pool)) 2419 if (create_cache(pool))
2350 goto err; 2420 goto err;
2351 2421
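
zsmalloc solves the same shutdown race with a lock-free counter: zs_unregister_migration() publishes pool->destroying, issues a full barrier, and then waits for isolated_pages to drain. A hedged C11-atomics sketch of the ordering argument; the busy-wait stands in for wait_event(), and kernel names are only echoed in the output strings.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long isolated_pages;
static atomic_bool destroying;

static void zs_pool_dec_isolated_model(void)
{
	atomic_fetch_sub(&isolated_pages, 1);
	/* only the teardown path ever sleeps on this condition */
	if (atomic_load(&isolated_pages) == 0 && atomic_load(&destroying))
		puts("wake_up_all(&pool->migration_wait)");
}

static void zs_unregister_migration_model(void)
{
	atomic_store(&destroying, true);
	/* smp_mb() analogue: destroying is globally visible before we
	 * read the counter, so a racing decrement cannot be missed */
	atomic_thread_fence(memory_order_seq_cst);
	while (atomic_load(&isolated_pages) != 0)
		;	/* stands in for wait_event() */
	puts("drained; safe to flush free_work and iput the inode");
}

int main(void)
{
	atomic_fetch_add(&isolated_pages, 1);
	zs_pool_dec_isolated_model();
	zs_unregister_migration_model();
	return 0;
}
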
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 67d7f83009ae..1d5bdf3a4b65 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -2303,7 +2303,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2303 2303
2304 while (bucket_tmp < hash->size) { 2304 while (bucket_tmp < hash->size) {
2305 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash, 2305 if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2306 *bucket, &idx_tmp)) 2306 bucket_tmp, &idx_tmp))
2307 break; 2307 break;
2308 2308
2309 bucket_tmp++; 2309 bucket_tmp++;
@@ -2420,8 +2420,10 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2420 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); 2420 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2421 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS); 2421 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2422 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS); 2422 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
2423 batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS); 2423 batadv_mcast_want_rtr4_update(bat_priv, orig,
2424 batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS); 2424 BATADV_MCAST_WANT_NO_RTR4);
2425 batadv_mcast_want_rtr6_update(bat_priv, orig,
2426 BATADV_MCAST_WANT_NO_RTR6);
2425 2427
2426 spin_unlock_bh(&orig->mcast_handler_lock); 2428 spin_unlock_bh(&orig->mcast_handler_lock);
2427} 2429}
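
The first multicast hunk fixes a resume-cursor mix-up: the dump loop advanced bucket_tmp but kept passing the saved *bucket, so every iteration visited the same bucket. A tiny standalone demo of wrong versus fixed iteration:

#include <stdio.h>

static void dump(int bucket) { printf("dump bucket %d\n", bucket); }

int main(void)
{
	int saved = 2, size = 5;	/* resume cursor from the netlink cb */
	int bucket_tmp;

	for (bucket_tmp = saved; bucket_tmp < size; bucket_tmp++)
		dump(saved);		/* old code: bucket 2, three times */
	for (bucket_tmp = saved; bucket_tmp < size; bucket_tmp++)
		dump(bucket_tmp);	/* fixed: buckets 2, 3, 4 */
	return 0;
}
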
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 6f08fd122a8d..7e052d6f759b 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -164,7 +164,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
164{ 164{
165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype); 165 struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
166 166
167 return attr ? nla_get_u32(attr) : 0; 167 return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
168} 168}
169 169
170/** 170/**
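
The netlink hunk is the standard hardening for attributes that bypass policy validation: check the payload length before reading a fixed-size integer. A hedged userspace model of the same check; nla_len() is the payload length, i.e. total length minus the 4-byte attribute header, and the TLV struct below is a simplified stand-in for struct nlattr.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {		/* simplified stand-in for struct nlattr + payload */
	uint16_t len;	/* total length including this 4-byte header */
	uint16_t type;
	unsigned char payload[4];
};

static uint32_t get_u32_or_zero(const struct tlv *a)
{
	uint32_t v;

	/* nla_len() analogue: payload length must match exactly */
	if (!a || a->len - 4 != sizeof(uint32_t))
		return 0;
	memcpy(&v, a->payload, sizeof(v));
	return v;
}

int main(void)
{
	struct tlv ok = { .len = 8, .type = 1 };
	struct tlv bad = { .len = 6, .type = 1 };	/* short payload */
	uint32_t ifindex = 7;

	memcpy(ok.payload, &ifindex, sizeof(ifindex));
	printf("%u %u\n", get_u32_or_zero(&ok), get_u32_or_zero(&bad));
	return 0;
}
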
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b9585e7d9d2e..04bc79359a17 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3202,6 +3202,7 @@ struct hci_dev *hci_alloc_dev(void)
3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 3202 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 3203 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 3204 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3205 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3205 3206
3206 mutex_init(&hdev->lock); 3207 mutex_init(&hdev->lock);
3207 mutex_init(&hdev->req_lock); 3208 mutex_init(&hdev->req_lock);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index bb67f4a5479a..402e2cc54044 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -433,6 +433,35 @@ static int auto_accept_delay_set(void *data, u64 val)
433 return 0; 433 return 0;
434} 434}
435 435
436static int min_encrypt_key_size_set(void *data, u64 val)
437{
438 struct hci_dev *hdev = data;
439
440 if (val < 1 || val > 16)
441 return -EINVAL;
442
443 hci_dev_lock(hdev);
444 hdev->min_enc_key_size = val;
445 hci_dev_unlock(hdev);
446
447 return 0;
448}
449
450static int min_encrypt_key_size_get(void *data, u64 *val)
451{
452 struct hci_dev *hdev = data;
453
454 hci_dev_lock(hdev);
455 *val = hdev->min_enc_key_size;
456 hci_dev_unlock(hdev);
457
458 return 0;
459}
460
461DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
462 min_encrypt_key_size_get,
463 min_encrypt_key_size_set, "%llu\n");
464
436static int auto_accept_delay_get(void *data, u64 *val) 465static int auto_accept_delay_get(void *data, u64 *val)
437{ 466{
438 struct hci_dev *hdev = data; 467 struct hci_dev *hdev = data;
@@ -545,6 +574,8 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
545 if (lmp_ssp_capable(hdev)) { 574 if (lmp_ssp_capable(hdev)) {
546 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, 575 debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
547 hdev, &ssp_debug_mode_fops); 576 hdev, &ssp_debug_mode_fops);
577 debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs,
578 hdev, &min_encrypt_key_size_fops);
548 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, 579 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
549 hdev, &auto_accept_delay_fops); 580 hdev, &auto_accept_delay_fops);
550 } 581 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5abd423b55fa..8d889969ae7e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
101{ 101{
102 struct sk_buff *skb; 102 struct sk_buff *skb;
103 struct sock *sk = sock->sk; 103 struct sock *sk = sock->sk;
104 int ret;
104 105
105 BT_DBG("session %p data %p size %d", session, data, size); 106 BT_DBG("session %p data %p size %d", session, data, size);
106 107
@@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
114 } 115 }
115 116
116 skb_put_u8(skb, hdr); 117 skb_put_u8(skb, hdr);
117 if (data && size > 0) 118 if (data && size > 0) {
118 skb_put_data(skb, data, size); 119 skb_put_data(skb, data, size);
120 ret = size;
121 } else {
122 ret = 0;
123 }
119 124
120 skb_queue_tail(transmit, skb); 125 skb_queue_tail(transmit, skb);
121 wake_up_interruptible(sk_sleep(sk)); 126 wake_up_interruptible(sk_sleep(sk));
122 127
123 return 0; 128 return ret;
124} 129}
125 130
126static int hidp_send_ctrl_message(struct hidp_session *session, 131static int hidp_send_ctrl_message(struct hidp_session *session,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cc506fe99b4d..dfc1edb168b7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1361,7 +1361,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1361 * actually encrypted before enforcing a key size. 1361 * actually encrypted before enforcing a key size.
1362 */ 1362 */
1363 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || 1363 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1364 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE); 1364 hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1365} 1365}
1366 1366
1367static void l2cap_do_start(struct l2cap_chan *chan) 1367static void l2cap_do_start(struct l2cap_chan *chan)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index c8177a89f52c..4096d8a74a2b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb,
221 return NF_DROP; 221 return NF_DROP;
222 } 222 }
223 223
224 ADD_COUNTER(*(counter_base + i), 1, skb->len); 224 ADD_COUNTER(*(counter_base + i), skb->len, 1);
225 225
226 /* these should only watch: not modify, nor tell us 226 /* these should only watch: not modify, nor tell us
227 * what to do with the packet 227 * what to do with the packet
@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters,
959 continue; 959 continue;
960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 960 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
961 for (i = 0; i < nentries; i++) 961 for (i = 0; i < nentries; i++)
962 ADD_COUNTER(counters[i], counter_base[i].pcnt, 962 ADD_COUNTER(counters[i], counter_base[i].bcnt,
963 counter_base[i].bcnt); 963 counter_base[i].pcnt);
964 } 964 }
965} 965}
966 966
@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name,
1280 1280
1281 /* we add to the counters of the first cpu */ 1281 /* we add to the counters of the first cpu */
1282 for (i = 0; i < num_counters; i++) 1282 for (i = 0; i < num_counters; i++)
1283 ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt); 1283 ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt);
1284 1284
1285 write_unlock_bh(&t->lock); 1285 write_unlock_bh(&t->lock);
1286 ret = 0; 1286 ret = 0;
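
All three ebtables hunks fix the same swapped-argument bug: the counter macro takes bytes before packets, so passing skb->len as the packet count inflated packet counters by the frame size and undercounted bytes. A standalone demo, with the macro modeled after the x_tables definition (assumed signature: ADD_COUNTER(counter, bytes, packets)):

#include <stdio.h>

struct counter { unsigned long long pcnt, bcnt; };

/* modeled after the x_tables macro: bytes first, packets second */
#define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

int main(void)
{
	struct counter old_way = {0, 0}, fixed = {0, 0};
	unsigned int skb_len = 1500;

	ADD_COUNTER(old_way, 1, skb_len);	/* bug: 1500 "packets" */
	ADD_COUNTER(fixed, skb_len, 1);		/* one 1500-byte packet */
	printf("old:   %llu pkts, %llu bytes\n", old_way.pcnt, old_way.bcnt);
	printf("fixed: %llu pkts, %llu bytes\n", fixed.pcnt, fixed.bcnt);
	return 0;
}
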
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 5d6724cee38f..4f75df40fb12 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
136 if (key) { 136 if (key) {
137 kfree(key->key); 137 kfree(key->key);
138 key->key = NULL; 138 key->key = NULL;
139 crypto_free_sync_skcipher(key->tfm); 139 if (key->tfm) {
140 key->tfm = NULL; 140 crypto_free_sync_skcipher(key->tfm);
141 key->tfm = NULL;
142 }
141 } 143 }
142} 144}
143 145
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0b2df09b2554..78ae6e8c953d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1496,7 +1496,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1496 struct ceph_osds up, acting; 1496 struct ceph_osds up, acting;
1497 bool force_resend = false; 1497 bool force_resend = false;
1498 bool unpaused = false; 1498 bool unpaused = false;
1499 bool legacy_change; 1499 bool legacy_change = false;
1500 bool split = false; 1500 bool split = false;
1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); 1501 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1502 bool recovery_deletes = ceph_osdmap_flag(osdc, 1502 bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1584,15 +1584,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1584 t->osd = acting.primary; 1584 t->osd = acting.primary;
1585 } 1585 }
1586 1586
1587 if (unpaused || legacy_change || force_resend || 1587 if (unpaused || legacy_change || force_resend || split)
1588 (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1589 RESEND_ON_SPLIT)))
1590 ct_res = CALC_TARGET_NEED_RESEND; 1588 ct_res = CALC_TARGET_NEED_RESEND;
1591 else 1589 else
1592 ct_res = CALC_TARGET_NO_ACTION; 1590 ct_res = CALC_TARGET_NO_ACTION;
1593 1591
1594out: 1592out:
1595 dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd); 1593 dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1594 legacy_change, force_resend, split, ct_res, t->osd);
1596 return ct_res; 1595 return ct_res;
1597} 1596}
1598 1597
diff --git a/net/core/filter.c b/net/core/filter.c
index 7878f918b8c0..4c6a252d4212 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
8757 return size == size_default; 8757 return size == size_default;
8758 8758
8759 /* Fields that allow narrowing */ 8759 /* Fields that allow narrowing */
8760 case offsetof(struct sk_reuseport_md, eth_protocol): 8760 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
8761 if (size < FIELD_SIZEOF(struct sk_buff, protocol)) 8761 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8762 return false; 8762 return false;
8763 /* fall through */ 8763 /* fall through */
8764 case offsetof(struct sk_reuseport_md, ip_protocol): 8764 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
8765 case offsetof(struct sk_reuseport_md, bind_inany): 8765 case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
8766 case offsetof(struct sk_reuseport_md, len): 8766 case bpf_ctx_range(struct sk_reuseport_md, len):
8767 bpf_ctx_record_field_size(info, size_default); 8767 bpf_ctx_record_field_size(info, size_default);
8768 return bpf_ctx_narrow_access_ok(off, size, size_default); 8768 return bpf_ctx_narrow_access_ok(off, size, size_default);
8769 8769
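
The filter.c hunk swaps offsetof() for bpf_ctx_range() in the narrowing cases: offsetof() matches only a load that starts at the field's first byte, while bpf_ctx_range() expands to a case range covering every byte of the field, so narrow loads starting mid-field also reach bpf_ctx_narrow_access_ok(). A sketch of the difference using GNU case ranges, which is what the kernel macro expands to; the offsets here are illustrative.

#include <stdio.h>

#define FIELD_OFF	16	/* illustrative field at bytes 16..19 */
#define FIELD_SIZE	4

static int reaches_narrow_check(int off)
{
	switch (off) {
	case FIELD_OFF:			/* all that offsetof() ever matched */
	case FIELD_OFF + 1 ... FIELD_OFF + FIELD_SIZE - 1:
		return 1;		/* bpf_ctx_range() covers these too */
	default:
		return 0;
	}
}

int main(void)
{
	/* a 2-byte load at offset 18 now reaches the narrowing check */
	printf("%d %d\n", reaches_narrow_check(16), reaches_narrow_check(18));
	return 0;
}
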
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 3e6fedb57bc1..2470b4b404e6 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
142 mutex_unlock(&flow_dissector_mutex); 142 mutex_unlock(&flow_dissector_mutex);
143 return -ENOENT; 143 return -ENOENT;
144 } 144 }
145 bpf_prog_put(attached);
146 RCU_INIT_POINTER(net->flow_dissector_prog, NULL); 145 RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
146 bpf_prog_put(attached);
147 mutex_unlock(&flow_dissector_mutex); 147 mutex_unlock(&flow_dissector_mutex);
148 return 0; 148 return 0;
149} 149}
diff --git a/net/core/sock.c b/net/core/sock.c
index d57b0cc995a0..545fac19a711 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1992,6 +1992,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1992} 1992}
1993EXPORT_SYMBOL(skb_set_owner_w); 1993EXPORT_SYMBOL(skb_set_owner_w);
1994 1994
1995static bool can_skb_orphan_partial(const struct sk_buff *skb)
1996{
1997#ifdef CONFIG_TLS_DEVICE
1998 /* Drivers depend on in-order delivery for crypto offload,
1999 * partial orphan breaks out-of-order-OK logic.
2000 */
2001 if (skb->decrypted)
2002 return false;
2003#endif
2004 return (skb->destructor == sock_wfree ||
2005 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2006}
2007
1995/* This helper is used by netem, as it can hold packets in its 2008/* This helper is used by netem, as it can hold packets in its
1996 * delay queue. We want to allow the owner socket to send more 2009 * delay queue. We want to allow the owner socket to send more
1997 * packets, as if they were already TX completed by a typical driver. 2010 * packets, as if they were already TX completed by a typical driver.
@@ -2003,11 +2016,7 @@ void skb_orphan_partial(struct sk_buff *skb)
2003 if (skb_is_tcp_pure_ack(skb)) 2016 if (skb_is_tcp_pure_ack(skb))
2004 return; 2017 return;
2005 2018
2006 if (skb->destructor == sock_wfree 2019 if (can_skb_orphan_partial(skb)) {
2007#ifdef CONFIG_INET
2008 || skb->destructor == tcp_wfree
2009#endif
2010 ) {
2011 struct sock *sk = skb->sk; 2020 struct sock *sk = skb->sk;
2012 2021
2013 if (refcount_inc_not_zero(&sk->sk_refcnt)) { 2022 if (refcount_inc_not_zero(&sk->sk_refcnt)) {
@@ -3278,16 +3287,17 @@ static __init int net_inuse_init(void)
3278 3287
3279core_initcall(net_inuse_init); 3288core_initcall(net_inuse_init);
3280 3289
3281static void assign_proto_idx(struct proto *prot) 3290static int assign_proto_idx(struct proto *prot)
3282{ 3291{
3283 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 3292 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3284 3293
3285 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 3294 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3286 pr_err("PROTO_INUSE_NR exhausted\n"); 3295 pr_err("PROTO_INUSE_NR exhausted\n");
3287 return; 3296 return -ENOSPC;
3288 } 3297 }
3289 3298
3290 set_bit(prot->inuse_idx, proto_inuse_idx); 3299 set_bit(prot->inuse_idx, proto_inuse_idx);
3300 return 0;
3291} 3301}
3292 3302
3293static void release_proto_idx(struct proto *prot) 3303static void release_proto_idx(struct proto *prot)
@@ -3296,8 +3306,9 @@ static void release_proto_idx(struct proto *prot)
3296 clear_bit(prot->inuse_idx, proto_inuse_idx); 3306 clear_bit(prot->inuse_idx, proto_inuse_idx);
3297} 3307}
3298#else 3308#else
3299static inline void assign_proto_idx(struct proto *prot) 3309static inline int assign_proto_idx(struct proto *prot)
3300{ 3310{
3311 return 0;
3301} 3312}
3302 3313
3303static inline void release_proto_idx(struct proto *prot) 3314static inline void release_proto_idx(struct proto *prot)
@@ -3346,6 +3357,8 @@ static int req_prot_init(const struct proto *prot)
3346 3357
3347int proto_register(struct proto *prot, int alloc_slab) 3358int proto_register(struct proto *prot, int alloc_slab)
3348{ 3359{
3360 int ret = -ENOBUFS;
3361
3349 if (alloc_slab) { 3362 if (alloc_slab) {
3350 prot->slab = kmem_cache_create_usercopy(prot->name, 3363 prot->slab = kmem_cache_create_usercopy(prot->name,
3351 prot->obj_size, 0, 3364 prot->obj_size, 0,
@@ -3382,20 +3395,27 @@ int proto_register(struct proto *prot, int alloc_slab)
3382 } 3395 }
3383 3396
3384 mutex_lock(&proto_list_mutex); 3397 mutex_lock(&proto_list_mutex);
3398 ret = assign_proto_idx(prot);
3399 if (ret) {
3400 mutex_unlock(&proto_list_mutex);
3401 goto out_free_timewait_sock_slab_name;
3402 }
3385 list_add(&prot->node, &proto_list); 3403 list_add(&prot->node, &proto_list);
3386 assign_proto_idx(prot);
3387 mutex_unlock(&proto_list_mutex); 3404 mutex_unlock(&proto_list_mutex);
3388 return 0; 3405 return ret;
3389 3406
3390out_free_timewait_sock_slab_name: 3407out_free_timewait_sock_slab_name:
3391 kfree(prot->twsk_prot->twsk_slab_name); 3408 if (alloc_slab && prot->twsk_prot)
3409 kfree(prot->twsk_prot->twsk_slab_name);
3392out_free_request_sock_slab: 3410out_free_request_sock_slab:
3393 req_prot_cleanup(prot->rsk_prot); 3411 if (alloc_slab) {
3412 req_prot_cleanup(prot->rsk_prot);
3394 3413
3395 kmem_cache_destroy(prot->slab); 3414 kmem_cache_destroy(prot->slab);
3396 prot->slab = NULL; 3415 prot->slab = NULL;
3416 }
3397out: 3417out:
3398 return -ENOBUFS; 3418 return ret;
3399} 3419}
3400EXPORT_SYMBOL(proto_register); 3420EXPORT_SYMBOL(proto_register);
3401 3421
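
The proto_register() change follows a reserve-then-publish idiom: grab the inuse index first, and only on success add the proto to the global list, with the shared error labels made safe for callers that never allocated slabs. A hedged userspace sketch of the idiom; the names are illustrative and locking is reduced to comments.

#include <stdio.h>

#define MAX_IDS 2
static int used[MAX_IDS];

static int assign_idx(void)	/* assign_proto_idx() analogue */
{
	int i;

	for (i = 0; i < MAX_IDS; i++) {
		if (!used[i]) {
			used[i] = 1;
			return i;
		}
	}
	return -1;	/* table exhausted, like -ENOSPC */
}

static int do_register(const char *name)
{
	/* lock(proto_list_mutex); */
	int idx = assign_idx();

	if (idx < 0) {
		/* unlock(); fall into the slab-unwind labels */
		return -1;	/* -ENOBUFS analogue */
	}
	/* list_add(); unlock(); -- publish only after reserving */
	printf("%s -> idx %d\n", name, idx);
	return 0;
}

int main(void)
{
	do_register("raw");
	do_register("dgram");
	return do_register("stream") ? 1 : 0;	/* fails cleanly */
}
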
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 3312a5849a97..c13ffbd33d8d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
19static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); 19static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
20static DEFINE_MUTEX(sock_diag_table_mutex); 20static DEFINE_MUTEX(sock_diag_table_mutex);
21static struct workqueue_struct *broadcast_wq; 21static struct workqueue_struct *broadcast_wq;
22static atomic64_t cookie_gen;
22 23
23u64 sock_gen_cookie(struct sock *sk) 24u64 sock_gen_cookie(struct sock *sk)
24{ 25{
@@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk)
27 28
28 if (res) 29 if (res)
29 return res; 30 return res;
30 res = atomic64_inc_return(&sock_net(sk)->cookie_gen); 31 res = atomic64_inc_return(&cookie_gen);
31 atomic64_cmpxchg(&sk->sk_cookie, 0, res); 32 atomic64_cmpxchg(&sk->sk_cookie, 0, res);
32 } 33 }
33} 34}
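
Moving cookie_gen from per-netns state to a single static counter keeps socket cookies globally unique even when a socket's namespace changes between cookie reads. A hedged C11 model of the assignment protocol: one monotonic source plus a compare-and-swap, so racing callers agree on the first cookie assigned.

#include <stdatomic.h>
#include <stdio.h>

static atomic_ullong cookie_gen;	/* single global source */

static unsigned long long gen_cookie(atomic_ullong *sk_cookie)
{
	unsigned long long res = atomic_load(sk_cookie);

	while (!res) {
		unsigned long long zero = 0;

		res = atomic_fetch_add(&cookie_gen, 1) + 1;
		/* first writer wins; losers adopt the winner's cookie */
		if (!atomic_compare_exchange_strong(sk_cookie, &zero, res))
			res = zero;
	}
	return res;
}

int main(void)
{
	atomic_ullong sk_a = 0, sk_b = 0;
	unsigned long long a1 = gen_cookie(&sk_a);
	unsigned long long a2 = gen_cookie(&sk_a);
	unsigned long long b1 = gen_cookie(&sk_b);

	printf("%llu %llu %llu\n", a1, a2, b1);	/* 1 1 2 */
	return 0;
}
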
diff --git a/net/core/stream.c b/net/core/stream.c
index e94bb02a5629..4f1d4aa5fb38 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -120,7 +120,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
120 int err = 0; 120 int err = 0;
121 long vm_wait = 0; 121 long vm_wait = 0;
122 long current_timeo = *timeo_p; 122 long current_timeo = *timeo_p;
123 bool noblock = (*timeo_p ? false : true);
124 DEFINE_WAIT_FUNC(wait, woken_wake_function); 123 DEFINE_WAIT_FUNC(wait, woken_wake_function);
125 124
126 if (sk_stream_memory_free(sk)) 125 if (sk_stream_memory_free(sk))
@@ -133,11 +132,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
133 132
134 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 133 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
135 goto do_error; 134 goto do_error;
136 if (!*timeo_p) { 135 if (!*timeo_p)
137 if (noblock) 136 goto do_eagain;
138 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
139 goto do_nonblock;
140 }
141 if (signal_pending(current)) 137 if (signal_pending(current))
142 goto do_interrupted; 138 goto do_interrupted;
143 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 139 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -169,7 +165,13 @@ out:
169do_error: 165do_error:
170 err = -EPIPE; 166 err = -EPIPE;
171 goto out; 167 goto out;
172do_nonblock: 168do_eagain:
169 /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
170 * be generated later.
171 * When TCP receives ACK packets that make room, tcp_check_space()
172 * only calls tcp_new_space() if SOCK_NOSPACE is set.
173 */
174 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
173 err = -EAGAIN; 175 err = -EAGAIN;
174 goto out; 176 goto out;
175do_interrupted: 177do_interrupted:
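
The sk_stream_wait_memory() fix makes the zero-timeout path set SOCK_NOSPACE before returning -EAGAIN, since tcp_check_space() only raises the write-space event when that bit is armed; without it, an epoll-driven sender could miss EPOLLOUT indefinitely. A small runnable illustration of the userspace contract being preserved; AF_UNIX stands in for TCP here, so it demonstrates only the EAGAIN side.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int sv[2];
	char buf[4096];

	memset(buf, 0, sizeof(buf));
	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;

	/* fill the send buffer until a nonblocking send backs off */
	while (send(sv[0], buf, sizeof(buf), MSG_DONTWAIT) > 0)
		;
	if (errno == EAGAIN || errno == EWOULDBLOCK)
		puts("EAGAIN: the fix guarantees SOCK_NOSPACE is armed here,"
		     " so a later EPOLLOUT cannot be lost");
	return 0;
}
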
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 4ec5b7f85d51..09d9286b27cc 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
 {
 	int port;
 
+	if (!ds->ops->port_mdb_add)
+		return;
+
 	for_each_set_bit(port, bitmap, ds->num_ports)
 		ds->ops->port_mdb_add(ds, port, mdb);
 }
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e4aba5d485be..bbe9b3b2d395 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -170,7 +170,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
 	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
 	if (!reasm_data)
 		goto out_oom;
-	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
 
 	skb->dev = ldev;
 	skb->tstamp = fq->q.stamp;
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index dacbd58e1799..badc5cfe4dc6 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -1092,7 +1092,7 @@ static struct packet_type ieee802154_packet_type = {
 
 static int __init af_ieee802154_init(void)
 {
-	int rc = -EINVAL;
+	int rc;
 
 	rc = proto_register(&ieee802154_raw_prot, 1);
 	if (rc)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2b2b3d291ab0..1ab2fb6bb37d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2145,7 +2145,7 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
 
 		if (filter->dump_exceptions) {
 			err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi,
-						 &i_fa, s_fa);
+						 &i_fa, s_fa, flags);
 			if (err < 0)
 				goto stop;
 		}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 1510e951f451..4298aae74e0e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -582,7 +582,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
 
 	if (!rt)
 		goto out;
-	net = dev_net(rt->dst.dev);
+
+	if (rt->dst.dev)
+		net = dev_net(rt->dst.dev);
+	else if (skb_in->dev)
+		net = dev_net(skb_in->dev);
+	else
+		goto out;
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -902,7 +908,7 @@ static bool icmp_redirect(struct sk_buff *skb)
 		return false;
 	}
 
-	icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
+	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
 	return true;
 }
 
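The icmp_redirect fix runs the gateway address through ntohl() because icmp_socket_deliver() expects host byte order. A tiny standalone illustration of the bug class (the printed values differ only on little-endian hosts):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 192.0.2.1 as it appears on the wire (network byte order) */
	uint32_t wire = htonl(0xC0000201);

	/* Passing "wire" where host order is expected corrupts the address
	 * on little-endian machines; ntohl() restores it.
	 */
	printf("raw:   %08x\n", (unsigned)wire);
	printf("fixed: %08x\n", (unsigned)ntohl(wire));	/* c0000201 */
	return 0;
}
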
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 180f6896b98b..480d0b22db1a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1475,7 +1475,7 @@ EXPORT_SYMBOL(__ip_mc_inc_group);
 
 void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 {
-	__ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+	__ip_mc_inc_group(in_dev, addr, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ip_mc_inc_group);
 
@@ -2197,7 +2197,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
 	iml->sflist = NULL;
 	iml->sfmode = mode;
 	rcu_assign_pointer(inet->mc_list, iml);
-	__ip_mc_inc_group(in_dev, addr, mode);
+	____ip_mc_inc_group(in_dev, addr, mode, GFP_KERNEL);
 	err = 0;
 done:
 	return err;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a999451345f9..10d31733297d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -475,11 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
 EXPORT_SYMBOL(inet_frag_reasm_prepare);
 
 void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
-			    void *reasm_data)
+			    void *reasm_data, bool try_coalesce)
 {
 	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
 	struct rb_node *rbn;
 	struct sk_buff *fp;
+	int sum_truesize;
 
 	skb_push(head, head->data - skb_network_header(head));
 
@@ -487,25 +488,41 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
 	fp = FRAG_CB(head)->next_frag;
 	rbn = rb_next(&head->rbnode);
 	rb_erase(&head->rbnode, &q->rb_fragments);
+
+	sum_truesize = head->truesize;
 	while (rbn || fp) {
 		/* fp points to the next sk_buff in the current run;
 		 * rbn points to the next run.
 		 */
 		/* Go through the current run. */
 		while (fp) {
-			*nextp = fp;
-			nextp = &fp->next;
-			fp->prev = NULL;
-			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
-			fp->sk = NULL;
-			head->data_len += fp->len;
-			head->len += fp->len;
+			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
+			bool stolen;
+			int delta;
+
+			sum_truesize += fp->truesize;
 			if (head->ip_summed != fp->ip_summed)
 				head->ip_summed = CHECKSUM_NONE;
 			else if (head->ip_summed == CHECKSUM_COMPLETE)
 				head->csum = csum_add(head->csum, fp->csum);
-			head->truesize += fp->truesize;
-			fp = FRAG_CB(fp)->next_frag;
+
+			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
+							     &delta)) {
+				kfree_skb_partial(fp, stolen);
+			} else {
+				fp->prev = NULL;
+				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+				fp->sk = NULL;
+
+				head->data_len += fp->len;
+				head->len += fp->len;
+				head->truesize += fp->truesize;
+
+				*nextp = fp;
+				nextp = &fp->next;
+			}
+
+			fp = next_frag;
 		}
 		/* Move to the next run. */
 		if (rbn) {
@@ -516,7 +533,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
 			rbn = rbnext;
 		}
 	}
-	sub_frag_mem_limit(q->fqdir, head->truesize);
+	sub_frag_mem_limit(q->fqdir, sum_truesize);
 
 	*nextp = NULL;
 	skb_mark_not_on_list(head);
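The inet_frag change tallies truesize into a local sum while walking the fragment chain, because coalesced fragments are freed mid-walk and can no longer be counted at the end; the memory limit is then uncharged once. A minimal sketch of that accumulate-then-settle pattern (illustrative types, not the kernel's sk_buff machinery):

#include <stdlib.h>

struct frag {
	struct frag *next;
	int truesize;
};

/* Walk the chain once, freeing absorbed fragments as we go, and return
 * the total accounted size so the caller can uncharge it in one step.
 */
static int reasm_and_sum(struct frag *head)
{
	int sum_truesize = head->truesize;
	struct frag *fp = head->next;

	while (fp) {
		struct frag *next = fp->next;

		sum_truesize += fp->truesize;
		free(fp);		/* stand-in for coalesce/consume */
		fp = next;
	}
	head->next = NULL;
	return sum_truesize;
}
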
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4385eb9e781f..cfeb8890f94e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -393,6 +393,11 @@ err:
 	return err;
 }
 
+static bool ip_frag_coalesce_ok(const struct ipq *qp)
+{
+	return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
+}
+
 /* Build a new IP datagram from all its fragments. */
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 			 struct sk_buff *prev_tail, struct net_device *dev)
@@ -421,7 +426,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	if (len > 65535)
 		goto out_oversize;
 
-	inet_frag_reasm_finish(&qp->q, skb, reasm_data);
+	inet_frag_reasm_finish(&qp->q, skb, reasm_data,
+			       ip_frag_coalesce_ok(qp));
 
 	skb->dev = dev;
 	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 517300d587a7..b6a6f18c3dd1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2728,7 +2728,8 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
 /* called with rcu_read_lock held */
 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
-			struct sk_buff *skb, u32 portid, u32 seq)
+			struct sk_buff *skb, u32 portid, u32 seq,
+			unsigned int flags)
 {
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
@@ -2736,7 +2737,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
 	u32 error;
 	u32 metrics[RTAX_MAX];
 
-	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
+	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
 	if (!nlh)
 		return -EMSGSIZE;
 
@@ -2860,7 +2861,7 @@ nla_put_failure:
 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
 			    struct netlink_callback *cb, u32 table_id,
 			    struct fnhe_hash_bucket *bucket, int genid,
-			    int *fa_index, int fa_start)
+			    int *fa_index, int fa_start, unsigned int flags)
 {
 	int i;
 
@@ -2891,7 +2892,7 @@ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
 			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
 					   table_id, NULL, skb,
 					   NETLINK_CB(cb->skb).portid,
-					   cb->nlh->nlmsg_seq);
+					   cb->nlh->nlmsg_seq, flags);
 			if (err)
 				return err;
 next:
@@ -2904,7 +2905,7 @@ next:
 
 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		       u32 table_id, struct fib_info *fi,
-		       int *fa_index, int fa_start)
+		       int *fa_index, int fa_start, unsigned int flags)
 {
 	struct net *net = sock_net(cb->skb->sk);
 	int nhsel, genid = fnhe_genid(net);
@@ -2922,7 +2923,8 @@ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
 		err = 0;
 		if (bucket)
 			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
-					       genid, fa_index, fa_start);
+					       genid, fa_index, fa_start,
+					       flags);
 		rcu_read_unlock();
 		if (err)
 			return err;
@@ -3183,7 +3185,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
 				    fl4.flowi4_tos, res.fi, 0);
 	} else {
 		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
-				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
+				   NETLINK_CB(in_skb).portid,
+				   nlh->nlmsg_seq, 0);
 	}
 	if (err < 0)
 		goto errout_rcu;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 776905899ac0..77b485d60b9d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -984,6 +984,9 @@ new_segment:
 			if (!skb)
 				goto wait_for_memory;
 
+#ifdef CONFIG_TLS_DEVICE
+			skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
 			skb_entail(sk, skb);
 			copy = size_goal;
 		}
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3d1e15401384..8a56e09cfb0e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -398,10 +398,14 @@ more_data:
 static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
 	struct sk_msg tmp, *msg_tx = NULL;
-	int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
 	int copied = 0, err = 0;
 	struct sk_psock *psock;
 	long timeo;
+	int flags;
+
+	/* Don't let internal do_tcp_sendpages() flags through */
+	flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
+	flags |= MSG_NO_SHARED_FRAGS;
 
 	psock = sk_psock_get(sk);
 	if (unlikely(!psock))
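The tcp_bpf change strips the kernel-internal MSG_SENDPAGE_DECRYPTED bit out of caller-supplied flags before OR-ing in what the path itself needs. The same sanitize-then-augment idiom in isolation (the macro values below are invented for illustration):

#define MSG_INTERNAL_ONLY	0x00010000	/* never valid from callers */
#define MSG_REQUIRED		0x00020000	/* this path always needs it */

static unsigned int sanitize_flags(unsigned int user_flags)
{
	unsigned int flags;

	flags = user_flags & ~MSG_INTERNAL_ONLY;	/* drop internal bits */
	flags |= MSG_REQUIRED;				/* force what we need */
	return flags;
}
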
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6e4afc48d7bb..979520e46e33 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1320,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
 	if (!buff)
 		return -ENOMEM; /* We'll just try again later. */
+	skb_copy_decrypted(buff, skb);
 
 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
@@ -1874,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
 	if (unlikely(!buff))
 		return -ENOMEM;
+	skb_copy_decrypted(buff, skb);
 
 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
@@ -2143,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	sk_mem_charge(sk, nskb->truesize);
 
 	skb = tcp_send_head(sk);
+	skb_copy_decrypted(nskb, skb);
 
 	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
 	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index dc73888c7859..6a576ff92c39 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -478,7 +478,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
 	if (!idev) {
 		idev = ipv6_add_dev(dev);
 		if (IS_ERR(idev))
-			return NULL;
+			return idev;
 	}
 
 	if (dev->flags&IFF_UP)
@@ -1045,7 +1045,8 @@ ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
 	int err = 0;
 
 	if (addr_type == IPV6_ADDR_ANY ||
-	    addr_type & IPV6_ADDR_MULTICAST ||
+	    (addr_type & IPV6_ADDR_MULTICAST &&
+	     !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
 	    (!(idev->dev->flags & IFF_LOOPBACK) &&
 	     !netif_is_l3_master(idev->dev) &&
 	     addr_type & IPV6_ADDR_LOOPBACK))
@@ -2465,8 +2466,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
 	ASSERT_RTNL();
 
 	idev = ipv6_find_idev(dev);
-	if (!idev)
-		return ERR_PTR(-ENOBUFS);
+	if (IS_ERR(idev))
+		return idev;
 
 	if (idev->cnf.disable_ipv6)
 		return ERR_PTR(-EACCES);
@@ -3158,7 +3159,7 @@ static void init_loopback(struct net_device *dev)
 	ASSERT_RTNL();
 
 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -3373,7 +3374,7 @@ static void addrconf_sit_config(struct net_device *dev)
 	 */
 
 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -3398,7 +3399,7 @@ static void addrconf_gre_config(struct net_device *dev)
 	ASSERT_RTNL();
 
 	idev = ipv6_find_idev(dev);
-	if (!idev) {
+	if (IS_ERR(idev)) {
 		pr_debug("%s: add_dev failed\n", __func__);
 		return;
 	}
@@ -4772,8 +4773,8 @@ inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
 		     IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
 
 	idev = ipv6_find_idev(dev);
-	if (!idev)
-		return -ENOBUFS;
+	if (IS_ERR(idev))
+		return PTR_ERR(idev);
 
 	if (!ipv6_allow_optimistic_dad(net, idev))
 		cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
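The addrconf series converts ipv6_find_idev() from returning NULL to propagating ERR_PTR() values, so every caller reports the real errno via IS_ERR()/PTR_ERR() instead of inventing -ENOBUFS. A minimal reimplementation of the encoding, for illustration only (the kernel's versions live in linux/err.h and encode the errno in the top page of the address space):

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *find_thing(int ok)
{
	static int thing;

	return ok ? (void *)&thing : ERR_PTR(-12);	/* -12 == -ENOMEM */
}

int main(void)
{
	void *p = find_thing(0);

	if (IS_ERR(p))
		printf("error %ld\n", PTR_ERR(p));	/* error -12 */
	return 0;
}
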
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0f82c150543b..fed9666a2f7d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
 
 	skb_reset_transport_header(skb);
 
-	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
 
 	skb->ignore_df = 1;
 	skb->dev = dev;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index ca05b16f1bb9..1f5d4d196dcc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -282,7 +282,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
 
 	skb_reset_transport_header(skb);
 
-	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
 
 	skb->dev = dev;
 	ipv6_hdr(skb)->payload_len = htons(payload_len);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 4d458067d80d..111c400199ec 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1546,6 +1546,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (is_multicast_ether_addr(mac))
 		return -EINVAL;
 
+	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
+	    sdata->vif.type == NL80211_IFTYPE_STATION &&
+	    !sdata->u.mgd.associated)
+		return -EINVAL;
+
 	sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
 	if (!sta)
 		return -ENOMEM;
@@ -1553,10 +1558,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 	if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
 		sta->sta.tdls = true;
 
-	if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
-	    !sdata->u.mgd.associated)
-		return -EINVAL;
-
 	err = sta_apply_parameters(local, sta, params);
 	if (err) {
 		sta_info_free(local, sta);
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index d25e91d7bdc1..44b675016393 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -133,12 +133,12 @@ static int mpls_xmit(struct sk_buff *skb)
 	mpls_stats_inc_outucastpkts(out_dev, skb);
 
 	if (rt) {
-		if (rt->rt_gw_family == AF_INET)
-			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
-					 skb);
-		else if (rt->rt_gw_family == AF_INET6)
+		if (rt->rt_gw_family == AF_INET6)
 			err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6,
 					 skb);
+		else
+			err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4,
+					 skb);
 	} else if (rt6) {
 		if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
 			/* 6PE (RFC 4798) */
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index 5c3fad8cba57..0187e65176c0 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -54,7 +54,7 @@ static void ncsi_cmd_build_header(struct ncsi_pkt_hdr *h,
 	checksum = ncsi_calculate_checksum((unsigned char *)h,
 					   sizeof(*h) + nca->payload);
 	pchecksum = (__be32 *)((void *)h + sizeof(struct ncsi_pkt_hdr) +
-		    nca->payload);
+		    ALIGN(nca->payload, 4));
 	*pchecksum = htonl(checksum);
 }
 
@@ -309,14 +309,21 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
 
 int ncsi_xmit_cmd(struct ncsi_cmd_arg *nca)
 {
+	struct ncsi_cmd_handler *nch = NULL;
 	struct ncsi_request *nr;
+	unsigned char type;
 	struct ethhdr *eh;
-	struct ncsi_cmd_handler *nch = NULL;
 	int i, ret;
 
+	/* Use OEM generic handler for Netlink request */
+	if (nca->req_flags == NCSI_REQ_FLAG_NETLINK_DRIVEN)
+		type = NCSI_PKT_CMD_OEM;
+	else
+		type = nca->type;
+
 	/* Search for the handler */
 	for (i = 0; i < ARRAY_SIZE(ncsi_cmd_handlers); i++) {
-		if (ncsi_cmd_handlers[i].type == nca->type) {
+		if (ncsi_cmd_handlers[i].type == type) {
 			if (ncsi_cmd_handlers[i].handler)
 				nch = &ncsi_cmd_handlers[i];
 			else
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 7581bf919885..d876bd55f356 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -47,7 +47,8 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 	if (ntohs(h->code) != NCSI_PKT_RSP_C_COMPLETED ||
 	    ntohs(h->reason) != NCSI_PKT_RSP_R_NO_ERROR) {
 		netdev_dbg(nr->ndp->ndev.dev,
-			   "NCSI: non zero response/reason code\n");
+			   "NCSI: non zero response/reason code %04xh, %04xh\n",
+			    ntohs(h->code), ntohs(h->reason));
 		return -EPERM;
 	}
 
@@ -55,7 +56,7 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 	 * sender doesn't support checksum according to NCSI
 	 * specification.
 	 */
-	pchecksum = (__be32 *)((void *)(h + 1) + payload - 4);
+	pchecksum = (__be32 *)((void *)(h + 1) + ALIGN(payload, 4) - 4);
 	if (ntohl(*pchecksum) == 0)
 		return 0;
 
@@ -63,7 +64,9 @@ static int ncsi_validate_rsp_pkt(struct ncsi_request *nr,
 					   sizeof(*h) + payload - 4);
 
 	if (*pchecksum != htonl(checksum)) {
-		netdev_dbg(nr->ndp->ndev.dev, "NCSI: checksum mismatched\n");
+		netdev_dbg(nr->ndp->ndev.dev,
+			   "NCSI: checksum mismatched; recd: %08x calc: %08x\n",
+			   *pchecksum, htonl(checksum));
 		return -EINVAL;
 	}
 
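Both NCSI fixes locate the checksum at ALIGN(payload, 4) because the payload is padded to a 4-byte boundary before the checksum field. The rounding macro itself is just power-of-two arithmetic, shown here standalone:

#include <stdio.h>

/* Round x up to the next multiple of a; a must be a power of two. */
#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	printf("%d %d %d\n", ALIGN(29, 4), ALIGN(32, 4), ALIGN(33, 4));
	/* prints: 32 32 36 */
	return 0;
}
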
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a542761e90d1..81a8ef42b88d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
  * table location, we assume id gets exposed to userspace.
  *
  * Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
  *
  * 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
  */
 u32 nf_ct_get_id(const struct nf_conn *ct)
 {
@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
 
 	a = (unsigned long)ct;
-	b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
-	c = (unsigned long)ct->ext;
-	d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+	b = (unsigned long)ct->master;
+	c = (unsigned long)nf_ct_net(ct);
+	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
 				   &ct_id_seed);
 #ifdef CONFIG_64BIT
 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
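The nf_ct_get_id() fix hashes only fields that truly never change over the entry's lifetime (object address, master, namespace, original-direction tuple); the extension area can be reallocated and the reply tuple rewritten by NAT, so including them made IDs unstable. A sketch of the "derive an ID from immutable fields only" idea, with FNV-1a standing in for the kernel's keyed siphash (struct layout is invented for illustration):

#include <stdint.h>
#include <stddef.h>

static uint64_t fnv1a(const void *data, size_t len, uint64_t h)
{
	const unsigned char *p = data;

	while (len--) {
		h ^= *p++;
		h *= 0x100000001b3ULL;
	}
	return h;
}

struct conn {
	struct conn *master;
	void *net;
	unsigned char orig_tuple[16];	/* never rewritten */
	unsigned char reply_tuple[16];	/* may change under NAT: excluded */
};

static uint64_t conn_id(const struct conn *ct)
{
	uint64_t h = 0xcbf29ce484222325ULL;

	h = fnv1a(&ct, sizeof(ct), h);			/* object address */
	h = fnv1a(&ct->master, sizeof(ct->master), h);
	h = fnv1a(&ct->net, sizeof(ct->net), h);
	return fnv1a(ct->orig_tuple, sizeof(ct->orig_tuple), h);
}
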
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index e3d797252a98..80a8f9ae4c93 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)
 
-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+	return (__s32)(timeout - (u32)jiffies);
+}
+
+static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
 {
 	const struct nf_conntrack_l4proto *l4proto;
+	int l4num = nf_ct_protonum(ct);
 	unsigned int timeout;
-	int l4num;
-
-	l4num = nf_ct_protonum(ct);
-	if (l4num == IPPROTO_TCP)
-		flow_offload_fixup_tcp(&ct->proto.tcp);
 
 	l4proto = nf_ct_l4proto_find(l4num);
 	if (!l4proto)
@@ -132,7 +133,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
 	else
 		return;
 
-	ct->timeout = nfct_time_stamp + timeout;
+	if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+		ct->timeout = nfct_time_stamp + timeout;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+	if (nf_ct_protonum(ct) == IPPROTO_TCP)
+		flow_offload_fixup_tcp(&ct->proto.tcp);
+}
+
+static void flow_offload_fixup_ct(struct nf_conn *ct)
+{
+	flow_offload_fixup_ct_state(ct);
+	flow_offload_fixup_ct_timeout(ct);
 }
 
 void flow_offload_free(struct flow_offload *flow)
@@ -208,6 +222,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
 
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+	return nf_flow_timeout_delta(flow->timeout) <= 0;
+}
+
 static void flow_offload_del(struct nf_flowtable *flow_table,
 			     struct flow_offload *flow)
 {
@@ -223,6 +242,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 	e = container_of(flow, struct flow_offload_entry, flow);
 	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
 
+	if (nf_flow_has_expired(flow))
+		flow_offload_fixup_ct(e->ct);
+	else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+		flow_offload_fixup_ct_timeout(e->ct);
+
 	flow_offload_free(flow);
 }
 
@@ -298,11 +322,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
 	return err;
 }
 
-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
-{
-	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
-}
-
 static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
 {
 	struct nf_flowtable *flow_table = data;
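nf_flow_timeout_delta() compares jiffies-based deadlines by casting the unsigned difference to a signed value, which stays correct across counter wraparound. The trick in isolation:

#include <stdint.h>
#include <stdio.h>

/* Positive: "deadline" is still in the future; <= 0: expired.
 * Works even when the 32-bit tick counter has wrapped between the
 * two timestamps, as long as they are less than 2^31 ticks apart.
 */
static int32_t timeout_delta(uint32_t deadline, uint32_t now)
{
	return (int32_t)(deadline - now);
}

int main(void)
{
	uint32_t now = 0xFFFFFFF0u;		/* about to wrap */

	printf("%d\n", timeout_delta(now + 100, now));	/* 100, despite wrap */
	printf("%d\n", timeout_delta(now - 100, now));	/* -100 */
	return 0;
}
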
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index cdfc33517e85..d68c801dd614 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -214,6 +214,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
214 return true; 214 return true;
215} 215}
216 216
217static int nf_flow_offload_dst_check(struct dst_entry *dst)
218{
219 if (unlikely(dst_xfrm(dst)))
220 return dst_check(dst, 0) ? 0 : -1;
221
222 return 0;
223}
224
225static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
226 const struct nf_hook_state *state,
227 struct dst_entry *dst)
228{
229 skb_orphan(skb);
230 skb_dst_set_noref(skb, dst);
231 skb->tstamp = 0;
232 dst_output(state->net, state->sk, skb);
233 return NF_STOLEN;
234}
235
217unsigned int 236unsigned int
218nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, 237nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
219 const struct nf_hook_state *state) 238 const struct nf_hook_state *state)
@@ -254,6 +273,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
254 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff)) 273 if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
255 return NF_ACCEPT; 274 return NF_ACCEPT;
256 275
276 if (nf_flow_offload_dst_check(&rt->dst)) {
277 flow_offload_teardown(flow);
278 return NF_ACCEPT;
279 }
280
257 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) 281 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
258 return NF_DROP; 282 return NF_DROP;
259 283
@@ -261,6 +285,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
261 iph = ip_hdr(skb); 285 iph = ip_hdr(skb);
262 ip_decrease_ttl(iph); 286 ip_decrease_ttl(iph);
263 287
288 if (unlikely(dst_xfrm(&rt->dst))) {
289 memset(skb->cb, 0, sizeof(struct inet_skb_parm));
290 IPCB(skb)->iif = skb->dev->ifindex;
291 IPCB(skb)->flags = IPSKB_FORWARDED;
292 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
293 }
294
264 skb->dev = outdev; 295 skb->dev = outdev;
265 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); 296 nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
266 skb_dst_set_noref(skb, &rt->dst); 297 skb_dst_set_noref(skb, &rt->dst);
@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
467 sizeof(*ip6h))) 498 sizeof(*ip6h)))
468 return NF_ACCEPT; 499 return NF_ACCEPT;
469 500
501 if (nf_flow_offload_dst_check(&rt->dst)) {
502 flow_offload_teardown(flow);
503 return NF_ACCEPT;
504 }
505
470 if (skb_try_make_writable(skb, sizeof(*ip6h))) 506 if (skb_try_make_writable(skb, sizeof(*ip6h)))
471 return NF_DROP; 507 return NF_DROP;
472 508
@@ -477,6 +513,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
477 ip6h = ipv6_hdr(skb); 513 ip6h = ipv6_hdr(skb);
478 ip6h->hop_limit--; 514 ip6h->hop_limit--;
479 515
516 if (unlikely(dst_xfrm(&rt->dst))) {
517 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
518 IP6CB(skb)->iif = skb->dev->ifindex;
519 IP6CB(skb)->flags = IP6SKB_FORWARDED;
520 return nf_flow_xmit_xfrm(skb, state, &rt->dst);
521 }
522
480 skb->dev = outdev; 523 skb->dev = outdev;
481 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); 524 nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
482 skb_dst_set_noref(skb, &rt->dst); 525 skb_dst_set_noref(skb, &rt->dst);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 605a7cfe7ca7..d47469f824a1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -138,9 +138,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
 		return;
 
 	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
-		if (trans->msg_type == NFT_MSG_NEWSET &&
-		    nft_trans_set(trans) == set) {
-			set->bound = true;
+		switch (trans->msg_type) {
+		case NFT_MSG_NEWSET:
+			if (nft_trans_set(trans) == set)
+				nft_trans_set_bound(trans) = true;
+			break;
+		case NFT_MSG_NEWSETELEM:
+			if (nft_trans_elem_set(trans) == set)
+				nft_trans_elem_set_bound(trans) = true;
 			break;
 		}
 	}
@@ -1662,6 +1667,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
 
 		chain->flags |= NFT_BASE_CHAIN | flags;
 		basechain->policy = NF_ACCEPT;
+		if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+		    nft_chain_offload_priority(basechain) < 0)
+			return -EOPNOTSUPP;
+
 		flow_block_init(&basechain->flow_block);
 	} else {
 		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
@@ -6906,7 +6915,7 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
-			if (nft_trans_set(trans)->bound) {
+			if (nft_trans_set_bound(trans)) {
 				nft_trans_destroy(trans);
 				break;
 			}
@@ -6918,7 +6927,7 @@ static int __nf_tables_abort(struct net *net)
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
-			if (nft_trans_elem_set(trans)->bound) {
+			if (nft_trans_elem_set_bound(trans)) {
 				nft_trans_destroy(trans);
 				break;
 			}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 64f5fd5f240e..c0d18c1d77ac 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -103,10 +103,11 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
 }
 
 static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
-					 __be16 proto,
+					 __be16 proto, int priority,
 					 struct netlink_ext_ack *extack)
 {
 	common->protocol = proto;
+	common->prio = priority;
 	common->extack = extack;
 }
 
@@ -124,6 +125,15 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
 	return 0;
 }
 
+int nft_chain_offload_priority(struct nft_base_chain *basechain)
+{
+	if (basechain->ops.priority <= 0 ||
+	    basechain->ops.priority > USHRT_MAX)
+		return -1;
+
+	return 0;
+}
+
 static int nft_flow_offload_rule(struct nft_trans *trans,
 				 enum flow_cls_command command)
 {
@@ -142,7 +152,8 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
 	if (flow)
 		proto = flow->proto;
 
-	nft_flow_offload_common_init(&cls_flow.common, proto, &extack);
+	nft_flow_offload_common_init(&cls_flow.common, proto,
+				     basechain->ops.priority, &extack);
 	cls_flow.command = command;
 	cls_flow.cookie = (unsigned long) rule;
 	if (flow)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index aa5f571d4361..01705ad74a9a 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 {
 	struct nft_flow_offload *priv = nft_expr_priv(expr);
 	struct nf_flowtable *flowtable = &priv->flowtable->data;
+	struct tcphdr _tcph, *tcph = NULL;
 	enum ip_conntrack_info ctinfo;
 	struct nf_flow_route route;
 	struct flow_offload *flow;
 	enum ip_conntrack_dir dir;
-	bool is_tcp = false;
 	struct nf_conn *ct;
 	int ret;
 
@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 
 	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
 	case IPPROTO_TCP:
-		is_tcp = true;
+		tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+					  sizeof(_tcph), &_tcph);
+		if (unlikely(!tcph || tcph->fin || tcph->rst))
+			goto out;
 		break;
 	case IPPROTO_UDP:
 		break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
 	if (!flow)
 		goto err_flow_alloc;
 
-	if (is_tcp) {
+	if (tcph) {
 		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
 		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
 	}
@@ -146,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 	return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
+static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
+	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
+				    .len = NFT_NAME_MAXLEN - 1 },
+};
+
 static int nft_flow_offload_init(const struct nft_ctx *ctx,
 				 const struct nft_expr *expr,
 				 const struct nlattr * const tb[])
@@ -204,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = {
 static struct nft_expr_type nft_flow_offload_type __read_mostly = {
 	.name		= "flow_offload",
 	.ops		= &nft_flow_offload_ops,
+	.policy		= nft_flow_offload_policy,
 	.maxattr	= NFTA_FLOW_MAX,
 	.owner		= THIS_MODULE,
 };
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index d0ab1adf5bff..5aab6df74e0f 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par)
 	nfnl_acct_put(info->nfacct);
 }
 
-static struct xt_match nfacct_mt_reg __read_mostly = {
-	.name       = "nfacct",
-	.family     = NFPROTO_UNSPEC,
-	.checkentry = nfacct_mt_checkentry,
-	.match      = nfacct_mt,
-	.destroy    = nfacct_mt_destroy,
-	.matchsize  = sizeof(struct xt_nfacct_match_info),
-	.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
-	.me         = THIS_MODULE,
+static struct xt_match nfacct_mt_reg[] __read_mostly = {
+	{
+		.name       = "nfacct",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info),
+		.usersize   = offsetof(struct xt_nfacct_match_info, nfacct),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "nfacct",
+		.revision   = 1,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = nfacct_mt_checkentry,
+		.match      = nfacct_mt,
+		.destroy    = nfacct_mt_destroy,
+		.matchsize  = sizeof(struct xt_nfacct_match_info_v1),
+		.usersize   = offsetof(struct xt_nfacct_match_info_v1, nfacct),
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init nfacct_mt_init(void)
 {
-	return xt_register_match(&nfacct_mt_reg);
+	return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 static void __exit nfacct_mt_exit(void)
 {
-	xt_unregister_match(&nfacct_mt_reg);
+	xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg));
 }
 
 module_init(nfacct_mt_init);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 848c6eb55064..d8da6477d6be 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -67,6 +67,7 @@ struct ovs_conntrack_info {
 	struct md_mark mark;
 	struct md_labels labels;
 	char timeout[CTNL_TIMEOUT_NAME_MAX];
+	struct nf_ct_timeout *nf_ct_timeout;
 #if IS_ENABLED(CONFIG_NF_NAT)
 	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
 #endif
@@ -697,6 +698,14 @@ static bool skb_nfct_cached(struct net *net,
 		if (help && rcu_access_pointer(help->helper) != info->helper)
 			return false;
 	}
+	if (info->nf_ct_timeout) {
+		struct nf_conn_timeout *timeout_ext;
+
+		timeout_ext = nf_ct_timeout_find(ct);
+		if (!timeout_ext || info->nf_ct_timeout !=
+		    rcu_dereference(timeout_ext->timeout))
+			return false;
+	}
 	/* Force conntrack entry direction to the current packet? */
 	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
 		/* Delete the conntrack entry if confirmed, else just release
@@ -1565,7 +1574,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
 		case OVS_CT_ATTR_TIMEOUT:
 			memcpy(info->timeout, nla_data(a), nla_len(a));
 			if (!memchr(info->timeout, '\0', nla_len(a))) {
-				OVS_NLERR(log, "Invalid conntrack helper");
+				OVS_NLERR(log, "Invalid conntrack timeout");
 				return -EINVAL;
 			}
 			break;
@@ -1657,6 +1666,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
 					      ct_info.timeout))
 			pr_info_ratelimited("Failed to associated timeout "
 					    "policy `%s'\n", ct_info.timeout);
+		else
+			ct_info.nf_ct_timeout = rcu_dereference(
+				nf_ct_timeout_find(ct_info.ct)->timeout);
+
 	}
 
 	if (helper) {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8d54f3047768..e2742b006d25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2618,6 +2618,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 
 	mutex_lock(&po->pg_vec_lock);
 
+	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+	 * we need to confirm it under protection of pg_vec_lock.
+	 */
+	if (unlikely(!po->tx_ring.pg_vec)) {
+		err = -EBUSY;
+		goto out;
+	}
 	if (likely(saddr == NULL)) {
 		dev	= packet_cached_dev_get(po);
 		proto	= po->num;
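The af_packet fix re-validates tx_ring.pg_vec after taking pg_vec_lock, because the earlier check was lockless and the ring can be torn down in between. The general check/lock/recheck pattern, sketched with pthreads (illustrative, not the af_packet code):

#include <pthread.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *ring;	/* may be freed and NULLed by another thread */

static int use_ring(void)
{
	int err = 0;

	if (!ring)		/* cheap lockless early-out */
		return -EBUSY;

	pthread_mutex_lock(&lock);
	/* The first check raced with teardown; confirm under the lock. */
	if (!ring) {
		err = -EBUSY;
		goto out;
	}
	/* ... safely use ring here ... */
out:
	pthread_mutex_unlock(&lock);
	return err;
}
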
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ec05d91aa9a2..45acab2de0cf 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 				      void *buffer)
 {
 	struct rds_info_rdma_connection *iinfo = buffer;
-	struct rds_ib_connection *ic;
+	struct rds_ib_connection *ic = conn->c_transport_data;
 
 	/* We will only ever look at IB transports */
 	if (conn->c_trans != &rds_ib_transport)
@@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
 
 	iinfo->src_addr = conn->c_laddr.s6_addr32[3];
 	iinfo->dst_addr = conn->c_faddr.s6_addr32[3];
-	iinfo->tos = conn->c_tos;
+	if (ic) {
+		iinfo->tos = conn->c_tos;
+		iinfo->sl = ic->i_sl;
+	}
 
 	memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
 	memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
 		struct rds_ib_device *rds_ibdev;
 
-		ic = conn->c_transport_data;
-
 		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid,
 			       (union ib_gid *)&iinfo->dst_gid);
 
@@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 				       void *buffer)
 {
 	struct rds6_info_rdma_connection *iinfo6 = buffer;
-	struct rds_ib_connection *ic;
+	struct rds_ib_connection *ic = conn->c_transport_data;
 
 	/* We will only ever look at IB transports */
 	if (conn->c_trans != &rds_ib_transport)
@@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
 	iinfo6->src_addr = conn->c_laddr;
 	iinfo6->dst_addr = conn->c_faddr;
+	if (ic) {
+		iinfo6->tos = conn->c_tos;
+		iinfo6->sl = ic->i_sl;
+	}
 
 	memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid));
 	memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid));
@@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 	if (rds_conn_state(conn) == RDS_CONN_UP) {
 		struct rds_ib_device *rds_ibdev;
 
-		ic = conn->c_transport_data;
 		rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
 			       (union ib_gid *)&iinfo6->dst_gid);
 		rds_ibdev = ic->rds_ibdev;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 303c6ee8bdb7..f2b558e8b5ea 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -220,6 +220,7 @@ struct rds_ib_connection {
 	/* Send/Recv vectors */
 	int i_scq_vector;
 	int i_rcq_vector;
+	u8 i_sl;
 };
 
 /* This assumes that atomic_t is at least 32 bits */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fddaa09f7b0d..233f1368162b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 		 RDS_PROTOCOL_MINOR(conn->c_version),
 		 ic->i_flowctl ? ", flow control" : "");
 
+	/* receive sl from the peer */
+	ic->i_sl = ic->i_cm_id->route.path_rec->sl;
+
 	atomic_set(&ic->i_cq_quiesce, 0);
 
 	/* Init rings and fill recv. this needs to wait until protocol
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9986d6065c4d..5f741e51b4ba 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id;
 static struct rdma_cm_id *rds6_rdma_listen_id;
 #endif
 
+/* Per IB specification 7.7.3, service level is a 4-bit field. */
+#define TOS_TO_SL(tos)		((tos) & 0xF)
+
 static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 					 struct rdma_cm_event *event,
 					 bool isv6)
@@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
 			struct rds_ib_connection *ibic;
 
 			ibic = conn->c_transport_data;
-			if (ibic && ibic->i_cm_id == cm_id)
+			if (ibic && ibic->i_cm_id == cm_id) {
+				cm_id->route.path_rec[0].sl =
+					TOS_TO_SL(conn->c_tos);
 				ret = trans->cm_initiate_connect(cm_id, isv6);
-			else
+			} else {
 				rds_conn_drop(conn);
+			}
 		}
 		break;
 
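RDS now copies the connection's TOS into the path record's service level, masked to the 4 bits IB allows; the masking is the whole trick, shown standalone:

#include <assert.h>

/* IB service level is a 4-bit field, so only TOS values 0-15 map 1:1. */
#define TOS_TO_SL(tos)	((tos) & 0xF)

int main(void)
{
	assert(TOS_TO_SL(7) == 7);
	assert(TOS_TO_SL(0x1F) == 0xF);	/* out-of-range TOS is truncated */
	return 0;
}
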
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf153544..0dbbfd1b6487 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
 service_in_use:
 	write_unlock(&local->services_lock);
-	rxrpc_put_local(local);
+	rxrpc_unuse_local(local);
 	ret = -EADDRINUSE;
 error_unlock:
 	release_sock(&rx->sk);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life);
  */
 void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
 {
-	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 			  rxrpc_propose_ack_ping_for_check_life);
 	rxrpc_send_ack_packet(call, true, NULL);
 }
@@ -901,7 +901,7 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_queue_work(&rxnet->service_conn_reaper);
 	rxrpc_queue_work(&rxnet->client_conn_reaper);
 
-	rxrpc_put_local(rx->local);
+	rxrpc_unuse_local(rx->local);
 	rx->local = NULL;
 	key_put(rx->key);
 	rx->key = NULL;
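The rxrpc series splits the local endpoint's single refcount into "usage" (references to the memory) and "active_users" (things that still need the endpoint operational), which is why these paths now call rxrpc_unuse_local() instead of dropping a bare reference. A sketch of the two-counter idea with C11 atomics (illustrative, not the rxrpc implementation):

#include <stdatomic.h>
#include <stdlib.h>

struct endpoint {
	atomic_int usage;		/* who may touch the struct */
	atomic_int active_users;	/* who needs it operational */
};

static void ep_shut_down(struct endpoint *ep)
{
	(void)ep;	/* close the socket, stop workers, etc. */
}

static void ep_put(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->usage, 1) == 1)
		free(ep);	/* last memory reference gone */
}

static void ep_unuse(struct endpoint *ep)
{
	/* The last active user shuts the endpoint down, then everyone
	 * drops the memory reference their "use" was holding.
	 */
	if (atomic_fetch_sub(&ep->active_users, 1) == 1)
		ep_shut_down(ep);
	ep_put(ep);
}
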
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 822f45386e31..145335611af6 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -254,7 +254,8 @@ struct rxrpc_security {
  */
 struct rxrpc_local {
 	struct rcu_head	rcu;
-	atomic_t	usage;
+	atomic_t	active_users;	/* Number of users of the local endpoint */
+	atomic_t	usage;		/* Number of references to the structure */
 	struct rxrpc_net *rxnet;	/* The network ns in which this resides */
 	struct list_head link;
 	struct socket	*socket;	/* my UDP socket */
@@ -649,7 +650,6 @@ struct rxrpc_call {
 
 	/* receive-phase ACK management */
 	u8			ackr_reason;	/* reason to ACK */
-	u16			ackr_skew;	/* skew on packet being ACK'd */
 	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
 	rxrpc_serial_t		ackr_first_seq;	/* first sequence number received */
 	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
@@ -743,7 +743,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
 		       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
@@ -1002,6 +1002,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
 struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
 struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
 void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
 void rxrpc_queue_local(struct rxrpc_local *);
 void rxrpc_destroy_all_locals(struct rxrpc_net *);
 
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bc2adeb3acb9..c767679bfa5d 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
  * propose an ACK be sent
  */
 static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
-				u16 skew, u32 serial, bool immediate,
-				bool background,
+				u32 serial, bool immediate, bool background,
 				enum rxrpc_propose_ack_trace why)
 {
 	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
@@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
 			outcome = rxrpc_propose_ack_update;
 			call->ackr_serial = serial;
-			call->ackr_skew = skew;
 		}
 		if (!immediate)
 			goto trace;
 	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
 		call->ackr_reason = ack_reason;
 		call->ackr_serial = serial;
-		call->ackr_skew = skew;
 	} else {
 		outcome = rxrpc_propose_ack_subsume;
 	}
@@ -137,11 +134,11 @@ trace:
  * propose an ACK be sent, locking the call structure
  */
 void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
-		       u16 skew, u32 serial, bool immediate, bool background,
+		       u32 serial, bool immediate, bool background,
 		       enum rxrpc_propose_ack_trace why)
 {
 	spin_lock_bh(&call->lock);
-	__rxrpc_propose_ACK(call, ack_reason, skew, serial,
+	__rxrpc_propose_ACK(call, ack_reason, serial,
 			    immediate, background, why);
 	spin_unlock_bh(&call->lock);
 }
@@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
 	ack_ts = ktime_sub(now, call->acks_latest_ts);
 	if (ktime_to_ns(ack_ts) < call->peer->rtt)
 		goto out;
-	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+	rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 			  rxrpc_propose_ack_ping_for_lost_ack);
 	rxrpc_send_ack_packet(call, true, NULL);
 	goto out;
@@ -372,7 +369,7 @@ recheck_state:
 	if (time_after_eq(now, t)) {
 		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
 		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
 				  rxrpc_propose_ack_ping_for_keepalive);
 		set_bit(RXRPC_CALL_EV_PING, &call->events);
 	}
@@ -407,7 +404,7 @@ recheck_state:
 	send_ack = NULL;
 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
 		call->acks_lost_top = call->tx_top;
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
 				  rxrpc_propose_ack_ping_for_lost_ack);
 		send_ack = &call->acks_lost_ping;
 	}
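
The skew argument threaded through every rxrpc_propose_ACK() call above is gone; what remains is the policy that a pending ACK is only displaced by a higher-priority reason, while "updateable" reasons merely refresh the recorded serial. A minimal userspace sketch of that policy, with reason names and priority values invented here for illustration (the kernel's rxrpc_ack_priority table differs):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical reasons and priorities; illustrative only. */
enum ack_reason { ACK_NONE, ACK_DELAY, ACK_REQUESTED, ACK_PING };
static const int prio[] = { 0, 1, 2, 3 };
#define UPDATEABLE ((1 << ACK_DELAY) | (1 << ACK_REQUESTED))

struct call { enum ack_reason reason; unsigned serial; };

static void propose_ack(struct call *c, enum ack_reason r, unsigned serial)
{
	if (c->reason == r) {
		if (UPDATEABLE & (1 << r))
			c->serial = serial;	/* same reason: refresh */
	} else if (prio[r] > prio[c->reason]) {
		c->reason = r;			/* higher priority wins */
		c->serial = serial;
	}					/* else: subsumed, dropped */
}

int main(void)
{
	struct call c = { ACK_NONE, 0 };

	propose_ack(&c, ACK_DELAY, 1);
	propose_ack(&c, ACK_PING, 2);	/* displaces the delayed ACK */
	propose_ack(&c, ACK_DELAY, 3);	/* subsumed by pending PING */
	printf("reason=%d serial=%u\n", c.reason, c.serial); /* 3, 2 */
	return 0;
}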
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..dd47d465d1d3 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
  * Ping the other end to fill our RTT cache and to retrieve the rwind
  * and MTU parameters.
  */
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
-			    int skew)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	ktime_t now = skb->tstamp;
 
 	if (call->peer->rtt_usage < 3 ||
 	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
 				  true, true,
 				  rxrpc_propose_ack_ping_for_params);
 }
@@ -419,8 +418,7 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
 /*
  * Process a DATA packet, adding the packet to the Rx ring.
  */
-static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
-			     u16 skew)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	enum rxrpc_call_state state;
@@ -600,11 +598,11 @@ skip:
 
 ack:
 	if (ack)
-		rxrpc_propose_ACK(call, ack, skew, ack_serial,
+		rxrpc_propose_ACK(call, ack, ack_serial,
 				  immediate_ack, true,
 				  rxrpc_propose_ack_input_data);
 	else
-		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
 				  false, true,
 				  rxrpc_propose_ack_input_data);
 
@@ -822,8 +820,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
  * soft-ACK means that the packet may be discarded and retransmission
  * requested.  A phase is complete when all packets are hard-ACK'd.
  */
-static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
-			    u16 skew)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_ack_summary summary = { 0 };
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +864,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 	if (buf.ack.reason == RXRPC_ACK_PING) {
 		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
 		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
-				  skew, sp->hdr.serial, true, true,
+				  sp->hdr.serial, true, true,
 				  rxrpc_propose_ack_respond_to_ping);
 	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
 		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
-				  skew, sp->hdr.serial, true, true,
+				  sp->hdr.serial, true, true,
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
@@ -948,7 +945,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 	    RXRPC_TX_ANNO_LAST &&
 	    summary.nr_acks == call->tx_top - hard_ack &&
 	    rxrpc_is_client_call(call))
-		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
 				  false, true,
 				  rxrpc_propose_ack_ping_for_lost_reply);
 
@@ -1004,7 +1001,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
  * Process an incoming call packet.
  */
 static void rxrpc_input_call_packet(struct rxrpc_call *call,
-				    struct sk_buff *skb, u16 skew)
+				    struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned long timo;
@@ -1023,11 +1020,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
 
 	switch (sp->hdr.type) {
 	case RXRPC_PACKET_TYPE_DATA:
-		rxrpc_input_data(call, skb, skew);
+		rxrpc_input_data(call, skb);
 		break;
 
 	case RXRPC_PACKET_TYPE_ACK:
-		rxrpc_input_ack(call, skb, skew);
+		rxrpc_input_ack(call, skb);
 		break;
 
 	case RXRPC_PACKET_TYPE_BUSY:
@@ -1108,8 +1105,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
 {
 	_enter("%p,%p", local, skb);
 
-	skb_queue_tail(&local->event_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->event_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	}
 }
 
 /*
@@ -1119,8 +1120,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
 {
 	CHECK_SLAB_OKAY(&local->usage);
 
-	skb_queue_tail(&local->reject_queue, skb);
-	rxrpc_queue_local(local);
+	if (rxrpc_get_local_maybe(local)) {
+		skb_queue_tail(&local->reject_queue, skb);
+		rxrpc_queue_local(local);
+	} else {
+		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+	}
 }
 
 /*
@@ -1173,7 +1178,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 	struct rxrpc_peer *peer = NULL;
 	struct rxrpc_sock *rx = NULL;
 	unsigned int channel;
-	int skew = 0;
 
 	_enter("%p", udp_sk);
 
@@ -1301,15 +1305,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		goto out;
 	}
 
-	/* Note the serial number skew here */
-	skew = (int)sp->hdr.serial - (int)conn->hi_serial;
-	if (skew >= 0) {
-		if (skew > 0)
-			conn->hi_serial = sp->hdr.serial;
-	} else {
-		skew = -skew;
-		skew = min(skew, 65535);
-	}
+	if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
+		conn->hi_serial = sp->hdr.serial;
 
 	/* Call-bound packets are routed by connection channel. */
 	channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -1372,11 +1369,11 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
 		call = rxrpc_new_incoming_call(local, rx, skb);
 		if (!call)
 			goto reject_packet;
-		rxrpc_send_ping(call, skb, skew);
+		rxrpc_send_ping(call, skb);
 		mutex_unlock(&call->user_mutex);
 	}
 
-	rxrpc_input_call_packet(call, skb, skew);
+	rxrpc_input_call_packet(call, skb);
 	goto discard;
 
 discard:
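
Both packet-posting paths above now take a conditional reference with rxrpc_get_local_maybe() before queueing work, and drop the skb outright if the endpoint's refcount has already reached zero. The core primitive is "increment unless zero", which never resurrects a dying object. A userspace analogue of that primitive, as a sketch in C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Analogue of atomic_fetch_add_unless(&usage, 1, 0): only take a new
 * reference if the count has not already hit zero, i.e. never revive
 * an object that is on its way to being freed. */
static bool get_unless_zero(atomic_int *usage)
{
	int old = atomic_load(usage);

	while (old != 0 &&
	       !atomic_compare_exchange_weak(usage, &old, old + 1))
		;	/* CAS failed: old was reloaded, retry */
	return old != 0;
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", get_unless_zero(&live));	/* 1: ref taken */
	printf("dying: %d\n", get_unless_zero(&dying));	/* 0: drop packet */
	return 0;
}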
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b..72a6e12a9304 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
 	if (local) {
 		atomic_set(&local->usage, 1);
+		atomic_set(&local->active_users, 1);
 		local->rxnet = rxnet;
 		INIT_LIST_HEAD(&local->link);
 		INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
 		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
 		memcpy(&local->srx, srx, sizeof(*srx));
 		local->srx.srx_service = 0;
-		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
 	}
 
 	_leave(" = %p", local);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 		 * bind the transport socket may still fail if we're attempting
 		 * to use a local address that the dying object is still using.
 		 */
-		if (!rxrpc_get_local_maybe(local)) {
-			cursor = cursor->next;
-			list_del_init(&local->link);
+		if (!rxrpc_use_local(local))
 			break;
-		}
 
 		age = "old";
 		goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
 	if (ret < 0)
 		goto sock_error;
 
-	list_add_tail(&local->link, cursor);
+	if (cursor != &rxnet->local_endpoints)
+		list_replace_init(cursor, &local->link);
+	else
+		list_add_tail(&local->link, cursor);
 	age = "new";
 
 found:
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
 	int n;
 
 	n = atomic_inc_return(&local->usage);
-	trace_rxrpc_local(local, rxrpc_local_got, n, here);
+	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
 	return local;
 }
 
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 	if (local) {
 		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
 		if (n > 0)
-			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+					  n + 1, here);
 		else
 			local = NULL;
 	}
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
 }
 
 /*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
  */
 void rxrpc_queue_local(struct rxrpc_local *local)
 {
 	const void *here = __builtin_return_address(0);
+	unsigned int debug_id = local->debug_id;
+	int n = atomic_read(&local->usage);
 
 	if (rxrpc_queue_work(&local->processor))
-		trace_rxrpc_local(local, rxrpc_local_queued,
-				  atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
-	_enter("%d", local->debug_id);
-	rxrpc_queue_work(&local->processor);
+		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+	else
+		rxrpc_put_local(local);
 }
 
 /*
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
 
 	if (local) {
 		n = atomic_dec_return(&local->usage);
-		trace_rxrpc_local(local, rxrpc_local_put, n, here);
+		trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
 
 		if (n == 0)
-			__rxrpc_put_local(local);
+			call_rcu(&local->rcu, rxrpc_local_rcu);
+	}
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	local = rxrpc_get_local_maybe(local);
+	if (!local)
+		return NULL;
+
+	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+	if (au == 0) {
+		rxrpc_put_local(local);
+		return NULL;
+	}
+
+	return local;
+}
+
+/*
+ * Cease using a local endpoint.  Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+	unsigned int au;
+
+	if (local) {
+		au = atomic_dec_return(&local->active_users);
+		if (au == 0)
+			rxrpc_queue_local(local);
+		else
+			rxrpc_put_local(local);
 	}
 }
 
@@ -393,16 +426,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 
 	_enter("%d", local->debug_id);
 
-	/* We can get a race between an incoming call packet queueing the
-	 * processor again and the work processor starting the destruction
-	 * process which will shut down the UDP socket.
-	 */
-	if (local->dead) {
-		_leave(" [already dead]");
-		return;
-	}
-	local->dead = true;
-
 	mutex_lock(&rxnet->local_mutex);
 	list_del_init(&local->link);
 	mutex_unlock(&rxnet->local_mutex);
@@ -422,13 +445,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
 	 */
 	rxrpc_purge_queue(&local->reject_queue);
 	rxrpc_purge_queue(&local->event_queue);
-
-	_debug("rcu local %d", local->debug_id);
-	call_rcu(&local->rcu, rxrpc_local_rcu);
 }
 
 /*
- * Process events on an endpoint
+ * Process events on an endpoint.  The work item carries a ref which
+ * we must release.
  */
 static void rxrpc_local_processor(struct work_struct *work)
 {
@@ -436,13 +457,15 @@ static void rxrpc_local_processor(struct work_struct *work)
 		container_of(work, struct rxrpc_local, processor);
 	bool again;
 
-	trace_rxrpc_local(local, rxrpc_local_processing,
+	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
 			  atomic_read(&local->usage), NULL);
 
 	do {
 		again = false;
-		if (atomic_read(&local->usage) == 0)
-			return rxrpc_local_destroyer(local);
+		if (atomic_read(&local->active_users) == 0) {
+			rxrpc_local_destroyer(local);
+			break;
+		}
 
 		if (!skb_queue_empty(&local->reject_queue)) {
 			rxrpc_reject_packets(local);
@@ -454,6 +477,8 @@ static void rxrpc_local_processor(struct work_struct *work)
 			again = true;
 		}
 	} while (again);
+
+	rxrpc_put_local(local);
 }
 
 /*
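
The structural change in this file is the split into two counters: usage counts references to the memory and frees it (via RCU) at zero, while active_users counts live users of the endpoint and triggers transport teardown at zero. A simplified userspace model of that split, with function names mirroring the patch but bodies that are illustrative only:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
	atomic_int usage;		/* references to the struct */
	atomic_int active_users;	/* users of the endpoint */
};

static void ep_put(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->usage, 1) == 1)
		free(ep);		/* kernel: call_rcu() */
}

static void ep_unuse(struct endpoint *ep)
{
	if (atomic_fetch_sub(&ep->active_users, 1) == 1)
		printf("last user gone: shut down transport\n");
	ep_put(ep);			/* drop the user's ref too */
}

int main(void)
{
	struct endpoint *ep = malloc(sizeof(*ep));

	atomic_init(&ep->usage, 2);		/* one user + one lookup ref */
	atomic_init(&ep->active_users, 1);
	ep_unuse(ep);				/* tears down the transport */
	ep_put(ep);				/* last ref: memory released */
	return 0;
}

The point of the split is that packets may still hold memory references (usage) to an endpoint whose sockets have all gone away (active_users == 0), without keeping the UDP transport alive.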
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 948e3fe249ec..369e516c4bdf 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
 	*_top = top;
 
 	pkt->ack.bufferSpace	= htons(8);
-	pkt->ack.maxSkew	= htons(call->ackr_skew);
+	pkt->ack.maxSkew	= htons(0);
 	pkt->ack.firstPacket	= htonl(hard_ack + 1);
 	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
 	pkt->ack.serial		= htonl(serial);
@@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
 	if (ping)
 		clear_bit(RXRPC_CALL_PINGING, &call->flags);
 	rxrpc_propose_ACK(call, pkt->ack.reason,
-			  ntohs(pkt->ack.maxSkew),
 			  ntohl(pkt->ack.serial),
 			  false, true,
 			  rxrpc_propose_ack_retry_tx);
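
Note that maxSkew stays in the wire format even though nothing feeds it any more: the sender pins it to zero rather than dropping the field. A sketch of that kind of on-wire fill, with a stand-in struct (the real rxrpc ACK layout has more fields and different ordering):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the ACK trailer: field order is illustrative only. */
struct ack_wire {
	uint16_t bufferSpace;
	uint16_t maxSkew;	/* obsolete input; always sent as 0 now */
	uint32_t firstPacket;
};

int main(void)
{
	struct ack_wire ack = {
		.bufferSpace = htons(8),
		.maxSkew     = htons(0),	/* was htons(call->ackr_skew) */
		.firstPacket = htonl(101),
	};

	printf("maxSkew on the wire: %u\n", ntohs(ack.maxSkew));
	return 0;
}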
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 5abf46cf9e6c..9a7e1bc9791d 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
 	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
 
 	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
-		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true,
+		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
 				  rxrpc_propose_ack_terminal_ack);
 		//rxrpc_send_ack_packet(call, false, NULL);
 	}
@@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
 		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
 		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
 		write_unlock_bh(&call->state_lock);
-		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
+		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
 				  rxrpc_propose_ack_processing_op);
 		break;
 	default:
@@ -212,7 +212,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
 	if (after_eq(hard_ack, call->ackr_consumed + 2) ||
 	    after_eq(top, call->ackr_seen + 2) ||
 	    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
-		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
+		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
 				  true, true,
 				  rxrpc_propose_ack_rotate_rx);
 	if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index b100870f02a6..37dced00b63d 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
 	return tcf_idr_search(tn, a, index);
 }
 
+static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
+{
+	return nla_total_size(sizeof(struct tc_skbedit))
+		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
+		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
+		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
+		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
+		+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
+}
+
 static struct tc_action_ops act_skbedit_ops = {
 	.kind		=	"skbedit",
 	.id		=	TCA_ID_SKBEDIT,
@@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
 	.init		=	tcf_skbedit_init,
 	.cleanup	=	tcf_skbedit_cleanup,
 	.walk		=	tcf_skbedit_walker,
+	.get_fill_size	=	tcf_skbedit_get_fill_size,
 	.lookup		=	tcf_skbedit_search,
 	.size		=	sizeof(struct tcf_skbedit),
 };
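
get_fill_size lets the action layer size a dump buffer up front instead of guessing. Each netlink attribute costs its payload rounded up to 4-byte alignment plus a 4-byte header. A standalone sketch of that arithmetic; the constants mirror the uapi netlink macros, and the 20-byte size for struct tc_skbedit is an assumption made here for the demo:

#include <stdint.h>
#include <stdio.h>

#define NLA_HDRLEN	4
#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

/* mirrors the kernel's nla_total_size(): header + aligned payload */
static size_t nla_total_size(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

/* 64-bit attributes may carry an extra pad attribute for alignment */
static size_t nla_total_size_64bit(size_t payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload) + NLA_HDRLEN;
}

int main(void)
{
	size_t sz = nla_total_size(20)			/* struct tc_skbedit, size assumed */
		+ nla_total_size(sizeof(uint32_t))	/* PRIORITY */
		+ nla_total_size(sizeof(uint16_t))	/* QUEUE_MAPPING */
		+ nla_total_size(sizeof(uint32_t))	/* MARK */
		+ nla_total_size(sizeof(uint16_t))	/* PTYPE */
		+ nla_total_size(sizeof(uint32_t))	/* MASK */
		+ nla_total_size_64bit(sizeof(uint64_t)); /* FLAGS */

	printf("worst-case skbedit dump: %zu bytes\n", sz);
	return 0;
}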
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c39db507ba3f..e25d414ae12f 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1195,7 +1195,8 @@ unlock:
 	spin_unlock_bh(qdisc_lock(sch));
 
 free_sched:
-	kfree(new_admin);
+	if (new_admin)
+		call_rcu(&new_admin->rcu, taprio_free_sched_cb);
 
 	return err;
 }
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index a554d6d15d1b..1cf5bb5b73c4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
 	 */
 	if (net->sctp.pf_enable &&
 	    (transport->state == SCTP_ACTIVE) &&
-	    (asoc->pf_retrans < transport->pathmaxrxt) &&
+	    (transport->error_count < transport->pathmaxrxt) &&
 	    (transport->error_count > asoc->pf_retrans)) {
 
 		sctp_assoc_control_transport(asoc, transport,
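
The fixed test compares the live error counter against both bounds, where the old code compared the two configured thresholds with each other. A tiny standalone check makes the difference visible once the error count has already passed the hard path limit:

#include <stdbool.h>
#include <stdio.h>

static bool old_rule(unsigned err, unsigned pf_retrans, unsigned pathmaxrxt)
{
	return pf_retrans < pathmaxrxt && err > pf_retrans;
}

static bool new_rule(unsigned err, unsigned pf_retrans, unsigned pathmaxrxt)
{
	return err < pathmaxrxt && err > pf_retrans;
}

int main(void)
{
	/* pf_retrans = 4, pathmaxrxt = 5, sixth error on the path */
	unsigned err = 6, pf = 4, pmr = 5;

	/* old: 1 - wrongly re-marks a path that already exceeded its
	 * hard limit; new: 0 - the path stays failed */
	printf("old=%d new=%d\n", old_rule(err, pf, pmr),
	       new_rule(err, pf, pmr));
	return 0;
}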
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 25946604af85..e83cdaa2ab76 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 		nstr_list[i] = htons(str_list[i]);
 
 	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		kfree(nstr_list);
 		retval = -EAGAIN;
 		goto out;
 	}
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index f0de323d15d6..6c8f09c1ce51 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -76,13 +76,11 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	struct smc_connection *conn = &smc->conn;
 	struct sock *sk = &smc->sk;
-	bool noblock;
 	long timeo;
 	int rc = 0;
 
 	/* similar to sk_stream_wait_memory */
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-	noblock = timeo ? false : true;
 	add_wait_queue(sk_sleep(sk), &wait);
 	while (1) {
 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -97,8 +95,8 @@ static int smc_tx_wait(struct smc_sock *smc, int flags)
 			break;
 		}
 		if (!timeo) {
-			if (noblock)
-				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+			/* ensure EPOLLOUT is subsequently generated */
+			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 			rc = -EAGAIN;
 			break;
 		}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8679b6027e9..a07b516e503a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1970,6 +1970,7 @@ call_bind(struct rpc_task *task)
 static void
 call_bind_status(struct rpc_task *task)
 {
+	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
 	int status = -EIO;
 
 	if (rpc_task_transmitted(task)) {
@@ -1977,14 +1978,15 @@ call_bind_status(struct rpc_task *task)
 		return;
 	}
 
-	if (task->tk_status >= 0) {
-		dprint_status(task);
+	dprint_status(task);
+	trace_rpc_bind_status(task);
+	if (task->tk_status >= 0)
+		goto out_next;
+	if (xprt_bound(xprt)) {
 		task->tk_status = 0;
-		task->tk_action = call_connect;
-		return;
+		goto out_next;
 	}
 
-	trace_rpc_bind_status(task);
 	switch (task->tk_status) {
 	case -ENOMEM:
 		dprintk("RPC:       %5u rpcbind out of memory\n", task->tk_pid);
@@ -2003,6 +2005,9 @@ call_bind_status(struct rpc_task *task)
 		task->tk_rebind_retry--;
 		rpc_delay(task, 3*HZ);
 		goto retry_timeout;
+	case -ENOBUFS:
+		rpc_delay(task, HZ >> 2);
+		goto retry_timeout;
 	case -EAGAIN:
 		goto retry_timeout;
 	case -ETIMEDOUT:
@@ -2026,7 +2031,6 @@ call_bind_status(struct rpc_task *task)
 	case -ENETDOWN:
 	case -EHOSTUNREACH:
 	case -ENETUNREACH:
-	case -ENOBUFS:
 	case -EPIPE:
 		dprintk("RPC:       %5u remote rpcbind unreachable: %d\n",
 				task->tk_pid, task->tk_status);
@@ -2043,7 +2047,9 @@ call_bind_status(struct rpc_task *task)
 
 	rpc_call_rpcerror(task, status);
 	return;
-
+out_next:
+	task->tk_action = call_connect;
+	return;
 retry_timeout:
 	task->tk_status = 0;
 	task->tk_action = call_bind;
@@ -2090,6 +2096,7 @@ call_connect(struct rpc_task *task)
 static void
 call_connect_status(struct rpc_task *task)
 {
+	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
 	struct rpc_clnt *clnt = task->tk_client;
 	int status = task->tk_status;
 
@@ -2099,8 +2106,17 @@ call_connect_status(struct rpc_task *task)
 	}
 
 	dprint_status(task);
-
 	trace_rpc_connect_status(task);
+
+	if (task->tk_status == 0) {
+		clnt->cl_stats->netreconn++;
+		goto out_next;
+	}
+	if (xprt_connected(xprt)) {
+		task->tk_status = 0;
+		goto out_next;
+	}
+
 	task->tk_status = 0;
 	switch (status) {
 	case -ECONNREFUSED:
@@ -2117,8 +2133,6 @@ call_connect_status(struct rpc_task *task)
 	case -ENETDOWN:
 	case -ENETUNREACH:
 	case -EHOSTUNREACH:
-	case -EADDRINUSE:
-	case -ENOBUFS:
 	case -EPIPE:
 		xprt_conditional_disconnect(task->tk_rqstp->rq_xprt,
 					    task->tk_rqstp->rq_connect_cookie);
@@ -2127,17 +2141,20 @@ call_connect_status(struct rpc_task *task)
 		/* retry with existing socket, after a delay */
 		rpc_delay(task, 3*HZ);
 		/* fall through */
+	case -EADDRINUSE:
 	case -ENOTCONN:
 	case -EAGAIN:
 	case -ETIMEDOUT:
 		goto out_retry;
-	case 0:
-		clnt->cl_stats->netreconn++;
-		task->tk_action = call_transmit;
-		return;
+	case -ENOBUFS:
+		rpc_delay(task, HZ >> 2);
+		goto out_retry;
 	}
 	rpc_call_rpcerror(task, status);
 	return;
+out_next:
+	task->tk_action = call_transmit;
+	return;
 out_retry:
 	/* Check for timeouts before looping back to call_bind */
 	task->tk_action = call_bind;
@@ -2365,7 +2382,7 @@ call_status(struct rpc_task *task)
 	case -ECONNABORTED:
 	case -ENOTCONN:
 		rpc_force_rebind(clnt);
-		/* fall through */
+		break;
 	case -EADDRINUSE:
 		rpc_delay(task, 3*HZ);
 		/* fall through */
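
Both status handlers now funnel success and the already-bound/already-connected cases through a single out_next label, and treat -ENOBUFS as transient, retrying after HZ >> 2 (a quarter second) instead of killing the task. A compact sketch of that control shape, with stand-in states and a stubbed delay hook; this is an assumed simplification, not the sunrpc state machine itself:

#include <errno.h>
#include <stdio.h>

enum rpc_action { CALL_BIND, CALL_CONNECT, CALL_EXIT };

/* stand-in for rpc_delay(task, HZ >> 2) */
static void delay_qtr_second(void) { }

static enum rpc_action bind_status(int status, int already_bound)
{
	if (status >= 0 || already_bound)
		goto out_next;

	switch (status) {
	case -ENOBUFS:			/* transient: back off briefly */
		delay_qtr_second();
		/* fall through */
	case -EAGAIN:
		return CALL_BIND;	/* retry the same step */
	default:
		return CALL_EXIT;	/* fatal */
	}
out_next:
	return CALL_CONNECT;		/* single success exit */
}

int main(void)
{
	printf("%d %d %d\n",
	       bind_status(0, 0),		/* 1: advance */
	       bind_status(-ENOBUFS, 0),	/* 0: retry bind */
	       bind_status(-EIO, 1));		/* 1: xprt already bound */
	return 0;
}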
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 783748dc5e6f..2e71f5455c6c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1408,13 +1408,6 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
 		status = -EBADMSG;
 		goto out_dequeue;
 	}
-	if (task->tk_ops->rpc_call_prepare_transmit) {
-		task->tk_ops->rpc_call_prepare_transmit(task,
-							task->tk_calldata);
-		status = task->tk_status;
-		if (status < 0)
-			goto out_dequeue;
-	}
 	if (RPC_SIGNALLED(task)) {
 		status = -ERESTARTSYS;
 		goto out_dequeue;
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d00913..0f1eaed1bd1b 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
 		tipc_set_node_id(net, node_id);
 	}
 	tn->trial_addr = addr;
+	tn->addr_trial_end = jiffies;
 	pr_info("32-bit node address hash set to %x\n", addr);
 }
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 66d3a07bc571..c2c5c53cad22 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,8 +106,6 @@ struct tipc_stats {
  * @transmitq: queue for sent, non-acked messages
  * @backlogq: queue for messages waiting to be sent
  * @snt_nxt: next sequence number to use for outbound messages
- * @prev_from: sequence number of most previous retransmission request
- * @stale_limit: time when repeated identical retransmits must force link reset
  * @ackers: # of peers that needs to ack each packet before it can be released
  * @acked: # last packet acked by a certain peer. Used for broadcast.
  * @rcv_nxt: next sequence number to expect for inbound messages
@@ -164,9 +162,7 @@ struct tipc_link {
 		u16 limit;
 	} backlog[5];
 	u16 snd_nxt;
-	u16 prev_from;
 	u16 window;
-	unsigned long stale_limit;
 
 	/* Reception */
 	u16 rcv_nxt;
@@ -1044,47 +1040,53 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
  * link_retransmit_failure() - Detect repeated retransmit failures
  * @l: tipc link sender
  * @r: tipc link receiver (= l in case of unicast)
- * @from: seqno of the 1st packet in retransmit request
  * @rc: returned code
  *
  * Return: true if the repeated retransmit failures happens, otherwise
  * false
  */
 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
-				    u16 from, int *rc)
+				    int *rc)
 {
 	struct sk_buff *skb = skb_peek(&l->transmq);
 	struct tipc_msg *hdr;
 
 	if (!skb)
 		return false;
-	hdr = buf_msg(skb);
 
-	/* Detect repeated retransmit failures on same packet */
-	if (r->prev_from != from) {
-		r->prev_from = from;
-		r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
-	} else if (time_after(jiffies, r->stale_limit)) {
-		pr_warn("Retransmission failure on link <%s>\n", l->name);
-		link_print(l, "State of link ");
-		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
-			msg_user(hdr), msg_type(hdr), msg_size(hdr),
-			msg_errcode(hdr));
-		pr_info("sqno %u, prev: %x, src: %x\n",
-			msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
-
-		trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
-		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
-		trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+	if (!TIPC_SKB_CB(skb)->retr_cnt)
+		return false;
 
-		if (link_is_bc_sndlink(l))
-			*rc = TIPC_LINK_DOWN_EVT;
+	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
+			msecs_to_jiffies(r->tolerance)))
+		return false;
+
+	hdr = buf_msg(skb);
+	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
+		return false;
 
+	pr_warn("Retransmission failure on link <%s>\n", l->name);
+	link_print(l, "State of link ");
+	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+	pr_info("sqno %u, prev: %x, dest: %x\n",
+		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
+	pr_info("retr_stamp %d, retr_cnt %d\n",
+		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
+		TIPC_SKB_CB(skb)->retr_cnt);
+
+	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
+	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
+	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+
+	if (link_is_bc_sndlink(l)) {
+		r->state = LINK_RESET;
+		*rc = TIPC_LINK_DOWN_EVT;
+	} else {
 		*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
-		return true;
 	}
 
-	return false;
+	return true;
 }
 
 /* tipc_link_bc_retrans() - retransmit zero or more packets
@@ -1110,7 +1112,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
 
 	trace_tipc_link_retrans(r, from, to, &l->transmq);
 
-	if (link_retransmit_failure(l, r, from, &rc))
+	if (link_retransmit_failure(l, r, &rc))
 		return rc;
 
 	skb_queue_walk(&l->transmq, skb) {
@@ -1119,11 +1121,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
 			continue;
 		if (more(msg_seqno(hdr), to))
 			break;
-		if (link_is_bc_sndlink(l)) {
-			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
-				continue;
-			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
-		}
+
+		if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+			continue;
+		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
 		_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
 		if (!_skb)
 			return 0;
@@ -1133,6 +1134,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
 		_skb->priority = TC_PRIO_CONTROL;
 		__skb_queue_tail(xmitq, _skb);
 		l->stats.retransmitted++;
+
+		/* Increase actual retrans counter & mark first time */
+		if (!TIPC_SKB_CB(skb)->retr_cnt++)
+			TIPC_SKB_CB(skb)->retr_stamp = jiffies;
 	}
 	return 0;
 }
@@ -1357,12 +1362,10 @@ static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
 	struct tipc_msg *hdr;
 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
 	u16 ack = l->rcv_nxt - 1;
+	bool passed = false;
 	u16 seqno, n = 0;
 	int rc = 0;
 
-	if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
-		return rc;
-
 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
 		seqno = buf_seqno(skb);
 
@@ -1372,12 +1375,17 @@ next_gap_ack:
 			__skb_unlink(skb, &l->transmq);
 			kfree_skb(skb);
 		} else if (less_eq(seqno, acked + gap)) {
-			/* retransmit skb */
+			/* First, check if repeated retrans failures occurs? */
+			if (!passed && link_retransmit_failure(l, l, &rc))
+				return rc;
+			passed = true;
+
+			/* retransmit skb if unrestricted*/
 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
 				continue;
 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
-
-			_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+			_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
+					   GFP_ATOMIC);
 			if (!_skb)
 				continue;
 			hdr = buf_msg(_skb);
@@ -1386,6 +1394,10 @@ next_gap_ack:
 			_skb->priority = TC_PRIO_CONTROL;
 			__skb_queue_tail(xmitq, _skb);
 			l->stats.retransmitted++;
+
+			/* Increase actual retrans counter & mark first time */
+			if (!TIPC_SKB_CB(skb)->retr_cnt++)
+				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
 		} else {
 			/* retry with Gap ACK blocks if any */
 			if (!ga || n >= ga->gack_cnt)
@@ -2577,7 +2589,7 @@ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
 	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
 	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
 	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
-	i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
+	i += scnprintf(buf + i, sz - i, " %u", 0);
 	i += scnprintf(buf + i, sz - i, " %u", 0);
 	i += scnprintf(buf + i, sz - i, " %u", l->acked);
 
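
Failure detection now keys off state carried in each buffer (retr_cnt, retr_stamp) instead of the per-link prev_from/stale_limit pair, so a retransmit request naming a different sequence number can no longer reset the staleness clock. A userspace model of the new test, assuming second-granularity time in place of jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct pkt {
	unsigned retr_cnt;	/* how often this packet was re-sent */
	time_t   retr_stamp;	/* when its first retransmit went out */
};

/* The link only counts as failed when the oldest unacked packet has
 * been retransmitted at least once and has stayed unacked longer
 * than the link tolerance since that first retransmission. */
static bool retrans_failure(const struct pkt *first, time_t now,
			    time_t tolerance)
{
	if (!first || !first->retr_cnt)
		return false;
	return now > first->retr_stamp + tolerance;
}

int main(void)
{
	time_t now = time(NULL);
	struct pkt fresh = { 0, 0 };		/* never retransmitted */
	struct pkt stale = { 3, now - 10 };	/* first re-sent 10s ago */

	printf("fresh: %d\n", retrans_failure(&fresh, now, 5)); /* 0 */
	printf("stale: %d\n", retrans_failure(&stale, now, 5)); /* 1 */
	return 0;
}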
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index da509f0eb9ca..d7ebc9e955f6 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,13 +102,15 @@ struct plist;
 #define TIPC_MEDIA_INFO_OFFSET	5
 
 struct tipc_skb_cb {
-	u32 bytes_read;
-	u32 orig_member;
 	struct sk_buff *tail;
 	unsigned long nxt_retr;
-	bool validated;
+	unsigned long retr_stamp;
+	u32 bytes_read;
+	u32 orig_member;
 	u16 chain_imp;
 	u16 ackers;
+	u16 retr_cnt;
+	bool validated;
 };
 
 #define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 7c0b2b778703..43922d86e510 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -373,9 +373,9 @@ static int tls_push_data(struct sock *sk,
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_prot_info *prot = &tls_ctx->prot_info;
 	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
-	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
 	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
 	struct tls_record_info *record = ctx->open_record;
+	int tls_push_record_flags;
 	struct page_frag *pfrag;
 	size_t orig_size = size;
 	u32 max_open_record_len;
@@ -390,6 +390,9 @@ static int tls_push_data(struct sock *sk,
 	if (sk->sk_err)
 		return -sk->sk_err;
 
+	flags |= MSG_SENDPAGE_DECRYPTED;
+	tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 	if (tls_is_partially_sent_record(tls_ctx)) {
 		rc = tls_push_partial_record(sk, tls_ctx, flags);
@@ -576,7 +579,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
 		gfp_t sk_allocation = sk->sk_allocation;
 
 		sk->sk_allocation = GFP_ATOMIC;
-		tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
+		tls_push_partial_record(sk, ctx,
+					MSG_DONTWAIT | MSG_NOSIGNAL |
+					MSG_SENDPAGE_DECRYPTED);
 		sk->sk_allocation = sk_allocation;
 	}
 }
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 9cbbae606ced..43252a801c3f 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -308,6 +308,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 	if (free_ctx)
 		icsk->icsk_ulp_data = NULL;
 	sk->sk_prot = ctx->sk_proto;
+	if (sk->sk_write_space == tls_write_space)
+		sk->sk_write_space = ctx->sk_write_space;
 	write_unlock_bh(&sk->sk_callback_lock);
 	release_sock(sk);
 	if (ctx->tx_conf == TLS_SW)
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4831ad745f91..327479ce69f5 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2788,7 +2788,7 @@ static void reg_process_pending_hints(void)
 
 	/* When last_request->processed becomes true this will be rescheduled */
 	if (lr && !lr->processed) {
-		reg_process_hint(lr);
+		pr_debug("Pending regulatory request, waiting for it to be processed...\n");
 		return;
 	}
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0e35b7b9e35..e74837824cea 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -233,25 +233,30 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
 
 	switch (params->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
+		/* Extended Key ID can only be used with CCMP/GCMP ciphers */
+		if ((pairwise && key_idx) ||
+		    params->mode != NL80211_KEY_RX_TX)
+			return -EINVAL;
+		break;
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_CCMP_256:
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
-		/* IEEE802.11-2016 allows only 0 and - when using Extended Key
-		 * ID - 1 as index for pairwise keys.
+		/* IEEE802.11-2016 allows only 0 and - when supporting
+		 * Extended Key ID - 1 as index for pairwise keys.
 		 * @NL80211_KEY_NO_TX is only allowed for pairwise keys when
 		 * the driver supports Extended Key ID.
 		 * @NL80211_KEY_SET_TX can't be set when installing and
 		 * validating a key.
 		 */
-		if (params->mode == NL80211_KEY_NO_TX) {
-			if (!wiphy_ext_feature_isset(&rdev->wiphy,
-						     NL80211_EXT_FEATURE_EXT_KEY_ID))
-				return -EINVAL;
-			else if (!pairwise || key_idx < 0 || key_idx > 1)
+		if ((params->mode == NL80211_KEY_NO_TX && !pairwise) ||
+		    params->mode == NL80211_KEY_SET_TX)
+			return -EINVAL;
+		if (wiphy_ext_feature_isset(&rdev->wiphy,
+					    NL80211_EXT_FEATURE_EXT_KEY_ID)) {
+			if (pairwise && (key_idx < 0 || key_idx > 1))
 				return -EINVAL;
-		} else if ((pairwise && key_idx) ||
-			   params->mode == NL80211_KEY_SET_TX) {
+		} else if (pairwise && key_idx) {
 			return -EINVAL;
 		}
 		break;
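
The rewritten validation boils down to: a pairwise key may use index 1 only when the driver advertises Extended Key ID support; otherwise index 0 is the sole legal choice. A reduced predicate capturing that rule (a deliberate simplification of the full hunk, which also constrains key modes and the TKIP cipher):

#include <stdbool.h>
#include <stdio.h>

static bool pairwise_idx_ok(bool ext_key_id, int key_idx)
{
	if (ext_key_id)
		return key_idx == 0 || key_idx == 1;
	return key_idx == 0;
}

int main(void)
{
	printf("no EXT_KEY_ID, idx 1: %d\n", pairwise_idx_ok(false, 1)); /* 0 */
	printf("EXT_KEY_ID,    idx 1: %d\n", pairwise_idx_ok(true, 1));  /* 1 */
	return 0;
}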
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 83de74ca729a..688aac7a6943 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
 	if (!umem->pages) {
 		err = -ENOMEM;
-		goto out_account;
+		goto out_pin;
 	}
 
 	for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 	return 0;
 
+out_pin:
+	xdp_umem_unpin_pages(umem);
 out_account:
 	xdp_umem_unaccount_pages(umem);
 	return err;
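
The leak fix adds the missing unpin step to the unwind chain. The idiom: each failure jumps to a label that undoes exactly the steps that had already succeeded, in reverse order. A freestanding sketch with stub steps standing in for accounting, pinning, and the final allocation:

#include <stdio.h>

static int  account_pages(void)   { puts("account");   return 0; }
static void unaccount_pages(void) { puts("unaccount"); }
static int  pin_pages(void)       { puts("pin");       return 0; }
static void unpin_pages(void)     { puts("unpin"); }
static int  alloc_table(void)     { puts("alloc");     return -1; }

static int umem_reg(void)
{
	int err;

	err = account_pages();
	if (err)
		return err;
	err = pin_pages();
	if (err)
		goto out_account;
	err = alloc_table();
	if (err)
		goto out_pin;	/* the label this fix adds */
	return 0;

out_pin:
	unpin_pages();		/* undo the pin */
out_account:
	unaccount_pages();	/* undo the accounting */
	return err;
}

int main(void)
{
	return umem_reg() ? 1 : 0;
}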
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8ca637a72697..ec94f5795ea4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3269,7 +3269,7 @@ decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
-	if (skb_dst(skb))
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl4, 0, sizeof(struct flowi4));
@@ -3387,7 +3387,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
 
 	nexthdr = nh[nhoff];
 
-	if (skb_dst(skb))
+	if (skb_dst(skb) && skb_dst(skb)->dev)
 		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl6, 0, sizeof(struct flowi6));
diff --git a/samples/auxdisplay/cfag12864b-example.c b/samples/auxdisplay/cfag12864b-example.c
index 85571e90191f..bfeab44f81d0 100644
--- a/samples/auxdisplay/cfag12864b-example.c
+++ b/samples/auxdisplay/cfag12864b-example.c
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
 
 	if (argc != 2) {
 		printf(
-			"Sintax:  %s fbdev\n"
+			"Syntax:  %s fbdev\n"
 			"Usually: /dev/fb0, /dev/fb1...\n", argv[0]);
 		return -1;
 	}
diff --git a/scripts/coccinelle/api/atomic_as_refcounter.cocci b/scripts/coccinelle/api/atomic_as_refcounter.cocci
index 988120e0fd67..0f78d94abc35 100644
--- a/scripts/coccinelle/api/atomic_as_refcounter.cocci
+++ b/scripts/coccinelle/api/atomic_as_refcounter.cocci
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 // Check if refcount_t type and API should be used
 // instead of atomic_t type when dealing with refcounters
 //
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 7325f382dbf4..957b9e3e1492 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,7 +595,7 @@ struct key *request_key_and_link(struct key_type *type,
 
 	key = check_cached_key(&ctx);
 	if (key)
-		return key;
+		goto error_free;
 
 	/* search all the process keyrings for a key */
 	rcu_read_lock();
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 9a94672e7adc..ade699131065 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1228,24 +1228,11 @@ hashalg_fail:
 
 static int __init init_digests(void)
 {
-	u8 digest[TPM_MAX_DIGEST_SIZE];
-	int ret;
-	int i;
-
-	ret = tpm_get_random(chip, digest, TPM_MAX_DIGEST_SIZE);
-	if (ret < 0)
-		return ret;
-	if (ret < TPM_MAX_DIGEST_SIZE)
-		return -EFAULT;
-
 	digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
 			  GFP_KERNEL);
 	if (!digests)
 		return -ENOMEM;
 
-	for (i = 0; i < chip->nr_allocated_banks; i++)
-		memcpy(digests[i].digest, digest, TPM_MAX_DIGEST_SIZE);
-
 	return 0;
 }
 
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 7737b2670064..6d9592f0ae1d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1835,8 +1835,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
 	if (cptr->type == USER_CLIENT) {
 		info->input_pool = cptr->data.user.fifo_pool_size;
 		info->input_free = info->input_pool;
-		if (cptr->data.user.fifo)
-			info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
+		info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
 	} else {
 		info->input_pool = 0;
 		info->input_free = 0;
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index ea69261f269a..eaaa8b5830bb 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -263,3 +263,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
 
 	return 0;
 }
+
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
+{
+	unsigned long flags;
+	int cells;
+
+	if (!f)
+		return 0;
+
+	snd_use_lock_use(&f->use_lock);
+	spin_lock_irqsave(&f->lock, flags);
+	cells = snd_seq_unused_cells(f->pool);
+	spin_unlock_irqrestore(&f->lock, flags);
+	snd_use_lock_free(&f->use_lock);
+	return cells;
+}
diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
index edc68743943d..b56a7b897c9c 100644
--- a/sound/core/seq/seq_fifo.h
+++ b/sound/core/seq/seq_fifo.h
@@ -53,5 +53,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
 /* resize pool in fifo */
 int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
 
+/* get the number of unused cells safely */
+int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
 
 #endif
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 9ea39348cdf5..7c6d1c277d4d 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -248,7 +248,7 @@ static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
248 unsigned int channels = params_channels(hw_params); 248 unsigned int channels = params_channels(hw_params);
249 249
250 mutex_lock(&oxfw->mutex); 250 mutex_lock(&oxfw->mutex);
251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->tx_stream, 251 err = snd_oxfw_stream_reserve_duplex(oxfw, &oxfw->rx_stream,
252 rate, channels); 252 rate, channels);
253 if (err >= 0) 253 if (err >= 0)
254 ++oxfw->substreams_count; 254 ++oxfw->substreams_count;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 485edaba0037..5bf24fb819d2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6051,6 +6051,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
6051} 6051}
6052EXPORT_SYMBOL_GPL(snd_hda_gen_free); 6052EXPORT_SYMBOL_GPL(snd_hda_gen_free);
6053 6053
6054/**
6055 * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
6056 * @codec: the HDA codec
6057 *
6058 * This can be put as patch_ops reboot_notify function.
6059 */
6060void snd_hda_gen_reboot_notify(struct hda_codec *codec)
6061{
6062 /* Make the codec enter D3 to avoid spurious noises from the internal
6063 * speaker during (and after) reboot
6064 */
6065 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
6066 snd_hda_codec_write(codec, codec->core.afg, 0,
6067 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
6068 msleep(10);
6069}
6070EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
6071
6054#ifdef CONFIG_PM 6072#ifdef CONFIG_PM
6055/** 6073/**
6056 * snd_hda_gen_check_power_status - check the loopback power save state 6074 * snd_hda_gen_check_power_status - check the loopback power save state
@@ -6078,6 +6096,7 @@ static const struct hda_codec_ops generic_patch_ops = {
6078 .init = snd_hda_gen_init, 6096 .init = snd_hda_gen_init,
6079 .free = snd_hda_gen_free, 6097 .free = snd_hda_gen_free,
6080 .unsol_event = snd_hda_jack_unsol_event, 6098 .unsol_event = snd_hda_jack_unsol_event,
6099 .reboot_notify = snd_hda_gen_reboot_notify,
6081#ifdef CONFIG_PM 6100#ifdef CONFIG_PM
6082 .check_power_status = snd_hda_gen_check_power_status, 6101 .check_power_status = snd_hda_gen_check_power_status,
6083#endif 6102#endif
@@ -6100,7 +6119,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
6100 6119
6101 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0); 6120 err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
6102 if (err < 0) 6121 if (err < 0)
6103 return err; 6122 goto error;
6104 6123
6105 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg); 6124 err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
6106 if (err < 0) 6125 if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 35a670a71c42..5f199dcb0d18 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -332,6 +332,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
332 struct auto_pin_cfg *cfg); 332 struct auto_pin_cfg *cfg);
333int snd_hda_gen_build_controls(struct hda_codec *codec); 333int snd_hda_gen_build_controls(struct hda_codec *codec);
334int snd_hda_gen_build_pcms(struct hda_codec *codec); 334int snd_hda_gen_build_pcms(struct hda_codec *codec);
335void snd_hda_gen_reboot_notify(struct hda_codec *codec);
335 336
336/* standard jack event callbacks */ 337/* standard jack event callbacks */
337void snd_hda_gen_hp_automute(struct hda_codec *codec, 338void snd_hda_gen_hp_automute(struct hda_codec *codec,
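[Note: the hda_generic changes export one reboot-notify helper that the Conexant and Realtek diffs below then reuse instead of carrying private copies of the same D3 power-down sequence. A shape-only sketch of the refactor; the bodies are placeholders, not the real HDA calls.]

struct codec;

static void set_power_d3(struct codec *c) { (void)c; /* ... */ }
static void msleep_ms(int ms) { (void)ms; /* ... */ }

/* shared helper: previously duplicated in two codec drivers */
void gen_reboot_notify(struct codec *c)
{
	set_power_d3(c);
	msleep_ms(10);
}

/* a driver with extra work still ends with the shared tail */
void cx_reboot_notify(struct codec *c)
{
	/* driver-specific step first (EAPD off in the real code) */
	gen_reboot_notify(c);
}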
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a6d8c0d77b84..99fc0917339b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2508,6 +2508,9 @@ static const struct pci_device_id azx_ids[] = {
2508 /* AMD, X370 & co */ 2508 /* AMD, X370 & co */
2509 { PCI_DEVICE(0x1022, 0x1457), 2509 { PCI_DEVICE(0x1022, 0x1457),
2510 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB }, 2510 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2511 /* AMD, X570 & co */
2512 { PCI_DEVICE(0x1022, 0x1487),
2513 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
2511 /* AMD Stoney */ 2514 /* AMD Stoney */
2512 { PCI_DEVICE(0x1022, 0x157a), 2515 { PCI_DEVICE(0x1022, 0x157a),
2513 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB | 2516 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0d51823d7270..6d1fb7c11f17 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1175,6 +1175,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), 1175 SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), 1176 SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), 1177 SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0027, "Sound Blaster Z", QUIRK_SBZ),
1178 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ), 1179 SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
1179 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), 1180 SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
1180 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), 1181 SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f299f137eaea..968d3caab6ac 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -163,23 +163,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
163{ 163{
164 struct conexant_spec *spec = codec->spec; 164 struct conexant_spec *spec = codec->spec;
165 165
166 switch (codec->core.vendor_id) {
167 case 0x14f12008: /* CX8200 */
168 case 0x14f150f2: /* CX20722 */
169 case 0x14f150f4: /* CX20724 */
170 break;
171 default:
172 return;
173 }
174
175 /* Turn the problematic codec into D3 to avoid spurious noises 166 /* Turn the problematic codec into D3 to avoid spurious noises
176 from the internal speaker during (and after) reboot */ 167 from the internal speaker during (and after) reboot */
177 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); 168 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
178 169 snd_hda_gen_reboot_notify(codec);
179 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
180 snd_hda_codec_write(codec, codec->core.afg, 0,
181 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
182 msleep(10);
183} 170}
184 171
185static void cx_auto_free(struct hda_codec *codec) 172static void cx_auto_free(struct hda_codec *codec)
@@ -624,18 +611,20 @@ static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
624 611
625/* update LED status via GPIO */ 612/* update LED status via GPIO */
626static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask, 613static void cxt_update_gpio_led(struct hda_codec *codec, unsigned int mask,
627 bool enabled) 614 bool led_on)
628{ 615{
629 struct conexant_spec *spec = codec->spec; 616 struct conexant_spec *spec = codec->spec;
630 unsigned int oldval = spec->gpio_led; 617 unsigned int oldval = spec->gpio_led;
631 618
632 if (spec->mute_led_polarity) 619 if (spec->mute_led_polarity)
633 enabled = !enabled; 620 led_on = !led_on;
634 621
635 if (enabled) 622 if (led_on)
636 spec->gpio_led &= ~mask;
637 else
638 spec->gpio_led |= mask; 623 spec->gpio_led |= mask;
624 else
625 spec->gpio_led &= ~mask;
626 codec_dbg(codec, "mask:%d enabled:%d gpio_led:%d\n",
627 mask, led_on, spec->gpio_led);
639 if (spec->gpio_led != oldval) 628 if (spec->gpio_led != oldval)
640 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 629 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
641 spec->gpio_led); 630 spec->gpio_led);
@@ -646,8 +635,8 @@ static void cxt_fixup_gpio_mute_hook(void *private_data, int enabled)
646{ 635{
647 struct hda_codec *codec = private_data; 636 struct hda_codec *codec = private_data;
648 struct conexant_spec *spec = codec->spec; 637 struct conexant_spec *spec = codec->spec;
649 638 /* muted -> LED on */
650 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, enabled); 639 cxt_update_gpio_led(codec, spec->gpio_mute_led_mask, !enabled);
651} 640}
652 641
653/* turn on/off mic-mute LED via GPIO per capture hook */ 642/* turn on/off mic-mute LED via GPIO per capture hook */
@@ -669,7 +658,6 @@ static void cxt_fixup_mute_led_gpio(struct hda_codec *codec,
669 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 }, 658 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x03 },
670 {} 659 {}
671 }; 660 };
672 codec_info(codec, "action: %d gpio_led: %d\n", action, spec->gpio_led);
673 661
674 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 662 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
675 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook; 663 spec->gen.vmaster_mute.hook = cxt_fixup_gpio_mute_hook;
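[Note: the rewritten cxt_update_gpio_led() treats a set mask bit as "LED lit" and the mute hook now passes !enabled, since the vmaster control reports enabled=1 for unmuted audio while the mute LED should light when muted. A runnable sketch of that bit update with the polarity inversion; the struct is invented, only the field names mirror the patch.]

#include <stdbool.h>
#include <stdio.h>

struct spec { unsigned int gpio_led, mute_led_polarity; };

static void update_gpio_led(struct spec *s, unsigned int mask, bool led_on)
{
	if (s->mute_led_polarity)
		led_on = !led_on;
	if (led_on)
		s->gpio_led |= mask;	/* bit set == LED lit */
	else
		s->gpio_led &= ~mask;
}

static void mute_hook(struct spec *s, unsigned int mask, int enabled)
{
	update_gpio_led(s, mask, !enabled);	/* muted -> LED on */
}

int main(void)
{
	struct spec s = { 0, 0 };

	mute_hook(&s, 0x1, 0);			/* muted */
	printf("gpio_led=%#x\n", s.gpio_led);	/* prints 0x1 */
	return 0;
}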
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index de224cbea7a0..e333b3e30e31 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -869,15 +869,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
869 alc_shutup(codec); 869 alc_shutup(codec);
870} 870}
871 871
872/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
873static void alc_d3_at_reboot(struct hda_codec *codec)
874{
875 snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
876 snd_hda_codec_write(codec, codec->core.afg, 0,
877 AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
878 msleep(10);
879}
880
881#define alc_free snd_hda_gen_free 872#define alc_free snd_hda_gen_free
882 873
883#ifdef CONFIG_PM 874#ifdef CONFIG_PM
@@ -5152,7 +5143,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
5152 struct alc_spec *spec = codec->spec; 5143 struct alc_spec *spec = codec->spec;
5153 5144
5154 if (action == HDA_FIXUP_ACT_PRE_PROBE) { 5145 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5155 spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */ 5146 spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
5156 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; 5147 spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
5157 codec->power_save_node = 0; /* avoid click noises */ 5148 codec->power_save_node = 0; /* avoid click noises */
5158 snd_hda_apply_pincfgs(codec, pincfgs); 5149 snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6987,6 +6978,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6987 SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6978 SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
6988 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6979 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
6989 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6980 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6981 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6990 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 6982 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
6991 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 6983 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
6992 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 6984 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index 2c03e0f6bf72..f70211e6b174 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -550,6 +550,15 @@ int line6_init_pcm(struct usb_line6 *line6,
550 line6pcm->volume_monitor = 255; 550 line6pcm->volume_monitor = 255;
551 line6pcm->line6 = line6; 551 line6pcm->line6 = line6;
552 552
553 spin_lock_init(&line6pcm->out.lock);
554 spin_lock_init(&line6pcm->in.lock);
555 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
556
557 line6->line6pcm = line6pcm;
558
559 pcm->private_data = line6pcm;
560 pcm->private_free = line6_cleanup_pcm;
561
553 line6pcm->max_packet_size_in = 562 line6pcm->max_packet_size_in =
554 usb_maxpacket(line6->usbdev, 563 usb_maxpacket(line6->usbdev,
555 usb_rcvisocpipe(line6->usbdev, ep_read), 0); 564 usb_rcvisocpipe(line6->usbdev, ep_read), 0);
@@ -562,15 +571,6 @@ int line6_init_pcm(struct usb_line6 *line6,
562 return -EINVAL; 571 return -EINVAL;
563 } 572 }
564 573
565 spin_lock_init(&line6pcm->out.lock);
566 spin_lock_init(&line6pcm->in.lock);
567 line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
568
569 line6->line6pcm = line6pcm;
570
571 pcm->private_data = line6pcm;
572 pcm->private_free = line6_cleanup_pcm;
573
574 err = line6_create_audio_out_urbs(line6pcm); 574 err = line6_create_audio_out_urbs(line6pcm);
575 if (err < 0) 575 if (err < 0)
576 return err; 576 return err;
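[Note: the line6 hunks move the lock setup and the private_free assignment ahead of the first error return, so a failed max-packet-size probe leaves an object its destructor can handle. A sketch of the init-before-fallible-step ordering, assuming invented names; it is not the ALSA API.]

#include <pthread.h>
#include <stdlib.h>

struct pcm_obj {
	pthread_mutex_t in_lock, out_lock;
	void (*private_free)(struct pcm_obj *);
};

static void pcm_cleanup(struct pcm_obj *p) { free(p); }

static struct pcm_obj *pcm_init(int probe_ok)
{
	struct pcm_obj *p = malloc(sizeof(*p));

	if (!p)
		return NULL;

	/* invariants and the cleanup hook first ... */
	pthread_mutex_init(&p->in_lock, NULL);
	pthread_mutex_init(&p->out_lock, NULL);
	p->private_free = pcm_cleanup;

	/* ... then the step that may fail */
	if (!probe_ok) {
		p->private_free(p);	/* safe: fully initialized */
		return NULL;
	}
	return p;
}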
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 7498b5191b68..eceab19766db 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -68,6 +68,7 @@ struct mixer_build {
68 unsigned char *buffer; 68 unsigned char *buffer;
69 unsigned int buflen; 69 unsigned int buflen;
70 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS); 70 DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
71 DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
71 struct usb_audio_term oterm; 72 struct usb_audio_term oterm;
72 const struct usbmix_name_map *map; 73 const struct usbmix_name_map *map;
73 const struct usbmix_selector_map *selector_map; 74 const struct usbmix_selector_map *selector_map;
@@ -738,12 +739,13 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
738 struct uac_mixer_unit_descriptor *desc) 739 struct uac_mixer_unit_descriptor *desc)
739{ 740{
740 int mu_channels; 741 int mu_channels;
741 void *c;
742 742
743 if (desc->bLength < sizeof(*desc)) 743 if (desc->bLength < sizeof(*desc))
744 return -EINVAL; 744 return -EINVAL;
745 if (!desc->bNrInPins) 745 if (!desc->bNrInPins)
746 return -EINVAL; 746 return -EINVAL;
747 if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
748 return -EINVAL;
747 749
748 switch (state->mixer->protocol) { 750 switch (state->mixer->protocol) {
749 case UAC_VERSION_1: 751 case UAC_VERSION_1:
@@ -759,13 +761,6 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
759 break; 761 break;
760 } 762 }
761 763
762 if (!mu_channels)
763 return 0;
764
765 c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
766 if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
767 return 0; /* no bmControls -> skip */
768
769 return mu_channels; 764 return mu_channels;
770} 765}
771 766
@@ -773,16 +768,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
773 * parse the source unit recursively until it reaches to a terminal 768 * parse the source unit recursively until it reaches to a terminal
774 * or a branched unit. 769 * or a branched unit.
775 */ 770 */
776static int check_input_term(struct mixer_build *state, int id, 771static int __check_input_term(struct mixer_build *state, int id,
777 struct usb_audio_term *term) 772 struct usb_audio_term *term)
778{ 773{
779 int protocol = state->mixer->protocol; 774 int protocol = state->mixer->protocol;
780 int err; 775 int err;
781 void *p1; 776 void *p1;
777 unsigned char *hdr;
782 778
783 memset(term, 0, sizeof(*term)); 779 memset(term, 0, sizeof(*term));
784 while ((p1 = find_audio_control_unit(state, id)) != NULL) { 780 for (;;) {
785 unsigned char *hdr = p1; 781 /* a loop in the terminal chain? */
782 if (test_and_set_bit(id, state->termbitmap))
783 return -EINVAL;
784
785 p1 = find_audio_control_unit(state, id);
786 if (!p1)
787 break;
788
789 hdr = p1;
786 term->id = id; 790 term->id = id;
787 791
788 if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) { 792 if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -800,7 +804,7 @@ static int check_input_term(struct mixer_build *state, int id,
800 804
801 /* call recursively to verify that the 805 /* call recursively to verify that the
802 * referenced clock entity is valid */ 806 * referenced clock entity is valid */
803 err = check_input_term(state, d->bCSourceID, term); 807 err = __check_input_term(state, d->bCSourceID, term);
804 if (err < 0) 808 if (err < 0)
805 return err; 809 return err;
806 810
@@ -834,7 +838,7 @@ static int check_input_term(struct mixer_build *state, int id,
834 case UAC2_CLOCK_SELECTOR: { 838 case UAC2_CLOCK_SELECTOR: {
835 struct uac_selector_unit_descriptor *d = p1; 839 struct uac_selector_unit_descriptor *d = p1;
836 /* call recursively to retrieve the channel info */ 840 /* call recursively to retrieve the channel info */
837 err = check_input_term(state, d->baSourceID[0], term); 841 err = __check_input_term(state, d->baSourceID[0], term);
838 if (err < 0) 842 if (err < 0)
839 return err; 843 return err;
840 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ 844 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -897,7 +901,7 @@ static int check_input_term(struct mixer_build *state, int id,
897 901
898 /* call recursively to verify that the 902 /* call recursively to verify that the
899 * referenced clock entity is valid */ 903 * referenced clock entity is valid */
900 err = check_input_term(state, d->bCSourceID, term); 904 err = __check_input_term(state, d->bCSourceID, term);
901 if (err < 0) 905 if (err < 0)
902 return err; 906 return err;
903 907
@@ -948,7 +952,7 @@ static int check_input_term(struct mixer_build *state, int id,
948 case UAC3_CLOCK_SELECTOR: { 952 case UAC3_CLOCK_SELECTOR: {
949 struct uac_selector_unit_descriptor *d = p1; 953 struct uac_selector_unit_descriptor *d = p1;
950 /* call recursively to retrieve the channel info */ 954 /* call recursively to retrieve the channel info */
951 err = check_input_term(state, d->baSourceID[0], term); 955 err = __check_input_term(state, d->baSourceID[0], term);
952 if (err < 0) 956 if (err < 0)
953 return err; 957 return err;
954 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */ 958 term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -964,7 +968,7 @@ static int check_input_term(struct mixer_build *state, int id,
964 return -EINVAL; 968 return -EINVAL;
965 969
966 /* call recursively to retrieve the channel info */ 970 /* call recursively to retrieve the channel info */
967 err = check_input_term(state, d->baSourceID[0], term); 971 err = __check_input_term(state, d->baSourceID[0], term);
968 if (err < 0) 972 if (err < 0)
969 return err; 973 return err;
970 974
@@ -982,6 +986,15 @@ static int check_input_term(struct mixer_build *state, int id,
982 return -ENODEV; 986 return -ENODEV;
983} 987}
984 988
989
990static int check_input_term(struct mixer_build *state, int id,
991 struct usb_audio_term *term)
992{
993 memset(term, 0, sizeof(*term));
994 memset(state->termbitmap, 0, sizeof(state->termbitmap));
995 return __check_input_term(state, id, term);
996}
997
985/* 998/*
986 * Feature Unit 999 * Feature Unit
987 */ 1000 */
@@ -1988,6 +2001,31 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
1988 * Mixer Unit 2001 * Mixer Unit
1989 */ 2002 */
1990 2003
2004/* check whether the given in/out overflows bmMixerControls matrix */
2005static bool mixer_bitmap_overflow(struct uac_mixer_unit_descriptor *desc,
2006 int protocol, int num_ins, int num_outs)
2007{
2008 u8 *hdr = (u8 *)desc;
2009 u8 *c = uac_mixer_unit_bmControls(desc, protocol);
2010 size_t rest; /* remaining bytes after bmMixerControls */
2011
2012 switch (protocol) {
2013 case UAC_VERSION_1:
2014 default:
2015 rest = 1; /* iMixer */
2016 break;
2017 case UAC_VERSION_2:
2018 rest = 2; /* bmControls + iMixer */
2019 break;
2020 case UAC_VERSION_3:
2021 rest = 6; /* bmControls + wMixerDescrStr */
2022 break;
2023 }
2024
2025 /* overflow? */
2026 return c + (num_ins * num_outs + 7) / 8 + rest > hdr + hdr[0];
2027}
2028
1991/* 2029/*
1992 * build a mixer unit control 2030 * build a mixer unit control
1993 * 2031 *
@@ -2116,6 +2154,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2116 if (err < 0) 2154 if (err < 0)
2117 return err; 2155 return err;
2118 num_ins += iterm.channels; 2156 num_ins += iterm.channels;
2157 if (mixer_bitmap_overflow(desc, state->mixer->protocol,
2158 num_ins, num_outs))
2159 break;
2119 for (; ich < num_ins; ich++) { 2160 for (; ich < num_ins; ich++) {
2120 int och, ich_has_controls = 0; 2161 int och, ich_has_controls = 0;
2121 2162
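[Note: __check_input_term() now marks every visited unit id in termbitmap before following its source, so a crafted descriptor chain that points back to an earlier unit fails with -EINVAL instead of recursing forever; the check_input_term() wrapper resets the bitmap per top-level query. A userspace sketch of that cycle guard over made-up chain data.]

#include <stdio.h>
#include <string.h>

#define MAX_IDS 256

static int next_id[MAX_IDS];	/* next_id[i] == 0 terminates the chain */

static int follow_chain(int id, unsigned char *seen)
{
	while (id) {
		if (seen[id]++)		/* already visited: the chain loops */
			return -1;
		id = next_id[id];
	}
	return 0;
}

int main(void)
{
	unsigned char seen[MAX_IDS];

	next_id[1] = 2;
	next_id[2] = 3;
	next_id[3] = 1;		/* 1 -> 2 -> 3 -> 1: malicious loop */

	memset(seen, 0, sizeof(seen));	/* wrapper resets per query */
	printf("%d\n", follow_chain(1, seen));	/* prints -1 */
	return 0;
}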
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 199fa157a411..27dcb3743690 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1155,17 +1155,17 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
1155{ 1155{
1156 struct usb_mixer_interface *mixer; 1156 struct usb_mixer_interface *mixer;
1157 struct usb_mixer_elem_info *cval; 1157 struct usb_mixer_elem_info *cval;
1158 int unitid = 12; /* SamleRate ExtensionUnit ID */ 1158 int unitid = 12; /* SampleRate ExtensionUnit ID */
1159 1159
1160 list_for_each_entry(mixer, &chip->mixer_list, list) { 1160 list_for_each_entry(mixer, &chip->mixer_list, list) {
1161 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]); 1161 if (mixer->id_elems[unitid]) {
1162 if (cval) { 1162 cval = mixer_elem_list_to_info(mixer->id_elems[unitid]);
1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, 1163 snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
1164 cval->control << 8, 1164 cval->control << 8,
1165 samplerate_id); 1165 samplerate_id);
1166 snd_usb_mixer_notify_id(mixer, unitid); 1166 snd_usb_mixer_notify_id(mixer, unitid);
1167 break;
1167 } 1168 }
1168 break;
1169 } 1169 }
1170} 1170}
1171 1171
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 75b96929f76c..e4bbf79de956 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -339,6 +339,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
339 ep = 0x81; 339 ep = 0x81;
340 ifnum = 2; 340 ifnum = 2;
341 goto add_sync_ep_from_ifnum; 341 goto add_sync_ep_from_ifnum;
342 case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */
342 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ 343 case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */
343 ep = 0x81; 344 ep = 0x81;
344 ifnum = 1; 345 ifnum = 1;
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 5215e0870bcb..6a71324be628 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -204,7 +204,11 @@ int do_pin_fd(int fd, const char *name)
204 if (err) 204 if (err)
205 return err; 205 return err;
206 206
207 return bpf_obj_pin(fd, name); 207 err = bpf_obj_pin(fd, name);
208 if (err)
209 p_err("can't pin the object (%s): %s", name, strerror(errno));
210
211 return err;
208} 212}
209 213
210int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32)) 214int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
@@ -237,7 +241,7 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
237 241
238 fd = get_fd_by_id(id); 242 fd = get_fd_by_id(id);
239 if (fd < 0) { 243 if (fd < 0) {
240 p_err("can't get prog by id (%u): %s", id, strerror(errno)); 244 p_err("can't open object by id (%u): %s", id, strerror(errno));
241 return -1; 245 return -1;
242 } 246 }
243 247
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 66f04a4846a5..43fdbbfe41bb 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
363 if (fd < 0) 363 if (fd < 0)
364 return -1; 364 return -1;
365 365
366 return show_prog(fd); 366 err = show_prog(fd);
367 close(fd);
368 return err;
367 } 369 }
368 370
369 if (argc) 371 if (argc)
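[Note: do_show() used to return show_prog(fd) directly, leaking the descriptor; the fix captures the result, closes, then returns. A minimal sketch of that fd-ownership pattern with a stand-in show() function.]

#include <fcntl.h>
#include <unistd.h>

static int show(int fd) { (void)fd; return 0; }	/* stand-in */

static int do_show_one(const char *path)
{
	int fd = open(path, O_RDONLY);
	int err;

	if (fd < 0)
		return -1;

	err = show(fd);	/* was: return show(fd); -- leaks fd */
	close(fd);
	return err;
}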
diff --git a/tools/hv/hv_get_dhcp_info.sh b/tools/hv/hv_get_dhcp_info.sh
index c38686c44656..2f2a3c7df3de 100755
--- a/tools/hv/hv_get_dhcp_info.sh
+++ b/tools/hv/hv_get_dhcp_info.sh
@@ -13,7 +13,7 @@
13# the script prints the string "Disabled" to stdout. 13# the script prints the string "Disabled" to stdout.
14# 14#
15# Each Distro is expected to implement this script in a distro specific 15# Each Distro is expected to implement this script in a distro specific
16# fashion. For instance on Distros that ship with Network Manager enabled, 16# fashion. For instance, on Distros that ship with Network Manager enabled,
17# this script can be based on the Network Manager APIs for retrieving DHCP 17# this script can be based on the Network Manager APIs for retrieving DHCP
18# information. 18# information.
19 19
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d7e06fe0270e..e9ef4ca6a655 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -700,7 +700,7 @@ static void kvp_get_ipconfig_info(char *if_name,
700 700
701 701
702 /* 702 /*
703 * Gather the DNS state. 703 * Gather the DNS state.
704 * Since there is no standard way to get this information 704 * Since there is no standard way to get this information
705 * across various distributions of interest; we just invoke 705 * across various distributions of interest; we just invoke
706 * an external script that needs to be ported across distros 706 * an external script that needs to be ported across distros
@@ -809,7 +809,7 @@ kvp_get_ip_info(int family, char *if_name, int op,
809 int sn_offset = 0; 809 int sn_offset = 0;
810 int error = 0; 810 int error = 0;
811 char *buffer; 811 char *buffer;
812 struct hv_kvp_ipaddr_value *ip_buffer; 812 struct hv_kvp_ipaddr_value *ip_buffer = NULL;
813 char cidr_mask[5]; /* /xyz */ 813 char cidr_mask[5]; /* /xyz */
814 int weight; 814 int weight;
815 int i; 815 int i;
@@ -1051,7 +1051,7 @@ static int parse_ip_val_buffer(char *in_buf, int *offset,
1051 char *start; 1051 char *start;
1052 1052
1053 /* 1053 /*
1054 * in_buf has sequence of characters that are seperated by 1054 * in_buf has sequence of characters that are separated by
1055 * the character ';'. The last sequence does not have the 1055 * the character ';'. The last sequence does not have the
1056 * terminating ";" character. 1056 * terminating ";" character.
1057 */ 1057 */
@@ -1386,6 +1386,8 @@ int main(int argc, char *argv[])
1386 daemonize = 0; 1386 daemonize = 0;
1387 break; 1387 break;
1388 case 'h': 1388 case 'h':
1389 print_usage(argv);
1390 exit(0);
1389 default: 1391 default:
1390 print_usage(argv); 1392 print_usage(argv);
1391 exit(EXIT_FAILURE); 1393 exit(EXIT_FAILURE);
@@ -1490,7 +1492,7 @@ int main(int argc, char *argv[])
1490 case KVP_OP_GET_IP_INFO: 1492 case KVP_OP_GET_IP_INFO:
1491 kvp_ip_val = &hv_msg->body.kvp_ip_val; 1493 kvp_ip_val = &hv_msg->body.kvp_ip_val;
1492 1494
1493 error = kvp_mac_to_ip(kvp_ip_val); 1495 error = kvp_mac_to_ip(kvp_ip_val);
1494 1496
1495 if (error) 1497 if (error)
1496 hv_msg->error = error; 1498 hv_msg->error = error;
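[Note: both Hyper-V daemons in this series now treat -h as an explicit help request that exits 0, while an unrecognized option still prints usage and exits non-zero. A compact getopt() sketch of that distinction; the option letters are illustrative.]

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void print_usage(char *argv[])
{
	fprintf(stderr, "Usage: %s [-n] [-h]\n", argv[0]);
}

int main(int argc, char *argv[])
{
	int opt;

	while ((opt = getopt(argc, argv, "nh")) != -1) {
		switch (opt) {
		case 'n':
			break;		/* e.g. don't daemonize */
		case 'h':
			print_usage(argv);
			exit(0);	/* requested help is success */
		default:
			print_usage(argv);
			exit(EXIT_FAILURE);
		}
	}
	return 0;
}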
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
index 7ed9f85ef908..d10fe35b7f25 100755
--- a/tools/hv/hv_set_ifconfig.sh
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -12,7 +12,7 @@
12# be used to configure the interface. 12# be used to configure the interface.
13# 13#
14# Each Distro is expected to implement this script in a distro specific 14# Each Distro is expected to implement this script in a distro specific
15# fashion. For instance on Distros that ship with Network Manager enabled, 15# fashion. For instance, on Distros that ship with Network Manager enabled,
16# this script can be based on the Network Manager APIs for configuring the 16# this script can be based on the Network Manager APIs for configuring the
17# interface. 17# interface.
18# 18#
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index efe1e34dd91b..92902a88f671 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -42,7 +42,7 @@ static int vss_do_freeze(char *dir, unsigned int cmd)
42 * If a partition is mounted more than once, only the first 42 * If a partition is mounted more than once, only the first
43 * FREEZE/THAW can succeed and the later ones will get 43 * FREEZE/THAW can succeed and the later ones will get
44 * EBUSY/EINVAL respectively: there could be 2 cases: 44 * EBUSY/EINVAL respectively: there could be 2 cases:
45 * 1) a user may mount the same partition to differnt directories 45 * 1) a user may mount the same partition to different directories
46 * by mistake or on purpose; 46 * by mistake or on purpose;
47 * 2) The subvolume of btrfs appears to have the same partition 47 * 2) The subvolume of btrfs appears to have the same partition
48 * mounted more than once. 48 * mounted more than once.
@@ -218,6 +218,8 @@ int main(int argc, char *argv[])
218 daemonize = 0; 218 daemonize = 0;
219 break; 219 break;
220 case 'h': 220 case 'h':
221 print_usage(argv);
222 exit(0);
221 default: 223 default:
222 print_usage(argv); 224 print_usage(argv);
223 exit(EXIT_FAILURE); 225 exit(EXIT_FAILURE);
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
index 55e7374bade0..099f2c44dbed 100644
--- a/tools/hv/lsvmbus
+++ b/tools/hv/lsvmbus
@@ -4,10 +4,10 @@
4import os 4import os
5from optparse import OptionParser 5from optparse import OptionParser
6 6
7help_msg = "print verbose messages. Try -vv, -vvv for more verbose messages"
7parser = OptionParser() 8parser = OptionParser()
8parser.add_option("-v", "--verbose", dest="verbose", 9parser.add_option(
9 help="print verbose messages. Try -vv, -vvv for \ 10 "-v", "--verbose", dest="verbose", help=help_msg, action="count")
10 more verbose messages", action="count")
11 11
12(options, args) = parser.parse_args() 12(options, args) = parser.parse_args()
13 13
@@ -21,27 +21,28 @@ if not os.path.isdir(vmbus_sys_path):
21 exit(-1) 21 exit(-1)
22 22
23vmbus_dev_dict = { 23vmbus_dev_dict = {
24 '{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]', 24 '{0e0b6031-5213-4934-818b-38d90ced39db}': '[Operating system shutdown]',
25 '{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]', 25 '{9527e630-d0ae-497b-adce-e80ab0175caf}': '[Time Synchronization]',
26 '{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]', 26 '{57164f39-9115-4e78-ab55-382f3bd5422d}': '[Heartbeat]',
27 '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]', 27 '{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}': '[Data Exchange]',
28 '{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]', 28 '{35fa2e29-ea23-4236-96ae-3a6ebacba440}': '[Backup (volume checkpoint)]',
29 '{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]', 29 '{34d14be3-dee4-41c8-9ae7-6b174977c192}': '[Guest services]',
30 '{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]', 30 '{525074dc-8985-46e2-8057-a307dc18a502}': '[Dynamic Memory]',
31 '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse', 31 '{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}': 'Synthetic mouse',
32 '{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard', 32 '{f912ad6d-2b17-48ea-bd65-f927a61c7684}': 'Synthetic keyboard',
33 '{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter', 33 '{da0a7802-e377-4aac-8e77-0558eb1073f8}': 'Synthetic framebuffer adapter',
34 '{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter', 34 '{f8615163-df3e-46c5-913f-f2d2f965ed0e}': 'Synthetic network adapter',
35 '{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller', 35 '{32412632-86cb-44a2-9b5c-50d1417354f5}': 'Synthetic IDE Controller',
36 '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller', 36 '{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}': 'Synthetic SCSI Controller',
37 '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter', 37 '{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}': 'Synthetic fiber channel adapter',
38 '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter', 38 '{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}': 'Synthetic RDMA adapter',
39 '{44c4f61d-4444-4400-9d52-802e27ede19f}' : 'PCI Express pass-through', 39 '{44c4f61d-4444-4400-9d52-802e27ede19f}': 'PCI Express pass-through',
40 '{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]', 40 '{276aacf4-ac15-426c-98dd-7521ad3f01fe}': '[Reserved system device]',
41 '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]', 41 '{f8e65716-3cb3-4a06-9a60-1889c5cccab5}': '[Reserved system device]',
42 '{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]', 42 '{3375baf4-9e15-4b30-b765-67acb10d607b}': '[Reserved system device]',
43} 43}
44 44
45
45def get_vmbus_dev_attr(dev_name, attr): 46def get_vmbus_dev_attr(dev_name, attr):
46 try: 47 try:
47 f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r') 48 f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
@@ -52,6 +53,7 @@ def get_vmbus_dev_attr(dev_name, attr):
52 53
53 return lines 54 return lines
54 55
56
55class VMBus_Dev: 57class VMBus_Dev:
56 pass 58 pass
57 59
@@ -66,12 +68,13 @@ for f in os.listdir(vmbus_sys_path):
66 68
67 chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping') 69 chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
68 chn_vp_mapping = [c.strip() for c in chn_vp_mapping] 70 chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
69 chn_vp_mapping = sorted(chn_vp_mapping, 71 chn_vp_mapping = sorted(
70 key = lambda c : int(c.split(':')[0])) 72 chn_vp_mapping, key=lambda c: int(c.split(':')[0]))
71 73
72 chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' % 74 chn_vp_mapping = [
73 (c.split(':')[0], c.split(':')[1]) 75 '\tRel_ID=%s, target_cpu=%s' %
74 for c in chn_vp_mapping] 76 (c.split(':')[0], c.split(':')[1]) for c in chn_vp_mapping
77 ]
75 d = VMBus_Dev() 78 d = VMBus_Dev()
76 d.sysfs_path = '%s/%s' % (vmbus_sys_path, f) 79 d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
77 d.vmbus_id = vmbus_id 80 d.vmbus_id = vmbus_id
@@ -85,7 +88,7 @@ for f in os.listdir(vmbus_sys_path):
85 vmbus_dev_list.append(d) 88 vmbus_dev_list.append(d)
86 89
87 90
88vmbus_dev_list = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id)) 91vmbus_dev_list = sorted(vmbus_dev_list, key=lambda d: int(d.vmbus_id))
89 92
90format0 = '%2s: %s' 93format0 = '%2s: %s'
91format1 = '%2s: Class_ID = %s - %s\n%s' 94format1 = '%2s: Class_ID = %s - %s\n%s'
@@ -95,9 +98,15 @@ for d in vmbus_dev_list:
95 if verbose == 0: 98 if verbose == 0:
96 print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc)) 99 print(('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc))
97 elif verbose == 1: 100 elif verbose == 1:
98 print (('VMBUS ID ' + format1) % \ 101 print(
99 (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)) 102 ('VMBUS ID ' + format1) %
103 (d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
104 )
100 else: 105 else:
101 print (('VMBUS ID ' + format2) % \ 106 print(
102 (d.vmbus_id, d.class_id, d.dev_desc, \ 107 ('VMBUS ID ' + format2) %
103 d.device_id, d.sysfs_path, d.chn_vp_mapping)) 108 (
109 d.vmbus_id, d.class_id, d.dev_desc,
110 d.device_id, d.sysfs_path, d.chn_vp_mapping
111 )
112 )
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4e455018da65..a5aa7d3ac6a1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1466,8 +1466,8 @@ union bpf_attr {
1466 * If no cookie has been set yet, generate a new cookie. Once 1466 * If no cookie has been set yet, generate a new cookie. Once
1467 * generated, the socket cookie remains stable for the life of the 1467 * generated, the socket cookie remains stable for the life of the
1468 * socket. This helper can be useful for monitoring per socket 1468 * socket. This helper can be useful for monitoring per socket
1469 * networking traffic statistics as it provides a unique socket 1469 * networking traffic statistics as it provides a global socket
1470 * identifier per namespace. 1470 * identifier that can be assumed unique.
1471 * Return 1471 * Return
1472 * A 8-byte long non-decreasing number on success, or 0 if the 1472 * A 8-byte long non-decreasing number on success, or 0 if the
1473 * socket field is missing inside *skb*. 1473 * socket field is missing inside *skb*.
@@ -1571,8 +1571,11 @@ union bpf_attr {
1571 * but this is only implemented for native XDP (with driver 1571 * but this is only implemented for native XDP (with driver
1572 * support) as of this writing). 1572 * support) as of this writing).
1573 * 1573 *
1574 * All values for *flags* are reserved for future usage, and must 1574 * The lower two bits of *flags* are used as the return code if
1575 * be left at zero. 1575 * the map lookup fails. This is so that the return value can be
1576 * one of the XDP program return codes up to XDP_TX, as chosen by
1577 * the caller. Any higher bits in the *flags* argument must be
1578 * unset.
1576 * 1579 *
1577 * When used to redirect packets to net devices, this helper 1580 * When used to redirect packets to net devices, this helper
1578 * provides a high performance increase over **bpf_redirect**\ (). 1581 * provides a high performance increase over **bpf_redirect**\ ().
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2586b6cb8f34..2b57d7ea7836 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -182,7 +182,6 @@ struct bpf_program {
182 bpf_program_clear_priv_t clear_priv; 182 bpf_program_clear_priv_t clear_priv;
183 183
184 enum bpf_attach_type expected_attach_type; 184 enum bpf_attach_type expected_attach_type;
185 int btf_fd;
186 void *func_info; 185 void *func_info;
187 __u32 func_info_rec_size; 186 __u32 func_info_rec_size;
188 __u32 func_info_cnt; 187 __u32 func_info_cnt;
@@ -313,7 +312,6 @@ void bpf_program__unload(struct bpf_program *prog)
313 prog->instances.nr = -1; 312 prog->instances.nr = -1;
314 zfree(&prog->instances.fds); 313 zfree(&prog->instances.fds);
315 314
316 zclose(prog->btf_fd);
317 zfree(&prog->func_info); 315 zfree(&prog->func_info);
318 zfree(&prog->line_info); 316 zfree(&prog->line_info);
319} 317}
@@ -392,7 +390,6 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
392 prog->instances.fds = NULL; 390 prog->instances.fds = NULL;
393 prog->instances.nr = -1; 391 prog->instances.nr = -1;
394 prog->type = BPF_PROG_TYPE_UNSPEC; 392 prog->type = BPF_PROG_TYPE_UNSPEC;
395 prog->btf_fd = -1;
396 393
397 return 0; 394 return 0;
398errout: 395errout:
@@ -2288,9 +2285,6 @@ bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
2288 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext); 2285 prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
2289 } 2286 }
2290 2287
2291 if (!insn_offset)
2292 prog->btf_fd = btf__fd(obj->btf);
2293
2294 return 0; 2288 return 0;
2295} 2289}
2296 2290
@@ -2463,7 +2457,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2463 char *cp, errmsg[STRERR_BUFSIZE]; 2457 char *cp, errmsg[STRERR_BUFSIZE];
2464 int log_buf_size = BPF_LOG_BUF_SIZE; 2458 int log_buf_size = BPF_LOG_BUF_SIZE;
2465 char *log_buf; 2459 char *log_buf;
2466 int ret; 2460 int btf_fd, ret;
2467 2461
2468 if (!insns || !insns_cnt) 2462 if (!insns || !insns_cnt)
2469 return -EINVAL; 2463 return -EINVAL;
@@ -2478,7 +2472,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2478 load_attr.license = license; 2472 load_attr.license = license;
2479 load_attr.kern_version = kern_version; 2473 load_attr.kern_version = kern_version;
2480 load_attr.prog_ifindex = prog->prog_ifindex; 2474 load_attr.prog_ifindex = prog->prog_ifindex;
2481 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; 2475 /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
2476 if (prog->obj->btf_ext)
2477 btf_fd = bpf_object__btf_fd(prog->obj);
2478 else
2479 btf_fd = -1;
2480 load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
2482 load_attr.func_info = prog->func_info; 2481 load_attr.func_info = prog->func_info;
2483 load_attr.func_info_rec_size = prog->func_info_rec_size; 2482 load_attr.func_info_rec_size = prog->func_info_rec_size;
2484 load_attr.func_info_cnt = prog->func_info_cnt; 2483 load_attr.func_info_cnt = prog->func_info_cnt;
@@ -5000,13 +4999,15 @@ int libbpf_num_possible_cpus(void)
5000 static const char *fcpu = "/sys/devices/system/cpu/possible"; 4999 static const char *fcpu = "/sys/devices/system/cpu/possible";
5001 int len = 0, n = 0, il = 0, ir = 0; 5000 int len = 0, n = 0, il = 0, ir = 0;
5002 unsigned int start = 0, end = 0; 5001 unsigned int start = 0, end = 0;
5002 int tmp_cpus = 0;
5003 static int cpus; 5003 static int cpus;
5004 char buf[128]; 5004 char buf[128];
5005 int error = 0; 5005 int error = 0;
5006 int fd = -1; 5006 int fd = -1;
5007 5007
5008 if (cpus > 0) 5008 tmp_cpus = READ_ONCE(cpus);
5009 return cpus; 5009 if (tmp_cpus > 0)
5010 return tmp_cpus;
5010 5011
5011 fd = open(fcpu, O_RDONLY); 5012 fd = open(fcpu, O_RDONLY);
5012 if (fd < 0) { 5013 if (fd < 0) {
@@ -5029,7 +5030,7 @@ int libbpf_num_possible_cpus(void)
5029 } 5030 }
5030 buf[len] = '\0'; 5031 buf[len] = '\0';
5031 5032
5032 for (ir = 0, cpus = 0; ir <= len; ir++) { 5033 for (ir = 0, tmp_cpus = 0; ir <= len; ir++) {
5033 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */ 5034 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
5034 if (buf[ir] == ',' || buf[ir] == '\0') { 5035 if (buf[ir] == ',' || buf[ir] == '\0') {
5035 buf[ir] = '\0'; 5036 buf[ir] = '\0';
@@ -5041,13 +5042,15 @@ int libbpf_num_possible_cpus(void)
5041 } else if (n == 1) { 5042 } else if (n == 1) {
5042 end = start; 5043 end = start;
5043 } 5044 }
5044 cpus += end - start + 1; 5045 tmp_cpus += end - start + 1;
5045 il = ir + 1; 5046 il = ir + 1;
5046 } 5047 }
5047 } 5048 }
5048 if (cpus <= 0) { 5049 if (tmp_cpus <= 0) {
5049 pr_warning("Invalid #CPUs %d from %s\n", cpus, fcpu); 5050 pr_warning("Invalid #CPUs %d from %s\n", tmp_cpus, fcpu);
5050 return -EINVAL; 5051 return -EINVAL;
5051 } 5052 }
5052 return cpus; 5053
5054 WRITE_ONCE(cpus, tmp_cpus);
5055 return tmp_cpus;
5053} 5056}
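[Note: libbpf_num_possible_cpus() now accumulates into a local tmp_cpus and publishes the finished value once, so concurrent callers never observe the partially summed static counter. An analogue of that once-computed cache using C11 relaxed atomics in place of the kernel-style READ_ONCE/WRITE_ONCE macros; the slow path is a placeholder for the real /sys parsing.]

#include <stdatomic.h>

static _Atomic int cached_cpus;

static int count_cpus_slow(void)
{
	int n = 0, i;

	for (i = 0; i < 8; i++)	/* placeholder for parsing the sysfs range */
		n += 1;
	return n;
}

int num_possible_cpus(void)
{
	int tmp = atomic_load_explicit(&cached_cpus, memory_order_relaxed);

	if (tmp > 0)
		return tmp;

	tmp = count_cpus_slow();	/* never touches the shared cache */
	atomic_store_explicit(&cached_cpus, tmp, memory_order_relaxed);
	return tmp;
}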
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 045f5f7d68ab..13f1e8b9ac52 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -9,9 +9,10 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11turbostat : turbostat.c 11turbostat : turbostat.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' 14override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
15override CFLAGS += -D_FORTIFY_SOURCE=2
15 16
16%: %.c 17%: %.c
17 @mkdir -p $(BUILD_OUTPUT) 18 @mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 75fc4fb9901c..b2a86438f074 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -39,7 +39,6 @@ FILE *outf;
39int *fd_percpu; 39int *fd_percpu;
40struct timeval interval_tv = {5, 0}; 40struct timeval interval_tv = {5, 0};
41struct timespec interval_ts = {5, 0}; 41struct timespec interval_ts = {5, 0};
42struct timespec one_msec = {0, 1000000};
43unsigned int num_iterations; 42unsigned int num_iterations;
44unsigned int debug; 43unsigned int debug;
45unsigned int quiet; 44unsigned int quiet;
@@ -60,6 +59,7 @@ unsigned int do_irtl_hsw;
60unsigned int units = 1000000; /* MHz etc */ 59unsigned int units = 1000000; /* MHz etc */
61unsigned int genuine_intel; 60unsigned int genuine_intel;
62unsigned int authentic_amd; 61unsigned int authentic_amd;
62unsigned int hygon_genuine;
63unsigned int max_level, max_extended_level; 63unsigned int max_level, max_extended_level;
64unsigned int has_invariant_tsc; 64unsigned int has_invariant_tsc;
65unsigned int do_nhm_platform_info; 65unsigned int do_nhm_platform_info;
@@ -100,6 +100,7 @@ unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */ 100unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
101unsigned int has_misc_feature_control; 101unsigned int has_misc_feature_control;
102unsigned int first_counter_read = 1; 102unsigned int first_counter_read = 1;
103int ignore_stdin;
103 104
104#define RAPL_PKG (1 << 0) 105#define RAPL_PKG (1 << 0)
105 /* 0x610 MSR_PKG_POWER_LIMIT */ 106 /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -166,6 +167,7 @@ size_t cpu_present_setsize, cpu_affinity_setsize, cpu_subset_size;
166struct thread_data { 167struct thread_data {
167 struct timeval tv_begin; 168 struct timeval tv_begin;
168 struct timeval tv_end; 169 struct timeval tv_end;
170 struct timeval tv_delta;
169 unsigned long long tsc; 171 unsigned long long tsc;
170 unsigned long long aperf; 172 unsigned long long aperf;
171 unsigned long long mperf; 173 unsigned long long mperf;
@@ -506,6 +508,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU
506unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; 508unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
507 509
508#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) 510#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
511#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME)
509#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) 512#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
510#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) 513#define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT)
511#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) 514#define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT)
@@ -849,7 +852,6 @@ int dump_counters(struct thread_data *t, struct core_data *c,
849 outp += sprintf(outp, "pc8: %016llX\n", p->pc8); 852 outp += sprintf(outp, "pc8: %016llX\n", p->pc8);
850 outp += sprintf(outp, "pc9: %016llX\n", p->pc9); 853 outp += sprintf(outp, "pc9: %016llX\n", p->pc9);
851 outp += sprintf(outp, "pc10: %016llX\n", p->pc10); 854 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
852 outp += sprintf(outp, "pc10: %016llX\n", p->pc10);
853 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi); 855 outp += sprintf(outp, "cpu_lpi: %016llX\n", p->cpu_lpi);
854 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi); 856 outp += sprintf(outp, "sys_lpi: %016llX\n", p->sys_lpi);
855 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg); 857 outp += sprintf(outp, "Joules PKG: %0X\n", p->energy_pkg);
@@ -911,7 +913,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
911 if (DO_BIC(BIC_TOD)) 913 if (DO_BIC(BIC_TOD))
912 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec); 914 outp += sprintf(outp, "%10ld.%06ld\t", t->tv_end.tv_sec, t->tv_end.tv_usec);
913 915
914 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 916 interval_float = t->tv_delta.tv_sec + t->tv_delta.tv_usec/1000000.0;
915 917
916 tsc = t->tsc * tsc_tweak; 918 tsc = t->tsc * tsc_tweak;
917 919
@@ -1287,6 +1289,14 @@ delta_core(struct core_data *new, struct core_data *old)
1287 } 1289 }
1288} 1290}
1289 1291
1292int soft_c1_residency_display(int bic)
1293{
1294 if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr)
1295 return 0;
1296
1297 return DO_BIC_READ(bic);
1298}
1299
1290/* 1300/*
1291 * old = new - old 1301 * old = new - old
1292 */ 1302 */
@@ -1309,6 +1319,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1309 * over-write old w/ new so we can print end of interval values 1319 * over-write old w/ new so we can print end of interval values
1310 */ 1320 */
1311 1321
1322 timersub(&new->tv_begin, &old->tv_begin, &old->tv_delta);
1312 old->tv_begin = new->tv_begin; 1323 old->tv_begin = new->tv_begin;
1313 old->tv_end = new->tv_end; 1324 old->tv_end = new->tv_end;
1314 1325
@@ -1322,7 +1333,8 @@ delta_thread(struct thread_data *new, struct thread_data *old,
1322 1333
1323 old->c1 = new->c1 - old->c1; 1334 old->c1 = new->c1 - old->c1;
1324 1335
1325 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1336 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1337 soft_c1_residency_display(BIC_Avg_MHz)) {
1326 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { 1338 if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) {
1327 old->aperf = new->aperf - old->aperf; 1339 old->aperf = new->aperf - old->aperf;
1328 old->mperf = new->mperf - old->mperf; 1340 old->mperf = new->mperf - old->mperf;
@@ -1404,6 +1416,8 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
1404 t->tv_begin.tv_usec = 0; 1416 t->tv_begin.tv_usec = 0;
1405 t->tv_end.tv_sec = 0; 1417 t->tv_end.tv_sec = 0;
1406 t->tv_end.tv_usec = 0; 1418 t->tv_end.tv_usec = 0;
1419 t->tv_delta.tv_sec = 0;
1420 t->tv_delta.tv_usec = 0;
1407 1421
1408 t->tsc = 0; 1422 t->tsc = 0;
1409 t->aperf = 0; 1423 t->aperf = 0;
@@ -1573,6 +1587,9 @@ void compute_average(struct thread_data *t, struct core_data *c,
1573 1587
1574 for_all_cpus(sum_counters, t, c, p); 1588 for_all_cpus(sum_counters, t, c, p);
1575 1589
1590 /* Use the global time delta for the average. */
1591 average.threads.tv_delta = tv_delta;
1592
1576 average.threads.tsc /= topo.num_cpus; 1593 average.threads.tsc /= topo.num_cpus;
1577 average.threads.aperf /= topo.num_cpus; 1594 average.threads.aperf /= topo.num_cpus;
1578 average.threads.mperf /= topo.num_cpus; 1595 average.threads.mperf /= topo.num_cpus;
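[Note: the hunks above give each thread its own tv_delta, filled by timersub() between consecutive begin timestamps, and format_counters derives interval_float from that per-thread delta instead of the single global one. A runnable sketch of the timersub arithmetic with made-up timestamps.]

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval begin_old = { 100, 250000 };
	struct timeval begin_new = { 105, 750000 };
	struct timeval delta;
	double interval_float;

	timersub(&begin_new, &begin_old, &delta);
	interval_float = delta.tv_sec + delta.tv_usec / 1000000.0;
	printf("%.6f\n", interval_float);	/* prints 5.500000 */
	return 0;
}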
@@ -1714,7 +1731,7 @@ void get_apic_id(struct thread_data *t)
1714 if (!DO_BIC(BIC_X2APIC)) 1731 if (!DO_BIC(BIC_X2APIC))
1715 return; 1732 return;
1716 1733
1717 if (authentic_amd) { 1734 if (authentic_amd || hygon_genuine) {
1718 unsigned int topology_extensions; 1735 unsigned int topology_extensions;
1719 1736
1720 if (max_extended_level < 0x8000001e) 1737 if (max_extended_level < 0x8000001e)
@@ -1762,19 +1779,20 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
1762 struct msr_counter *mp; 1779 struct msr_counter *mp;
1763 int i; 1780 int i;
1764 1781
1765 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1766
1767 if (cpu_migrate(cpu)) { 1782 if (cpu_migrate(cpu)) {
1768 fprintf(outf, "Could not migrate to CPU %d\n", cpu); 1783 fprintf(outf, "Could not migrate to CPU %d\n", cpu);
1769 return -1; 1784 return -1;
1770 } 1785 }
1771 1786
1787 gettimeofday(&t->tv_begin, (struct timezone *)NULL);
1788
1772 if (first_counter_read) 1789 if (first_counter_read)
1773 get_apic_id(t); 1790 get_apic_id(t);
1774retry: 1791retry:
1775 t->tsc = rdtsc(); /* we are running on local CPU of interest */ 1792 t->tsc = rdtsc(); /* we are running on local CPU of interest */
1776 1793
1777 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { 1794 if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) ||
1795 soft_c1_residency_display(BIC_Avg_MHz)) {
1778 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; 1796 unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time;
1779 1797
1780 /* 1798 /*
@@ -1851,20 +1869,20 @@ retry:
1851 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) 1869 if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
1852 goto done; 1870 goto done;
1853 1871
1854 if (DO_BIC(BIC_CPU_c3)) { 1872 if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) {
1855 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) 1873 if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
1856 return -6; 1874 return -6;
1857 } 1875 }
1858 1876
1859 if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { 1877 if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) {
1860 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) 1878 if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
1861 return -7; 1879 return -7;
1862 } else if (do_knl_cstates) { 1880 } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) {
1863 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) 1881 if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
1864 return -7; 1882 return -7;
1865 } 1883 }
1866 1884
1867 if (DO_BIC(BIC_CPU_c7)) 1885 if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7))
1868 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) 1886 if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7))
1869 return -8; 1887 return -8;
1870 1888
@@ -2912,6 +2930,7 @@ int snapshot_cpu_lpi_us(void)
2912 if (retval != 1) { 2930 if (retval != 1) {
2913 fprintf(stderr, "Disabling Low Power Idle CPU output\n"); 2931 fprintf(stderr, "Disabling Low Power Idle CPU output\n");
2914 BIC_NOT_PRESENT(BIC_CPU_LPI); 2932 BIC_NOT_PRESENT(BIC_CPU_LPI);
2933 fclose(fp);
2915 return -1; 2934 return -1;
2916 } 2935 }
2917 2936
@@ -2938,6 +2957,7 @@ int snapshot_sys_lpi_us(void)
2938 if (retval != 1) { 2957 if (retval != 1) {
2939 fprintf(stderr, "Disabling Low Power Idle System output\n"); 2958 fprintf(stderr, "Disabling Low Power Idle System output\n");
2940 BIC_NOT_PRESENT(BIC_SYS_LPI); 2959 BIC_NOT_PRESENT(BIC_SYS_LPI);
2960 fclose(fp);
2941 return -1; 2961 return -1;
2942 } 2962 }
2943 fclose(fp); 2963 fclose(fp);
@@ -2985,8 +3005,6 @@ static void signal_handler (int signal)
2985 fprintf(stderr, "SIGUSR1\n"); 3005 fprintf(stderr, "SIGUSR1\n");
2986 break; 3006 break;
2987 } 3007 }
2988 /* make sure this manually-invoked interval is at least 1ms long */
2989 nanosleep(&one_msec, NULL);
2990} 3008}
2991 3009
2992void setup_signal_handler(void) 3010void setup_signal_handler(void)
@@ -3005,29 +3023,38 @@ void setup_signal_handler(void)
3005 3023
3006void do_sleep(void) 3024void do_sleep(void)
3007{ 3025{
3008 struct timeval select_timeout; 3026 struct timeval tout;
3027 struct timespec rest;
3009 fd_set readfds; 3028 fd_set readfds;
3010 int retval; 3029 int retval;
3011 3030
3012 FD_ZERO(&readfds); 3031 FD_ZERO(&readfds);
3013 FD_SET(0, &readfds); 3032 FD_SET(0, &readfds);
3014 3033
3015 if (!isatty(fileno(stdin))) { 3034 if (ignore_stdin) {
3016 nanosleep(&interval_ts, NULL); 3035 nanosleep(&interval_ts, NULL);
3017 return; 3036 return;
3018 } 3037 }
3019 3038
3020 select_timeout = interval_tv; 3039 tout = interval_tv;
3021 retval = select(1, &readfds, NULL, NULL, &select_timeout); 3040 retval = select(1, &readfds, NULL, NULL, &tout);
3022 3041
3023 if (retval == 1) { 3042 if (retval == 1) {
3024 switch (getc(stdin)) { 3043 switch (getc(stdin)) {
3025 case 'q': 3044 case 'q':
3026 exit_requested = 1; 3045 exit_requested = 1;
3027 break; 3046 break;
3047 case EOF:
3048 /*
3049 * 'stdin' is a pipe closed on the other end. There
3050 * won't be any further input.
3051 */
3052 ignore_stdin = 1;
3053 /* Sleep the rest of the time */
3054 rest.tv_sec = (tout.tv_sec + tout.tv_usec / 1000000);
3055 rest.tv_nsec = (tout.tv_usec % 1000000) * 1000;
3056 nanosleep(&rest, NULL);
3028 } 3057 }
3029 /* make sure this manually-invoked interval is at least 1ms long */
3030 nanosleep(&one_msec, NULL);
3031 } 3058 }
3032} 3059}
3033 3060
@@ -3209,6 +3236,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
3209 break; 3236 break;
3210 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3237 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3211 case INTEL_FAM6_HASWELL_X: /* HSX */ 3238 case INTEL_FAM6_HASWELL_X: /* HSX */
3239 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3212 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3240 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3213 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3241 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3214 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3242 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3405,6 +3433,7 @@ int has_config_tdp(unsigned int family, unsigned int model)
3405 case INTEL_FAM6_IVYBRIDGE: /* IVB */ 3433 case INTEL_FAM6_IVYBRIDGE: /* IVB */
3406 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3434 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3407 case INTEL_FAM6_HASWELL_X: /* HSX */ 3435 case INTEL_FAM6_HASWELL_X: /* HSX */
3436 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3408 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3437 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3409 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3438 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3410 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3439 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3803,6 +3832,7 @@ double get_tdp_amd(unsigned int family)
3803{ 3832{
3804 switch (family) { 3833 switch (family) {
3805 case 0x17: 3834 case 0x17:
3835 case 0x18:
3806 default: 3836 default:
3807 /* This is the max stock TDP of HEDT/Server Fam17h chips */ 3837 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3808 return 250.0; 3838 return 250.0;
@@ -3841,6 +3871,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
3841 case INTEL_FAM6_SANDYBRIDGE: 3871 case INTEL_FAM6_SANDYBRIDGE:
3842 case INTEL_FAM6_IVYBRIDGE: 3872 case INTEL_FAM6_IVYBRIDGE:
3843 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 3873 case INTEL_FAM6_HASWELL_CORE: /* HSW */
3874 case INTEL_FAM6_HASWELL_ULT: /* HSW */
3844 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 3875 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
3845 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 3876 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
3846 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 3877 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3982,6 +4013,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
3982 4013
3983 switch (family) { 4014 switch (family) {
3984 case 0x17: /* Zen, Zen+ */ 4015 case 0x17: /* Zen, Zen+ */
4016 case 0x18: /* Hygon Dhyana */
3985 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY; 4017 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
3986 if (rapl_joules) { 4018 if (rapl_joules) {
3987 BIC_PRESENT(BIC_Pkg_J); 4019 BIC_PRESENT(BIC_Pkg_J);
@@ -4002,7 +4034,7 @@ void rapl_probe_amd(unsigned int family, unsigned int model)
4002 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f)); 4034 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4003 rapl_power_units = ldexp(1.0, -(msr & 0xf)); 4035 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4004 4036
4005 tdp = get_tdp_amd(model); 4037 tdp = get_tdp_amd(family);
4006 4038
4007 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 4039 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4008 if (!quiet) 4040 if (!quiet)
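
Two fixes meet in this hunk: family 0x18 (Hygon Dhyana, a Zen derivative) joins the family 0x17 case, and get_tdp_amd(), which dispatches on family, is finally passed family instead of model. The unit fields use the usual RAPL encoding of negative powers of two; a worked example under a hypothetical raw MSR value:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long long msr = 0xA1003;	/* hypothetical MSR_RAPL_PWR_UNIT */
		double energy_units = ldexp(1.0, -(int)((msr >> 8) & 0x1f)); /* 2^-16 ~ 15.3 uJ */
		double power_units  = ldexp(1.0, -(int)(msr & 0xf));	      /* 2^-3 = 0.125 W  */

		/* the 32-bit energy counter wraps after 2^32 * 2^-16 J / 250 W ~ 262 s */
		printf("%g uJ/count, %g W units, range %g s\n",
		       energy_units * 1e6, power_units,
		       0xFFFFFFFF * energy_units / 250.0);
		return 0;
	}
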
@@ -4018,7 +4050,7 @@ void rapl_probe(unsigned int family, unsigned int model)
4018{ 4050{
4019 if (genuine_intel) 4051 if (genuine_intel)
4020 rapl_probe_intel(family, model); 4052 rapl_probe_intel(family, model);
4021 if (authentic_amd) 4053 if (authentic_amd || hygon_genuine)
4022 rapl_probe_amd(family, model); 4054 rapl_probe_amd(family, model);
4023} 4055}
4024 4056
@@ -4032,6 +4064,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
4032 4064
4033 switch (model) { 4065 switch (model) {
4034 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4066 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4067 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4035 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4068 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4036 do_gfx_perf_limit_reasons = 1; 4069 do_gfx_perf_limit_reasons = 1;
4037 case INTEL_FAM6_HASWELL_X: /* HSX */ 4070 case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -4251,6 +4284,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4251 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ 4284 case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
4252 case INTEL_FAM6_HASWELL_CORE: /* HSW */ 4285 case INTEL_FAM6_HASWELL_CORE: /* HSW */
4253 case INTEL_FAM6_HASWELL_X: /* HSW */ 4286 case INTEL_FAM6_HASWELL_X: /* HSW */
4287 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4254 case INTEL_FAM6_HASWELL_GT3E: /* HSW */ 4288 case INTEL_FAM6_HASWELL_GT3E: /* HSW */
4255 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4289 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4256 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ 4290 case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -4267,7 +4301,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4267} 4301}
4268 4302
4269/* 4303/*
4270 * HSW adds support for additional MSRs: 4304 * HSW ULT added support for C8/C9/C10 MSRs:
4271 * 4305 *
4272 * MSR_PKG_C8_RESIDENCY 0x00000630 4306 * MSR_PKG_C8_RESIDENCY 0x00000630
4273 * MSR_PKG_C9_RESIDENCY 0x00000631 4307 * MSR_PKG_C9_RESIDENCY 0x00000631
@@ -4278,13 +4312,13 @@ int has_snb_msrs(unsigned int family, unsigned int model)
4278 * MSR_PKGC10_IRTL 0x00000635 4312 * MSR_PKGC10_IRTL 0x00000635
4279 * 4313 *
4280 */ 4314 */
4281int has_hsw_msrs(unsigned int family, unsigned int model) 4315int has_c8910_msrs(unsigned int family, unsigned int model)
4282{ 4316{
4283 if (!genuine_intel) 4317 if (!genuine_intel)
4284 return 0; 4318 return 0;
4285 4319
4286 switch (model) { 4320 switch (model) {
4287 case INTEL_FAM6_HASWELL_CORE: 4321 case INTEL_FAM6_HASWELL_ULT: /* HSW */
4288 case INTEL_FAM6_BROADWELL_CORE: /* BDW */ 4322 case INTEL_FAM6_BROADWELL_CORE: /* BDW */
4289 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ 4323 case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
4290 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ 4324 case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
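
The rename from has_hsw_msrs() to has_c8910_msrs() makes the predicate honest: the C8/C9/C10 residency and IRTL MSRs listed in the comment arrived with Haswell ULT, not with Haswell at large, which is also why HASWELL_ULT replaces HASWELL_CORE in the switch. A sketch of reading one of these counters through the msr driver (root and the msr module assumed; counter units are model-specific):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MSR_PKG_C8_RESIDENCY 0x630	/* address from the comment above */

	static int64_t read_pkg_c8(int cpu)
	{
		char path[64];
		uint64_t val;
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		if (pread(fd, &val, sizeof(val), MSR_PKG_C8_RESIDENCY) != sizeof(val))
			val = (uint64_t)-1;
		close(fd);
		return (int64_t)val;
	}
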
@@ -4568,9 +4602,6 @@ unsigned int intel_model_duplicates(unsigned int model)
4568 case INTEL_FAM6_XEON_PHI_KNM: 4602 case INTEL_FAM6_XEON_PHI_KNM:
4569 return INTEL_FAM6_XEON_PHI_KNL; 4603 return INTEL_FAM6_XEON_PHI_KNL;
4570 4604
4571 case INTEL_FAM6_HASWELL_ULT:
4572 return INTEL_FAM6_HASWELL_CORE;
4573
4574 case INTEL_FAM6_BROADWELL_X: 4605 case INTEL_FAM6_BROADWELL_X:
4575 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ 4606 case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
4576 return INTEL_FAM6_BROADWELL_X; 4607 return INTEL_FAM6_BROADWELL_X;
@@ -4582,7 +4613,11 @@ unsigned int intel_model_duplicates(unsigned int model)
4582 return INTEL_FAM6_SKYLAKE_MOBILE; 4613 return INTEL_FAM6_SKYLAKE_MOBILE;
4583 4614
4584 case INTEL_FAM6_ICELAKE_MOBILE: 4615 case INTEL_FAM6_ICELAKE_MOBILE:
4616 case INTEL_FAM6_ICELAKE_NNPI:
4585 return INTEL_FAM6_CANNONLAKE_MOBILE; 4617 return INTEL_FAM6_CANNONLAKE_MOBILE;
4618
4619 case INTEL_FAM6_ATOM_TREMONT_X:
4620 return INTEL_FAM6_ATOM_GOLDMONT_X;
4586 } 4621 }
4587 return model; 4622 return model;
4588} 4623}
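
Background for the HASWELL_ULT cases added throughout the hunks above: intel_model_duplicates() folds model IDs that turbostat treats identically into one canonical ID, so the feature switches only list canonical names. With the ULT-to-CORE alias removed (ULT has its own RAPL and package C-state behavior), HASWELL_ULT now reaches those switches unaliased and must be listed explicitly in probe_nhm_msrs(), has_config_tdp(), rapl_probe_intel(), perf_limit_reasons_probe(), has_snb_msrs() and has_c8910_msrs(). A reduced sketch of the mechanism, model numbers as in intel-family.h:

	/* Sketch: canonicalize first, then dispatch on the result. */
	enum {
		XEON_PHI_KNL = 0x57, XEON_PHI_KNM = 0x85,
		HASWELL_CORE = 0x3C, HASWELL_ULT  = 0x45,
	};

	static unsigned int canonicalize(unsigned int model)
	{
		switch (model) {
		case XEON_PHI_KNM:
			return XEON_PHI_KNL;	/* still aliased */
		/* case HASWELL_ULT: return HASWELL_CORE;  <- alias now removed */
		default:
			return model;
		}
	}
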
@@ -4600,6 +4635,8 @@ void process_cpuid()
4600 genuine_intel = 1; 4635 genuine_intel = 1;
4601 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65) 4636 else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
4602 authentic_amd = 1; 4637 authentic_amd = 1;
4638 else if (ebx == 0x6f677948 && ecx == 0x656e6975 && edx == 0x6e65476e)
4639 hygon_genuine = 1;
4603 4640
4604 if (!quiet) 4641 if (!quiet)
4605 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ", 4642 fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
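
The three magic constants are the CPUID leaf 0 vendor string "HygonGenuine" viewed as little-endian words in EBX, EDX, ECX order, the same scheme the existing "GenuineIntel" and "AuthenticAMD" checks use. A self-contained demonstration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int ebx = 0x6f677948, edx = 0x6e65476e, ecx = 0x656e6975;
		char vendor[13];

		memcpy(vendor + 0, &ebx, 4);	/* "Hygo" */
		memcpy(vendor + 4, &edx, 4);	/* "nGen" */
		memcpy(vendor + 8, &ecx, 4);	/* "uine" */
		vendor[12] = '\0';
		printf("%s\n", vendor);		/* HygonGenuine (little-endian) */
		return 0;
	}
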
@@ -4820,12 +4857,12 @@ void process_cpuid()
4820 BIC_NOT_PRESENT(BIC_CPU_c7); 4857 BIC_NOT_PRESENT(BIC_CPU_c7);
4821 BIC_NOT_PRESENT(BIC_Pkgpc7); 4858 BIC_NOT_PRESENT(BIC_Pkgpc7);
4822 } 4859 }
4823 if (has_hsw_msrs(family, model)) { 4860 if (has_c8910_msrs(family, model)) {
4824 BIC_PRESENT(BIC_Pkgpc8); 4861 BIC_PRESENT(BIC_Pkgpc8);
4825 BIC_PRESENT(BIC_Pkgpc9); 4862 BIC_PRESENT(BIC_Pkgpc9);
4826 BIC_PRESENT(BIC_Pkgpc10); 4863 BIC_PRESENT(BIC_Pkgpc10);
4827 } 4864 }
4828 do_irtl_hsw = has_hsw_msrs(family, model); 4865 do_irtl_hsw = has_c8910_msrs(family, model);
4829 if (has_skl_msrs(family, model)) { 4866 if (has_skl_msrs(family, model)) {
4830 BIC_PRESENT(BIC_Totl_c0); 4867 BIC_PRESENT(BIC_Totl_c0);
4831 BIC_PRESENT(BIC_Any_c0); 4868 BIC_PRESENT(BIC_Any_c0);
@@ -5123,7 +5160,7 @@ int initialize_counters(int cpu_id)
5123 5160
5124void allocate_output_buffer() 5161void allocate_output_buffer()
5125{ 5162{
5126 output_buffer = calloc(1, (1 + topo.num_cpus) * 1024); 5163 output_buffer = calloc(1, (1 + topo.num_cpus) * 2048);
5127 outp = output_buffer; 5164 outp = output_buffer;
5128 if (outp == NULL) 5165 if (outp == NULL)
5129 err(-1, "calloc output buffer"); 5166 err(-1, "calloc output buffer");
@@ -5269,7 +5306,7 @@ int get_and_dump_counters(void)
5269} 5306}
5270 5307
5271void print_version() { 5308void print_version() {
5272 fprintf(outf, "turbostat version 19.03.20" 5309 fprintf(outf, "turbostat version 19.08.31"
5273 " - Len Brown <lenb@kernel.org>\n"); 5310 " - Len Brown <lenb@kernel.org>\n");
5274} 5311}
5275 5312
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
index 1fdeef864e7c..666b325a62a2 100644
--- a/tools/power/x86/x86_energy_perf_policy/Makefile
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -9,8 +9,9 @@ ifeq ("$(origin O)", "command line")
9endif 9endif
10 10
11x86_energy_perf_policy : x86_energy_perf_policy.c 11x86_energy_perf_policy : x86_energy_perf_policy.c
12override CFLAGS += -Wall -I../../../include 12override CFLAGS += -O2 -Wall -I../../../include
13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' 13override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
14override CFLAGS += -D_FORTIFY_SOURCE=2
14 15
15%: %.c 16%: %.c
16 @mkdir -p $(BUILD_OUTPUT) 17 @mkdir -p $(BUILD_OUTPUT)
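
-D_FORTIFY_SOURCE=2 is inert unless the code is compiled with optimization, since glibc's fortified wrappers rely on the compiler knowing object sizes; that is presumably why -O2 arrives in the same change. A deliberately buggy toy showing what fortification catches:

	#include <string.h>

	int main(void)
	{
		char dst[4];

		/* With -O2 -D_FORTIFY_SOURCE=2 this becomes __memcpy_chk() and
		 * aborts at run time: the compiler knows sizeof(dst) == 4. */
		memcpy(dst, "too long for dst", 17);
		return dst[0];
	}
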
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
index 17db1c3af4d0..78c6361898b1 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -40,7 +40,7 @@ in the same processor package.
40Hardware P-States (HWP) are effectively an expansion of hardware 40Hardware P-States (HWP) are effectively an expansion of hardware
41P-state control from the opportunistic turbo-mode P-state range 41P-state control from the opportunistic turbo-mode P-state range
42to include the entire range of available P-states. 42to include the entire range of available P-states.
43On Broadwell Xeon, the initial HWP implementation, EBP influenced HWP. 43On Broadwell Xeon, the initial HWP implementation, EPB influenced HWP.
44That influence was removed in subsequent generations, 44That influence was removed in subsequent generations,
45where it was moved to the 45where it was moved to the
46Energy_Performance_Preference (EPP) field in 46Energy_Performance_Preference (EPP) field in
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 34a796b303fe..3fe1eed900d4 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv)
545 545
546 progname = argv[0]; 546 progname = argv[0];
547 547
548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw", 548 while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:",
549 long_options, &option_index)) != -1) { 549 long_options, &option_index)) != -1) {
550 switch (opt) { 550 switch (opt) {
551 case 'a': 551 case 'a':
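
In a getopt optstring, a letter followed by ':' takes a required argument; the old string ended in a bare 'w', so -w consumed no operand and its value was left behind as a stray positional argument. A minimal illustration with hypothetical options:

	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int opt;

		/* "w:" means -w requires an argument, delivered via optarg */
		while ((opt = getopt(argc, argv, "vw:")) != -1)
			if (opt == 'w')
				printf("-w argument: %s\n", optarg);
		return 0;
	}
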
@@ -1259,6 +1259,15 @@ void probe_dev_msr(void)
1259 if (system("/sbin/modprobe msr > /dev/null 2>&1")) 1259 if (system("/sbin/modprobe msr > /dev/null 2>&1"))
1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); 1260 err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
1261} 1261}
1262
1263static void get_cpuid_or_exit(unsigned int leaf,
1264 unsigned int *eax, unsigned int *ebx,
1265 unsigned int *ecx, unsigned int *edx)
1266{
1267 if (!__get_cpuid(leaf, eax, ebx, ecx, edx))
1268 errx(1, "Processor not supported\n");
1269}
1270
1262/* 1271/*
1263 * early_cpuid() 1272 * early_cpuid()
1264 * initialize turbo_is_enabled, has_hwp, has_epb 1273 * initialize turbo_is_enabled, has_hwp, has_epb
@@ -1266,15 +1275,10 @@ void probe_dev_msr(void)
1266 */ 1275 */
1267void early_cpuid(void) 1276void early_cpuid(void)
1268{ 1277{
1269 unsigned int eax, ebx, ecx, edx, max_level; 1278 unsigned int eax, ebx, ecx, edx;
1270 unsigned int fms, family, model; 1279 unsigned int fms, family, model;
1271 1280
1272 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1281 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1273
1274 if (max_level < 6)
1275 errx(1, "Processor not supported\n");
1276
1277 __get_cpuid(1, &fms, &ebx, &ecx, &edx);
1278 family = (fms >> 8) & 0xf; 1282 family = (fms >> 8) & 0xf;
1279 model = (fms >> 4) & 0xf; 1283 model = (fms >> 4) & 0xf;
1280 if (family == 6 || family == 0xf) 1284 if (family == 6 || family == 0xf)
@@ -1288,7 +1292,7 @@ void early_cpuid(void)
1288 bdx_highest_ratio = msr & 0xFF; 1292 bdx_highest_ratio = msr & 0xFF;
1289 } 1293 }
1290 1294
1291 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1295 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1292 turbo_is_enabled = (eax >> 1) & 1; 1296 turbo_is_enabled = (eax >> 1) & 1;
1293 has_hwp = (eax >> 7) & 1; 1297 has_hwp = (eax >> 7) & 1;
1294 has_epb = (ecx >> 3) & 1; 1298 has_epb = (ecx >> 3) & 1;
@@ -1306,7 +1310,7 @@ void parse_cpuid(void)
1306 1310
1307 eax = ebx = ecx = edx = 0; 1311 eax = ebx = ecx = edx = 0;
1308 1312
1309 __get_cpuid(0, &max_level, &ebx, &ecx, &edx); 1313 get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx);
1310 1314
1311 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) 1315 if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
1312 genuine_intel = 1; 1316 genuine_intel = 1;
@@ -1315,7 +1319,7 @@ void parse_cpuid(void)
1315 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", 1319 fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
1316 (char *)&ebx, (char *)&edx, (char *)&ecx); 1320 (char *)&ebx, (char *)&edx, (char *)&ecx);
1317 1321
1318 __get_cpuid(1, &fms, &ebx, &ecx, &edx); 1322 get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx);
1319 family = (fms >> 8) & 0xf; 1323 family = (fms >> 8) & 0xf;
1320 model = (fms >> 4) & 0xf; 1324 model = (fms >> 4) & 0xf;
1321 stepping = fms & 0xf; 1325 stepping = fms & 0xf;
@@ -1340,7 +1344,7 @@ void parse_cpuid(void)
1340 errx(1, "CPUID: no MSR"); 1344 errx(1, "CPUID: no MSR");
1341 1345
1342 1346
1343 __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); 1347 get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx);
1344 /* turbo_is_enabled already set */ 1348 /* turbo_is_enabled already set */
1345 /* has_hwp already set */ 1349 /* has_hwp already set */
1346 has_hwp_notify = eax & (1 << 8); 1350 has_hwp_notify = eax & (1 << 8);
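
The refactor holds because __get_cpuid() from GCC's <cpuid.h> already compares the requested leaf against CPUID.0:EAX (the maximum supported level) and returns 0 when the leaf is out of range, so routing every call through get_cpuid_or_exit() subsumes the old explicit max_level check. The shape of a call, using leaf 6 as the tool does:

	#include <cpuid.h>
	#include <err.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* fails cleanly unless CPUID leaf 6 (thermal/power) exists */
		if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx))
			errx(1, "Processor not supported");
		return (eax >> 1) & 1;	/* CPUID.6:EAX bit 1: turbo available */
	}
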
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index c085964e1d05..96752ebd938f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) 34BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
35TEST_GEN_FILES = $(BPF_OBJ_FILES) 35TEST_GEN_FILES = $(BPF_OBJ_FILES)
36 36
37BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
38TEST_FILES = $(BTF_C_FILES)
39
37# Also test sub-register code-gen if LLVM has eBPF v3 processor support which 40# Also test sub-register code-gen if LLVM has eBPF v3 processor support which
38# contains both ALU32 and JMP32 instructions. 41# contains both ALU32 and JMP32 instructions.
39SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \ 42SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
68TEST_PROGS_EXTENDED := with_addr.sh \ 71TEST_PROGS_EXTENDED := with_addr.sh \
69 with_tunnels.sh \ 72 with_tunnels.sh \
70 tcp_client.py \ 73 tcp_client.py \
71 tcp_server.py 74 tcp_server.py \
75 test_xdp_vlan.sh
72 76
73# Compile but not part of 'make run_tests' 77# Compile but not part of 'make run_tests'
74TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ 78TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index f7a0744db31e..5dc109f4c097 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
34CONFIG_MPLS_ROUTING=m 34CONFIG_MPLS_ROUTING=m
35CONFIG_MPLS_IPTUNNEL=m 35CONFIG_MPLS_IPTUNNEL=m
36CONFIG_IPV6_SIT=m 36CONFIG_IPV6_SIT=m
37CONFIG_BPF_JIT=y
diff --git a/tools/testing/selftests/bpf/test_btf_dump.c b/tools/testing/selftests/bpf/test_btf_dump.c
index 8f850823d35f..6e75dd3cb14f 100644
--- a/tools/testing/selftests/bpf/test_btf_dump.c
+++ b/tools/testing/selftests/bpf/test_btf_dump.c
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
97 } 97 }
98 98
99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name); 99 snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
100 if (access(test_file, R_OK) == -1)
101 /*
102 * When the test is run with O=, kselftest copies TEST_FILES
103 * without preserving the directory structure.
104 */
105 snprintf(test_file, sizeof(test_file), "%s.c",
106 test_case->name);
100 /* 107 /*
101 * Diff test output and expected test output, contained between 108 * Diff test output and expected test output, contained between
102 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case. 109 * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 2fc4625c1a15..655729004391 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 20 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 21 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
22 BPF_FUNC_get_local_storage), 22 BPF_FUNC_get_local_storage),
23 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 23 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), 24 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
25 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), 25 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
26 26
27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ 27 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ 28 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
30 BPF_FUNC_get_local_storage), 30 BPF_FUNC_get_local_storage),
31 BPF_MOV64_IMM(BPF_REG_1, 1), 31 BPF_MOV64_IMM(BPF_REG_1, 1),
32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), 32 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
33 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), 33 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), 34 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 35 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
36 BPF_EXIT_INSN(), 36 BPF_EXIT_INSN(),
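
The cgroup local-storage values tested here are u64, and the program already applies a doubleword BPF_STX_XADD to one of them; the old word-sized loads and stores touched only 4 of the 8 bytes, and on big-endian hosts the wrong 4, so the test failed there. Loosely, in C terms, the fixed accesses amount to:

	#include <stdint.h>

	/* Sketch: every access now covers the full 64-bit counter. */
	static uint64_t bump_and_check(uint64_t *counter)
	{
		*counter += 1;				/* BPF_DW load/add/store */
		__sync_fetch_and_add(counter, 1);	/* BPF_STX_XADD, 64-bit  */
		return *counter & 0x1;			/* BPF_DW load, AND 0x1  */
	}
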
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fb679ac3d4b0..0e6652733462 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
13#include <bpf/bpf.h> 13#include <bpf/bpf.h>
14 14
15#include "cgroup_helpers.h" 15#include "cgroup_helpers.h"
16#include "bpf_endian.h"
16#include "bpf_rlimit.h" 17#include "bpf_rlimit.h"
17#include "bpf_util.h" 18#include "bpf_util.h"
18 19
@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
232 /* if (ip == expected && port == expected) */ 233 /* if (ip == expected && port == expected) */
233 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 234 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
234 offsetof(struct bpf_sock, src_ip6[3])), 235 offsetof(struct bpf_sock, src_ip6[3])),
235 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), 236 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
237 __bpf_constant_ntohl(0x00000001), 4),
236 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 238 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
237 offsetof(struct bpf_sock, src_port)), 239 offsetof(struct bpf_sock, src_port)),
238 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), 240 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
261 /* if (ip == expected && port == expected) */ 263 /* if (ip == expected && port == expected) */
262 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 264 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
263 offsetof(struct bpf_sock, src_ip4)), 265 offsetof(struct bpf_sock, src_ip4)),
264 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), 266 BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
267 __bpf_constant_ntohl(0x7F000001), 4),
265 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, 268 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
266 offsetof(struct bpf_sock, src_port)), 269 offsetof(struct bpf_sock, src_port)),
267 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), 270 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
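
struct bpf_sock reports src_ip4/src_ip6 in network byte order, while literals such as 0x7F000001 (127.0.0.1) are host order; __bpf_constant_ntohl() makes the comparison endian-correct, compiling to the swapped constant 0x0100007F that the old code hard-coded on little-endian and to the identity on big-endian. A tiny demonstration:

	#include <arpa/inet.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int host = 0x7F000001;		/* 127.0.0.1, host order */
		unsigned int wire = htonl(host);	/* what bpf_sock reports */

		printf("host=%#x wire=%#x\n", host, wire); /* wire=0x100007f on LE */
		return 0;
	}
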
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
index 5e980a5ab69d..1fc4e61e9f9f 100644
--- a/tools/testing/selftests/bpf/verifier/loops1.c
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -159,3 +159,31 @@
159 .errstr = "loop detected", 159 .errstr = "loop detected",
160 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 160 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
161}, 161},
162{
163 "not-taken loop with back jump to 1st insn",
164 .insns = {
165 BPF_MOV64_IMM(BPF_REG_0, 123),
166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2),
167 BPF_EXIT_INSN(),
168 },
169 .result = ACCEPT,
170 .prog_type = BPF_PROG_TYPE_XDP,
171 .retval = 123,
172},
173{
174 "taken loop with back jump to 1st insn",
175 .insns = {
176 BPF_MOV64_IMM(BPF_REG_1, 10),
177 BPF_MOV64_IMM(BPF_REG_2, 0),
178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
179 BPF_EXIT_INSN(),
180 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
181 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
182 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3),
183 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
184 BPF_EXIT_INSN(),
185 },
186 .result = ACCEPT,
187 .prog_type = BPF_PROG_TYPE_XDP,
188 .retval = 55,
189},
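
Both new cases exercise backward jumps to instruction 0, which the verifier's bounded-loop support now has to accept. The second program calls into a small subprogram whose loop sums 10+9+...+1; the expected retval 55 is just that sum. In C terms:

	/* Sketch: C equivalent of the "taken loop" subprogram. */
	static int taken_loop(void)
	{
		int r1 = 10, r2 = 0;

		do {
			r2 += r1;	/* BPF_ALU64_REG(BPF_ADD, R2, R1)  */
			r1 -= 1;	/* BPF_ALU64_IMM(BPF_SUB, R1, 1)   */
		} while (r1 != 0);	/* BPF_JMP_IMM(BPF_JNE, R1, 0, -3) */

		return r2;		/* 55 */
	}
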
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
index 4059014d93ea..4912d23844bc 100644
--- a/tools/testing/selftests/kvm/include/evmcs.h
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
220struct hv_enlightened_vmcs *current_evmcs; 220struct hv_enlightened_vmcs *current_evmcs;
221struct hv_vp_assist_page *current_vp_assist; 221struct hv_vp_assist_page *current_vp_assist;
222 222
223int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
224
223static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) 225static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
224{ 226{
225 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | 227 u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 6cb34a0fa200..0a5e487dbc50 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
1060 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i", 1060 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
1061 r); 1061 r);
1062 1062
1063 r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs); 1063 if (kvm_check_cap(KVM_CAP_XCRS)) {
1064 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i", 1064 r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
1065 r); 1065 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
1066 r);
1067 }
1066 1068
1067 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs); 1069 r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
1068 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i", 1070 TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
1103 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i", 1105 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
1104 r); 1106 r);
1105 1107
1106 r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs); 1108 if (kvm_check_cap(KVM_CAP_XCRS)) {
1107 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i", 1109 r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
1108 r); 1110 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
1111 r);
1112 }
1109 1113
1110 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs); 1114 r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
1111 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i", 1115 TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
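
KVM only implements KVM_GET_XCRS/KVM_SET_XCRS where it advertises KVM_CAP_XCRS (x86 extended control registers, XCR0 in practice); on hosts that do not report the capability, the unconditional ioctl failed and took the whole save/restore helper down with it. Because both directions use the same guard, a state blob saved without XCRs is also restored without them, keeping the pair symmetric:

	/* Sketch of the kselftest idiom: optional state, keyed on a capability. */
	if (kvm_check_cap(KVM_CAP_XCRS)) {
		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
		TEST_ASSERT(r == 0, "KVM_GET_XCRS failed, r: %i", r);
	}
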
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 204f847bd065..9cef0455b819 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -12,6 +12,26 @@
12 12
13bool enable_evmcs; 13bool enable_evmcs;
14 14
15int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
16{
17 uint16_t evmcs_ver;
18
19 struct kvm_enable_cap enable_evmcs_cap = {
20 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
21 .args[0] = (unsigned long)&evmcs_ver
22 };
23
24 vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
25
26 /* KVM should return supported EVMCS version range */
27 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
28 (evmcs_ver & 0xff) > 0,
29 "Incorrect EVMCS version range: %x:%x\n",
30 evmcs_ver & 0xff, evmcs_ver >> 8);
31
32 return evmcs_ver;
33}
34
15/* Allocate memory regions for nested VMX tests. 35/* Allocate memory regions for nested VMX tests.
16 * 36 *
17 * Input Args: 37 * Input Args:
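
KVM writes the supported enlightened-VMCS version range back through the uint16_t whose address is passed in args[0]: low byte the lowest supported version, high byte the highest. The TEST_ASSERT therefore demands high >= low > 0. Decoding sketch:

	#include <stdint.h>

	/* Sketch: unpack the range KVM_ENABLE_CAP wrote back. */
	static int evmcs_range_ok(uint16_t evmcs_ver)
	{
		uint8_t lo = evmcs_ver & 0xff;	/* lowest supported version  */
		uint8_t hi = evmcs_ver >> 8;	/* highest supported version */

		return hi >= lo && lo > 0;
	}
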
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index f95c08343b48..92915e6408e7 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
79 struct kvm_x86_state *state; 79 struct kvm_x86_state *state;
80 struct ucall uc; 80 struct ucall uc;
81 int stage; 81 int stage;
82 uint16_t evmcs_ver;
83 struct kvm_enable_cap enable_evmcs_cap = {
84 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
85 .args[0] = (unsigned long)&evmcs_ver
86 };
87 82
88 /* Create VM */ 83 /* Create VM */
89 vm = vm_create_default(VCPU_ID, 0, guest_code); 84 vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
96 exit(KSFT_SKIP); 91 exit(KSFT_SKIP);
97 } 92 }
98 93
99 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 94 vcpu_enable_evmcs(vm, VCPU_ID);
100
101 /* KVM should return supported EVMCS version range */
102 TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
103 (evmcs_ver & 0xff) > 0,
104 "Incorrect EVMCS version range: %x:%x\n",
105 evmcs_ver & 0xff, evmcs_ver >> 8);
106 95
107 run = vcpu_state(vm, VCPU_ID); 96 run = vcpu_state(vm, VCPU_ID);
108 97
@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
146 kvm_vm_restart(vm, O_RDWR); 135 kvm_vm_restart(vm, O_RDWR);
147 vm_vcpu_add(vm, VCPU_ID); 136 vm_vcpu_add(vm, VCPU_ID);
148 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); 137 vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
149 vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 138 vcpu_enable_evmcs(vm, VCPU_ID);
150 vcpu_load_state(vm, VCPU_ID, state); 139 vcpu_load_state(vm, VCPU_ID, state);
151 run = vcpu_state(vm, VCPU_ID); 140 run = vcpu_state(vm, VCPU_ID);
152 free(state); 141 free(state);
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
index f72b3043db0e..ee59831fbc98 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -18,6 +18,7 @@
18#include "test_util.h" 18#include "test_util.h"
19#include "kvm_util.h" 19#include "kvm_util.h"
20#include "processor.h" 20#include "processor.h"
21#include "vmx.h"
21 22
22#define VCPU_ID 0 23#define VCPU_ID 0
23 24
@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
106{ 107{
107 struct kvm_vm *vm; 108 struct kvm_vm *vm;
108 int rv; 109 int rv;
109 uint16_t evmcs_ver;
110 struct kvm_cpuid2 *hv_cpuid_entries; 110 struct kvm_cpuid2 *hv_cpuid_entries;
111 struct kvm_enable_cap enable_evmcs_cap = {
112 .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
113 .args[0] = (unsigned long)&evmcs_ver
114 };
115 111
116 /* Tell stdout not to buffer its content */ 112 /* Tell stdout not to buffer its content */
117 setbuf(stdout, NULL); 113 setbuf(stdout, NULL);
@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
136 132
137 free(hv_cpuid_entries); 133 free(hv_cpuid_entries);
138 134
139 rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap); 135 if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
140
141 if (rv) {
142 fprintf(stderr, 136 fprintf(stderr,
143 "Enlightened VMCS is unsupported, skip related test\n"); 137 "Enlightened VMCS is unsupported, skip related test\n");
144 goto vm_free; 138 goto vm_free;
145 } 139 }
146 140
141 vcpu_enable_evmcs(vm, VCPU_ID);
142
147 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm); 143 hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
148 if (!hv_cpuid_entries) 144 if (!hv_cpuid_entries)
149 return 1; 145 return 1;
diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 40050e44ec0a..f9334bd3cce9 100644
--- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
99 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO); 99 msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
100 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, 100 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
101 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO); 101 msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
102 test_msr_platform_info_disabled(vm);
103 test_msr_platform_info_enabled(vm); 102 test_msr_platform_info_enabled(vm);
103 test_msr_platform_info_disabled(vm);
104 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info); 104 vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
105 105
106 kvm_vm_free(vm); 106 kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
index ed7218d166da..853e370e8a39 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
@@ -25,24 +25,17 @@
25#define VMCS12_REVISION 0x11e57ed0 25#define VMCS12_REVISION 0x11e57ed0
26#define VCPU_ID 5 26#define VCPU_ID 5
27 27
28bool have_evmcs;
29
28void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state) 30void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
29{ 31{
30 volatile struct kvm_run *run;
31
32 vcpu_nested_state_set(vm, VCPU_ID, state, false); 32 vcpu_nested_state_set(vm, VCPU_ID, state, false);
33 run = vcpu_state(vm, VCPU_ID);
34 vcpu_run(vm, VCPU_ID);
35 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
36 "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
37 run->exit_reason,
38 exit_reason_str(run->exit_reason));
39} 33}
40 34
41void test_nested_state_expect_errno(struct kvm_vm *vm, 35void test_nested_state_expect_errno(struct kvm_vm *vm,
42 struct kvm_nested_state *state, 36 struct kvm_nested_state *state,
43 int expected_errno) 37 int expected_errno)
44{ 38{
45 volatile struct kvm_run *run;
46 int rv; 39 int rv;
47 40
48 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true); 41 rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
50 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)", 43 "Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
51 strerror(expected_errno), expected_errno, rv, strerror(errno), 44 strerror(expected_errno), expected_errno, rv, strerror(errno),
52 errno); 45 errno);
53 run = vcpu_state(vm, VCPU_ID);
54 vcpu_run(vm, VCPU_ID);
55 TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
56 "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
57 run->exit_reason,
58 exit_reason_str(run->exit_reason));
59} 46}
60 47
61void test_nested_state_expect_einval(struct kvm_vm *vm, 48void test_nested_state_expect_einval(struct kvm_vm *vm,
@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
90{ 77{
91 memset(state, 0, size); 78 memset(state, 0, size);
92 state->flags = KVM_STATE_NESTED_GUEST_MODE | 79 state->flags = KVM_STATE_NESTED_GUEST_MODE |
93 KVM_STATE_NESTED_RUN_PENDING | 80 KVM_STATE_NESTED_RUN_PENDING;
94 KVM_STATE_NESTED_EVMCS; 81 if (have_evmcs)
82 state->flags |= KVM_STATE_NESTED_EVMCS;
95 state->format = 0; 83 state->format = 0;
96 state->size = size; 84 state->size = size;
97 state->hdr.vmx.vmxon_pa = 0x1000; 85 state->hdr.vmx.vmxon_pa = 0x1000;
@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
141 /* 129 /*
142 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without 130 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
143 * setting the nested state but flags other than eVMCS must be clear. 131 * setting the nested state but flags other than eVMCS must be clear.
132 * The eVMCS flag can be set if the enlightened VMCS capability has
133 * been enabled.
144 */ 134 */
145 set_default_vmx_state(state, state_sz); 135 set_default_vmx_state(state, state_sz);
146 state->hdr.vmx.vmxon_pa = -1ull; 136 state->hdr.vmx.vmxon_pa = -1ull;
147 state->hdr.vmx.vmcs12_pa = -1ull; 137 state->hdr.vmx.vmcs12_pa = -1ull;
148 test_nested_state_expect_einval(vm, state); 138 test_nested_state_expect_einval(vm, state);
149 139
150 state->flags = KVM_STATE_NESTED_EVMCS; 140 state->flags &= KVM_STATE_NESTED_EVMCS;
141 if (have_evmcs) {
142 test_nested_state_expect_einval(vm, state);
143 vcpu_enable_evmcs(vm, VCPU_ID);
144 }
151 test_nested_state(vm, state); 145 test_nested_state(vm, state);
152 146
153 /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */ 147 /* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
232 struct kvm_nested_state state; 226 struct kvm_nested_state state;
233 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1); 227 struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
234 228
229 have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
230
235 if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) { 231 if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
236 printf("KVM_CAP_NESTED_STATE not available, skipping test\n"); 232 printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
237 exit(KSFT_SKIP); 233 exit(KSFT_SKIP);
diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
index 41476399e184..f6e65674b83c 100755
--- a/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
+++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.sh
@@ -30,7 +30,7 @@ do_test() {
30 ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1" 30 ip netns exec "${NETNS}" ./tcp_fastopen_backup_key "$1"
31 val=$(ip netns exec "${NETNS}" nstat -az | \ 31 val=$(ip netns exec "${NETNS}" nstat -az | \
32 grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}') 32 grep TcpExtTCPFastOpenPassiveFail | awk '{print $2}')
33 if [ $val -ne 0 ]; then 33 if [ "$val" != 0 ]; then
34 echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero" 34 echo "FAIL: TcpExtTCPFastOpenPassiveFail non-zero"
35 return 1 35 return 1
36 fi 36 fi
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index fe52488a6f72..16571ac1dab4 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -321,4 +321,52 @@ else
321 ip netns exec nsr1 nft list ruleset 321 ip netns exec nsr1 nft list ruleset
322fi 322fi
323 323
324KEY_SHA="0x"$(ps -xaf | sha1sum | cut -d " " -f 1)
325KEY_AES="0x"$(ps -xaf | md5sum | cut -d " " -f 1)
326SPI1=$RANDOM
327SPI2=$RANDOM
328
329if [ $SPI1 -eq $SPI2 ]; then
330 SPI2=$((SPI2+1))
331fi
332
333do_esp() {
334 local ns=$1
335 local me=$2
336 local remote=$3
337 local lnet=$4
338 local rnet=$5
339 local spi_out=$6
340 local spi_in=$7
341
342 ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
343 ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
344
345 # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
346 ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow
347 # to fwd decrypted packets after esp processing:
348 ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 1 action allow
349
350}
351
352do_esp nsr1 192.168.10.1 192.168.10.2 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
353
354do_esp nsr2 192.168.10.2 192.168.10.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
355
356ip netns exec nsr1 nft delete table ip nat
357
358# restore default routes
359ip -net ns2 route del 192.168.10.1 via 10.0.2.1
360ip -net ns2 route add default via 10.0.2.1
361ip -net ns2 route add default via dead:2::1
362
363test_tcp_forwarding ns1 ns2
364if [ $? -eq 0 ] ;then
365 echo "PASS: ipsec tunnel mode for ns1/ns2"
366else
367 echo "FAIL: ipsec tunnel mode for ns1/ns2"
368 ip netns exec nsr1 nft list ruleset 1>&2
369 ip netns exec nsr1 cat /proc/net/xfrm_stat 1>&2
370fi
371
324exit $ret 372exit $ret
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
index bf5ebf59c2d4..9cdd2e31ac2c 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
@@ -670,5 +670,52 @@
670 "teardown": [ 670 "teardown": [
671 "$TC actions flush action skbedit" 671 "$TC actions flush action skbedit"
672 ] 672 ]
673 },
674 {
675 "id": "630c",
676 "name": "Add batch of 32 skbedit actions with all parameters and cookie",
677 "category": [
678 "actions",
679 "skbedit"
680 ],
681 "setup": [
682 [
683 "$TC actions flush action skbedit",
684 0,
685 1,
686 255
687 ]
688 ],
689 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
690 "expExitCode": "0",
691 "verifyCmd": "$TC actions list action skbedit",
692 "matchPattern": "^[ \t]+index [0-9]+ ref",
693 "matchCount": "32",
694 "teardown": [
695 "$TC actions flush action skbedit"
696 ]
697 },
698 {
699 "id": "706d",
700 "name": "Delete batch of 32 skbedit actions with all parameters",
701 "category": [
702 "actions",
703 "skbedit"
704 ],
705 "setup": [
706 [
707 "$TC actions flush action skbedit",
708 0,
709 1,
710 255
711 ],
712 "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit queue_mapping 2 priority 10 mark 7/0xaabbccdd ptype host inheritdsfield index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
713 ],
714 "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action skbedit index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
715 "expExitCode": "0",
716 "verifyCmd": "$TC actions list action skbedit",
717 "matchPattern": "^[ \t]+index [0-9]+ ref",
718 "matchCount": "0",
719 "teardown": []
673 } 720 }
674] 721]
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index a8a6a0c883f1..6af5c91337f2 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -86,6 +86,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
86 unsigned int len; 86 unsigned int len;
87 int mask; 87 int mask;
88 88
89 /* Detect an already handled MMIO return */
90 if (unlikely(!vcpu->mmio_needed))
91 return 0;
92
93 vcpu->mmio_needed = 0;
94
89 if (!run->mmio.is_write) { 95 if (!run->mmio.is_write) {
90 len = run->mmio.len; 96 len = run->mmio.len;
91 if (len > sizeof(unsigned long)) 97 if (len > sizeof(unsigned long))
@@ -188,6 +194,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
188 run->mmio.is_write = is_write; 194 run->mmio.is_write = is_write;
189 run->mmio.phys_addr = fault_ipa; 195 run->mmio.phys_addr = fault_ipa;
190 run->mmio.len = len; 196 run->mmio.len = len;
197 vcpu->mmio_needed = 1;
191 198
192 if (!ret) { 199 if (!ret) {
193 /* We handled the access successfully in the kernel. */ 200 /* We handled the access successfully in the kernel. */
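
The flag closes a hole where userspace could invoke KVM_RUN with no MMIO exit outstanding, or complete the same exit twice, and the return path would then replay stale run->mmio data into the guest. io_mem_abort() marks an exit pending when it generates one, and kvm_handle_mmio_return() consumes the mark exactly once. The handshake, reduced to a stub:

	/* Sketch: one-shot marker so a completed MMIO exit is not replayed. */
	struct vcpu_stub { int mmio_needed; };	/* hypothetical stand-in */

	static int handle_mmio_return(struct vcpu_stub *vcpu)
	{
		if (!vcpu->mmio_needed)		/* nothing outstanding: no-op */
			return 0;
		vcpu->mmio_needed = 0;		/* consume exactly once */
		/* ... copy run->mmio data back into the guest register ... */
		return 0;
	}
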
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index bdbc297d06fb..e621b5d45b27 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -8,6 +8,7 @@
8#include <linux/cpu.h> 8#include <linux/cpu.h>
9#include <linux/kvm_host.h> 9#include <linux/kvm_host.h>
10#include <kvm/arm_vgic.h> 10#include <kvm/arm_vgic.h>
11#include <asm/kvm_emulate.h>
11#include <asm/kvm_mmu.h> 12#include <asm/kvm_mmu.h>
12#include "vgic.h" 13#include "vgic.h"
13 14
@@ -164,12 +165,18 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
164 irq->vcpu = NULL; 165 irq->vcpu = NULL;
165 irq->target_vcpu = vcpu0; 166 irq->target_vcpu = vcpu0;
166 kref_init(&irq->refcount); 167 kref_init(&irq->refcount);
167 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) { 168 switch (dist->vgic_model) {
169 case KVM_DEV_TYPE_ARM_VGIC_V2:
168 irq->targets = 0; 170 irq->targets = 0;
169 irq->group = 0; 171 irq->group = 0;
170 } else { 172 break;
173 case KVM_DEV_TYPE_ARM_VGIC_V3:
171 irq->mpidr = 0; 174 irq->mpidr = 0;
172 irq->group = 1; 175 irq->group = 1;
176 break;
177 default:
178 kfree(dist->spis);
179 return -EINVAL;
173 } 180 }
174 } 181 }
175 return 0; 182 return 0;
@@ -209,7 +216,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
209 irq->intid = i; 216 irq->intid = i;
210 irq->vcpu = NULL; 217 irq->vcpu = NULL;
211 irq->target_vcpu = vcpu; 218 irq->target_vcpu = vcpu;
212 irq->targets = 1U << vcpu->vcpu_id;
213 kref_init(&irq->refcount); 219 kref_init(&irq->refcount);
214 if (vgic_irq_is_sgi(i)) { 220 if (vgic_irq_is_sgi(i)) {
215 /* SGIs */ 221 /* SGIs */
@@ -219,11 +225,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
219 /* PPIs */ 225 /* PPIs */
220 irq->config = VGIC_CONFIG_LEVEL; 226 irq->config = VGIC_CONFIG_LEVEL;
221 } 227 }
222
223 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
224 irq->group = 1;
225 else
226 irq->group = 0;
227 } 228 }
228 229
229 if (!irqchip_in_kernel(vcpu->kvm)) 230 if (!irqchip_in_kernel(vcpu->kvm))
@@ -286,10 +287,19 @@ int vgic_init(struct kvm *kvm)
286 287
287 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { 288 for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
288 struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; 289 struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
289 if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) 290 switch (dist->vgic_model) {
291 case KVM_DEV_TYPE_ARM_VGIC_V3:
290 irq->group = 1; 292 irq->group = 1;
291 else 293 irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
294 break;
295 case KVM_DEV_TYPE_ARM_VGIC_V2:
292 irq->group = 0; 296 irq->group = 0;
297 irq->targets = 1U << idx;
298 break;
299 default:
300 ret = -EINVAL;
301 goto out;
302 }
293 } 303 }
294 } 304 }
295 305
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 44efc2ff863f..0d090482720d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -211,6 +211,12 @@ static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
211 vgic_irq_set_phys_active(irq, true); 211 vgic_irq_set_phys_active(irq, true);
212} 212}
213 213
214static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
215{
216 return (vgic_irq_is_sgi(irq->intid) &&
217 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
218}
219
214void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, 220void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
215 gpa_t addr, unsigned int len, 221 gpa_t addr, unsigned int len,
216 unsigned long val) 222 unsigned long val)
@@ -223,6 +229,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
223 for_each_set_bit(i, &val, len * 8) { 229 for_each_set_bit(i, &val, len * 8) {
224 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 230 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
225 231
232 /* GICD_ISPENDR0 SGI bits are WI */
233 if (is_vgic_v2_sgi(vcpu, irq)) {
234 vgic_put_irq(vcpu->kvm, irq);
235 continue;
236 }
237
226 raw_spin_lock_irqsave(&irq->irq_lock, flags); 238 raw_spin_lock_irqsave(&irq->irq_lock, flags);
227 if (irq->hw) 239 if (irq->hw)
228 vgic_hw_irq_spending(vcpu, irq, is_uaccess); 240 vgic_hw_irq_spending(vcpu, irq, is_uaccess);
@@ -270,6 +282,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
270 for_each_set_bit(i, &val, len * 8) { 282 for_each_set_bit(i, &val, len * 8) {
271 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); 283 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
272 284
285 /* GICD_ICPENDR0 SGI bits are WI */
286 if (is_vgic_v2_sgi(vcpu, irq)) {
287 vgic_put_irq(vcpu->kvm, irq);
288 continue;
289 }
290
273 raw_spin_lock_irqsave(&irq->irq_lock, flags); 291 raw_spin_lock_irqsave(&irq->irq_lock, flags);
274 292
275 if (irq->hw) 293 if (irq->hw)
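
"WI" is write-ignored. On a GICv2, SGI pending state is tracked per source CPU and is only set and cleared through the dedicated byte-per-SGI registers, so the architecture defines the SGI bits of GICD_ISPENDR0/GICD_ICPENDR0 as WI; skipping them here stops the emulation from clobbering per-source state. Register layout, for reference (offsets from the GICv2 specification):

	/* GICv2: per-source SGI pending registers, one byte per SGI. */
	#define GICD_CPENDSGIR(n)	(0xF10 + (n) * 4)	/* clear-pending */
	#define GICD_SPENDSGIR(n)	(0xF20 + (n) * 4)	/* set-pending   */
	/* In each byte, bit s means "SGI pending from source CPU s"; a flat
	 * GICD_ISPENDR0 bit cannot express that, hence the bits are WI. */
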
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 96aab77d0471..b00aa304c260 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -184,7 +184,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
184 if (vgic_irq_is_sgi(irq->intid)) { 184 if (vgic_irq_is_sgi(irq->intid)) {
185 u32 src = ffs(irq->source); 185 u32 src = ffs(irq->source);
186 186
187 BUG_ON(!src); 187 if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
188 irq->intid))
189 return;
190
188 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; 191 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
189 irq->source &= ~(1 << (src - 1)); 192 irq->source &= ~(1 << (src - 1));
190 if (irq->source) { 193 if (irq->source) {
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 0c653a1e5215..a4ad431c92a9 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -167,7 +167,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
167 model == KVM_DEV_TYPE_ARM_VGIC_V2) { 167 model == KVM_DEV_TYPE_ARM_VGIC_V2) {
168 u32 src = ffs(irq->source); 168 u32 src = ffs(irq->source);
169 169
170 BUG_ON(!src); 170 if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
171 irq->intid))
172 return;
173
171 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT; 174 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
172 irq->source &= ~(1 << (src - 1)); 175 irq->source &= ~(1 << (src - 1));
173 if (irq->source) { 176 if (irq->source) {
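
WARN_RATELIMIT() (include/linux/ratelimit.h) evaluates to the truth value of its condition and emits at most a rate-limited warning, so the callers can now skip populating the list register when an SGI unexpectedly has no source bit, instead of panicking the host over guest-reachable state as BUG_ON() did. The caller pattern, in kernel context:

	/* Sketch: degrade a fatal assertion into a recoverable, bounded warning. */
	u32 src = ffs(irq->source);

	if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n", irq->intid))
		return;		/* drop this LR update; the guest keeps running */
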
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 13d4b38a94ec..e7bde65ba67c 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -254,6 +254,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
254 bool penda, pendb; 254 bool penda, pendb;
255 int ret; 255 int ret;
256 256
257 /*
258 * list_sort may call this function with the same element when
259 * the list is fairly long.
260 */
261 if (unlikely(irqa == irqb))
262 return 0;
263
257 raw_spin_lock(&irqa->irq_lock); 264 raw_spin_lock(&irqa->irq_lock);
258 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); 265 raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
259 266
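
The early return matters because the comparator takes both IRQs' raw spinlocks in order; handed the same element twice, raw_spin_lock_nested() would try to acquire a lock this CPU already holds and self-deadlock. Returning 0 (equal) for the aliased pair is both safe and correct. The general shape:

	/* Sketch: a comparator that locks its operands must tolerate a == b. */
	struct node { int key; };

	static int node_cmp(struct node *a, struct node *b)
	{
		if (a == b)		/* same element: trivially equal,   */
			return 0;	/* and locking twice would deadlock */
		/* lock a; lock b nested; compare; unlock both */
		return a->key - b->key;
	}
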