aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.clang-format43
-rw-r--r--Documentation/ABI/testing/sysfs-block9
-rw-r--r--Documentation/ABI/testing/sysfs-block-zram11
-rw-r--r--Documentation/block/bfq-iosched.txt7
-rw-r--r--Documentation/block/null_blk.txt3
-rw-r--r--Documentation/block/queue-sysfs.txt7
-rw-r--r--Documentation/blockdev/zram.txt74
-rw-r--r--Documentation/bpf/bpf_design_QA.rst11
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-capacity.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/sp810.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/topology.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/marvell,mmp2.txt2
-rw-r--r--Documentation/devicetree/bindings/display/arm,pl11x.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mvebu.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt2
-rw-r--r--Documentation/devicetree/bindings/reset/socfpga-reset.txt3
-rw-r--r--Documentation/devicetree/bindings/reset/uniphier-reset.txt25
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt4
-rw-r--r--Documentation/driver-model/bus.txt8
-rw-r--r--Documentation/fb/fbcon.txt8
-rw-r--r--Documentation/features/core/cBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/eBPF-JIT/arch-support.txt1
-rw-r--r--Documentation/features/core/generic-idle-thread/arch-support.txt1
-rw-r--r--Documentation/features/core/jump-labels/arch-support.txt1
-rw-r--r--Documentation/features/core/tracehook/arch-support.txt1
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt1
-rw-r--r--Documentation/features/debug/gcov-profile-all/arch-support.txt1
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes-on-ftrace/arch-support.txt1
-rw-r--r--Documentation/features/debug/kprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/kretprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/optprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/stackprotector/arch-support.txt1
-rw-r--r--Documentation/features/debug/uprobes/arch-support.txt1
-rw-r--r--Documentation/features/debug/user-ret-profiler/arch-support.txt1
-rw-r--r--Documentation/features/io/dma-contiguous/arch-support.txt1
-rw-r--r--Documentation/features/locking/cmpxchg-local/arch-support.txt1
-rw-r--r--Documentation/features/locking/lockdep/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-rwlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/queued-spinlocks/arch-support.txt1
-rw-r--r--Documentation/features/locking/rwsem-optimized/arch-support.txt1
-rw-r--r--Documentation/features/perf/kprobes-event/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-regs/arch-support.txt1
-rw-r--r--Documentation/features/perf/perf-stackdump/arch-support.txt1
-rw-r--r--Documentation/features/sched/membarrier-sync-core/arch-support.txt1
-rw-r--r--Documentation/features/sched/numa-balancing/arch-support.txt1
-rw-r--r--Documentation/features/seccomp/seccomp-filter/arch-support.txt1
-rw-r--r--Documentation/features/time/arch-tick-broadcast/arch-support.txt1
-rw-r--r--Documentation/features/time/clockevents/arch-support.txt1
-rw-r--r--Documentation/features/time/context-tracking/arch-support.txt1
-rw-r--r--Documentation/features/time/irq-time-acct/arch-support.txt1
-rw-r--r--Documentation/features/time/modern-timekeeping/arch-support.txt1
-rw-r--r--Documentation/features/time/virt-cpuacct/arch-support.txt1
-rw-r--r--Documentation/features/vm/ELF-ASLR/arch-support.txt1
-rw-r--r--Documentation/features/vm/PG_uncached/arch-support.txt1
-rw-r--r--Documentation/features/vm/THP/arch-support.txt1
-rw-r--r--Documentation/features/vm/TLB/arch-support.txt1
-rw-r--r--Documentation/features/vm/huge-vmap/arch-support.txt1
-rw-r--r--Documentation/features/vm/ioremap_prot/arch-support.txt1
-rw-r--r--Documentation/features/vm/numa-memblock/arch-support.txt1
-rw-r--r--Documentation/features/vm/pte_special/arch-support.txt1
-rw-r--r--Documentation/filesystems/sysfs.txt4
-rw-r--r--Documentation/networking/index.rst26
-rw-r--r--Documentation/networking/rxrpc.txt45
-rw-r--r--Documentation/networking/snmp_counter.rst130
-rw-r--r--Documentation/networking/timestamping.txt4
-rw-r--r--Documentation/trace/coresight-cpu-debug.txt2
-rw-r--r--Documentation/virtual/kvm/amd-memory-encryption.rst2
-rw-r--r--Documentation/x86/resctrl_ui.txt2
-rw-r--r--MAINTAINERS23
-rw-r--r--Makefile10
-rw-r--r--arch/arm/boot/dts/da850-evm.dts31
-rw-r--r--arch/arm/boot/dts/da850-lcdk.dts38
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi4
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c4
-rw-r--r--arch/arm/mach-integrator/impd1.c8
-rw-r--r--arch/arm/mach-socfpga/socfpga.c4
-rw-r--r--arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap806.dtsi17
-rw-r--r--arch/arm64/configs/defconfig4
-rw-r--r--arch/arm64/include/asm/asm-prototypes.h2
-rw-r--r--arch/arm64/include/asm/cache.h4
-rw-r--r--arch/arm64/include/asm/mmu.h44
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h4
-rw-r--r--arch/arm64/kernel/cpu_errata.c2
-rw-r--r--arch/arm64/kernel/cpufeature.c9
-rw-r--r--arch/arm64/kernel/head.S1
-rw-r--r--arch/arm64/kernel/kaslr.c8
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c4
-rw-r--r--arch/c6x/include/uapi/asm/Kbuild1
-rw-r--r--arch/csky/include/asm/io.h25
-rw-r--r--arch/csky/include/asm/pgalloc.h43
-rw-r--r--arch/csky/kernel/module.c38
-rw-r--r--arch/h8300/Makefile2
-rw-r--r--arch/h8300/include/uapi/asm/Kbuild1
-rw-r--r--arch/hexagon/include/uapi/asm/Kbuild1
-rw-r--r--arch/ia64/Makefile2
-rw-r--r--arch/m68k/include/uapi/asm/Kbuild1
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/bcm47xx/setup.c31
-rw-r--r--arch/mips/cavium-octeon/setup.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h2
-rw-r--r--arch/mips/jazz/jazzdma.c5
-rw-r--r--arch/mips/lantiq/irq.c77
-rw-r--r--arch/mips/lantiq/xway/dma.c6
-rw-r--r--arch/mips/pci/msi-octeon.c4
-rw-r--r--arch/nds32/Makefile8
-rw-r--r--arch/openrisc/Makefile3
-rw-r--r--arch/openrisc/include/asm/uaccess.h8
-rw-r--r--arch/openrisc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/powerpc/kernel/head_8xx.S3
-rw-r--r--arch/powerpc/kernel/signal_64.c7
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c7
-rw-r--r--arch/powerpc/perf/perf_regs.c6
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c6
-rw-r--r--arch/powerpc/platforms/chrp/setup.c3
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c3
-rw-r--r--arch/powerpc/platforms/pseries/pci.c2
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c7
-rw-r--r--arch/riscv/Kconfig4
-rw-r--r--arch/riscv/include/asm/module.h28
-rw-r--r--arch/riscv/include/asm/ptrace.h5
-rw-r--r--arch/riscv/include/asm/syscall.h10
-rw-r--r--arch/riscv/include/asm/thread_info.h6
-rw-r--r--arch/riscv/include/asm/unistd.h2
-rw-r--r--arch/riscv/kernel/entry.S4
-rw-r--r--arch/riscv/kernel/module-sections.c30
-rw-r--r--arch/riscv/kernel/ptrace.c9
-rw-r--r--arch/riscv/kernel/setup.c9
-rw-r--r--arch/riscv/kernel/smp.c43
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S8
-rw-r--r--arch/unicore32/include/uapi/asm/Kbuild1
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/include/asm/resctrl_sched.h4
-rw-r--r--arch/x86/include/asm/uaccess.h2
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile4
-rw-r--r--arch/x86/kvm/svm.c3
-rw-r--r--arch/x86/kvm/vmx/nested.c3
-rw-r--r--arch/x86/kvm/vmx/vmx.c4
-rw-r--r--arch/x86/xen/enlighten_pv.c5
-rw-r--r--arch/x86/xen/time.c12
-rw-r--r--block/bfq-wf2q.c11
-rw-r--r--block/blk-core.c20
-rw-r--r--block/blk-mq-debugfs-zoned.c2
-rw-r--r--block/blk-mq.c3
-rw-r--r--crypto/adiantum.c4
-rw-r--r--crypto/authenc.c14
-rw-r--r--crypto/authencesn.c2
-rw-r--r--crypto/sm3_generic.c2
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/arm64/iort.c5
-rw-r--r--drivers/acpi/bus.c24
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/nfit/core.c20
-rw-r--r--drivers/acpi/nfit/intel.c8
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c41
-rw-r--r--drivers/acpi/power.c22
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_mvebu.c87
-rw-r--r--drivers/ata/libahci_platform.c13
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/atm/he.c41
-rw-r--r--drivers/atm/idt77252.c16
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/runtime.c11
-rw-r--r--drivers/base/regmap/regmap-irq.c8
-rw-r--r--drivers/block/loop.c35
-rw-r--r--drivers/block/nbd.c5
-rw-r--r--drivers/block/null_blk.h1
-rw-r--r--drivers/block/rbd.c9
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/zram/zram_drv.c90
-rw-r--r--drivers/block/zram/zram_drv.h5
-rw-r--r--drivers/cpufreq/cpufreq.c12
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c8
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c4
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c6
-rw-r--r--drivers/crypto/bcm/cipher.c44
-rw-r--r--drivers/crypto/caam/caamalg.c2
-rw-r--r--drivers/crypto/caam/caamhash.c15
-rw-r--r--drivers/crypto/caam/desc.h1
-rw-r--r--drivers/crypto/caam/error.h9
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c4
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c7
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c2
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c6
-rw-r--r--drivers/crypto/ccree/cc_aead.c40
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c4
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c15
-rw-r--r--drivers/crypto/ixp4xx_crypto.c6
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c16
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c12
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c68
-rw-r--r--drivers/crypto/talitos.c26
-rw-r--r--drivers/dma/imx-sdma.c8
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mxs-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c14
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c6
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpiolib-acpi.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c19
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c34
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c48
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c3
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c133
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_pci.c5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c23
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c6
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c14
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/hwmon/lm80.c4
-rw-r--r--drivers/hwmon/nct6775.c12
-rw-r--r--drivers/hwmon/occ/common.c24
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c15
-rw-r--r--drivers/i2c/i2c-dev.c6
-rw-r--r--drivers/i3c/master/dw-i3c-master.c7
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c4
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c5
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/rdma_core.h2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c11
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c62
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/init.c29
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c27
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h35
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c6
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/iommu/mtk_iommu_v1.c5
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c77
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c3
-rw-r--r--drivers/isdn/i4l/isdn_tty.c6
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c2
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c24
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/axp20x.c126
-rw-r--r--drivers/mfd/bd9571mwv.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/exynos-lpass.c4
-rw-r--r--drivers/mfd/madera-core.c5
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/mt6397-core.c3
-rw-r--r--drivers/mfd/qcom_rpm.c4
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/stmpe.c12
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c5
-rw-r--r--drivers/mfd/tps65218.c24
-rw-r--r--drivers/mfd/tps6586x.c24
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/mic/vop/vop_main.c9
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c36
-rw-r--r--drivers/mtd/nand/raw/denali.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c21
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c20
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c2
-rw-r--r--drivers/net/dsa/mt7530.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c113
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h5
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h10
-rw-r--r--drivers/net/dsa/realtek-smi.c18
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c12
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c61
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c22
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c8
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c28
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c68
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c14
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c14
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c18
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c35
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c11
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c12
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/nic.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c8
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c12
-rw-r--r--drivers/net/fddi/defxx.c6
-rw-r--r--drivers/net/fddi/skfp/skfddi.c8
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/cortina.c1
-rw-r--r--drivers/net/phy/marvell.c37
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c1
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/phy.c19
-rw-r--r--drivers/net/phy/phy_device.c17
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/tun.c11
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/cdc_ether.c34
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c69
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c39
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c6
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c8
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvme/host/core.c19
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/multipath.c2
-rw-r--r--drivers/nvme/host/nvme.h5
-rw-r--r--drivers/nvme/host/pci.c96
-rw-r--r--drivers/nvme/host/tcp.c16
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/property.c1
-rw-r--r--drivers/opp/core.c63
-rw-r--r--drivers/pci/Kconfig22
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c1
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/msi.c22
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/switch/switchtec.c8
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c5
-rw-r--r--drivers/phy/ti/Kconfig1
-rw-r--r--drivers/platform/x86/Kconfig6
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/rapidio/devices/tsi721.c22
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c9
-rw-r--r--drivers/reset/Kconfig20
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/core.c42
-rw-r--r--drivers/reset/reset-hsdk.c1
-rw-r--r--drivers/reset/reset-simple.c13
-rw-r--r--drivers/reset/reset-socfpga.c88
-rw-r--r--drivers/reset/reset-uniphier-glue.c (renamed from drivers/reset/reset-uniphier-usb3.c)50
-rw-r--r--drivers/s390/net/ism_drv.c15
-rw-r--r--drivers/s390/virtio/virtio_ccw.c12
-rw-r--r--drivers/scsi/3w-sas.c5
-rw-r--r--drivers/scsi/a100u2w.c8
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c18
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c11
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c49
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c44
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c9
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c28
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c12
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c35
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c15
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c8
-rw-r--r--drivers/scsi/mesh.c5
-rw-r--r--drivers/scsi/mvumi.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c6
-rw-r--r--drivers/scsi/qedf/qedf_main.c29
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c3
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c39
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_pm.c26
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c34
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c2
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c55
-rw-r--r--drivers/soc/renesas/Kconfig2
-rw-r--r--drivers/soc/renesas/r8a774c0-sysc.c23
-rw-r--r--drivers/spi/spi-pic32-sqi.c6
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/staging/vt6655/device_main.c19
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_user.c88
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/tty/serial/Kconfig12
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c28
-rw-r--r--drivers/tty/serial/lantiq.c36
-rw-r--r--drivers/tty/tty_io.c20
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c13
-rw-r--r--drivers/usb/host/uhci-hcd.c6
-rw-r--r--drivers/usb/host/xhci-mem.c8
-rw-r--r--drivers/usb/storage/scsiglue.c8
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/vfio/pci/trace.h2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c20
-rw-r--r--drivers/vhost/vhost.c105
-rw-r--r--drivers/vhost/vhost.h3
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c28
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmem.c19
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/offb.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/logo/Kconfig9
-rw-r--r--drivers/virtio/virtio_balloon.c98
-rw-r--r--drivers/virtio/virtio_mmio.c9
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/watchdog/mt7621_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/tqmx86_wdt.c8
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/pvcalls-back.c9
-rw-r--r--drivers/xen/pvcalls-front.c104
-rw-r--r--fs/afs/flock.c4
-rw-r--r--fs/afs/inode.c3
-rw-r--r--fs/afs/protocol_yfs.h11
-rw-r--r--fs/afs/rxrpc.c53
-rw-r--r--fs/afs/server_list.c4
-rw-r--r--fs/afs/yfsclient.c2
-rw-r--r--fs/block_dev.c28
-rw-r--r--fs/btrfs/ctree.c16
-rw-r--r--fs/btrfs/ctree.h7
-rw-r--r--fs/btrfs/disk-io.c12
-rw-r--r--fs/btrfs/extent-tree.c21
-rw-r--r--fs/btrfs/inode.c5
-rw-r--r--fs/btrfs/ioctl.c49
-rw-r--r--fs/btrfs/volumes.c12
-rw-r--r--fs/ceph/addr.c5
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h20
-rw-r--r--fs/cifs/cifssmb.c30
-rw-r--r--fs/cifs/connect.c7
-rw-r--r--fs/cifs/dfs_cache.c1
-rw-r--r--fs/cifs/file.c45
-rw-r--r--fs/cifs/inode.c10
-rw-r--r--fs/cifs/smb2file.c8
-rw-r--r--fs/cifs/smb2pdu.c31
-rw-r--r--fs/cifs/transport.c122
-rw-r--r--fs/hugetlbfs/inode.c61
-rw-r--r--fs/nfs/nfs4file.c8
-rw-r--r--fs/pstore/ram.c12
-rw-r--r--fs/sysfs/dir.c3
-rw-r--r--fs/sysfs/file.c6
-rw-r--r--fs/sysfs/group.c3
-rw-r--r--fs/sysfs/symlink.c3
-rw-r--r--include/drm/drm_dp_helper.h7
-rw-r--r--include/drm/drm_dp_mst_helper.h3
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-reset.h3
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/bpf_verifier.h1
-rw-r--r--include/linux/bpfilter.h15
-rw-r--r--include/linux/ceph/libceph.h6
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/compiler-clang.h5
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler-intel.h4
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/dma-mapping.h9
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/libnvdimm.h1
-rw-r--r--include/linux/mfd/cros_ec_commands.h94
-rw-r--r--include/linux/mfd/ingenic-tcu.h2
-rw-r--r--include/linux/mfd/madera/core.h7
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h4
-rw-r--r--include/linux/mfd/tmio.h2
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/pci-dma-compat.h2
-rw-r--r--include/linux/phy.h6
-rw-r--r--include/linux/phy/phy.h1
-rw-r--r--include/linux/pm_opp.h5
-rw-r--r--include/linux/qcom_scm.h1
-rw-r--r--include/linux/qed/qed_chain.h31
-rw-r--r--include/linux/reset.h15
-rw-r--r--include/linux/sched.h11
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/umh.h2
-rw-r--r--include/linux/virtio_config.h13
-rw-r--r--include/net/af_rxrpc.h16
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h1
-rw-r--r--include/trace/events/afs.h2
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/linux/ptp_clock.h2
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h1
-rw-r--r--init/Kconfig1
-rw-r--r--kernel/bpf/btf.c14
-rw-r--r--kernel/bpf/cgroup.c1
-rw-r--r--kernel/bpf/map_in_map.c17
-rw-r--r--kernel/bpf/stackmap.c12
-rw-r--r--kernel/bpf/verifier.c61
-rw-r--r--kernel/dma/swiotlb.c2
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fork.c14
-rw-r--r--kernel/seccomp.c4
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/trace/trace_kprobe.c12
-rw-r--r--kernel/umh.c33
-rw-r--r--lib/int_sqrt.c2
-rw-r--r--lib/sbitmap.c13
-rw-r--r--mm/hugetlb.c81
-rw-r--r--mm/kasan/common.c65
-rw-r--r--mm/memory-failure.c16
-rw-r--r--mm/memory.c26
-rw-r--r--mm/migrate.c13
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/rmap.c8
-rw-r--r--mm/slab.c6
-rw-r--r--mm/slub.c2
-rw-r--r--mm/usercopy.c9
-rw-r--r--mm/userfaultfd.c11
-rw-r--r--mm/util.c2
-rw-r--r--net/bpfilter/bpfilter_kern.c76
-rw-r--r--net/bpfilter/bpfilter_umh_blob.S2
-rw-r--r--net/bridge/br_fdb.c5
-rw-r--r--net/bridge/br_forward.c10
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/br_vlan.c26
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c1
-rw-r--r--net/can/gw.c30
-rw-r--r--net/ceph/ceph_common.c11
-rw-r--r--net/ceph/debugfs.c2
-rw-r--r--net/ceph/osd_client.c4
-rw-r--r--net/core/filter.c34
-rw-r--r--net/core/lwt_bpf.c1
-rw-r--r--net/core/neighbour.c15
-rw-r--r--net/core/skbuff.c7
-rw-r--r--net/ipv4/bpfilter/sockopt.c58
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/fib_trie.c15
-rw-r--r--net/ipv4/fou.c12
-rw-r--r--net/ipv4/ip_gre.c23
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/ip_sockglue.c12
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_timer.c2
-rw-r--r--net/ipv4/udp.c18
-rw-r--r--net/ipv4/udp_impl.h1
-rw-r--r--net/ipv4/udplite.c1
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/af_inet6.c14
-rw-r--r--net/ipv6/datagram.c11
-rw-r--r--net/ipv6/fou6.c17
-rw-r--r--net/ipv6/icmp.c8
-rw-r--r--net/ipv6/ip6_gre.c14
-rw-r--r--net/ipv6/route.c14
-rw-r--r--net/ipv6/udp.c26
-rw-r--r--net/ipv6/udp_impl.h1
-rw-r--r--net/ipv6/udplite.c1
-rw-r--r--net/netfilter/nf_flow_table_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c14
-rw-r--r--net/netfilter/nft_flow_offload.c13
-rw-r--r--net/openvswitch/flow.c8
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rds/ib_send.c4
-rw-r--r--net/rds/message.c4
-rw-r--r--net/rds/rds.h4
-rw-r--r--net/rds/send.c2
-rw-r--r--net/rxrpc/af_rxrpc.c70
-rw-r--r--net/rxrpc/ar-internal.h19
-rw-r--r--net/rxrpc/call_object.c97
-rw-r--r--net/rxrpc/conn_client.c5
-rw-r--r--net/rxrpc/sendmsg.c24
-rw-r--r--net/sched/act_tunnel_key.c19
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_flower.c19
-rw-r--r--net/sched/sch_cake.c5
-rw-r--r--net/sched/sch_cbs.c3
-rw-r--r--net/sched/sch_drr.c7
-rw-r--r--net/sched/sch_dsmark.c3
-rw-r--r--net/sched/sch_hfsc.c9
-rw-r--r--net/sched/sch_htb.c3
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_qfq.c20
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sctp/ipv6.c5
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/smc/af_smc.c4
-rw-r--r--net/sunrpc/auth.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c12
-rw-r--r--net/sunrpc/clnt.c20
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/verbs.c10
-rw-r--r--net/sunrpc/xprtsock.c22
-rw-r--r--net/tipc/netlink_compat.c54
-rw-r--r--net/tipc/topsrv.c2
-rw-r--r--net/xdp/xdp_umem.c16
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--samples/bpf/asm_goto_workaround.h16
-rw-r--r--samples/bpf/test_cgrp2_attach2.c14
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c2
-rw-r--r--samples/bpf/xdp1_user.c2
-rw-r--r--samples/seccomp/Makefile1
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/coccinelle/api/alloc/alloc_cast.cocci8
-rw-r--r--scripts/coccinelle/api/alloc/zalloc-simple.cocci11
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c23
-rw-r--r--scripts/kconfig/Makefile2
-rw-r--r--scripts/mod/modpost.c2
-rw-r--r--security/security.c7
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/yama/yama_lsm.c4
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c4
-rw-r--r--sound/pci/cs46xx/dsp_spos.c3
-rw-r--r--sound/pci/hda/patch_realtek.c18
-rw-r--r--sound/sparc/dbri.c4
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/mixer.c29
-rw-r--r--sound/usb/quirks-table.h6
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/stream.c36
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/unistd.h404
-rw-r--r--tools/arch/riscv/include/uapi/asm/bitsperlong.h25
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/bpf/bpftool/Makefile9
-rw-r--r--tools/bpf/bpftool/btf_dumper.c13
-rw-r--r--tools/bpf/bpftool/json_writer.c7
-rw-r--r--tools/bpf/bpftool/json_writer.h5
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/asm/bitsperlong.h2
-rw-r--r--tools/include/uapi/drm/i915_drm.h8
-rw-r--r--tools/include/uapi/linux/fs.h60
-rw-r--r--tools/include/uapi/linux/if_link.h19
-rw-r--r--tools/include/uapi/linux/in.h10
-rw-r--r--tools/include/uapi/linux/kvm.h19
-rw-r--r--tools/include/uapi/linux/mount.h58
-rw-r--r--tools/include/uapi/linux/pkt_sched.h1163
-rw-r--r--tools/include/uapi/linux/prctl.h8
-rw-r--r--tools/include/uapi/linux/vhost.h113
-rw-r--r--tools/lib/bpf/.gitignore1
-rw-r--r--tools/lib/bpf/README.rst14
-rw-r--r--tools/lib/bpf/bpf.c19
-rw-r--r--tools/lib/traceevent/event-parse-api.c4
-rw-r--r--tools/lib/traceevent/event-parse-local.h4
-rw-r--r--tools/lib/traceevent/event-parse.c129
-rw-r--r--tools/lib/traceevent/event-parse.h17
-rw-r--r--tools/lib/traceevent/plugin_kvm.c2
-rw-r--r--tools/lib/traceevent/trace-seq.c17
-rw-r--r--tools/perf/Makefile.perf8
-rw-r--r--tools/perf/arch/arm/tests/Build1
-rw-r--r--tools/perf/arch/arm/tests/arch-tests.c4
-rw-r--r--tools/perf/arch/arm/tests/vectors-page.c24
-rw-r--r--tools/perf/arch/powerpc/Makefile15
-rwxr-xr-xtools/perf/arch/powerpc/entry/syscalls/mksyscalltbl22
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl427
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h3
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c1
-rw-r--r--tools/perf/builtin-stat.c3
-rw-r--r--tools/perf/builtin-top.c7
-rw-r--r--tools/perf/builtin-trace.c15
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/perf-read-vdso.c6
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh3
-rw-r--r--tools/perf/tests/tests.h5
-rwxr-xr-xtools/perf/trace/beauty/mount_flags.sh4
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh2
-rw-r--r--tools/perf/util/annotate.c8
-rw-r--r--tools/perf/util/callchain.c32
-rw-r--r--tools/perf/util/callchain.h1
-rw-r--r--tools/perf/util/find-map.c (renamed from tools/perf/util/find-vdso-map.c)7
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/strbuf.c1
-rw-r--r--tools/perf/util/symbol.c1
-rw-r--r--tools/perf/util/vdso.c6
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile5
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c6
-rw-r--r--tools/testing/selftests/bpf/test_btf.c29
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c2
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c2
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c30
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_sock.c2
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c55
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c2
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c120
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh20
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh18
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh47
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh2
-rw-r--r--tools/testing/selftests/net/ip_defrag.c96
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh9
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json88
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json31
-rw-r--r--tools/thermal/tmon/Makefile2
-rw-r--r--tools/vm/page_owner_sort.c4
-rw-r--r--virt/kvm/kvm_main.c9
912 files changed, 8793 insertions, 4600 deletions
diff --git a/.clang-format b/.clang-format
index e6080f5834a3..bc2ffb2a0b53 100644
--- a/.clang-format
+++ b/.clang-format
@@ -72,6 +72,10 @@ ForEachMacros:
72 - 'apei_estatus_for_each_section' 72 - 'apei_estatus_for_each_section'
73 - 'ata_for_each_dev' 73 - 'ata_for_each_dev'
74 - 'ata_for_each_link' 74 - 'ata_for_each_link'
75 - '__ata_qc_for_each'
76 - 'ata_qc_for_each'
77 - 'ata_qc_for_each_raw'
78 - 'ata_qc_for_each_with_internal'
75 - 'ax25_for_each' 79 - 'ax25_for_each'
76 - 'ax25_uid_for_each' 80 - 'ax25_uid_for_each'
77 - 'bio_for_each_integrity_vec' 81 - 'bio_for_each_integrity_vec'
@@ -85,6 +89,7 @@ ForEachMacros:
85 - 'blk_queue_for_each_rl' 89 - 'blk_queue_for_each_rl'
86 - 'bond_for_each_slave' 90 - 'bond_for_each_slave'
87 - 'bond_for_each_slave_rcu' 91 - 'bond_for_each_slave_rcu'
92 - 'bpf_for_each_spilled_reg'
88 - 'btree_for_each_safe128' 93 - 'btree_for_each_safe128'
89 - 'btree_for_each_safe32' 94 - 'btree_for_each_safe32'
90 - 'btree_for_each_safe64' 95 - 'btree_for_each_safe64'
@@ -103,6 +108,8 @@ ForEachMacros:
103 - 'drm_atomic_crtc_for_each_plane' 108 - 'drm_atomic_crtc_for_each_plane'
104 - 'drm_atomic_crtc_state_for_each_plane' 109 - 'drm_atomic_crtc_state_for_each_plane'
105 - 'drm_atomic_crtc_state_for_each_plane_state' 110 - 'drm_atomic_crtc_state_for_each_plane_state'
111 - 'drm_atomic_for_each_plane_damage'
112 - 'drm_connector_for_each_possible_encoder'
106 - 'drm_for_each_connector_iter' 113 - 'drm_for_each_connector_iter'
107 - 'drm_for_each_crtc' 114 - 'drm_for_each_crtc'
108 - 'drm_for_each_encoder' 115 - 'drm_for_each_encoder'
@@ -121,11 +128,21 @@ ForEachMacros:
121 - 'for_each_bio' 128 - 'for_each_bio'
122 - 'for_each_board_func_rsrc' 129 - 'for_each_board_func_rsrc'
123 - 'for_each_bvec' 130 - 'for_each_bvec'
131 - 'for_each_card_components'
132 - 'for_each_card_links'
133 - 'for_each_card_links_safe'
134 - 'for_each_card_prelinks'
135 - 'for_each_card_rtds'
136 - 'for_each_card_rtds_safe'
137 - 'for_each_cgroup_storage_type'
124 - 'for_each_child_of_node' 138 - 'for_each_child_of_node'
125 - 'for_each_clear_bit' 139 - 'for_each_clear_bit'
126 - 'for_each_clear_bit_from' 140 - 'for_each_clear_bit_from'
127 - 'for_each_cmsghdr' 141 - 'for_each_cmsghdr'
128 - 'for_each_compatible_node' 142 - 'for_each_compatible_node'
143 - 'for_each_component_dais'
144 - 'for_each_component_dais_safe'
145 - 'for_each_comp_order'
129 - 'for_each_console' 146 - 'for_each_console'
130 - 'for_each_cpu' 147 - 'for_each_cpu'
131 - 'for_each_cpu_and' 148 - 'for_each_cpu_and'
@@ -133,6 +150,10 @@ ForEachMacros:
133 - 'for_each_cpu_wrap' 150 - 'for_each_cpu_wrap'
134 - 'for_each_dev_addr' 151 - 'for_each_dev_addr'
135 - 'for_each_dma_cap_mask' 152 - 'for_each_dma_cap_mask'
153 - 'for_each_dpcm_be'
154 - 'for_each_dpcm_be_rollback'
155 - 'for_each_dpcm_be_safe'
156 - 'for_each_dpcm_fe'
136 - 'for_each_drhd_unit' 157 - 'for_each_drhd_unit'
137 - 'for_each_dss_dev' 158 - 'for_each_dss_dev'
138 - 'for_each_efi_memory_desc' 159 - 'for_each_efi_memory_desc'
@@ -149,6 +170,7 @@ ForEachMacros:
149 - 'for_each_iommu' 170 - 'for_each_iommu'
150 - 'for_each_ip_tunnel_rcu' 171 - 'for_each_ip_tunnel_rcu'
151 - 'for_each_irq_nr' 172 - 'for_each_irq_nr'
173 - 'for_each_link_codecs'
152 - 'for_each_lru' 174 - 'for_each_lru'
153 - 'for_each_matching_node' 175 - 'for_each_matching_node'
154 - 'for_each_matching_node_and_match' 176 - 'for_each_matching_node_and_match'
@@ -160,6 +182,7 @@ ForEachMacros:
160 - 'for_each_mem_range_rev' 182 - 'for_each_mem_range_rev'
161 - 'for_each_migratetype_order' 183 - 'for_each_migratetype_order'
162 - 'for_each_msi_entry' 184 - 'for_each_msi_entry'
185 - 'for_each_msi_entry_safe'
163 - 'for_each_net' 186 - 'for_each_net'
164 - 'for_each_netdev' 187 - 'for_each_netdev'
165 - 'for_each_netdev_continue' 188 - 'for_each_netdev_continue'
@@ -183,12 +206,14 @@ ForEachMacros:
183 - 'for_each_node_with_property' 206 - 'for_each_node_with_property'
184 - 'for_each_of_allnodes' 207 - 'for_each_of_allnodes'
185 - 'for_each_of_allnodes_from' 208 - 'for_each_of_allnodes_from'
209 - 'for_each_of_cpu_node'
186 - 'for_each_of_pci_range' 210 - 'for_each_of_pci_range'
187 - 'for_each_old_connector_in_state' 211 - 'for_each_old_connector_in_state'
188 - 'for_each_old_crtc_in_state' 212 - 'for_each_old_crtc_in_state'
189 - 'for_each_oldnew_connector_in_state' 213 - 'for_each_oldnew_connector_in_state'
190 - 'for_each_oldnew_crtc_in_state' 214 - 'for_each_oldnew_crtc_in_state'
191 - 'for_each_oldnew_plane_in_state' 215 - 'for_each_oldnew_plane_in_state'
216 - 'for_each_oldnew_plane_in_state_reverse'
192 - 'for_each_oldnew_private_obj_in_state' 217 - 'for_each_oldnew_private_obj_in_state'
193 - 'for_each_old_plane_in_state' 218 - 'for_each_old_plane_in_state'
194 - 'for_each_old_private_obj_in_state' 219 - 'for_each_old_private_obj_in_state'
@@ -206,14 +231,17 @@ ForEachMacros:
206 - 'for_each_process' 231 - 'for_each_process'
207 - 'for_each_process_thread' 232 - 'for_each_process_thread'
208 - 'for_each_property_of_node' 233 - 'for_each_property_of_node'
234 - 'for_each_registered_fb'
209 - 'for_each_reserved_mem_region' 235 - 'for_each_reserved_mem_region'
210 - 'for_each_resv_unavail_range' 236 - 'for_each_rtd_codec_dai'
237 - 'for_each_rtd_codec_dai_rollback'
211 - 'for_each_rtdcom' 238 - 'for_each_rtdcom'
212 - 'for_each_rtdcom_safe' 239 - 'for_each_rtdcom_safe'
213 - 'for_each_set_bit' 240 - 'for_each_set_bit'
214 - 'for_each_set_bit_from' 241 - 'for_each_set_bit_from'
215 - 'for_each_sg' 242 - 'for_each_sg'
216 - 'for_each_sg_page' 243 - 'for_each_sg_page'
244 - 'for_each_sibling_event'
217 - '__for_each_thread' 245 - '__for_each_thread'
218 - 'for_each_thread' 246 - 'for_each_thread'
219 - 'for_each_zone' 247 - 'for_each_zone'
@@ -251,6 +279,8 @@ ForEachMacros:
251 - 'hlist_nulls_for_each_entry_from' 279 - 'hlist_nulls_for_each_entry_from'
252 - 'hlist_nulls_for_each_entry_rcu' 280 - 'hlist_nulls_for_each_entry_rcu'
253 - 'hlist_nulls_for_each_entry_safe' 281 - 'hlist_nulls_for_each_entry_safe'
282 - 'i3c_bus_for_each_i2cdev'
283 - 'i3c_bus_for_each_i3cdev'
254 - 'ide_host_for_each_port' 284 - 'ide_host_for_each_port'
255 - 'ide_port_for_each_dev' 285 - 'ide_port_for_each_dev'
256 - 'ide_port_for_each_present_dev' 286 - 'ide_port_for_each_present_dev'
@@ -267,11 +297,14 @@ ForEachMacros:
267 - 'kvm_for_each_memslot' 297 - 'kvm_for_each_memslot'
268 - 'kvm_for_each_vcpu' 298 - 'kvm_for_each_vcpu'
269 - 'list_for_each' 299 - 'list_for_each'
300 - 'list_for_each_codec'
301 - 'list_for_each_codec_safe'
270 - 'list_for_each_entry' 302 - 'list_for_each_entry'
271 - 'list_for_each_entry_continue' 303 - 'list_for_each_entry_continue'
272 - 'list_for_each_entry_continue_rcu' 304 - 'list_for_each_entry_continue_rcu'
273 - 'list_for_each_entry_continue_reverse' 305 - 'list_for_each_entry_continue_reverse'
274 - 'list_for_each_entry_from' 306 - 'list_for_each_entry_from'
307 - 'list_for_each_entry_from_rcu'
275 - 'list_for_each_entry_from_reverse' 308 - 'list_for_each_entry_from_reverse'
276 - 'list_for_each_entry_lockless' 309 - 'list_for_each_entry_lockless'
277 - 'list_for_each_entry_rcu' 310 - 'list_for_each_entry_rcu'
@@ -291,6 +324,7 @@ ForEachMacros:
291 - 'media_device_for_each_intf' 324 - 'media_device_for_each_intf'
292 - 'media_device_for_each_link' 325 - 'media_device_for_each_link'
293 - 'media_device_for_each_pad' 326 - 'media_device_for_each_pad'
327 - 'nanddev_io_for_each_page'
294 - 'netdev_for_each_lower_dev' 328 - 'netdev_for_each_lower_dev'
295 - 'netdev_for_each_lower_private' 329 - 'netdev_for_each_lower_private'
296 - 'netdev_for_each_lower_private_rcu' 330 - 'netdev_for_each_lower_private_rcu'
@@ -357,12 +391,14 @@ ForEachMacros:
357 - 'sk_nulls_for_each' 391 - 'sk_nulls_for_each'
358 - 'sk_nulls_for_each_from' 392 - 'sk_nulls_for_each_from'
359 - 'sk_nulls_for_each_rcu' 393 - 'sk_nulls_for_each_rcu'
394 - 'snd_array_for_each'
360 - 'snd_pcm_group_for_each_entry' 395 - 'snd_pcm_group_for_each_entry'
361 - 'snd_soc_dapm_widget_for_each_path' 396 - 'snd_soc_dapm_widget_for_each_path'
362 - 'snd_soc_dapm_widget_for_each_path_safe' 397 - 'snd_soc_dapm_widget_for_each_path_safe'
363 - 'snd_soc_dapm_widget_for_each_sink_path' 398 - 'snd_soc_dapm_widget_for_each_sink_path'
364 - 'snd_soc_dapm_widget_for_each_source_path' 399 - 'snd_soc_dapm_widget_for_each_source_path'
365 - 'tb_property_for_each' 400 - 'tb_property_for_each'
401 - 'tcf_exts_for_each_action'
366 - 'udp_portaddr_for_each_entry' 402 - 'udp_portaddr_for_each_entry'
367 - 'udp_portaddr_for_each_entry_rcu' 403 - 'udp_portaddr_for_each_entry_rcu'
368 - 'usb_hub_for_each_child' 404 - 'usb_hub_for_each_child'
@@ -371,6 +407,11 @@ ForEachMacros:
371 - 'v4l2_m2m_for_each_dst_buf_safe' 407 - 'v4l2_m2m_for_each_dst_buf_safe'
372 - 'v4l2_m2m_for_each_src_buf' 408 - 'v4l2_m2m_for_each_src_buf'
373 - 'v4l2_m2m_for_each_src_buf_safe' 409 - 'v4l2_m2m_for_each_src_buf_safe'
410 - 'virtio_device_for_each_vq'
411 - 'xa_for_each'
412 - 'xas_for_each'
413 - 'xas_for_each_conflict'
414 - 'xas_for_each_marked'
374 - 'zorro_for_each_dev' 415 - 'zorro_for_each_dev'
375 416
376#IncludeBlocks: Preserve # Unknown to clang-format-5.0 417#IncludeBlocks: Preserve # Unknown to clang-format-5.0
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index 7710d4022b19..dfad7427817c 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -279,3 +279,12 @@ Description:
279 size in 512B sectors of the zones of the device, with 279 size in 512B sectors of the zones of the device, with
280 the eventual exception of the last zone of the device 280 the eventual exception of the last zone of the device
281 which may be smaller. 281 which may be smaller.
282
283What: /sys/block/<disk>/queue/io_timeout
284Date: November 2018
285Contact: Weiping Zhang <zhangweiping@didiglobal.com>
286Description:
287 io_timeout is the request timeout in milliseconds. If a request
288 does not complete in this time then the block driver timeout
289 handler is invoked. That timeout handler can decide to retry
290 the request, to fail it or to start a device recovery strategy.
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index 9d2339a485c8..14b2bf2e5105 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -122,11 +122,18 @@ Description:
122 statistics (bd_count, bd_reads, bd_writes) in a format 122 statistics (bd_count, bd_reads, bd_writes) in a format
123 similar to block layer statistics file format. 123 similar to block layer statistics file format.
124 124
125What: /sys/block/zram<id>/writeback_limit_enable
126Date: November 2018
127Contact: Minchan Kim <minchan@kernel.org>
128Description:
129 The writeback_limit_enable file is read-write and specifies
130 eanbe of writeback_limit feature. "1" means eable the feature.
131 No limit "0" is the initial state.
132
125What: /sys/block/zram<id>/writeback_limit 133What: /sys/block/zram<id>/writeback_limit
126Date: November 2018 134Date: November 2018
127Contact: Minchan Kim <minchan@kernel.org> 135Contact: Minchan Kim <minchan@kernel.org>
128Description: 136Description:
129 The writeback_limit file is read-write and specifies the maximum 137 The writeback_limit file is read-write and specifies the maximum
130 amount of writeback ZRAM can do. The limit could be changed 138 amount of writeback ZRAM can do. The limit could be changed
131 in run time and "0" means disable the limit. 139 in run time.
132 No limit is the initial state.
diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt
index 8d8d8f06cab2..98a8dd5ee385 100644
--- a/Documentation/block/bfq-iosched.txt
+++ b/Documentation/block/bfq-iosched.txt
@@ -357,6 +357,13 @@ video playing/streaming, a very low drop rate may be more important
357than maximum throughput. In these cases, consider setting the 357than maximum throughput. In these cases, consider setting the
358strict_guarantees parameter. 358strict_guarantees parameter.
359 359
360slice_idle_us
361-------------
362
363Controls the same tuning parameter as slice_idle, but in microseconds.
364Either tunable can be used to set idling behavior. Afterwards, the
365other tunable will reflect the newly set value in sysfs.
366
360strict_guarantees 367strict_guarantees
361----------------- 368-----------------
362 369
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index ea2dafe49ae8..4cad1024fff7 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -88,7 +88,8 @@ shared_tags=[0/1]: Default: 0
88 88
89zoned=[0/1]: Default: 0 89zoned=[0/1]: Default: 0
90 0: Block device is exposed as a random-access block device. 90 0: Block device is exposed as a random-access block device.
91 1: Block device is exposed as a host-managed zoned block device. 91 1: Block device is exposed as a host-managed zoned block device. Requires
92 CONFIG_BLK_DEV_ZONED.
92 93
93zone_size=[MB]: Default: 256 94zone_size=[MB]: Default: 256
94 Per zone size when exposed as a zoned block device. Must be a power of two. 95 Per zone size when exposed as a zoned block device. Must be a power of two.
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 39e286d7afc9..83b457e24bba 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -67,6 +67,13 @@ If set to a value larger than 0, the kernel will put the process issuing
67IO to sleep for this amount of microseconds before entering classic 67IO to sleep for this amount of microseconds before entering classic
68polling. 68polling.
69 69
70io_timeout (RW)
71---------------
72io_timeout is the request timeout in milliseconds. If a request does not
73complete in this time then the block driver timeout handler is invoked.
74That timeout handler can decide to retry the request, to fail it or to start
75a device recovery strategy.
76
70iostats (RW) 77iostats (RW)
71------------- 78-------------
72This file is used to control (on/off) the iostats accounting of the 79This file is used to control (on/off) the iostats accounting of the
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 436c5e98e1b6..4df0ce271085 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -156,22 +156,23 @@ Per-device statistics are exported as various nodes under /sys/block/zram<id>/
156A brief description of exported device attributes. For more details please 156A brief description of exported device attributes. For more details please
157read Documentation/ABI/testing/sysfs-block-zram. 157read Documentation/ABI/testing/sysfs-block-zram.
158 158
159Name access description 159Name access description
160---- ------ ----------- 160---- ------ -----------
161disksize RW show and set the device's disk size 161disksize RW show and set the device's disk size
162initstate RO shows the initialization state of the device 162initstate RO shows the initialization state of the device
163reset WO trigger device reset 163reset WO trigger device reset
164mem_used_max WO reset the `mem_used_max' counter (see later) 164mem_used_max WO reset the `mem_used_max' counter (see later)
165mem_limit WO specifies the maximum amount of memory ZRAM can use 165mem_limit WO specifies the maximum amount of memory ZRAM can use
166 to store the compressed data 166 to store the compressed data
167writeback_limit WO specifies the maximum amount of write IO zram can 167writeback_limit WO specifies the maximum amount of write IO zram can
168 write out to backing device as 4KB unit 168 write out to backing device as 4KB unit
169max_comp_streams RW the number of possible concurrent compress operations 169writeback_limit_enable RW show and set writeback_limit feature
170comp_algorithm RW show and change the compression algorithm 170max_comp_streams RW the number of possible concurrent compress operations
171compact WO trigger memory compaction 171comp_algorithm RW show and change the compression algorithm
172debug_stat RO this file is used for zram debugging purposes 172compact WO trigger memory compaction
173backing_dev RW set up backend storage for zram to write out 173debug_stat RO this file is used for zram debugging purposes
174idle WO mark allocated slot as idle 174backing_dev RW set up backend storage for zram to write out
175idle WO mark allocated slot as idle
175 176
176 177
177User space is advised to use the following files to read the device statistics. 178User space is advised to use the following files to read the device statistics.
@@ -280,32 +281,51 @@ With the command, zram writeback idle pages from memory to the storage.
280If there are lots of write IO with flash device, potentially, it has 281If there are lots of write IO with flash device, potentially, it has
281flash wearout problem so that admin needs to design write limitation 282flash wearout problem so that admin needs to design write limitation
282to guarantee storage health for entire product life. 283to guarantee storage health for entire product life.
283To overcome the concern, zram supports "writeback_limit". 284
284The "writeback_limit"'s default value is 0 so that it doesn't limit 285To overcome the concern, zram supports "writeback_limit" feature.
285any writeback. If admin want to measure writeback count in a certain 286The "writeback_limit_enable"'s default value is 0 so that it doesn't limit
286period, he could know it via /sys/block/zram0/bd_stat's 3rd column. 287any writeback. IOW, if admin want to apply writeback budget, he should
288enable writeback_limit_enable via
289
290 $ echo 1 > /sys/block/zramX/writeback_limit_enable
291
292Once writeback_limit_enable is set, zram doesn't allow any writeback
293until admin set the budget via /sys/block/zramX/writeback_limit.
294
295(If admin doesn't enable writeback_limit_enable, writeback_limit's value
296assigned via /sys/block/zramX/writeback_limit is meaninless.)
287 297
288If admin want to limit writeback as per-day 400M, he could do it 298If admin want to limit writeback as per-day 400M, he could do it
289like below. 299like below.
290 300
291 MB_SHIFT=20 301 $ MB_SHIFT=20
292 4K_SHIFT=12 302 $ 4K_SHIFT=12
293 echo $((400<<MB_SHIFT>>4K_SHIFT)) > \ 303 $ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
294 /sys/block/zram0/writeback_limit. 304 /sys/block/zram0/writeback_limit.
305 $ echo 1 > /sys/block/zram0/writeback_limit_enable
295 306
296If admin want to allow further write again, he could do it like below 307If admin want to allow further write again once the bugdet is exausted,
308he could do it like below
297 309
298 echo 0 > /sys/block/zram0/writeback_limit 310 $ echo $((400<<MB_SHIFT>>4K_SHIFT)) > \
311 /sys/block/zram0/writeback_limit
299 312
300If admin want to see remaining writeback budget since he set, 313If admin want to see remaining writeback budget since he set,
301 314
302 cat /sys/block/zram0/writeback_limit 315 $ cat /sys/block/zramX/writeback_limit
316
317If admin want to disable writeback limit, he could do
318
319 $ echo 0 > /sys/block/zramX/writeback_limit_enable
303 320
304The writeback_limit count will reset whenever you reset zram(e.g., 321The writeback_limit count will reset whenever you reset zram(e.g.,
305system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of 322system reboot, echo 1 > /sys/block/zramX/reset) so keeping how many of
306writeback happened until you reset the zram to allocate extra writeback 323writeback happened until you reset the zram to allocate extra writeback
307budget in next setting is user's job. 324budget in next setting is user's job.
308 325
326If admin want to measure writeback count in a certain period, he could
327know it via /sys/block/zram0/bd_stat's 3rd column.
328
309= memory tracking 329= memory tracking
310 330
311With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the 331With CONFIG_ZRAM_MEMORY_TRACKING, user can know information of the
diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst
index 6780a6d81745..7cc9e368c1e9 100644
--- a/Documentation/bpf/bpf_design_QA.rst
+++ b/Documentation/bpf/bpf_design_QA.rst
@@ -157,12 +157,11 @@ Q: Does BPF have a stable ABI?
157------------------------------ 157------------------------------
158A: YES. BPF instructions, arguments to BPF programs, set of helper 158A: YES. BPF instructions, arguments to BPF programs, set of helper
159functions and their arguments, recognized return codes are all part 159functions and their arguments, recognized return codes are all part
160of ABI. However when tracing programs are using bpf_probe_read() helper 160of ABI. However there is one specific exception to tracing programs
161to walk kernel internal datastructures and compile with kernel 161which are using helpers like bpf_probe_read() to walk kernel internal
162internal headers these accesses can and will break with newer 162data structures and compile with kernel internal headers. Both of these
163kernels. The union bpf_attr -> kern_version is checked at load time 163kernel internals are subject to change and can break with newer kernels
164to prevent accidentally loading kprobe-based bpf programs written 164such that the program needs to be adapted accordingly.
165for a different kernel. Networking programs don't do kern_version check.
166 165
167Q: How much stack space a BPF program uses? 166Q: How much stack space a BPF program uses?
168------------------------------------------- 167-------------------------------------------
diff --git a/Documentation/devicetree/bindings/arm/cpu-capacity.txt b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
index 84262cdb8d29..96fa46cb133c 100644
--- a/Documentation/devicetree/bindings/arm/cpu-capacity.txt
+++ b/Documentation/devicetree/bindings/arm/cpu-capacity.txt
@@ -235,4 +235,4 @@ cpus {
235=========================================== 235===========================================
236 236
237[1] ARM Linux Kernel documentation - CPUs bindings 237[1] ARM Linux Kernel documentation - CPUs bindings
238 Documentation/devicetree/bindings/arm/cpus.txt 238 Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 8f0937db55c5..45730ba60af5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -684,7 +684,7 @@ cpus {
684=========================================== 684===========================================
685 685
686[1] ARM Linux Kernel documentation - CPUs bindings 686[1] ARM Linux Kernel documentation - CPUs bindings
687 Documentation/devicetree/bindings/arm/cpus.txt 687 Documentation/devicetree/bindings/arm/cpus.yaml
688 688
689[2] ARM Linux Kernel documentation - PSCI bindings 689[2] ARM Linux Kernel documentation - PSCI bindings
690 Documentation/devicetree/bindings/arm/psci.txt 690 Documentation/devicetree/bindings/arm/psci.txt
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
index 1b2ab1ff5587..46652bf65147 100644
--- a/Documentation/devicetree/bindings/arm/sp810.txt
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -4,7 +4,7 @@ SP810 System Controller
4Required properties: 4Required properties:
5 5
6- compatible: standard compatible string for a Primecell peripheral, 6- compatible: standard compatible string for a Primecell peripheral,
7 see Documentation/devicetree/bindings/arm/primecell.txt 7 see Documentation/devicetree/bindings/arm/primecell.yaml
8 for more details 8 for more details
9 should be: "arm,sp810", "arm,primecell" 9 should be: "arm,sp810", "arm,primecell"
10 10
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt
index de9eb0486630..b0d80c0fb265 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/arm/topology.txt
@@ -472,4 +472,4 @@ cpus {
472 472
473=============================================================================== 473===============================================================================
474[1] ARM Linux kernel documentation 474[1] ARM Linux kernel documentation
475 Documentation/devicetree/bindings/arm/cpus.txt 475 Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
index af376a01f2b7..23b52dc02266 100644
--- a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
+++ b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
@@ -18,4 +18,4 @@ Required Properties:
18Each clock is assigned an identifier and client nodes use this identifier 18Each clock is assigned an identifier and client nodes use this identifier
19to specify the clock which they consume. 19to specify the clock which they consume.
20 20
21All these identifier could be found in <dt-bindings/clock/marvell-mmp2.h>. 21All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/display/arm,pl11x.txt b/Documentation/devicetree/bindings/display/arm,pl11x.txt
index ef89ab46b2c9..572fa2773ec4 100644
--- a/Documentation/devicetree/bindings/display/arm,pl11x.txt
+++ b/Documentation/devicetree/bindings/display/arm,pl11x.txt
@@ -1,6 +1,6 @@
1* ARM PrimeCell Color LCD Controller PL110/PL111 1* ARM PrimeCell Color LCD Controller PL110/PL111
2 2
3See also Documentation/devicetree/bindings/arm/primecell.txt 3See also Documentation/devicetree/bindings/arm/primecell.yaml
4 4
5Required properties: 5Required properties:
6 6
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
index 38ca2201e8ae..2e097b57f170 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
@@ -14,8 +14,6 @@ Required properties:
14 14
15 "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K 15 "marvell,armada-8k-gpio" should be used for the Armada 7K and 8K
16 SoCs (either from AP or CP), see 16 SoCs (either from AP or CP), see
17 Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
18 and
19 Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt 17 Documentation/devicetree/bindings/arm/marvell/ap806-system-controller.txt
20 for specific details about the offset property. 18 for specific details about the offset property.
21 19
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index b83bb8249074..a3be5298a5eb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -78,7 +78,7 @@ Sub-nodes:
78PPI affinity can be expressed as a single "ppi-partitions" node, 78PPI affinity can be expressed as a single "ppi-partitions" node,
79containing a set of sub-nodes, each with the following property: 79containing a set of sub-nodes, each with the following property:
80- affinity: Should be a list of phandles to CPU nodes (as described in 80- affinity: Should be a list of phandles to CPU nodes (as described in
81Documentation/devicetree/bindings/arm/cpus.txt). 81 Documentation/devicetree/bindings/arm/cpus.yaml).
82 82
83GICv3 has one or more Interrupt Translation Services (ITS) that are 83GICv3 has one or more Interrupt Translation Services (ITS) that are
84used to route Message Signalled Interrupts (MSI) to the CPUs. 84used to route Message Signalled Interrupts (MSI) to the CPUs.
diff --git a/Documentation/devicetree/bindings/reset/socfpga-reset.txt b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
index 98c9f560e5c5..38fe34fd8b8a 100644
--- a/Documentation/devicetree/bindings/reset/socfpga-reset.txt
+++ b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
@@ -1,7 +1,8 @@
1Altera SOCFPGA Reset Manager 1Altera SOCFPGA Reset Manager
2 2
3Required properties: 3Required properties:
4- compatible : "altr,rst-mgr" 4- compatible : "altr,rst-mgr" for (Cyclone5/Arria5/Arria10)
5 "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC
5- reg : Should contain 1 register ranges(address and length) 6- reg : Should contain 1 register ranges(address and length)
6- altr,modrst-offset : Should contain the offset of the first modrst register. 7- altr,modrst-offset : Should contain the offset of the first modrst register.
7- #reset-cells: 1 8- #reset-cells: 1
diff --git a/Documentation/devicetree/bindings/reset/uniphier-reset.txt b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
index 101743dda223..ea005177d20a 100644
--- a/Documentation/devicetree/bindings/reset/uniphier-reset.txt
+++ b/Documentation/devicetree/bindings/reset/uniphier-reset.txt
@@ -120,27 +120,30 @@ Example:
120 }; 120 };
121 121
122 122
123USB3 core reset 123Peripheral core reset in glue layer
124--------------- 124-----------------------------------
125 125
126USB3 core reset belongs to USB3 glue layer. Before using the core reset, 126Some peripheral core reset belongs to its own glue layer. Before using
127it is necessary to control the clocks and resets to enable this layer. 127this core reset, it is necessary to control the clocks and resets to enable
128These clocks and resets should be described in each property. 128this layer. These clocks and resets should be described in each property.
129 129
130Required properties: 130Required properties:
131- compatible: Should be 131- compatible: Should be
132 "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC 132 "socionext,uniphier-pro4-usb3-reset" - for Pro4 SoC USB3
133 "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC 133 "socionext,uniphier-pxs2-usb3-reset" - for PXs2 SoC USB3
134 "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC 134 "socionext,uniphier-ld20-usb3-reset" - for LD20 SoC USB3
135 "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC 135 "socionext,uniphier-pxs3-usb3-reset" - for PXs3 SoC USB3
136 "socionext,uniphier-pro4-ahci-reset" - for Pro4 SoC AHCI
137 "socionext,uniphier-pxs2-ahci-reset" - for PXs2 SoC AHCI
138 "socionext,uniphier-pxs3-ahci-reset" - for PXs3 SoC AHCI
136- #reset-cells: Should be 1. 139- #reset-cells: Should be 1.
137- reg: Specifies offset and length of the register set for the device. 140- reg: Specifies offset and length of the register set for the device.
138- clocks: A list of phandles to the clock gate for USB3 glue layer. 141- clocks: A list of phandles to the clock gate for the glue layer.
139 According to the clock-names, appropriate clocks are required. 142 According to the clock-names, appropriate clocks are required.
140- clock-names: Should contain 143- clock-names: Should contain
141 "gio", "link" - for Pro4 SoC 144 "gio", "link" - for Pro4 SoC
142 "link" - for others 145 "link" - for others
143- resets: A list of phandles to the reset control for USB3 glue layer. 146- resets: A list of phandles to the reset control for the glue layer.
144 According to the reset-names, appropriate resets are required. 147 According to the reset-names, appropriate resets are required.
145- reset-names: Should contain 148- reset-names: Should contain
146 "gio", "link" - for Pro4 SoC 149 "gio", "link" - for Pro4 SoC
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
index 0b8cc533ca83..cf759e5f9b10 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,glink.txt
@@ -55,7 +55,7 @@ of these nodes are defined by the individual bindings for the specific function
55= EXAMPLE 55= EXAMPLE
56The following example represents the GLINK RPM node on a MSM8996 device, with 56The following example represents the GLINK RPM node on a MSM8996 device, with
57the function for the "rpm_request" channel defined, which is used for 57the function for the "rpm_request" channel defined, which is used for
58regualtors and root clocks. 58regulators and root clocks.
59 59
60 apcs_glb: mailbox@9820000 { 60 apcs_glb: mailbox@9820000 {
61 compatible = "qcom,msm8996-apcs-hmss-global"; 61 compatible = "qcom,msm8996-apcs-hmss-global";
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
index a35af2dafdad..49e1d72d3648 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smp2p.txt
@@ -41,12 +41,12 @@ processor ID) and a string identifier.
41- qcom,local-pid: 41- qcom,local-pid:
42 Usage: required 42 Usage: required
43 Value type: <u32> 43 Value type: <u32>
44 Definition: specifies the identfier of the local endpoint of this edge 44 Definition: specifies the identifier of the local endpoint of this edge
45 45
46- qcom,remote-pid: 46- qcom,remote-pid:
47 Usage: required 47 Usage: required
48 Value type: <u32> 48 Value type: <u32>
49 Definition: specifies the identfier of the remote endpoint of this edge 49 Definition: specifies the identifier of the remote endpoint of this edge
50 50
51= SUBNODES 51= SUBNODES
52Each SMP2P pair contain a set of inbound and outbound entries, these are 52Each SMP2P pair contain a set of inbound and outbound entries, these are
diff --git a/Documentation/driver-model/bus.txt b/Documentation/driver-model/bus.txt
index b577a45b93ea..c247b488a567 100644
--- a/Documentation/driver-model/bus.txt
+++ b/Documentation/driver-model/bus.txt
@@ -124,11 +124,11 @@ struct bus_attribute {
124 ssize_t (*store)(struct bus_type *, const char * buf, size_t count); 124 ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
125}; 125};
126 126
127Bus drivers can export attributes using the BUS_ATTR macro that works 127Bus drivers can export attributes using the BUS_ATTR_RW macro that works
128similarly to the DEVICE_ATTR macro for devices. For example, a definition 128similarly to the DEVICE_ATTR_RW macro for devices. For example, a
129like this: 129definition like this:
130 130
131static BUS_ATTR(debug,0644,show_debug,store_debug); 131static BUS_ATTR_RW(debug);
132 132
133is equivalent to declaring: 133is equivalent to declaring:
134 134
diff --git a/Documentation/fb/fbcon.txt b/Documentation/fb/fbcon.txt
index 62af30511a95..60a5ec04e8f0 100644
--- a/Documentation/fb/fbcon.txt
+++ b/Documentation/fb/fbcon.txt
@@ -163,6 +163,14 @@ C. Boot options
163 be preserved until there actually is some text is output to the console. 163 be preserved until there actually is some text is output to the console.
164 This option causes fbcon to bind immediately to the fbdev device. 164 This option causes fbcon to bind immediately to the fbdev device.
165 165
1667. fbcon=logo-pos:<location>
167
168 The only possible 'location' is 'center' (without quotes), and when
169 given, the bootup logo is moved from the default top-left corner
170 location to the center of the framebuffer. If more than one logo is
171 displayed due to multiple CPUs, the collected line of logos is moved
172 as a whole.
173
166C. Attaching, Detaching and Unloading 174C. Attaching, Detaching and Unloading
167 175
168Before going on to how to attach, detach and unload the framebuffer console, an 176Before going on to how to attach, detach and unload the framebuffer console, an
diff --git a/Documentation/features/core/cBPF-JIT/arch-support.txt b/Documentation/features/core/cBPF-JIT/arch-support.txt
index 90459cdde314..8620c38d4db0 100644
--- a/Documentation/features/core/cBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/cBPF-JIT/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/core/eBPF-JIT/arch-support.txt b/Documentation/features/core/eBPF-JIT/arch-support.txt
index c90a0382fe66..9ae6e8d0d10d 100644
--- a/Documentation/features/core/eBPF-JIT/arch-support.txt
+++ b/Documentation/features/core/eBPF-JIT/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/core/generic-idle-thread/arch-support.txt b/Documentation/features/core/generic-idle-thread/arch-support.txt
index 0ef6acdb991c..365df2c2ff0b 100644
--- a/Documentation/features/core/generic-idle-thread/arch-support.txt
+++ b/Documentation/features/core/generic-idle-thread/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/core/jump-labels/arch-support.txt b/Documentation/features/core/jump-labels/arch-support.txt
index 60111395f932..7fc2e243dee9 100644
--- a/Documentation/features/core/jump-labels/arch-support.txt
+++ b/Documentation/features/core/jump-labels/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/core/tracehook/arch-support.txt b/Documentation/features/core/tracehook/arch-support.txt
index f44c274e40ed..d344b99aae1e 100644
--- a/Documentation/features/core/tracehook/arch-support.txt
+++ b/Documentation/features/core/tracehook/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | ok | 13 | c6x: | ok |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index 282ecc8ea1da..304dcd461795 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/gcov-profile-all/arch-support.txt b/Documentation/features/debug/gcov-profile-all/arch-support.txt
index 01b2b3004e0a..059d58a549c7 100644
--- a/Documentation/features/debug/gcov-profile-all/arch-support.txt
+++ b/Documentation/features/debug/gcov-profile-all/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 3b4dff22329f..3e6b8f07d5d0 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | ok | 15 | h8300: | ok |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 7e963d0ae646..68f266944d5f 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/kprobes/arch-support.txt b/Documentation/features/debug/kprobes/arch-support.txt
index 4ada027faf16..f4e45bd58fea 100644
--- a/Documentation/features/debug/kprobes/arch-support.txt
+++ b/Documentation/features/debug/kprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/debug/kretprobes/arch-support.txt b/Documentation/features/debug/kretprobes/arch-support.txt
index 044e13fcca5d..1d5651ef11f8 100644
--- a/Documentation/features/debug/kretprobes/arch-support.txt
+++ b/Documentation/features/debug/kretprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/debug/optprobes/arch-support.txt b/Documentation/features/debug/optprobes/arch-support.txt
index dce7669c918f..fb297a88f62c 100644
--- a/Documentation/features/debug/optprobes/arch-support.txt
+++ b/Documentation/features/debug/optprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/stackprotector/arch-support.txt b/Documentation/features/debug/stackprotector/arch-support.txt
index 954ac1c95553..9999ea521f3e 100644
--- a/Documentation/features/debug/stackprotector/arch-support.txt
+++ b/Documentation/features/debug/stackprotector/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 1a3f9d3229bf..1c577d0cfc7f 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/debug/user-ret-profiler/arch-support.txt b/Documentation/features/debug/user-ret-profiler/arch-support.txt
index 1d78d1069a5f..6bfa36b0e017 100644
--- a/Documentation/features/debug/user-ret-profiler/arch-support.txt
+++ b/Documentation/features/debug/user-ret-profiler/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/io/dma-contiguous/arch-support.txt b/Documentation/features/io/dma-contiguous/arch-support.txt
index 30c072d2b67c..eb28b5c97ca6 100644
--- a/Documentation/features/io/dma-contiguous/arch-support.txt
+++ b/Documentation/features/io/dma-contiguous/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/cmpxchg-local/arch-support.txt b/Documentation/features/locking/cmpxchg-local/arch-support.txt
index 51704a2dc8d1..242ff5a6586e 100644
--- a/Documentation/features/locking/cmpxchg-local/arch-support.txt
+++ b/Documentation/features/locking/cmpxchg-local/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/lockdep/arch-support.txt b/Documentation/features/locking/lockdep/arch-support.txt
index bd39c5edd460..941fd5b1094d 100644
--- a/Documentation/features/locking/lockdep/arch-support.txt
+++ b/Documentation/features/locking/lockdep/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/queued-rwlocks/arch-support.txt b/Documentation/features/locking/queued-rwlocks/arch-support.txt
index da7aff3bee0b..c683da198f31 100644
--- a/Documentation/features/locking/queued-rwlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-rwlocks/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | ok |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
index 478e9101322c..e3080b82aefd 100644
--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
+++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/locking/rwsem-optimized/arch-support.txt b/Documentation/features/locking/rwsem-optimized/arch-support.txt
index e54b1f1a8091..7521d7500fbe 100644
--- a/Documentation/features/locking/rwsem-optimized/arch-support.txt
+++ b/Documentation/features/locking/rwsem-optimized/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/perf/kprobes-event/arch-support.txt b/Documentation/features/perf/kprobes-event/arch-support.txt
index 7331402d1887..d8278bf62b85 100644
--- a/Documentation/features/perf/kprobes-event/arch-support.txt
+++ b/Documentation/features/perf/kprobes-event/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/perf/perf-regs/arch-support.txt b/Documentation/features/perf/perf-regs/arch-support.txt
index 53feeee6cdad..687d049d9cee 100644
--- a/Documentation/features/perf/perf-regs/arch-support.txt
+++ b/Documentation/features/perf/perf-regs/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/perf/perf-stackdump/arch-support.txt b/Documentation/features/perf/perf-stackdump/arch-support.txt
index 16164348e0ea..90996e3d18a8 100644
--- a/Documentation/features/perf/perf-stackdump/arch-support.txt
+++ b/Documentation/features/perf/perf-stackdump/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
index c7858dd1ea8f..8a521a622966 100644
--- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt
+++ b/Documentation/features/sched/membarrier-sync-core/arch-support.txt
@@ -34,6 +34,7 @@
34 | arm: | ok | 34 | arm: | ok |
35 | arm64: | ok | 35 | arm64: | ok |
36 | c6x: | TODO | 36 | c6x: | TODO |
37 | csky: | TODO |
37 | h8300: | TODO | 38 | h8300: | TODO |
38 | hexagon: | TODO | 39 | hexagon: | TODO |
39 | ia64: | TODO | 40 | ia64: | TODO |
diff --git a/Documentation/features/sched/numa-balancing/arch-support.txt b/Documentation/features/sched/numa-balancing/arch-support.txt
index c68bb2c2cb62..350823692f28 100644
--- a/Documentation/features/sched/numa-balancing/arch-support.txt
+++ b/Documentation/features/sched/numa-balancing/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | .. | 11 | arm: | .. |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | .. |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | .. | 16 | hexagon: | .. |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index d4271b493b41..4fe6c3c3be5c 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/arch-tick-broadcast/arch-support.txt b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
index 83d9e68462bb..593536f7925b 100644
--- a/Documentation/features/time/arch-tick-broadcast/arch-support.txt
+++ b/Documentation/features/time/arch-tick-broadcast/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/clockevents/arch-support.txt b/Documentation/features/time/clockevents/arch-support.txt
index 3d4908fce6da..7a27157da408 100644
--- a/Documentation/features/time/clockevents/arch-support.txt
+++ b/Documentation/features/time/clockevents/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | ok | 13 | c6x: | ok |
14 | csky: | ok |
14 | h8300: | ok | 15 | h8300: | ok |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/context-tracking/arch-support.txt b/Documentation/features/time/context-tracking/arch-support.txt
index c29974afffaa..048bfb6d3872 100644
--- a/Documentation/features/time/context-tracking/arch-support.txt
+++ b/Documentation/features/time/context-tracking/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/time/irq-time-acct/arch-support.txt b/Documentation/features/time/irq-time-acct/arch-support.txt
index 8d73c463ec27..a14bbad8e948 100644
--- a/Documentation/features/time/irq-time-acct/arch-support.txt
+++ b/Documentation/features/time/irq-time-acct/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | .. | 17 | ia64: | .. |
diff --git a/Documentation/features/time/modern-timekeeping/arch-support.txt b/Documentation/features/time/modern-timekeeping/arch-support.txt
index e7c6ea6b8fb3..2855dfe2464d 100644
--- a/Documentation/features/time/modern-timekeeping/arch-support.txt
+++ b/Documentation/features/time/modern-timekeeping/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | ok | 13 | c6x: | ok |
14 | csky: | ok |
14 | h8300: | ok | 15 | h8300: | ok |
15 | hexagon: | ok | 16 | hexagon: | ok |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/time/virt-cpuacct/arch-support.txt b/Documentation/features/time/virt-cpuacct/arch-support.txt
index 4646457461cf..fb0d0cab9cab 100644
--- a/Documentation/features/time/virt-cpuacct/arch-support.txt
+++ b/Documentation/features/time/virt-cpuacct/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/vm/ELF-ASLR/arch-support.txt b/Documentation/features/vm/ELF-ASLR/arch-support.txt
index 1f71d090ff2c..adc25878d217 100644
--- a/Documentation/features/vm/ELF-ASLR/arch-support.txt
+++ b/Documentation/features/vm/ELF-ASLR/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/PG_uncached/arch-support.txt b/Documentation/features/vm/PG_uncached/arch-support.txt
index fbd5aa463b0a..f05588f9e4b4 100644
--- a/Documentation/features/vm/PG_uncached/arch-support.txt
+++ b/Documentation/features/vm/PG_uncached/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/vm/THP/arch-support.txt b/Documentation/features/vm/THP/arch-support.txt
index 5d7ecc378f29..cdfe8925f881 100644
--- a/Documentation/features/vm/THP/arch-support.txt
+++ b/Documentation/features/vm/THP/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | .. |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | .. | 16 | hexagon: | .. |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/TLB/arch-support.txt b/Documentation/features/vm/TLB/arch-support.txt
index f7af9678eb66..2bdd3b6cee3c 100644
--- a/Documentation/features/vm/TLB/arch-support.txt
+++ b/Documentation/features/vm/TLB/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | TODO |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/huge-vmap/arch-support.txt b/Documentation/features/vm/huge-vmap/arch-support.txt
index d0713ccc7117..019131c5acce 100644
--- a/Documentation/features/vm/huge-vmap/arch-support.txt
+++ b/Documentation/features/vm/huge-vmap/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt
index 326e4797bc65..3a6b87de6a19 100644
--- a/Documentation/features/vm/ioremap_prot/arch-support.txt
+++ b/Documentation/features/vm/ioremap_prot/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | TODO | 11 | arm: | TODO |
12 | arm64: | TODO | 12 | arm64: | TODO |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/features/vm/numa-memblock/arch-support.txt b/Documentation/features/vm/numa-memblock/arch-support.txt
index 1a988052cd24..3004beb0fd71 100644
--- a/Documentation/features/vm/numa-memblock/arch-support.txt
+++ b/Documentation/features/vm/numa-memblock/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | .. | 11 | arm: | .. |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | .. | 13 | c6x: | .. |
14 | csky: | .. |
14 | h8300: | .. | 15 | h8300: | .. |
15 | hexagon: | .. | 16 | hexagon: | .. |
16 | ia64: | ok | 17 | ia64: | ok |
diff --git a/Documentation/features/vm/pte_special/arch-support.txt b/Documentation/features/vm/pte_special/arch-support.txt
index a8378424bc98..2dc5df6a1cf5 100644
--- a/Documentation/features/vm/pte_special/arch-support.txt
+++ b/Documentation/features/vm/pte_special/arch-support.txt
@@ -11,6 +11,7 @@
11 | arm: | ok | 11 | arm: | ok |
12 | arm64: | ok | 12 | arm64: | ok |
13 | c6x: | TODO | 13 | c6x: | TODO |
14 | csky: | TODO |
14 | h8300: | TODO | 15 | h8300: | TODO |
15 | hexagon: | TODO | 16 | hexagon: | TODO |
16 | ia64: | TODO | 17 | ia64: | TODO |
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index a1426cabcef1..41411b0c60a3 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -344,7 +344,9 @@ struct bus_attribute {
344 344
345Declaring: 345Declaring:
346 346
347BUS_ATTR(_name, _mode, _show, _store) 347static BUS_ATTR_RW(name);
348static BUS_ATTR_RO(name);
349static BUS_ATTR_WO(name);
348 350
349Creation/Removal: 351Creation/Removal:
350 352
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 6a47629ef8ed..59e86de662cd 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -11,19 +11,19 @@ Contents:
11 batman-adv 11 batman-adv
12 can 12 can
13 can_ucan_protocol 13 can_ucan_protocol
14 dpaa2/index 14 device_drivers/freescale/dpaa2/index
15 e100 15 device_drivers/intel/e100
16 e1000 16 device_drivers/intel/e1000
17 e1000e 17 device_drivers/intel/e1000e
18 fm10k 18 device_drivers/intel/fm10k
19 igb 19 device_drivers/intel/igb
20 igbvf 20 device_drivers/intel/igbvf
21 ixgb 21 device_drivers/intel/ixgb
22 ixgbe 22 device_drivers/intel/ixgbe
23 ixgbevf 23 device_drivers/intel/ixgbevf
24 i40e 24 device_drivers/intel/i40e
25 iavf 25 device_drivers/intel/iavf
26 ice 26 device_drivers/intel/ice
27 kapi 27 kapi
28 z8530book 28 z8530book
29 msg_zerocopy 29 msg_zerocopy
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt
index c9d052e0cf51..2df5894353d6 100644
--- a/Documentation/networking/rxrpc.txt
+++ b/Documentation/networking/rxrpc.txt
@@ -1000,51 +1000,6 @@ The kernel interface functions are as follows:
1000 size should be set when the call is begun. tx_total_len may not be less 1000 size should be set when the call is begun. tx_total_len may not be less
1001 than zero. 1001 than zero.
1002 1002
1003 (*) Check to see the completion state of a call so that the caller can assess
1004 whether it needs to be retried.
1005
1006 enum rxrpc_call_completion {
1007 RXRPC_CALL_SUCCEEDED,
1008 RXRPC_CALL_REMOTELY_ABORTED,
1009 RXRPC_CALL_LOCALLY_ABORTED,
1010 RXRPC_CALL_LOCAL_ERROR,
1011 RXRPC_CALL_NETWORK_ERROR,
1012 };
1013
1014 int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
1015 enum rxrpc_call_completion *_compl,
1016 u32 *_abort_code);
1017
1018 On return, -EINPROGRESS will be returned if the call is still ongoing; if
1019 it is finished, *_compl will be set to indicate the manner of completion,
1020 *_abort_code will be set to any abort code that occurred. 0 will be
1021 returned on a successful completion, -ECONNABORTED will be returned if the
1022 client failed due to a remote abort and anything else will return an
1023 appropriate error code.
1024
1025 The caller should look at this information to decide if it's worth
1026 retrying the call.
1027
1028 (*) Retry a client call.
1029
1030 int rxrpc_kernel_retry_call(struct socket *sock,
1031 struct rxrpc_call *call,
1032 struct sockaddr_rxrpc *srx,
1033 struct key *key);
1034
1035 This attempts to partially reinitialise a call and submit it again while
1036 reusing the original call's Tx queue to avoid the need to repackage and
1037 re-encrypt the data to be sent. call indicates the call to retry, srx the
1038 new address to send it to and key the encryption key to use for signing or
1039 encrypting the packets.
1040
1041 For this to work, the first Tx data packet must still be in the transmit
1042 queue, and currently this is only permitted for local and network errors
1043 and the call must not have been aborted. Any partially constructed Tx
1044 packet is left as is and can continue being filled afterwards.
1045
1046 It returns 0 if the call was requeued and an error otherwise.
1047
1048 (*) Get call RTT. 1003 (*) Get call RTT.
1049 1004
1050 u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call); 1005 u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call);
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
index b0dfdaaca512..fe8f741193be 100644
--- a/Documentation/networking/snmp_counter.rst
+++ b/Documentation/networking/snmp_counter.rst
@@ -336,7 +336,26 @@ time client replies ACK, this socket will get another chance to move
336to the accept queue. 336to the accept queue.
337 337
338 338
339TCP Fast Open 339* TcpEstabResets
340Defined in `RFC1213 tcpEstabResets`_.
341
342.. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48
343
344* TcpAttemptFails
345Defined in `RFC1213 tcpAttemptFails`_.
346
347.. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48
348
349* TcpOutRsts
350Defined in `RFC1213 tcpOutRsts`_. The RFC says this counter indicates
351the 'segments sent containing the RST flag', but in linux kernel, this
352couner indicates the segments kerenl tried to send. The sending
353process might be failed due to some errors (e.g. memory alloc failed).
354
355.. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52
356
357
358TCP Fast Path
340============ 359============
341When kernel receives a TCP packet, it has two paths to handler the 360When kernel receives a TCP packet, it has two paths to handler the
342packet, one is fast path, another is slow path. The comment in kernel 361packet, one is fast path, another is slow path. The comment in kernel
@@ -383,8 +402,6 @@ increase 1.
383 402
384TCP abort 403TCP abort
385======== 404========
386
387
388* TcpExtTCPAbortOnData 405* TcpExtTCPAbortOnData
389It means TCP layer has data in flight, but need to close the 406It means TCP layer has data in flight, but need to close the
390connection. So TCP layer sends a RST to the other side, indicate the 407connection. So TCP layer sends a RST to the other side, indicate the
@@ -545,7 +562,6 @@ packet yet, the sender would know packet 4 is out of order. The TCP
545stack of kernel will increase TcpExtTCPSACKReorder for both of the 562stack of kernel will increase TcpExtTCPSACKReorder for both of the
546above scenarios. 563above scenarios.
547 564
548
549DSACK 565DSACK
550===== 566=====
551The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report 567The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
@@ -566,13 +582,63 @@ The TCP stack receives an out of order duplicate packet, so it sends a
566DSACK to the sender. 582DSACK to the sender.
567 583
568* TcpExtTCPDSACKRecv 584* TcpExtTCPDSACKRecv
569The TCP stack receives a DSACK, which indicate an acknowledged 585The TCP stack receives a DSACK, which indicates an acknowledged
570duplicate packet is received. 586duplicate packet is received.
571 587
572* TcpExtTCPDSACKOfoRecv 588* TcpExtTCPDSACKOfoRecv
573The TCP stack receives a DSACK, which indicate an out of order 589The TCP stack receives a DSACK, which indicate an out of order
574duplicate packet is received. 590duplicate packet is received.
575 591
592invalid SACK and DSACK
593====================
594When a SACK (or DSACK) block is invalid, a corresponding counter would
595be updated. The validation method is base on the start/end sequence
596number of the SACK block. For more details, please refer the comment
597of the function tcp_is_sackblock_valid in the kernel source code. A
598SACK option could have up to 4 blocks, they are checked
599individually. E.g., if 3 blocks of a SACk is invalid, the
600corresponding counter would be updated 3 times. The comment of the
601`Add counters for discarded SACK blocks`_ patch has additional
602explaination:
603
604.. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
605
606* TcpExtTCPSACKDiscard
607This counter indicates how many SACK blocks are invalid. If the invalid
608SACK block is caused by ACK recording, the TCP stack will only ignore
609it and won't update this counter.
610
611* TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
612When a DSACK block is invalid, one of these two counters would be
613updated. Which counter will be updated depends on the undo_marker flag
614of the TCP socket. If the undo_marker is not set, the TCP stack isn't
615likely to re-transmit any packets, and we still receive an invalid
616DSACK block, the reason might be that the packet is duplicated in the
617middle of the network. In such scenario, TcpExtTCPDSACKIgnoredNoUndo
618will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
619will be updated. As implied in its name, it might be an old packet.
620
621SACK shift
622=========
623The linux networking stack stores data in sk_buff struct (skb for
624short). If a SACK block acrosses multiple skb, the TCP stack will try
625to re-arrange data in these skb. E.g. if a SACK block acknowledges seq
62610 to 15, skb1 has seq 10 to 13, skb2 has seq 14 to 20. The seq 14 and
62715 in skb2 would be moved to skb1. This operation is 'shift'. If a
628SACK block acknowledges seq 10 to 20, skb1 has seq 10 to 13, skb2 has
629seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
630discard, this operation is 'merge'.
631
632* TcpExtTCPSackShifted
633A skb is shifted
634
635* TcpExtTCPSackMerged
636A skb is merged
637
638* TcpExtTCPSackShiftFallback
639A skb should be shifted or merged, but the TCP stack doesn't do it for
640some reasons.
641
576TCP out of order 642TCP out of order
577=============== 643===============
578* TcpExtTCPOFOQueue 644* TcpExtTCPOFOQueue
@@ -662,6 +728,60 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_).
662.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9 728.. _RFC 5961 section 4.2: https://tools.ietf.org/html/rfc5961#page-9
663.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 729.. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11
664 730
731TCP receive window
732=================
733* TcpExtTCPWantZeroWindowAdv
734Depending on current memory usage, the TCP stack tries to set receive
735window to zero. But the receive window might still be a no-zero
736value. For example, if the previous window size is 10, and the TCP
737stack receives 3 bytes, the current window size would be 7 even if the
738window size calculated by the memory usage is zero.
739
740* TcpExtTCPToZeroWindowAdv
741The TCP receive window is set to zero from a no-zero value.
742
743* TcpExtTCPFromZeroWindowAdv
744The TCP receive window is set to no-zero value from zero.
745
746
747Delayed ACK
748==========
749The TCP Delayed ACK is a technique which is used for reducing the
750packet count in the network. For more details, please refer the
751`Delayed ACK wiki`_
752
753.. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment
754
755* TcpExtDelayedACKs
756A delayed ACK timer expires. The TCP stack will send a pure ACK packet
757and exit the delayed ACK mode.
758
759* TcpExtDelayedACKLocked
760A delayed ACK timer expires, but the TCP stack can't send an ACK
761immediately due to the socket is locked by a userspace program. The
762TCP stack will send a pure ACK later (after the userspace program
763unlock the socket). When the TCP stack sends the pure ACK later, the
764TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK
765mode.
766
767* TcpExtDelayedACKLost
768It will be updated when the TCP stack receives a packet which has been
769ACKed. A Delayed ACK loss might cause this issue, but it would also be
770triggered by other reasons, such as a packet is duplicated in the
771network.
772
773Tail Loss Probe (TLP)
774===================
775TLP is an algorithm which is used to detect TCP packet loss. For more
776details, please refer the `TLP paper`_.
777
778.. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01
779
780* TcpExtTCPLossProbes
781A TLP probe packet is sent.
782
783* TcpExtTCPLossProbeRecovery
784A packet loss is detected and recovered by TLP.
665 785
666examples 786examples
667======= 787=======
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 1be0b6f9e0cb..9d1432e0aaa8 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -417,7 +417,7 @@ is again deprecated and ts[2] holds a hardware timestamp if set.
417 417
418Hardware time stamping must also be initialized for each device driver 418Hardware time stamping must also be initialized for each device driver
419that is expected to do hardware time stamping. The parameter is defined in 419that is expected to do hardware time stamping. The parameter is defined in
420/include/linux/net_tstamp.h as: 420include/uapi/linux/net_tstamp.h as:
421 421
422struct hwtstamp_config { 422struct hwtstamp_config {
423 int flags; /* no flags defined right now, must be zero */ 423 int flags; /* no flags defined right now, must be zero */
@@ -487,7 +487,7 @@ enum {
487 HWTSTAMP_FILTER_PTP_V1_L4_EVENT, 487 HWTSTAMP_FILTER_PTP_V1_L4_EVENT,
488 488
489 /* for the complete list of values, please check 489 /* for the complete list of values, please check
490 * the include file /include/linux/net_tstamp.h 490 * the include file include/uapi/linux/net_tstamp.h
491 */ 491 */
492}; 492};
493 493
diff --git a/Documentation/trace/coresight-cpu-debug.txt b/Documentation/trace/coresight-cpu-debug.txt
index 89ab09e78e8d..f07e38094b40 100644
--- a/Documentation/trace/coresight-cpu-debug.txt
+++ b/Documentation/trace/coresight-cpu-debug.txt
@@ -165,7 +165,7 @@ Do some work...
165The same can also be done from an application program. 165The same can also be done from an application program.
166 166
167Disable specific CPU's specific idle state from cpuidle sysfs (see 167Disable specific CPU's specific idle state from cpuidle sysfs (see
168Documentation/cpuidle/sysfs.txt): 168Documentation/admin-guide/pm/cpuidle.rst):
169# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable 169# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
170 170
171 171
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
index 71d6d257074f..659bbc093b52 100644
--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
+++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
@@ -242,6 +242,6 @@ References
242========== 242==========
243 243
244.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf 244.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf
245.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf 245.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf
246.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) 246.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34)
247.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf 247.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt
index d9aed8303984..e8e8d14d3c4e 100644
--- a/Documentation/x86/resctrl_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com>
9Tony Luck <tony.luck@intel.com> 9Tony Luck <tony.luck@intel.com>
10Vikas Shivappa <vikas.shivappa@intel.com> 10Vikas Shivappa <vikas.shivappa@intel.com>
11 11
12This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo 12This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo
13flag bits: 13flag bits:
14RDT (Resource Director Technology) Allocation - "rdt_a" 14RDT (Resource Director Technology) Allocation - "rdt_a"
15CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" 15CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
diff --git a/MAINTAINERS b/MAINTAINERS
index 32d444476a90..51029a425dbe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3471,10 +3471,9 @@ F: drivers/i2c/busses/i2c-octeon*
3471F: drivers/i2c/busses/i2c-thunderx* 3471F: drivers/i2c/busses/i2c-thunderx*
3472 3472
3473CAVIUM LIQUIDIO NETWORK DRIVER 3473CAVIUM LIQUIDIO NETWORK DRIVER
3474M: Derek Chickles <derek.chickles@caviumnetworks.com> 3474M: Derek Chickles <dchickles@marvell.com>
3475M: Satanand Burla <satananda.burla@caviumnetworks.com> 3475M: Satanand Burla <sburla@marvell.com>
3476M: Felix Manlunas <felix.manlunas@caviumnetworks.com> 3476M: Felix Manlunas <fmanlunas@marvell.com>
3477M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
3478L: netdev@vger.kernel.org 3477L: netdev@vger.kernel.org
3479W: http://www.cavium.com 3478W: http://www.cavium.com
3480S: Supported 3479S: Supported
@@ -3951,7 +3950,7 @@ L: netdev@vger.kernel.org
3951S: Maintained 3950S: Maintained
3952F: drivers/net/ethernet/ti/cpmac.c 3951F: drivers/net/ethernet/ti/cpmac.c
3953 3952
3954CPU FREQUENCY DRIVERS 3953CPU FREQUENCY SCALING FRAMEWORK
3955M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 3954M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
3956M: Viresh Kumar <viresh.kumar@linaro.org> 3955M: Viresh Kumar <viresh.kumar@linaro.org>
3957L: linux-pm@vger.kernel.org 3956L: linux-pm@vger.kernel.org
@@ -3959,6 +3958,8 @@ S: Maintained
3959T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 3958T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
3960T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) 3959T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
3961B: https://bugzilla.kernel.org 3960B: https://bugzilla.kernel.org
3961F: Documentation/admin-guide/pm/cpufreq.rst
3962F: Documentation/admin-guide/pm/intel_pstate.rst
3962F: Documentation/cpu-freq/ 3963F: Documentation/cpu-freq/
3963F: Documentation/devicetree/bindings/cpufreq/ 3964F: Documentation/devicetree/bindings/cpufreq/
3964F: drivers/cpufreq/ 3965F: drivers/cpufreq/
@@ -4006,13 +4007,14 @@ S: Supported
4006F: drivers/cpuidle/cpuidle-exynos.c 4007F: drivers/cpuidle/cpuidle-exynos.c
4007F: arch/arm/mach-exynos/pm.c 4008F: arch/arm/mach-exynos/pm.c
4008 4009
4009CPUIDLE DRIVERS 4010CPU IDLE TIME MANAGEMENT FRAMEWORK
4010M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 4011M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
4011M: Daniel Lezcano <daniel.lezcano@linaro.org> 4012M: Daniel Lezcano <daniel.lezcano@linaro.org>
4012L: linux-pm@vger.kernel.org 4013L: linux-pm@vger.kernel.org
4013S: Maintained 4014S: Maintained
4014T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git 4015T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
4015B: https://bugzilla.kernel.org 4016B: https://bugzilla.kernel.org
4017F: Documentation/admin-guide/pm/cpuidle.rst
4016F: drivers/cpuidle/* 4018F: drivers/cpuidle/*
4017F: include/linux/cpuidle.h 4019F: include/linux/cpuidle.h
4018 4020
@@ -13820,8 +13822,9 @@ F: drivers/media/mmc/siano/
13820 13822
13821SIFIVE DRIVERS 13823SIFIVE DRIVERS
13822M: Palmer Dabbelt <palmer@sifive.com> 13824M: Palmer Dabbelt <palmer@sifive.com>
13825M: Paul Walmsley <paul.walmsley@sifive.com>
13823L: linux-riscv@lists.infradead.org 13826L: linux-riscv@lists.infradead.org
13824T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git 13827T: git git://github.com/sifive/riscv-linux.git
13825S: Supported 13828S: Supported
13826K: sifive 13829K: sifive
13827N: sifive 13830N: sifive
@@ -14432,6 +14435,11 @@ M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
14432S: Odd Fixes 14435S: Odd Fixes
14433F: drivers/staging/rtl8712/ 14436F: drivers/staging/rtl8712/
14434 14437
14438STAGING - REALTEK RTL8188EU DRIVERS
14439M: Larry Finger <Larry.Finger@lwfinger.net>
14440S: Odd Fixes
14441F: drivers/staging/rtl8188eu/
14442
14435STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER 14443STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
14436M: Sudip Mukherjee <sudipm.mukherjee@gmail.com> 14444M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
14437M: Teddy Wang <teddy.wang@siliconmotion.com> 14445M: Teddy Wang <teddy.wang@siliconmotion.com>
@@ -15802,7 +15810,6 @@ M: Alan Stern <stern@rowland.harvard.edu>
15802L: linux-usb@vger.kernel.org 15810L: linux-usb@vger.kernel.org
15803L: usb-storage@lists.one-eyed-alien.net 15811L: usb-storage@lists.one-eyed-alien.net
15804S: Maintained 15812S: Maintained
15805W: http://www.one-eyed-alien.net/~mdharm/linux-usb/
15806F: drivers/usb/storage/ 15813F: drivers/usb/storage/
15807 15814
15808USB MIDI DRIVER 15815USB MIDI DRIVER
diff --git a/Makefile b/Makefile
index 8c55b6404e19..f5b1d0d168e0 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
2VERSION = 5 2VERSION = 5
3PATCHLEVEL = 0 3PATCHLEVEL = 0
4SUBLEVEL = 0 4SUBLEVEL = 0
5EXTRAVERSION = -rc1 5EXTRAVERSION = -rc3
6NAME = Shy Crocodile 6NAME = Shy Crocodile
7 7
8# *DOCUMENTATION* 8# *DOCUMENTATION*
@@ -955,6 +955,7 @@ ifdef CONFIG_STACK_VALIDATION
955 endif 955 endif
956endif 956endif
957 957
958PHONY += prepare0
958 959
959ifeq ($(KBUILD_EXTMOD),) 960ifeq ($(KBUILD_EXTMOD),)
960core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ 961core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
@@ -1061,8 +1062,7 @@ scripts: scripts_basic scripts_dtc
1061# archprepare is used in arch Makefiles and when processed asm symlink, 1062# archprepare is used in arch Makefiles and when processed asm symlink,
1062# version.h and scripts_basic is processed / created. 1063# version.h and scripts_basic is processed / created.
1063 1064
1064# Listed in dependency order 1065PHONY += prepare archprepare prepare1 prepare2 prepare3
1065PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
1066 1066
1067# prepare3 is used to check if we are building in a separate output directory, 1067# prepare3 is used to check if we are building in a separate output directory,
1068# and if so do: 1068# and if so do:
@@ -1360,11 +1360,11 @@ mrproper: rm-dirs := $(wildcard $(MRPROPER_DIRS))
1360mrproper: rm-files := $(wildcard $(MRPROPER_FILES)) 1360mrproper: rm-files := $(wildcard $(MRPROPER_FILES))
1361mrproper-dirs := $(addprefix _mrproper_,scripts) 1361mrproper-dirs := $(addprefix _mrproper_,scripts)
1362 1362
1363PHONY += $(mrproper-dirs) mrproper archmrproper 1363PHONY += $(mrproper-dirs) mrproper
1364$(mrproper-dirs): 1364$(mrproper-dirs):
1365 $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@) 1365 $(Q)$(MAKE) $(clean)=$(patsubst _mrproper_%,%,$@)
1366 1366
1367mrproper: clean archmrproper $(mrproper-dirs) 1367mrproper: clean $(mrproper-dirs)
1368 $(call cmd,rmdirs) 1368 $(call cmd,rmdirs)
1369 $(call cmd,rmfiles) 1369 $(call cmd,rmfiles)
1370 1370
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index a3c9b346721d..f04bc3e15332 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -94,6 +94,28 @@
94 regulator-boot-on; 94 regulator-boot-on;
95 }; 95 };
96 96
97 baseboard_3v3: fixedregulator-3v3 {
98 /* TPS73701DCQ */
99 compatible = "regulator-fixed";
100 regulator-name = "baseboard_3v3";
101 regulator-min-microvolt = <3300000>;
102 regulator-max-microvolt = <3300000>;
103 vin-supply = <&vbat>;
104 regulator-always-on;
105 regulator-boot-on;
106 };
107
108 baseboard_1v8: fixedregulator-1v8 {
109 /* TPS73701DCQ */
110 compatible = "regulator-fixed";
111 regulator-name = "baseboard_1v8";
112 regulator-min-microvolt = <1800000>;
113 regulator-max-microvolt = <1800000>;
114 vin-supply = <&vbat>;
115 regulator-always-on;
116 regulator-boot-on;
117 };
118
97 backlight_lcd: backlight-regulator { 119 backlight_lcd: backlight-regulator {
98 compatible = "regulator-fixed"; 120 compatible = "regulator-fixed";
99 regulator-name = "lcd_backlight_pwr"; 121 regulator-name = "lcd_backlight_pwr";
@@ -105,7 +127,7 @@
105 127
106 sound { 128 sound {
107 compatible = "simple-audio-card"; 129 compatible = "simple-audio-card";
108 simple-audio-card,name = "DA850/OMAP-L138 EVM"; 130 simple-audio-card,name = "DA850-OMAPL138 EVM";
109 simple-audio-card,widgets = 131 simple-audio-card,widgets =
110 "Line", "Line In", 132 "Line", "Line In",
111 "Line", "Line Out"; 133 "Line", "Line Out";
@@ -210,10 +232,9 @@
210 232
211 /* Regulators */ 233 /* Regulators */
212 IOVDD-supply = <&vdcdc2_reg>; 234 IOVDD-supply = <&vdcdc2_reg>;
213 /* Derived from VBAT: Baseboard 3.3V / 1.8V */ 235 AVDD-supply = <&baseboard_3v3>;
214 AVDD-supply = <&vbat>; 236 DRVDD-supply = <&baseboard_3v3>;
215 DRVDD-supply = <&vbat>; 237 DVDD-supply = <&baseboard_1v8>;
216 DVDD-supply = <&vbat>;
217 }; 238 };
218 tca6416: gpio@20 { 239 tca6416: gpio@20 {
219 compatible = "ti,tca6416"; 240 compatible = "ti,tca6416";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 0177e3ed20fe..3a2fa6e035a3 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -39,9 +39,39 @@
39 }; 39 };
40 }; 40 };
41 41
42 vcc_5vd: fixedregulator-vcc_5vd {
43 compatible = "regulator-fixed";
44 regulator-name = "vcc_5vd";
45 regulator-min-microvolt = <5000000>;
46 regulator-max-microvolt = <5000000>;
47 regulator-boot-on;
48 };
49
50 vcc_3v3d: fixedregulator-vcc_3v3d {
51 /* TPS650250 - VDCDC1 */
52 compatible = "regulator-fixed";
53 regulator-name = "vcc_3v3d";
54 regulator-min-microvolt = <3300000>;
55 regulator-max-microvolt = <3300000>;
56 vin-supply = <&vcc_5vd>;
57 regulator-always-on;
58 regulator-boot-on;
59 };
60
61 vcc_1v8d: fixedregulator-vcc_1v8d {
62 /* TPS650250 - VDCDC2 */
63 compatible = "regulator-fixed";
64 regulator-name = "vcc_1v8d";
65 regulator-min-microvolt = <1800000>;
66 regulator-max-microvolt = <1800000>;
67 vin-supply = <&vcc_5vd>;
68 regulator-always-on;
69 regulator-boot-on;
70 };
71
42 sound { 72 sound {
43 compatible = "simple-audio-card"; 73 compatible = "simple-audio-card";
44 simple-audio-card,name = "DA850/OMAP-L138 LCDK"; 74 simple-audio-card,name = "DA850-OMAPL138 LCDK";
45 simple-audio-card,widgets = 75 simple-audio-card,widgets =
46 "Line", "Line In", 76 "Line", "Line In",
47 "Line", "Line Out"; 77 "Line", "Line Out";
@@ -221,6 +251,12 @@
221 compatible = "ti,tlv320aic3106"; 251 compatible = "ti,tlv320aic3106";
222 reg = <0x18>; 252 reg = <0x18>;
223 status = "okay"; 253 status = "okay";
254
255 /* Regulators */
256 IOVDD-supply = <&vcc_3v3d>;
257 AVDD-supply = <&vcc_3v3d>;
258 DRVDD-supply = <&vcc_3v3d>;
259 DVDD-supply = <&vcc_1v8d>;
224 }; 260 };
225}; 261};
226 262
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index cbaf06f2f78e..eb917462b219 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -36,8 +36,8 @@
36 compatible = "gpio-fan"; 36 compatible = "gpio-fan";
37 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>; 37 pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
38 pinctrl-names = "default"; 38 pinctrl-names = "default";
39 gpios = <&gpio1 14 GPIO_ACTIVE_LOW 39 gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
40 &gpio1 13 GPIO_ACTIVE_LOW>; 40 &gpio1 13 GPIO_ACTIVE_HIGH>;
41 gpio-fan,speed-map = <0 0 41 gpio-fan,speed-map = <0 0
42 3000 1 42 3000 1
43 6000 2>; 43 6000 2>;
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index e52ec1619b70..c4da635ee4ce 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -208,9 +208,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
208 .dev_id = "da830-mmc.0", 208 .dev_id = "da830-mmc.0",
209 .table = { 209 .table = {
210 /* gpio chip 1 contains gpio range 32-63 */ 210 /* gpio chip 1 contains gpio range 32-63 */
211 GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_CD_PIN, "cd", 211 GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_CD_PIN, "cd",
212 GPIO_ACTIVE_LOW), 212 GPIO_ACTIVE_LOW),
213 GPIO_LOOKUP("davinci_gpio.0", DA830_MMCSD_WP_PIN, "wp", 213 GPIO_LOOKUP("davinci_gpio", DA830_MMCSD_WP_PIN, "wp",
214 GPIO_ACTIVE_LOW), 214 GPIO_ACTIVE_LOW),
215 }, 215 },
216}; 216};
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 6a29baf0a289..44bca048dfd0 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -805,9 +805,9 @@ static struct gpiod_lookup_table mmc_gpios_table = {
805 .dev_id = "da830-mmc.0", 805 .dev_id = "da830-mmc.0",
806 .table = { 806 .table = {
807 /* gpio chip 2 contains gpio range 64-95 */ 807 /* gpio chip 2 contains gpio range 64-95 */
808 GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd", 808 GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_CD_PIN, "cd",
809 GPIO_ACTIVE_LOW), 809 GPIO_ACTIVE_LOW),
810 GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp", 810 GPIO_LOOKUP("davinci_gpio", DA850_MMCSD_WP_PIN, "wp",
811 GPIO_ACTIVE_HIGH), 811 GPIO_ACTIVE_HIGH),
812 }, 812 },
813}; 813};
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index f53a461a606f..f7fa960c23e3 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -117,9 +117,9 @@ static struct platform_device davinci_nand_device = {
117static struct gpiod_lookup_table i2c_recovery_gpiod_table = { 117static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
118 .dev_id = "i2c_davinci.1", 118 .dev_id = "i2c_davinci.1",
119 .table = { 119 .table = {
120 GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SDA_PIN, "sda", 120 GPIO_LOOKUP("davinci_gpio", DM355_I2C_SDA_PIN, "sda",
121 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 121 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
122 GPIO_LOOKUP("davinci_gpio.0", DM355_I2C_SCL_PIN, "scl", 122 GPIO_LOOKUP("davinci_gpio", DM355_I2C_SCL_PIN, "scl",
123 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 123 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
124 }, 124 },
125}; 125};
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index e1428115067f..b80c4ee76217 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -660,9 +660,9 @@ static struct i2c_board_info __initdata i2c_info[] = {
660static struct gpiod_lookup_table i2c_recovery_gpiod_table = { 660static struct gpiod_lookup_table i2c_recovery_gpiod_table = {
661 .dev_id = "i2c_davinci.1", 661 .dev_id = "i2c_davinci.1",
662 .table = { 662 .table = {
663 GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SDA_PIN, "sda", 663 GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SDA_PIN, "sda",
664 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 664 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
665 GPIO_LOOKUP("davinci_gpio.0", DM644X_I2C_SCL_PIN, "scl", 665 GPIO_LOOKUP("davinci_gpio", DM644X_I2C_SCL_PIN, "scl",
666 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), 666 GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
667 }, 667 },
668}; 668};
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 8e8d51f4a276..94c4f126ef86 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -134,9 +134,9 @@ static const short hawk_mmcsd0_pins[] = {
134static struct gpiod_lookup_table mmc_gpios_table = { 134static struct gpiod_lookup_table mmc_gpios_table = {
135 .dev_id = "da830-mmc.0", 135 .dev_id = "da830-mmc.0",
136 .table = { 136 .table = {
137 GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_CD_PIN, "cd", 137 GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_CD_PIN, "cd",
138 GPIO_ACTIVE_LOW), 138 GPIO_ACTIVE_LOW),
139 GPIO_LOOKUP("davinci_gpio.0", DA850_HAWK_MMCSD_WP_PIN, "wp", 139 GPIO_LOOKUP("davinci_gpio", DA850_HAWK_MMCSD_WP_PIN, "wp",
140 GPIO_ACTIVE_LOW), 140 GPIO_ACTIVE_LOW),
141 }, 141 },
142}; 142};
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a109f6482413..8dfad012dfae 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -390,10 +390,14 @@ static int __ref impd1_probe(struct lm_device *dev)
390 char *mmciname; 390 char *mmciname;
391 391
392 lookup = devm_kzalloc(&dev->dev, 392 lookup = devm_kzalloc(&dev->dev,
393 sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), 393 struct_size(lookup, table, 3),
394 GFP_KERNEL); 394 GFP_KERNEL);
395 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL); 395 chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
396 mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id); 396 mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
397 "lm%x:00700", dev->id);
398 if (!lookup || !chipname || !mmciname)
399 return -ENOMEM;
400
397 lookup->dev_id = mmciname; 401 lookup->dev_id = mmciname;
398 /* 402 /*
399 * Offsets on GPIO block 1: 403 * Offsets on GPIO block 1:
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 5fb6f79059a8..afd98971d903 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -32,6 +32,8 @@ void __iomem *rst_manager_base_addr;
32void __iomem *sdr_ctl_base_addr; 32void __iomem *sdr_ctl_base_addr;
33unsigned long socfpga_cpu1start_addr; 33unsigned long socfpga_cpu1start_addr;
34 34
35extern void __init socfpga_reset_init(void);
36
35static void __init socfpga_sysmgr_init(void) 37static void __init socfpga_sysmgr_init(void)
36{ 38{
37 struct device_node *np; 39 struct device_node *np;
@@ -64,6 +66,7 @@ static void __init socfpga_init_irq(void)
64 66
65 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) 67 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
66 socfpga_init_ocram_ecc(); 68 socfpga_init_ocram_ecc();
69 socfpga_reset_init();
67} 70}
68 71
69static void __init socfpga_arria10_init_irq(void) 72static void __init socfpga_arria10_init_irq(void)
@@ -74,6 +77,7 @@ static void __init socfpga_arria10_init_irq(void)
74 socfpga_init_arria10_l2_ecc(); 77 socfpga_init_arria10_l2_ecc();
75 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM)) 78 if (IS_ENABLED(CONFIG_EDAC_ALTERA_OCRAM))
76 socfpga_init_arria10_ocram_ecc(); 79 socfpga_init_arria10_ocram_ecc();
80 socfpga_reset_init();
77} 81}
78 82
79static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd) 83static void socfpga_cyclone5_restart(enum reboot_mode mode, const char *cmd)
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
index 29ea7e81ec4c..329f8ceeebea 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
@@ -183,7 +183,7 @@
183 pinctrl-0 = <&cp0_pcie_pins>; 183 pinctrl-0 = <&cp0_pcie_pins>;
184 num-lanes = <4>; 184 num-lanes = <4>;
185 num-viewport = <8>; 185 num-viewport = <8>;
186 reset-gpio = <&cp0_gpio1 20 GPIO_ACTIVE_LOW>; 186 reset-gpios = <&cp0_gpio2 20 GPIO_ACTIVE_LOW>;
187 status = "okay"; 187 status = "okay";
188}; 188};
189 189
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 7d94c1fa592a..7f799cb5668e 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -28,6 +28,23 @@
28 method = "smc"; 28 method = "smc";
29 }; 29 };
30 30
31 reserved-memory {
32 #address-cells = <2>;
33 #size-cells = <2>;
34 ranges;
35
36 /*
37 * This area matches the mapping done with a
38 * mainline U-Boot, and should be updated by the
39 * bootloader.
40 */
41
42 psci-area@4000000 {
43 reg = <0x0 0x4000000 0x0 0x200000>;
44 no-map;
45 };
46 };
47
31 ap806 { 48 ap806 {
32 #address-cells = <2>; 49 #address-cells = <2>;
33 #size-cells = <2>; 50 #size-cells = <2>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 3ef443cfbab6..c8432e24207e 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -506,11 +506,15 @@ CONFIG_SND_SOC_ROCKCHIP=m
506CONFIG_SND_SOC_ROCKCHIP_SPDIF=m 506CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
507CONFIG_SND_SOC_ROCKCHIP_RT5645=m 507CONFIG_SND_SOC_ROCKCHIP_RT5645=m
508CONFIG_SND_SOC_RK3399_GRU_SOUND=m 508CONFIG_SND_SOC_RK3399_GRU_SOUND=m
509CONFIG_SND_MESON_AXG_SOUND_CARD=m
509CONFIG_SND_SOC_SAMSUNG=y 510CONFIG_SND_SOC_SAMSUNG=y
510CONFIG_SND_SOC_RCAR=m 511CONFIG_SND_SOC_RCAR=m
511CONFIG_SND_SOC_AK4613=m 512CONFIG_SND_SOC_AK4613=m
512CONFIG_SND_SIMPLE_CARD=m 513CONFIG_SND_SIMPLE_CARD=m
513CONFIG_SND_AUDIO_GRAPH_CARD=m 514CONFIG_SND_AUDIO_GRAPH_CARD=m
515CONFIG_SND_SOC_ES7134=m
516CONFIG_SND_SOC_ES7241=m
517CONFIG_SND_SOC_TAS571X=m
514CONFIG_I2C_HID=m 518CONFIG_I2C_HID=m
515CONFIG_USB=y 519CONFIG_USB=y
516CONFIG_USB_OTG=y 520CONFIG_USB_OTG=y
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
index 2173ad32d550..1c9a3a0c5fa5 100644
--- a/arch/arm64/include/asm/asm-prototypes.h
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -2,7 +2,7 @@
2#ifndef __ASM_PROTOTYPES_H 2#ifndef __ASM_PROTOTYPES_H
3#define __ASM_PROTOTYPES_H 3#define __ASM_PROTOTYPES_H
4/* 4/*
5 * CONFIG_MODEVERIONS requires a C declaration to generate the appropriate CRC 5 * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC
6 * for each symbol. Since commit: 6 * for each symbol. Since commit:
7 * 7 *
8 * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm") 8 * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm")
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 13dd42c3ad4e..926434f413fa 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -58,6 +58,10 @@
58 */ 58 */
59#define ARCH_DMA_MINALIGN (128) 59#define ARCH_DMA_MINALIGN (128)
60 60
61#ifdef CONFIG_KASAN_SW_TAGS
62#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
63#endif
64
61#ifndef __ASSEMBLY__ 65#ifndef __ASSEMBLY__
62 66
63#include <linux/bitops.h> 67#include <linux/bitops.h>
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 7689c7aa1d77..3e8063f4f9d3 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,6 +16,8 @@
16#ifndef __ASM_MMU_H 16#ifndef __ASM_MMU_H
17#define __ASM_MMU_H 17#define __ASM_MMU_H
18 18
19#include <asm/cputype.h>
20
19#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */ 21#define MMCF_AARCH32 0x1 /* mm context flag for AArch32 executables */
20#define USER_ASID_BIT 48 22#define USER_ASID_BIT 48
21#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) 23#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
@@ -44,6 +46,48 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
44 cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0); 46 cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
45} 47}
46 48
49static inline bool arm64_kernel_use_ng_mappings(void)
50{
51 bool tx1_bug;
52
53 /* What's a kpti? Use global mappings if we don't know. */
54 if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
55 return false;
56
57 /*
58 * Note: this function is called before the CPU capabilities have
59 * been configured, so our early mappings will be global. If we
60 * later determine that kpti is required, then
61 * kpti_install_ng_mappings() will make them non-global.
62 */
63 if (arm64_kernel_unmapped_at_el0())
64 return true;
65
66 if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
67 return false;
68
69 /*
70 * KASLR is enabled so we're going to be enabling kpti on non-broken
71 * CPUs regardless of their susceptibility to Meltdown. Rather
72 * than force everybody to go through the G -> nG dance later on,
73 * just put down non-global mappings from the beginning.
74 */
75 if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
76 tx1_bug = false;
77#ifndef MODULE
78 } else if (!static_branch_likely(&arm64_const_caps_ready)) {
79 extern const struct midr_range cavium_erratum_27456_cpus[];
80
81 tx1_bug = is_midr_in_range_list(read_cpuid_id(),
82 cavium_erratum_27456_cpus);
83#endif
84 } else {
85 tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
86 }
87
88 return !tx1_bug && kaslr_offset() > 0;
89}
90
47typedef void (*bp_hardening_cb_t)(void); 91typedef void (*bp_hardening_cb_t)(void);
48 92
49struct bp_hardening_data { 93struct bp_hardening_data {
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 78b942c1bea4..986e41c4c32b 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -37,8 +37,8 @@
37#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) 37#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
38#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) 38#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
39 39
40#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) 40#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
41#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) 41#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
42 42
43#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) 43#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
44#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) 44#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 09ac548c9d44..9950bb0cbd52 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -553,7 +553,7 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = {
553#endif 553#endif
554 554
555#ifdef CONFIG_CAVIUM_ERRATUM_27456 555#ifdef CONFIG_CAVIUM_ERRATUM_27456
556static const struct midr_range cavium_erratum_27456_cpus[] = { 556const struct midr_range cavium_erratum_27456_cpus[] = {
557 /* Cavium ThunderX, T88 pass 1.x - 2.1 */ 557 /* Cavium ThunderX, T88 pass 1.x - 2.1 */
558 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1), 558 MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
559 /* Cavium ThunderX, T81 pass 1.0 */ 559 /* Cavium ThunderX, T81 pass 1.0 */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4f272399de89..f6d84e2c92fe 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -983,7 +983,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
983 983
984 /* Useful for KASLR robustness */ 984 /* Useful for KASLR robustness */
985 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) 985 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
986 return true; 986 return kaslr_offset() > 0;
987 987
988 /* Don't force KPTI for CPUs that are not vulnerable */ 988 /* Don't force KPTI for CPUs that are not vulnerable */
989 if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list)) 989 if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
@@ -1003,7 +1003,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
1003 static bool kpti_applied = false; 1003 static bool kpti_applied = false;
1004 int cpu = smp_processor_id(); 1004 int cpu = smp_processor_id();
1005 1005
1006 if (kpti_applied) 1006 /*
1007 * We don't need to rewrite the page-tables if either we've done
1008 * it already or we have KASLR enabled and therefore have not
1009 * created any global mappings at all.
1010 */
1011 if (kpti_applied || kaslr_offset() > 0)
1007 return; 1012 return;
1008 1013
1009 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); 1014 remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c7213674cb24..15d79a8e5e5e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -475,6 +475,7 @@ ENDPROC(__primary_switched)
475 475
476ENTRY(kimage_vaddr) 476ENTRY(kimage_vaddr)
477 .quad _text - TEXT_OFFSET 477 .quad _text - TEXT_OFFSET
478EXPORT_SYMBOL(kimage_vaddr)
478 479
479/* 480/*
480 * If we're fortunate enough to boot at EL2, ensure that the world is 481 * If we're fortunate enough to boot at EL2, ensure that the world is
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index f0e6ab8abe9c..ba6b41790fcd 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17#include <asm/cacheflush.h>
17#include <asm/fixmap.h> 18#include <asm/fixmap.h>
18#include <asm/kernel-pgtable.h> 19#include <asm/kernel-pgtable.h>
19#include <asm/memory.h> 20#include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
43 return ret; 44 return ret;
44} 45}
45 46
46static __init const u8 *get_cmdline(void *fdt) 47static __init const u8 *kaslr_get_cmdline(void *fdt)
47{ 48{
48 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE; 49 static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;
49 50
@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
109 * Check if 'nokaslr' appears on the command line, and 110 * Check if 'nokaslr' appears on the command line, and
110 * return 0 if that is the case. 111 * return 0 if that is the case.
111 */ 112 */
112 cmdline = get_cmdline(fdt); 113 cmdline = kaslr_get_cmdline(fdt);
113 str = strstr(cmdline, "nokaslr"); 114 str = strstr(cmdline, "nokaslr");
114 if (str == cmdline || (str > cmdline && *(str - 1) == ' ')) 115 if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
115 return 0; 116 return 0;
@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
169 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; 170 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
170 module_alloc_base &= PAGE_MASK; 171 module_alloc_base &= PAGE_MASK;
171 172
173 __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
174 __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
175
172 return offset; 176 return offset;
173} 177}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 10e33860e47a..f2c211a6229b 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -87,7 +87,9 @@ static int setup_dtb(struct kimage *image,
87 87
88 /* add kaslr-seed */ 88 /* add kaslr-seed */
89 ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED); 89 ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
90 if (ret && (ret != -FDT_ERR_NOTFOUND)) 90 if (ret == -FDT_ERR_NOTFOUND)
91 ret = 0;
92 else if (ret)
91 goto out; 93 goto out;
92 94
93 if (rng_is_initialized()) { 95 if (rng_is_initialized()) {
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild
index 0febf1a07c30..6c6f6301012e 100644
--- a/arch/c6x/include/uapi/asm/Kbuild
+++ b/arch/c6x/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
4generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/csky/include/asm/io.h b/arch/csky/include/asm/io.h
index ecae6b358f95..c1dfa9c10e36 100644
--- a/arch/csky/include/asm/io.h
+++ b/arch/csky/include/asm/io.h
@@ -15,6 +15,31 @@ extern void iounmap(void *addr);
15extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr, 15extern int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
16 size_t size, unsigned long flags); 16 size_t size, unsigned long flags);
17 17
18/*
19 * I/O memory access primitives. Reads are ordered relative to any
20 * following Normal memory access. Writes are ordered relative to any prior
21 * Normal memory access.
22 *
23 * For CACHEV1 (807, 810), store instruction could fast retire, so we need
24 * another mb() to prevent st fast retire.
25 *
26 * For CACHEV2 (860), store instruction with PAGE_ATTR_NO_BUFFERABLE won't
27 * fast retire.
28 */
29#define readb(c) ({ u8 __v = readb_relaxed(c); rmb(); __v; })
30#define readw(c) ({ u16 __v = readw_relaxed(c); rmb(); __v; })
31#define readl(c) ({ u32 __v = readl_relaxed(c); rmb(); __v; })
32
33#ifdef CONFIG_CPU_HAS_CACHEV2
34#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); })
35#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); })
36#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); })
37#else
38#define writeb(v,c) ({ wmb(); writeb_relaxed((v),(c)); mb(); })
39#define writew(v,c) ({ wmb(); writew_relaxed((v),(c)); mb(); })
40#define writel(v,c) ({ wmb(); writel_relaxed((v),(c)); mb(); })
41#endif
42
18#define ioremap_nocache(phy, sz) ioremap(phy, sz) 43#define ioremap_nocache(phy, sz) ioremap(phy, sz)
19#define ioremap_wc ioremap_nocache 44#define ioremap_wc ioremap_nocache
20#define ioremap_wt ioremap_nocache 45#define ioremap_wt ioremap_nocache
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index bf4f4a0e140e..d213bb47b717 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -24,41 +24,34 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
24 24
25extern void pgd_init(unsigned long *p); 25extern void pgd_init(unsigned long *p);
26 26
27static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 27static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
28 unsigned long address)
29{ 28{
30 pte_t *pte; 29 pte_t *pte;
31 unsigned long *kaddr, i; 30 unsigned long i;
32 31
33 pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 32 pte = (pte_t *) __get_free_page(GFP_KERNEL);
34 PTE_ORDER); 33 if (!pte)
35 kaddr = (unsigned long *)pte; 34 return NULL;
36 if (address & 0x80000000) 35
37 for (i = 0; i < (PAGE_SIZE/4); i++) 36 for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
38 *(kaddr + i) = 0x1; 37 (pte + i)->pte_low = _PAGE_GLOBAL;
39 else
40 clear_page(kaddr);
41 38
42 return pte; 39 return pte;
43} 40}
44 41
45static inline struct page *pte_alloc_one(struct mm_struct *mm, 42static inline struct page *pte_alloc_one(struct mm_struct *mm)
46 unsigned long address)
47{ 43{
48 struct page *pte; 44 struct page *pte;
49 unsigned long *kaddr, i; 45
50 46 pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
51 pte = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, PTE_ORDER); 47 if (!pte)
52 if (pte) { 48 return NULL;
53 kaddr = kmap_atomic(pte); 49
54 if (address & 0x80000000) { 50 if (!pgtable_page_ctor(pte)) {
55 for (i = 0; i < (PAGE_SIZE/4); i++) 51 __free_page(pte);
56 *(kaddr + i) = 0x1; 52 return NULL;
57 } else
58 clear_page(kaddr);
59 kunmap_atomic(kaddr);
60 pgtable_page_ctor(pte);
61 } 53 }
54
62 return pte; 55 return pte;
63} 56}
64 57
diff --git a/arch/csky/kernel/module.c b/arch/csky/kernel/module.c
index 65abab0c7a47..b5ad7d9de18c 100644
--- a/arch/csky/kernel/module.c
+++ b/arch/csky/kernel/module.c
@@ -12,7 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <asm/pgtable.h> 13#include <asm/pgtable.h>
14 14
15#if defined(__CSKYABIV2__) 15#ifdef CONFIG_CPU_CK810
16#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000) 16#define IS_BSR32(hi16, lo16) (((hi16) & 0xFC00) == 0xE000)
17#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0) 17#define IS_JSRI32(hi16, lo16) ((hi16) == 0xEAE0)
18 18
@@ -25,6 +25,26 @@
25 *(uint16_t *)(addr) = 0xE8Fa; \ 25 *(uint16_t *)(addr) = 0xE8Fa; \
26 *((uint16_t *)(addr) + 1) = 0x0000; \ 26 *((uint16_t *)(addr) + 1) = 0x0000; \
27} while (0) 27} while (0)
28
29static void jsri_2_lrw_jsr(uint32_t *location)
30{
31 uint16_t *location_tmp = (uint16_t *)location;
32
33 if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
34 return;
35
36 if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
37 /* jsri 0x... --> lrw r26, 0x... */
38 CHANGE_JSRI_TO_LRW(location);
39 /* lsli r0, r0 --> jsr r26 */
40 SET_JSR32_R26(location + 1);
41 }
42}
43#else
44static void inline jsri_2_lrw_jsr(uint32_t *location)
45{
46 return;
47}
28#endif 48#endif
29 49
30int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, 50int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
@@ -35,9 +55,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
35 Elf32_Sym *sym; 55 Elf32_Sym *sym;
36 uint32_t *location; 56 uint32_t *location;
37 short *temp; 57 short *temp;
38#if defined(__CSKYABIV2__)
39 uint16_t *location_tmp;
40#endif
41 58
42 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 59 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
43 /* This is where to make the change */ 60 /* This is where to make the change */
@@ -59,18 +76,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
59 case R_CSKY_PCRELJSR_IMM11BY2: 76 case R_CSKY_PCRELJSR_IMM11BY2:
60 break; 77 break;
61 case R_CSKY_PCRELJSR_IMM26BY2: 78 case R_CSKY_PCRELJSR_IMM26BY2:
62#if defined(__CSKYABIV2__) 79 jsri_2_lrw_jsr(location);
63 location_tmp = (uint16_t *)location;
64 if (IS_BSR32(*location_tmp, *(location_tmp + 1)))
65 break;
66
67 if (IS_JSRI32(*location_tmp, *(location_tmp + 1))) {
68 /* jsri 0x... --> lrw r26, 0x... */
69 CHANGE_JSRI_TO_LRW(location);
70 /* lsli r0, r0 --> jsr r26 */
71 SET_JSR32_R26(location + 1);
72 }
73#endif
74 break; 80 break;
75 case R_CSKY_ADDR_HI16: 81 case R_CSKY_ADDR_HI16:
76 temp = ((short *)location) + 1; 82 temp = ((short *)location) + 1;
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 4003ddc616e1..f801f3708a89 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -37,8 +37,6 @@ libs-y += arch/$(ARCH)/lib/
37 37
38boot := arch/h8300/boot 38boot := arch/h8300/boot
39 39
40archmrproper:
41
42archclean: 40archclean:
43 $(Q)$(MAKE) $(clean)=$(boot) 41 $(Q)$(MAKE) $(clean)=$(boot)
44 42
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
index 0febf1a07c30..6c6f6301012e 100644
--- a/arch/h8300/include/uapi/asm/Kbuild
+++ b/arch/h8300/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
4generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild
index c1b06dcf6cf8..61d955c1747a 100644
--- a/arch/hexagon/include/uapi/asm/Kbuild
+++ b/arch/hexagon/include/uapi/asm/Kbuild
@@ -1,3 +1,4 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += shmparam.h
3generic-y += ucontext.h 4generic-y += ucontext.h
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 320d86f192ee..171290f9f1de 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -16,8 +16,6 @@ KBUILD_DEFCONFIG := generic_defconfig
16NM := $(CROSS_COMPILE)nm -B 16NM := $(CROSS_COMPILE)nm -B
17READELF := $(CROSS_COMPILE)readelf 17READELF := $(CROSS_COMPILE)readelf
18 18
19export AWK
20
21CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__ 19CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
22 20
23OBJCOPYFLAGS := --strip-all 21OBJCOPYFLAGS := --strip-all
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index 960bf1e4be53..b8b3525271fa 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += shmparam.h
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 97823ec46e97..28823e3db825 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -2,4 +2,5 @@ include include/uapi/asm-generic/Kbuild.asm
2 2
3generated-y += unistd_32.h 3generated-y += unistd_32.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += shmparam.h
5generic-y += ucontext.h 6generic-y += ucontext.h
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 787290781b8c..0d14f51d0002 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -3155,6 +3155,7 @@ config MIPS32_O32
3155config MIPS32_N32 3155config MIPS32_N32
3156 bool "Kernel support for n32 binaries" 3156 bool "Kernel support for n32 binaries"
3157 depends on 64BIT 3157 depends on 64BIT
3158 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
3158 select COMPAT 3159 select COMPAT
3159 select MIPS32_COMPAT 3160 select MIPS32_COMPAT
3160 select SYSVIPC_COMPAT if SYSVIPC 3161 select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49e608e..fe3773539eff 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
173 pm_power_off = bcm47xx_machine_halt; 173 pm_power_off = bcm47xx_machine_halt;
174} 174}
175 175
176#ifdef CONFIG_BCM47XX_BCMA
177static struct device * __init bcm47xx_setup_device(void)
178{
179 struct device *dev;
180 int err;
181
182 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
183 if (!dev)
184 return NULL;
185
186 err = dev_set_name(dev, "bcm47xx_soc");
187 if (err) {
188 pr_err("Failed to set SoC device name: %d\n", err);
189 kfree(dev);
190 return NULL;
191 }
192
193 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
194 if (err)
195 pr_err("Failed to set SoC DMA mask: %d\n", err);
196
197 return dev;
198}
199#endif
200
176/* 201/*
177 * This finishes bus initialization doing things that were not possible without 202 * This finishes bus initialization doing things that were not possible without
178 * kmalloc. Make sure to call it late enough (after mm_init). 203 * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
183 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { 208 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
184 int err; 209 int err;
185 210
211 bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
212 if (!bcm47xx_bus.bcma.dev)
213 panic("Failed to setup SoC device\n");
214
186 err = bcma_host_soc_init(&bcm47xx_bus.bcma); 215 err = bcma_host_soc_init(&bcm47xx_bus.bcma);
187 if (err) 216 if (err)
188 panic("Failed to initialize BCMA bus (err %d)", err); 217 panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
235#endif 264#endif
236#ifdef CONFIG_BCM47XX_BCMA 265#ifdef CONFIG_BCM47XX_BCMA
237 case BCM47XX_BUS_TYPE_BCMA: 266 case BCM47XX_BUS_TYPE_BCMA:
267 if (device_register(bcm47xx_bus.bcma.dev))
268 pr_err("Failed to register SoC device\n");
238 bcma_bus_register(&bcm47xx_bus.bcma.bus); 269 bcma_bus_register(&bcm47xx_bus.bcma.bus);
239 break; 270 break;
240#endif 271#endif
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 2c79ab52977a..8bf43c5a7bc7 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -98,7 +98,7 @@ static void octeon_kexec_smp_down(void *ignored)
98 " sync \n" 98 " sync \n"
99 " synci ($0) \n"); 99 " synci ($0) \n");
100 100
101 relocated_kexec_smp_wait(NULL); 101 kexec_reboot();
102} 102}
103#endif 103#endif
104 104
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 4e4ec779f182..6f981af67826 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -66,6 +66,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
66# CONFIG_SERIAL_8250_PCI is not set 66# CONFIG_SERIAL_8250_PCI is not set
67CONFIG_SERIAL_8250_NR_UARTS=1 67CONFIG_SERIAL_8250_NR_UARTS=1
68CONFIG_SERIAL_8250_RUNTIME_UARTS=1 68CONFIG_SERIAL_8250_RUNTIME_UARTS=1
69CONFIG_SERIAL_OF_PLATFORM=y
69CONFIG_SERIAL_AR933X=y 70CONFIG_SERIAL_AR933X=y
70CONFIG_SERIAL_AR933X_CONSOLE=y 71CONFIG_SERIAL_AR933X_CONSOLE=y
71# CONFIG_HW_RANDOM is not set 72# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
index c6b63a409641..6dd8ad2409dc 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -18,8 +18,6 @@
18#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32) 18#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
19#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) 19#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
20 20
21#define MIPS_CPU_TIMER_IRQ 7
22
23#define MAX_IM 5 21#define MAX_IM 5
24 22
25#endif /* _FALCON_IRQ__ */ 23#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 141076325307..0b424214a5e9 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -19,8 +19,6 @@
19 19
20#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) 20#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
21 21
22#define MIPS_CPU_TIMER_IRQ 7
23
24#define MAX_IM 5 22#define MAX_IM 5
25 23
26#endif 24#endif
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 6256d35dbf4d..bedb5047aff3 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
74 get_order(VDMA_PGTBL_SIZE)); 74 get_order(VDMA_PGTBL_SIZE));
75 BUG_ON(!pgtbl); 75 BUG_ON(!pgtbl);
76 dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE); 76 dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
77 pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl); 77 pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
78 78
79 /* 79 /*
80 * Clear the R4030 translation table 80 * Clear the R4030 translation table
81 */ 81 */
82 vdma_pgtbl_init(); 82 vdma_pgtbl_init();
83 83
84 r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl)); 84 r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
85 CPHYSADDR((unsigned long)pgtbl));
85 r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE); 86 r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
86 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0); 87 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
87 88
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index f0bc3312ed11..6549499eb202 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
224 .irq_set_type = ltq_eiu_settype, 224 .irq_set_type = ltq_eiu_settype,
225}; 225};
226 226
227static void ltq_hw_irqdispatch(int module) 227static void ltq_hw_irq_handler(struct irq_desc *desc)
228{ 228{
229 int module = irq_desc_get_irq(desc) - 2;
229 u32 irq; 230 u32 irq;
231 int hwirq;
230 232
231 irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR); 233 irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
232 if (irq == 0) 234 if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irqdispatch(int module)
237 * other bits might be bogus 239 * other bits might be bogus
238 */ 240 */
239 irq = __fls(irq); 241 irq = __fls(irq);
240 do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module)); 242 hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
243 generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
241 244
242 /* if this is a EBU irq, we need to ack it or get a deadlock */ 245 /* if this is a EBU irq, we need to ack it or get a deadlock */
243 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT) 246 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irqdispatch(int module)
245 LTQ_EBU_PCC_ISTAT); 248 LTQ_EBU_PCC_ISTAT);
246} 249}
247 250
248#define DEFINE_HWx_IRQDISPATCH(x) \
249 static void ltq_hw ## x ## _irqdispatch(void) \
250 { \
251 ltq_hw_irqdispatch(x); \
252 }
253DEFINE_HWx_IRQDISPATCH(0)
254DEFINE_HWx_IRQDISPATCH(1)
255DEFINE_HWx_IRQDISPATCH(2)
256DEFINE_HWx_IRQDISPATCH(3)
257DEFINE_HWx_IRQDISPATCH(4)
258
259#if MIPS_CPU_TIMER_IRQ == 7
260static void ltq_hw5_irqdispatch(void)
261{
262 do_IRQ(MIPS_CPU_TIMER_IRQ);
263}
264#else
265DEFINE_HWx_IRQDISPATCH(5)
266#endif
267
268static void ltq_hw_irq_handler(struct irq_desc *desc)
269{
270 ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
271}
272
273asmlinkage void plat_irq_dispatch(void)
274{
275 unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
276 int irq;
277
278 if (!pending) {
279 spurious_interrupt();
280 return;
281 }
282
283 pending >>= CAUSEB_IP;
284 while (pending) {
285 irq = fls(pending) - 1;
286 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
287 pending &= ~BIT(irq);
288 }
289}
290
291static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) 251static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
292{ 252{
293 struct irq_chip *chip = &ltq_irq_type; 253 struct irq_chip *chip = &ltq_irq_type;
@@ -343,38 +303,13 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
343 for (i = 0; i < MAX_IM; i++) 303 for (i = 0; i < MAX_IM; i++)
344 irq_set_chained_handler(i + 2, ltq_hw_irq_handler); 304 irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
345 305
346 if (cpu_has_vint) {
347 pr_info("Setting up vectored interrupts\n");
348 set_vi_handler(2, ltq_hw0_irqdispatch);
349 set_vi_handler(3, ltq_hw1_irqdispatch);
350 set_vi_handler(4, ltq_hw2_irqdispatch);
351 set_vi_handler(5, ltq_hw3_irqdispatch);
352 set_vi_handler(6, ltq_hw4_irqdispatch);
353 set_vi_handler(7, ltq_hw5_irqdispatch);
354 }
355
356 ltq_domain = irq_domain_add_linear(node, 306 ltq_domain = irq_domain_add_linear(node,
357 (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, 307 (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
358 &irq_domain_ops, 0); 308 &irq_domain_ops, 0);
359 309
360#ifndef CONFIG_MIPS_MT_SMP
361 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
362 IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
363#else
364 set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
365 IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
366#endif
367
368 /* tell oprofile which irq to use */ 310 /* tell oprofile which irq to use */
369 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); 311 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
370 312
371 /*
372 * if the timer irq is not one of the mips irqs we need to
373 * create a mapping
374 */
375 if (MIPS_CPU_TIMER_IRQ != 7)
376 irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
377
378 /* the external interrupts are optional and xway only */ 313 /* the external interrupts are optional and xway only */
379 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway"); 314 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
380 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) { 315 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
@@ -411,7 +346,7 @@ EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
411 346
412unsigned int get_c0_compare_int(void) 347unsigned int get_c0_compare_int(void)
413{ 348{
414 return MIPS_CPU_TIMER_IRQ; 349 return CP0_LEGACY_COMPARE_IRQ;
415} 350}
416 351
417static struct of_device_id __initdata of_irq_ids[] = { 352static struct of_device_id __initdata of_irq_ids[] = {
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 982859f2b2a3..5e6a1a45cbd2 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -129,9 +129,9 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
129 unsigned long flags; 129 unsigned long flags;
130 130
131 ch->desc = 0; 131 ch->desc = 0;
132 ch->desc_base = dma_zalloc_coherent(ch->dev, 132 ch->desc_base = dma_alloc_coherent(ch->dev,
133 LTQ_DESC_NUM * LTQ_DESC_SIZE, 133 LTQ_DESC_NUM * LTQ_DESC_SIZE,
134 &ch->phys, GFP_ATOMIC); 134 &ch->phys, GFP_ATOMIC);
135 135
136 spin_lock_irqsave(&ltq_dma_lock, flags); 136 spin_lock_irqsave(&ltq_dma_lock, flags);
137 ltq_dma_w32(ch->nr, LTQ_DMA_CS); 137 ltq_dma_w32(ch->nr, LTQ_DMA_CS);
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10e..288b58b00dc8 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
369 int irq; 369 int irq;
370 struct irq_chip *msi; 370 struct irq_chip *msi;
371 371
372 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) { 372 if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
373 return 0;
374 } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
373 msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0; 375 msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
374 msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1; 376 msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
375 msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2; 377 msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 0a935c136ec2..ac3482882cf9 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -3,9 +3,6 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
3 3
4KBUILD_DEFCONFIG := defconfig 4KBUILD_DEFCONFIG := defconfig
5 5
6comma = ,
7
8
9ifdef CONFIG_FUNCTION_TRACER 6ifdef CONFIG_FUNCTION_TRACER
10arch-y += -malways-save-lp -mno-relax 7arch-y += -malways-save-lp -mno-relax
11endif 8endif
@@ -54,8 +51,6 @@ endif
54boot := arch/nds32/boot 51boot := arch/nds32/boot
55core-y += $(boot)/dts/ 52core-y += $(boot)/dts/
56 53
57.PHONY: FORCE
58
59Image: vmlinux 54Image: vmlinux
60 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 55 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
61 56
@@ -68,9 +63,6 @@ prepare: vdso_prepare
68vdso_prepare: prepare0 63vdso_prepare: prepare0
69 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h 64 $(Q)$(MAKE) $(build)=arch/nds32/kernel/vdso include/generated/vdso-offsets.h
70 65
71CLEAN_FILES += include/asm-nds32/constants.h*
72
73# We use MRPROPER_FILES and CLEAN_FILES now
74archclean: 66archclean:
75 $(Q)$(MAKE) $(clean)=$(boot) 67 $(Q)$(MAKE) $(clean)=$(boot)
76 68
diff --git a/arch/openrisc/Makefile b/arch/openrisc/Makefile
index 70e06d34006c..bf10141c7426 100644
--- a/arch/openrisc/Makefile
+++ b/arch/openrisc/Makefile
@@ -20,7 +20,6 @@
20KBUILD_DEFCONFIG := or1ksim_defconfig 20KBUILD_DEFCONFIG := or1ksim_defconfig
21 21
22OBJCOPYFLAGS := -O binary -R .note -R .comment -S 22OBJCOPYFLAGS := -O binary -R .note -R .comment -S
23LDFLAGS_vmlinux :=
24LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 23LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
25 24
26KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__ 25KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
@@ -50,5 +49,3 @@ else
50BUILTIN_DTB := n 49BUILTIN_DTB := n
51endif 50endif
52core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/ 51core-$(BUILTIN_DTB) += arch/openrisc/boot/dts/
53
54all: vmlinux
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index bc8191a34db7..a44682c8adc3 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -58,8 +58,12 @@
58/* Ensure that addr is below task's addr_limit */ 58/* Ensure that addr is below task's addr_limit */
59#define __addr_ok(addr) ((unsigned long) addr < get_fs()) 59#define __addr_ok(addr) ((unsigned long) addr < get_fs())
60 60
61#define access_ok(addr, size) \ 61#define access_ok(addr, size) \
62 __range_ok((unsigned long)addr, (unsigned long)size) 62({ \
63 unsigned long __ao_addr = (unsigned long)(addr); \
64 unsigned long __ao_size = (unsigned long)(size); \
65 __range_ok(__ao_addr, __ao_size); \
66})
63 67
64/* 68/*
65 * These are the main single-value transfer routines. They automatically 69 * These are the main single-value transfer routines. They automatically
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild
index 0febf1a07c30..6c6f6301012e 100644
--- a/arch/openrisc/include/uapi/asm/Kbuild
+++ b/arch/openrisc/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
4generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
47 PERF_REG_POWERPC_DAR, 47 PERF_REG_POWERPC_DAR,
48 PERF_REG_POWERPC_DSISR, 48 PERF_REG_POWERPC_DSISR,
49 PERF_REG_POWERPC_SIER, 49 PERF_REG_POWERPC_SIER,
50 PERF_REG_POWERPC_MMCRA,
50 PERF_REG_POWERPC_MAX, 51 PERF_REG_POWERPC_MAX,
51}; 52};
52#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ 53#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 57deb1e9ffea..20cc816b3508 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -852,11 +852,12 @@ start_here:
852 852
853 /* set up the PTE pointers for the Abatron bdiGDB. 853 /* set up the PTE pointers for the Abatron bdiGDB.
854 */ 854 */
855 tovirt(r6,r6)
856 lis r5, abatron_pteptrs@h 855 lis r5, abatron_pteptrs@h
857 ori r5, r5, abatron_pteptrs@l 856 ori r5, r5, abatron_pteptrs@l
858 stw r5, 0xf0(0) /* Must match your Abatron config file */ 857 stw r5, 0xf0(0) /* Must match your Abatron config file */
859 tophys(r5,r5) 858 tophys(r5,r5)
859 lis r6, swapper_pg_dir@h
860 ori r6, r6, swapper_pg_dir@l
860 stw r6, 0(r5) 861 stw r6, 0(r5)
861 862
862/* Now turn on the MMU for real! */ 863/* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index bd5e6834ca69..6794466f6420 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -755,11 +755,12 @@ SYSCALL_DEFINE0(rt_sigreturn)
755 if (restore_tm_sigcontexts(current, &uc->uc_mcontext, 755 if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
756 &uc_transact->uc_mcontext)) 756 &uc_transact->uc_mcontext))
757 goto badframe; 757 goto badframe;
758 } 758 } else
759#endif 759#endif
760 /* Fall through, for non-TM restore */ 760 {
761 if (!MSR_TM_ACTIVE(msr)) {
762 /* 761 /*
762 * Fall through, for non-TM restore
763 *
763 * Unset MSR[TS] on the thread regs since MSR from user 764 * Unset MSR[TS] on the thread regs since MSR from user
764 * context does not have MSR active, and recheckpoint was 765 * context does not have MSR active, and recheckpoint was
765 * not called since restore_tm_sigcontexts() was not called 766 * not called since restore_tm_sigcontexts() was not called
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 29746dc28df5..517662a56bdc 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -967,13 +967,6 @@ out:
967} 967}
968#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 968#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
969 969
970#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
971unsigned long __init arch_syscall_addr(int nr)
972{
973 return sys_call_table[nr*2];
974}
975#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
976
977#ifdef PPC64_ELF_ABI_v1 970#ifdef PPC64_ELF_ABI_v1
978char *arch_ftrace_match_adjust(char *str, const char *search) 971char *arch_ftrace_match_adjust(char *str, const char *search)
979{ 972{
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 5c36b3a8d47a..3349f3f8fe84 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -70,6 +70,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
70 PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar), 70 PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
71 PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr), 71 PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
72 PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar), 72 PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
73 PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
73}; 74};
74 75
75u64 perf_reg_value(struct pt_regs *regs, int idx) 76u64 perf_reg_value(struct pt_regs *regs, int idx)
@@ -83,6 +84,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
83 !is_sier_available())) 84 !is_sier_available()))
84 return 0; 85 return 0;
85 86
87 if (idx == PERF_REG_POWERPC_MMCRA &&
88 (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
89 IS_ENABLED(CONFIG_PPC32)))
90 return 0;
91
86 return regs_get_register(regs, pt_regs_offset[idx]); 92 return regs_get_register(regs, pt_regs_offset[idx]);
87} 93}
88 94
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index a1aaa1569d7c..f0e488d97567 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -237,12 +237,12 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
237 continue; 237 continue;
238 238
239 seq_printf(m, "PPC4XX OCM : %d\n", ocm->index); 239 seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
240 seq_printf(m, "PhysAddr : %pa[p]\n", &(ocm->phys)); 240 seq_printf(m, "PhysAddr : %pa\n", &(ocm->phys));
241 seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal); 241 seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
242 seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal); 242 seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
243 seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal); 243 seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
244 244
245 seq_printf(m, "NC.PhysAddr : %pa[p]\n", &(ocm->nc.phys)); 245 seq_printf(m, "NC.PhysAddr : %pa\n", &(ocm->nc.phys));
246 seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt); 246 seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
247 seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal); 247 seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
248 seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree); 248 seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
@@ -252,7 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
252 blk->size, blk->owner); 252 blk->size, blk->owner);
253 } 253 }
254 254
255 seq_printf(m, "\nC.PhysAddr : %pa[p]\n", &(ocm->c.phys)); 255 seq_printf(m, "\nC.PhysAddr : %pa\n", &(ocm->c.phys));
256 seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt); 256 seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
257 seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal); 257 seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
258 seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree); 258 seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index e66644e0fb40..9438fa0fc355 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -538,8 +538,7 @@ static void __init chrp_init_IRQ(void)
538 /* see if there is a keyboard in the device tree 538 /* see if there is a keyboard in the device tree
539 with a parent of type "adb" */ 539 with a parent of type "adb" */
540 for_each_node_by_name(kbd, "keyboard") 540 for_each_node_by_name(kbd, "keyboard")
541 if (kbd->parent && kbd->parent->type 541 if (of_node_is_type(kbd->parent, "adb"))
542 && strcmp(kbd->parent->type, "adb") == 0)
543 break; 542 break;
544 of_node_put(kbd); 543 of_node_put(kbd);
545 if (kbd) 544 if (kbd)
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index d18d16489a15..bdf9b716e848 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -255,7 +255,7 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
255 255
256 chan->ring_size = ring_size; 256 chan->ring_size = ring_size;
257 257
258 chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev, 258 chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
259 ring_size * sizeof(u64), 259 ring_size * sizeof(u64),
260 &chan->ring_dma, GFP_KERNEL); 260 &chan->ring_dma, GFP_KERNEL);
261 261
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index d7f742ed48ba..3f58c7dbd581 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -564,7 +564,7 @@ struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
564 } 564 }
565 } else { 565 } else {
566 /* Create a group for 1 GPU and attached NPUs for POWER8 */ 566 /* Create a group for 1 GPU and attached NPUs for POWER8 */
567 pe->npucomp = kzalloc(sizeof(pe->npucomp), GFP_KERNEL); 567 pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
568 table_group = &pe->npucomp->table_group; 568 table_group = &pe->npucomp->table_group;
569 table_group->ops = &pnv_npu_peers_ops; 569 table_group->ops = &pnv_npu_peers_ops;
570 iommu_register_group(table_group, hose->global_number, 570 iommu_register_group(table_group, hose->global_number,
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1d6406a051f1..7db3119f8a5b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2681,7 +2681,8 @@ static void pnv_pci_ioda_setup_iommu_api(void)
2681 list_for_each_entry(hose, &hose_list, list_node) { 2681 list_for_each_entry(hose, &hose_list, list_node) {
2682 phb = hose->private_data; 2682 phb = hose->private_data;
2683 2683
2684 if (phb->type == PNV_PHB_NPU_NVLINK) 2684 if (phb->type == PNV_PHB_NPU_NVLINK ||
2685 phb->type == PNV_PHB_NPU_OCAPI)
2685 continue; 2686 continue;
2686 2687
2687 list_for_each_entry(pe, &phb->ioda.pe_list, list) { 2688 list_for_each_entry(pe, &phb->ioda.pe_list, list) {
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 7725825d887d..37a77e57893e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -264,7 +264,9 @@ void __init pSeries_final_fixup(void)
264 if (!of_device_is_compatible(nvdn->parent, 264 if (!of_device_is_compatible(nvdn->parent,
265 "ibm,power9-npu")) 265 "ibm,power9-npu"))
266 continue; 266 continue;
267#ifdef CONFIG_PPC_POWERNV
267 WARN_ON_ONCE(pnv_npu2_init(hose)); 268 WARN_ON_ONCE(pnv_npu2_init(hose));
269#endif
268 break; 270 break;
269 } 271 }
270 } 272 }
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 8b0ebf3940d2..ebed46f80254 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -756,9 +756,10 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
756 } 756 }
757 757
758 /* Initialize outbound message descriptor ring */ 758 /* Initialize outbound message descriptor ring */
759 rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev, 759 rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
760 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE, 760 rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
761 &rmu->msg_tx_ring.phys, GFP_KERNEL); 761 &rmu->msg_tx_ring.phys,
762 GFP_KERNEL);
762 if (!rmu->msg_tx_ring.virt) { 763 if (!rmu->msg_tx_ring.virt) {
763 rc = -ENOMEM; 764 rc = -ENOMEM;
764 goto out_dma; 765 goto out_dma;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e0d7d61779a6..feeeaa60697c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -28,11 +28,13 @@ config RISCV
28 select GENERIC_STRNLEN_USER 28 select GENERIC_STRNLEN_USER
29 select GENERIC_SMP_IDLE_THREAD 29 select GENERIC_SMP_IDLE_THREAD
30 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A 30 select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
31 select HAVE_ARCH_AUDITSYSCALL
31 select HAVE_MEMBLOCK_NODE_MAP 32 select HAVE_MEMBLOCK_NODE_MAP
32 select HAVE_DMA_CONTIGUOUS 33 select HAVE_DMA_CONTIGUOUS
33 select HAVE_FUTEX_CMPXCHG if FUTEX 34 select HAVE_FUTEX_CMPXCHG if FUTEX
34 select HAVE_GENERIC_DMA_COHERENT 35 select HAVE_GENERIC_DMA_COHERENT
35 select HAVE_PERF_EVENTS 36 select HAVE_PERF_EVENTS
37 select HAVE_SYSCALL_TRACEPOINTS
36 select IRQ_DOMAIN 38 select IRQ_DOMAIN
37 select RISCV_ISA_A if SMP 39 select RISCV_ISA_A if SMP
38 select SPARSE_IRQ 40 select SPARSE_IRQ
@@ -40,6 +42,7 @@ config RISCV
40 select HAVE_ARCH_TRACEHOOK 42 select HAVE_ARCH_TRACEHOOK
41 select HAVE_PCI 43 select HAVE_PCI
42 select MODULES_USE_ELF_RELA if MODULES 44 select MODULES_USE_ELF_RELA if MODULES
45 select MODULE_SECTIONS if MODULES
43 select THREAD_INFO_IN_TASK 46 select THREAD_INFO_IN_TASK
44 select PCI_DOMAINS_GENERIC if PCI 47 select PCI_DOMAINS_GENERIC if PCI
45 select PCI_MSI if PCI 48 select PCI_MSI if PCI
@@ -152,7 +155,6 @@ choice
152 bool "2GiB" 155 bool "2GiB"
153 config MAXPHYSMEM_128GB 156 config MAXPHYSMEM_128GB
154 depends on 64BIT && CMODEL_MEDANY 157 depends on 64BIT && CMODEL_MEDANY
155 select MODULE_SECTIONS if MODULES
156 bool "128GiB" 158 bool "128GiB"
157endchoice 159endchoice
158 160
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
index cd2af4b013e3..46202dad365d 100644
--- a/arch/riscv/include/asm/module.h
+++ b/arch/riscv/include/asm/module.h
@@ -9,12 +9,12 @@
9#define MODULE_ARCH_VERMAGIC "riscv" 9#define MODULE_ARCH_VERMAGIC "riscv"
10 10
11struct module; 11struct module;
12u64 module_emit_got_entry(struct module *mod, u64 val); 12unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
13u64 module_emit_plt_entry(struct module *mod, u64 val); 13unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
14 14
15#ifdef CONFIG_MODULE_SECTIONS 15#ifdef CONFIG_MODULE_SECTIONS
16struct mod_section { 16struct mod_section {
17 struct elf64_shdr *shdr; 17 Elf_Shdr *shdr;
18 int num_entries; 18 int num_entries;
19 int max_entries; 19 int max_entries;
20}; 20};
@@ -26,18 +26,18 @@ struct mod_arch_specific {
26}; 26};
27 27
28struct got_entry { 28struct got_entry {
29 u64 symbol_addr; /* the real variable address */ 29 unsigned long symbol_addr; /* the real variable address */
30}; 30};
31 31
32static inline struct got_entry emit_got_entry(u64 val) 32static inline struct got_entry emit_got_entry(unsigned long val)
33{ 33{
34 return (struct got_entry) {val}; 34 return (struct got_entry) {val};
35} 35}
36 36
37static inline struct got_entry *get_got_entry(u64 val, 37static inline struct got_entry *get_got_entry(unsigned long val,
38 const struct mod_section *sec) 38 const struct mod_section *sec)
39{ 39{
40 struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr; 40 struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
41 int i; 41 int i;
42 for (i = 0; i < sec->num_entries; i++) { 42 for (i = 0; i < sec->num_entries; i++) {
43 if (got[i].symbol_addr == val) 43 if (got[i].symbol_addr == val)
@@ -62,7 +62,9 @@ struct plt_entry {
62#define REG_T0 0x5 62#define REG_T0 0x5
63#define REG_T1 0x6 63#define REG_T1 0x6
64 64
65static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt) 65static inline struct plt_entry emit_plt_entry(unsigned long val,
66 unsigned long plt,
67 unsigned long got_plt)
66{ 68{
67 /* 69 /*
68 * U-Type encoding: 70 * U-Type encoding:
@@ -76,7 +78,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
76 * +------------+------------+--------+----------+----------+ 78 * +------------+------------+--------+----------+----------+
77 * 79 *
78 */ 80 */
79 u64 offset = got_plt - plt; 81 unsigned long offset = got_plt - plt;
80 u32 hi20 = (offset + 0x800) & 0xfffff000; 82 u32 hi20 = (offset + 0x800) & 0xfffff000;
81 u32 lo12 = (offset - hi20); 83 u32 lo12 = (offset - hi20);
82 return (struct plt_entry) { 84 return (struct plt_entry) {
@@ -86,7 +88,7 @@ static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
86 }; 88 };
87} 89}
88 90
89static inline int get_got_plt_idx(u64 val, const struct mod_section *sec) 91static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
90{ 92{
91 struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr; 93 struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
92 int i; 94 int i;
@@ -97,9 +99,9 @@ static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
97 return -1; 99 return -1;
98} 100}
99 101
100static inline struct plt_entry *get_plt_entry(u64 val, 102static inline struct plt_entry *get_plt_entry(unsigned long val,
101 const struct mod_section *sec_plt, 103 const struct mod_section *sec_plt,
102 const struct mod_section *sec_got_plt) 104 const struct mod_section *sec_got_plt)
103{ 105{
104 struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; 106 struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
105 int got_plt_idx = get_got_plt_idx(val, sec_got_plt); 107 int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index bbe1862e8f80..d35ec2f41381 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -113,6 +113,11 @@ static inline void frame_pointer_set(struct pt_regs *regs,
113 SET_FP(regs, val); 113 SET_FP(regs, val);
114} 114}
115 115
116static inline unsigned long regs_return_value(struct pt_regs *regs)
117{
118 return regs->a0;
119}
120
116#endif /* __ASSEMBLY__ */ 121#endif /* __ASSEMBLY__ */
117 122
118#endif /* _ASM_RISCV_PTRACE_H */ 123#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 8d25f8904c00..bba3da6ef157 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -18,6 +18,7 @@
18#ifndef _ASM_RISCV_SYSCALL_H 18#ifndef _ASM_RISCV_SYSCALL_H
19#define _ASM_RISCV_SYSCALL_H 19#define _ASM_RISCV_SYSCALL_H
20 20
21#include <uapi/linux/audit.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
22#include <linux/err.h> 23#include <linux/err.h>
23 24
@@ -99,4 +100,13 @@ static inline void syscall_set_arguments(struct task_struct *task,
99 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0)); 100 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
100} 101}
101 102
103static inline int syscall_get_arch(void)
104{
105#ifdef CONFIG_64BIT
106 return AUDIT_ARCH_RISCV64;
107#else
108 return AUDIT_ARCH_RISCV32;
109#endif
110}
111
102#endif /* _ASM_RISCV_SYSCALL_H */ 112#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index f8fa1cd2dad9..1c9cc8389928 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -80,13 +80,19 @@ struct thread_info {
80#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ 80#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
81#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ 81#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
82#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ 82#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
83#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
83 84
84#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 85#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
85#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 86#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
86#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 87#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
87#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 88#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
89#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
90#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
88 91
89#define _TIF_WORK_MASK \ 92#define _TIF_WORK_MASK \
90 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED) 93 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
91 94
95#define _TIF_SYSCALL_WORK \
96 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
97
92#endif /* _ASM_RISCV_THREAD_INFO_H */ 98#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index fef96f117b4d..073ee80fdf74 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -19,3 +19,5 @@
19#define __ARCH_WANT_SYS_CLONE 19#define __ARCH_WANT_SYS_CLONE
20 20
21#include <uapi/asm/unistd.h> 21#include <uapi/asm/unistd.h>
22
23#define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 13d4826ab2a1..355166f57205 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -201,7 +201,7 @@ handle_syscall:
201 REG_S s2, PT_SEPC(sp) 201 REG_S s2, PT_SEPC(sp)
202 /* Trace syscalls, but only if requested by the user. */ 202 /* Trace syscalls, but only if requested by the user. */
203 REG_L t0, TASK_TI_FLAGS(tp) 203 REG_L t0, TASK_TI_FLAGS(tp)
204 andi t0, t0, _TIF_SYSCALL_TRACE 204 andi t0, t0, _TIF_SYSCALL_WORK
205 bnez t0, handle_syscall_trace_enter 205 bnez t0, handle_syscall_trace_enter
206check_syscall_nr: 206check_syscall_nr:
207 /* Check to make sure we don't jump to a bogus syscall number. */ 207 /* Check to make sure we don't jump to a bogus syscall number. */
@@ -221,7 +221,7 @@ ret_from_syscall:
221 REG_S a0, PT_A0(sp) 221 REG_S a0, PT_A0(sp)
222 /* Trace syscalls, but only if requested by the user. */ 222 /* Trace syscalls, but only if requested by the user. */
223 REG_L t0, TASK_TI_FLAGS(tp) 223 REG_L t0, TASK_TI_FLAGS(tp)
224 andi t0, t0, _TIF_SYSCALL_TRACE 224 andi t0, t0, _TIF_SYSCALL_WORK
225 bnez t0, handle_syscall_trace_exit 225 bnez t0, handle_syscall_trace_exit
226 226
227ret_from_exception: 227ret_from_exception:
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
index bbbd26e19bfd..c9ae48333114 100644
--- a/arch/riscv/kernel/module-sections.c
+++ b/arch/riscv/kernel/module-sections.c
@@ -9,14 +9,14 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/module.h> 10#include <linux/module.h>
11 11
12u64 module_emit_got_entry(struct module *mod, u64 val) 12unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
13{ 13{
14 struct mod_section *got_sec = &mod->arch.got; 14 struct mod_section *got_sec = &mod->arch.got;
15 int i = got_sec->num_entries; 15 int i = got_sec->num_entries;
16 struct got_entry *got = get_got_entry(val, got_sec); 16 struct got_entry *got = get_got_entry(val, got_sec);
17 17
18 if (got) 18 if (got)
19 return (u64)got; 19 return (unsigned long)got;
20 20
21 /* There is no duplicate entry, create a new one */ 21 /* There is no duplicate entry, create a new one */
22 got = (struct got_entry *)got_sec->shdr->sh_addr; 22 got = (struct got_entry *)got_sec->shdr->sh_addr;
@@ -25,10 +25,10 @@ u64 module_emit_got_entry(struct module *mod, u64 val)
25 got_sec->num_entries++; 25 got_sec->num_entries++;
26 BUG_ON(got_sec->num_entries > got_sec->max_entries); 26 BUG_ON(got_sec->num_entries > got_sec->max_entries);
27 27
28 return (u64)&got[i]; 28 return (unsigned long)&got[i];
29} 29}
30 30
31u64 module_emit_plt_entry(struct module *mod, u64 val) 31unsigned long module_emit_plt_entry(struct module *mod, unsigned long val)
32{ 32{
33 struct mod_section *got_plt_sec = &mod->arch.got_plt; 33 struct mod_section *got_plt_sec = &mod->arch.got_plt;
34 struct got_entry *got_plt; 34 struct got_entry *got_plt;
@@ -37,27 +37,29 @@ u64 module_emit_plt_entry(struct module *mod, u64 val)
37 int i = plt_sec->num_entries; 37 int i = plt_sec->num_entries;
38 38
39 if (plt) 39 if (plt)
40 return (u64)plt; 40 return (unsigned long)plt;
41 41
42 /* There is no duplicate entry, create a new one */ 42 /* There is no duplicate entry, create a new one */
43 got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr; 43 got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
44 got_plt[i] = emit_got_entry(val); 44 got_plt[i] = emit_got_entry(val);
45 plt = (struct plt_entry *)plt_sec->shdr->sh_addr; 45 plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
46 plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]); 46 plt[i] = emit_plt_entry(val,
47 (unsigned long)&plt[i],
48 (unsigned long)&got_plt[i]);
47 49
48 plt_sec->num_entries++; 50 plt_sec->num_entries++;
49 got_plt_sec->num_entries++; 51 got_plt_sec->num_entries++;
50 BUG_ON(plt_sec->num_entries > plt_sec->max_entries); 52 BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
51 53
52 return (u64)&plt[i]; 54 return (unsigned long)&plt[i];
53} 55}
54 56
55static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y) 57static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
56{ 58{
57 return x->r_info == y->r_info && x->r_addend == y->r_addend; 59 return x->r_info == y->r_info && x->r_addend == y->r_addend;
58} 60}
59 61
60static bool duplicate_rela(const Elf64_Rela *rela, int idx) 62static bool duplicate_rela(const Elf_Rela *rela, int idx)
61{ 63{
62 int i; 64 int i;
63 for (i = 0; i < idx; i++) { 65 for (i = 0; i < idx; i++) {
@@ -67,13 +69,13 @@ static bool duplicate_rela(const Elf64_Rela *rela, int idx)
67 return false; 69 return false;
68} 70}
69 71
70static void count_max_entries(Elf64_Rela *relas, int num, 72static void count_max_entries(Elf_Rela *relas, int num,
71 unsigned int *plts, unsigned int *gots) 73 unsigned int *plts, unsigned int *gots)
72{ 74{
73 unsigned int type, i; 75 unsigned int type, i;
74 76
75 for (i = 0; i < num; i++) { 77 for (i = 0; i < num; i++) {
76 type = ELF64_R_TYPE(relas[i].r_info); 78 type = ELF_RISCV_R_TYPE(relas[i].r_info);
77 if (type == R_RISCV_CALL_PLT) { 79 if (type == R_RISCV_CALL_PLT) {
78 if (!duplicate_rela(relas, i)) 80 if (!duplicate_rela(relas, i))
79 (*plts)++; 81 (*plts)++;
@@ -118,9 +120,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
118 120
119 /* Calculate the maxinum number of entries */ 121 /* Calculate the maxinum number of entries */
120 for (i = 0; i < ehdr->e_shnum; i++) { 122 for (i = 0; i < ehdr->e_shnum; i++) {
121 Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset; 123 Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
122 int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela); 124 int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
123 Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info; 125 Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
124 126
125 if (sechdrs[i].sh_type != SHT_RELA) 127 if (sechdrs[i].sh_type != SHT_RELA)
126 continue; 128 continue;
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 60f1e02eed36..2ae5e0284f56 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -18,12 +18,15 @@
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19#include <asm/syscall.h> 19#include <asm/syscall.h>
20#include <asm/thread_info.h> 20#include <asm/thread_info.h>
21#include <linux/audit.h>
21#include <linux/ptrace.h> 22#include <linux/ptrace.h>
22#include <linux/elf.h> 23#include <linux/elf.h>
23#include <linux/regset.h> 24#include <linux/regset.h>
24#include <linux/sched.h> 25#include <linux/sched.h>
25#include <linux/sched/task_stack.h> 26#include <linux/sched/task_stack.h>
26#include <linux/tracehook.h> 27#include <linux/tracehook.h>
28
29#define CREATE_TRACE_POINTS
27#include <trace/events/syscalls.h> 30#include <trace/events/syscalls.h>
28 31
29enum riscv_regset { 32enum riscv_regset {
@@ -163,15 +166,19 @@ void do_syscall_trace_enter(struct pt_regs *regs)
163 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 166 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
164 trace_sys_enter(regs, syscall_get_nr(current, regs)); 167 trace_sys_enter(regs, syscall_get_nr(current, regs));
165#endif 168#endif
169
170 audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
166} 171}
167 172
168void do_syscall_trace_exit(struct pt_regs *regs) 173void do_syscall_trace_exit(struct pt_regs *regs)
169{ 174{
175 audit_syscall_exit(regs);
176
170 if (test_thread_flag(TIF_SYSCALL_TRACE)) 177 if (test_thread_flag(TIF_SYSCALL_TRACE))
171 tracehook_report_syscall_exit(regs, 0); 178 tracehook_report_syscall_exit(regs, 0);
172 179
173#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS 180#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
174 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 181 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
175 trace_sys_exit(regs, regs->regs[0]); 182 trace_sys_exit(regs, regs_return_value(regs));
176#endif 183#endif
177} 184}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index fc8006a042eb..6e079e94b638 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -149,7 +149,14 @@ asmlinkage void __init setup_vm(void)
149 149
150void __init parse_dtb(unsigned int hartid, void *dtb) 150void __init parse_dtb(unsigned int hartid, void *dtb)
151{ 151{
152 early_init_dt_scan(__va(dtb)); 152 if (!early_init_dt_scan(__va(dtb)))
153 return;
154
155 pr_err("No DTB passed to the kernel\n");
156#ifdef CONFIG_CMDLINE_FORCE
157 strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
158 pr_info("Forcing kernel command line to: %s\n", boot_command_line);
159#endif
153} 160}
154 161
155static void __init setup_bootmem(void) 162static void __init setup_bootmem(void)
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 57b1383e5ef7..246635eac7bb 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -23,6 +23,7 @@
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/delay.h>
26 27
27#include <asm/sbi.h> 28#include <asm/sbi.h>
28#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
@@ -31,6 +32,7 @@
31enum ipi_message_type { 32enum ipi_message_type {
32 IPI_RESCHEDULE, 33 IPI_RESCHEDULE,
33 IPI_CALL_FUNC, 34 IPI_CALL_FUNC,
35 IPI_CPU_STOP,
34 IPI_MAX 36 IPI_MAX
35}; 37};
36 38
@@ -66,6 +68,13 @@ int setup_profiling_timer(unsigned int multiplier)
66 return -EINVAL; 68 return -EINVAL;
67} 69}
68 70
71static void ipi_stop(void)
72{
73 set_cpu_online(smp_processor_id(), false);
74 while (1)
75 wait_for_interrupt();
76}
77
69void riscv_software_interrupt(void) 78void riscv_software_interrupt(void)
70{ 79{
71 unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits; 80 unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
@@ -94,6 +103,11 @@ void riscv_software_interrupt(void)
94 generic_smp_call_function_interrupt(); 103 generic_smp_call_function_interrupt();
95 } 104 }
96 105
106 if (ops & (1 << IPI_CPU_STOP)) {
107 stats[IPI_CPU_STOP]++;
108 ipi_stop();
109 }
110
97 BUG_ON((ops >> IPI_MAX) != 0); 111 BUG_ON((ops >> IPI_MAX) != 0);
98 112
99 /* Order data access and bit testing. */ 113 /* Order data access and bit testing. */
@@ -121,6 +135,7 @@ send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
121static const char * const ipi_names[] = { 135static const char * const ipi_names[] = {
122 [IPI_RESCHEDULE] = "Rescheduling interrupts", 136 [IPI_RESCHEDULE] = "Rescheduling interrupts",
123 [IPI_CALL_FUNC] = "Function call interrupts", 137 [IPI_CALL_FUNC] = "Function call interrupts",
138 [IPI_CPU_STOP] = "CPU stop interrupts",
124}; 139};
125 140
126void show_ipi_stats(struct seq_file *p, int prec) 141void show_ipi_stats(struct seq_file *p, int prec)
@@ -146,15 +161,29 @@ void arch_send_call_function_single_ipi(int cpu)
146 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); 161 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
147} 162}
148 163
149static void ipi_stop(void *unused)
150{
151 while (1)
152 wait_for_interrupt();
153}
154
155void smp_send_stop(void) 164void smp_send_stop(void)
156{ 165{
157 on_each_cpu(ipi_stop, NULL, 1); 166 unsigned long timeout;
167
168 if (num_online_cpus() > 1) {
169 cpumask_t mask;
170
171 cpumask_copy(&mask, cpu_online_mask);
172 cpumask_clear_cpu(smp_processor_id(), &mask);
173
174 if (system_state <= SYSTEM_RUNNING)
175 pr_crit("SMP: stopping secondary CPUs\n");
176 send_ipi_message(&mask, IPI_CPU_STOP);
177 }
178
179 /* Wait up to one second for other CPUs to stop */
180 timeout = USEC_PER_SEC;
181 while (num_online_cpus() > 1 && timeout--)
182 udelay(1);
183
184 if (num_online_cpus() > 1)
185 pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
186 cpumask_pr_args(cpu_online_mask));
158} 187}
159 188
160void smp_send_reschedule(int cpu) 189void smp_send_reschedule(int cpu)
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 65df1dfdc303..1e1395d63dab 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -18,6 +18,8 @@
18#include <asm/cache.h> 18#include <asm/cache.h>
19#include <asm/thread_info.h> 19#include <asm/thread_info.h>
20 20
21#define MAX_BYTES_PER_LONG 0x10
22
21OUTPUT_ARCH(riscv) 23OUTPUT_ARCH(riscv)
22ENTRY(_start) 24ENTRY(_start)
23 25
@@ -74,8 +76,6 @@ SECTIONS
74 *(.sbss*) 76 *(.sbss*)
75 } 77 }
76 78
77 BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
78
79 EXCEPTION_TABLE(0x10) 79 EXCEPTION_TABLE(0x10)
80 NOTES 80 NOTES
81 81
@@ -83,6 +83,10 @@ SECTIONS
83 *(.rel.dyn*) 83 *(.rel.dyn*)
84 } 84 }
85 85
86 BSS_SECTION(MAX_BYTES_PER_LONG,
87 MAX_BYTES_PER_LONG,
88 MAX_BYTES_PER_LONG)
89
86 _end = .; 90 _end = .;
87 91
88 STABS_DEBUG 92 STABS_DEBUG
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild
index 0febf1a07c30..6c6f6301012e 100644
--- a/arch/unicore32/include/uapi/asm/Kbuild
+++ b/arch/unicore32/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
1include include/uapi/asm-generic/Kbuild.asm 1include include/uapi/asm-generic/Kbuild.asm
2 2
3generic-y += kvm_para.h 3generic-y += kvm_para.h
4generic-y += shmparam.h
4generic-y += ucontext.h 5generic-y += ucontext.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6185d4f33296..4b4a7f32b68e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -446,7 +446,7 @@ config RETPOLINE
446 branches. Requires a compiler with -mindirect-branch=thunk-extern 446 branches. Requires a compiler with -mindirect-branch=thunk-extern
447 support for full protection. The kernel may run slower. 447 support for full protection. The kernel may run slower.
448 448
449config RESCTRL 449config X86_RESCTRL
450 bool "Resource Control support" 450 bool "Resource Control support"
451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) 451 depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
452 select KERNFS 452 select KERNFS
@@ -617,7 +617,7 @@ config X86_INTEL_QUARK
617 617
618config X86_INTEL_LPSS 618config X86_INTEL_LPSS
619 bool "Intel Low Power Subsystem Support" 619 bool "Intel Low Power Subsystem Support"
620 depends on X86 && ACPI 620 depends on X86 && ACPI && PCI
621 select COMMON_CLK 621 select COMMON_CLK
622 select PINCTRL 622 select PINCTRL
623 select IOSF_MBI 623 select IOSF_MBI
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 54990fe2a3ae..40ebddde6ac2 100644
--- a/arch/x86/include/asm/resctrl_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -2,7 +2,7 @@
2#ifndef _ASM_X86_RESCTRL_SCHED_H 2#ifndef _ASM_X86_RESCTRL_SCHED_H
3#define _ASM_X86_RESCTRL_SCHED_H 3#define _ASM_X86_RESCTRL_SCHED_H
4 4
5#ifdef CONFIG_RESCTRL 5#ifdef CONFIG_X86_RESCTRL
6 6
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/jump_label.h> 8#include <linux/jump_label.h>
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void)
88 88
89static inline void resctrl_sched_in(void) {} 89static inline void resctrl_sched_in(void) {}
90 90
91#endif /* CONFIG_RESCTRL */ 91#endif /* CONFIG_X86_RESCTRL */
92 92
93#endif /* _ASM_X86_RESCTRL_SCHED_H */ 93#endif /* _ASM_X86_RESCTRL_SCHED_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index a77445d1b034..780f2b42c8ef 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -711,7 +711,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
711{ 711{
712 if (unlikely(!access_ok(ptr,len))) 712 if (unlikely(!access_ok(ptr,len)))
713 return 0; 713 return 0;
714 __uaccess_begin(); 714 __uaccess_begin_nospec();
715 return 1; 715 return 1;
716} 716}
717#define user_access_begin(a,b) user_access_begin(a,b) 717#define user_access_begin(a,b) user_access_begin(a,b)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index ac78f90aea56..b6fa0869f7aa 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
39obj-$(CONFIG_X86_MCE) += mce/ 39obj-$(CONFIG_X86_MCE) += mce/
40obj-$(CONFIG_MTRR) += mtrr/ 40obj-$(CONFIG_MTRR) += mtrr/
41obj-$(CONFIG_MICROCODE) += microcode/ 41obj-$(CONFIG_MICROCODE) += microcode/
42obj-$(CONFIG_RESCTRL) += resctrl/ 42obj-$(CONFIG_X86_RESCTRL) += resctrl/
43 43
44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o 44obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
45 45
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 8654b8b0c848..1de0f4170178 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -215,7 +215,7 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
215static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = 215static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
216 SPECTRE_V2_USER_NONE; 216 SPECTRE_V2_USER_NONE;
217 217
218#ifdef RETPOLINE 218#ifdef CONFIG_RETPOLINE
219static bool spectre_v2_bad_module; 219static bool spectre_v2_bad_module;
220 220
221bool retpoline_module_ok(bool has_retpoline) 221bool retpoline_module_ok(bool has_retpoline)
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
index 6895049ceef7..1cabe6fd8e11 100644
--- a/arch/x86/kernel/cpu/resctrl/Makefile
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -1,4 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o 2obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o
3obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o 3obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o
4CFLAGS_pseudo_lock.o = -I$(src) 4CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 307e5bddb6d9..a157ca5b6869 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6278,6 +6278,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6278 int asid, ret; 6278 int asid, ret;
6279 6279
6280 ret = -EBUSY; 6280 ret = -EBUSY;
6281 if (unlikely(sev->active))
6282 return ret;
6283
6281 asid = sev_asid_new(); 6284 asid = sev_asid_new();
6282 if (asid < 0) 6285 if (asid < 0)
6283 return ret; 6286 return ret;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 3170e291215d..2616bd2c7f2c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4540,9 +4540,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
4540 * given physical address won't match the required 4540 * given physical address won't match the required
4541 * VMCS12_REVISION identifier. 4541 * VMCS12_REVISION identifier.
4542 */ 4542 */
4543 nested_vmx_failValid(vcpu, 4543 return nested_vmx_failValid(vcpu,
4544 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); 4544 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4545 return kvm_skip_emulated_instruction(vcpu);
4546 } 4545 }
4547 new_vmcs12 = kmap(page); 4546 new_vmcs12 = kmap(page);
4548 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || 4547 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 4d39f731bc33..f6915f10e584 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -453,7 +453,7 @@ static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
453 struct kvm_tlb_range *range) 453 struct kvm_tlb_range *range)
454{ 454{
455 struct kvm_vcpu *vcpu; 455 struct kvm_vcpu *vcpu;
456 int ret = -ENOTSUPP, i; 456 int ret = 0, i;
457 457
458 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); 458 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
459 459
@@ -7044,7 +7044,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
7044 7044
7045 /* unmask address range configure area */ 7045 /* unmask address range configure area */
7046 for (i = 0; i < vmx->pt_desc.addr_range; i++) 7046 for (i = 0; i < vmx->pt_desc.addr_range; i++)
7047 vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4)); 7047 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7048} 7048}
7049 7049
7050static void vmx_cpuid_update(struct kvm_vcpu *vcpu) 7050static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 2f6787fc7106..c54a493e139a 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -898,10 +898,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
898 val = native_read_msr_safe(msr, err); 898 val = native_read_msr_safe(msr, err);
899 switch (msr) { 899 switch (msr) {
900 case MSR_IA32_APICBASE: 900 case MSR_IA32_APICBASE:
901#ifdef CONFIG_X86_X2APIC 901 val &= ~X2APIC_ENABLE;
902 if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
903#endif
904 val &= ~X2APIC_ENABLE;
905 break; 902 break;
906 } 903 }
907 return val; 904 return val;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 72bf446c3fee..6e29794573b7 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
361{ 361{
362 int cpu; 362 int cpu;
363 363
364 pvclock_resume();
365
366 if (xen_clockevent != &xen_vcpuop_clockevent) 364 if (xen_clockevent != &xen_vcpuop_clockevent)
367 return; 365 return;
368 366
@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
379}; 377};
380 378
381static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; 379static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
380static u64 xen_clock_value_saved;
382 381
383void xen_save_time_memory_area(void) 382void xen_save_time_memory_area(void)
384{ 383{
385 struct vcpu_register_time_memory_area t; 384 struct vcpu_register_time_memory_area t;
386 int ret; 385 int ret;
387 386
387 xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
388
388 if (!xen_clock) 389 if (!xen_clock)
389 return; 390 return;
390 391
@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
404 int ret; 405 int ret;
405 406
406 if (!xen_clock) 407 if (!xen_clock)
407 return; 408 goto out;
408 409
409 t.addr.v = &xen_clock->pvti; 410 t.addr.v = &xen_clock->pvti;
410 411
@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
421 if (ret != 0) 422 if (ret != 0)
422 pr_notice("Cannot restore secondary vcpu_time_info (err %d)", 423 pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
423 ret); 424 ret);
425
426out:
427 /* Need pvclock_resume() before using xen_clocksource_read(). */
428 pvclock_resume();
429 xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
424} 430}
425 431
426static void xen_setup_vsyscall_time_info(void) 432static void xen_setup_vsyscall_time_info(void)
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 63e0f12be7c9..72adbbe975d5 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
1154} 1154}
1155 1155
1156/** 1156/**
1157 * __bfq_deactivate_entity - deactivate an entity from its service tree. 1157 * __bfq_deactivate_entity - update sched_data and service trees for
1158 * @entity: the entity to deactivate. 1158 * entity, so as to represent entity as inactive
1159 * @entity: the entity being deactivated.
1159 * @ins_into_idle_tree: if false, the entity will not be put into the 1160 * @ins_into_idle_tree: if false, the entity will not be put into the
1160 * idle tree. 1161 * idle tree.
1161 * 1162 *
1162 * Deactivates an entity, independently of its previous state. Must 1163 * If necessary and allowed, puts entity into the idle tree. NOTE:
1163 * be invoked only if entity is on a service tree. Extracts the entity 1164 * entity may be on no tree if in service.
1164 * from that tree, and if necessary and allowed, puts it into the idle
1165 * tree.
1166 */ 1165 */
1167bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree) 1166bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
1168{ 1167{
diff --git a/block/blk-core.c b/block/blk-core.c
index c78042975737..3c5f61ceeb67 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -661,7 +661,6 @@ no_merge:
661 * blk_attempt_plug_merge - try to merge with %current's plugged list 661 * blk_attempt_plug_merge - try to merge with %current's plugged list
662 * @q: request_queue new bio is being queued at 662 * @q: request_queue new bio is being queued at
663 * @bio: new bio being queued 663 * @bio: new bio being queued
664 * @request_count: out parameter for number of traversed plugged requests
665 * @same_queue_rq: pointer to &struct request that gets filled in when 664 * @same_queue_rq: pointer to &struct request that gets filled in when
666 * another request associated with @q is found on the plug list 665 * another request associated with @q is found on the plug list
667 * (optional, may be %NULL) 666 * (optional, may be %NULL)
@@ -1683,6 +1682,15 @@ EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
1683 * @plug: The &struct blk_plug that needs to be initialized 1682 * @plug: The &struct blk_plug that needs to be initialized
1684 * 1683 *
1685 * Description: 1684 * Description:
1685 * blk_start_plug() indicates to the block layer an intent by the caller
1686 * to submit multiple I/O requests in a batch. The block layer may use
1687 * this hint to defer submitting I/Os from the caller until blk_finish_plug()
1688 * is called. However, the block layer may choose to submit requests
1689 * before a call to blk_finish_plug() if the number of queued I/Os
1690 * exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
1691 * %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
1692 * the task schedules (see below).
1693 *
1686 * Tracking blk_plug inside the task_struct will help with auto-flushing the 1694 * Tracking blk_plug inside the task_struct will help with auto-flushing the
1687 * pending I/O should the task end up blocking between blk_start_plug() and 1695 * pending I/O should the task end up blocking between blk_start_plug() and
1688 * blk_finish_plug(). This is important from a performance perspective, but 1696 * blk_finish_plug(). This is important from a performance perspective, but
@@ -1765,6 +1773,16 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
1765 blk_mq_flush_plug_list(plug, from_schedule); 1773 blk_mq_flush_plug_list(plug, from_schedule);
1766} 1774}
1767 1775
1776/**
1777 * blk_finish_plug - mark the end of a batch of submitted I/O
1778 * @plug: The &struct blk_plug passed to blk_start_plug()
1779 *
1780 * Description:
1781 * Indicate that a batch of I/O submissions is complete. This function
1782 * must be paired with an initial call to blk_start_plug(). The intent
1783 * is to allow the block layer to optimize I/O submission. See the
1784 * documentation for blk_start_plug() for more information.
1785 */
1768void blk_finish_plug(struct blk_plug *plug) 1786void blk_finish_plug(struct blk_plug *plug)
1769{ 1787{
1770 if (plug != current->plug) 1788 if (plug != current->plug)
diff --git a/block/blk-mq-debugfs-zoned.c b/block/blk-mq-debugfs-zoned.c
index fb2c82c351e4..038cb627c868 100644
--- a/block/blk-mq-debugfs-zoned.c
+++ b/block/blk-mq-debugfs-zoned.c
@@ -1,8 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (C) 2017 Western Digital Corporation or its affiliates. 3 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
4 *
5 * This file is released under the GPL.
6 */ 4 */
7 5
8#include <linux/blkdev.h> 6#include <linux/blkdev.h>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ba37b9e15e9..8f5b533764ca 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1906{ 1906{
1907 const int is_sync = op_is_sync(bio->bi_opf); 1907 const int is_sync = op_is_sync(bio->bi_opf);
1908 const int is_flush_fua = op_is_flush(bio->bi_opf); 1908 const int is_flush_fua = op_is_flush(bio->bi_opf);
1909 struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf }; 1909 struct blk_mq_alloc_data data = { .flags = 0};
1910 struct request *rq; 1910 struct request *rq;
1911 struct blk_plug *plug; 1911 struct blk_plug *plug;
1912 struct request *same_queue_rq = NULL; 1912 struct request *same_queue_rq = NULL;
@@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
1928 1928
1929 rq_qos_throttle(q, bio); 1929 rq_qos_throttle(q, bio);
1930 1930
1931 data.cmd_flags = bio->bi_opf;
1931 rq = blk_mq_get_request(q, bio, &data); 1932 rq = blk_mq_get_request(q, bio, &data);
1932 if (unlikely(!rq)) { 1933 if (unlikely(!rq)) {
1933 rq_qos_cleanup(q, bio); 1934 rq_qos_cleanup(q, bio);
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 6651e713c45d..5564e73266a6 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -539,6 +539,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
539 ictx = skcipher_instance_ctx(inst); 539 ictx = skcipher_instance_ctx(inst);
540 540
541 /* Stream cipher, e.g. "xchacha12" */ 541 /* Stream cipher, e.g. "xchacha12" */
542 crypto_set_skcipher_spawn(&ictx->streamcipher_spawn,
543 skcipher_crypto_instance(inst));
542 err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name, 544 err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
543 0, crypto_requires_sync(algt->type, 545 0, crypto_requires_sync(algt->type,
544 algt->mask)); 546 algt->mask));
@@ -547,6 +549,8 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
547 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn); 549 streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
548 550
549 /* Block cipher, e.g. "aes" */ 551 /* Block cipher, e.g. "aes" */
552 crypto_set_spawn(&ictx->blockcipher_spawn,
553 skcipher_crypto_instance(inst));
550 err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name, 554 err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
551 CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); 555 CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
552 if (err) 556 if (err)
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 37f54d1b2f66..4be293a4b5f0 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
58 return -EINVAL; 58 return -EINVAL;
59 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 59 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
60 return -EINVAL; 60 return -EINVAL;
61 if (RTA_PAYLOAD(rta) < sizeof(*param)) 61
62 /*
63 * RTA_OK() didn't align the rtattr's payload when validating that it
64 * fits in the buffer. Yet, the keys should start on the next 4-byte
65 * aligned boundary. To avoid confusion, require that the rtattr
66 * payload be exactly the param struct, which has a 4-byte aligned size.
67 */
68 if (RTA_PAYLOAD(rta) != sizeof(*param))
62 return -EINVAL; 69 return -EINVAL;
70 BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
63 71
64 param = RTA_DATA(rta); 72 param = RTA_DATA(rta);
65 keys->enckeylen = be32_to_cpu(param->enckeylen); 73 keys->enckeylen = be32_to_cpu(param->enckeylen);
66 74
67 key += RTA_ALIGN(rta->rta_len); 75 key += rta->rta_len;
68 keylen -= RTA_ALIGN(rta->rta_len); 76 keylen -= rta->rta_len;
69 77
70 if (keylen < keys->enckeylen) 78 if (keylen < keys->enckeylen)
71 return -EINVAL; 79 return -EINVAL;
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 80a25cc04aec..4741fe89ba2c 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
279 struct aead_request *req = areq->data; 279 struct aead_request *req = areq->data;
280 280
281 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0); 281 err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
282 aead_request_complete(req, err); 282 authenc_esn_request_complete(req, err);
283} 283}
284 284
285static int crypto_authenc_esn_decrypt(struct aead_request *req) 285static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
index 9a5c60f08aad..c0cf87ae7ef6 100644
--- a/crypto/sm3_generic.c
+++ b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)
100 100
101 for (i = 0; i <= 63; i++) { 101 for (i = 0; i <= 63; i++) {
102 102
103 ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7); 103 ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);
104 104
105 ss2 = ss1 ^ rol32(a, 12); 105 ss2 = ss1 ^ rol32(a, 12);
106 106
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7b65a807b3dd..90ff0a47c12e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -10,6 +10,7 @@ menuconfig ACPI
10 bool "ACPI (Advanced Configuration and Power Interface) Support" 10 bool "ACPI (Advanced Configuration and Power Interface) Support"
11 depends on ARCH_SUPPORTS_ACPI 11 depends on ARCH_SUPPORTS_ACPI
12 select PNP 12 select PNP
13 select NLS
13 default y if X86 14 default y if X86
14 help 15 help
15 Advanced Configuration and Power Interface (ACPI) support for 16 Advanced Configuration and Power Interface (ACPI) support for
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7c6afc111d76..bb857421c2e8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,8 @@ acpi-y += ec.o
41acpi-$(CONFIG_ACPI_DOCK) += dock.o 41acpi-$(CONFIG_ACPI_DOCK) += dock.o
42acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o 42acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
43obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o 43obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
44acpi-y += acpi_lpss.o acpi_apd.o 44acpi-$(CONFIG_PCI) += acpi_lpss.o
45acpi-y += acpi_apd.o
45acpi-y += acpi_platform.o 46acpi-y += acpi_platform.o
46acpi-y += acpi_pnp.o 47acpi-y += acpi_pnp.o
47acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o 48acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index fdd90ffceb85..e48894e002ba 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -876,7 +876,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
876 return (resv == its->its_count) ? resv : -ENODEV; 876 return (resv == its->its_count) ? resv : -ENODEV;
877} 877}
878#else 878#else
879static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev); 879static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
880{ return NULL; } 880{ return NULL; }
881static inline int iort_add_device_replay(const struct iommu_ops *ops, 881static inline int iort_add_device_replay(const struct iommu_ops *ops,
882 struct device *dev) 882 struct device *dev)
@@ -952,9 +952,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
952{ 952{
953 struct acpi_iort_node *node; 953 struct acpi_iort_node *node;
954 struct acpi_iort_root_complex *rc; 954 struct acpi_iort_root_complex *rc;
955 struct pci_bus *pbus = to_pci_dev(dev)->bus;
955 956
956 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX, 957 node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
957 iort_match_node_callback, dev); 958 iort_match_node_callback, &pbus->dev);
958 if (!node || node->revision < 1) 959 if (!node || node->revision < 1)
959 return -ENODEV; 960 return -ENODEV;
960 961
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 99d820a693a8..5c093ce01bcd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1054,18 +1054,6 @@ void __init acpi_early_init(void)
1054 goto error0; 1054 goto error0;
1055 } 1055 }
1056 1056
1057 /*
1058 * ACPI 2.0 requires the EC driver to be loaded and work before
1059 * the EC device is found in the namespace (i.e. before
1060 * acpi_load_tables() is called).
1061 *
1062 * This is accomplished by looking for the ECDT table, and getting
1063 * the EC parameters out of that.
1064 *
1065 * Ignore the result. Not having an ECDT is not fatal.
1066 */
1067 status = acpi_ec_ecdt_probe();
1068
1069#ifdef CONFIG_X86 1057#ifdef CONFIG_X86
1070 if (!acpi_ioapic) { 1058 if (!acpi_ioapic) {
1071 /* compatible (0) means level (3) */ 1059 /* compatible (0) means level (3) */
@@ -1142,6 +1130,18 @@ static int __init acpi_bus_init(void)
1142 goto error1; 1130 goto error1;
1143 } 1131 }
1144 1132
1133 /*
1134 * ACPI 2.0 requires the EC driver to be loaded and work before the EC
1135 * device is found in the namespace.
1136 *
1137 * This is accomplished by looking for the ECDT table and getting the EC
1138 * parameters out of that.
1139 *
1140 * Do that before calling acpi_initialize_objects() which may trigger EC
1141 * address space accesses.
1142 */
1143 acpi_ec_ecdt_probe();
1144
1145 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE); 1145 status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
1146 if (ACPI_FAILURE(status)) { 1146 if (ACPI_FAILURE(status)) {
1147 printk(KERN_ERR PREFIX 1147 printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7e6952edb5b0..6a9e1fb8913a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
81#else 81#else
82static inline void acpi_debugfs_init(void) { return; } 82static inline void acpi_debugfs_init(void) { return; }
83#endif 83#endif
84#ifdef CONFIG_PCI
84void acpi_lpss_init(void); 85void acpi_lpss_init(void);
86#else
87static inline void acpi_lpss_init(void) {}
88#endif
85 89
86void acpi_apd_init(void); 90void acpi_apd_init(void);
87 91
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 011d3db19c80..5143e11e3b0f 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -26,7 +26,6 @@
26#include <acpi/nfit.h> 26#include <acpi/nfit.h>
27#include "intel.h" 27#include "intel.h"
28#include "nfit.h" 28#include "nfit.h"
29#include "intel.h"
30 29
31/* 30/*
32 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is 31 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
78} 77}
79EXPORT_SYMBOL(to_nfit_uuid); 78EXPORT_SYMBOL(to_nfit_uuid);
80 79
81static struct acpi_nfit_desc *to_acpi_nfit_desc(
82 struct nvdimm_bus_descriptor *nd_desc)
83{
84 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
85}
86
87static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) 80static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
88{ 81{
89 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; 82 struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -419,7 +412,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
419int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, 412int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
420 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) 413 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
421{ 414{
422 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 415 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
423 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 416 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
424 union acpi_object in_obj, in_buf, *out_obj; 417 union acpi_object in_obj, in_buf, *out_obj;
425 const struct nd_cmd_desc *desc = NULL; 418 const struct nd_cmd_desc *desc = NULL;
@@ -721,6 +714,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
721 struct acpi_nfit_memory_map *memdev; 714 struct acpi_nfit_memory_map *memdev;
722 struct acpi_nfit_desc *acpi_desc; 715 struct acpi_nfit_desc *acpi_desc;
723 struct nfit_mem *nfit_mem; 716 struct nfit_mem *nfit_mem;
717 u16 physical_id;
724 718
725 mutex_lock(&acpi_desc_lock); 719 mutex_lock(&acpi_desc_lock);
726 list_for_each_entry(acpi_desc, &acpi_descs, list) { 720 list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -728,10 +722,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
728 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { 722 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
729 memdev = __to_nfit_memdev(nfit_mem); 723 memdev = __to_nfit_memdev(nfit_mem);
730 if (memdev->device_handle == device_handle) { 724 if (memdev->device_handle == device_handle) {
725 *flags = memdev->flags;
726 physical_id = memdev->physical_id;
731 mutex_unlock(&acpi_desc->init_mutex); 727 mutex_unlock(&acpi_desc->init_mutex);
732 mutex_unlock(&acpi_desc_lock); 728 mutex_unlock(&acpi_desc_lock);
733 *flags = memdev->flags; 729 return physical_id;
734 return memdev->physical_id;
735 } 730 }
736 } 731 }
737 mutex_unlock(&acpi_desc->init_mutex); 732 mutex_unlock(&acpi_desc->init_mutex);
@@ -2231,7 +2226,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2231 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 2226 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2232 if (!nd_set) 2227 if (!nd_set)
2233 return -ENOMEM; 2228 return -ENOMEM;
2234 ndr_desc->nd_set = nd_set;
2235 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid); 2229 guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2236 2230
2237 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); 2231 info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@@ -3367,7 +3361,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
3367 3361
3368static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) 3362static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3369{ 3363{
3370 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3364 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3371 struct device *dev = acpi_desc->dev; 3365 struct device *dev = acpi_desc->dev;
3372 3366
3373 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ 3367 /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@@ -3384,7 +3378,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3384static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, 3378static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3385 struct nvdimm *nvdimm, unsigned int cmd) 3379 struct nvdimm *nvdimm, unsigned int cmd)
3386{ 3380{
3387 struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); 3381 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3388 3382
3389 if (nvdimm) 3383 if (nvdimm)
3390 return 0; 3384 return 0;
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 850b2927b4e7..f70de71f79d6 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
146 146
147static void nvdimm_invalidate_cache(void); 147static void nvdimm_invalidate_cache(void);
148 148
149static int intel_security_unlock(struct nvdimm *nvdimm, 149static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
150 const struct nvdimm_key_data *key_data) 150 const struct nvdimm_key_data *key_data)
151{ 151{
152 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 152 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
227 return 0; 227 return 0;
228} 228}
229 229
230static int intel_security_erase(struct nvdimm *nvdimm, 230static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
231 const struct nvdimm_key_data *key, 231 const struct nvdimm_key_data *key,
232 enum nvdimm_passphrase_type ptype) 232 enum nvdimm_passphrase_type ptype)
233{ 233{
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
276 return 0; 276 return 0;
277} 277}
278 278
279static int intel_security_query_overwrite(struct nvdimm *nvdimm) 279static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
280{ 280{
281 int rc; 281 int rc;
282 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 282 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
313 return 0; 313 return 0;
314} 314}
315 315
316static int intel_security_overwrite(struct nvdimm *nvdimm, 316static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
317 const struct nvdimm_key_data *nkey) 317 const struct nvdimm_key_data *nkey)
318{ 318{
319 int rc; 319 int rc;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 274699463b4f..7bbbf8256a41 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -146,9 +146,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
146 { 146 {
147 struct acpi_srat_mem_affinity *p = 147 struct acpi_srat_mem_affinity *p =
148 (struct acpi_srat_mem_affinity *)header; 148 (struct acpi_srat_mem_affinity *)header;
149 pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n", 149 pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
150 (unsigned long)p->base_address, 150 (unsigned long long)p->base_address,
151 (unsigned long)p->length, 151 (unsigned long long)p->length,
152 p->proximity_domain, 152 p->proximity_domain,
153 (p->flags & ACPI_SRAT_MEM_ENABLED) ? 153 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
154 "enabled" : "disabled", 154 "enabled" : "disabled",
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 2579675b7082..e7c0006e6602 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -20,8 +20,11 @@
20#define GPI1_LDO_ON (3 << 0) 20#define GPI1_LDO_ON (3 << 0)
21#define GPI1_LDO_OFF (4 << 0) 21#define GPI1_LDO_OFF (4 << 0)
22 22
23#define AXP288_ADC_TS_PIN_GPADC 0xf2 23#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
24#define AXP288_ADC_TS_PIN_ON 0xf3 24#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
25#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
26#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
27#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
25 28
26static struct pmic_table power_table[] = { 29static struct pmic_table power_table[] = {
27 { 30 {
@@ -212,22 +215,44 @@ out:
212 */ 215 */
213static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg) 216static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
214{ 217{
218 int ret, adc_ts_pin_ctrl;
215 u8 buf[2]; 219 u8 buf[2];
216 int ret;
217 220
218 ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, 221 /*
219 AXP288_ADC_TS_PIN_GPADC); 222 * The current-source used for the battery temp-sensor (TS) is shared
223 * with the GPADC. For proper fuel-gauge and charger operation the TS
224 * current-source needs to be permanently on. But to read the GPADC we
225 * need to temporary switch the TS current-source to ondemand, so that
226 * the GPADC can use it, otherwise we will always read an all 0 value.
227 *
228 * Note that the switching from on to on-ondemand is not necessary
229 * when the TS current-source is off (this happens on devices which
230 * do not use the TS-pin).
231 */
232 ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
220 if (ret) 233 if (ret)
221 return ret; 234 return ret;
222 235
223 /* After switching to the GPADC pin give things some time to settle */ 236 if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
224 usleep_range(6000, 10000); 237 ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
238 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
239 AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
240 if (ret)
241 return ret;
242
243 /* Wait a bit after switching the current-source */
244 usleep_range(6000, 10000);
245 }
225 246
226 ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2); 247 ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
227 if (ret == 0) 248 if (ret == 0)
228 ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f); 249 ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
229 250
230 regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON); 251 if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
252 regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
253 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
254 AXP288_ADC_TS_CURRENT_ON);
255 }
231 256
232 return ret; 257 return ret;
233} 258}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1b475bc1ae16..665e93ca0b40 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
131 } 131 }
132} 132}
133 133
134static bool acpi_power_resource_is_dup(union acpi_object *package,
135 unsigned int start, unsigned int i)
136{
137 acpi_handle rhandle, dup;
138 unsigned int j;
139
140 /* The caller is expected to check the package element types */
141 rhandle = package->package.elements[i].reference.handle;
142 for (j = start; j < i; j++) {
143 dup = package->package.elements[j].reference.handle;
144 if (dup == rhandle)
145 return true;
146 }
147
148 return false;
149}
150
134int acpi_extract_power_resources(union acpi_object *package, unsigned int start, 151int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
135 struct list_head *list) 152 struct list_head *list)
136{ 153{
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
150 err = -ENODEV; 167 err = -ENODEV;
151 break; 168 break;
152 } 169 }
170
171 /* Some ACPI tables contain duplicate power resource references */
172 if (acpi_power_resource_is_dup(package, start, i))
173 continue;
174
153 err = acpi_add_power_resource(rhandle); 175 err = acpi_add_power_resource(rhandle);
154 if (err) 176 if (err)
155 break; 177 break;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ca7a6b4eaae..8218db17ebdb 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
1091 1091
1092config PATA_ACPI 1092config PATA_ACPI
1093 tristate "ACPI firmware driver for PATA" 1093 tristate "ACPI firmware driver for PATA"
1094 depends on ATA_ACPI && ATA_BMDMA 1094 depends on ATA_ACPI && ATA_BMDMA && PCI
1095 help 1095 help
1096 This option enables an ACPI method driver which drives 1096 This option enables an ACPI method driver which drives
1097 motherboard PATA controller interfaces through the ACPI 1097 motherboard PATA controller interfaces through the ACPI
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ef356e70e6de..8810475f307a 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -254,6 +254,8 @@ enum {
254 AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use 254 AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
255 SATA_MOBILE_LPM_POLICY 255 SATA_MOBILE_LPM_POLICY
256 as default lpm_policy */ 256 as default lpm_policy */
257 AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
258 suspend/resume */
257 259
258 /* ap->flags bits */ 260 /* ap->flags bits */
259 261
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f9cb51be38eb..d4bba3ace45d 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -28,6 +28,11 @@
28#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4)) 28#define AHCI_WINDOW_BASE(win) (0x64 + ((win) << 4))
29#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4)) 29#define AHCI_WINDOW_SIZE(win) (0x68 + ((win) << 4))
30 30
31struct ahci_mvebu_plat_data {
32 int (*plat_config)(struct ahci_host_priv *hpriv);
33 unsigned int flags;
34};
35
31static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, 36static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
32 const struct mbus_dram_target_info *dram) 37 const struct mbus_dram_target_info *dram)
33{ 38{
@@ -62,6 +67,35 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
62 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA); 67 writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
63} 68}
64 69
70static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv)
71{
72 const struct mbus_dram_target_info *dram;
73 int rc = 0;
74
75 dram = mv_mbus_dram_info();
76 if (dram)
77 ahci_mvebu_mbus_config(hpriv, dram);
78 else
79 rc = -ENODEV;
80
81 ahci_mvebu_regret_option(hpriv);
82
83 return rc;
84}
85
86static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv)
87{
88 u32 reg;
89
90 writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
91
92 reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
93 reg |= BIT(6);
94 writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
95
96 return 0;
97}
98
65/** 99/**
66 * ahci_mvebu_stop_engine 100 * ahci_mvebu_stop_engine
67 * 101 *
@@ -126,13 +160,9 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
126{ 160{
127 struct ata_host *host = platform_get_drvdata(pdev); 161 struct ata_host *host = platform_get_drvdata(pdev);
128 struct ahci_host_priv *hpriv = host->private_data; 162 struct ahci_host_priv *hpriv = host->private_data;
129 const struct mbus_dram_target_info *dram; 163 const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data;
130 164
131 dram = mv_mbus_dram_info(); 165 pdata->plat_config(hpriv);
132 if (dram)
133 ahci_mvebu_mbus_config(hpriv, dram);
134
135 ahci_mvebu_regret_option(hpriv);
136 166
137 return ahci_platform_resume_host(&pdev->dev); 167 return ahci_platform_resume_host(&pdev->dev);
138} 168}
@@ -154,29 +184,30 @@ static struct scsi_host_template ahci_platform_sht = {
154 184
155static int ahci_mvebu_probe(struct platform_device *pdev) 185static int ahci_mvebu_probe(struct platform_device *pdev)
156{ 186{
187 const struct ahci_mvebu_plat_data *pdata;
157 struct ahci_host_priv *hpriv; 188 struct ahci_host_priv *hpriv;
158 const struct mbus_dram_target_info *dram;
159 int rc; 189 int rc;
160 190
191 pdata = of_device_get_match_data(&pdev->dev);
192 if (!pdata)
193 return -EINVAL;
194
161 hpriv = ahci_platform_get_resources(pdev, 0); 195 hpriv = ahci_platform_get_resources(pdev, 0);
162 if (IS_ERR(hpriv)) 196 if (IS_ERR(hpriv))
163 return PTR_ERR(hpriv); 197 return PTR_ERR(hpriv);
164 198
199 hpriv->flags |= pdata->flags;
200 hpriv->plat_data = (void *)pdata;
201
165 rc = ahci_platform_enable_resources(hpriv); 202 rc = ahci_platform_enable_resources(hpriv);
166 if (rc) 203 if (rc)
167 return rc; 204 return rc;
168 205
169 hpriv->stop_engine = ahci_mvebu_stop_engine; 206 hpriv->stop_engine = ahci_mvebu_stop_engine;
170 207
171 if (of_device_is_compatible(pdev->dev.of_node, 208 rc = pdata->plat_config(hpriv);
172 "marvell,armada-380-ahci")) { 209 if (rc)
173 dram = mv_mbus_dram_info(); 210 goto disable_resources;
174 if (!dram)
175 return -ENODEV;
176
177 ahci_mvebu_mbus_config(hpriv, dram);
178 ahci_mvebu_regret_option(hpriv);
179 }
180 211
181 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info, 212 rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
182 &ahci_platform_sht); 213 &ahci_platform_sht);
@@ -190,18 +221,28 @@ disable_resources:
190 return rc; 221 return rc;
191} 222}
192 223
224static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
225 .plat_config = ahci_mvebu_armada_380_config,
226};
227
228static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
229 .plat_config = ahci_mvebu_armada_3700_config,
230 .flags = AHCI_HFLAG_SUSPEND_PHYS,
231};
232
193static const struct of_device_id ahci_mvebu_of_match[] = { 233static const struct of_device_id ahci_mvebu_of_match[] = {
194 { .compatible = "marvell,armada-380-ahci", }, 234 {
195 { .compatible = "marvell,armada-3700-ahci", }, 235 .compatible = "marvell,armada-380-ahci",
236 .data = &ahci_mvebu_armada_380_plat_data,
237 },
238 {
239 .compatible = "marvell,armada-3700-ahci",
240 .data = &ahci_mvebu_armada_3700_plat_data,
241 },
196 { }, 242 { },
197}; 243};
198MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match); 244MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
199 245
200/*
201 * We currently don't provide power management related operations,
202 * since there is no suspend/resume support at the platform level for
203 * Armada 38x for the moment.
204 */
205static struct platform_driver ahci_mvebu_driver = { 246static struct platform_driver ahci_mvebu_driver = {
206 .probe = ahci_mvebu_probe, 247 .probe = ahci_mvebu_probe,
207 .remove = ata_platform_remove_one, 248 .remove = ata_platform_remove_one,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 4b900fc659f7..81b1a3332ed6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -56,6 +56,12 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
56 if (rc) 56 if (rc)
57 goto disable_phys; 57 goto disable_phys;
58 58
59 rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA);
60 if (rc) {
61 phy_exit(hpriv->phys[i]);
62 goto disable_phys;
63 }
64
59 rc = phy_power_on(hpriv->phys[i]); 65 rc = phy_power_on(hpriv->phys[i]);
60 if (rc) { 66 if (rc) {
61 phy_exit(hpriv->phys[i]); 67 phy_exit(hpriv->phys[i]);
@@ -738,6 +744,9 @@ int ahci_platform_suspend_host(struct device *dev)
738 writel(ctl, mmio + HOST_CTL); 744 writel(ctl, mmio + HOST_CTL);
739 readl(mmio + HOST_CTL); /* flush */ 745 readl(mmio + HOST_CTL); /* flush */
740 746
747 if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
748 ahci_platform_disable_phys(hpriv);
749
741 return ata_host_suspend(host, PMSG_SUSPEND); 750 return ata_host_suspend(host, PMSG_SUSPEND);
742} 751}
743EXPORT_SYMBOL_GPL(ahci_platform_suspend_host); 752EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
@@ -756,6 +765,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
756int ahci_platform_resume_host(struct device *dev) 765int ahci_platform_resume_host(struct device *dev)
757{ 766{
758 struct ata_host *host = dev_get_drvdata(dev); 767 struct ata_host *host = dev_get_drvdata(dev);
768 struct ahci_host_priv *hpriv = host->private_data;
759 int rc; 769 int rc;
760 770
761 if (dev->power.power_state.event == PM_EVENT_SUSPEND) { 771 if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
@@ -766,6 +776,9 @@ int ahci_platform_resume_host(struct device *dev)
766 ahci_init_controller(host); 776 ahci_init_controller(host);
767 } 777 }
768 778
779 if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
780 ahci_platform_enable_phys(hpriv);
781
769 ata_host_resume(host); 782 ata_host_resume(host);
770 783
771 return 0; 784 return 0;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 4dc528bf8e85..9c1247d42897 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
729 if (!pp) 729 if (!pp)
730 return -ENOMEM; 730 return -ENOMEM;
731 731
732 mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma, 732 mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
733 GFP_KERNEL); 733 GFP_KERNEL);
734 if (!mem) { 734 if (!mem) {
735 kfree(pp); 735 kfree(pp);
736 return -ENOMEM; 736 return -ENOMEM;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102dcfec4..211607986134 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
533 533
534static int he_init_tpdrq(struct he_dev *he_dev) 534static int he_init_tpdrq(struct he_dev *he_dev)
535{ 535{
536 he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 536 he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), 537 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
538 &he_dev->tpdrq_phys, GFP_KERNEL); 538 &he_dev->tpdrq_phys,
539 GFP_KERNEL);
539 if (he_dev->tpdrq_base == NULL) { 540 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n"); 541 hprintk("failed to alloc tpdrq\n");
541 return -ENOMEM; 542 return -ENOMEM;
@@ -717,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
717 instead of '/ 512', use '>> 9' to prevent a call 718 instead of '/ 512', use '>> 9' to prevent a call
718 to divdu3 on x86 platforms 719 to divdu3 on x86 platforms
719 */ 720 */
720 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9; 721 rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
721 722
722 if (rate_cps < 10) 723 if (rate_cps < 10)
723 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */ 724 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
805 goto out_free_rbpl_virt; 806 goto out_free_rbpl_virt;
806 } 807 }
807 808
808 he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 809 he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
809 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), 810 CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
810 &he_dev->rbpl_phys, GFP_KERNEL); 811 &he_dev->rbpl_phys, GFP_KERNEL);
811 if (he_dev->rbpl_base == NULL) { 812 if (he_dev->rbpl_base == NULL) {
812 hprintk("failed to alloc rbpl_base\n"); 813 hprintk("failed to alloc rbpl_base\n");
813 goto out_destroy_rbpl_pool; 814 goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
844 845
845 /* rx buffer ready queue */ 846 /* rx buffer ready queue */
846 847
847 he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 848 he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
848 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), 849 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
849 &he_dev->rbrq_phys, GFP_KERNEL); 850 &he_dev->rbrq_phys, GFP_KERNEL);
850 if (he_dev->rbrq_base == NULL) { 851 if (he_dev->rbrq_base == NULL) {
851 hprintk("failed to allocate rbrq\n"); 852 hprintk("failed to allocate rbrq\n");
852 goto out_free_rbpl; 853 goto out_free_rbpl;
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
868 869
869 /* tx buffer ready queue */ 870 /* tx buffer ready queue */
870 871
871 he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 872 he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
872 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), 873 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
873 &he_dev->tbrq_phys, GFP_KERNEL); 874 &he_dev->tbrq_phys, GFP_KERNEL);
874 if (he_dev->tbrq_base == NULL) { 875 if (he_dev->tbrq_base == NULL) {
875 hprintk("failed to allocate tbrq\n"); 876 hprintk("failed to allocate tbrq\n");
876 goto out_free_rbpq_base; 877 goto out_free_rbpq_base;
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
913 /* 2.9.3.5 tail offset for each interrupt queue is located after the 914 /* 2.9.3.5 tail offset for each interrupt queue is located after the
914 end of the interrupt queue */ 915 end of the interrupt queue */
915 916
916 he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev, 917 he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
917 (CONFIG_IRQ_SIZE + 1) 918 (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
918 * sizeof(struct he_irq), 919 &he_dev->irq_phys, GFP_KERNEL);
919 &he_dev->irq_phys,
920 GFP_KERNEL);
921 if (he_dev->irq_base == NULL) { 920 if (he_dev->irq_base == NULL) {
922 hprintk("failed to allocate irq\n"); 921 hprintk("failed to allocate irq\n");
923 return -ENOMEM; 922 return -ENOMEM;
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)
1464 1463
1465 /* host status page */ 1464 /* host status page */
1466 1465
1467 he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev, 1466 he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
1468 sizeof(struct he_hsp), 1467 sizeof(struct he_hsp),
1469 &he_dev->hsp_phys, GFP_KERNEL); 1468 &he_dev->hsp_phys, GFP_KERNEL);
1470 if (he_dev->hsp == NULL) { 1469 if (he_dev->hsp == NULL) {
1471 hprintk("failed to allocate host status page\n"); 1470 hprintk("failed to allocate host status page\n");
1472 return -ENOMEM; 1471 return -ENOMEM;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 6e737142ceaa..43a14579e80e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
641 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL); 641 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
642 if (!scq) 642 if (!scq)
643 return NULL; 643 return NULL;
644 scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE, 644 scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
645 &scq->paddr, GFP_KERNEL); 645 &scq->paddr, GFP_KERNEL);
646 if (scq->base == NULL) { 646 if (scq->base == NULL) {
647 kfree(scq); 647 kfree(scq);
648 return NULL; 648 return NULL;
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
971{ 971{
972 struct rsq_entry *rsqe; 972 struct rsq_entry *rsqe;
973 973
974 card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE, 974 card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
975 &card->rsq.paddr, GFP_KERNEL); 975 &card->rsq.paddr, GFP_KERNEL);
976 if (card->rsq.base == NULL) { 976 if (card->rsq.base == NULL) {
977 printk("%s: can't allocate RSQ.\n", card->name); 977 printk("%s: can't allocate RSQ.\n", card->name);
978 return -1; 978 return -1;
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
3390 writel(0, SAR_REG_GP); 3390 writel(0, SAR_REG_GP);
3391 3391
3392 /* Initialize RAW Cell Handle Register */ 3392 /* Initialize RAW Cell Handle Register */
3393 card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev, 3393 card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
3394 2 * sizeof(u32), 3394 2 * sizeof(u32),
3395 &card->raw_cell_paddr, 3395 &card->raw_cell_paddr,
3396 GFP_KERNEL); 3396 GFP_KERNEL);
3397 if (!card->raw_cell_hnd) { 3397 if (!card->raw_cell_hnd) {
3398 printk("%s: memory allocation failure.\n", card->name); 3398 printk("%s: memory allocation failure.\n", card->name);
3399 deinit_card(card); 3399 deinit_card(card);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a690fd400260..0992e67e862b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,7 @@
32#include <trace/events/power.h> 32#include <trace/events/power.h>
33#include <linux/cpufreq.h> 33#include <linux/cpufreq.h>
34#include <linux/cpuidle.h> 34#include <linux/cpuidle.h>
35#include <linux/devfreq.h>
35#include <linux/timer.h> 36#include <linux/timer.h>
36 37
37#include "../base.h" 38#include "../base.h"
@@ -1078,6 +1079,7 @@ void dpm_resume(pm_message_t state)
1078 dpm_show_time(starttime, state, 0, NULL); 1079 dpm_show_time(starttime, state, 0, NULL);
1079 1080
1080 cpufreq_resume(); 1081 cpufreq_resume();
1082 devfreq_resume();
1081 trace_suspend_resume(TPS("dpm_resume"), state.event, false); 1083 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1082} 1084}
1083 1085
@@ -1852,6 +1854,7 @@ int dpm_suspend(pm_message_t state)
1852 trace_suspend_resume(TPS("dpm_suspend"), state.event, true); 1854 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1853 might_sleep(); 1855 might_sleep();
1854 1856
1857 devfreq_suspend();
1855 cpufreq_suspend(); 1858 cpufreq_suspend();
1856 1859
1857 mutex_lock(&dpm_list_mtx); 1860 mutex_lock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 70624695b6d5..457be03b744d 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -121,7 +121,7 @@ static void pm_runtime_cancel_pending(struct device *dev)
121 * Compute the autosuspend-delay expiration time based on the device's 121 * Compute the autosuspend-delay expiration time based on the device's
122 * power.last_busy time. If the delay has already expired or is disabled 122 * power.last_busy time. If the delay has already expired or is disabled
123 * (negative) or the power.use_autosuspend flag isn't set, return 0. 123 * (negative) or the power.use_autosuspend flag isn't set, return 0.
124 * Otherwise return the expiration time in jiffies (adjusted to be nonzero). 124 * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
125 * 125 *
126 * This function may be called either with or without dev->power.lock held. 126 * This function may be called either with or without dev->power.lock held.
127 * Either way it can be racy, since power.last_busy may be updated at any time. 127 * Either way it can be racy, since power.last_busy may be updated at any time.
@@ -141,7 +141,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
141 141
142 last_busy = READ_ONCE(dev->power.last_busy); 142 last_busy = READ_ONCE(dev->power.last_busy);
143 143
144 expires = last_busy + autosuspend_delay * NSEC_PER_MSEC; 144 expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
145 if (expires <= now) 145 if (expires <= now)
146 expires = 0; /* Already expired. */ 146 expires = 0; /* Already expired. */
147 147
@@ -525,7 +525,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
525 * We add a slack of 25% to gather wakeups 525 * We add a slack of 25% to gather wakeups
526 * without sacrificing the granularity. 526 * without sacrificing the granularity.
527 */ 527 */
528 u64 slack = READ_ONCE(dev->power.autosuspend_delay) * 528 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
529 (NSEC_PER_MSEC >> 2); 529 (NSEC_PER_MSEC >> 2);
530 530
531 dev->power.timer_expires = expires; 531 dev->power.timer_expires = expires;
@@ -905,7 +905,10 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
905 spin_lock_irqsave(&dev->power.lock, flags); 905 spin_lock_irqsave(&dev->power.lock, flags);
906 906
907 expires = dev->power.timer_expires; 907 expires = dev->power.timer_expires;
908 /* If 'expire' is after 'jiffies' we've been called too early. */ 908 /*
909 * If 'expires' is after the current time, we've been called
910 * too early.
911 */
909 if (expires > 0 && expires < ktime_to_ns(ktime_get())) { 912 if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
910 dev->power.timer_expires = 0; 913 dev->power.timer_expires = 0;
911 rpm_suspend(dev, dev->power.timer_autosuspends ? 914 rpm_suspend(dev, dev->power.timer_autosuspends ?
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1bd1145ad8b5..330c1f7e9665 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
108 * suppress pointless writes. 108 * suppress pointless writes.
109 */ 109 */
110 for (i = 0; i < d->chip->num_regs; i++) { 110 for (i = 0; i < d->chip->num_regs; i++) {
111 if (!d->chip->mask_base)
112 continue;
113
111 reg = d->chip->mask_base + 114 reg = d->chip->mask_base +
112 (i * map->reg_stride * d->irq_reg_stride); 115 (i * map->reg_stride * d->irq_reg_stride);
113 if (d->chip->mask_invert) { 116 if (d->chip->mask_invert) {
@@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
258 const struct regmap_irq_type *t = &irq_data->type; 261 const struct regmap_irq_type *t = &irq_data->type;
259 262
260 if ((t->types_supported & type) != type) 263 if ((t->types_supported & type) != type)
261 return -ENOTSUPP; 264 return 0;
262 265
263 reg = t->type_reg_offset / map->reg_stride; 266 reg = t->type_reg_offset / map->reg_stride;
264 267
@@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
588 /* Mask all the interrupts by default */ 591 /* Mask all the interrupts by default */
589 for (i = 0; i < chip->num_regs; i++) { 592 for (i = 0; i < chip->num_regs; i++) {
590 d->mask_buf[i] = d->mask_buf_def[i]; 593 d->mask_buf[i] = d->mask_buf_def[i];
594 if (!chip->mask_base)
595 continue;
596
591 reg = chip->mask_base + 597 reg = chip->mask_base +
592 (i * map->reg_stride * d->irq_reg_stride); 598 (i * map->reg_stride * d->irq_reg_stride);
593 if (chip->mask_invert) 599 if (chip->mask_invert)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8a0720d3653..cf5538942834 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1190,6 +1190,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1190 goto out_unlock; 1190 goto out_unlock;
1191 } 1191 }
1192 1192
1193 if (lo->lo_offset != info->lo_offset ||
1194 lo->lo_sizelimit != info->lo_sizelimit) {
1195 sync_blockdev(lo->lo_device);
1196 kill_bdev(lo->lo_device);
1197 }
1198
1193 /* I/O need to be drained during transfer transition */ 1199 /* I/O need to be drained during transfer transition */
1194 blk_mq_freeze_queue(lo->lo_queue); 1200 blk_mq_freeze_queue(lo->lo_queue);
1195 1201
@@ -1218,6 +1224,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
1218 1224
1219 if (lo->lo_offset != info->lo_offset || 1225 if (lo->lo_offset != info->lo_offset ||
1220 lo->lo_sizelimit != info->lo_sizelimit) { 1226 lo->lo_sizelimit != info->lo_sizelimit) {
1227 /* kill_bdev should have truncated all the pages */
1228 if (lo->lo_device->bd_inode->i_mapping->nrpages) {
1229 err = -EAGAIN;
1230 pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1231 __func__, lo->lo_number, lo->lo_file_name,
1232 lo->lo_device->bd_inode->i_mapping->nrpages);
1233 goto out_unfreeze;
1234 }
1221 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) { 1235 if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
1222 err = -EFBIG; 1236 err = -EFBIG;
1223 goto out_unfreeze; 1237 goto out_unfreeze;
@@ -1443,22 +1457,39 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
1443 1457
1444static int loop_set_block_size(struct loop_device *lo, unsigned long arg) 1458static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
1445{ 1459{
1460 int err = 0;
1461
1446 if (lo->lo_state != Lo_bound) 1462 if (lo->lo_state != Lo_bound)
1447 return -ENXIO; 1463 return -ENXIO;
1448 1464
1449 if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg)) 1465 if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
1450 return -EINVAL; 1466 return -EINVAL;
1451 1467
1468 if (lo->lo_queue->limits.logical_block_size != arg) {
1469 sync_blockdev(lo->lo_device);
1470 kill_bdev(lo->lo_device);
1471 }
1472
1452 blk_mq_freeze_queue(lo->lo_queue); 1473 blk_mq_freeze_queue(lo->lo_queue);
1453 1474
1475 /* kill_bdev should have truncated all the pages */
1476 if (lo->lo_queue->limits.logical_block_size != arg &&
1477 lo->lo_device->bd_inode->i_mapping->nrpages) {
1478 err = -EAGAIN;
1479 pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
1480 __func__, lo->lo_number, lo->lo_file_name,
1481 lo->lo_device->bd_inode->i_mapping->nrpages);
1482 goto out_unfreeze;
1483 }
1484
1454 blk_queue_logical_block_size(lo->lo_queue, arg); 1485 blk_queue_logical_block_size(lo->lo_queue, arg);
1455 blk_queue_physical_block_size(lo->lo_queue, arg); 1486 blk_queue_physical_block_size(lo->lo_queue, arg);
1456 blk_queue_io_min(lo->lo_queue, arg); 1487 blk_queue_io_min(lo->lo_queue, arg);
1457 loop_update_dio(lo); 1488 loop_update_dio(lo);
1458 1489out_unfreeze:
1459 blk_mq_unfreeze_queue(lo->lo_queue); 1490 blk_mq_unfreeze_queue(lo->lo_queue);
1460 1491
1461 return 0; 1492 return err;
1462} 1493}
1463 1494
1464static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, 1495static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08696f5f00bb..7c9a949e876b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
288 blk_queue_physical_block_size(nbd->disk->queue, config->blksize); 288 blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
289 set_capacity(nbd->disk, config->bytesize >> 9); 289 set_capacity(nbd->disk, config->bytesize >> 9);
290 if (bdev) { 290 if (bdev) {
291 if (bdev->bd_disk) 291 if (bdev->bd_disk) {
292 bd_set_size(bdev, config->bytesize); 292 bd_set_size(bdev, config->bytesize);
293 else 293 set_blocksize(bdev, config->blksize);
294 } else
294 bdev->bd_invalidated = 1; 295 bdev->bd_invalidated = 1;
295 bdput(bdev); 296 bdput(bdev);
296 } 297 }
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index b3df2793e7cd..34b22d6523ba 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -97,6 +97,7 @@ void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
97#else 97#else
98static inline int null_zone_init(struct nullb_device *dev) 98static inline int null_zone_init(struct nullb_device *dev)
99{ 99{
100 pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
100 return -EINVAL; 101 return -EINVAL;
101} 102}
102static inline void null_zone_exit(struct nullb_device *dev) {} 103static inline void null_zone_exit(struct nullb_device *dev) {}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e5140bbf241..1e92b61d0bd5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5986,7 +5986,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
5986 struct list_head *tmp; 5986 struct list_head *tmp;
5987 int dev_id; 5987 int dev_id;
5988 char opt_buf[6]; 5988 char opt_buf[6];
5989 bool already = false;
5990 bool force = false; 5989 bool force = false;
5991 int ret; 5990 int ret;
5992 5991
@@ -6019,13 +6018,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
6019 spin_lock_irq(&rbd_dev->lock); 6018 spin_lock_irq(&rbd_dev->lock);
6020 if (rbd_dev->open_count && !force) 6019 if (rbd_dev->open_count && !force)
6021 ret = -EBUSY; 6020 ret = -EBUSY;
6022 else 6021 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
6023 already = test_and_set_bit(RBD_DEV_FLAG_REMOVING, 6022 &rbd_dev->flags))
6024 &rbd_dev->flags); 6023 ret = -EINPROGRESS;
6025 spin_unlock_irq(&rbd_dev->lock); 6024 spin_unlock_irq(&rbd_dev->lock);
6026 } 6025 }
6027 spin_unlock(&rbd_dev_list_lock); 6026 spin_unlock(&rbd_dev_list_lock);
6028 if (ret < 0 || already) 6027 if (ret)
6029 return ret; 6028 return ret;
6030 6029
6031 if (force) { 6030 if (force) {
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a10d5736d8f7..ab893a7571a2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
2641 "comp pci_alloc, total bytes %zd entries %d\n", 2641 "comp pci_alloc, total bytes %zd entries %d\n",
2642 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); 2642 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
2643 2643
2644 skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE, 2644 skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
2645 &skdev->cq_dma_address, GFP_KERNEL); 2645 &skdev->cq_dma_address, GFP_KERNEL);
2646 2646
2647 if (skcomp == NULL) { 2647 if (skcomp == NULL) {
2648 rc = -ENOMEM; 2648 rc = -ENOMEM;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 33c5cc879f24..04ca65912638 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -316,11 +316,9 @@ static ssize_t idle_store(struct device *dev,
316 * See the comment in writeback_store. 316 * See the comment in writeback_store.
317 */ 317 */
318 zram_slot_lock(zram, index); 318 zram_slot_lock(zram, index);
319 if (!zram_allocated(zram, index) || 319 if (zram_allocated(zram, index) &&
320 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 320 !zram_test_flag(zram, index, ZRAM_UNDER_WB))
321 goto next; 321 zram_set_flag(zram, index, ZRAM_IDLE);
322 zram_set_flag(zram, index, ZRAM_IDLE);
323next:
324 zram_slot_unlock(zram, index); 322 zram_slot_unlock(zram, index);
325 } 323 }
326 324
@@ -330,6 +328,41 @@ next:
330} 328}
331 329
332#ifdef CONFIG_ZRAM_WRITEBACK 330#ifdef CONFIG_ZRAM_WRITEBACK
331static ssize_t writeback_limit_enable_store(struct device *dev,
332 struct device_attribute *attr, const char *buf, size_t len)
333{
334 struct zram *zram = dev_to_zram(dev);
335 u64 val;
336 ssize_t ret = -EINVAL;
337
338 if (kstrtoull(buf, 10, &val))
339 return ret;
340
341 down_read(&zram->init_lock);
342 spin_lock(&zram->wb_limit_lock);
343 zram->wb_limit_enable = val;
344 spin_unlock(&zram->wb_limit_lock);
345 up_read(&zram->init_lock);
346 ret = len;
347
348 return ret;
349}
350
351static ssize_t writeback_limit_enable_show(struct device *dev,
352 struct device_attribute *attr, char *buf)
353{
354 bool val;
355 struct zram *zram = dev_to_zram(dev);
356
357 down_read(&zram->init_lock);
358 spin_lock(&zram->wb_limit_lock);
359 val = zram->wb_limit_enable;
360 spin_unlock(&zram->wb_limit_lock);
361 up_read(&zram->init_lock);
362
363 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
364}
365
333static ssize_t writeback_limit_store(struct device *dev, 366static ssize_t writeback_limit_store(struct device *dev,
334 struct device_attribute *attr, const char *buf, size_t len) 367 struct device_attribute *attr, const char *buf, size_t len)
335{ 368{
@@ -341,9 +374,9 @@ static ssize_t writeback_limit_store(struct device *dev,
341 return ret; 374 return ret;
342 375
343 down_read(&zram->init_lock); 376 down_read(&zram->init_lock);
344 atomic64_set(&zram->stats.bd_wb_limit, val); 377 spin_lock(&zram->wb_limit_lock);
345 if (val == 0) 378 zram->bd_wb_limit = val;
346 zram->stop_writeback = false; 379 spin_unlock(&zram->wb_limit_lock);
347 up_read(&zram->init_lock); 380 up_read(&zram->init_lock);
348 ret = len; 381 ret = len;
349 382
@@ -357,7 +390,9 @@ static ssize_t writeback_limit_show(struct device *dev,
357 struct zram *zram = dev_to_zram(dev); 390 struct zram *zram = dev_to_zram(dev);
358 391
359 down_read(&zram->init_lock); 392 down_read(&zram->init_lock);
360 val = atomic64_read(&zram->stats.bd_wb_limit); 393 spin_lock(&zram->wb_limit_lock);
394 val = zram->bd_wb_limit;
395 spin_unlock(&zram->wb_limit_lock);
361 up_read(&zram->init_lock); 396 up_read(&zram->init_lock);
362 397
363 return scnprintf(buf, PAGE_SIZE, "%llu\n", val); 398 return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -588,8 +623,8 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
588 return 1; 623 return 1;
589} 624}
590 625
591#define HUGE_WRITEBACK 0x1 626#define HUGE_WRITEBACK 1
592#define IDLE_WRITEBACK 0x2 627#define IDLE_WRITEBACK 2
593 628
594static ssize_t writeback_store(struct device *dev, 629static ssize_t writeback_store(struct device *dev,
595 struct device_attribute *attr, const char *buf, size_t len) 630 struct device_attribute *attr, const char *buf, size_t len)
@@ -602,7 +637,7 @@ static ssize_t writeback_store(struct device *dev,
602 struct page *page; 637 struct page *page;
603 ssize_t ret, sz; 638 ssize_t ret, sz;
604 char mode_buf[8]; 639 char mode_buf[8];
605 unsigned long mode = -1UL; 640 int mode = -1;
606 unsigned long blk_idx = 0; 641 unsigned long blk_idx = 0;
607 642
608 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 643 sz = strscpy(mode_buf, buf, sizeof(mode_buf));
@@ -618,7 +653,7 @@ static ssize_t writeback_store(struct device *dev,
618 else if (!strcmp(mode_buf, "huge")) 653 else if (!strcmp(mode_buf, "huge"))
619 mode = HUGE_WRITEBACK; 654 mode = HUGE_WRITEBACK;
620 655
621 if (mode == -1UL) 656 if (mode == -1)
622 return -EINVAL; 657 return -EINVAL;
623 658
624 down_read(&zram->init_lock); 659 down_read(&zram->init_lock);
@@ -645,10 +680,13 @@ static ssize_t writeback_store(struct device *dev,
645 bvec.bv_len = PAGE_SIZE; 680 bvec.bv_len = PAGE_SIZE;
646 bvec.bv_offset = 0; 681 bvec.bv_offset = 0;
647 682
648 if (zram->stop_writeback) { 683 spin_lock(&zram->wb_limit_lock);
684 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
685 spin_unlock(&zram->wb_limit_lock);
649 ret = -EIO; 686 ret = -EIO;
650 break; 687 break;
651 } 688 }
689 spin_unlock(&zram->wb_limit_lock);
652 690
653 if (!blk_idx) { 691 if (!blk_idx) {
654 blk_idx = alloc_block_bdev(zram); 692 blk_idx = alloc_block_bdev(zram);
@@ -667,10 +705,11 @@ static ssize_t writeback_store(struct device *dev,
667 zram_test_flag(zram, index, ZRAM_UNDER_WB)) 705 zram_test_flag(zram, index, ZRAM_UNDER_WB))
668 goto next; 706 goto next;
669 707
670 if ((mode & IDLE_WRITEBACK && 708 if (mode == IDLE_WRITEBACK &&
671 !zram_test_flag(zram, index, ZRAM_IDLE)) && 709 !zram_test_flag(zram, index, ZRAM_IDLE))
672 (mode & HUGE_WRITEBACK && 710 goto next;
673 !zram_test_flag(zram, index, ZRAM_HUGE))) 711 if (mode == HUGE_WRITEBACK &&
712 !zram_test_flag(zram, index, ZRAM_HUGE))
674 goto next; 713 goto next;
675 /* 714 /*
676 * Clearing ZRAM_UNDER_WB is duty of caller. 715 * Clearing ZRAM_UNDER_WB is duty of caller.
@@ -732,11 +771,10 @@ static ssize_t writeback_store(struct device *dev,
732 zram_set_element(zram, index, blk_idx); 771 zram_set_element(zram, index, blk_idx);
733 blk_idx = 0; 772 blk_idx = 0;
734 atomic64_inc(&zram->stats.pages_stored); 773 atomic64_inc(&zram->stats.pages_stored);
735 if (atomic64_add_unless(&zram->stats.bd_wb_limit, 774 spin_lock(&zram->wb_limit_lock);
736 -1 << (PAGE_SHIFT - 12), 0)) { 775 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
737 if (atomic64_read(&zram->stats.bd_wb_limit) == 0) 776 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
738 zram->stop_writeback = true; 777 spin_unlock(&zram->wb_limit_lock);
739 }
740next: 778next:
741 zram_slot_unlock(zram, index); 779 zram_slot_unlock(zram, index);
742 } 780 }
@@ -1812,6 +1850,7 @@ static DEVICE_ATTR_RW(comp_algorithm);
1812static DEVICE_ATTR_RW(backing_dev); 1850static DEVICE_ATTR_RW(backing_dev);
1813static DEVICE_ATTR_WO(writeback); 1851static DEVICE_ATTR_WO(writeback);
1814static DEVICE_ATTR_RW(writeback_limit); 1852static DEVICE_ATTR_RW(writeback_limit);
1853static DEVICE_ATTR_RW(writeback_limit_enable);
1815#endif 1854#endif
1816 1855
1817static struct attribute *zram_disk_attrs[] = { 1856static struct attribute *zram_disk_attrs[] = {
@@ -1828,6 +1867,7 @@ static struct attribute *zram_disk_attrs[] = {
1828 &dev_attr_backing_dev.attr, 1867 &dev_attr_backing_dev.attr,
1829 &dev_attr_writeback.attr, 1868 &dev_attr_writeback.attr,
1830 &dev_attr_writeback_limit.attr, 1869 &dev_attr_writeback_limit.attr,
1870 &dev_attr_writeback_limit_enable.attr,
1831#endif 1871#endif
1832 &dev_attr_io_stat.attr, 1872 &dev_attr_io_stat.attr,
1833 &dev_attr_mm_stat.attr, 1873 &dev_attr_mm_stat.attr,
@@ -1867,7 +1907,9 @@ static int zram_add(void)
1867 device_id = ret; 1907 device_id = ret;
1868 1908
1869 init_rwsem(&zram->init_lock); 1909 init_rwsem(&zram->init_lock);
1870 1910#ifdef CONFIG_ZRAM_WRITEBACK
1911 spin_lock_init(&zram->wb_limit_lock);
1912#endif
1871 queue = blk_alloc_queue(GFP_KERNEL); 1913 queue = blk_alloc_queue(GFP_KERNEL);
1872 if (!queue) { 1914 if (!queue) {
1873 pr_err("Error allocating disk queue for device %d\n", 1915 pr_err("Error allocating disk queue for device %d\n",
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 4bd3afd15e83..f2fd46daa760 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -86,7 +86,6 @@ struct zram_stats {
86 atomic64_t bd_count; /* no. of pages in backing device */ 86 atomic64_t bd_count; /* no. of pages in backing device */
87 atomic64_t bd_reads; /* no. of reads from backing device */ 87 atomic64_t bd_reads; /* no. of reads from backing device */
88 atomic64_t bd_writes; /* no. of writes from backing device */ 88 atomic64_t bd_writes; /* no. of writes from backing device */
89 atomic64_t bd_wb_limit; /* writeback limit of backing device */
90#endif 89#endif
91}; 90};
92 91
@@ -114,8 +113,10 @@ struct zram {
114 */ 113 */
115 bool claim; /* Protected by bdev->bd_mutex */ 114 bool claim; /* Protected by bdev->bd_mutex */
116 struct file *backing_dev; 115 struct file *backing_dev;
117 bool stop_writeback;
118#ifdef CONFIG_ZRAM_WRITEBACK 116#ifdef CONFIG_ZRAM_WRITEBACK
117 spinlock_t wb_limit_lock;
118 bool wb_limit_enable;
119 u64 bd_wb_limit;
119 struct block_device *bdev; 120 struct block_device *bdev;
120 unsigned int old_block_size; 121 unsigned int old_block_size;
121 unsigned long *bitmap; 122 unsigned long *bitmap;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f23ebb395f1..e35a886e00bc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1530,17 +1530,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1530{ 1530{
1531 unsigned int ret_freq = 0; 1531 unsigned int ret_freq = 0;
1532 1532
1533 if (!cpufreq_driver->get) 1533 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
1534 return ret_freq; 1534 return ret_freq;
1535 1535
1536 ret_freq = cpufreq_driver->get(policy->cpu); 1536 ret_freq = cpufreq_driver->get(policy->cpu);
1537 1537
1538 /* 1538 /*
1539 * Updating inactive policies is invalid, so avoid doing that. Also 1539 * If fast frequency switching is used with the given policy, the check
1540 * if fast frequency switching is used with the given policy, the check
1541 * against policy->cur is pointless, so skip it in that case too. 1540 * against policy->cur is pointless, so skip it in that case too.
1542 */ 1541 */
1543 if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) 1542 if (policy->fast_switch_enabled)
1544 return ret_freq; 1543 return ret_freq;
1545 1544
1546 if (ret_freq && policy->cur && 1545 if (ret_freq && policy->cur &&
@@ -1569,10 +1568,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1569 1568
1570 if (policy) { 1569 if (policy) {
1571 down_read(&policy->rwsem); 1570 down_read(&policy->rwsem);
1572 1571 ret_freq = __cpufreq_get(policy);
1573 if (!policy_is_inactive(policy))
1574 ret_freq = __cpufreq_get(policy);
1575
1576 up_read(&policy->rwsem); 1572 up_read(&policy->rwsem);
1577 1573
1578 cpufreq_cpu_put(policy); 1574 cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 50b1551ba894..242c3370544e 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -52,9 +52,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
52 int ret; 52 int ret;
53 struct scmi_data *priv = policy->driver_data; 53 struct scmi_data *priv = policy->driver_data;
54 struct scmi_perf_ops *perf_ops = handle->perf_ops; 54 struct scmi_perf_ops *perf_ops = handle->perf_ops;
55 u64 freq = policy->freq_table[index].frequency * 1000; 55 u64 freq = policy->freq_table[index].frequency;
56 56
57 ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); 57 ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
58 if (!ret) 58 if (!ret)
59 arch_set_freq_scale(policy->related_cpus, freq, 59 arch_set_freq_scale(policy->related_cpus, freq,
60 policy->cpuinfo.max_freq); 60 policy->cpuinfo.max_freq);
@@ -176,7 +176,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
176out_free_priv: 176out_free_priv:
177 kfree(priv); 177 kfree(priv);
178out_free_opp: 178out_free_opp:
179 dev_pm_opp_cpumask_remove_table(policy->cpus); 179 dev_pm_opp_remove_all_dynamic(cpu_dev);
180 180
181 return ret; 181 return ret;
182} 182}
@@ -188,7 +188,7 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
188 cpufreq_cooling_unregister(priv->cdev); 188 cpufreq_cooling_unregister(priv->cdev);
189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
190 kfree(priv); 190 kfree(priv);
191 dev_pm_opp_cpumask_remove_table(policy->related_cpus); 191 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
192 192
193 return 0; 193 return 0;
194} 194}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec77773..99449738faa4 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -177,7 +177,7 @@ out_free_cpufreq_table:
177out_free_priv: 177out_free_priv:
178 kfree(priv); 178 kfree(priv);
179out_free_opp: 179out_free_opp:
180 dev_pm_opp_cpumask_remove_table(policy->cpus); 180 dev_pm_opp_remove_all_dynamic(cpu_dev);
181 181
182 return ret; 182 return ret;
183} 183}
@@ -190,7 +190,7 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
190 clk_put(priv->clk); 190 clk_put(priv->clk);
191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
192 kfree(priv); 192 kfree(priv);
193 dev_pm_opp_cpumask_remove_table(policy->related_cpus); 193 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
194 194
195 return 0; 195 return 0;
196} 196}
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
692 depends on ARCH_BCM_IPROC 692 depends on ARCH_BCM_IPROC
693 depends on MAILBOX 693 depends on MAILBOX
694 default m 694 default m
695 select CRYPTO_AUTHENC
695 select CRYPTO_DES 696 select CRYPTO_DES
696 select CRYPTO_MD5 697 select CRYPTO_MD5
697 select CRYPTO_SHA1 698 select CRYPTO_SHA1
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..acf79889d903 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
283 */ 283 */
284static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) 284static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
285{ 285{
286 dev->gdr = dma_zalloc_coherent(dev->core_dev->device, 286 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
287 sizeof(struct ce_gd) * PPC4XX_NUM_GD, 287 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
288 &dev->gdr_pa, GFP_ATOMIC); 288 &dev->gdr_pa, GFP_ATOMIC);
289 if (!dev->gdr) 289 if (!dev->gdr)
290 return -ENOMEM; 290 return -ENOMEM;
291 291
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..5567cbda2798 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2845 struct spu_hw *spu = &iproc_priv.spu; 2845 struct spu_hw *spu = &iproc_priv.spu;
2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2848 struct rtattr *rta = (void *)key; 2848 struct crypto_authenc_keys keys;
2849 struct crypto_authenc_key_param *param; 2849 int ret;
2850 const u8 *origkey = key;
2851 const unsigned int origkeylen = keylen;
2852
2853 int ret = 0;
2854 2850
2855 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2851 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2856 keylen); 2852 keylen);
2857 flow_dump(" key: ", key, keylen); 2853 flow_dump(" key: ", key, keylen);
2858 2854
2859 if (!RTA_OK(rta, keylen)) 2855 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2860 goto badkey; 2856 if (ret)
2861 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2862 goto badkey;
2863 if (RTA_PAYLOAD(rta) < sizeof(*param))
2864 goto badkey; 2857 goto badkey;
2865 2858
2866 param = RTA_DATA(rta); 2859 if (keys.enckeylen > MAX_KEY_SIZE ||
2867 ctx->enckeylen = be32_to_cpu(param->enckeylen); 2860 keys.authkeylen > MAX_KEY_SIZE)
2868
2869 key += RTA_ALIGN(rta->rta_len);
2870 keylen -= RTA_ALIGN(rta->rta_len);
2871
2872 if (keylen < ctx->enckeylen)
2873 goto badkey;
2874 if (ctx->enckeylen > MAX_KEY_SIZE)
2875 goto badkey; 2861 goto badkey;
2876 2862
2877 ctx->authkeylen = keylen - ctx->enckeylen; 2863 ctx->enckeylen = keys.enckeylen;
2878 2864 ctx->authkeylen = keys.authkeylen;
2879 if (ctx->authkeylen > MAX_KEY_SIZE)
2880 goto badkey;
2881 2865
2882 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2866 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2883 /* May end up padding auth key. So make sure it's zeroed. */ 2867 /* May end up padding auth key. So make sure it's zeroed. */
2884 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2868 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2885 memcpy(ctx->authkey, key, ctx->authkeylen); 2869 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2886 2870
2887 switch (ctx->alg->cipher_info.alg) { 2871 switch (ctx->alg->cipher_info.alg) {
2888 case CIPHER_ALG_DES: 2872 case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2890 u32 tmp[DES_EXPKEY_WORDS]; 2874 u32 tmp[DES_EXPKEY_WORDS];
2891 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2875 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2892 2876
2893 if (des_ekey(tmp, key) == 0) { 2877 if (des_ekey(tmp, keys.enckey) == 0) {
2894 if (crypto_aead_get_flags(cipher) & 2878 if (crypto_aead_get_flags(cipher) &
2895 CRYPTO_TFM_REQ_WEAK_KEY) { 2879 CRYPTO_TFM_REQ_WEAK_KEY) {
2896 crypto_aead_set_flags(cipher, flags); 2880 crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2905 break; 2889 break;
2906 case CIPHER_ALG_3DES: 2890 case CIPHER_ALG_3DES:
2907 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2891 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2908 const u32 *K = (const u32 *)key; 2892 const u32 *K = (const u32 *)keys.enckey;
2909 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2893 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2910 2894
2911 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 2895 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2956 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2940 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2957 ctx->fallback_cipher->base.crt_flags |= 2941 ctx->fallback_cipher->base.crt_flags |=
2958 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2942 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2959 ret = 2943 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2960 crypto_aead_setkey(ctx->fallback_cipher, origkey,
2961 origkeylen);
2962 if (ret) { 2944 if (ret) {
2963 flow_log(" fallback setkey() returned:%d\n", ret); 2945 flow_log(" fallback setkey() returned:%d\n", ret);
2964 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 2946 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..80ae69f906fb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
3476 * Skip algorithms requiring message digests 3476 * Skip algorithms requiring message digests
3477 * if MD or MD size is not supported by device. 3477 * if MD or MD size is not supported by device.
3478 */ 3478 */
3479 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && 3479 if (is_mdha(c2_alg_sel) &&
3480 (!md_inst || t_alg->aead.maxauthsize > md_limit)) 3480 (!md_inst || t_alg->aead.maxauthsize > md_limit))
3481 continue; 3481 continue;
3482 3482
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..bb1a2cdf1951 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1072 1072
1073 desc = edesc->hw_desc; 1073 desc = edesc->hw_desc;
1074 1074
1075 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1075 if (buflen) {
1076 if (dma_mapping_error(jrdev, state->buf_dma)) { 1076 state->buf_dma = dma_map_single(jrdev, buf, buflen,
1077 dev_err(jrdev, "unable to map src\n"); 1077 DMA_TO_DEVICE);
1078 goto unmap; 1078 if (dma_mapping_error(jrdev, state->buf_dma)) {
1079 } 1079 dev_err(jrdev, "unable to map src\n");
1080 goto unmap;
1081 }
1080 1082
1081 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1083 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1084 }
1082 1085
1083 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1084 digestsize); 1087 digestsize);
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
1155#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) 1155#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
1156#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) 1156#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
1157#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) 1157#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
1158#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
1158#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) 1159#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
1159#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) 1160#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
1160#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) 1161#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 67ea94079837..8c6b83e02a70 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,6 +7,9 @@
7 7
8#ifndef CAAM_ERROR_H 8#ifndef CAAM_ERROR_H
9#define CAAM_ERROR_H 9#define CAAM_ERROR_H
10
11#include "desc.h"
12
10#define CAAM_ERROR_STR_MAX 302 13#define CAAM_ERROR_STR_MAX 302
11 14
12void caam_strstatus(struct device *dev, u32 status, bool qi_v2); 15void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
17void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 20void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
18 int rowsize, int groupsize, struct scatterlist *sg, 21 int rowsize, int groupsize, struct scatterlist *sg,
19 size_t tlen, bool ascii); 22 size_t tlen, bool ascii);
23
24static inline bool is_mdha(u32 algtype)
25{
26 return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
27 OP_ALG_CHA_MDHA;
28}
20#endif /* CAAM_ERROR_H */ 29#endif /* CAAM_ERROR_H */
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ad85ab5e86..a876535529d1 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
278 mcode->num_cores = is_ae ? 6 : 10; 278 mcode->num_cores = is_ae ? 6 : 10;
279 279
280 /* Allocate DMAable space */ 280 /* Allocate DMAable space */
281 mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, 281 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
282 &mcode->phys_base, GFP_KERNEL); 282 &mcode->phys_base, GFP_KERNEL);
283 if (!mcode->code) { 283 if (!mcode->code) {
284 dev_err(dev, "Unable to allocate space for microcode"); 284 dev_err(dev, "Unable to allocate space for microcode");
285 ret = -ENOMEM; 285 ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 5c796ed55eba..2ca431ed1db8 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
236 236
237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : 237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
238 rem_q_size; 238 rem_q_size;
239 curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, 239 curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
240 c_size + CPT_NEXT_CHUNK_PTR_SIZE, 240 c_size + CPT_NEXT_CHUNK_PTR_SIZE,
241 &curr->dma_addr, GFP_KERNEL); 241 &curr->dma_addr,
242 GFP_KERNEL);
242 if (!curr->head) { 243 if (!curr->head) {
243 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", 244 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
244 i, queue->nchunks); 245 i, queue->nchunks);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 9138bae12521..4ace9bcd603a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
25 struct nitrox_device *ndev = cmdq->ndev; 25 struct nitrox_device *ndev = cmdq->ndev;
26 26
27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; 27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
28 cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, 28 cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
29 &cmdq->unalign_dma, 29 &cmdq->unalign_dma,
30 GFP_KERNEL); 30 GFP_KERNEL);
31 if (!cmdq->unalign_base) 31 if (!cmdq->unalign_base)
32 return -ENOMEM; 32 return -ENOMEM;
33 33
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index e34e4df8fd24..fe070d75c842 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -567,10 +567,10 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
567 567
568 /* ORH error code */ 568 /* ORH error code */
569 err = READ_ONCE(*sr->resp.orh) & 0xff; 569 err = READ_ONCE(*sr->resp.orh) & 0xff;
570 softreq_destroy(sr);
571 570
572 if (sr->callback) 571 if (sr->callback)
573 sr->callback(sr->cb_arg, err); 572 sr->callback(sr->cb_arg, err);
573 softreq_destroy(sr);
574 574
575 req_completed++; 575 req_completed++;
576 } 576 }
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 44a4d2779b15..c9bfd4f439ce 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
822 /* Page alignment satisfies our needs for N <= 128 */ 822 /* Page alignment satisfies our needs for N <= 128 */
823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); 823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); 824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
825 cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, 825 cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
826 &cmd_q->qbase_dma, 826 &cmd_q->qbase_dma,
827 GFP_KERNEL); 827 GFP_KERNEL);
828 if (!cmd_q->qbase) { 828 if (!cmd_q->qbase) {
829 dev_err(dev, "unable to allocate command queue\n"); 829 dev_err(dev, "unable to allocate command queue\n");
830 ret = -ENOMEM; 830 ret = -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f2643cda45db..a3527c00b29a 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
549 unsigned int keylen) 549 unsigned int keylen)
550{ 550{
551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
552 struct rtattr *rta = (struct rtattr *)key;
553 struct cc_crypto_req cc_req = {}; 552 struct cc_crypto_req cc_req = {};
554 struct crypto_authenc_key_param *param;
555 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 553 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
556 int rc = -EINVAL;
557 unsigned int seq_len = 0; 554 unsigned int seq_len = 0;
558 struct device *dev = drvdata_to_dev(ctx->drvdata); 555 struct device *dev = drvdata_to_dev(ctx->drvdata);
556 const u8 *enckey, *authkey;
557 int rc;
559 558
560 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", 559 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
561 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); 560 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
563 /* STAT_PHASE_0: Init and sanity checks */ 562 /* STAT_PHASE_0: Init and sanity checks */
564 563
565 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ 564 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
566 if (!RTA_OK(rta, keylen)) 565 struct crypto_authenc_keys keys;
567 goto badkey; 566
568 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 567 rc = crypto_authenc_extractkeys(&keys, key, keylen);
569 goto badkey; 568 if (rc)
570 if (RTA_PAYLOAD(rta) < sizeof(*param))
571 goto badkey;
572 param = RTA_DATA(rta);
573 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
574 key += RTA_ALIGN(rta->rta_len);
575 keylen -= RTA_ALIGN(rta->rta_len);
576 if (keylen < ctx->enc_keylen)
577 goto badkey; 569 goto badkey;
578 ctx->auth_keylen = keylen - ctx->enc_keylen; 570 enckey = keys.enckey;
571 authkey = keys.authkey;
572 ctx->enc_keylen = keys.enckeylen;
573 ctx->auth_keylen = keys.authkeylen;
579 574
580 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 575 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
581 /* the nonce is stored in bytes at end of key */ 576 /* the nonce is stored in bytes at end of key */
577 rc = -EINVAL;
582 if (ctx->enc_keylen < 578 if (ctx->enc_keylen <
583 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) 579 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
584 goto badkey; 580 goto badkey;
585 /* Copy nonce from last 4 bytes in CTR key to 581 /* Copy nonce from last 4 bytes in CTR key to
586 * first 4 bytes in CTR IV 582 * first 4 bytes in CTR IV
587 */ 583 */
588 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + 584 memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
589 ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, 585 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
590 CTR_RFC3686_NONCE_SIZE);
591 /* Set CTR key size */ 586 /* Set CTR key size */
592 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; 587 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
593 } 588 }
594 } else { /* non-authenc - has just one key */ 589 } else { /* non-authenc - has just one key */
590 enckey = key;
591 authkey = NULL;
595 ctx->enc_keylen = keylen; 592 ctx->enc_keylen = keylen;
596 ctx->auth_keylen = 0; 593 ctx->auth_keylen = 0;
597 } 594 }
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
603 /* STAT_PHASE_1: Copy key to ctx */ 600 /* STAT_PHASE_1: Copy key to ctx */
604 601
605 /* Get key material */ 602 /* Get key material */
606 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); 603 memcpy(ctx->enckey, enckey, ctx->enc_keylen);
607 if (ctx->enc_keylen == 24) 604 if (ctx->enc_keylen == 24)
608 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 605 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
609 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 606 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
610 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); 607 memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
608 ctx->auth_keylen);
611 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ 609 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
612 rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); 610 rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
613 if (rc) 611 if (rc)
614 goto badkey; 612 goto badkey;
615 } 613 }
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a171d9..adc0cd8ae97b 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); 241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
242 } else { 242 } else {
243 /* new key */ 243 /* new key */
244 ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, 244 ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
245 &ctx->pkey, GFP_KERNEL); 245 &ctx->pkey, GFP_KERNEL);
246 if (!ctx->key) { 246 if (!ctx->key) {
247 mutex_unlock(&ctx->lock); 247 mutex_unlock(&ctx->lock);
248 return -ENOMEM; 248 return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c1ee4e7bf996..91ee2bb575df 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
1082 struct sec_queue_ring_db *ring_db = &queue->ring_db; 1082 struct sec_queue_ring_db *ring_db = &queue->ring_db;
1083 int ret; 1083 int ret;
1084 1084
1085 ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, 1085 ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
1086 &ring_cmd->paddr, 1086 &ring_cmd->paddr, GFP_KERNEL);
1087 GFP_KERNEL);
1088 if (!ring_cmd->vaddr) 1087 if (!ring_cmd->vaddr)
1089 return -ENOMEM; 1088 return -ENOMEM;
1090 1089
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
1092 mutex_init(&ring_cmd->lock); 1091 mutex_init(&ring_cmd->lock);
1093 ring_cmd->callback = sec_alg_callback; 1092 ring_cmd->callback = sec_alg_callback;
1094 1093
1095 ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, 1094 ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
1096 &ring_cq->paddr, 1095 &ring_cq->paddr, GFP_KERNEL);
1097 GFP_KERNEL);
1098 if (!ring_cq->vaddr) { 1096 if (!ring_cq->vaddr) {
1099 ret = -ENOMEM; 1097 ret = -ENOMEM;
1100 goto err_free_ring_cmd; 1098 goto err_free_ring_cmd;
1101 } 1099 }
1102 1100
1103 ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, 1101 ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
1104 &ring_db->paddr, 1102 &ring_db->paddr, GFP_KERNEL);
1105 GFP_KERNEL);
1106 if (!ring_db->vaddr) { 1103 if (!ring_db->vaddr) {
1107 ret = -ENOMEM; 1104 ret = -ENOMEM;
1108 goto err_free_ring_cq; 1105 goto err_free_ring_cq;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..1b0d156bb9be 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
260{ 260{
261 struct device *dev = &pdev->dev; 261 struct device *dev = &pdev->dev;
262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
263 crypt_virt = dma_zalloc_coherent(dev, 263 crypt_virt = dma_alloc_coherent(dev,
264 NPE_QLEN * sizeof(struct crypt_ctl), 264 NPE_QLEN * sizeof(struct crypt_ctl),
265 &crypt_phys, GFP_ATOMIC); 265 &crypt_phys, GFP_ATOMIC);
266 if (!crypt_virt) 266 if (!crypt_virt)
267 return -ENOMEM; 267 return -ENOMEM;
268 return 0; 268 return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..5660e5e5e022 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
453 if (!ring[i]) 453 if (!ring[i])
454 goto err_cleanup; 454 goto err_cleanup;
455 455
456 ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, 456 ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
457 MTK_DESC_RING_SZ, 457 MTK_DESC_RING_SZ,
458 &ring[i]->cmd_dma, 458 &ring[i]->cmd_dma,
459 GFP_KERNEL); 459 GFP_KERNEL);
460 if (!ring[i]->cmd_base) 460 if (!ring[i]->cmd_base)
461 goto err_cleanup; 461 goto err_cleanup;
462 462
463 ring[i]->res_base = dma_zalloc_coherent(cryp->dev, 463 ring[i]->res_base = dma_alloc_coherent(cryp->dev,
464 MTK_DESC_RING_SZ, 464 MTK_DESC_RING_SZ,
465 &ring[i]->res_dma, 465 &ring[i]->res_dma,
466 GFP_KERNEL); 466 GFP_KERNEL);
467 if (!ring[i]->res_base) 467 if (!ring[i]->res_base)
468 goto err_cleanup; 468 goto err_cleanup;
469 469
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 3744b22f0c46..d28cba34773e 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
244 dev_to_node(&GET_DEV(accel_dev))); 244 dev_to_node(&GET_DEV(accel_dev)));
245 if (!admin) 245 if (!admin)
246 return -ENOMEM; 246 return -ENOMEM;
247 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 247 admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
248 &admin->phy_addr, GFP_KERNEL); 248 &admin->phy_addr, GFP_KERNEL);
249 if (!admin->virt_addr) { 249 if (!admin->virt_addr) {
250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); 250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
251 kfree(admin); 251 kfree(admin);
252 return -ENOMEM; 252 return -ENOMEM;
253 } 253 }
254 254
255 admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), 255 admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
256 PAGE_SIZE, 256 PAGE_SIZE,
257 &admin->const_tbl_addr, 257 &admin->const_tbl_addr,
258 GFP_KERNEL); 258 GFP_KERNEL);
259 if (!admin->virt_tbl_addr) { 259 if (!admin->virt_tbl_addr) {
260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); 260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d2698299896f..975c75198f56 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
601 601
602 dev = &GET_DEV(inst->accel_dev); 602 dev = &GET_DEV(inst->accel_dev);
603 ctx->inst = inst; 603 ctx->inst = inst;
604 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 604 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
605 &ctx->enc_cd_paddr, 605 &ctx->enc_cd_paddr,
606 GFP_ATOMIC); 606 GFP_ATOMIC);
607 if (!ctx->enc_cd) { 607 if (!ctx->enc_cd) {
608 return -ENOMEM; 608 return -ENOMEM;
609 } 609 }
610 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 610 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
611 &ctx->dec_cd_paddr, 611 &ctx->dec_cd_paddr,
612 GFP_ATOMIC); 612 GFP_ATOMIC);
613 if (!ctx->dec_cd) { 613 if (!ctx->dec_cd) {
614 goto out_free_enc; 614 goto out_free_enc;
615 } 615 }
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
933 933
934 dev = &GET_DEV(inst->accel_dev); 934 dev = &GET_DEV(inst->accel_dev);
935 ctx->inst = inst; 935 ctx->inst = inst;
936 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 936 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
937 &ctx->enc_cd_paddr, 937 &ctx->enc_cd_paddr,
938 GFP_ATOMIC); 938 GFP_ATOMIC);
939 if (!ctx->enc_cd) { 939 if (!ctx->enc_cd) {
940 spin_unlock(&ctx->lock); 940 spin_unlock(&ctx->lock);
941 return -ENOMEM; 941 return -ENOMEM;
942 } 942 }
943 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 943 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
944 &ctx->dec_cd_paddr, 944 &ctx->dec_cd_paddr,
945 GFP_ATOMIC); 945 GFP_ATOMIC);
946 if (!ctx->dec_cd) { 946 if (!ctx->dec_cd) {
947 spin_unlock(&ctx->lock); 947 spin_unlock(&ctx->lock);
948 goto out_free_enc; 948 goto out_free_enc;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 320e7854b4ee..c9f324730d71 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
332 } else { 332 } else {
333 int shift = ctx->p_size - req->src_len; 333 int shift = ctx->p_size - req->src_len;
334 334
335 qat_req->src_align = dma_zalloc_coherent(dev, 335 qat_req->src_align = dma_alloc_coherent(dev,
336 ctx->p_size, 336 ctx->p_size,
337 &qat_req->in.dh.in.b, 337 &qat_req->in.dh.in.b,
338 GFP_KERNEL); 338 GFP_KERNEL);
339 if (unlikely(!qat_req->src_align)) 339 if (unlikely(!qat_req->src_align))
340 return ret; 340 return ret;
341 341
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
360 goto unmap_src; 360 goto unmap_src;
361 361
362 } else { 362 } else {
363 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, 363 qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
364 &qat_req->out.dh.r, 364 &qat_req->out.dh.r,
365 GFP_KERNEL); 365 GFP_KERNEL);
366 if (unlikely(!qat_req->dst_align)) 366 if (unlikely(!qat_req->dst_align))
367 goto unmap_src; 367 goto unmap_src;
368 } 368 }
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
447 return -EINVAL; 447 return -EINVAL;
448 448
449 ctx->p_size = params->p_size; 449 ctx->p_size = params->p_size;
450 ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 450 ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
451 if (!ctx->p) 451 if (!ctx->p)
452 return -ENOMEM; 452 return -ENOMEM;
453 memcpy(ctx->p, params->p, ctx->p_size); 453 memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
458 return 0; 458 return 0;
459 } 459 }
460 460
461 ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 461 ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
462 if (!ctx->g) 462 if (!ctx->g)
463 return -ENOMEM; 463 return -ENOMEM;
464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, 464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
503 if (ret < 0) 503 if (ret < 0)
504 goto err_clear_ctx; 504 goto err_clear_ctx;
505 505
506 ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 506 ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
507 GFP_KERNEL); 507 GFP_KERNEL);
508 if (!ctx->xa) { 508 if (!ctx->xa) {
509 ret = -ENOMEM; 509 ret = -ENOMEM;
510 goto err_clear_ctx; 510 goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
737 } else { 737 } else {
738 int shift = ctx->key_sz - req->src_len; 738 int shift = ctx->key_sz - req->src_len;
739 739
740 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 740 qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
741 &qat_req->in.rsa.enc.m, 741 &qat_req->in.rsa.enc.m,
742 GFP_KERNEL); 742 GFP_KERNEL);
743 if (unlikely(!qat_req->src_align)) 743 if (unlikely(!qat_req->src_align))
744 return ret; 744 return ret;
745 745
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
756 goto unmap_src; 756 goto unmap_src;
757 757
758 } else { 758 } else {
759 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 759 qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
760 &qat_req->out.rsa.enc.c, 760 &qat_req->out.rsa.enc.c,
761 GFP_KERNEL); 761 GFP_KERNEL);
762 if (unlikely(!qat_req->dst_align)) 762 if (unlikely(!qat_req->dst_align))
763 goto unmap_src; 763 goto unmap_src;
764 764
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
881 } else { 881 } else {
882 int shift = ctx->key_sz - req->src_len; 882 int shift = ctx->key_sz - req->src_len;
883 883
884 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 884 qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
885 &qat_req->in.rsa.dec.c, 885 &qat_req->in.rsa.dec.c,
886 GFP_KERNEL); 886 GFP_KERNEL);
887 if (unlikely(!qat_req->src_align)) 887 if (unlikely(!qat_req->src_align))
888 return ret; 888 return ret;
889 889
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
900 goto unmap_src; 900 goto unmap_src;
901 901
902 } else { 902 } else {
903 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 903 qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
904 &qat_req->out.rsa.dec.m, 904 &qat_req->out.rsa.dec.m,
905 GFP_KERNEL); 905 GFP_KERNEL);
906 if (unlikely(!qat_req->dst_align)) 906 if (unlikely(!qat_req->dst_align))
907 goto unmap_src; 907 goto unmap_src;
908 908
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
989 goto err; 989 goto err;
990 990
991 ret = -ENOMEM; 991 ret = -ENOMEM;
992 ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 992 ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
993 if (!ctx->n) 993 if (!ctx->n)
994 goto err; 994 goto err;
995 995
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
1018 return -EINVAL; 1018 return -EINVAL;
1019 } 1019 }
1020 1020
1021 ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1021 ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
1022 if (!ctx->e) 1022 if (!ctx->e)
1023 return -ENOMEM; 1023 return -ENOMEM;
1024 1024
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
1044 goto err; 1044 goto err;
1045 1045
1046 ret = -ENOMEM; 1046 ret = -ENOMEM;
1047 ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1047 ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
1048 if (!ctx->d) 1048 if (!ctx->d)
1049 goto err; 1049 goto err;
1050 1050
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1077 qat_rsa_drop_leading_zeros(&ptr, &len); 1077 qat_rsa_drop_leading_zeros(&ptr, &len);
1078 if (!len) 1078 if (!len)
1079 goto err; 1079 goto err;
1080 ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1080 ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
1081 if (!ctx->p) 1081 if (!ctx->p)
1082 goto err; 1082 goto err;
1083 memcpy(ctx->p + (half_key_sz - len), ptr, len); 1083 memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1088 qat_rsa_drop_leading_zeros(&ptr, &len); 1088 qat_rsa_drop_leading_zeros(&ptr, &len);
1089 if (!len) 1089 if (!len)
1090 goto free_p; 1090 goto free_p;
1091 ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1091 ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
1092 if (!ctx->q) 1092 if (!ctx->q)
1093 goto free_p; 1093 goto free_p;
1094 memcpy(ctx->q + (half_key_sz - len), ptr, len); 1094 memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1099 qat_rsa_drop_leading_zeros(&ptr, &len); 1099 qat_rsa_drop_leading_zeros(&ptr, &len);
1100 if (!len) 1100 if (!len)
1101 goto free_q; 1101 goto free_q;
1102 ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1102 ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
1103 GFP_KERNEL); 1103 GFP_KERNEL);
1104 if (!ctx->dp) 1104 if (!ctx->dp)
1105 goto free_q; 1105 goto free_q;
1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len); 1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1111 qat_rsa_drop_leading_zeros(&ptr, &len); 1111 qat_rsa_drop_leading_zeros(&ptr, &len);
1112 if (!len) 1112 if (!len)
1113 goto free_dp; 1113 goto free_dp;
1114 ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1114 ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
1115 GFP_KERNEL); 1115 GFP_KERNEL);
1116 if (!ctx->dq) 1116 if (!ctx->dq)
1117 goto free_dp; 1117 goto free_dp;
1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len); 1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1123 qat_rsa_drop_leading_zeros(&ptr, &len); 1123 qat_rsa_drop_leading_zeros(&ptr, &len);
1124 if (!len) 1124 if (!len)
1125 goto free_dq; 1125 goto free_dq;
1126 ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1126 ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
1127 GFP_KERNEL); 1127 GFP_KERNEL);
1128 if (!ctx->qinv) 1128 if (!ctx->qinv)
1129 goto free_dq; 1129 goto free_dq;
1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len); 1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 45e20707cef8..f8e2c5c3f4eb 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1361 struct talitos_private *priv = dev_get_drvdata(dev); 1361 struct talitos_private *priv = dev_get_drvdata(dev);
1362 bool is_sec1 = has_ftr_sec1(priv); 1362 bool is_sec1 = has_ftr_sec1(priv);
1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; 1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1364 void *err;
1365 1364
1366 if (cryptlen + authsize > max_len) { 1365 if (cryptlen + authsize > max_len) {
1367 dev_err(dev, "length exceeds h/w max limit\n"); 1366 dev_err(dev, "length exceeds h/w max limit\n");
1368 return ERR_PTR(-EINVAL); 1367 return ERR_PTR(-EINVAL);
1369 } 1368 }
1370 1369
1371 if (ivsize)
1372 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1373
1374 if (!dst || dst == src) { 1370 if (!dst || dst == src) {
1375 src_len = assoclen + cryptlen + authsize; 1371 src_len = assoclen + cryptlen + authsize;
1376 src_nents = sg_nents_for_len(src, src_len); 1372 src_nents = sg_nents_for_len(src, src_len);
1377 if (src_nents < 0) { 1373 if (src_nents < 0) {
1378 dev_err(dev, "Invalid number of src SG.\n"); 1374 dev_err(dev, "Invalid number of src SG.\n");
1379 err = ERR_PTR(-EINVAL); 1375 return ERR_PTR(-EINVAL);
1380 goto error_sg;
1381 } 1376 }
1382 src_nents = (src_nents == 1) ? 0 : src_nents; 1377 src_nents = (src_nents == 1) ? 0 : src_nents;
1383 dst_nents = dst ? src_nents : 0; 1378 dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1387 src_nents = sg_nents_for_len(src, src_len); 1382 src_nents = sg_nents_for_len(src, src_len);
1388 if (src_nents < 0) { 1383 if (src_nents < 0) {
1389 dev_err(dev, "Invalid number of src SG.\n"); 1384 dev_err(dev, "Invalid number of src SG.\n");
1390 err = ERR_PTR(-EINVAL); 1385 return ERR_PTR(-EINVAL);
1391 goto error_sg;
1392 } 1386 }
1393 src_nents = (src_nents == 1) ? 0 : src_nents; 1387 src_nents = (src_nents == 1) ? 0 : src_nents;
1394 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); 1388 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1395 dst_nents = sg_nents_for_len(dst, dst_len); 1389 dst_nents = sg_nents_for_len(dst, dst_len);
1396 if (dst_nents < 0) { 1390 if (dst_nents < 0) {
1397 dev_err(dev, "Invalid number of dst SG.\n"); 1391 dev_err(dev, "Invalid number of dst SG.\n");
1398 err = ERR_PTR(-EINVAL); 1392 return ERR_PTR(-EINVAL);
1399 goto error_sg;
1400 } 1393 }
1401 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1394 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1402 } 1395 }
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1423 /* if its a ahash, add space for a second desc next to the first one */ 1416 /* if its a ahash, add space for a second desc next to the first one */
1424 if (is_sec1 && !dst) 1417 if (is_sec1 && !dst)
1425 alloc_len += sizeof(struct talitos_desc); 1418 alloc_len += sizeof(struct talitos_desc);
1419 alloc_len += ivsize;
1426 1420
1427 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1421 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1428 if (!edesc) { 1422 if (!edesc)
1429 err = ERR_PTR(-ENOMEM); 1423 return ERR_PTR(-ENOMEM);
1430 goto error_sg; 1424 if (ivsize) {
1425 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1426 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1431 } 1427 }
1432 memset(&edesc->desc, 0, sizeof(edesc->desc)); 1428 memset(&edesc->desc, 0, sizeof(edesc->desc));
1433 1429
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1445 DMA_BIDIRECTIONAL); 1441 DMA_BIDIRECTIONAL);
1446 } 1442 }
1447 return edesc; 1443 return edesc;
1448error_sg:
1449 if (iv_dma)
1450 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1451 return err;
1452} 1444}
1453 1445
1454static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, 1446static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..86708fb9bda1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
1182{ 1182{
1183 int ret = -EBUSY; 1183 int ret = -EBUSY;
1184 1184
1185 sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1185 sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
1186 GFP_NOWAIT); 1186 GFP_NOWAIT);
1187 if (!sdma->bd0) { 1187 if (!sdma->bd0) {
1188 ret = -ENOMEM; 1188 ret = -ENOMEM;
1189 goto out; 1189 goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1206 int ret = 0; 1206 int ret = 0;
1207 1207
1208 desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, 1208 desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
1209 GFP_NOWAIT); 1209 GFP_NOWAIT);
1210 if (!desc->bd) { 1210 if (!desc->bd) {
1211 ret = -ENOMEM; 1211 ret = -ENOMEM;
1212 goto out; 1212 goto out;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1a2028e1c29e 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. 325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
326 */ 326 */
327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); 327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
328 ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 328 ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
329 &ring->tphys, GFP_NOWAIT); 329 &ring->tphys, GFP_NOWAIT);
330 if (!ring->txd) 330 if (!ring->txd)
331 return -ENOMEM; 331 return -ENOMEM;
332 332
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 35193b31a9e0..22cc7f68ef6e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
417 int ret; 417 int ret;
418 418
419 mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, 419 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
420 CCW_BLOCK_SIZE, 420 CCW_BLOCK_SIZE,
421 &mxs_chan->ccw_phys, GFP_KERNEL); 421 &mxs_chan->ccw_phys, GFP_KERNEL);
422 if (!mxs_chan->ccw) { 422 if (!mxs_chan->ccw) {
423 ret = -ENOMEM; 423 ret = -ENOMEM;
424 goto err_alloc; 424 goto err_alloc;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 1d5988849aa6..eafd6c4b90fe 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1208 ring->size = ret; 1208 ring->size = ret;
1209 1209
1210 /* Allocate memory for DMA ring descriptor */ 1210 /* Allocate memory for DMA ring descriptor */
1211 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1211 ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
1212 &ring->desc_paddr, GFP_KERNEL); 1212 &ring->desc_paddr, GFP_KERNEL);
1213 if (!ring->desc_vaddr) { 1213 if (!ring->desc_vaddr) {
1214 chan_err(chan, "Failed to allocate ring desc\n"); 1214 chan_err(chan, "Failed to allocate ring desc\n");
1215 return -ENOMEM; 1215 return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..cb20b411493e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
879 */ 879 */
880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
881 /* Allocate the buffer descriptors. */ 881 /* Allocate the buffer descriptors. */
882 chan->seg_v = dma_zalloc_coherent(chan->dev, 882 chan->seg_v = dma_alloc_coherent(chan->dev,
883 sizeof(*chan->seg_v) * 883 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
884 XILINX_DMA_NUM_DESCS, 884 &chan->seg_p, GFP_KERNEL);
885 &chan->seg_p, GFP_KERNEL);
886 if (!chan->seg_v) { 885 if (!chan->seg_v) {
887 dev_err(chan->dev, 886 dev_err(chan->dev,
888 "unable to allocate channel %d descriptors\n", 887 "unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
895 * so allocating a desc segment during channel allocation for 894 * so allocating a desc segment during channel allocation for
896 * programming tail descriptor. 895 * programming tail descriptor.
897 */ 896 */
898 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, 897 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
899 sizeof(*chan->cyclic_seg_v), 898 sizeof(*chan->cyclic_seg_v),
900 &chan->cyclic_seg_p, GFP_KERNEL); 899 &chan->cyclic_seg_p,
900 GFP_KERNEL);
901 if (!chan->cyclic_seg_v) { 901 if (!chan->cyclic_seg_v) {
902 dev_err(chan->dev, 902 dev_err(chan->dev,
903 "unable to allocate desc segment for cyclic DMA\n"); 903 "unable to allocate desc segment for cyclic DMA\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 8db51750ce93..4478787a247f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
490 list_add_tail(&desc->node, &chan->free_list); 490 list_add_tail(&desc->node, &chan->free_list);
491 } 491 }
492 492
493 chan->desc_pool_v = dma_zalloc_coherent(chan->dev, 493 chan->desc_pool_v = dma_alloc_coherent(chan->dev,
494 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 494 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
495 &chan->desc_pool_p, GFP_KERNEL); 495 &chan->desc_pool_p, GFP_KERNEL);
496 if (!chan->desc_pool_v) 496 if (!chan->desc_pool_v)
497 return -ENOMEM; 497 return -ENOMEM;
498 498
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 83617fdc661d..0dc96419efe3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -289,7 +289,7 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
289 return pca953x_check_register(chip, reg, bank); 289 return pca953x_check_register(chip, reg, bank);
290} 290}
291 291
292const struct regmap_config pca953x_i2c_regmap = { 292static const struct regmap_config pca953x_i2c_regmap = {
293 .reg_bits = 8, 293 .reg_bits = 8,
294 .val_bits = 8, 294 .val_bits = 8,
295 295
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 48534bda73d3..259cf6ab969b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -357,8 +357,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
358 358
359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
360 struct gpio_desc *desc;
361
362 if (event->irq_requested) { 360 if (event->irq_requested) {
363 if (event->irq_is_wake) 361 if (event->irq_is_wake)
364 disable_irq_wake(event->irq); 362 disable_irq_wake(event->irq);
@@ -366,11 +364,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
366 free_irq(event->irq, event); 364 free_irq(event->irq, event);
367 } 365 }
368 366
369 desc = event->desc;
370 if (WARN_ON(IS_ERR(desc)))
371 continue;
372 gpiochip_unlock_as_irq(chip, event->pin); 367 gpiochip_unlock_as_irq(chip, event->pin);
373 gpiochip_free_own_desc(desc); 368 gpiochip_free_own_desc(event->desc);
374 list_del(&event->node); 369 list_del(&event->node);
375 kfree(event); 370 kfree(event);
376 } 371 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a078f4ae73d..7ff3a28fc903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1701 amdgpu_xgmi_add_device(adev); 1701 amdgpu_xgmi_add_device(adev);
1702 amdgpu_amdkfd_device_init(adev); 1702 amdgpu_amdkfd_device_init(adev);
1703 1703
1704 if (amdgpu_sriov_vf(adev)) 1704 if (amdgpu_sriov_vf(adev)) {
1705 amdgpu_virt_init_data_exchange(adev);
1705 amdgpu_virt_release_full_gpu(adev, true); 1706 amdgpu_virt_release_full_gpu(adev, true);
1707 }
1706 1708
1707 return 0; 1709 return 0;
1708} 1710}
@@ -2632,9 +2634,6 @@ fence_driver_init:
2632 goto failed; 2634 goto failed;
2633 } 2635 }
2634 2636
2635 if (amdgpu_sriov_vf(adev))
2636 amdgpu_virt_init_data_exchange(adev);
2637
2638 amdgpu_fbdev_init(adev); 2637 amdgpu_fbdev_init(adev);
2639 2638
2640 r = amdgpu_pm_sysfs_init(adev); 2639 r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2798 struct drm_framebuffer *fb = crtc->primary->fb; 2797 struct drm_framebuffer *fb = crtc->primary->fb;
2799 struct amdgpu_bo *robj; 2798 struct amdgpu_bo *robj;
2800 2799
2801 if (amdgpu_crtc->cursor_bo) { 2800 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
2802 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2801 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2803 r = amdgpu_bo_reserve(aobj, true); 2802 r = amdgpu_bo_reserve(aobj, true);
2804 if (r == 0) { 2803 if (r == 0) {
@@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2906 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2905 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2907 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2906 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2908 2907
2909 if (amdgpu_crtc->cursor_bo) { 2908 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
2910 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2909 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2911 r = amdgpu_bo_reserve(aobj, true); 2910 r = amdgpu_bo_reserve(aobj, true);
2912 if (r == 0) { 2911 if (r == 0) {
@@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3226 r = amdgpu_ib_ring_tests(adev); 3225 r = amdgpu_ib_ring_tests(adev);
3227 3226
3228error: 3227error:
3228 amdgpu_virt_init_data_exchange(adev);
3229 amdgpu_virt_release_full_gpu(adev, true); 3229 amdgpu_virt_release_full_gpu(adev, true);
3230 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3230 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3231 atomic_inc(&adev->vram_lost_counter); 3231 atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
188 goto cleanup; 188 goto cleanup;
189 } 189 }
190 190
191 r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); 191 if (!adev->enable_virtual_display) {
192 if (unlikely(r != 0)) { 192 r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
193 DRM_ERROR("failed to pin new abo buffer before flip\n"); 193 if (unlikely(r != 0)) {
194 goto unreserve; 194 DRM_ERROR("failed to pin new abo buffer before flip\n");
195 goto unreserve;
196 }
195 } 197 }
196 198
197 r = amdgpu_ttm_alloc_gart(&new_abo->tbo); 199 r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
211 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); 213 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
212 amdgpu_bo_unreserve(new_abo); 214 amdgpu_bo_unreserve(new_abo);
213 215
214 work->base = amdgpu_bo_gpu_offset(new_abo); 216 if (!adev->enable_virtual_display)
217 work->base = amdgpu_bo_gpu_offset(new_abo);
215 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 218 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
216 amdgpu_get_vblank_counter_kms(dev, work->crtc_id); 219 amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
217 220
@@ -242,9 +245,10 @@ pflip_cleanup:
242 goto cleanup; 245 goto cleanup;
243 } 246 }
244unpin: 247unpin:
245 if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { 248 if (!adev->enable_virtual_display)
246 DRM_ERROR("failed to unpin new abo in error path\n"); 249 if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
247 } 250 DRM_ERROR("failed to unpin new abo in error path\n");
251
248unreserve: 252unreserve:
249 amdgpu_bo_unreserve(new_abo); 253 amdgpu_bo_unreserve(new_abo);
250 254
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..6896dec97fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2008,6 +2008,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2008 2008
2009int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2009int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2010{ 2010{
2011 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2011 int ret; 2012 int ret;
2012 2013
2013 if (adev->pm.sysfs_initialized) 2014 if (adev->pm.sysfs_initialized)
@@ -2091,12 +2092,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2091 "pp_power_profile_mode\n"); 2092 "pp_power_profile_mode\n");
2092 return ret; 2093 return ret;
2093 } 2094 }
2094 ret = device_create_file(adev->dev, 2095 if (hwmgr->od_enabled) {
2095 &dev_attr_pp_od_clk_voltage); 2096 ret = device_create_file(adev->dev,
2096 if (ret) { 2097 &dev_attr_pp_od_clk_voltage);
2097 DRM_ERROR("failed to create device file " 2098 if (ret) {
2098 "pp_od_clk_voltage\n"); 2099 DRM_ERROR("failed to create device file "
2099 return ret; 2100 "pp_od_clk_voltage\n");
2101 return ret;
2102 }
2100 } 2103 }
2101 ret = device_create_file(adev->dev, 2104 ret = device_create_file(adev->dev,
2102 &dev_attr_gpu_busy_percent); 2105 &dev_attr_gpu_busy_percent);
@@ -2118,6 +2121,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2118 2121
2119void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 2122void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2120{ 2123{
2124 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2125
2121 if (adev->pm.dpm_enabled == 0) 2126 if (adev->pm.dpm_enabled == 0)
2122 return; 2127 return;
2123 2128
@@ -2138,8 +2143,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2138 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 2143 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
2139 device_remove_file(adev->dev, 2144 device_remove_file(adev->dev,
2140 &dev_attr_pp_power_profile_mode); 2145 &dev_attr_pp_power_profile_mode);
2141 device_remove_file(adev->dev, 2146 if (hwmgr->od_enabled)
2142 &dev_attr_pp_od_clk_voltage); 2147 device_remove_file(adev->dev,
2148 &dev_attr_pp_od_clk_voltage);
2143 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 2149 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
2144} 2150}
2145 2151
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e73d152659a2..d2ea5ce2cefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -847,9 +847,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
847 bp->size = amdgpu_vm_bo_size(adev, level); 847 bp->size = amdgpu_vm_bo_size(adev, level);
848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE; 848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM; 849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
850 if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
851 adev->flags & AMD_IS_APU)
852 bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
853 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); 850 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
854 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 851 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
855 AMDGPU_GEM_CREATE_CPU_GTT_USWC; 852 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
168 168
169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
170 if (crtc->primary->fb) {
171 int r;
172 struct amdgpu_bo *abo;
173
174 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
175 r = amdgpu_bo_reserve(abo, true);
176 if (unlikely(r))
177 DRM_ERROR("failed to reserve abo before unpin\n");
178 else {
179 amdgpu_bo_unpin(abo);
180 amdgpu_bo_unreserve(abo);
181 }
182 }
183 170
184 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 171 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
185 amdgpu_crtc->encoder = NULL; 172 amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
692 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 679 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
693 680
694 drm_crtc_vblank_put(&amdgpu_crtc->base); 681 drm_crtc_vblank_put(&amdgpu_crtc->base);
695 schedule_work(&works->unpin_work); 682 amdgpu_bo_unref(&works->old_abo);
683 kfree(works->shared);
684 kfree(works);
696 685
697 return 0; 686 return 0;
698} 687}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 381f593b0cda..57cb3a51bda7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4233 u32 tmp; 4233 u32 tmp;
4234 u32 rb_bufsz; 4234 u32 rb_bufsz;
4235 u64 rb_addr, rptr_addr, wptr_gpu_addr; 4235 u64 rb_addr, rptr_addr, wptr_gpu_addr;
4236 int r;
4237 4236
4238 /* Set the write pointer delay */ 4237 /* Set the write pointer delay */
4239 WREG32(mmCP_RB_WPTR_DELAY, 0); 4238 WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4278 amdgpu_ring_clear_ring(ring); 4277 amdgpu_ring_clear_ring(ring);
4279 gfx_v8_0_cp_gfx_start(adev); 4278 gfx_v8_0_cp_gfx_start(adev);
4280 ring->sched.ready = true; 4279 ring->sched.ready = true;
4281 r = amdgpu_ring_test_helper(ring);
4282 4280
4283 return r; 4281 return 0;
4284} 4282}
4285 4283
4286static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 4284static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4369 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 4367 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4370 } 4368 }
4371 4369
4372 r = amdgpu_ring_test_helper(kiq_ring); 4370 amdgpu_ring_commit(kiq_ring);
4373 if (r) 4371
4374 DRM_ERROR("KCQ enable failed\n"); 4372 return 0;
4375 return r;
4376} 4373}
4377 4374
4378static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) 4375static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4709 if (r) 4706 if (r)
4710 goto done; 4707 goto done;
4711 4708
4712 /* Test KCQs - reversing the order of rings seems to fix ring test failure 4709done:
4713 * after GPU reset 4710 return r;
4714 */ 4711}
4715 for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { 4712
4713static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4714{
4715 int r, i;
4716 struct amdgpu_ring *ring;
4717
4718 /* collect all the ring_tests here, gfx, kiq, compute */
4719 ring = &adev->gfx.gfx_ring[0];
4720 r = amdgpu_ring_test_helper(ring);
4721 if (r)
4722 return r;
4723
4724 ring = &adev->gfx.kiq.ring;
4725 r = amdgpu_ring_test_helper(ring);
4726 if (r)
4727 return r;
4728
4729 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4716 ring = &adev->gfx.compute_ring[i]; 4730 ring = &adev->gfx.compute_ring[i];
4717 r = amdgpu_ring_test_helper(ring); 4731 amdgpu_ring_test_helper(ring);
4718 } 4732 }
4719 4733
4720done: 4734 return 0;
4721 return r;
4722} 4735}
4723 4736
4724static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) 4737static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4739 r = gfx_v8_0_kcq_resume(adev); 4752 r = gfx_v8_0_kcq_resume(adev);
4740 if (r) 4753 if (r)
4741 return r; 4754 return r;
4755
4756 r = gfx_v8_0_cp_test_all_rings(adev);
4757 if (r)
4758 return r;
4759
4742 gfx_v8_0_enable_gui_idle_interrupt(adev, true); 4760 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4743 4761
4744 return 0; 4762 return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
5086 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) 5104 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5087 gfx_v8_0_cp_gfx_resume(adev); 5105 gfx_v8_0_cp_gfx_resume(adev);
5088 5106
5107 gfx_v8_0_cp_test_all_rings(adev);
5108
5089 adev->gfx.rlc.funcs->start(adev); 5109 adev->gfx.rlc.funcs->start(adev);
5090 5110
5091 return 0; 5111 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7556716038d3..fbca0494f871 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), 114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) 116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
117}; 120};
118 121
119static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = 122static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), 139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), 140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), 141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
142}; 142};
143 143
144static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = 144static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3587{ 3587{
3588 uint32_t data, def; 3588 uint32_t data, def;
3589 3589
3590 amdgpu_gfx_rlc_enter_safe_mode(adev);
3591
3590 /* It is disabled by HW by default */ 3592 /* It is disabled by HW by default */
3591 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3593 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3592 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3594 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3651 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3653 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3652 } 3654 }
3653 } 3655 }
3656
3657 amdgpu_gfx_rlc_exit_safe_mode(adev);
3654} 3658}
3655 3659
3656static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, 3660static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
174 return r; 174 return r;
175 } 175 }
176 /* Retrieve checksum from mailbox2 */ 176 /* Retrieve checksum from mailbox2 */
177 if (req == IDH_REQ_GPU_INIT_ACCESS) { 177 if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
178 adev->virt.fw_reserve.checksum_key = 178 adev->virt.fw_reserve.checksum_key =
179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); 180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd0bfe140ee0..6811a5d05b27 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), 80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
96static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 95static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 96 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
98 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), 97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
98 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
99 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 99 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) 100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
101}; 101};
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
103static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { 103static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), 105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
106 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
106 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) 108 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
108}; 109};
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
4 4
5config HSA_AMD 5config HSA_AMD
6 bool "HSA kernel driver for AMD GPU devices" 6 bool "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && (X86_64 || ARM64)
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2 if X86_64
9 select MMU_NOTIFIER 9 select MMU_NOTIFIER
10 help 10 help
11 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..5d85ff341385 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
863 return 0; 863 return 0;
864} 864}
865 865
866#if CONFIG_X86_64
866static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 867static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
867 uint32_t *num_entries, 868 uint32_t *num_entries,
868 struct crat_subtype_iolink *sub_type_hdr) 869 struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
905 906
906 return 0; 907 return 0;
907} 908}
909#endif
908 910
909/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU 911/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
910 * 912 *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
920 struct crat_subtype_generic *sub_type_hdr; 922 struct crat_subtype_generic *sub_type_hdr;
921 int avail_size = *size; 923 int avail_size = *size;
922 int numa_node_id; 924 int numa_node_id;
925#ifdef CONFIG_X86_64
923 uint32_t entries = 0; 926 uint32_t entries = 0;
927#endif
924 int ret = 0; 928 int ret = 0;
925 929
926 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) 930 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
982 sub_type_hdr->length); 986 sub_type_hdr->length);
983 987
984 /* Fill in Subtype: IO Link */ 988 /* Fill in Subtype: IO Link */
989#ifdef CONFIG_X86_64
985 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, 990 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
986 &entries, 991 &entries,
987 (struct crat_subtype_iolink *)sub_type_hdr); 992 (struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
992 997
993 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 998 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
994 sub_type_hdr->length * entries); 999 sub_type_hdr->length * entries);
1000#else
1001 pr_info("IO link not available for non x86 platforms\n");
1002#endif
995 1003
996 crat_table->num_domains++; 1004 crat_table->num_domains++;
997 } 1005 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
1093 * the GPU device is not already present in the topology device 1093 * the GPU device is not already present in the topology device
1094 * list then return NULL. This means a new topology device has to 1094 * list then return NULL. This means a new topology device has to
1095 * be created for this GPU. 1095 * be created for this GPU.
1096 * TODO: Rather than assiging @gpu to first topology device withtout
1097 * gpu attached, it will better to have more stringent check.
1098 */ 1096 */
1099static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) 1097static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1100{ 1098{
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1102 struct kfd_topology_device *out_dev = NULL; 1100 struct kfd_topology_device *out_dev = NULL;
1103 1101
1104 down_write(&topology_lock); 1102 down_write(&topology_lock);
1105 list_for_each_entry(dev, &topology_device_list, list) 1103 list_for_each_entry(dev, &topology_device_list, list) {
1104 /* Discrete GPUs need their own topology device list
1105 * entries. Don't assign them to CPU/APU nodes.
1106 */
1107 if (!gpu->device_info->needs_iommu_device &&
1108 dev->node_props.cpu_cores_count)
1109 continue;
1110
1106 if (!dev->gpu && (dev->node_props.simd_count > 0)) { 1111 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
1107 dev->gpu = gpu; 1112 dev->gpu = gpu;
1108 out_dev = dev; 1113 out_dev = dev;
1109 break; 1114 break;
1110 } 1115 }
1116 }
1111 up_write(&topology_lock); 1117 up_write(&topology_lock);
1112 return out_dev; 1118 return out_dev;
1113} 1119}
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
1392 1398
1393static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) 1399static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1394{ 1400{
1395 const struct cpuinfo_x86 *cpuinfo;
1396 int first_cpu_of_numa_node; 1401 int first_cpu_of_numa_node;
1397 1402
1398 if (!cpumask || cpumask == cpu_none_mask) 1403 if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1400 first_cpu_of_numa_node = cpumask_first(cpumask); 1405 first_cpu_of_numa_node = cpumask_first(cpumask);
1401 if (first_cpu_of_numa_node >= nr_cpu_ids) 1406 if (first_cpu_of_numa_node >= nr_cpu_ids)
1402 return -1; 1407 return -1;
1403 cpuinfo = &cpu_data(first_cpu_of_numa_node); 1408#ifdef CONFIG_X86_64
1404 1409 return cpu_data(first_cpu_of_numa_node).apicid;
1405 return cpuinfo->apicid; 1410#else
1411 return first_cpu_of_numa_node;
1412#endif
1406} 1413}
1407 1414
1408/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor 1415/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a9a28dbc3e24..f4fa40c387d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -699,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
699{ 699{
700 struct amdgpu_dm_connector *aconnector; 700 struct amdgpu_dm_connector *aconnector;
701 struct drm_connector *connector; 701 struct drm_connector *connector;
702 struct drm_dp_mst_topology_mgr *mgr;
703 int ret;
704 bool need_hotplug = false;
702 705
703 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 706 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
704 707
705 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 708 list_for_each_entry(connector, &dev->mode_config.connector_list,
706 aconnector = to_amdgpu_dm_connector(connector); 709 head) {
707 if (aconnector->dc_link->type == dc_connection_mst_branch && 710 aconnector = to_amdgpu_dm_connector(connector);
708 !aconnector->mst_port) { 711 if (aconnector->dc_link->type != dc_connection_mst_branch ||
712 aconnector->mst_port)
713 continue;
709 714
710 if (suspend) 715 mgr = &aconnector->mst_mgr;
711 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); 716
712 else 717 if (suspend) {
713 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); 718 drm_dp_mst_topology_mgr_suspend(mgr);
714 } 719 } else {
720 ret = drm_dp_mst_topology_mgr_resume(mgr);
721 if (ret < 0) {
722 drm_dp_mst_topology_mgr_set_mst(mgr, false);
723 need_hotplug = true;
724 }
725 }
715 } 726 }
716 727
717 drm_modeset_unlock(&dev->mode_config.connection_mutex); 728 drm_modeset_unlock(&dev->mode_config.connection_mutex);
729
730 if (need_hotplug)
731 drm_kms_helper_hotplug_event(dev);
718} 732}
719 733
720/** 734/**
@@ -898,7 +912,6 @@ static int dm_resume(void *handle)
898 struct drm_plane_state *new_plane_state; 912 struct drm_plane_state *new_plane_state;
899 struct dm_plane_state *dm_new_plane_state; 913 struct dm_plane_state *dm_new_plane_state;
900 enum dc_connection_type new_connection_type = dc_connection_none; 914 enum dc_connection_type new_connection_type = dc_connection_none;
901 int ret;
902 int i; 915 int i;
903 916
904 /* power on hardware */ 917 /* power on hardware */
@@ -971,13 +984,13 @@ static int dm_resume(void *handle)
971 } 984 }
972 } 985 }
973 986
974 ret = drm_atomic_helper_resume(ddev, dm->cached_state); 987 drm_atomic_helper_resume(ddev, dm->cached_state);
975 988
976 dm->cached_state = NULL; 989 dm->cached_state = NULL;
977 990
978 amdgpu_dm_irq_resume_late(adev); 991 amdgpu_dm_irq_resume_late(adev);
979 992
980 return ret; 993 return 0;
981} 994}
982 995
983/** 996/**
@@ -1759,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1759 + caps.min_input_signal * 0x101; 1772 + caps.min_input_signal * 0x101;
1760 1773
1761 if (dc_link_set_backlight_level(dm->backlight_link, 1774 if (dc_link_set_backlight_level(dm->backlight_link,
1762 brightness, 0, 0)) 1775 brightness, 0))
1763 return 0; 1776 return 0;
1764 else 1777 else
1765 return 1; 1778 return 1;
@@ -5920,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
5920 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5933 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5921 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 5934 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5922 !new_crtc_state->color_mgmt_changed && 5935 !new_crtc_state->color_mgmt_changed &&
5923 !new_crtc_state->vrr_enabled) 5936 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5924 continue; 5937 continue;
5925 5938
5926 if (!new_crtc_state->enable) 5939 if (!new_crtc_state->enable)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..b0265dbebd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
2190 2190
2191bool dc_link_set_backlight_level(const struct dc_link *link, 2191bool dc_link_set_backlight_level(const struct dc_link *link,
2192 uint32_t backlight_pwm_u16_16, 2192 uint32_t backlight_pwm_u16_16,
2193 uint32_t frame_ramp, 2193 uint32_t frame_ramp)
2194 const struct dc_stream_state *stream)
2195{ 2194{
2196 struct dc *core_dc = link->ctx->dc; 2195 struct dc *core_dc = link->ctx->dc;
2197 struct abm *abm = core_dc->res_pool->abm; 2196 struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2206 (abm->funcs->set_backlight_level_pwm == NULL)) 2205 (abm->funcs->set_backlight_level_pwm == NULL))
2207 return false; 2206 return false;
2208 2207
2209 if (stream)
2210 ((struct dc_stream_state *)stream)->bl_pwm_level =
2211 backlight_pwm_u16_16;
2212
2213 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 2208 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
2214 2209
2215 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", 2210 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
2637 2632
2638 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2633 if (dc_is_dp_signal(pipe_ctx->stream->signal))
2639 enable_stream_features(pipe_ctx); 2634 enable_stream_features(pipe_ctx);
2640
2641 dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
2642 pipe_ctx->stream->bl_pwm_level,
2643 0,
2644 pipe_ctx->stream);
2645 } 2635 }
2646 2636
2647} 2637}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 29f19d57ff7a..b2243e0dad1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
146 */ 146 */
147bool dc_link_set_backlight_level(const struct dc_link *dc_link, 147bool dc_link_set_backlight_level(const struct dc_link *dc_link,
148 uint32_t backlight_pwm_u16_16, 148 uint32_t backlight_pwm_u16_16,
149 uint32_t frame_ramp, 149 uint32_t frame_ramp);
150 const struct dc_stream_state *stream);
151 150
152int dc_link_get_backlight_level(const struct dc_link *dc_link); 151int dc_link_get_backlight_level(const struct dc_link *dc_link);
153 152
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index be34d638e15d..d70c9e1cda3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -91,7 +91,6 @@ struct dc_stream_state {
91 91
92 /* DMCU info */ 92 /* DMCU info */
93 unsigned int abm_level; 93 unsigned int abm_level;
94 unsigned int bl_pwm_level;
95 94
96 /* from core_stream struct */ 95 /* from core_stream struct */
97 struct dc_context *ctx; 96 struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4bf24758217f..8f09b8625c5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
1000 1000
1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); 1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
1002 1002
1003 if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 1003 if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ 1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1006 /* un-mute audio */ 1006 /* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
1018 pipe_ctx->stream_res.stream_enc, true); 1018 pipe_ctx->stream_res.stream_enc, true);
1019 if (pipe_ctx->stream_res.audio) { 1019 if (pipe_ctx->stream_res.audio) {
1020 struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
1021
1020 if (option != KEEP_ACQUIRED_RESOURCE || 1022 if (option != KEEP_ACQUIRED_RESOURCE ||
1021 !dc->debug.az_endpoint_mute_only) { 1023 !dc->debug.az_endpoint_mute_only) {
1022 /*only disalbe az_endpoint if power down or free*/ 1024 /*only disalbe az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1036 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); 1038 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
1037 pipe_ctx->stream_res.audio = NULL; 1039 pipe_ctx->stream_res.audio = NULL;
1038 } 1040 }
1041 if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
1042 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
1043 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1039 1044
1040 /* TODO: notify audio driver for if audio modes list changed 1045 /* TODO: notify audio driver for if audio modes list changed
1041 * add audio mode list change flag */ 1046 * add audio mode list change flag */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index dcb3c5530236..cd1ebe57ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
463 if (src_y_offset >= (int)param->viewport.height) 463 if (src_y_offset >= (int)param->viewport.height)
464 cur_en = 0; /* not visible beyond bottom edge*/ 464 cur_en = 0; /* not visible beyond bottom edge*/
465 465
466 if (src_y_offset < 0) 466 if (src_y_offset + (int)height <= 0)
467 cur_en = 0; /* not visible beyond top edge*/ 467 cur_en = 0; /* not visible beyond top edge*/
468 468
469 REG_UPDATE(CURSOR0_CONTROL, 469 REG_UPDATE(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 345af015d061..d1acd7165bc8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
1140 if (src_y_offset >= (int)param->viewport.height) 1140 if (src_y_offset >= (int)param->viewport.height)
1141 cur_en = 0; /* not visible beyond bottom edge*/ 1141 cur_en = 0; /* not visible beyond bottom edge*/
1142 1142
1143 if (src_y_offset < 0) //+ (int)hubp->curs_attr.height 1143 if (src_y_offset + (int)hubp->curs_attr.height <= 0)
1144 cur_en = 0; /* not visible beyond top edge*/ 1144 cur_en = 0; /* not visible beyond top edge*/
1145 1145
1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) 1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 91e015e14355..58a12ddf12f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
2355 top_pipe_to_program->plane_state->update_flags.bits.full_update) 2355 top_pipe_to_program->plane_state->update_flags.bits.full_update)
2356 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2356 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2358 2358 tg = pipe_ctx->stream_res.tg;
2359 /* Skip inactive pipes and ones already updated */ 2359 /* Skip inactive pipes and ones already updated */
2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream
2361 || !pipe_ctx->plane_state) 2361 || !pipe_ctx->plane_state
2362 || !tg->funcs->is_tg_enabled(tg))
2362 continue; 2363 continue;
2363 2364
2364 pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); 2365 tg->funcs->lock(tg);
2365 2366
2366 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( 2367 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2367 pipe_ctx->plane_res.hubp, 2368 pipe_ctx->plane_res.hubp,
2368 &pipe_ctx->dlg_regs, 2369 &pipe_ctx->dlg_regs,
2369 &pipe_ctx->ttu_regs); 2370 &pipe_ctx->ttu_regs);
2370 }
2371
2372 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2373 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2374 2371
2375 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2372 tg->funcs->unlock(tg);
2376 || !pipe_ctx->plane_state) 2373 }
2377 continue;
2378
2379 dcn10_pipe_control_lock(dc, pipe_ctx, false);
2380 }
2381 2374
2382 if (num_planes == 0) 2375 if (num_planes == 0)
2383 false_optc_underflow_wa(dc, stream, tg); 2376 false_optc_underflow_wa(dc, stream, tg);
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 00f63b7dd32f..c11a443dcbc8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
57#define NUM_POWER_FN_SEGS 8 57#define NUM_POWER_FN_SEGS 8
58#define NUM_BL_CURVE_SEGS 16 58#define NUM_BL_CURVE_SEGS 16
59 59
60#pragma pack(push, 1)
60/* NOTE: iRAM is 256B in size */ 61/* NOTE: iRAM is 256B in size */
61struct iram_table_v_2 { 62struct iram_table_v_2 {
62 /* flags */ 63 /* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
100 uint8_t dummy8; /* 0xfe */ 101 uint8_t dummy8; /* 0xfe */
101 uint8_t dummy9; /* 0xff */ 102 uint8_t dummy9; /* 0xff */
102}; 103};
104#pragma pack(pop)
103 105
104static uint16_t backlight_8_to_16(unsigned int backlight_8bit) 106static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
105{ 107{
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1479ea1dc3e7..789c4f288485 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -127,12 +127,13 @@ enum amd_pp_task {
127}; 127};
128 128
129enum PP_SMC_POWER_PROFILE { 129enum PP_SMC_POWER_PROFILE {
130 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, 130 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
131 PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, 131 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
132 PP_SMC_POWER_PROFILE_VIDEO = 0x2, 132 PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
133 PP_SMC_POWER_PROFILE_VR = 0x3, 133 PP_SMC_POWER_PROFILE_VIDEO = 0x3,
134 PP_SMC_POWER_PROFILE_COMPUTE = 0x4, 134 PP_SMC_POWER_PROFILE_VR = 0x4,
135 PP_SMC_POWER_PROFILE_CUSTOM = 0x5, 135 PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
136 PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
136}; 137};
137 138
138enum { 139enum {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 0173d0480024..310b102a9292 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
64 64
65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) 65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
66{ 66{
67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; 67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; 68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; 69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; 70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; 71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
72 72 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
73 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; 73
74 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; 74 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
75 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 75 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
76 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; 76 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
77 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 77 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
78 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
79 hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
78} 80}
79 81
80int hwmgr_early_init(struct pp_hwmgr *hwmgr) 82int hwmgr_early_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d91390459326..c8f5c00dd1e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,8 +77,9 @@
77#define PCIE_BUS_CLK 10000 77#define PCIE_BUS_CLK 10000
78#define TCLK (PCIE_BUS_CLK / 10) 78#define TCLK (PCIE_BUS_CLK / 10)
79 79
80static const struct profile_mode_setting smu7_profiling[6] = 80static const struct profile_mode_setting smu7_profiling[7] =
81 {{1, 0, 100, 30, 1, 0, 100, 10}, 81 {{0, 0, 0, 0, 0, 0, 0, 0},
82 {1, 0, 100, 30, 1, 0, 100, 10},
82 {1, 10, 0, 30, 0, 0, 0, 0}, 83 {1, 10, 0, 30, 0, 0, 0, 0},
83 {0, 0, 0, 0, 1, 10, 16, 31}, 84 {0, 0, 0, 0, 1, 10, 16, 31},
84 {1, 0, 11, 50, 1, 0, 100, 10}, 85 {1, 0, 11, 50, 1, 0, 100, 10},
@@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4889 uint32_t i, size = 0; 4890 uint32_t i, size = 0;
4890 uint32_t len; 4891 uint32_t len;
4891 4892
4892 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4893 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4894 "3D_FULL_SCREEN",
4893 "POWER_SAVING", 4895 "POWER_SAVING",
4894 "VIDEO", 4896 "VIDEO",
4895 "VR", 4897 "VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 79c86247d0ac..91e3bbe6d61d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
804 804
805 hwmgr->backend = data; 805 hwmgr->backend = data;
806 806
807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
810 810
811 vega10_set_default_registry_data(hwmgr); 811 vega10_set_default_registry_data(hwmgr);
812 data->disable_dpm_mask = 0xff; 812 data->disable_dpm_mask = 0xff;
@@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4668{ 4668{
4669 struct vega10_hwmgr *data = hwmgr->backend; 4669 struct vega10_hwmgr *data = hwmgr->backend;
4670 uint32_t i, size = 0; 4670 uint32_t i, size = 0;
4671 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, 4671 static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
4672 {70, 60, 1, 3,},
4672 {90, 60, 0, 0,}, 4673 {90, 60, 0, 0,},
4673 {70, 60, 0, 0,}, 4674 {70, 60, 0, 0,},
4674 {70, 90, 0, 0,}, 4675 {70, 90, 0, 0,},
4675 {30, 60, 0, 6,}, 4676 {30, 60, 0, 6,},
4676 }; 4677 };
4677 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4678 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4679 "3D_FULL_SCREEN",
4678 "POWER_SAVING", 4680 "POWER_SAVING",
4679 "VIDEO", 4681 "VIDEO",
4680 "VR", 4682 "VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 54364444ecd1..0c8212902275 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
753 return 0; 753 return 0;
754} 754}
755 755
756static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
757{
758 uint32_t result;
759
760 PP_ASSERT_WITH_CODE(
761 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
762 "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
763 return -EINVAL);
764
765 result = smum_get_argument(hwmgr);
766 PP_ASSERT_WITH_CODE(result == 1,
767 "Failed to run ACG BTC!", return -EINVAL);
768
769 return 0;
770}
771
756static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) 772static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
757{ 773{
758 struct vega12_hwmgr *data = 774 struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
931 "Failed to initialize SMC table!", 947 "Failed to initialize SMC table!",
932 result = tmp_result); 948 result = tmp_result);
933 949
950 tmp_result = vega12_run_acg_btc(hwmgr);
951 PP_ASSERT_WITH_CODE(!tmp_result,
952 "Failed to run ACG BTC!",
953 result = tmp_result);
954
934 result = vega12_enable_all_smu_features(hwmgr); 955 result = vega12_enable_all_smu_features(hwmgr);
935 PP_ASSERT_WITH_CODE(!result, 956 PP_ASSERT_WITH_CODE(!result,
936 "Failed to enable all smu features!", 957 "Failed to enable all smu features!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 26154f9b2178..82935a3bd950 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -390,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
390 390
391 hwmgr->backend = data; 391 hwmgr->backend = data;
392 392
393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
396 396
397 vega20_set_default_registry_data(hwmgr); 397 vega20_set_default_registry_data(hwmgr);
398 398
@@ -980,6 +980,9 @@ static int vega20_od8_set_feature_capabilities(
980 pp_table->FanZeroRpmEnable) 980 pp_table->FanZeroRpmEnable)
981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; 981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
982 982
983 if (!od_settings->overdrive8_capabilities)
984 hwmgr->od_enabled = false;
985
983 return 0; 986 return 0;
984} 987}
985 988
@@ -1689,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
1689 (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1692 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1690 "Failed to set soft min memclk !", 1693 "Failed to set soft min memclk !",
1691 return ret); 1694 return ret);
1692
1693 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
1694 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1695 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1696 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1697 "Failed to set hard min memclk !",
1698 return ret);
1699 } 1695 }
1700 1696
1701 if (data->smu_features[GNLD_DPM_UVD].enabled && 1697 if (data->smu_features[GNLD_DPM_UVD].enabled &&
@@ -2248,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2248 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2244 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2249 soft_max_level = mask ? (fls(mask) - 1) : 0; 2245 soft_max_level = mask ? (fls(mask) - 1) : 0;
2250 2246
2247 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2248 pr_err("Clock level specified %d is over max allowed %d\n",
2249 soft_max_level,
2250 data->dpm_table.gfx_table.count - 1);
2251 return -EINVAL;
2252 }
2253
2251 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2254 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2252 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2255 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2253 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2256 data->dpm_table.gfx_table.dpm_state.soft_max_level =
@@ -2268,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2268 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2271 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2269 soft_max_level = mask ? (fls(mask) - 1) : 0; 2272 soft_max_level = mask ? (fls(mask) - 1) : 0;
2270 2273
2274 if (soft_max_level >= data->dpm_table.mem_table.count) {
2275 pr_err("Clock level specified %d is over max allowed %d\n",
2276 soft_max_level,
2277 data->dpm_table.mem_table.count - 1);
2278 return -EINVAL;
2279 }
2280
2271 data->dpm_table.mem_table.dpm_state.soft_min_level = 2281 data->dpm_table.mem_table.dpm_state.soft_min_level =
2272 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2282 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2273 data->dpm_table.mem_table.dpm_state.soft_max_level = 2283 data->dpm_table.mem_table.dpm_state.soft_max_level =
@@ -3261,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
3261 int pplib_workload = 0; 3271 int pplib_workload = 0;
3262 3272
3263 switch (power_profile) { 3273 switch (power_profile) {
3274 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3275 pplib_workload = WORKLOAD_DEFAULT_BIT;
3276 break;
3264 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3277 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3265 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3278 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3266 break; 3279 break;
@@ -3290,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3290 uint32_t i, size = 0; 3303 uint32_t i, size = 0;
3291 uint16_t workload_type = 0; 3304 uint16_t workload_type = 0;
3292 static const char *profile_name[] = { 3305 static const char *profile_name[] = {
3306 "BOOTUP_DEFAULT",
3293 "3D_FULL_SCREEN", 3307 "3D_FULL_SCREEN",
3294 "POWER_SAVING", 3308 "POWER_SAVING",
3295 "VIDEO", 3309 "VIDEO",
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 0d298a0409f5..8cb831b6a016 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -705,7 +705,7 @@ enum PP_TABLE_VERSION {
705/** 705/**
706 * The main hardware manager structure. 706 * The main hardware manager structure.
707 */ 707 */
708#define Workload_Policy_Max 5 708#define Workload_Policy_Max 6
709 709
710struct pp_hwmgr { 710struct pp_hwmgr {
711 void *adev; 711 void *adev;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e738cb52..e6403b9549f1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
98#define DP0_STARTVAL 0x064c 98#define DP0_STARTVAL 0x064c
99#define DP0_ACTIVEVAL 0x0650 99#define DP0_ACTIVEVAL 0x0650
100#define DP0_SYNCVAL 0x0654 100#define DP0_SYNCVAL 0x0654
101#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
102#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
101#define DP0_MISC 0x0658 103#define DP0_MISC 0x0658
102#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ 104#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
103#define BPC_6 (0 << 5) 105#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
142#define DP0_LTLOOPCTRL 0x06d8 144#define DP0_LTLOOPCTRL 0x06d8
143#define DP0_SNKLTCTRL 0x06e4 145#define DP0_SNKLTCTRL 0x06e4
144 146
147#define DP1_SRCCTRL 0x07a0
148
145/* PHY */ 149/* PHY */
146#define DP_PHY_CTRL 0x0800 150#define DP_PHY_CTRL 0x0800
147#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ 151#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
150#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ 154#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
151#define PHY_RDY BIT(16) /* PHY Main Channels Ready */ 155#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
152#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ 156#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
157#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
153#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ 158#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
154#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ 159#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
155 160
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
540 unsigned long rate; 545 unsigned long rate;
541 u32 value; 546 u32 value;
542 int ret; 547 int ret;
548 u32 dp_phy_ctrl;
543 549
544 rate = clk_get_rate(tc->refclk); 550 rate = clk_get_rate(tc->refclk);
545 switch (rate) { 551 switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
564 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 570 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
565 tc_write(SYS_PLLPARAM, value); 571 tc_write(SYS_PLLPARAM, value);
566 572
567 tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); 573 dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
574 if (tc->link.base.num_lanes == 2)
575 dp_phy_ctrl |= PHY_2LANE;
576 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
568 577
569 /* 578 /*
570 * Initially PLLs are in bypass. Force PLL parameter update, 579 * Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
719 728
720 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); 729 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
721 730
722 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); 731 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
732 ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
733 ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
723 734
724 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | 735 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
725 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); 736 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
829 if (!tc->mode) 840 if (!tc->mode)
830 return -EINVAL; 841 return -EINVAL;
831 842
832 /* from excel file - DP0_SrcCtrl */ 843 tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
833 tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B | 844 /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
834 DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 | 845 tc_write(DP1_SRCCTRL,
835 DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT); 846 (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
836 /* from excel file - DP1_SrcCtrl */ 847 ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
837 tc_write(0x07a0, 0x00003083);
838 848
839 rate = clk_get_rate(tc->refclk); 849 rate = clk_get_rate(tc->refclk);
840 switch (rate) { 850 switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
855 } 865 }
856 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 866 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
857 tc_write(SYS_PLLPARAM, value); 867 tc_write(SYS_PLLPARAM, value);
868
858 /* Setup Main Link */ 869 /* Setup Main Link */
859 dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN; 870 dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
871 if (tc->link.base.num_lanes == 2)
872 dp_phy_ctrl |= PHY_2LANE;
860 tc_write(DP_PHY_CTRL, dp_phy_ctrl); 873 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
861 msleep(100); 874 msleep(100);
862 875
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
1105static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector, 1118static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
1106 struct drm_display_mode *mode) 1119 struct drm_display_mode *mode)
1107{ 1120{
1121 struct tc_data *tc = connector_to_tc(connector);
1122 u32 req, avail;
1123 u32 bits_per_pixel = 24;
1124
1108 /* DPI interface clock limitation: upto 154 MHz */ 1125 /* DPI interface clock limitation: upto 154 MHz */
1109 if (mode->clock > 154000) 1126 if (mode->clock > 154000)
1110 return MODE_CLOCK_HIGH; 1127 return MODE_CLOCK_HIGH;
1111 1128
1129 req = mode->clock * bits_per_pixel / 8;
1130 avail = tc->link.base.num_lanes * tc->link.base.rate;
1131
1132 if (req > avail)
1133 return MODE_BAD;
1134
1112 return MODE_OK; 1135 return MODE_OK;
1113} 1136}
1114 1137
@@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
1186 /* Create eDP connector */ 1209 /* Create eDP connector */
1187 drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs); 1210 drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
1188 ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs, 1211 ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
1189 DRM_MODE_CONNECTOR_eDP); 1212 tc->panel ? DRM_MODE_CONNECTOR_eDP :
1213 DRM_MODE_CONNECTOR_DisplayPort);
1190 if (ret) 1214 if (ret)
1191 return ret; 1215 return ret;
1192 1216
@@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
1195 1219
1196 drm_display_info_set_bus_formats(&tc->connector.display_info, 1220 drm_display_info_set_bus_formats(&tc->connector.display_info,
1197 &bus_format, 1); 1221 &bus_format, 1);
1222 tc->connector.display_info.bus_flags =
1223 DRM_BUS_FLAG_DE_HIGH |
1224 DRM_BUS_FLAG_PIXDATA_NEGEDGE |
1225 DRM_BUS_FLAG_SYNC_NEGEDGE;
1198 drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); 1226 drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
1199 1227
1200 return 0; 1228 return 0;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c40889888a16..9a1f41adfc67 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1296,12 +1296,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
1296 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) 1296 (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
1297 return -EINVAL; 1297 return -EINVAL;
1298 1298
1299 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1300
1301 state = drm_atomic_state_alloc(dev); 1299 state = drm_atomic_state_alloc(dev);
1302 if (!state) 1300 if (!state)
1303 return -ENOMEM; 1301 return -ENOMEM;
1304 1302
1303 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1305 state->acquire_ctx = &ctx; 1304 state->acquire_ctx = &ctx;
1306 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); 1305 state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
1307 1306
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2d6c491a0542..516e82d0ed50 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
1273 { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1273 { OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
1274 /* LG LP140WF6-SPM1 eDP panel */ 1274 /* LG LP140WF6-SPM1 eDP panel */
1275 { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) }, 1275 { OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
1276 /* Apple panels need some additional handling to support PSR */
1277 { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
1276}; 1278};
1277 1279
1278#undef OUI 1280#undef OUI
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d3af098b0922..d73703a695e8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
1621 var_1->transp.msb_right == var_2->transp.msb_right; 1621 var_1->transp.msb_right == var_2->transp.msb_right;
1622} 1622}
1623 1623
1624static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
1625 u8 depth)
1626{
1627 switch (depth) {
1628 case 8:
1629 var->red.offset = 0;
1630 var->green.offset = 0;
1631 var->blue.offset = 0;
1632 var->red.length = 8; /* 8bit DAC */
1633 var->green.length = 8;
1634 var->blue.length = 8;
1635 var->transp.offset = 0;
1636 var->transp.length = 0;
1637 break;
1638 case 15:
1639 var->red.offset = 10;
1640 var->green.offset = 5;
1641 var->blue.offset = 0;
1642 var->red.length = 5;
1643 var->green.length = 5;
1644 var->blue.length = 5;
1645 var->transp.offset = 15;
1646 var->transp.length = 1;
1647 break;
1648 case 16:
1649 var->red.offset = 11;
1650 var->green.offset = 5;
1651 var->blue.offset = 0;
1652 var->red.length = 5;
1653 var->green.length = 6;
1654 var->blue.length = 5;
1655 var->transp.offset = 0;
1656 break;
1657 case 24:
1658 var->red.offset = 16;
1659 var->green.offset = 8;
1660 var->blue.offset = 0;
1661 var->red.length = 8;
1662 var->green.length = 8;
1663 var->blue.length = 8;
1664 var->transp.offset = 0;
1665 var->transp.length = 0;
1666 break;
1667 case 32:
1668 var->red.offset = 16;
1669 var->green.offset = 8;
1670 var->blue.offset = 0;
1671 var->red.length = 8;
1672 var->green.length = 8;
1673 var->blue.length = 8;
1674 var->transp.offset = 24;
1675 var->transp.length = 8;
1676 break;
1677 default:
1678 break;
1679 }
1680}
1681
1624/** 1682/**
1625 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var 1683 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
1626 * @var: screeninfo to check 1684 * @var: screeninfo to check
@@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1632 struct drm_fb_helper *fb_helper = info->par; 1690 struct drm_fb_helper *fb_helper = info->par;
1633 struct drm_framebuffer *fb = fb_helper->fb; 1691 struct drm_framebuffer *fb = fb_helper->fb;
1634 1692
1635 if (var->pixclock != 0 || in_dbg_master()) 1693 if (in_dbg_master())
1636 return -EINVAL; 1694 return -EINVAL;
1637 1695
1696 if (var->pixclock != 0) {
1697 DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
1698 var->pixclock = 0;
1699 }
1700
1638 if ((drm_format_info_block_width(fb->format, 0) > 1) || 1701 if ((drm_format_info_block_width(fb->format, 0) > 1) ||
1639 (drm_format_info_block_height(fb->format, 0) > 1)) 1702 (drm_format_info_block_height(fb->format, 0) > 1))
1640 return -EINVAL; 1703 return -EINVAL;
@@ -1655,6 +1718,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
1655 } 1718 }
1656 1719
1657 /* 1720 /*
1721 * Workaround for SDL 1.2, which is known to be setting all pixel format
1722 * fields values to zero in some cases. We treat this situation as a
1723 * kind of "use some reasonable autodetected values".
1724 */
1725 if (!var->red.offset && !var->green.offset &&
1726 !var->blue.offset && !var->transp.offset &&
1727 !var->red.length && !var->green.length &&
1728 !var->blue.length && !var->transp.length &&
1729 !var->red.msb_right && !var->green.msb_right &&
1730 !var->blue.msb_right && !var->transp.msb_right) {
1731 drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
1732 }
1733
1734 /*
1658 * drm fbdev emulation doesn't support changing the pixel format at all, 1735 * drm fbdev emulation doesn't support changing the pixel format at all,
1659 * so reject all pixel format changing requests. 1736 * so reject all pixel format changing requests.
1660 */ 1737 */
@@ -1967,59 +2044,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
1967 info->var.yoffset = 0; 2044 info->var.yoffset = 0;
1968 info->var.activate = FB_ACTIVATE_NOW; 2045 info->var.activate = FB_ACTIVATE_NOW;
1969 2046
1970 switch (fb->format->depth) { 2047 drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
1971 case 8:
1972 info->var.red.offset = 0;
1973 info->var.green.offset = 0;
1974 info->var.blue.offset = 0;
1975 info->var.red.length = 8; /* 8bit DAC */
1976 info->var.green.length = 8;
1977 info->var.blue.length = 8;
1978 info->var.transp.offset = 0;
1979 info->var.transp.length = 0;
1980 break;
1981 case 15:
1982 info->var.red.offset = 10;
1983 info->var.green.offset = 5;
1984 info->var.blue.offset = 0;
1985 info->var.red.length = 5;
1986 info->var.green.length = 5;
1987 info->var.blue.length = 5;
1988 info->var.transp.offset = 15;
1989 info->var.transp.length = 1;
1990 break;
1991 case 16:
1992 info->var.red.offset = 11;
1993 info->var.green.offset = 5;
1994 info->var.blue.offset = 0;
1995 info->var.red.length = 5;
1996 info->var.green.length = 6;
1997 info->var.blue.length = 5;
1998 info->var.transp.offset = 0;
1999 break;
2000 case 24:
2001 info->var.red.offset = 16;
2002 info->var.green.offset = 8;
2003 info->var.blue.offset = 0;
2004 info->var.red.length = 8;
2005 info->var.green.length = 8;
2006 info->var.blue.length = 8;
2007 info->var.transp.offset = 0;
2008 info->var.transp.length = 0;
2009 break;
2010 case 32:
2011 info->var.red.offset = 16;
2012 info->var.green.offset = 8;
2013 info->var.blue.offset = 0;
2014 info->var.red.length = 8;
2015 info->var.green.length = 8;
2016 info->var.blue.length = 8;
2017 info->var.transp.offset = 24;
2018 info->var.transp.length = 8;
2019 break;
2020 default:
2021 break;
2022 }
2023 2048
2024 info->var.xres = fb_width; 2049 info->var.xres = fb_width;
2025 info->var.yres = fb_height; 2050 info->var.yres = fb_height;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index cd9bc0ce9be0..004191d01772 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -459,11 +459,11 @@ static int set_property_atomic(struct drm_mode_object *obj,
459 struct drm_modeset_acquire_ctx ctx; 459 struct drm_modeset_acquire_ctx ctx;
460 int ret; 460 int ret;
461 461
462 drm_modeset_acquire_init(&ctx, 0);
463
464 state = drm_atomic_state_alloc(dev); 462 state = drm_atomic_state_alloc(dev);
465 if (!state) 463 if (!state)
466 return -ENOMEM; 464 return -ENOMEM;
465
466 drm_modeset_acquire_init(&ctx, 0);
467 state->acquire_ctx = &ctx; 467 state->acquire_ctx = &ctx;
468retry: 468retry:
469 if (prop == state->dev->mode_config.dpms_property) { 469 if (prop == state->dev->mode_config.dpms_property) {
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a9d9df6c85ad..693748ad8b88 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
61 return NULL; 61 return NULL;
62 62
63 dmah->size = size; 63 dmah->size = size;
64 dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, 64 dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
65 GFP_KERNEL | __GFP_COMP); 65 &dmah->busaddr,
66 GFP_KERNEL | __GFP_COMP);
66 67
67 if (dmah->vaddr == NULL) { 68 if (dmah->vaddr == NULL) {
68 kfree(dmah); 69 kfree(dmah);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 38dcee1ca062..40a61ef9aac1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
984 intel_runtime_pm_get(i915); 984 intel_runtime_pm_get(i915);
985 gpu = i915_capture_gpu_state(i915); 985 gpu = i915_capture_gpu_state(i915);
986 intel_runtime_pm_put(i915); 986 intel_runtime_pm_put(i915);
987 if (!gpu) 987 if (IS_ERR(gpu))
988 return -ENOMEM; 988 return PTR_ERR(gpu);
989 989
990 file->private_data = gpu; 990 file->private_data = gpu;
991 return 0; 991 return 0;
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
1018 1018
1019static int i915_error_state_open(struct inode *inode, struct file *file) 1019static int i915_error_state_open(struct inode *inode, struct file *file)
1020{ 1020{
1021 file->private_data = i915_first_error_state(inode->i_private); 1021 struct i915_gpu_state *error;
1022
1023 error = i915_first_error_state(inode->i_private);
1024 if (IS_ERR(error))
1025 return PTR_ERR(error);
1026
1027 file->private_data = error;
1022 return 0; 1028 return 0;
1023} 1029}
1024 1030
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..bd17dd1f5da5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
2075int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) 2075int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2076{ 2076{
2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); 2077 struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
2078 int err;
2078 2079
2079 /* 2080 /*
2080 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt 2081 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
2090 * allocator works in address space sizes, so it's multiplied by page 2091 * allocator works in address space sizes, so it's multiplied by page
2091 * size. We allocate at the top of the GTT to avoid fragmentation. 2092 * size. We allocate at the top of the GTT to avoid fragmentation.
2092 */ 2093 */
2093 return i915_vma_pin(ppgtt->vma, 2094 err = i915_vma_pin(ppgtt->vma,
2094 0, GEN6_PD_ALIGN, 2095 0, GEN6_PD_ALIGN,
2095 PIN_GLOBAL | PIN_HIGH); 2096 PIN_GLOBAL | PIN_HIGH);
2097 if (err)
2098 goto unpin;
2099
2100 return 0;
2101
2102unpin:
2103 ppgtt->pin_count = 0;
2104 return err;
2096} 2105}
2097 2106
2098void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) 2107void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..3f9ce403c755 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
1907{ 1907{
1908 struct i915_gpu_state *error; 1908 struct i915_gpu_state *error;
1909 1909
1910 /* Check if GPU capture has been disabled */
1911 error = READ_ONCE(i915->gpu_error.first_error);
1912 if (IS_ERR(error))
1913 return error;
1914
1910 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1915 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1911 if (!error) 1916 if (!error) {
1912 return NULL; 1917 i915_disable_error_state(i915, -ENOMEM);
1918 return ERR_PTR(-ENOMEM);
1919 }
1913 1920
1914 kref_init(&error->ref); 1921 kref_init(&error->ref);
1915 error->i915 = i915; 1922 error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
1945 return; 1952 return;
1946 1953
1947 error = i915_capture_gpu_state(i915); 1954 error = i915_capture_gpu_state(i915);
1948 if (!error) { 1955 if (IS_ERR(error))
1949 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1950 i915_disable_error_state(i915, -ENOMEM);
1951 return; 1956 return;
1952 }
1953 1957
1954 i915_error_capture_msg(i915, error, engine_mask, error_msg); 1958 i915_error_capture_msg(i915, error, engine_mask, error_msg);
1955 DRM_INFO("%s\n", error->error_msg); 1959 DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
1987 1991
1988 spin_lock_irq(&i915->gpu_error.lock); 1992 spin_lock_irq(&i915->gpu_error.lock);
1989 error = i915->gpu_error.first_error; 1993 error = i915->gpu_error.first_error;
1990 if (error) 1994 if (!IS_ERR_OR_NULL(error))
1991 i915_gpu_state_get(error); 1995 i915_gpu_state_get(error);
1992 spin_unlock_irq(&i915->gpu_error.lock); 1996 spin_unlock_irq(&i915->gpu_error.lock);
1993 1997
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
2000 2004
2001 spin_lock_irq(&i915->gpu_error.lock); 2005 spin_lock_irq(&i915->gpu_error.lock);
2002 error = i915->gpu_error.first_error; 2006 error = i915->gpu_error.first_error;
2003 i915->gpu_error.first_error = NULL; 2007 if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
2008 i915->gpu_error.first_error = NULL;
2004 spin_unlock_irq(&i915->gpu_error.lock); 2009 spin_unlock_irq(&i915->gpu_error.lock);
2005 2010
2006 if (!IS_ERR(error)) 2011 if (!IS_ERR_OR_NULL(error))
2007 i915_gpu_state_put(error); 2012 i915_gpu_state_put(error);
2008} 2013}
2009 2014
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
521 ssize_t ret; 521 ssize_t ret;
522 522
523 gpu = i915_first_error_state(i915); 523 gpu = i915_first_error_state(i915);
524 if (gpu) { 524 if (IS_ERR(gpu)) {
525 ret = PTR_ERR(gpu);
526 } else if (gpu) {
525 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count); 527 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
526 i915_gpu_state_put(gpu); 528 i915_gpu_state_put(gpu);
527 } else { 529 } else {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4be167dcd209..eab9341a5152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
303 */ 303 */
304 if (!(prio & I915_PRIORITY_NEWCLIENT)) { 304 if (!(prio & I915_PRIORITY_NEWCLIENT)) {
305 prio |= I915_PRIORITY_NEWCLIENT; 305 prio |= I915_PRIORITY_NEWCLIENT;
306 active->sched.attr.priority = prio;
306 list_move_tail(&active->sched.link, 307 list_move_tail(&active->sched.link,
307 i915_sched_lookup_priolist(engine, prio)); 308 i915_sched_lookup_priolist(engine, prio));
308 } 309 }
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
645 int i; 646 int i;
646 647
647 priolist_for_each_request_consume(rq, rn, p, i) { 648 priolist_for_each_request_consume(rq, rn, p, i) {
649 GEM_BUG_ON(last &&
650 need_preempt(engine, last, rq_prio(rq)));
651
648 /* 652 /*
649 * Can we combine this request with the current port? 653 * Can we combine this request with the current port?
650 * It has to be the same context/ringbuffer and not 654 * It has to be the same context/ringbuffer and not
@@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
2244 if (ret) 2248 if (ret)
2245 return ret; 2249 return ret;
2246 2250
2251 intel_engine_init_workarounds(engine);
2252
2247 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2253 if (HAS_LOGICAL_RING_ELSQ(i915)) {
2248 execlists->submit_reg = i915->regs + 2254 execlists->submit_reg = i915->regs +
2249 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); 2255 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
2310 } 2316 }
2311 2317
2312 intel_engine_init_whitelist(engine); 2318 intel_engine_init_whitelist(engine);
2313 intel_engine_init_workarounds(engine);
2314 2319
2315 return 0; 2320 return 0;
2316} 2321}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..f71970df9936 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", 274 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
275 intel_dp->psr_dpcd[0]); 275 intel_dp->psr_dpcd[0]);
276 276
277 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
278 DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
279 return;
280 }
281
277 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { 282 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
278 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); 283 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
279 return; 284 return;
280 } 285 }
286
281 dev_priv->psr.sink_support = true; 287 dev_priv->psr.sink_support = true;
282 dev_priv->psr.sink_sync_latency = 288 dev_priv->psr.sink_sync_latency =
283 intel_dp_get_sink_sync_latency(intel_dp); 289 intel_dp_get_sink_sync_latency(intel_dp);
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 75d97f1b2e8f..4f5c67f70c4d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,7 +46,6 @@ struct meson_crtc {
46 struct drm_crtc base; 46 struct drm_crtc base;
47 struct drm_pending_vblank_event *event; 47 struct drm_pending_vblank_event *event;
48 struct meson_drm *priv; 48 struct meson_drm *priv;
49 bool enabled;
50}; 49};
51#define to_meson_crtc(x) container_of(x, struct meson_crtc, base) 50#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
52 51
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
82 81
83}; 82};
84 83
85static void meson_crtc_enable(struct drm_crtc *crtc) 84static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
85 struct drm_crtc_state *old_state)
86{ 86{
87 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 87 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
88 struct drm_crtc_state *crtc_state = crtc->state; 88 struct drm_crtc_state *crtc_state = crtc->state;
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
108 108
109 drm_crtc_vblank_on(crtc); 109 drm_crtc_vblank_on(crtc);
110 110
111 meson_crtc->enabled = true;
112}
113
114static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
115 struct drm_crtc_state *old_state)
116{
117 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
118 struct meson_drm *priv = meson_crtc->priv;
119
120 DRM_DEBUG_DRIVER("\n");
121
122 if (!meson_crtc->enabled)
123 meson_crtc_enable(crtc);
124
125 priv->viu.osd1_enabled = true; 111 priv->viu.osd1_enabled = true;
126} 112}
127 113
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
153 139
154 crtc->state->event = NULL; 140 crtc->state->event = NULL;
155 } 141 }
156
157 meson_crtc->enabled = false;
158} 142}
159 143
160static void meson_crtc_atomic_begin(struct drm_crtc *crtc, 144static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
163 struct meson_crtc *meson_crtc = to_meson_crtc(crtc); 147 struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
164 unsigned long flags; 148 unsigned long flags;
165 149
166 if (crtc->state->enable && !meson_crtc->enabled)
167 meson_crtc_enable(crtc);
168
169 if (crtc->state->event) { 150 if (crtc->state->event) {
170 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 151 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
171 152
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3ee4d4a4ecba..12ff47b13668 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
75 .fb_create = drm_gem_fb_create, 75 .fb_create = drm_gem_fb_create,
76}; 76};
77 77
78static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
79 .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
80};
81
78static irqreturn_t meson_irq(int irq, void *arg) 82static irqreturn_t meson_irq(int irq, void *arg)
79{ 83{
80 struct drm_device *dev = arg; 84 struct drm_device *dev = arg;
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
266 drm->mode_config.max_width = 3840; 270 drm->mode_config.max_width = 3840;
267 drm->mode_config.max_height = 2160; 271 drm->mode_config.max_height = 2160;
268 drm->mode_config.funcs = &meson_mode_config_funcs; 272 drm->mode_config.funcs = &meson_mode_config_funcs;
273 drm->mode_config.helper_private = &meson_mode_config_helpers;
269 274
270 /* Hardware Initialization */ 275 /* Hardware Initialization */
271 276
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
388 remote_node = of_graph_get_remote_port_parent(ep); 393 remote_node = of_graph_get_remote_port_parent(ep);
389 if (!remote_node || 394 if (!remote_node ||
390 remote_node == parent || /* Ignore parent endpoint */ 395 remote_node == parent || /* Ignore parent endpoint */
391 !of_device_is_available(remote_node)) 396 !of_device_is_available(remote_node)) {
397 of_node_put(remote_node);
392 continue; 398 continue;
399 }
393 400
394 count += meson_probe_remote(pdev, match, remote, remote_node); 401 count += meson_probe_remote(pdev, match, remote, remote_node);
395 402
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
408 415
409 for_each_endpoint_of_node(np, ep) { 416 for_each_endpoint_of_node(np, ep) {
410 remote = of_graph_get_remote_port_parent(ep); 417 remote = of_graph_get_remote_port_parent(ep);
411 if (!remote || !of_device_is_available(remote)) 418 if (!remote || !of_device_is_available(remote)) {
419 of_node_put(remote);
412 continue; 420 continue;
421 }
413 422
414 count += meson_probe_remote(pdev, &match, np, remote); 423 count += meson_probe_remote(pdev, &match, np, remote);
424 of_node_put(remote);
415 } 425 }
416 426
417 if (count && !match) 427 if (count && !match)
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5f5be6368aed..c7a94c94dbf3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -253,6 +253,9 @@ nouveau_backlight_init(struct drm_connector *connector)
253 case NV_DEVICE_INFO_V0_FERMI: 253 case NV_DEVICE_INFO_V0_FERMI:
254 case NV_DEVICE_INFO_V0_KEPLER: 254 case NV_DEVICE_INFO_V0_KEPLER:
255 case NV_DEVICE_INFO_V0_MAXWELL: 255 case NV_DEVICE_INFO_V0_MAXWELL:
256 case NV_DEVICE_INFO_V0_PASCAL:
257 case NV_DEVICE_INFO_V0_VOLTA:
258 case NV_DEVICE_INFO_V0_TURING:
256 ret = nv50_backlight_init(nv_encoder, &props, &ops); 259 ret = nv50_backlight_init(nv_encoder, &props, &ops);
257 break; 260 break;
258 default: 261 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index bfbc9341e0c2..d9edb5785813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2435,6 +2435,38 @@ nv140_chipset = {
2435}; 2435};
2436 2436
2437static const struct nvkm_device_chip 2437static const struct nvkm_device_chip
2438nv162_chipset = {
2439 .name = "TU102",
2440 .bar = tu104_bar_new,
2441 .bios = nvkm_bios_new,
2442 .bus = gf100_bus_new,
2443 .devinit = tu104_devinit_new,
2444 .fault = tu104_fault_new,
2445 .fb = gv100_fb_new,
2446 .fuse = gm107_fuse_new,
2447 .gpio = gk104_gpio_new,
2448 .i2c = gm200_i2c_new,
2449 .ibus = gm200_ibus_new,
2450 .imem = nv50_instmem_new,
2451 .ltc = gp102_ltc_new,
2452 .mc = tu104_mc_new,
2453 .mmu = tu104_mmu_new,
2454 .pci = gp100_pci_new,
2455 .pmu = gp102_pmu_new,
2456 .therm = gp100_therm_new,
2457 .timer = gk20a_timer_new,
2458 .top = gk104_top_new,
2459 .ce[0] = tu104_ce_new,
2460 .ce[1] = tu104_ce_new,
2461 .ce[2] = tu104_ce_new,
2462 .ce[3] = tu104_ce_new,
2463 .ce[4] = tu104_ce_new,
2464 .disp = tu104_disp_new,
2465 .dma = gv100_dma_new,
2466 .fifo = tu104_fifo_new,
2467};
2468
2469static const struct nvkm_device_chip
2438nv164_chipset = { 2470nv164_chipset = {
2439 .name = "TU104", 2471 .name = "TU104",
2440 .bar = tu104_bar_new, 2472 .bar = tu104_bar_new,
@@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2950 case 0x138: device->chip = &nv138_chipset; break; 2982 case 0x138: device->chip = &nv138_chipset; break;
2951 case 0x13b: device->chip = &nv13b_chipset; break; 2983 case 0x13b: device->chip = &nv13b_chipset; break;
2952 case 0x140: device->chip = &nv140_chipset; break; 2984 case 0x140: device->chip = &nv140_chipset; break;
2985 case 0x162: device->chip = &nv162_chipset; break;
2953 case 0x164: device->chip = &nv164_chipset; break; 2986 case 0x164: device->chip = &nv164_chipset; break;
2954 case 0x166: device->chip = &nv166_chipset; break; 2987 case 0x166: device->chip = &nv166_chipset; break;
2955 default: 2988 default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 816ccaedfc73..8675613e142b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -22,6 +22,7 @@
22#include <engine/falcon.h> 22#include <engine/falcon.h>
23 23
24#include <core/gpuobj.h> 24#include <core/gpuobj.h>
25#include <subdev/mc.h>
25#include <subdev/timer.h> 26#include <subdev/timer.h>
26#include <engine/fifo.h> 27#include <engine/fifo.h>
27 28
@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
107 } 108 }
108 } 109 }
109 110
110 nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000); 111 if (nvkm_mc_enabled(device, engine->subdev.index)) {
111 nvkm_wr32(device, base + 0x014, 0xffffffff); 112 nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
113 nvkm_wr32(device, base + 0x014, 0xffffffff);
114 }
112 return 0; 115 return 0;
113} 116}
114 117
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 3695cde669f8..07914e36939e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
132 duty = nvkm_therm_update_linear(therm); 132 duty = nvkm_therm_update_linear(therm);
133 break; 133 break;
134 case NVBIOS_THERM_FAN_OTHER: 134 case NVBIOS_THERM_FAN_OTHER:
135 if (therm->cstate) 135 if (therm->cstate) {
136 duty = therm->cstate; 136 duty = therm->cstate;
137 else 137 poll = false;
138 } else {
138 duty = nvkm_therm_update_linear_fallback(therm); 139 duty = nvkm_therm_update_linear_fallback(therm);
139 poll = false; 140 }
140 break; 141 break;
141 } 142 }
142 immd = false; 143 immd = false;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 13c8a662f9b4..ccb090f3ab30 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
250#if defined(CONFIG_DEBUG_FS) 250#if defined(CONFIG_DEBUG_FS)
251 .debugfs_init = qxl_debugfs_init, 251 .debugfs_init = qxl_debugfs_init,
252#endif 252#endif
253 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
254 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
255 .gem_prime_export = drm_gem_prime_export, 253 .gem_prime_export = drm_gem_prime_export,
256 .gem_prime_import = drm_gem_prime_import, 254 .gem_prime_import = drm_gem_prime_import,
257 .gem_prime_pin = qxl_gem_prime_pin, 255 .gem_prime_pin = qxl_gem_prime_pin,
258 .gem_prime_unpin = qxl_gem_prime_unpin, 256 .gem_prime_unpin = qxl_gem_prime_unpin,
259 .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
260 .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
261 .gem_prime_vmap = qxl_gem_prime_vmap, 257 .gem_prime_vmap = qxl_gem_prime_vmap,
262 .gem_prime_vunmap = qxl_gem_prime_vunmap, 258 .gem_prime_vunmap = qxl_gem_prime_vunmap,
263 .gem_prime_mmap = qxl_gem_prime_mmap, 259 .gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index a55dece118b2..df65d3c1a7b8 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
38 WARN_ONCE(1, "not implemented"); 38 WARN_ONCE(1, "not implemented");
39} 39}
40 40
41struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
42{
43 WARN_ONCE(1, "not implemented");
44 return ERR_PTR(-ENOSYS);
45}
46
47struct drm_gem_object *qxl_gem_prime_import_sg_table(
48 struct drm_device *dev, struct dma_buf_attachment *attach,
49 struct sg_table *table)
50{
51 WARN_ONCE(1, "not implemented");
52 return ERR_PTR(-ENOSYS);
53}
54
55void *qxl_gem_prime_vmap(struct drm_gem_object *obj) 41void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
56{ 42{
57 WARN_ONCE(1, "not implemented"); 43 WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 96ac1458a59c..37f93022a106 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
113 child_count++; 113 child_count++;
114 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id, 114 ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
115 &panel, &bridge); 115 &panel, &bridge);
116 if (!ret) 116 if (!ret) {
117 of_node_put(endpoint);
117 break; 118 break;
119 }
118 } 120 }
119 121
120 of_node_put(port); 122 of_node_put(port);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9e9255ee59cd..a021bab11a4f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
786 remote = of_graph_get_remote_port_parent(ep); 786 remote = of_graph_get_remote_port_parent(ep);
787 if (!remote) 787 if (!remote)
788 continue; 788 continue;
789 of_node_put(remote);
789 790
790 /* does this node match any registered engines? */ 791 /* does this node match any registered engines? */
791 list_for_each_entry(frontend, &drv->frontend_list, list) { 792 list_for_each_entry(frontend, &drv->frontend_list, list) {
792 if (remote == frontend->node) { 793 if (remote == frontend->node) {
793 of_node_put(remote);
794 of_node_put(port); 794 of_node_put(port);
795 of_node_put(ep);
795 return frontend; 796 return frontend;
796 } 797 }
797 } 798 }
798 } 799 }
799 800 of_node_put(port);
800 return ERR_PTR(-EINVAL); 801 return ERR_PTR(-EINVAL);
801} 802}
802 803
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f7f32a885af7..2d1aaca49105 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -127,14 +127,10 @@ static struct drm_driver driver = {
127#if defined(CONFIG_DEBUG_FS) 127#if defined(CONFIG_DEBUG_FS)
128 .debugfs_init = virtio_gpu_debugfs_init, 128 .debugfs_init = virtio_gpu_debugfs_init,
129#endif 129#endif
130 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
131 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
132 .gem_prime_export = drm_gem_prime_export, 130 .gem_prime_export = drm_gem_prime_export,
133 .gem_prime_import = drm_gem_prime_import, 131 .gem_prime_import = drm_gem_prime_import,
134 .gem_prime_pin = virtgpu_gem_prime_pin, 132 .gem_prime_pin = virtgpu_gem_prime_pin,
135 .gem_prime_unpin = virtgpu_gem_prime_unpin, 133 .gem_prime_unpin = virtgpu_gem_prime_unpin,
136 .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
137 .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
138 .gem_prime_vmap = virtgpu_gem_prime_vmap, 134 .gem_prime_vmap = virtgpu_gem_prime_vmap,
139 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 135 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
140 .gem_prime_mmap = virtgpu_gem_prime_mmap, 136 .gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1deb41d42ea4..0c15000f926e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
372/* virtgpu_prime.c */ 372/* virtgpu_prime.c */
373int virtgpu_gem_prime_pin(struct drm_gem_object *obj); 373int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
374void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); 374void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
375struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
376struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
377 struct drm_device *dev, struct dma_buf_attachment *attach,
378 struct sg_table *sgt);
379void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); 375void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
380void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 376void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
381int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, 377int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 86ce0ae93f59..c59ec34c80a5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
39 WARN_ONCE(1, "not implemented"); 39 WARN_ONCE(1, "not implemented");
40} 40}
41 41
42struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
43{
44 WARN_ONCE(1, "not implemented");
45 return ERR_PTR(-ENODEV);
46}
47
48struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
49 struct drm_device *dev, struct dma_buf_attachment *attach,
50 struct sg_table *table)
51{
52 WARN_ONCE(1, "not implemented");
53 return ERR_PTR(-ENODEV);
54}
55
56void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) 42void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
57{ 43{
58 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); 44 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index b677e5d524e6..d5f1d8e1c6f8 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
21 bool "Laptop Hybrid Graphics - GPU switching support" 21 bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86 22 depends on X86
23 depends on ACPI 23 depends on ACPI
24 depends on PCI
24 select VGA_ARB 25 select VGA_ARB
25 help 26 help
26 Many laptops released in 2008/9/10 have two GPUs with a multiplexer 27 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 0e30fa00204c..f9b8e3e23a8e 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
393 } 393 }
394 394
395 rv = lm80_read_value(client, LM80_REG_FANDIV); 395 rv = lm80_read_value(client, LM80_REG_FANDIV);
396 if (rv < 0) 396 if (rv < 0) {
397 mutex_unlock(&data->update_lock);
397 return rv; 398 return rv;
399 }
398 reg = (rv & ~(3 << (2 * (nr + 1)))) 400 reg = (rv & ~(3 << (2 * (nr + 1))))
399 | (data->fan_div[nr] << (2 * (nr + 1))); 401 | (data->fan_div[nr] << (2 * (nr + 1)));
400 lm80_write_value(client, LM80_REG_FANDIV, reg); 402 lm80_write_value(client, LM80_REG_FANDIV, reg);
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c3040079b1cb..4adec4ab7d06 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -44,8 +44,8 @@
44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
46 * (0xd451) 46 * (0xd451)
47 * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 47 * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
48 * (0xd459) 48 * (0xd429)
49 * 49 *
50 * #temp lists the number of monitored temperature sources (first value) plus 50 * #temp lists the number of monitored temperature sources (first value) plus
51 * the number of directly connectable temperature sensors (second value). 51 * the number of directly connectable temperature sensors (second value).
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
138#define SIO_NCT6795_ID 0xd350 138#define SIO_NCT6795_ID 0xd350
139#define SIO_NCT6796_ID 0xd420 139#define SIO_NCT6796_ID 0xd420
140#define SIO_NCT6797_ID 0xd450 140#define SIO_NCT6797_ID 0xd450
141#define SIO_NCT6798_ID 0xd458 141#define SIO_NCT6798_ID 0xd428
142#define SIO_ID_MASK 0xFFF8 142#define SIO_ID_MASK 0xFFF8
143 143
144enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; 144enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -4508,7 +4508,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
4508 4508
4509 if (data->kind == nct6791 || data->kind == nct6792 || 4509 if (data->kind == nct6791 || data->kind == nct6792 ||
4510 data->kind == nct6793 || data->kind == nct6795 || 4510 data->kind == nct6793 || data->kind == nct6795 ||
4511 data->kind == nct6796) 4511 data->kind == nct6796 || data->kind == nct6797 ||
4512 data->kind == nct6798)
4512 nct6791_enable_io_mapping(sioreg); 4513 nct6791_enable_io_mapping(sioreg);
4513 4514
4514 superio_exit(sioreg); 4515 superio_exit(sioreg);
@@ -4644,7 +4645,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4644 4645
4645 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || 4646 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
4646 sio_data->kind == nct6793 || sio_data->kind == nct6795 || 4647 sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
4647 sio_data->kind == nct6796) 4648 sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
4649 sio_data->kind == nct6798)
4648 nct6791_enable_io_mapping(sioaddr); 4650 nct6791_enable_io_mapping(sioaddr);
4649 4651
4650 superio_exit(sioaddr); 4652 superio_exit(sioaddr);
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 423903f87955..391118c8aae8 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev,
380 val *= 1000000ULL; 380 val *= 1000000ULL;
381 break; 381 break;
382 case 2: 382 case 2:
383 val = get_unaligned_be32(&power->update_tag) * 383 val = (u64)get_unaligned_be32(&power->update_tag) *
384 occ->powr_sample_time_us; 384 occ->powr_sample_time_us;
385 break; 385 break;
386 case 3: 386 case 3:
387 val = get_unaligned_be16(&power->value) * 1000000ULL; 387 val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev,
425 &power->update_tag); 425 &power->update_tag);
426 break; 426 break;
427 case 2: 427 case 2:
428 val = get_unaligned_be32(&power->update_tag) * 428 val = (u64)get_unaligned_be32(&power->update_tag) *
429 occ->powr_sample_time_us; 429 occ->powr_sample_time_us;
430 break; 430 break;
431 case 3: 431 case 3:
432 val = get_unaligned_be16(&power->value) * 1000000ULL; 432 val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
463 &power->system.update_tag); 463 &power->system.update_tag);
464 break; 464 break;
465 case 2: 465 case 2:
466 val = get_unaligned_be32(&power->system.update_tag) * 466 val = (u64)get_unaligned_be32(&power->system.update_tag) *
467 occ->powr_sample_time_us; 467 occ->powr_sample_time_us;
468 break; 468 break;
469 case 3: 469 case 3:
470 val = get_unaligned_be16(&power->system.value) * 1000000ULL; 470 val = get_unaligned_be16(&power->system.value) * 1000000ULL;
@@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
477 &power->proc.update_tag); 477 &power->proc.update_tag);
478 break; 478 break;
479 case 6: 479 case 6:
480 val = get_unaligned_be32(&power->proc.update_tag) * 480 val = (u64)get_unaligned_be32(&power->proc.update_tag) *
481 occ->powr_sample_time_us; 481 occ->powr_sample_time_us;
482 break; 482 break;
483 case 7: 483 case 7:
484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL; 484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL;
@@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
491 &power->vdd.update_tag); 491 &power->vdd.update_tag);
492 break; 492 break;
493 case 10: 493 case 10:
494 val = get_unaligned_be32(&power->vdd.update_tag) * 494 val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
495 occ->powr_sample_time_us; 495 occ->powr_sample_time_us;
496 break; 496 break;
497 case 11: 497 case 11:
498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; 498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL;
@@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
505 &power->vdn.update_tag); 505 &power->vdn.update_tag);
506 break; 506 break;
507 case 14: 507 case 14:
508 val = get_unaligned_be32(&power->vdn.update_tag) * 508 val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
509 occ->powr_sample_time_us; 509 occ->powr_sample_time_us;
510 break; 510 break;
511 case 15: 511 case 15:
512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; 512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8844c9565d2a..7053be59ad2e 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
88 .data = (void *)2 88 .data = (void *)2
89 }, 89 },
90 { 90 {
91 .compatible = "ti,tmp422", 91 .compatible = "ti,tmp442",
92 .data = (void *)3 92 .data = (void *)3
93 }, 93 },
94 { }, 94 { },
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e417ebf7628c..c77adbbea0c7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -155,6 +155,8 @@ enum msg_end_type {
155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that 155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
156 * provides additional features and allows for longer messages to 156 * provides additional features and allows for longer messages to
157 * be transferred in one go. 157 * be transferred in one go.
158 * @quirks: i2c adapter quirks for limiting write/read transfer size and not
159 * allowing 0 length transfers.
158 */ 160 */
159struct tegra_i2c_hw_feature { 161struct tegra_i2c_hw_feature {
160 bool has_continue_xfer_support; 162 bool has_continue_xfer_support;
@@ -167,6 +169,7 @@ struct tegra_i2c_hw_feature {
167 bool has_multi_master_mode; 169 bool has_multi_master_mode;
168 bool has_slcg_override_reg; 170 bool has_slcg_override_reg;
169 bool has_mst_fifo; 171 bool has_mst_fifo;
172 const struct i2c_adapter_quirks *quirks;
170}; 173};
171 174
172/** 175/**
@@ -837,6 +840,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
837 .max_write_len = 4096, 840 .max_write_len = 4096,
838}; 841};
839 842
843static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
844 .flags = I2C_AQ_NO_ZERO_LEN,
845};
846
840static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { 847static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
841 .has_continue_xfer_support = false, 848 .has_continue_xfer_support = false,
842 .has_per_pkt_xfer_complete_irq = false, 849 .has_per_pkt_xfer_complete_irq = false,
@@ -848,6 +855,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
848 .has_multi_master_mode = false, 855 .has_multi_master_mode = false,
849 .has_slcg_override_reg = false, 856 .has_slcg_override_reg = false,
850 .has_mst_fifo = false, 857 .has_mst_fifo = false,
858 .quirks = &tegra_i2c_quirks,
851}; 859};
852 860
853static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { 861static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -861,6 +869,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
861 .has_multi_master_mode = false, 869 .has_multi_master_mode = false,
862 .has_slcg_override_reg = false, 870 .has_slcg_override_reg = false,
863 .has_mst_fifo = false, 871 .has_mst_fifo = false,
872 .quirks = &tegra_i2c_quirks,
864}; 873};
865 874
866static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { 875static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -874,6 +883,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
874 .has_multi_master_mode = false, 883 .has_multi_master_mode = false,
875 .has_slcg_override_reg = false, 884 .has_slcg_override_reg = false,
876 .has_mst_fifo = false, 885 .has_mst_fifo = false,
886 .quirks = &tegra_i2c_quirks,
877}; 887};
878 888
879static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { 889static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -887,6 +897,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
887 .has_multi_master_mode = false, 897 .has_multi_master_mode = false,
888 .has_slcg_override_reg = true, 898 .has_slcg_override_reg = true,
889 .has_mst_fifo = false, 899 .has_mst_fifo = false,
900 .quirks = &tegra_i2c_quirks,
890}; 901};
891 902
892static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { 903static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -900,6 +911,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
900 .has_multi_master_mode = true, 911 .has_multi_master_mode = true,
901 .has_slcg_override_reg = true, 912 .has_slcg_override_reg = true,
902 .has_mst_fifo = false, 913 .has_mst_fifo = false,
914 .quirks = &tegra_i2c_quirks,
903}; 915};
904 916
905static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { 917static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
@@ -913,6 +925,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
913 .has_multi_master_mode = true, 925 .has_multi_master_mode = true,
914 .has_slcg_override_reg = true, 926 .has_slcg_override_reg = true,
915 .has_mst_fifo = true, 927 .has_mst_fifo = true,
928 .quirks = &tegra194_i2c_quirks,
916}; 929};
917 930
918/* Match table for of_platform binding */ 931/* Match table for of_platform binding */
@@ -964,7 +977,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
964 i2c_dev->base = base; 977 i2c_dev->base = base;
965 i2c_dev->div_clk = div_clk; 978 i2c_dev->div_clk = div_clk;
966 i2c_dev->adapter.algo = &tegra_i2c_algo; 979 i2c_dev->adapter.algo = &tegra_i2c_algo;
967 i2c_dev->adapter.quirks = &tegra_i2c_quirks;
968 i2c_dev->irq = irq; 980 i2c_dev->irq = irq;
969 i2c_dev->cont_id = pdev->id; 981 i2c_dev->cont_id = pdev->id;
970 i2c_dev->dev = &pdev->dev; 982 i2c_dev->dev = &pdev->dev;
@@ -980,6 +992,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
980 i2c_dev->hw = of_device_get_match_data(&pdev->dev); 992 i2c_dev->hw = of_device_get_match_data(&pdev->dev);
981 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node, 993 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
982 "nvidia,tegra20-i2c-dvc"); 994 "nvidia,tegra20-i2c-dvc");
995 i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
983 init_completion(&i2c_dev->msg_complete); 996 init_completion(&i2c_dev->msg_complete);
984 spin_lock_init(&i2c_dev->xfer_lock); 997 spin_lock_init(&i2c_dev->xfer_lock);
985 998
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1aca742fde4a..ccd76c71af09 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
470 data_arg.data); 470 data_arg.data);
471 } 471 }
472 case I2C_RETRIES: 472 case I2C_RETRIES:
473 if (arg > INT_MAX)
474 return -EINVAL;
475
473 client->adapter->retries = arg; 476 client->adapter->retries = arg;
474 break; 477 break;
475 case I2C_TIMEOUT: 478 case I2C_TIMEOUT:
479 if (arg > INT_MAX)
480 return -EINVAL;
481
476 /* For historical reasons, user-space sets the timeout 482 /* For historical reasons, user-space sets the timeout
477 * value in units of 10 ms. 483 * value in units of 10 ms.
478 */ 484 */
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b532e2c9cf5c..f8c00b94817f 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -901,9 +901,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
901 master->regs + 901 master->regs +
902 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 902 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
903 903
904 if (!old_dyn_addr)
905 return 0;
906
907 master->addrs[data->index] = dev->info.dyn_addr; 904 master->addrs[data->index] = dev->info.dyn_addr;
908 905
909 return 0; 906 return 0;
@@ -925,11 +922,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
925 return -ENOMEM; 922 return -ENOMEM;
926 923
927 data->index = pos; 924 data->index = pos;
928 master->addrs[pos] = dev->info.dyn_addr; 925 master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
929 master->free_pos &= ~BIT(pos); 926 master->free_pos &= ~BIT(pos);
930 i3c_dev_set_master_data(dev, data); 927 i3c_dev_set_master_data(dev, data);
931 928
932 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 929 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
933 master->regs + 930 master->regs +
934 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 931 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
935 932
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index bbd79b8b1a80..8889a4fdb454 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
1556 return PTR_ERR(master->pclk); 1556 return PTR_ERR(master->pclk);
1557 1557
1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); 1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1559 if (IS_ERR(master->pclk)) 1559 if (IS_ERR(master->sysclk))
1560 return PTR_ERR(master->pclk); 1560 return PTR_ERR(master->sysclk);
1561 1561
1562 irq = platform_get_irq(pdev, 0); 1562 irq = platform_get_irq(pdev, 0);
1563 if (irq < 0) 1563 if (irq < 0)
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index cafb1dcadc48..9d984f2a8ba7 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -142,7 +142,10 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
142 stepconfig |= STEPCONFIG_MODE_SWCNT; 142 stepconfig |= STEPCONFIG_MODE_SWCNT;
143 143
144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps), 144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
145 stepconfig | STEPCONFIG_INP(chan)); 145 stepconfig | STEPCONFIG_INP(chan) |
146 STEPCONFIG_INM_ADCREFM |
147 STEPCONFIG_RFP_VREFP |
148 STEPCONFIG_RFM_VREFN);
146 149
147 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { 150 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
148 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n", 151 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 63a7cc00bae0..84f077b2b90a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
494 id_priv->id.route.addr.dev_addr.transport = 494 id_priv->id.route.addr.dev_addr.transport =
495 rdma_node_get_transport(cma_dev->device->node_type); 495 rdma_node_get_transport(cma_dev->device->node_type);
496 list_add_tail(&id_priv->list, &cma_dev->id_list); 496 list_add_tail(&id_priv->list, &cma_dev->id_list);
497 rdma_restrack_kadd(&id_priv->res); 497 if (id_priv->res.kern_name)
498 rdma_restrack_kadd(&id_priv->res);
499 else
500 rdma_restrack_uadd(&id_priv->res);
498} 501}
499 502
500static void cma_attach_to_dev(struct rdma_id_private *id_priv, 503static void cma_attach_to_dev(struct rdma_id_private *id_priv,
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e600fc23ae62..3c97a8b6bf1e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
586 goto err; 586 goto err;
587 if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
588 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
589 pd->unsafe_global_rkey))
590 goto err;
591 587
592 if (fill_res_name_pid(msg, res)) 588 if (fill_res_name_pid(msg, res))
593 goto err; 589 goto err;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index be6b8e1257d0..69f8db66925e 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
106 enum uverbs_obj_access access, 106 enum uverbs_obj_access access,
107 bool commit); 107 bool commit);
108 108
109int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
110
109void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); 111void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
110void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); 112void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
111 113
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6b12cc5f97b2..3317300ab036 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
60{ 60{
61 int ret; 61 int ret;
62 62
63 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
64 return uverbs_copy_to_struct_or_zero(
65 attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
66
63 if (copy_to_user(attrs->ucore.outbuf, resp, 67 if (copy_to_user(attrs->ucore.outbuf, resp,
64 min(attrs->ucore.outlen, resp_len))) 68 min(attrs->ucore.outlen, resp_len)))
65 return -EFAULT; 69 return -EFAULT;
@@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
1181 goto out_put; 1185 goto out_put;
1182 } 1186 }
1183 1187
1188 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
1189 ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
1190
1184 ret = 0; 1191 ret = 0;
1185 1192
1186out_put: 1193out_put:
@@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
2012 return -ENOMEM; 2019 return -ENOMEM;
2013 2020
2014 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); 2021 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2015 if (!qp) 2022 if (!qp) {
2023 ret = -EINVAL;
2016 goto out; 2024 goto out;
2025 }
2017 2026
2018 is_ud = qp->qp_type == IB_QPT_UD; 2027 is_ud = qp->qp_type == IB_QPT_UD;
2019 sg_ind = 0; 2028 sg_ind = 0;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8c81ff698052..0ca04d224015 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
144 0, uattr->len - len); 144 0, uattr->len - len);
145} 145}
146 146
147static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
148 const struct uverbs_attr *attr)
149{
150 struct bundle_priv *pbundle =
151 container_of(bundle, struct bundle_priv, bundle);
152 u16 flags;
153
154 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
155 UVERBS_ATTR_F_VALID_OUTPUT;
156 if (put_user(flags,
157 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
158 return -EFAULT;
159 return 0;
160}
161
147static int uverbs_process_idrs_array(struct bundle_priv *pbundle, 162static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
148 const struct uverbs_api_attr *attr_uapi, 163 const struct uverbs_api_attr *attr_uapi,
149 struct uverbs_objs_arr_attr *attr, 164 struct uverbs_objs_arr_attr *attr,
@@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
456 } 471 }
457 472
458 /* 473 /*
474 * Until the drivers are revised to use the bundle directly we have to
475 * assume that the driver wrote to its UHW_OUT and flag userspace
476 * appropriately.
477 */
478 if (!ret && pbundle->method_elm->has_udata) {
479 const struct uverbs_attr *attr =
480 uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
481
482 if (!IS_ERR(attr))
483 ret = uverbs_set_output(&pbundle->bundle, attr);
484 }
485
486 /*
459 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can 487 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
460 * not invoke the method because the request is not supported. No 488 * not invoke the method because the request is not supported. No
461 * other cases should return this code. 489 * other cases should return this code.
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
706int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, 734int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
707 const void *from, size_t size) 735 const void *from, size_t size)
708{ 736{
709 struct bundle_priv *pbundle =
710 container_of(bundle, struct bundle_priv, bundle);
711 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 737 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
712 u16 flags;
713 size_t min_size; 738 size_t min_size;
714 739
715 if (IS_ERR(attr)) 740 if (IS_ERR(attr))
@@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
719 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) 744 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
720 return -EFAULT; 745 return -EFAULT;
721 746
722 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 747 return uverbs_set_output(bundle, attr);
723 UVERBS_ATTR_F_VALID_OUTPUT;
724 if (put_user(flags,
725 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
726 return -EFAULT;
727
728 return 0;
729} 748}
730EXPORT_SYMBOL(uverbs_copy_to); 749EXPORT_SYMBOL(uverbs_copy_to);
731 750
751
752/*
753 * This is only used if the caller has directly used copy_to_use to write the
754 * data. It signals to user space that the buffer is filled in.
755 */
756int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
757{
758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
759
760 if (IS_ERR(attr))
761 return PTR_ERR(attr);
762
763 return uverbs_set_output(bundle, attr);
764}
765
732int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, 766int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
733 size_t idx, s64 lower_bound, u64 upper_bound, 767 size_t idx, s64 lower_bound, u64 upper_bound,
734 s64 *def_val) 768 s64 *def_val)
@@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
757{ 791{
758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 792 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
759 793
760 if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), 794 if (size < attr->ptr_attr.len) {
761 attr->ptr_attr.len)) 795 if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
762 return -EFAULT; 796 attr->ptr_attr.len - size))
797 return -EFAULT;
798 }
763 return uverbs_copy_to(bundle, idx, from, size); 799 return uverbs_copy_to(bundle, idx, from, size);
764} 800}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb0007aa0c27..2890a77339e1 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -690,6 +690,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
690 690
691 buf += sizeof(hdr); 691 buf += sizeof(hdr);
692 692
693 memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
693 bundle.ufile = file; 694 bundle.ufile = file;
694 if (!method_elm->is_ex) { 695 if (!method_elm->is_ex) {
695 size_t in_len = hdr.in_words * 4 - sizeof(hdr); 696 size_t in_len = hdr.in_words * 4 - sizeof(hdr);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 326805461265..19551aa43850 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
766 return NULL; 766 return NULL;
767 767
768 sbuf->size = size; 768 sbuf->size = size;
769 sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 769 sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
770 &sbuf->dma_addr, GFP_ATOMIC); 770 &sbuf->dma_addr, GFP_ATOMIC);
771 if (!sbuf->sb) 771 if (!sbuf->sb)
772 goto bail; 772 goto bail;
773 773
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 59eeac55626f..57d4951679cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
105 105
106 if (!sghead) { 106 if (!sghead) {
107 for (i = 0; i < pages; i++) { 107 for (i = 0; i < pages; i++) {
108 pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, 108 pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
109 pbl->pg_size, 109 pbl->pg_size,
110 &pbl->pg_map_arr[i], 110 &pbl->pg_map_arr[i],
111 GFP_KERNEL); 111 GFP_KERNEL);
112 if (!pbl->pg_arr[i]) 112 if (!pbl->pg_arr[i])
113 goto fail; 113 goto fail;
114 pbl->pg_count++; 114 pbl->pg_count++;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index df4f7a3f043d..8ac72ac7cbac 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
291 if (!wq->sq) 291 if (!wq->sq)
292 goto err3; 292 goto err3;
293 293
294 wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), 294 wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
295 depth * sizeof(union t3_wr), 295 depth * sizeof(union t3_wr),
296 &(wq->dma_addr), GFP_KERNEL); 296 &(wq->dma_addr), GFP_KERNEL);
297 if (!wq->queue) 297 if (!wq->queue)
298 goto err4; 298 goto err4;
299 299
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 981ff5cfb5d1..504cf525508f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> 2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2565 T4_RQT_ENTRY_SHIFT; 2565 T4_RQT_ENTRY_SHIFT;
2566 2566
2567 wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, 2567 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
2568 wq->memsize, &wq->dma_addr, 2568 &wq->dma_addr, GFP_KERNEL);
2569 GFP_KERNEL);
2570 if (!wq->queue) 2569 if (!wq->queue)
2571 goto err_free_rqtpool; 2570 goto err_free_rqtpool;
2572 2571
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 09044905284f..7835eb52e7c5 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
899 goto done; 899 goto done;
900 900
901 /* allocate dummy tail memory for all receive contexts */ 901 /* allocate dummy tail memory for all receive contexts */
902 dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( 902 dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
903 &dd->pcidev->dev, sizeof(u64), 903 sizeof(u64),
904 &dd->rcvhdrtail_dummy_dma, 904 &dd->rcvhdrtail_dummy_dma,
905 GFP_KERNEL); 905 GFP_KERNEL);
906 906
907 if (!dd->rcvhdrtail_dummy_kvaddr) { 907 if (!dd->rcvhdrtail_dummy_kvaddr) {
908 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); 908 dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1863 gfp_flags = GFP_KERNEL; 1863 gfp_flags = GFP_KERNEL;
1864 else 1864 else
1865 gfp_flags = GFP_USER; 1865 gfp_flags = GFP_USER;
1866 rcd->rcvhdrq = dma_zalloc_coherent( 1866 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1867 &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, 1867 &rcd->rcvhdrq_dma,
1868 gfp_flags | __GFP_COMP); 1868 gfp_flags | __GFP_COMP);
1869 1869
1870 if (!rcd->rcvhdrq) { 1870 if (!rcd->rcvhdrq) {
1871 dd_dev_err(dd, 1871 dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1876 1876
1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1879 rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( 1879 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1880 &dd->pcidev->dev, PAGE_SIZE, 1880 PAGE_SIZE,
1881 &rcd->rcvhdrqtailaddr_dma, gfp_flags); 1881 &rcd->rcvhdrqtailaddr_dma,
1882 gfp_flags);
1882 if (!rcd->rcvhdrtail_kvaddr) 1883 if (!rcd->rcvhdrtail_kvaddr)
1883 goto bail_free; 1884 goto bail_free;
1884 } 1885 }
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1974 while (alloced_bytes < rcd->egrbufs.size && 1975 while (alloced_bytes < rcd->egrbufs.size &&
1975 rcd->egrbufs.alloced < rcd->egrbufs.count) { 1976 rcd->egrbufs.alloced < rcd->egrbufs.count) {
1976 rcd->egrbufs.buffers[idx].addr = 1977 rcd->egrbufs.buffers[idx].addr =
1977 dma_zalloc_coherent(&dd->pcidev->dev, 1978 dma_alloc_coherent(&dd->pcidev->dev,
1978 rcd->egrbufs.rcvtid_size, 1979 rcd->egrbufs.rcvtid_size,
1979 &rcd->egrbufs.buffers[idx].dma, 1980 &rcd->egrbufs.buffers[idx].dma,
1980 gfp_flags); 1981 gfp_flags);
1981 if (rcd->egrbufs.buffers[idx].addr) { 1982 if (rcd->egrbufs.buffers[idx].addr) {
1982 rcd->egrbufs.buffers[idx].len = 1983 rcd->egrbufs.buffers[idx].len =
1983 rcd->egrbufs.rcvtid_size; 1984 rcd->egrbufs.rcvtid_size;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index dd5a5c030066..04126d7e318d 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); 2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2099 2099
2100 set_dev_node(&dd->pcidev->dev, i); 2100 set_dev_node(&dd->pcidev->dev, i);
2101 dd->cr_base[i].va = dma_zalloc_coherent( 2101 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
2102 &dd->pcidev->dev, 2102 bytes,
2103 bytes, 2103 &dd->cr_base[i].dma,
2104 &dd->cr_base[i].dma, 2104 GFP_KERNEL);
2105 GFP_KERNEL);
2106 if (!dd->cr_base[i].va) { 2105 if (!dd->cr_base[i].va) {
2107 set_dev_node(&dd->pcidev->dev, dd->node); 2106 set_dev_node(&dd->pcidev->dev, dd->node);
2108 dd_dev_err(dd, 2107 dd_dev_err(dd,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b84356e1a4c1..96897a91fb0a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
1453 timer_setup(&sde->err_progress_check_timer, 1453 timer_setup(&sde->err_progress_check_timer,
1454 sdma_err_progress_check, 0); 1454 sdma_err_progress_check, 0);
1455 1455
1456 sde->descq = dma_zalloc_coherent( 1456 sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
1457 &dd->pcidev->dev, 1457 descq_cnt * sizeof(u64[2]),
1458 descq_cnt * sizeof(u64[2]), 1458 &sde->descq_phys, GFP_KERNEL);
1459 &sde->descq_phys,
1460 GFP_KERNEL
1461 );
1462 if (!sde->descq) 1459 if (!sde->descq)
1463 goto bail; 1460 goto bail;
1464 sde->tx_ring = 1461 sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
1471 1468
1472 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1469 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1473 /* Allocate memory for DMA of head registers to memory */ 1470 /* Allocate memory for DMA of head registers to memory */
1474 dd->sdma_heads_dma = dma_zalloc_coherent( 1471 dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
1475 &dd->pcidev->dev, 1472 dd->sdma_heads_size,
1476 dd->sdma_heads_size, 1473 &dd->sdma_heads_phys,
1477 &dd->sdma_heads_phys, 1474 GFP_KERNEL);
1478 GFP_KERNEL
1479 );
1480 if (!dd->sdma_heads_dma) { 1475 if (!dd->sdma_heads_dma) {
1481 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1476 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1482 goto bail; 1477 goto bail;
1483 } 1478 }
1484 1479
1485 /* Allocate memory for pad */ 1480 /* Allocate memory for pad */
1486 dd->sdma_pad_dma = dma_zalloc_coherent( 1481 dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
1487 &dd->pcidev->dev, 1482 &dd->sdma_pad_phys, GFP_KERNEL);
1488 sizeof(u32),
1489 &dd->sdma_pad_phys,
1490 GFP_KERNEL
1491 );
1492 if (!dd->sdma_pad_dma) { 1483 if (!dd->sdma_pad_dma) {
1493 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1484 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1494 goto bail; 1485 goto bail;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 6300033a448f..dac058d3df53 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
197 buf->npages = 1 << order; 197 buf->npages = 1 << order;
198 buf->page_shift = page_shift; 198 buf->page_shift = page_shift;
199 /* MTT PA must be recorded in 4k alignment, t is 4k aligned */ 199 /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
200 buf->direct.buf = dma_zalloc_coherent(dev, 200 buf->direct.buf = dma_alloc_coherent(dev, size, &t,
201 size, &t, GFP_KERNEL); 201 GFP_KERNEL);
202 if (!buf->direct.buf) 202 if (!buf->direct.buf)
203 return -ENOMEM; 203 return -ENOMEM;
204 204
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
219 return -ENOMEM; 219 return -ENOMEM;
220 220
221 for (i = 0; i < buf->nbufs; ++i) { 221 for (i = 0; i < buf->nbufs; ++i) {
222 buf->page_list[i].buf = dma_zalloc_coherent(dev, 222 buf->page_list[i].buf = dma_alloc_coherent(dev,
223 page_size, &t, 223 page_size,
224 GFP_KERNEL); 224 &t,
225 GFP_KERNEL);
225 226
226 if (!buf->page_list[i].buf) 227 if (!buf->page_list[i].buf)
227 goto err_free; 228 goto err_free;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3a669451cf86..543fa1504cd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5091 eqe_alloc = i * (buf_chk_sz / eq->eqe_size); 5091 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5092 size = (eq->entries - eqe_alloc) * eq->eqe_size; 5092 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5093 } 5093 }
5094 eq->buf[i] = dma_zalloc_coherent(dev, size, 5094 eq->buf[i] = dma_alloc_coherent(dev, size,
5095 &(eq->buf_dma[i]), 5095 &(eq->buf_dma[i]),
5096 GFP_KERNEL); 5096 GFP_KERNEL);
5097 if (!eq->buf[i]) 5097 if (!eq->buf[i])
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5126 size = (eq->entries - eqe_alloc) 5126 size = (eq->entries - eqe_alloc)
5127 * eq->eqe_size; 5127 * eq->eqe_size;
5128 } 5128 }
5129 eq->buf[idx] = dma_zalloc_coherent(dev, size, 5129 eq->buf[idx] = dma_alloc_coherent(dev, size,
5130 &(eq->buf_dma[idx]), 5130 &(eq->buf_dma[idx]),
5131 GFP_KERNEL); 5131 GFP_KERNEL);
5132 if (!eq->buf[idx]) 5132 if (!eq->buf[idx])
5133 goto err_dma_alloc_buf; 5133 goto err_dma_alloc_buf;
5134 5134
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5241 goto free_cmd_mbox; 5241 goto free_cmd_mbox;
5242 } 5242 }
5243 5243
5244 eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz, 5244 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5245 &(eq->buf_list->map), 5245 &(eq->buf_list->map),
5246 GFP_KERNEL); 5246 GFP_KERNEL);
5247 if (!eq->buf_list->buf) { 5247 if (!eq->buf_list->buf) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index a9ea966877f2..59e978141ad4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
745 if (!mem) 745 if (!mem)
746 return I40IW_ERR_PARAM; 746 return I40IW_ERR_PARAM;
747 mem->size = ALIGN(size, alignment); 747 mem->size = ALIGN(size, alignment);
748 mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, 748 mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
749 (dma_addr_t *)&mem->pa, GFP_KERNEL); 749 (dma_addr_t *)&mem->pa, GFP_KERNEL);
750 if (!mem->va) 750 if (!mem->va)
751 return I40IW_ERR_NO_MEMORY; 751 return I40IW_ERR_NO_MEMORY;
752 return 0; 752 return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index cc9c0c8ccba3..112d2f38e0de 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
623 page = dev->db_tab->page + end; 623 page = dev->db_tab->page + end;
624 624
625alloc: 625alloc:
626 page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 626 page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
627 &page->mapping, GFP_KERNEL); 627 MTHCA_ICM_PAGE_SIZE, &page->mapping,
628 GFP_KERNEL);
628 if (!page->db_rec) { 629 if (!page->db_rec) {
629 ret = -ENOMEM; 630 ret = -ENOMEM;
630 goto out; 631 goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 82cb6b71ac7c..e3e9dd54caa2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
534 { 534 {
535 struct mthca_ucontext *context; 535 struct mthca_ucontext *context;
536 536
537 qp = kmalloc(sizeof *qp, GFP_KERNEL); 537 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
538 if (!qp) 538 if (!qp)
539 return ERR_PTR(-ENOMEM); 539 return ERR_PTR(-ENOMEM);
540 540
@@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
600 if (udata) 600 if (udata)
601 return ERR_PTR(-EINVAL); 601 return ERR_PTR(-EINVAL);
602 602
603 qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); 603 qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
604 if (!qp) 604 if (!qp)
605 return ERR_PTR(-ENOMEM); 605 return ERR_PTR(-ENOMEM);
606 606
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 241a57a07485..097e5ab2a19f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
380 q->len = len; 380 q->len = len;
381 q->entry_size = entry_size; 381 q->entry_size = entry_size;
382 q->size = len * entry_size; 382 q->size = len * entry_size;
383 q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size, 383 q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
384 &q->dma, GFP_KERNEL); 384 GFP_KERNEL);
385 if (!q->va) 385 if (!q->va)
386 return -ENOMEM; 386 return -ENOMEM;
387 return 0; 387 return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1819 return -ENOMEM; 1819 return -ENOMEM;
1820 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ, 1820 ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1821 OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 1821 OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1822 cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL); 1822 cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1823 if (!cq->va) { 1823 if (!cq->va) {
1824 status = -ENOMEM; 1824 status = -ENOMEM;
1825 goto mem_err; 1825 goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
2209 qp->sq.max_cnt = max_wqe_allocated; 2209 qp->sq.max_cnt = max_wqe_allocated;
2210 len = (hw_pages * hw_page_size); 2210 len = (hw_pages * hw_page_size);
2211 2211
2212 qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2212 qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2213 if (!qp->sq.va) 2213 if (!qp->sq.va)
2214 return -EINVAL; 2214 return -EINVAL;
2215 qp->sq.len = len; 2215 qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
2259 qp->rq.max_cnt = max_rqe_allocated; 2259 qp->rq.max_cnt = max_rqe_allocated;
2260 len = (hw_pages * hw_page_size); 2260 len = (hw_pages * hw_page_size);
2261 2261
2262 qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); 2262 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2263 if (!qp->rq.va) 2263 if (!qp->rq.va)
2264 return -ENOMEM; 2264 return -ENOMEM;
2265 qp->rq.pa = pa; 2265 qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
2315 if (dev->attr.ird == 0) 2315 if (dev->attr.ird == 0)
2316 return 0; 2316 return 0;
2317 2317
2318 qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa, 2318 qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
2319 GFP_KERNEL); 2319 GFP_KERNEL);
2320 if (!qp->ird_q_va) 2320 if (!qp->ird_q_va)
2321 return -ENOMEM; 2321 return -ENOMEM;
2322 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages, 2322 ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index dd15474b19b7..6be0ea109138 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
74 sizeof(struct ocrdma_rdma_stats_resp)); 74 sizeof(struct ocrdma_rdma_stats_resp));
75 75
76 mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size, 76 mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
77 &mem->pa, GFP_KERNEL); 77 &mem->pa, GFP_KERNEL);
78 if (!mem->va) { 78 if (!mem->va) {
79 pr_err("%s: stats mbox allocation failed\n", __func__); 79 pr_err("%s: stats mbox allocation failed\n", __func__);
80 return false; 80 return false;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c46bed0c5513..287c332ff0e6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
504 INIT_LIST_HEAD(&ctx->mm_head); 504 INIT_LIST_HEAD(&ctx->mm_head);
505 mutex_init(&ctx->mm_list_lock); 505 mutex_init(&ctx->mm_list_lock);
506 506
507 ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len, 507 ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
508 &ctx->ah_tbl.pa, GFP_KERNEL); 508 &ctx->ah_tbl.pa, GFP_KERNEL);
509 if (!ctx->ah_tbl.va) { 509 if (!ctx->ah_tbl.va) {
510 kfree(ctx); 510 kfree(ctx);
511 return ERR_PTR(-ENOMEM); 511 return ERR_PTR(-ENOMEM);
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
838 return -ENOMEM; 838 return -ENOMEM;
839 839
840 for (i = 0; i < mr->num_pbls; i++) { 840 for (i = 0; i < mr->num_pbls; i++) {
841 va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL); 841 va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
842 if (!va) { 842 if (!va) {
843 ocrdma_free_mr_pbl_tbl(dev, mr); 843 ocrdma_free_mr_pbl_tbl(dev, mr);
844 status = -ENOMEM; 844 status = -ENOMEM;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b342a70e2814..e1ccf32b1c3d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
556 return ERR_PTR(-ENOMEM); 556 return ERR_PTR(-ENOMEM);
557 557
558 for (i = 0; i < pbl_info->num_pbls; i++) { 558 for (i = 0; i < pbl_info->num_pbls; i++) {
559 va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size, 559 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
560 &pa, flags); 560 flags);
561 if (!va) 561 if (!va)
562 goto err; 562 goto err;
563 563
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685c997e..3c633ab58052 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
427 427
428static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op) 428static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
429{ 429{
430 return (enum pvrdma_wr_opcode)op; 430 switch (op) {
431 case IB_WR_RDMA_WRITE:
432 return PVRDMA_WR_RDMA_WRITE;
433 case IB_WR_RDMA_WRITE_WITH_IMM:
434 return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
435 case IB_WR_SEND:
436 return PVRDMA_WR_SEND;
437 case IB_WR_SEND_WITH_IMM:
438 return PVRDMA_WR_SEND_WITH_IMM;
439 case IB_WR_RDMA_READ:
440 return PVRDMA_WR_RDMA_READ;
441 case IB_WR_ATOMIC_CMP_AND_SWP:
442 return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
443 case IB_WR_ATOMIC_FETCH_AND_ADD:
444 return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
445 case IB_WR_LSO:
446 return PVRDMA_WR_LSO;
447 case IB_WR_SEND_WITH_INV:
448 return PVRDMA_WR_SEND_WITH_INV;
449 case IB_WR_RDMA_READ_WITH_INV:
450 return PVRDMA_WR_RDMA_READ_WITH_INV;
451 case IB_WR_LOCAL_INV:
452 return PVRDMA_WR_LOCAL_INV;
453 case IB_WR_REG_MR:
454 return PVRDMA_WR_FAST_REG_MR;
455 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
456 return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
457 case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
458 return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
459 case IB_WR_REG_SIG_MR:
460 return PVRDMA_WR_REG_SIG_MR;
461 default:
462 return PVRDMA_WR_ERROR;
463 }
431} 464}
432 465
433static inline enum ib_wc_status pvrdma_wc_status_to_ib( 466static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index eaa109dbc96a..39c37b6fd715 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
890 dev_info(&pdev->dev, "device version %d, driver version %d\n", 890 dev_info(&pdev->dev, "device version %d, driver version %d\n",
891 dev->dsr_version, PVRDMA_VERSION); 891 dev->dsr_version, PVRDMA_VERSION);
892 892
893 dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr), 893 dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
894 &dev->dsrbase, GFP_KERNEL); 894 &dev->dsrbase, GFP_KERNEL);
895 if (!dev->dsr) { 895 if (!dev->dsr) {
896 dev_err(&pdev->dev, "failed to allocate shared region\n"); 896 dev_err(&pdev->dev, "failed to allocate shared region\n");
897 ret = -ENOMEM; 897 ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 3acf74cbe266..1ec3646087ba 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
721 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) 721 wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
722 wqe_hdr->ex.imm_data = wr->ex.imm_data; 722 wqe_hdr->ex.imm_data = wr->ex.imm_data;
723 723
724 if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
725 *bad_wr = wr;
726 ret = -EINVAL;
727 goto out;
728 }
729
724 switch (qp->ibqp.qp_type) { 730 switch (qp->ibqp.qp_type) {
725 case IB_QPT_GSI: 731 case IB_QPT_GSI:
726 case IB_QPT_UD: 732 case IB_QPT_UD:
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index f456c1125bd6..69881265d121 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev)
147 return -ENOMEM; 147 return -ENOMEM;
148 ts->pdev = pdev; 148 ts->pdev = pdev;
149 149
150 ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys, 150 ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
151 GFP_KERNEL); 151 GFP_KERNEL);
152 if (!ts->fw_regs_va) { 152 if (!ts->fw_regs_va) {
153 dev_err(dev, "failed to dma_alloc_coherent\n"); 153 dev_err(dev, "failed to dma_alloc_coherent\n");
154 return -ENOMEM; 154 return -ENOMEM;
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 6ede4286b835..730f7dabcf37 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
232 232
233 spin_lock_init(&dom->pgtlock); 233 spin_lock_init(&dom->pgtlock);
234 234
235 dom->pgt_va = dma_zalloc_coherent(data->dev, 235 dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
236 M2701_IOMMU_PGT_SIZE, 236 &dom->pgt_pa, GFP_KERNEL);
237 &dom->pgt_pa, GFP_KERNEL);
238 if (!dom->pgt_va) 237 if (!dom->pgt_va)
239 return -ENOMEM; 238 return -ENOMEM;
240 239
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 2543baba8b1f..5a2ec43b7ddd 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -95,7 +95,7 @@ static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
95 95
96 /* Setup 64 channel slots */ 96 /* Setup 64 channel slots */
97 for (i = 0; i < INTC_IRQS; i += 4) 97 for (i = 0; i < INTC_IRQS; i += 4)
98 writel_relaxed(build_channel_val(i, magic), reg_addr + i); 98 writel(build_channel_val(i, magic), reg_addr + i);
99} 99}
100 100
101static int __init 101static int __init
@@ -135,16 +135,10 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
135static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq, 135static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
136 u32 irq_base) 136 u32 irq_base)
137{ 137{
138 u32 irq;
139
140 if (hwirq == 0) 138 if (hwirq == 0)
141 return 0; 139 return 0;
142 140
143 while (hwirq) { 141 handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
144 irq = __ffs(hwirq);
145 hwirq &= ~BIT(irq);
146 handle_domain_irq(root_domain, irq_base + irq, regs);
147 }
148 142
149 return 1; 143 return 1;
150} 144}
@@ -154,12 +148,16 @@ static void gx_irq_handler(struct pt_regs *regs)
154{ 148{
155 bool ret; 149 bool ret;
156 150
157 do { 151retry:
158 ret = handle_irq_perbit(regs, 152 ret = handle_irq_perbit(regs,
159 readl_relaxed(reg_base + GX_INTC_PEN31_00), 0); 153 readl(reg_base + GX_INTC_PEN63_32), 32);
160 ret |= handle_irq_perbit(regs, 154 if (ret)
161 readl_relaxed(reg_base + GX_INTC_PEN63_32), 32); 155 goto retry;
162 } while (ret); 156
157 ret = handle_irq_perbit(regs,
158 readl(reg_base + GX_INTC_PEN31_00), 0);
159 if (ret)
160 goto retry;
163} 161}
164 162
165static int __init 163static int __init
@@ -174,14 +172,14 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
174 /* 172 /*
175 * Initial enable reg to disable all interrupts 173 * Initial enable reg to disable all interrupts
176 */ 174 */
177 writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00); 175 writel(0x0, reg_base + GX_INTC_NEN31_00);
178 writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32); 176 writel(0x0, reg_base + GX_INTC_NEN63_32);
179 177
180 /* 178 /*
181 * Initial mask reg with all unmasked, because we only use enalbe reg 179 * Initial mask reg with all unmasked, because we only use enalbe reg
182 */ 180 */
183 writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00); 181 writel(0x0, reg_base + GX_INTC_NMASK31_00);
184 writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32); 182 writel(0x0, reg_base + GX_INTC_NMASK63_32);
185 183
186 setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE); 184 setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
187 185
@@ -204,20 +202,29 @@ static void ck_irq_handler(struct pt_regs *regs)
204 void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00; 202 void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
205 void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32; 203 void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
206 204
207 do { 205retry:
208 /* handle 0 - 31 irqs */ 206 /* handle 0 - 63 irqs */
209 ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0); 207 ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
210 ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32); 208 if (ret)
209 goto retry;
211 210
212 if (nr_irq == INTC_IRQS) 211 ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
213 continue; 212 if (ret)
213 goto retry;
214
215 if (nr_irq == INTC_IRQS)
216 return;
214 217
215 /* handle 64 - 127 irqs */ 218 /* handle 64 - 127 irqs */
216 ret |= handle_irq_perbit(regs, 219 ret = handle_irq_perbit(regs,
217 readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64); 220 readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
218 ret |= handle_irq_perbit(regs, 221 if (ret)
219 readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96); 222 goto retry;
220 } while (ret); 223
224 ret = handle_irq_perbit(regs,
225 readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
226 if (ret)
227 goto retry;
221} 228}
222 229
223static int __init 230static int __init
@@ -230,11 +237,11 @@ ck_intc_init(struct device_node *node, struct device_node *parent)
230 return ret; 237 return ret;
231 238
232 /* Initial enable reg to disable all interrupts */ 239 /* Initial enable reg to disable all interrupts */
233 writel_relaxed(0, reg_base + CK_INTC_NEN31_00); 240 writel(0, reg_base + CK_INTC_NEN31_00);
234 writel_relaxed(0, reg_base + CK_INTC_NEN63_32); 241 writel(0, reg_base + CK_INTC_NEN63_32);
235 242
236 /* Enable irq intc */ 243 /* Enable irq intc */
237 writel_relaxed(BIT(31), reg_base + CK_INTC_ICR); 244 writel(BIT(31), reg_base + CK_INTC_ICR);
238 245
239 ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0); 246 ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
240 ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32); 247 ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
@@ -260,8 +267,8 @@ ck_dual_intc_init(struct device_node *node, struct device_node *parent)
260 return ret; 267 return ret;
261 268
262 /* Initial enable reg to disable all interrupts */ 269 /* Initial enable reg to disable all interrupts */
263 writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE); 270 writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
264 writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE); 271 writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
265 272
266 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64); 273 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
267 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96); 274 ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e48902..40ca1e8fa09f 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
423 int i, j; 423 int i, j;
424 424
425 for (j = 0; j < AVM_MAXVERSION; j++) 425 for (j = 0; j < AVM_MAXVERSION; j++)
426 cinfo->version[j] = "\0\0" + 1; 426 cinfo->version[j] = "";
427 for (i = 0, j = 0; 427 for (i = 0, j = 0;
428 j < AVM_MAXVERSION && i < cinfo->versionlen; 428 j < AVM_MAXVERSION && i < cinfo->versionlen;
429 j++, i += cinfo->versionbuf[i] + 1) 429 j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6d05946b445e..124ff530da82 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -262,8 +262,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
262 struct dchannel *dch = &hw->dch; 262 struct dchannel *dch = &hw->dch;
263 int i; 263 int i;
264 264
265 phi = kzalloc(sizeof(struct ph_info) + 265 phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
266 dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
267 phi->dch.ch.protocol = hw->protocol; 266 phi->dch.ch.protocol = hw->protocol;
268 phi->dch.ch.Flags = dch->Flags; 267 phi->dch.ch.Flags = dch->Flags;
269 phi->dch.state = dch->state; 268 phi->dch.state = dch->state;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 1b2239c1d569..dc1cded716c1 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
1437{ 1437{
1438 modem_info *info = (modem_info *) tty->driver_data; 1438 modem_info *info = (modem_info *) tty->driver_data;
1439 1439
1440 mutex_lock(&modem_info_mutex);
1440 if (!old_termios) 1441 if (!old_termios)
1441 isdn_tty_change_speed(info); 1442 isdn_tty_change_speed(info);
1442 else { 1443 else {
1443 if (tty->termios.c_cflag == old_termios->c_cflag && 1444 if (tty->termios.c_cflag == old_termios->c_cflag &&
1444 tty->termios.c_ispeed == old_termios->c_ispeed && 1445 tty->termios.c_ispeed == old_termios->c_ispeed &&
1445 tty->termios.c_ospeed == old_termios->c_ospeed) 1446 tty->termios.c_ospeed == old_termios->c_ospeed) {
1447 mutex_unlock(&modem_info_mutex);
1446 return; 1448 return;
1449 }
1447 isdn_tty_change_speed(info); 1450 isdn_tty_change_speed(info);
1448 } 1451 }
1452 mutex_unlock(&modem_info_mutex);
1449} 1453}
1450 1454
1451/* 1455/*
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74feee2b2..fd64df5a57a5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
318 318
319 /* Let the programs run for couple of ms and check the engine status */ 319 /* Let the programs run for couple of ms and check the engine status */
320 usleep_range(3000, 6000); 320 usleep_range(3000, 6000);
321 lp55xx_read(chip, LP5523_REG_STATUS, &status); 321 ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
322 if (ret)
323 return ret;
322 status &= LP5523_ENG_STATUS_MASK; 324 status &= LP5523_ENG_STATUS_MASK;
323 325
324 if (status != LP5523_ENG_STATUS_MASK) { 326 if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fd4af4de03b4..05ffffb8b769 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -207,15 +207,10 @@ static bool create_on_open = true;
207struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 207struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
208 struct mddev *mddev) 208 struct mddev *mddev)
209{ 209{
210 struct bio *b;
211
212 if (!mddev || !bioset_initialized(&mddev->bio_set)) 210 if (!mddev || !bioset_initialized(&mddev->bio_set))
213 return bio_alloc(gfp_mask, nr_iovecs); 211 return bio_alloc(gfp_mask, nr_iovecs);
214 212
215 b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); 213 return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
216 if (!b)
217 return NULL;
218 return b;
219} 214}
220EXPORT_SYMBOL_GPL(bio_alloc_mddev); 215EXPORT_SYMBOL_GPL(bio_alloc_mddev);
221 216
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 447baaebca44..cdb79ae2d8dc 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
218{ 218{
219 struct device *dev = &cio2->pci_dev->dev; 219 struct device *dev = &cio2->pci_dev->dev;
220 220
221 q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr, 221 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
222 GFP_KERNEL); 222 GFP_KERNEL);
223 if (!q->fbpt) 223 if (!q->fbpt)
224 return -ENOMEM; 224 return -ENOMEM;
225 225
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index e80123cba406..060c0ad6243a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
49 struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data; 49 struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
50 struct device *dev = &ctx->dev->plat_dev->dev; 50 struct device *dev = &ctx->dev->plat_dev->dev;
51 51
52 mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL); 52 mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
53 if (!mem->va) { 53 if (!mem->va) {
54 mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev), 54 mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
55 size); 55 size);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d01821a6906a..89d9c4c21037 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
807 struct vb2_v4l2_buffer *vbuf; 807 struct vb2_v4l2_buffer *vbuf;
808 unsigned long flags; 808 unsigned long flags;
809 809
810 cancel_delayed_work_sync(&dev->work_run); 810 if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
811 cancel_delayed_work_sync(&dev->work_run);
812
811 for (;;) { 813 for (;;) {
812 if (V4L2_TYPE_IS_OUTPUT(q->type)) 814 if (V4L2_TYPE_IS_OUTPUT(q->type))
813 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); 815 vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1441a73ce64c..90aad465f9ed 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only)
287 const struct v4l2_window *win; 287 const struct v4l2_window *win;
288 const struct v4l2_sdr_format *sdr; 288 const struct v4l2_sdr_format *sdr;
289 const struct v4l2_meta_format *meta; 289 const struct v4l2_meta_format *meta;
290 u32 planes;
290 unsigned i; 291 unsigned i;
291 292
292 pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); 293 pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only)
317 prt_names(mp->field, v4l2_field_names), 318 prt_names(mp->field, v4l2_field_names),
318 mp->colorspace, mp->num_planes, mp->flags, 319 mp->colorspace, mp->num_planes, mp->flags,
319 mp->ycbcr_enc, mp->quantization, mp->xfer_func); 320 mp->ycbcr_enc, mp->quantization, mp->xfer_func);
320 for (i = 0; i < mp->num_planes; i++) 321 planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
322 for (i = 0; i < planes; i++)
321 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, 323 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
322 mp->plane_fmt[i].bytesperline, 324 mp->plane_fmt[i].bytesperline,
323 mp->plane_fmt[i].sizeimage); 325 mp->plane_fmt[i].sizeimage);
@@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1551 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) 1553 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
1552 break; 1554 break;
1553 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1555 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1556 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1557 break;
1554 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1558 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1555 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1559 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1560 bytesperline);
1556 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); 1561 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
1557 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1562 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1558 if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) 1563 if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
@@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1581 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) 1586 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
1582 break; 1587 break;
1583 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1588 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1589 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1590 break;
1584 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1591 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1585 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1592 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1593 bytesperline);
1586 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); 1594 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
1587 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1595 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
1588 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) 1596 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
@@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1648 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) 1656 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
1649 break; 1657 break;
1650 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1658 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1659 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1660 break;
1651 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1661 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1652 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1662 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1663 bytesperline);
1653 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); 1664 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
1654 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1665 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1655 if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) 1666 if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
@@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1678 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) 1689 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
1679 break; 1690 break;
1680 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1691 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1692 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1693 break;
1681 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1694 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1682 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1695 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1696 bytesperline);
1683 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); 1697 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
1684 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1698 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
1685 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) 1699 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8c5dfdce4326..f461460a2aeb 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
102config MFD_AT91_USART 102config MFD_AT91_USART
103 tristate "AT91 USART Driver" 103 tristate "AT91 USART Driver"
104 select MFD_CORE 104 select MFD_CORE
105 depends on ARCH_AT91 || COMPILE_TEST
105 help 106 help
106 Select this to get support for AT91 USART IP. This is a wrapper 107 Select this to get support for AT91 USART IP. This is a wrapper
107 over at91-usart-serial driver and usart-spi-driver. Only one function 108 over at91-usart-serial driver and usart-spi-driver. Only one function
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30d09d177171..11ab17f64c64 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
261 mutex_unlock(&ab8500->lock); 261 mutex_unlock(&ab8500->lock);
262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); 262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
263 263
264 return ret; 264 return (ret < 0) ? ret : 0;
265} 265}
266 266
267static int ab8500_get_register(struct device *dev, u8 bank, 267static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e1450a56fc07..3c97f2c0fdfe 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -641,9 +641,9 @@ static const struct mfd_cell axp221_cells[] = {
641 641
642static const struct mfd_cell axp223_cells[] = { 642static const struct mfd_cell axp223_cells[] = {
643 { 643 {
644 .name = "axp221-pek", 644 .name = "axp221-pek",
645 .num_resources = ARRAY_SIZE(axp22x_pek_resources), 645 .num_resources = ARRAY_SIZE(axp22x_pek_resources),
646 .resources = axp22x_pek_resources, 646 .resources = axp22x_pek_resources,
647 }, { 647 }, {
648 .name = "axp22x-adc", 648 .name = "axp22x-adc",
649 .of_compatible = "x-powers,axp221-adc", 649 .of_compatible = "x-powers,axp221-adc",
@@ -651,7 +651,7 @@ static const struct mfd_cell axp223_cells[] = {
651 .name = "axp20x-battery-power-supply", 651 .name = "axp20x-battery-power-supply",
652 .of_compatible = "x-powers,axp221-battery-power-supply", 652 .of_compatible = "x-powers,axp221-battery-power-supply",
653 }, { 653 }, {
654 .name = "axp20x-regulator", 654 .name = "axp20x-regulator",
655 }, { 655 }, {
656 .name = "axp20x-ac-power-supply", 656 .name = "axp20x-ac-power-supply",
657 .of_compatible = "x-powers,axp221-ac-power-supply", 657 .of_compatible = "x-powers,axp221-ac-power-supply",
@@ -667,9 +667,9 @@ static const struct mfd_cell axp223_cells[] = {
667 667
668static const struct mfd_cell axp152_cells[] = { 668static const struct mfd_cell axp152_cells[] = {
669 { 669 {
670 .name = "axp20x-pek", 670 .name = "axp20x-pek",
671 .num_resources = ARRAY_SIZE(axp152_pek_resources), 671 .num_resources = ARRAY_SIZE(axp152_pek_resources),
672 .resources = axp152_pek_resources, 672 .resources = axp152_pek_resources,
673 }, 673 },
674}; 674};
675 675
@@ -698,87 +698,101 @@ static const struct resource axp288_charger_resources[] = {
698 698
699static const struct mfd_cell axp288_cells[] = { 699static const struct mfd_cell axp288_cells[] = {
700 { 700 {
701 .name = "axp288_adc", 701 .name = "axp288_adc",
702 .num_resources = ARRAY_SIZE(axp288_adc_resources), 702 .num_resources = ARRAY_SIZE(axp288_adc_resources),
703 .resources = axp288_adc_resources, 703 .resources = axp288_adc_resources,
704 }, 704 }, {
705 { 705 .name = "axp288_extcon",
706 .name = "axp288_extcon", 706 .num_resources = ARRAY_SIZE(axp288_extcon_resources),
707 .num_resources = ARRAY_SIZE(axp288_extcon_resources), 707 .resources = axp288_extcon_resources,
708 .resources = axp288_extcon_resources, 708 }, {
709 }, 709 .name = "axp288_charger",
710 { 710 .num_resources = ARRAY_SIZE(axp288_charger_resources),
711 .name = "axp288_charger", 711 .resources = axp288_charger_resources,
712 .num_resources = ARRAY_SIZE(axp288_charger_resources), 712 }, {
713 .resources = axp288_charger_resources, 713 .name = "axp288_fuel_gauge",
714 }, 714 .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
715 { 715 .resources = axp288_fuel_gauge_resources,
716 .name = "axp288_fuel_gauge", 716 }, {
717 .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), 717 .name = "axp221-pek",
718 .resources = axp288_fuel_gauge_resources, 718 .num_resources = ARRAY_SIZE(axp288_power_button_resources),
719 }, 719 .resources = axp288_power_button_resources,
720 { 720 }, {
721 .name = "axp221-pek", 721 .name = "axp288_pmic_acpi",
722 .num_resources = ARRAY_SIZE(axp288_power_button_resources),
723 .resources = axp288_power_button_resources,
724 },
725 {
726 .name = "axp288_pmic_acpi",
727 }, 722 },
728}; 723};
729 724
730static const struct mfd_cell axp803_cells[] = { 725static const struct mfd_cell axp803_cells[] = {
731 { 726 {
732 .name = "axp221-pek", 727 .name = "axp221-pek",
733 .num_resources = ARRAY_SIZE(axp803_pek_resources), 728 .num_resources = ARRAY_SIZE(axp803_pek_resources),
734 .resources = axp803_pek_resources, 729 .resources = axp803_pek_resources,
730 }, {
731 .name = "axp20x-gpio",
732 .of_compatible = "x-powers,axp813-gpio",
733 }, {
734 .name = "axp813-adc",
735 .of_compatible = "x-powers,axp813-adc",
736 }, {
737 .name = "axp20x-battery-power-supply",
738 .of_compatible = "x-powers,axp813-battery-power-supply",
739 }, {
740 .name = "axp20x-ac-power-supply",
741 .of_compatible = "x-powers,axp813-ac-power-supply",
742 .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
743 .resources = axp20x_ac_power_supply_resources,
735 }, 744 },
736 { .name = "axp20x-regulator" }, 745 { .name = "axp20x-regulator" },
737}; 746};
738 747
739static const struct mfd_cell axp806_self_working_cells[] = { 748static const struct mfd_cell axp806_self_working_cells[] = {
740 { 749 {
741 .name = "axp221-pek", 750 .name = "axp221-pek",
742 .num_resources = ARRAY_SIZE(axp806_pek_resources), 751 .num_resources = ARRAY_SIZE(axp806_pek_resources),
743 .resources = axp806_pek_resources, 752 .resources = axp806_pek_resources,
744 }, 753 },
745 { .name = "axp20x-regulator" }, 754 { .name = "axp20x-regulator" },
746}; 755};
747 756
748static const struct mfd_cell axp806_cells[] = { 757static const struct mfd_cell axp806_cells[] = {
749 { 758 {
750 .id = 2, 759 .id = 2,
751 .name = "axp20x-regulator", 760 .name = "axp20x-regulator",
752 }, 761 },
753}; 762};
754 763
755static const struct mfd_cell axp809_cells[] = { 764static const struct mfd_cell axp809_cells[] = {
756 { 765 {
757 .name = "axp221-pek", 766 .name = "axp221-pek",
758 .num_resources = ARRAY_SIZE(axp809_pek_resources), 767 .num_resources = ARRAY_SIZE(axp809_pek_resources),
759 .resources = axp809_pek_resources, 768 .resources = axp809_pek_resources,
760 }, { 769 }, {
761 .id = 1, 770 .id = 1,
762 .name = "axp20x-regulator", 771 .name = "axp20x-regulator",
763 }, 772 },
764}; 773};
765 774
766static const struct mfd_cell axp813_cells[] = { 775static const struct mfd_cell axp813_cells[] = {
767 { 776 {
768 .name = "axp221-pek", 777 .name = "axp221-pek",
769 .num_resources = ARRAY_SIZE(axp803_pek_resources), 778 .num_resources = ARRAY_SIZE(axp803_pek_resources),
770 .resources = axp803_pek_resources, 779 .resources = axp803_pek_resources,
771 }, { 780 }, {
772 .name = "axp20x-regulator", 781 .name = "axp20x-regulator",
773 }, { 782 }, {
774 .name = "axp20x-gpio", 783 .name = "axp20x-gpio",
775 .of_compatible = "x-powers,axp813-gpio", 784 .of_compatible = "x-powers,axp813-gpio",
776 }, { 785 }, {
777 .name = "axp813-adc", 786 .name = "axp813-adc",
778 .of_compatible = "x-powers,axp813-adc", 787 .of_compatible = "x-powers,axp813-adc",
779 }, { 788 }, {
780 .name = "axp20x-battery-power-supply", 789 .name = "axp20x-battery-power-supply",
781 .of_compatible = "x-powers,axp813-battery-power-supply", 790 .of_compatible = "x-powers,axp813-battery-power-supply",
791 }, {
792 .name = "axp20x-ac-power-supply",
793 .of_compatible = "x-powers,axp813-ac-power-supply",
794 .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
795 .resources = axp20x_ac_power_supply_resources,
782 }, 796 },
783}; 797};
784 798
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 503979c81dae..fab3cdc27ed6 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
59}; 59};
60 60
61static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { 61static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
62 regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
62 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), 63 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
63 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), 64 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
64 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), 65 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index b99a194ce5a4..2d0fee488c5a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
499 499
500 cros_ec_debugfs_remove(ec); 500 cros_ec_debugfs_remove(ec);
501 501
502 mfd_remove_devices(ec->dev);
502 cdev_del(&ec->cdev); 503 cdev_del(&ec->cdev);
503 device_unregister(&ec->class_dev); 504 device_unregister(&ec->class_dev);
504 return 0; 505 return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5970b8def548..aec20e1c7d3d 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
2584 .irq_unmask = prcmu_irq_unmask, 2584 .irq_unmask = prcmu_irq_unmask,
2585}; 2585};
2586 2586
2587static __init char *fw_project_name(u32 project) 2587static char *fw_project_name(u32 project)
2588{ 2588{
2589 switch (project) { 2589 switch (project) {
2590 case PRCMU_FW_PROJECT_U8500: 2590 case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); 2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
2733} 2733}
2734 2734
2735static void __init init_prcm_registers(void) 2735static void init_prcm_registers(void)
2736{ 2736{
2737 u32 val; 2737 u32 val;
2738 2738
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index ca829f85672f..2713de989f05 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -82,11 +82,13 @@ static void exynos_lpass_enable(struct exynos_lpass *lpass)
82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
83 83
84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, 84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK,
85 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 85 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S |
86 LPASS_INTR_UART);
86 87
87 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); 88 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET);
88 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); 89 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET);
89 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); 90 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET);
91 exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET);
90} 92}
91 93
92static void exynos_lpass_disable(struct exynos_lpass *lpass) 94static void exynos_lpass_disable(struct exynos_lpass *lpass)
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 440030cecbbd..2a77988d0462 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -15,6 +15,7 @@
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/mfd/core.h> 16#include <linux/mfd/core.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/mutex.h>
18#include <linux/notifier.h> 19#include <linux/notifier.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
@@ -155,7 +156,7 @@ static int madera_wait_for_boot(struct madera *madera)
155 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, 156 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
156 MADERA_BOOT_POLL_INTERVAL_USEC); 157 MADERA_BOOT_POLL_INTERVAL_USEC);
157 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); 158 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
158 }; 159 }
159 160
160 if (!(val & MADERA_BOOT_DONE_STS1)) { 161 if (!(val & MADERA_BOOT_DONE_STS1)) {
161 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); 162 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
@@ -357,6 +358,8 @@ int madera_dev_init(struct madera *madera)
357 358
358 dev_set_drvdata(madera->dev, madera); 359 dev_set_drvdata(madera->dev, madera);
359 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); 360 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier);
361 mutex_init(&madera->dapm_ptr_lock);
362
360 madera_set_micbias_info(madera); 363 madera_set_micbias_info(madera);
361 364
362 /* 365 /*
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index d8217366ed36..d8ddd1a6f304 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -280,7 +280,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
280 280
281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { 281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
282 sprintf(fps_name, "fps%d", fps_id); 282 sprintf(fps_name, "fps%d", fps_id);
283 if (!strcmp(fps_np->name, fps_name)) 283 if (of_node_name_eq(fps_np, fps_name))
284 break; 284 break;
285 } 285 }
286 286
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index f475e848252f..d0bf50e3568d 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
274 274
275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING; 275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
276 276
277 mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); 277 ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
278 if (ret)
279 goto out;
278 280
279 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | 281 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
280 MC13XXX_ADC0_CHRGRAWDIV; 282 MC13XXX_ADC0_CHRGRAWDIV;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 77b64bd64df3..ab24e176ef44 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
329 329
330 default: 330 default:
331 dev_err(&pdev->dev, "unsupported chip: %d\n", id); 331 dev_err(&pdev->dev, "unsupported chip: %d\n", id);
332 ret = -ENODEV; 332 return -ENODEV;
333 break;
334 } 333 }
335 334
336 if (ret) { 335 if (ret) {
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 52fafea06067..8d420c37b2a6 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
638 return -EFAULT; 638 return -EFAULT;
639 } 639 }
640 640
641 writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
642 writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
643 writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
644
641 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], 645 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
642 fw_version[1], 646 fw_version[1],
643 fw_version[2]); 647 fw_version[2]);
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 2a8369657e38..26c7b63e008a 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -109,7 +109,7 @@ struct rave_sp_reply {
109/** 109/**
110 * struct rave_sp_checksum - Variant specific checksum implementation details 110 * struct rave_sp_checksum - Variant specific checksum implementation details
111 * 111 *
112 * @length: Caculated checksum length 112 * @length: Calculated checksum length
113 * @subroutine: Utilized checksum algorithm implementation 113 * @subroutine: Utilized checksum algorithm implementation
114 */ 114 */
115struct rave_sp_checksum { 115struct rave_sp_checksum {
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 566caca4efd8..7569a4be0608 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1302,17 +1302,17 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
1302 pdata->autosleep = (pdata->autosleep_timeout) ? true : false; 1302 pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
1303 1303
1304 for_each_child_of_node(np, child) { 1304 for_each_child_of_node(np, child) {
1305 if (!strcmp(child->name, "stmpe_gpio")) { 1305 if (of_node_name_eq(child, "stmpe_gpio")) {
1306 pdata->blocks |= STMPE_BLOCK_GPIO; 1306 pdata->blocks |= STMPE_BLOCK_GPIO;
1307 } else if (!strcmp(child->name, "stmpe_keypad")) { 1307 } else if (of_node_name_eq(child, "stmpe_keypad")) {
1308 pdata->blocks |= STMPE_BLOCK_KEYPAD; 1308 pdata->blocks |= STMPE_BLOCK_KEYPAD;
1309 } else if (!strcmp(child->name, "stmpe_touchscreen")) { 1309 } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
1310 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; 1310 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
1311 } else if (!strcmp(child->name, "stmpe_adc")) { 1311 } else if (of_node_name_eq(child, "stmpe_adc")) {
1312 pdata->blocks |= STMPE_BLOCK_ADC; 1312 pdata->blocks |= STMPE_BLOCK_ADC;
1313 } else if (!strcmp(child->name, "stmpe_pwm")) { 1313 } else if (of_node_name_eq(child, "stmpe_pwm")) {
1314 pdata->blocks |= STMPE_BLOCK_PWM; 1314 pdata->blocks |= STMPE_BLOCK_PWM;
1315 } else if (!strcmp(child->name, "stmpe_rotator")) { 1315 } else if (of_node_name_eq(child, "stmpe_rotator")) {
1316 pdata->blocks |= STMPE_BLOCK_ROTATOR; 1316 pdata->blocks |= STMPE_BLOCK_ROTATOR;
1317 } 1317 }
1318 } 1318 }
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c2d47d78705b..fd111296b959 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
264 cell->pdata_size = sizeof(tscadc); 264 cell->pdata_size = sizeof(tscadc);
265 } 265 }
266 266
267 err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, 267 err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
268 tscadc->used_cells, NULL, 0, NULL); 268 tscadc->cells, tscadc->used_cells, NULL,
269 0, NULL);
269 if (err < 0) 270 if (err < 0)
270 goto err_disable_clk; 271 goto err_disable_clk;
271 272
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 910f569ff77c..8bcdecf494d0 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
235 235
236 mutex_init(&tps->tps_lock); 236 mutex_init(&tps->tps_lock);
237 237
238 ret = regmap_add_irq_chip(tps->regmap, tps->irq, 238 ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
239 IRQF_ONESHOT, 0, &tps65218_irq_chip, 239 IRQF_ONESHOT, 0, &tps65218_irq_chip,
240 &tps->irq_data); 240 &tps->irq_data);
241 if (ret < 0) 241 if (ret < 0)
242 return ret; 242 return ret;
243 243
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
253 ARRAY_SIZE(tps65218_cells), NULL, 0, 253 ARRAY_SIZE(tps65218_cells), NULL, 0,
254 regmap_irq_get_domain(tps->irq_data)); 254 regmap_irq_get_domain(tps->irq_data));
255 255
256 if (ret < 0)
257 goto err_irq;
258
259 return 0;
260
261err_irq:
262 regmap_del_irq_chip(tps->irq, tps->irq_data);
263
264 return ret; 256 return ret;
265} 257}
266 258
267static int tps65218_remove(struct i2c_client *client)
268{
269 struct tps65218 *tps = i2c_get_clientdata(client);
270
271 regmap_del_irq_chip(tps->irq, tps->irq_data);
272
273 return 0;
274}
275
276static const struct i2c_device_id tps65218_id_table[] = { 259static const struct i2c_device_id tps65218_id_table[] = {
277 { "tps65218", TPS65218 }, 260 { "tps65218", TPS65218 },
278 { }, 261 { },
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
285 .of_match_table = of_tps65218_match_table, 268 .of_match_table = of_tps65218_match_table,
286 }, 269 },
287 .probe = tps65218_probe, 270 .probe = tps65218_probe,
288 .remove = tps65218_remove,
289 .id_table = tps65218_id_table, 271 .id_table = tps65218_id_table,
290}; 272};
291 273
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b89379782741..9c7925ca13cf 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
592 return 0; 592 return 0;
593} 593}
594 594
595static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
596{
597 struct tps6586x *tps6586x = dev_get_drvdata(dev);
598
599 if (tps6586x->client->irq)
600 disable_irq(tps6586x->client->irq);
601
602 return 0;
603}
604
605static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
606{
607 struct tps6586x *tps6586x = dev_get_drvdata(dev);
608
609 if (tps6586x->client->irq)
610 enable_irq(tps6586x->client->irq);
611
612 return 0;
613}
614
615static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
616 tps6586x_i2c_resume);
617
595static const struct i2c_device_id tps6586x_id_table[] = { 618static const struct i2c_device_id tps6586x_id_table[] = {
596 { "tps6586x", 0 }, 619 { "tps6586x", 0 },
597 { }, 620 { },
@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
602 .driver = { 625 .driver = {
603 .name = "tps6586x", 626 .name = "tps6586x",
604 .of_match_table = of_match_ptr(tps6586x_of_match), 627 .of_match_table = of_match_ptr(tps6586x_of_match),
628 .pm = &tps6586x_pm_ops,
605 }, 629 },
606 .probe = tps6586x_i2c_probe, 630 .probe = tps6586x_i2c_probe,
607 .remove = tps6586x_i2c_remove, 631 .remove = tps6586x_i2c_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4be3d239da9e..299016bc46d9 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
979 * letting it generate the right frequencies for USB, MADC, and 979 * letting it generate the right frequencies for USB, MADC, and
980 * other purposes. 980 * other purposes.
981 */ 981 */
982static inline int __init protect_pm_master(void) 982static inline int protect_pm_master(void)
983{ 983{
984 int e = 0; 984 int e = 0;
985 985
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
988 return e; 988 return e;
989} 989}
990 990
991static inline int __init unprotect_pm_master(void) 991static inline int unprotect_pm_master(void)
992{ 992{
993 int e = 0; 993 int e = 0;
994 994
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 1ee68bd440fb..16c6e2accfaa 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ 1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ 1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ 1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1621 { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1621 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ 1622 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1622 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ 1623 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1623 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ 1624 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
2869 case ARIZONA_ASRC_ENABLE: 2870 case ARIZONA_ASRC_ENABLE:
2870 case ARIZONA_ASRC_STATUS: 2871 case ARIZONA_ASRC_STATUS:
2871 case ARIZONA_ASRC_RATE1: 2872 case ARIZONA_ASRC_RATE1:
2873 case ARIZONA_ASRC_RATE2:
2872 case ARIZONA_ISRC_1_CTRL_1: 2874 case ARIZONA_ISRC_1_CTRL_1:
2873 case ARIZONA_ISRC_1_CTRL_2: 2875 case ARIZONA_ISRC_1_CTRL_2:
2874 case ARIZONA_ISRC_1_CTRL_3: 2876 case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index efe2fb72d54b..25265fd0fd6e 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
218 if (get_order(size) >= MAX_ORDER) 218 if (get_order(size) >= MAX_ORDER)
219 return NULL; 219 return NULL;
220 220
221 return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, 221 return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
222 GFP_KERNEL); 222 GFP_KERNEL);
223} 223}
224 224
225void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, 225void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 6b212c8b78e7..2bfa3a903bf9 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -394,16 +394,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
394 struct _vop_vdev *vdev = to_vopvdev(dev); 394 struct _vop_vdev *vdev = to_vopvdev(dev);
395 struct vop_device *vpdev = vdev->vpdev; 395 struct vop_device *vpdev = vdev->vpdev;
396 struct mic_device_ctrl __iomem *dc = vdev->dc; 396 struct mic_device_ctrl __iomem *dc = vdev->dc;
397 int i, err, retry; 397 int i, err, retry, queue_idx = 0;
398 398
399 /* We must have this many virtqueues. */ 399 /* We must have this many virtqueues. */
400 if (nvqs > ioread8(&vdev->desc->num_vq)) 400 if (nvqs > ioread8(&vdev->desc->num_vq))
401 return -ENOENT; 401 return -ENOENT;
402 402
403 for (i = 0; i < nvqs; ++i) { 403 for (i = 0; i < nvqs; ++i) {
404 if (!names[i]) {
405 vqs[i] = NULL;
406 continue;
407 }
408
404 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", 409 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
405 __func__, i, names[i]); 410 __func__, i, names[i]);
406 vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], 411 vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
407 ctx ? ctx[i] : false); 412 ctx ? ctx[i] : false);
408 if (IS_ERR(vqs[i])) { 413 if (IS_ERR(vqs[i])) {
409 err = PTR_ERR(vqs[i]); 414 err = PTR_ERR(vqs[i]);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f57f5de54206..cf58ccaf22d5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -234,7 +234,7 @@ int mmc_of_parse(struct mmc_host *host)
234 if (device_property_read_bool(dev, "broken-cd")) 234 if (device_property_read_bool(dev, "broken-cd"))
235 host->caps |= MMC_CAP_NEEDS_POLL; 235 host->caps |= MMC_CAP_NEEDS_POLL;
236 236
237 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 ret = mmc_gpiod_request_cd(host, "cd", 0, false,
238 cd_debounce_delay_ms * 1000, 238 cd_debounce_delay_ms * 1000,
239 &cd_gpio_invert); 239 &cd_gpio_invert);
240 if (!ret) 240 if (!ret)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a22e11a65658..eba9bcc92ad3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host)
3763 * Use zalloc to zero the reserved high 32-bits of 128-bit 3763 * Use zalloc to zero the reserved high 32-bits of 128-bit
3764 * descriptors so that they never need to be written. 3764 * descriptors so that they never need to be written.
3765 */ 3765 */
3766 buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3766 buf = dma_alloc_coherent(mmc_dev(mmc),
3767 host->adma_table_sz, &dma, GFP_KERNEL); 3767 host->align_buffer_sz + host->adma_table_sz,
3768 &dma, GFP_KERNEL);
3768 if (!buf) { 3769 if (!buf) {
3769 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3770 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3770 mmc_hostname(mmc)); 3771 mmc_hostname(mmc));
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 21e3cdc04036..999b705769a8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -522,7 +522,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
522 mtd->nvmem = nvmem_register(&config); 522 mtd->nvmem = nvmem_register(&config);
523 if (IS_ERR(mtd->nvmem)) { 523 if (IS_ERR(mtd->nvmem)) {
524 /* Just ignore if there is no NVMEM support in the kernel */ 524 /* Just ignore if there is no NVMEM support in the kernel */
525 if (PTR_ERR(mtd->nvmem) == -ENOSYS) { 525 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
526 mtd->nvmem = NULL; 526 mtd->nvmem = NULL;
527 } else { 527 } else {
528 dev_err(&mtd->dev, "Failed to register NVMEM device\n"); 528 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 9887bda317cd..b31c868019ad 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -7,7 +7,7 @@
7extern struct mutex mtd_table_mutex; 7extern struct mutex mtd_table_mutex;
8 8
9struct mtd_info *__mtd_next_device(int i); 9struct mtd_info *__mtd_next_device(int i);
10int add_mtd_device(struct mtd_info *mtd); 10int __must_check add_mtd_device(struct mtd_info *mtd);
11int del_mtd_device(struct mtd_info *mtd); 11int del_mtd_device(struct mtd_info *mtd);
12int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); 12int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
13int del_mtd_partitions(struct mtd_info *); 13int del_mtd_partitions(struct mtd_info *);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index b6af41b04622..60104e1079c5 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -618,10 +618,22 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
618 list_add(&new->list, &mtd_partitions); 618 list_add(&new->list, &mtd_partitions);
619 mutex_unlock(&mtd_partitions_mutex); 619 mutex_unlock(&mtd_partitions_mutex);
620 620
621 add_mtd_device(&new->mtd); 621 ret = add_mtd_device(&new->mtd);
622 if (ret)
623 goto err_remove_part;
622 624
623 mtd_add_partition_attrs(new); 625 mtd_add_partition_attrs(new);
624 626
627 return 0;
628
629err_remove_part:
630 mutex_lock(&mtd_partitions_mutex);
631 list_del(&new->list);
632 mutex_unlock(&mtd_partitions_mutex);
633
634 free_partition(new);
635 pr_info("%s:%i\n", __func__, __LINE__);
636
625 return ret; 637 return ret;
626} 638}
627EXPORT_SYMBOL_GPL(mtd_add_partition); 639EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -712,22 +724,31 @@ int add_mtd_partitions(struct mtd_info *master,
712{ 724{
713 struct mtd_part *slave; 725 struct mtd_part *slave;
714 uint64_t cur_offset = 0; 726 uint64_t cur_offset = 0;
715 int i; 727 int i, ret;
716 728
717 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 729 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
718 730
719 for (i = 0; i < nbparts; i++) { 731 for (i = 0; i < nbparts; i++) {
720 slave = allocate_partition(master, parts + i, i, cur_offset); 732 slave = allocate_partition(master, parts + i, i, cur_offset);
721 if (IS_ERR(slave)) { 733 if (IS_ERR(slave)) {
722 del_mtd_partitions(master); 734 ret = PTR_ERR(slave);
723 return PTR_ERR(slave); 735 goto err_del_partitions;
724 } 736 }
725 737
726 mutex_lock(&mtd_partitions_mutex); 738 mutex_lock(&mtd_partitions_mutex);
727 list_add(&slave->list, &mtd_partitions); 739 list_add(&slave->list, &mtd_partitions);
728 mutex_unlock(&mtd_partitions_mutex); 740 mutex_unlock(&mtd_partitions_mutex);
729 741
730 add_mtd_device(&slave->mtd); 742 ret = add_mtd_device(&slave->mtd);
743 if (ret) {
744 mutex_lock(&mtd_partitions_mutex);
745 list_del(&slave->list);
746 mutex_unlock(&mtd_partitions_mutex);
747
748 free_partition(slave);
749 goto err_del_partitions;
750 }
751
731 mtd_add_partition_attrs(slave); 752 mtd_add_partition_attrs(slave);
732 /* Look for subpartitions */ 753 /* Look for subpartitions */
733 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); 754 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
@@ -736,6 +757,11 @@ int add_mtd_partitions(struct mtd_info *master,
736 } 757 }
737 758
738 return 0; 759 return 0;
760
761err_del_partitions:
762 del_mtd_partitions(master);
763
764 return ret;
739} 765}
740 766
741static DEFINE_SPINLOCK(part_parser_lock); 767static DEFINE_SPINLOCK(part_parser_lock);
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index eebac35304c6..6e8edc9375dd 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali)
1322 } 1322 }
1323 1323
1324 /* clk rate info is needed for setup_data_interface */ 1324 /* clk rate info is needed for setup_data_interface */
1325 if (denali->clk_rate && denali->clk_x_rate) 1325 if (!denali->clk_rate || !denali->clk_x_rate)
1326 chip->options |= NAND_KEEP_TIMINGS; 1326 chip->options |= NAND_KEEP_TIMINGS;
1327 1327
1328 chip->legacy.dummy_controller.ops = &denali_controller_ops; 1328 chip->legacy.dummy_controller.ops = &denali_controller_ops;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 325b4414dccc..c9149a37f8f0 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); 593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
594} 594}
595 595
596/* fsmc_select_chip - assert or deassert nCE */
597static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
598{
599 u32 pc = readl(host->regs_va + FSMC_PC);
600
601 if (!assert)
602 writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
603 else
604 writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
605
606 /*
607 * nCE line changes must be applied before returning from this
608 * function.
609 */
610 mb();
611}
612
613/* 596/*
614 * fsmc_exec_op - hook called by the core to execute NAND operations 597 * fsmc_exec_op - hook called by the core to execute NAND operations
615 * 598 *
@@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
627 610
628 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); 611 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
629 612
630 fsmc_ce_ctrl(host, true);
631
632 for (op_id = 0; op_id < op->ninstrs; op_id++) { 613 for (op_id = 0; op_id < op->ninstrs; op_id++) {
633 instr = &op->instrs[op_id]; 614 instr = &op->instrs[op_id];
634 615
@@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
686 } 667 }
687 } 668 }
688 669
689 fsmc_ce_ctrl(host, false);
690
691 return ret; 670 return ret;
692} 671}
693 672
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index f92ae5aa2a54..9526d5b23c80 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat,
260} 260}
261 261
262static int jz_nand_ioremap_resource(struct platform_device *pdev, 262static int jz_nand_ioremap_resource(struct platform_device *pdev,
263 const char *name, struct resource **res, void *__iomem *base) 263 const char *name, struct resource **res, void __iomem **base)
264{ 264{
265 int ret; 265 int ret;
266 266
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 46c62a31fa46..920e7375084f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2833,6 +2833,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2833 if (ret) 2833 if (ret)
2834 return ret; 2834 return ret;
2835 2835
2836 if (nandc->props->is_bam) {
2837 free_bam_transaction(nandc);
2838 nandc->bam_txn = alloc_bam_transaction(nandc);
2839 if (!nandc->bam_txn) {
2840 dev_err(nandc->dev,
2841 "failed to allocate bam transaction\n");
2842 return -ENOMEM;
2843 }
2844 }
2845
2836 ret = mtd_device_register(mtd, NULL, 0); 2846 ret = mtd_device_register(mtd, NULL, 0);
2837 if (ret) 2847 if (ret)
2838 nand_cleanup(chip); 2848 nand_cleanup(chip);
@@ -2847,16 +2857,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2847 struct qcom_nand_host *host; 2857 struct qcom_nand_host *host;
2848 int ret; 2858 int ret;
2849 2859
2850 if (nandc->props->is_bam) {
2851 free_bam_transaction(nandc);
2852 nandc->bam_txn = alloc_bam_transaction(nandc);
2853 if (!nandc->bam_txn) {
2854 dev_err(nandc->dev,
2855 "failed to allocate bam transaction\n");
2856 return -ENOMEM;
2857 }
2858 }
2859
2860 for_each_available_child_of_node(dn, child) { 2860 for_each_available_child_of_node(dn, child) {
2861 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 2861 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2862 if (!host) { 2862 if (!host) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6371958dd170..edb1c023a753 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -519,7 +519,7 @@ config NET_FAILOVER
519 and destroy a failover master netdev and manages a primary and 519 and destroy a failover master netdev and manages a primary and
520 standby slave netdevs that get registered via the generic failover 520 standby slave netdevs that get registered via the generic failover
521 infrastructure. This can be used by paravirtual drivers to enable 521 infrastructure. This can be used by paravirtual drivers to enable
522 an alternate low latency datapath. It alsoenables live migration of 522 an alternate low latency datapath. It also enables live migration of
523 a VM with direct attached VF by failing over to the paravirtual 523 a VM with direct attached VF by failing over to the paravirtual
524 datapath when the VF is unplugged. 524 datapath when the VF is unplugged.
525 525
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a9d597f28023..485462d3087f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1963,6 +1963,9 @@ static int __bond_release_one(struct net_device *bond_dev,
1963 if (!bond_has_slaves(bond)) { 1963 if (!bond_has_slaves(bond)) {
1964 bond_set_carrier(bond); 1964 bond_set_carrier(bond);
1965 eth_hw_addr_random(bond_dev); 1965 eth_hw_addr_random(bond_dev);
1966 bond->nest_level = SINGLE_DEPTH_NESTING;
1967 } else {
1968 bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1966 } 1969 }
1967 1970
1968 unblock_netpoll_tx(); 1971 unblock_netpoll_tx();
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 3b12e2dcff31..8a5111f9414c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -7,7 +7,6 @@
7 7
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/gpio.h>
11#include <linux/gpio/consumer.h> 10#include <linux/gpio/consumer.h>
12#include <linux/kernel.h> 11#include <linux/kernel.h>
13#include <linux/module.h> 12#include <linux/module.h>
@@ -15,7 +14,6 @@
15#include <linux/phy.h> 14#include <linux/phy.h>
16#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
17#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
18#include <linux/of_gpio.h>
19#include <linux/of_net.h> 17#include <linux/of_net.h>
20#include <net/dsa.h> 18#include <net/dsa.h>
21#include <net/switchdev.h> 19#include <net/switchdev.h>
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 74547f43b938..a8a2c728afba 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,7 +18,6 @@
18#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/of_gpio.h>
22#include <linux/of_mdio.h> 21#include <linux/of_mdio.h>
23#include <linux/of_net.h> 22#include <linux/of_net.h>
24#include <linux/of_platform.h> 23#include <linux/of_platform.h>
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8a517d8fb9d1..8dca2c949e73 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2403,6 +2403,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
2403 return mv88e6xxx_g1_stats_clear(chip); 2403 return mv88e6xxx_g1_stats_clear(chip);
2404} 2404}
2405 2405
2406/* The mv88e6390 has some hidden registers used for debug and
2407 * development. The errata also makes use of them.
2408 */
2409static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
2410 int reg, u16 val)
2411{
2412 u16 ctrl;
2413 int err;
2414
2415 err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
2416 PORT_RESERVED_1A, val);
2417 if (err)
2418 return err;
2419
2420 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
2421 PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2422 reg;
2423
2424 return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2425 PORT_RESERVED_1A, ctrl);
2426}
2427
2428static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
2429{
2430 return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
2431 PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
2432}
2433
2434
2435static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
2436 int reg, u16 *val)
2437{
2438 u16 ctrl;
2439 int err;
2440
2441 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
2442 PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2443 reg;
2444
2445 err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2446 PORT_RESERVED_1A, ctrl);
2447 if (err)
2448 return err;
2449
2450 err = mv88e6390_hidden_wait(chip);
2451 if (err)
2452 return err;
2453
2454 return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
2455 PORT_RESERVED_1A, val);
2456}
2457
2458/* Check if the errata has already been applied. */
2459static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
2460{
2461 int port;
2462 int err;
2463 u16 val;
2464
2465 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2466 err = mv88e6390_hidden_read(chip, port, 0, &val);
2467 if (err) {
2468 dev_err(chip->dev,
2469 "Error reading hidden register: %d\n", err);
2470 return false;
2471 }
2472 if (val != 0x01c0)
2473 return false;
2474 }
2475
2476 return true;
2477}
2478
2479/* The 6390 copper ports have an errata which require poking magic
2480 * values into undocumented hidden registers and then performing a
2481 * software reset.
2482 */
2483static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
2484{
2485 int port;
2486 int err;
2487
2488 if (mv88e6390_setup_errata_applied(chip))
2489 return 0;
2490
2491 /* Set the ports into blocking mode */
2492 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2493 err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
2494 if (err)
2495 return err;
2496 }
2497
2498 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2499 err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
2500 if (err)
2501 return err;
2502 }
2503
2504 return mv88e6xxx_software_reset(chip);
2505}
2506
2406static int mv88e6xxx_setup(struct dsa_switch *ds) 2507static int mv88e6xxx_setup(struct dsa_switch *ds)
2407{ 2508{
2408 struct mv88e6xxx_chip *chip = ds->priv; 2509 struct mv88e6xxx_chip *chip = ds->priv;
@@ -2415,6 +2516,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2415 2516
2416 mutex_lock(&chip->reg_lock); 2517 mutex_lock(&chip->reg_lock);
2417 2518
2519 if (chip->info->ops->setup_errata) {
2520 err = chip->info->ops->setup_errata(chip);
2521 if (err)
2522 goto unlock;
2523 }
2524
2418 /* Cache the cmode of each port. */ 2525 /* Cache the cmode of each port. */
2419 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { 2526 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2420 if (chip->info->ops->port_get_cmode) { 2527 if (chip->info->ops->port_get_cmode) {
@@ -3226,6 +3333,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
3226 3333
3227static const struct mv88e6xxx_ops mv88e6190_ops = { 3334static const struct mv88e6xxx_ops mv88e6190_ops = {
3228 /* MV88E6XXX_FAMILY_6390 */ 3335 /* MV88E6XXX_FAMILY_6390 */
3336 .setup_errata = mv88e6390_setup_errata,
3229 .irl_init_all = mv88e6390_g2_irl_init_all, 3337 .irl_init_all = mv88e6390_g2_irl_init_all,
3230 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3338 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3231 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3339 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3269,6 +3377,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
3269 3377
3270static const struct mv88e6xxx_ops mv88e6190x_ops = { 3378static const struct mv88e6xxx_ops mv88e6190x_ops = {
3271 /* MV88E6XXX_FAMILY_6390 */ 3379 /* MV88E6XXX_FAMILY_6390 */
3380 .setup_errata = mv88e6390_setup_errata,
3272 .irl_init_all = mv88e6390_g2_irl_init_all, 3381 .irl_init_all = mv88e6390_g2_irl_init_all,
3273 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3382 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3274 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3383 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3312,6 +3421,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
3312 3421
3313static const struct mv88e6xxx_ops mv88e6191_ops = { 3422static const struct mv88e6xxx_ops mv88e6191_ops = {
3314 /* MV88E6XXX_FAMILY_6390 */ 3423 /* MV88E6XXX_FAMILY_6390 */
3424 .setup_errata = mv88e6390_setup_errata,
3315 .irl_init_all = mv88e6390_g2_irl_init_all, 3425 .irl_init_all = mv88e6390_g2_irl_init_all,
3316 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3426 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3317 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3427 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3404,6 +3514,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
3404 3514
3405static const struct mv88e6xxx_ops mv88e6290_ops = { 3515static const struct mv88e6xxx_ops mv88e6290_ops = {
3406 /* MV88E6XXX_FAMILY_6390 */ 3516 /* MV88E6XXX_FAMILY_6390 */
3517 .setup_errata = mv88e6390_setup_errata,
3407 .irl_init_all = mv88e6390_g2_irl_init_all, 3518 .irl_init_all = mv88e6390_g2_irl_init_all,
3408 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3519 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3409 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3520 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3709,6 +3820,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
3709 3820
3710static const struct mv88e6xxx_ops mv88e6390_ops = { 3821static const struct mv88e6xxx_ops mv88e6390_ops = {
3711 /* MV88E6XXX_FAMILY_6390 */ 3822 /* MV88E6XXX_FAMILY_6390 */
3823 .setup_errata = mv88e6390_setup_errata,
3712 .irl_init_all = mv88e6390_g2_irl_init_all, 3824 .irl_init_all = mv88e6390_g2_irl_init_all,
3713 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3825 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3714 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3826 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3756,6 +3868,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
3756 3868
3757static const struct mv88e6xxx_ops mv88e6390x_ops = { 3869static const struct mv88e6xxx_ops mv88e6390x_ops = {
3758 /* MV88E6XXX_FAMILY_6390 */ 3870 /* MV88E6XXX_FAMILY_6390 */
3871 .setup_errata = mv88e6390_setup_errata,
3759 .irl_init_all = mv88e6390_g2_irl_init_all, 3872 .irl_init_all = mv88e6390_g2_irl_init_all,
3760 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3873 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3761 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3874 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb7872d32..546651d8c3e1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
300}; 300};
301 301
302struct mv88e6xxx_ops { 302struct mv88e6xxx_ops {
303 /* Switch Setup Errata, called early in the switch setup to
304 * allow any errata actions to be performed
305 */
306 int (*setup_errata)(struct mv88e6xxx_chip *chip);
307
303 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); 308 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
304 int (*ip_pri_map)(struct mv88e6xxx_chip *chip); 309 int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
305 310
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0d81866d0e4a..e583641de758 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -251,6 +251,16 @@
251/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ 251/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
252#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 252#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
253 253
254/* Offset 0x1a: Magic undocumented errata register */
255#define PORT_RESERVED_1A 0x1a
256#define PORT_RESERVED_1A_BUSY BIT(15)
257#define PORT_RESERVED_1A_WRITE BIT(14)
258#define PORT_RESERVED_1A_READ 0
259#define PORT_RESERVED_1A_PORT_SHIFT 5
260#define PORT_RESERVED_1A_BLOCK (0xf << 10)
261#define PORT_RESERVED_1A_CTRL_PORT 4
262#define PORT_RESERVED_1A_DATA_PORT 5
263
254int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, 264int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
255 u16 *val); 265 u16 *val);
256int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, 266int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a1d095..ad41ec63cc9f 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
347 struct device_node *mdio_np; 347 struct device_node *mdio_np;
348 int ret; 348 int ret;
349 349
350 mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, 350 mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
351 "realtek,smi-mdio");
352 if (!mdio_np) { 351 if (!mdio_np) {
353 dev_err(smi->dev, "no MDIO bus node\n"); 352 dev_err(smi->dev, "no MDIO bus node\n");
354 return -ENODEV; 353 return -ENODEV;
355 } 354 }
356 355
357 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); 356 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
358 if (!smi->slave_mii_bus) 357 if (!smi->slave_mii_bus) {
359 return -ENOMEM; 358 ret = -ENOMEM;
359 goto err_put_node;
360 }
360 smi->slave_mii_bus->priv = smi; 361 smi->slave_mii_bus->priv = smi;
361 smi->slave_mii_bus->name = "SMI slave MII"; 362 smi->slave_mii_bus->name = "SMI slave MII";
362 smi->slave_mii_bus->read = realtek_smi_mdio_read; 363 smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
371 if (ret) { 372 if (ret) {
372 dev_err(smi->dev, "unable to register MDIO bus %s\n", 373 dev_err(smi->dev, "unable to register MDIO bus %s\n",
373 smi->slave_mii_bus->id); 374 smi->slave_mii_bus->id);
374 of_node_put(mdio_np); 375 goto err_put_node;
375 } 376 }
376 377
377 return 0; 378 return 0;
379
380err_put_node:
381 of_node_put(mdio_np);
382
383 return ret;
378} 384}
379 385
380static int realtek_smi_probe(struct platform_device *pdev) 386static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
457 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); 463 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
458 464
459 dsa_unregister_switch(smi->ds); 465 dsa_unregister_switch(smi->ds);
466 if (smi->slave_mii_bus)
467 of_node_put(smi->slave_mii_bus->dev.of_node);
460 gpiod_set_value(smi->reset, 1); 468 gpiod_set_value(smi->reset, 1);
461 469
462 return 0; 470 return 0;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 91fc64c1145e..47e5984f16fb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev)
1433 } 1433 }
1434 1434
1435 /* Allocate TX descriptor ring in coherent memory */ 1435 /* Allocate TX descriptor ring in coherent memory */
1436 greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1436 greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1437 &greth->tx_bd_base_phys, 1437 &greth->tx_bd_base_phys,
1438 GFP_KERNEL); 1438 GFP_KERNEL);
1439 if (!greth->tx_bd_base) { 1439 if (!greth->tx_bd_base) {
1440 err = -ENOMEM; 1440 err = -ENOMEM;
1441 goto error3; 1441 goto error3;
1442 } 1442 }
1443 1443
1444 /* Allocate RX descriptor ring in coherent memory */ 1444 /* Allocate RX descriptor ring in coherent memory */
1445 greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1445 greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1446 &greth->rx_bd_base_phys, 1446 &greth->rx_bd_base_phys,
1447 GFP_KERNEL); 1447 GFP_KERNEL);
1448 if (!greth->rx_bd_base) { 1448 if (!greth->rx_bd_base) {
1449 err = -ENOMEM; 1449 err = -ENOMEM;
1450 goto error4; 1450 goto error4;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 0b60921c392f..16477aa6d61f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev)
795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; 795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
796 796
797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { 797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
798 descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, 798 descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
799 GFP_KERNEL); 799 GFP_KERNEL);
800 if (!descs) { 800 if (!descs) {
801 netdev_err(sdev->netdev, 801 netdev_err(sdev->netdev,
802 "failed to allocate status descriptors\n"); 802 "failed to allocate status descriptors\n");
@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev)
1240 struct slic_shmem_data *sm_data; 1240 struct slic_shmem_data *sm_data;
1241 dma_addr_t paddr; 1241 dma_addr_t paddr;
1242 1242
1243 sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1243 sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
1244 &paddr, GFP_KERNEL); 1244 &paddr, GFP_KERNEL);
1245 if (!sm_data) { 1245 if (!sm_data) {
1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); 1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
1247 return -ENOMEM; 1247 return -ENOMEM;
@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev)
1621 int err = 0; 1621 int err = 0;
1622 u8 *mac[2]; 1622 u8 *mac[2];
1623 1623
1624 eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1624 eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
1625 &paddr, GFP_KERNEL); 1625 &paddr, GFP_KERNEL);
1626 if (!eeprom) 1626 if (!eeprom)
1627 return -ENOMEM; 1627 return -ENOMEM;
1628 1628
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 420cede41ca4..b17d435de09f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
111 struct ena_com_admin_sq *sq = &queue->sq; 111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113 113
114 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 GFP_KERNEL); 115 GFP_KERNEL);
116 116
117 if (!sq->entries) { 117 if (!sq->entries) {
118 pr_err("memory allocation failed"); 118 pr_err("memory allocation failed");
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
133 struct ena_com_admin_cq *cq = &queue->cq; 133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135 135
136 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 GFP_KERNEL); 137 GFP_KERNEL);
138 138
139 if (!cq->entries) { 139 if (!cq->entries) {
140 pr_err("memory allocation failed"); 140 pr_err("memory allocation failed");
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
156 156
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, 159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 GFP_KERNEL); 160 GFP_KERNEL);
161 161
162 if (!aenq->entries) { 162 if (!aenq->entries) {
163 pr_err("memory allocation failed"); 163 pr_err("memory allocation failed");
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
344 dev_node = dev_to_node(ena_dev->dmadev); 344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node); 345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr = 346 io_sq->desc_addr.virt_addr =
347 dma_zalloc_coherent(ena_dev->dmadev, size, 347 dma_alloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr, 348 &io_sq->desc_addr.phys_addr,
349 GFP_KERNEL); 349 GFP_KERNEL);
350 set_dev_node(ena_dev->dmadev, dev_node); 350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) { 351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr = 352 io_sq->desc_addr.virt_addr =
353 dma_zalloc_coherent(ena_dev->dmadev, size, 353 dma_alloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr, 354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL); 355 GFP_KERNEL);
356 } 356 }
357 357
358 if (!io_sq->desc_addr.virt_addr) { 358 if (!io_sq->desc_addr.virt_addr) {
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
425 prev_node = dev_to_node(ena_dev->dmadev); 425 prev_node = dev_to_node(ena_dev->dmadev);
426 set_dev_node(ena_dev->dmadev, ctx->numa_node); 426 set_dev_node(ena_dev->dmadev, ctx->numa_node);
427 io_cq->cdesc_addr.virt_addr = 427 io_cq->cdesc_addr.virt_addr =
428 dma_zalloc_coherent(ena_dev->dmadev, size, 428 dma_alloc_coherent(ena_dev->dmadev, size,
429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
430 set_dev_node(ena_dev->dmadev, prev_node); 430 set_dev_node(ena_dev->dmadev, prev_node);
431 if (!io_cq->cdesc_addr.virt_addr) { 431 if (!io_cq->cdesc_addr.virt_addr) {
432 io_cq->cdesc_addr.virt_addr = 432 io_cq->cdesc_addr.virt_addr =
433 dma_zalloc_coherent(ena_dev->dmadev, size, 433 dma_alloc_coherent(ena_dev->dmadev, size,
434 &io_cq->cdesc_addr.phys_addr, 434 &io_cq->cdesc_addr.phys_addr,
435 GFP_KERNEL); 435 GFP_KERNEL);
436 } 436 }
437 437
438 if (!io_cq->cdesc_addr.virt_addr) { 438 if (!io_cq->cdesc_addr.virt_addr) {
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1026 struct ena_rss *rss = &ena_dev->rss; 1026 struct ena_rss *rss = &ena_dev->rss;
1027 1027
1028 rss->hash_key = 1028 rss->hash_key =
1029 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1029 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1030 &rss->hash_key_dma_addr, GFP_KERNEL); 1030 &rss->hash_key_dma_addr, GFP_KERNEL);
1031 1031
1032 if (unlikely(!rss->hash_key)) 1032 if (unlikely(!rss->hash_key))
1033 return -ENOMEM; 1033 return -ENOMEM;
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1050 struct ena_rss *rss = &ena_dev->rss; 1050 struct ena_rss *rss = &ena_dev->rss;
1051 1051
1052 rss->hash_ctrl = 1052 rss->hash_ctrl =
1053 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1053 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1054 &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1054 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1055 1055
1056 if (unlikely(!rss->hash_ctrl)) 1056 if (unlikely(!rss->hash_ctrl))
1057 return -ENOMEM; 1057 return -ENOMEM;
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1094 sizeof(struct ena_admin_rss_ind_table_entry); 1094 sizeof(struct ena_admin_rss_ind_table_entry);
1095 1095
1096 rss->rss_ind_tbl = 1096 rss->rss_ind_tbl =
1097 dma_zalloc_coherent(ena_dev->dmadev, tbl_size, 1097 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1098 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1098 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1099 if (unlikely(!rss->rss_ind_tbl)) 1099 if (unlikely(!rss->rss_ind_tbl))
1100 goto mem_err1; 1100 goto mem_err1;
1101 1101
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1649 1649
1650 spin_lock_init(&mmio_read->lock); 1650 spin_lock_init(&mmio_read->lock);
1651 mmio_read->read_resp = 1651 mmio_read->read_resp =
1652 dma_zalloc_coherent(ena_dev->dmadev, 1652 dma_alloc_coherent(ena_dev->dmadev,
1653 sizeof(*mmio_read->read_resp), 1653 sizeof(*mmio_read->read_resp),
1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1655 if (unlikely(!mmio_read->read_resp)) 1655 if (unlikely(!mmio_read->read_resp))
1656 goto err; 1656 goto err;
1657 1657
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2624 2624
2625 host_attr->host_info = 2625 host_attr->host_info =
2626 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, 2626 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2627 &host_attr->host_info_dma_addr, GFP_KERNEL); 2627 &host_attr->host_info_dma_addr, GFP_KERNEL);
2628 if (unlikely(!host_attr->host_info)) 2628 if (unlikely(!host_attr->host_info))
2629 return -ENOMEM; 2629 return -ENOMEM;
2630 2630
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2642 2642
2643 host_attr->debug_area_virt_addr = 2643 host_attr->debug_area_virt_addr =
2644 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, 2644 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2645 &host_attr->debug_area_dma_addr, GFP_KERNEL); 2645 &host_attr->debug_area_dma_addr,
2646 GFP_KERNEL);
2646 if (unlikely(!host_attr->debug_area_virt_addr)) { 2647 if (unlikely(!host_attr->debug_area_virt_addr)) {
2647 host_attr->debug_area_size = 0; 2648 host_attr->debug_area_size = 0;
2648 return -ENOMEM; 2649 return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6984ac..b40d4377cc71 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
431#define MAC_MDIOSCAR_PA_WIDTH 5 431#define MAC_MDIOSCAR_PA_WIDTH 5
432#define MAC_MDIOSCAR_RA_INDEX 0 432#define MAC_MDIOSCAR_RA_INDEX 0
433#define MAC_MDIOSCAR_RA_WIDTH 16 433#define MAC_MDIOSCAR_RA_WIDTH 16
434#define MAC_MDIOSCAR_REG_INDEX 0
435#define MAC_MDIOSCAR_REG_WIDTH 21
436#define MAC_MDIOSCCDR_BUSY_INDEX 22 434#define MAC_MDIOSCCDR_BUSY_INDEX 22
437#define MAC_MDIOSCCDR_BUSY_WIDTH 1 435#define MAC_MDIOSCCDR_BUSY_WIDTH 1
438#define MAC_MDIOSCCDR_CMD_INDEX 16 436#define MAC_MDIOSCCDR_CMD_INDEX 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1e4ca7..4666084eda16 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1284 } 1284 }
1285} 1285}
1286 1286
1287static unsigned int xgbe_create_mdio_sca(int port, int reg)
1288{
1289 unsigned int mdio_sca, da;
1290
1291 da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
1292
1293 mdio_sca = 0;
1294 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
1297
1298 return mdio_sca;
1299}
1300
1287static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, 1301static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1288 int reg, u16 val) 1302 int reg, u16 val)
1289{ 1303{
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1291 1305
1292 reinit_completion(&pdata->mdio_complete); 1306 reinit_completion(&pdata->mdio_complete);
1293 1307
1294 mdio_sca = 0; 1308 mdio_sca = xgbe_create_mdio_sca(addr, reg);
1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1297 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1309 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1298 1310
1299 mdio_sccd = 0; 1311 mdio_sccd = 0;
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1317 1329
1318 reinit_completion(&pdata->mdio_complete); 1330 reinit_completion(&pdata->mdio_complete);
1319 1331
1320 mdio_sca = 0; 1332 mdio_sca = xgbe_create_mdio_sca(addr, reg);
1321 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1322 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1323 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1333 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1324 1334
1325 mdio_sccd = 0; 1335 mdio_sccd = 0;
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 0f2ad50f3bd7..87b142a312e0 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
206 } 206 }
207 207
208 /* Packet buffers should be 64B aligned */ 208 /* Packet buffers should be 64B aligned */
209 pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 209 pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
210 GFP_ATOMIC); 210 GFP_ATOMIC);
211 if (unlikely(!pkt_buf)) { 211 if (unlikely(!pkt_buf)) {
212 dev_kfree_skb_any(skb); 212 dev_kfree_skb_any(skb);
213 return NETDEV_TX_OK; 213 return NETDEV_TX_OK;
@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
428 ring->ndev = ndev; 428 ring->ndev = ndev;
429 429
430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; 430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
431 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, 431 ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
432 GFP_KERNEL); 432 GFP_KERNEL);
433 if (!ring->desc_addr) 433 if (!ring->desc_addr)
434 goto err; 434 goto err;
435 435
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c131cfc1b79d..e3538ba7d0e7 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
660 alx->num_txq + 660 alx->num_txq +
661 sizeof(struct alx_rrd) * alx->rx_ringsz + 661 sizeof(struct alx_rrd) * alx->rx_ringsz +
662 sizeof(struct alx_rfd) * alx->rx_ringsz; 662 sizeof(struct alx_rfd) * alx->rx_ringsz;
663 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, 663 alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
664 alx->descmem.size, 664 alx->descmem.size,
665 &alx->descmem.dma, 665 &alx->descmem.dma, GFP_KERNEL);
666 GFP_KERNEL);
667 if (!alx->descmem.virt) 666 if (!alx->descmem.virt)
668 return -ENOMEM; 667 return -ENOMEM;
669 668
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7087b88550db..3a3b35b5df67 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
1020 8 * 4; 1020 8 * 4;
1021 1021
1022 ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, 1022 ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
1023 &ring_header->dma, GFP_KERNEL); 1023 &ring_header->dma, GFP_KERNEL);
1024 if (unlikely(!ring_header->desc)) { 1024 if (unlikely(!ring_header->desc)) {
1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); 1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1026 goto err_nomem; 1026 goto err_nomem;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6bae973d4dce..09cd188826b1 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)
936 936
937 /* allocate rx dma ring */ 937 /* allocate rx dma ring */
938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
939 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 939 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
940 if (!p) { 940 if (!p) {
941 ret = -ENOMEM; 941 ret = -ENOMEM;
942 goto out_freeirq_tx; 942 goto out_freeirq_tx;
@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)
947 947
948 /* allocate tx dma ring */ 948 /* allocate tx dma ring */
949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
950 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 950 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
951 if (!p) { 951 if (!p) {
952 ret = -ENOMEM; 952 ret = -ENOMEM;
953 goto out_free_rx_ring; 953 goto out_free_rx_ring;
@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)
2120 2120
2121 /* allocate rx dma ring */ 2121 /* allocate rx dma ring */
2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2123 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2123 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2124 if (!p) { 2124 if (!p) {
2125 dev_err(kdev, "cannot allocate rx ring %u\n", size); 2125 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2126 ret = -ENOMEM; 2126 ret = -ENOMEM;
@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)
2132 2132
2133 /* allocate tx dma ring */ 2133 /* allocate tx dma ring */
2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2135 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2135 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2136 if (!p) { 2136 if (!p) {
2137 dev_err(kdev, "cannot allocate tx ring\n"); 2137 dev_err(kdev, "cannot allocate tx ring\n");
2138 ret = -ENOMEM; 2138 ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4574275ef445..f9521d0274b7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1506,8 +1506,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1506 /* We just need one DMA descriptor which is DMA-able, since writing to 1506 /* We just need one DMA descriptor which is DMA-able, since writing to
1507 * the port will allocate a new descriptor in its internal linked-list 1507 * the port will allocate a new descriptor in its internal linked-list
1508 */ 1508 */
1509 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1509 p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
1510 GFP_KERNEL); 1510 GFP_KERNEL);
1511 if (!p) { 1511 if (!p) {
1512 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1512 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1513 return -ENOMEM; 1513 return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index cabc8e49ad24..2d3a44c40221 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
634 634
635 /* Alloc ring of descriptors */ 635 /* Alloc ring of descriptors */
636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
637 ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 637 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
638 &ring->dma_base, 638 &ring->dma_base,
639 GFP_KERNEL); 639 GFP_KERNEL);
640 if (!ring->cpu_base) { 640 if (!ring->cpu_base) {
641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", 641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
642 ring->mmio_base); 642 ring->mmio_base);
@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
659 659
660 /* Alloc ring of descriptors */ 660 /* Alloc ring of descriptors */
661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
662 ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 662 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
663 &ring->dma_base, 663 &ring->dma_base,
664 GFP_KERNEL); 664 GFP_KERNEL);
665 if (!ring->cpu_base) { 665 if (!ring->cpu_base) {
666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", 666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
667 ring->mmio_base); 667 ring->mmio_base);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bbb247116045..d63371d70bce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev)
844 BNX2_SBLK_MSIX_ALIGN_SIZE); 844 BNX2_SBLK_MSIX_ALIGN_SIZE);
845 bp->status_stats_size = status_blk_size + 845 bp->status_stats_size = status_blk_size +
846 sizeof(struct statistics_block); 846 sizeof(struct statistics_block);
847 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, 847 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
848 &bp->status_blk_mapping, GFP_KERNEL); 848 &bp->status_blk_mapping, GFP_KERNEL);
849 if (!status_blk) 849 if (!status_blk)
850 return -ENOMEM; 850 return -ENOMEM;
851 851
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 5cd3135dfe30..03d131f777bc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2081,7 +2081,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2081 bool is_pf); 2081 bool is_pf);
2082 2082
2083#define BNX2X_ILT_ZALLOC(x, y, size) \ 2083#define BNX2X_ILT_ZALLOC(x, y, size) \
2084 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) 2084 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
2085 2085
2086#define BNX2X_ILT_FREE(x, y, size) \ 2086#define BNX2X_ILT_FREE(x, y, size) \
2087 do { \ 2087 do { \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 142bc11b9fbb..2462e7aa0c5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -52,7 +52,7 @@ extern int bnx2x_num_queues;
52 52
53#define BNX2X_PCI_ALLOC(y, size) \ 53#define BNX2X_PCI_ALLOC(y, size) \
54({ \ 54({ \
55 void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 55 void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
56 if (x) \ 56 if (x) \
57 DP(NETIF_MSG_HW, \ 57 DP(NETIF_MSG_HW, \
58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ 58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa80da973d7..6a512871176b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3449,10 +3449,10 @@ alloc_ext_stats:
3449 goto alloc_tx_ext_stats; 3449 goto alloc_tx_ext_stats;
3450 3450
3451 bp->hw_rx_port_stats_ext = 3451 bp->hw_rx_port_stats_ext =
3452 dma_zalloc_coherent(&pdev->dev, 3452 dma_alloc_coherent(&pdev->dev,
3453 sizeof(struct rx_port_stats_ext), 3453 sizeof(struct rx_port_stats_ext),
3454 &bp->hw_rx_port_stats_ext_map, 3454 &bp->hw_rx_port_stats_ext_map,
3455 GFP_KERNEL); 3455 GFP_KERNEL);
3456 if (!bp->hw_rx_port_stats_ext) 3456 if (!bp->hw_rx_port_stats_ext)
3457 return 0; 3457 return 0;
3458 3458
@@ -3462,10 +3462,10 @@ alloc_tx_ext_stats:
3462 3462
3463 if (bp->hwrm_spec_code >= 0x10902) { 3463 if (bp->hwrm_spec_code >= 0x10902) {
3464 bp->hw_tx_port_stats_ext = 3464 bp->hw_tx_port_stats_ext =
3465 dma_zalloc_coherent(&pdev->dev, 3465 dma_alloc_coherent(&pdev->dev,
3466 sizeof(struct tx_port_stats_ext), 3466 sizeof(struct tx_port_stats_ext),
3467 &bp->hw_tx_port_stats_ext_map, 3467 &bp->hw_tx_port_stats_ext_map,
3468 GFP_KERNEL); 3468 GFP_KERNEL);
3469 } 3469 }
3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3471 } 3471 }
@@ -5601,7 +5601,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5601 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 5601 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5602 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 5602 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5603 if (bp->flags & BNXT_FLAG_CHIP_P5) 5603 if (bp->flags & BNXT_FLAG_CHIP_P5)
5604 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 5604 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5605 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5605 else 5606 else
5606 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 5607 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5607 } 5608 }
@@ -6221,9 +6222,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6221 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6222 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6222 rmem->depth = 1; 6223 rmem->depth = 1;
6223 rmem->nr_pages = MAX_CTX_PAGES; 6224 rmem->nr_pages = MAX_CTX_PAGES;
6224 if (i == (nr_tbls - 1)) 6225 if (i == (nr_tbls - 1)) {
6225 rmem->nr_pages = ctx_pg->nr_pages % 6226 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6226 MAX_CTX_PAGES; 6227
6228 if (rem)
6229 rmem->nr_pages = rem;
6230 }
6227 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6231 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6228 if (rc) 6232 if (rc)
6229 break; 6233 break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 15c7041e937b..70775158c8c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
316 316
317 n = IEEE_8021QAZ_MAX_TCS; 317 n = IEEE_8021QAZ_MAX_TCS;
318 data_len = sizeof(*data) + sizeof(*fw_app) * n; 318 data_len = sizeof(*data) + sizeof(*fw_app) * n;
319 data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, 319 data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
320 GFP_KERNEL); 320 GFP_KERNEL);
321 if (!data) 321 if (!data)
322 return -ENOMEM; 322 return -ENOMEM;
323 323
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 140dbd62106d..7f56032e44ac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
85 return -EFAULT; 85 return -EFAULT;
86 } 86 }
87 87
88 data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, 88 data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
89 &data_dma_addr, GFP_KERNEL); 89 &data_dma_addr, GFP_KERNEL);
90 if (!data_addr) 90 if (!data_addr)
91 return -ENOMEM; 91 return -ENOMEM;
92 92
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f1aaac8e6268..0a0995894ddb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -386,8 +386,8 @@ struct hwrm_err_output {
386#define HWRM_VERSION_MAJOR 1 386#define HWRM_VERSION_MAJOR 1
387#define HWRM_VERSION_MINOR 10 387#define HWRM_VERSION_MINOR 10
388#define HWRM_VERSION_UPDATE 0 388#define HWRM_VERSION_UPDATE 0
389#define HWRM_VERSION_RSVD 33 389#define HWRM_VERSION_RSVD 35
390#define HWRM_VERSION_STR "1.10.0.33" 390#define HWRM_VERSION_STR "1.10.0.35"
391 391
392/* hwrm_ver_get_input (size:192b/24B) */ 392/* hwrm_ver_get_input (size:192b/24B) */
393struct hwrm_ver_get_input { 393struct hwrm_ver_get_input {
@@ -1184,6 +1184,7 @@ struct hwrm_func_cfg_input {
1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL 1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL 1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL 1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
1187 #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
1187 __le32 enables; 1188 __le32 enables;
1188 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL 1189 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
1189 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL 1190 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b1397af81f7..b1627dd5f2fd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8712 if (!i && tg3_flag(tp, ENABLE_RSS)) 8712 if (!i && tg3_flag(tp, ENABLE_RSS))
8713 continue; 8713 continue;
8714 8714
8715 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8715 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8716 TG3_RX_RCB_RING_BYTES(tp), 8716 TG3_RX_RCB_RING_BYTES(tp),
8717 &tnapi->rx_rcb_mapping, 8717 &tnapi->rx_rcb_mapping,
8718 GFP_KERNEL); 8718 GFP_KERNEL);
8719 if (!tnapi->rx_rcb) 8719 if (!tnapi->rx_rcb)
8720 goto err_out; 8720 goto err_out;
8721 } 8721 }
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8768{ 8768{
8769 int i; 8769 int i;
8770 8770
8771 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8771 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8772 sizeof(struct tg3_hw_stats), 8772 sizeof(struct tg3_hw_stats),
8773 &tp->stats_mapping, GFP_KERNEL); 8773 &tp->stats_mapping, GFP_KERNEL);
8774 if (!tp->hw_stats) 8774 if (!tp->hw_stats)
8775 goto err_out; 8775 goto err_out;
8776 8776
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8778 struct tg3_napi *tnapi = &tp->napi[i]; 8778 struct tg3_napi *tnapi = &tp->napi[i];
8779 struct tg3_hw_status *sblk; 8779 struct tg3_hw_status *sblk;
8780 8780
8781 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8781 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8782 TG3_HW_STATUS_SIZE, 8782 TG3_HW_STATUS_SIZE,
8783 &tnapi->status_mapping, 8783 &tnapi->status_mapping,
8784 GFP_KERNEL); 8784 GFP_KERNEL);
8785 if (!tnapi->hw_status) 8785 if (!tnapi->hw_status)
8786 goto err_out; 8786 goto err_out;
8787 8787
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b126926ef7f5..66cc7927061a 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1738,12 +1738,8 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1738 *skb = nskb; 1738 *skb = nskb;
1739 } 1739 }
1740 1740
1741 if (padlen) { 1741 if (padlen > ETH_FCS_LEN)
1742 if (padlen >= ETH_FCS_LEN) 1742 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1743 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1744 else
1745 skb_trim(*skb, ETH_FCS_LEN - padlen);
1746 }
1747 1743
1748add_fcs: 1744add_fcs:
1749 /* set FCS to packet */ 1745 /* set FCS to packet */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index fcaf18fa3904..5b4d3badcb73 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
59 dmem->q_len = q_len; 59 dmem->q_len = q_len;
60 dmem->size = (desc_size * q_len) + align_bytes; 60 dmem->size = (desc_size * q_len) + align_bytes;
61 /* Save address, need it while freeing */ 61 /* Save address, need it while freeing */
62 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 62 dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
63 &dmem->dma, GFP_KERNEL); 63 &dmem->dma, GFP_KERNEL);
64 if (!dmem->unalign_base) 64 if (!dmem->unalign_base)
65 return -ENOMEM; 65 return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 20b6e1b3f5e3..89db739b7819 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
620{ 620{
621 size_t len = nelem * elem_size; 621 size_t len = nelem * elem_size;
622 void *s = NULL; 622 void *s = NULL;
623 void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 623 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
624 624
625 if (!p) 625 if (!p)
626 return NULL; 626 return NULL;
@@ -2381,7 +2381,7 @@ no_mem:
2381 lro_add_page(adap, qs, fl, 2381 lro_add_page(adap, qs, fl,
2382 G_RSPD_LEN(len), 2382 G_RSPD_LEN(len),
2383 flags & F_RSPD_EOP); 2383 flags & F_RSPD_EOP);
2384 goto next_fl; 2384 goto next_fl;
2385 } 2385 }
2386 2386
2387 skb = get_packet_pg(adap, fl, q, 2387 skb = get_packet_pg(adap, fl, q,
@@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap)
3214 for (i = 0; i < SGE_QSETS; ++i) { 3214 for (i = 0; i < SGE_QSETS; ++i) {
3215 struct sge_qset *q = &adap->sge.qs[i]; 3215 struct sge_qset *q = &adap->sge.qs[i];
3216 3216
3217 if (q->tx_reclaim_timer.function) 3217 if (q->tx_reclaim_timer.function)
3218 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3218 mod_timer(&q->tx_reclaim_timer,
3219 jiffies + TX_RECLAIM_PERIOD);
3219 3220
3220 if (q->rx_reclaim_timer.function) 3221 if (q->rx_reclaim_timer.function)
3221 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 3222 mod_timer(&q->rx_reclaim_timer,
3223 jiffies + RX_RECLAIM_PERIOD);
3222 } 3224 }
3223} 3225}
3224 3226
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 080918af773c..0a9f2c596624 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter)
1082 CH_WARN(adapter, "found newer FW version(%u.%u), " 1082 CH_WARN(adapter, "found newer FW version(%u.%u), "
1083 "driver compiled for version %u.%u\n", major, minor, 1083 "driver compiled for version %u.%u\n", major, minor,
1084 FW_VERSION_MAJOR, FW_VERSION_MINOR); 1084 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1085 return 0; 1085 return 0;
1086 } 1086 }
1087 return -EINVAL; 1087 return -EINVAL;
1088} 1088}
@@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter)
3619 3619
3620static int init_parity(struct adapter *adap) 3620static int init_parity(struct adapter *adap)
3621{ 3621{
3622 int i, err, addr; 3622 int i, err, addr;
3623 3623
3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3625 return -EBUSY; 3625 return -EBUSY;
@@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter)
3806 p->phy.ops->power_down(&p->phy, 1); 3806 p->phy.ops->power_down(&p->phy, 1);
3807 } 3807 }
3808 3808
3809return 0; 3809 return 0;
3810} 3810}
3811 3811
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..58a039c3224a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter)
378 int err; 378 int err;
379 379
380 memset(&c, 0, sizeof(c)); 380 memset(&c, 0, sizeof(c));
381 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | 381 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
382 FW_CMD_REQUEST_F | 382 FW_CMD_REQUEST_F |
383 FW_CMD_WRITE_F | 383 FW_CMD_WRITE_F |
384 FW_PTP_CMD_PORTID_V(0)); 384 FW_PTP_CMD_PORTID_V(0));
385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); 385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; 386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
387 387
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 9a6065a3fa46..c041f44324db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
78 unsigned long flags; 78 unsigned long flags;
79 79
80 spin_lock_irqsave(&bmap->lock, flags); 80 spin_lock_irqsave(&bmap->lock, flags);
81 __clear_bit(msix_idx, bmap->msix_bmap); 81 __clear_bit(msix_idx, bmap->msix_bmap);
82 spin_unlock_irqrestore(&bmap->lock, flags); 82 spin_unlock_irqrestore(&bmap->lock, flags);
83} 83}
84 84
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b90188401d4a..fc0bc6458e84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
694{ 694{
695 size_t len = nelem * elem_size + stat_size; 695 size_t len = nelem * elem_size + stat_size;
696 void *s = NULL; 696 void *s = NULL;
697 void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); 697 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
698 698
699 if (!p) 699 if (!p)
700 return NULL; 700 return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8c34292a0ec..2b03f6187a24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap,
3794 /* If we have version number support, then check to see if the adapter 3794 /* If we have version number support, then check to see if the adapter
3795 * already has up-to-date PHY firmware loaded. 3795 * already has up-to-date PHY firmware loaded.
3796 */ 3796 */
3797 if (phy_fw_version) { 3797 if (phy_fw_version) {
3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); 3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3800 if (ret < 0) 3800 if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 3007e1ac1e61..1d534f0baa69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
756 * Allocate the hardware ring and PCI DMA bus address space for said. 756 * Allocate the hardware ring and PCI DMA bus address space for said.
757 */ 757 */
758 size_t hwlen = nelem * hwsize + stat_size; 758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760 760
761 if (!hwring) 761 if (!hwring)
762 return NULL; 762 return NULL;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1e9d882c04ef..59a7f0b99069 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
1808 total_size = buf_len; 1808 total_size = buf_len;
1809 1809
1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1811 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1811 get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1812 get_fat_cmd.size, 1812 get_fat_cmd.size,
1813 &get_fat_cmd.dma, GFP_ATOMIC); 1813 &get_fat_cmd.dma, GFP_ATOMIC);
1814 if (!get_fat_cmd.va) 1814 if (!get_fat_cmd.va)
1815 return -ENOMEM; 1815 return -ENOMEM;
1816 1816
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2302 return -EINVAL; 2302 return -EINVAL;
2303 2303
2304 cmd.size = sizeof(struct be_cmd_resp_port_type); 2304 cmd.size = sizeof(struct be_cmd_resp_port_type);
2305 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2305 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2306 GFP_ATOMIC); 2306 GFP_ATOMIC);
2307 if (!cmd.va) { 2307 if (!cmd.va) {
2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2309 return -ENOMEM; 2309 return -ENOMEM;
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
3066 3066
3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3068 + LANCER_FW_DOWNLOAD_CHUNK; 3068 + LANCER_FW_DOWNLOAD_CHUNK;
3069 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, 3069 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3070 &flash_cmd.dma, GFP_KERNEL); 3070 GFP_KERNEL);
3071 if (!flash_cmd.va) 3071 if (!flash_cmd.va)
3072 return -ENOMEM; 3072 return -ENOMEM;
3073 3073
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3184 } 3184 }
3185 3185
3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3187 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3187 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3188 GFP_KERNEL); 3188 GFP_KERNEL);
3189 if (!flash_cmd.va) 3189 if (!flash_cmd.va)
3190 return -ENOMEM; 3190 return -ENOMEM;
3191 3191
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
3435 goto err; 3435 goto err;
3436 } 3436 }
3437 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 3437 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
3438 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3438 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3439 GFP_ATOMIC); 3439 GFP_ATOMIC);
3440 if (!cmd.va) { 3440 if (!cmd.va) {
3441 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3441 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3442 status = -ENOMEM; 3442 status = -ENOMEM;
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3522 3522
3523 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 3523 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3524 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 3524 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
3525 attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3525 attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3526 attribs_cmd.size, 3526 attribs_cmd.size,
3527 &attribs_cmd.dma, GFP_ATOMIC); 3527 &attribs_cmd.dma, GFP_ATOMIC);
3528 if (!attribs_cmd.va) { 3528 if (!attribs_cmd.va) {
3529 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3529 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3530 status = -ENOMEM; 3530 status = -ENOMEM;
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3699 3699
3700 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 3700 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3701 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 3701 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3702 get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 3702 get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
3703 get_mac_list_cmd.size, 3703 get_mac_list_cmd.size,
3704 &get_mac_list_cmd.dma, 3704 &get_mac_list_cmd.dma,
3705 GFP_ATOMIC); 3705 GFP_ATOMIC);
3706 3706
3707 if (!get_mac_list_cmd.va) { 3707 if (!get_mac_list_cmd.va) {
3708 dev_err(&adapter->pdev->dev, 3708 dev_err(&adapter->pdev->dev,
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3829 3829
3830 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3830 memset(&cmd, 0, sizeof(struct be_dma_mem));
3831 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3831 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3832 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3832 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3833 GFP_KERNEL); 3833 GFP_KERNEL);
3834 if (!cmd.va) 3834 if (!cmd.va)
3835 return -ENOMEM; 3835 return -ENOMEM;
3836 3836
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
4035 4035
4036 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4036 memset(&cmd, 0, sizeof(struct be_dma_mem));
4037 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 4037 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
4038 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4038 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4039 GFP_ATOMIC); 4039 GFP_ATOMIC);
4040 if (!cmd.va) { 4040 if (!cmd.va) {
4041 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 4041 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
4042 status = -ENOMEM; 4042 status = -ENOMEM;
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
4089 4089
4090 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4090 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4091 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4091 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4092 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4092 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4093 extfat_cmd.size, &extfat_cmd.dma, 4093 extfat_cmd.size, &extfat_cmd.dma,
4094 GFP_ATOMIC); 4094 GFP_ATOMIC);
4095 if (!extfat_cmd.va) 4095 if (!extfat_cmd.va)
4096 return -ENOMEM; 4096 return -ENOMEM;
4097 4097
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4127 4127
4128 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4128 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4129 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4129 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4130 extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 4130 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
4131 extfat_cmd.size, &extfat_cmd.dma, 4131 extfat_cmd.size, &extfat_cmd.dma,
4132 GFP_ATOMIC); 4132 GFP_ATOMIC);
4133 4133
4134 if (!extfat_cmd.va) { 4134 if (!extfat_cmd.va) {
4135 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 4135 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
4354 4354
4355 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4355 memset(&cmd, 0, sizeof(struct be_dma_mem));
4356 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 4356 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
4357 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4357 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4358 GFP_ATOMIC); 4358 GFP_ATOMIC);
4359 if (!cmd.va) { 4359 if (!cmd.va) {
4360 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 4360 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
4361 status = -ENOMEM; 4361 status = -ENOMEM;
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
4452 4452
4453 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4453 memset(&cmd, 0, sizeof(struct be_dma_mem));
4454 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 4454 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
4455 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4455 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4456 GFP_ATOMIC); 4456 GFP_ATOMIC);
4457 if (!cmd.va) 4457 if (!cmd.va)
4458 return -ENOMEM; 4458 return -ENOMEM;
4459 4459
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4539 4539
4540 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4540 memset(&cmd, 0, sizeof(struct be_dma_mem));
4541 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 4541 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
4542 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4542 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4543 GFP_ATOMIC); 4543 GFP_ATOMIC);
4544 if (!cmd.va) 4544 if (!cmd.va)
4545 return -ENOMEM; 4545 return -ENOMEM;
4546 4546
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 3f6749fc889f..4c218341c51b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
274 int status = 0; 274 int status = 0;
275 275
276 read_cmd.size = LANCER_READ_FILE_CHUNK; 276 read_cmd.size = LANCER_READ_FILE_CHUNK;
277 read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size, 277 read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
278 &read_cmd.dma, GFP_ATOMIC); 278 &read_cmd.dma, GFP_ATOMIC);
279 279
280 if (!read_cmd.va) { 280 if (!read_cmd.va) {
281 dev_err(&adapter->pdev->dev, 281 dev_err(&adapter->pdev->dev,
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
815 } 815 }
816 816
817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 817 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
818 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); 818 cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
819 if (!cmd.va) 819 if (!cmd.va)
820 return -ENOMEM; 820 return -ENOMEM;
821 821
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
851 }; 851 };
852 852
853 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 853 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
854 ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 854 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
855 ddrdma_cmd.size, &ddrdma_cmd.dma, 855 ddrdma_cmd.size, &ddrdma_cmd.dma,
856 GFP_KERNEL); 856 GFP_KERNEL);
857 if (!ddrdma_cmd.va) 857 if (!ddrdma_cmd.va)
858 return -ENOMEM; 858 return -ENOMEM;
859 859
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev,
1014 1014
1015 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); 1015 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
1016 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); 1016 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
1017 eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1017 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1018 eeprom_cmd.size, &eeprom_cmd.dma, 1018 eeprom_cmd.size, &eeprom_cmd.dma,
1019 GFP_KERNEL); 1019 GFP_KERNEL);
1020 1020
1021 if (!eeprom_cmd.va) 1021 if (!eeprom_cmd.va)
1022 return -ENOMEM; 1022 return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 852f5bfe5f6d..d5026909dec5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
167 q->len = len; 167 q->len = len;
168 q->entry_size = entry_size; 168 q->entry_size = entry_size;
169 mem->size = len * entry_size; 169 mem->size = len * entry_size;
170 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 170 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
171 GFP_KERNEL); 171 &mem->dma, GFP_KERNEL);
172 if (!mem->va) 172 if (!mem->va)
173 return -ENOMEM; 173 return -ENOMEM;
174 return 0; 174 return 0;
@@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter)
5766 int status = 0; 5766 int status = 0;
5767 5767
5768 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 5768 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5769 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size, 5769 mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
5770 &mbox_mem_alloc->dma, 5770 &mbox_mem_alloc->dma,
5771 GFP_KERNEL); 5771 GFP_KERNEL);
5772 if (!mbox_mem_alloc->va) 5772 if (!mbox_mem_alloc->va)
5773 return -ENOMEM; 5773 return -ENOMEM;
5774 5774
@@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter)
5777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 5777 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5778 5778
5779 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 5779 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5780 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, 5780 rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
5781 &rx_filter->dma, GFP_KERNEL); 5781 &rx_filter->dma, GFP_KERNEL);
5782 if (!rx_filter->va) { 5782 if (!rx_filter->va) {
5783 status = -ENOMEM; 5783 status = -ENOMEM;
5784 goto free_mbox; 5784 goto free_mbox;
@@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter)
5792 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 5792 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5793 else 5793 else
5794 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2); 5794 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5795 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size, 5795 stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
5796 &stats_cmd->dma, GFP_KERNEL); 5796 &stats_cmd->dma, GFP_KERNEL);
5797 if (!stats_cmd->va) { 5797 if (!stats_cmd->va) {
5798 status = -ENOMEM; 5798 status = -ENOMEM;
5799 goto free_rx_filter; 5799 goto free_rx_filter;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4d673225ed3e..3e5e97186fc4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
935 return -ENOMEM; 935 return -ENOMEM;
936 936
937 /* Allocate descriptors */ 937 /* Allocate descriptors */
938 priv->rxdes = dma_zalloc_coherent(priv->dev, 938 priv->rxdes = dma_alloc_coherent(priv->dev,
939 MAX_RX_QUEUE_ENTRIES * 939 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
940 sizeof(struct ftgmac100_rxdes), 940 &priv->rxdes_dma, GFP_KERNEL);
941 &priv->rxdes_dma, GFP_KERNEL);
942 if (!priv->rxdes) 941 if (!priv->rxdes)
943 return -ENOMEM; 942 return -ENOMEM;
944 priv->txdes = dma_zalloc_coherent(priv->dev, 943 priv->txdes = dma_alloc_coherent(priv->dev,
945 MAX_TX_QUEUE_ENTRIES * 944 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
946 sizeof(struct ftgmac100_txdes), 945 &priv->txdes_dma, GFP_KERNEL);
947 &priv->txdes_dma, GFP_KERNEL);
948 if (!priv->txdes) 946 if (!priv->txdes)
949 return -ENOMEM; 947 return -ENOMEM;
950 948
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 084f24daf2b5..2a0e820526dc 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
734{ 734{
735 int i; 735 int i;
736 736
737 priv->descs = dma_zalloc_coherent(priv->dev, 737 priv->descs = dma_alloc_coherent(priv->dev,
738 sizeof(struct ftmac100_descs), 738 sizeof(struct ftmac100_descs),
739 &priv->descs_dma_addr, 739 &priv->descs_dma_addr, GFP_KERNEL);
740 GFP_KERNEL);
741 if (!priv->descs) 740 if (!priv->descs)
742 return -ENOMEM; 741 return -ENOMEM;
743 742
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f53090cde041..dfebc30c4841 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2051 bool nonlinear = skb_is_nonlinear(skb); 2051 bool nonlinear = skb_is_nonlinear(skb);
2052 struct rtnl_link_stats64 *percpu_stats; 2052 struct rtnl_link_stats64 *percpu_stats;
2053 struct dpaa_percpu_priv *percpu_priv; 2053 struct dpaa_percpu_priv *percpu_priv;
2054 struct netdev_queue *txq;
2054 struct dpaa_priv *priv; 2055 struct dpaa_priv *priv;
2055 struct qm_fd fd; 2056 struct qm_fd fd;
2056 int offset = 0; 2057 int offset = 0;
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2100 if (unlikely(err < 0)) 2101 if (unlikely(err < 0))
2101 goto skb_to_fd_failed; 2102 goto skb_to_fd_failed;
2102 2103
2104 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2105
2106 /* LLTX requires to do our own update of trans_start */
2107 txq->trans_start = jiffies;
2108
2103 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 2109 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2104 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD); 2110 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2105 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2111 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 471805ea363b..e5d853b7b454 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
1006 1006
1007 for (i = 0; i < QUEUE_NUMS; i++) { 1007 for (i = 0; i < QUEUE_NUMS; i++) {
1008 size = priv->pool[i].count * sizeof(struct hix5hd2_desc); 1008 size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
1009 virt_addr = dma_zalloc_coherent(dev, size, &phys_addr, 1009 virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
1010 GFP_KERNEL); 1010 GFP_KERNEL);
1011 if (virt_addr == NULL) 1011 if (virt_addr == NULL)
1012 goto error_free_pool; 1012 goto error_free_pool;
1013 1013
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ad1779fc410e..a78bfafd212c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
147 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); 147 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
148 int i; 148 int i;
149 149
150 vf_cb->mac_cb = NULL;
151
152 kfree(vf_cb);
153
154 for (i = 0; i < handle->q_num; i++) 150 for (i = 0; i < handle->q_num; i++)
155 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0; 151 hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
152
153 kfree(vf_cb);
156} 154}
157 155
158static int hns_ae_wait_flow_down(struct hnae_handle *handle) 156static int hns_ae_wait_flow_down(struct hnae_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5748d3f722f6..5b33238c6680 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1170 if (!h->phy_dev) 1170 if (!h->phy_dev)
1171 return 0; 1171 return 0;
1172 1172
1173 ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
1174 linkmode_and(phy_dev->supported, phy_dev->supported, supported);
1175 linkmode_copy(phy_dev->advertising, phy_dev->supported);
1176
1177 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1178 phy_dev->autoneg = false;
1179
1173 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { 1180 if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
1174 phy_dev->dev_flags = 0; 1181 phy_dev->dev_flags = 0;
1175 1182
@@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
1181 if (unlikely(ret)) 1188 if (unlikely(ret))
1182 return -ENODEV; 1189 return -ENODEV;
1183 1190
1184 ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
1185 linkmode_and(phy_dev->supported, phy_dev->supported, supported);
1186 linkmode_copy(phy_dev->advertising, phy_dev->supported);
1187
1188 if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
1189 phy_dev->autoneg = false;
1190
1191 if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
1192 phy_stop(phy_dev);
1193
1194 return 0; 1191 return 0;
1195} 1192}
1196 1193
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 07cd58798083..1bf7a5f116a0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2041{ 2041{
2042 int size = ring->desc_num * sizeof(ring->desc[0]); 2042 int size = ring->desc_num * sizeof(ring->desc[0]);
2043 2043
2044 ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size, 2044 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2045 &ring->desc_dma_addr, 2045 &ring->desc_dma_addr, GFP_KERNEL);
2046 GFP_KERNEL);
2047 if (!ring->desc) 2046 if (!ring->desc)
2048 return -ENOMEM; 2047 return -ENOMEM;
2049 2048
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8af0cef5609b..e483a6e730e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
39{ 39{
40 int size = ring->desc_num * sizeof(struct hclge_desc); 40 int size = ring->desc_num * sizeof(struct hclge_desc);
41 41
42 ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 42 ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
43 size, &ring->desc_dma_addr, 43 &ring->desc_dma_addr, GFP_KERNEL);
44 GFP_KERNEL);
45 if (!ring->desc) 44 if (!ring->desc)
46 return -ENOMEM; 45 return -ENOMEM;
47 46
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5765c8cf3a3..4e78e8812a04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
115{ 115{
116 int size = ring->desc_num * sizeof(struct hclgevf_desc); 116 int size = ring->desc_num * sizeof(struct hclgevf_desc);
117 117
118 ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring), 118 ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
119 size, &ring->desc_dma_addr, 119 &ring->desc_dma_addr, GFP_KERNEL);
120 GFP_KERNEL);
121 if (!ring->desc) 120 if (!ring->desc)
122 return -ENOMEM; 121 return -ENOMEM;
123 122
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index c40603a183df..b4fefb4c3064 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
613 u8 *cmd_vaddr; 613 u8 *cmd_vaddr;
614 int err = 0; 614 int err = 0;
615 615
616 cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, 616 cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
617 &cmd_paddr, GFP_KERNEL); 617 &cmd_paddr, GFP_KERNEL);
618 if (!cmd_vaddr) { 618 if (!cmd_vaddr) {
619 dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); 619 dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
620 return -ENOMEM; 620 return -ENOMEM;
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
663 dma_addr_t node_paddr; 663 dma_addr_t node_paddr;
664 int err; 664 int err;
665 665
666 node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, 666 node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
667 &node_paddr, GFP_KERNEL); 667 GFP_KERNEL);
668 if (!node) { 668 if (!node) {
669 dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); 669 dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
670 return -ENOMEM; 670 return -ENOMEM;
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
821 if (!chain->cell_ctxt) 821 if (!chain->cell_ctxt)
822 return -ENOMEM; 822 return -ENOMEM;
823 823
824 chain->wb_status = dma_zalloc_coherent(&pdev->dev, 824 chain->wb_status = dma_alloc_coherent(&pdev->dev,
825 sizeof(*chain->wb_status), 825 sizeof(*chain->wb_status),
826 &chain->wb_status_paddr, 826 &chain->wb_status_paddr,
827 GFP_KERNEL); 827 GFP_KERNEL);
828 if (!chain->wb_status) { 828 if (!chain->wb_status) {
829 dev_err(&pdev->dev, "Failed to allocate DMA wb status\n"); 829 dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
830 return -ENOMEM; 830 return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 7cb8b9b94726..683e67515016 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
593 } 593 }
594 594
595 for (pg = 0; pg < eq->num_pages; pg++) { 595 for (pg = 0; pg < eq->num_pages; pg++) {
596 eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, 596 eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
597 eq->page_size, 597 eq->page_size,
598 &eq->dma_addr[pg], 598 &eq->dma_addr[pg],
599 GFP_KERNEL); 599 GFP_KERNEL);
600 if (!eq->virt_addr[pg]) { 600 if (!eq->virt_addr[pg]) {
601 err = -ENOMEM; 601 err = -ENOMEM;
602 goto err_dma_alloc; 602 goto err_dma_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 8e5897669a3a..a322a22d9357 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
355 goto err_sq_db; 355 goto err_sq_db;
356 } 356 }
357 357
358 ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), 358 ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
359 &func_to_io->ci_dma_base, 359 &func_to_io->ci_dma_base,
360 GFP_KERNEL); 360 GFP_KERNEL);
361 if (!ci_addr_base) { 361 if (!ci_addr_base) {
362 dev_err(&pdev->dev, "Failed to allocate CI area\n"); 362 dev_err(&pdev->dev, "Failed to allocate CI area\n");
363 err = -ENOMEM; 363 err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index bbf9bdd0ee3e..d62cf509646a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
336 goto err_cqe_dma_arr_alloc; 336 goto err_cqe_dma_arr_alloc;
337 337
338 for (i = 0; i < wq->q_depth; i++) { 338 for (i = 0; i < wq->q_depth; i++) {
339 rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, 339 rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
340 sizeof(*rq->cqe[i]), 340 sizeof(*rq->cqe[i]),
341 &rq->cqe_dma[i], GFP_KERNEL); 341 &rq->cqe_dma[i], GFP_KERNEL);
342 if (!rq->cqe[i]) 342 if (!rq->cqe[i])
343 goto err_cqe_alloc; 343 goto err_cqe_alloc;
344 } 344 }
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
415 415
416 /* HW requirements: Must be at least 32 bit */ 416 /* HW requirements: Must be at least 32 bit */
417 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); 417 pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
418 rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, 418 rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
419 &rq->pi_dma_addr, GFP_KERNEL); 419 &rq->pi_dma_addr, GFP_KERNEL);
420 if (!rq->pi_virt_addr) { 420 if (!rq->pi_virt_addr) {
421 dev_err(&pdev->dev, "Failed to allocate PI address\n"); 421 dev_err(&pdev->dev, "Failed to allocate PI address\n");
422 err = -ENOMEM; 422 err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 1dfa7eb05c10..cb66e7024659 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
114 struct pci_dev *pdev = hwif->pdev; 114 struct pci_dev *pdev = hwif->pdev;
115 dma_addr_t dma_addr; 115 dma_addr_t dma_addr;
116 116
117 *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, 117 *vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
118 GFP_KERNEL); 118 GFP_KERNEL);
119 if (!*vaddr) { 119 if (!*vaddr) {
120 dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); 120 dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
121 return -ENOMEM; 121 return -ENOMEM;
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
482 u64 *paddr = &wq->block_vaddr[i]; 482 u64 *paddr = &wq->block_vaddr[i];
483 dma_addr_t dma_addr; 483 dma_addr_t dma_addr;
484 484
485 *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, 485 *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
486 &dma_addr, GFP_KERNEL); 486 &dma_addr, GFP_KERNEL);
487 if (!*vaddr) { 487 if (!*vaddr) {
488 dev_err(&pdev->dev, "Failed to allocate wq page\n"); 488 dev_err(&pdev->dev, "Failed to allocate wq page\n");
489 goto err_alloc_wq_pages; 489 goto err_alloc_wq_pages;
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fff09dcf9e34..787d5aca5278 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev)
636 bd_size = sizeof(struct mal_descriptor) * 636 bd_size = sizeof(struct mal_descriptor) *
637 (NUM_TX_BUFF * mal->num_tx_chans + 637 (NUM_TX_BUFF * mal->num_tx_chans +
638 NUM_RX_BUFF * mal->num_rx_chans); 638 NUM_RX_BUFF * mal->num_rx_chans);
639 mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 639 mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
640 GFP_KERNEL); 640 GFP_KERNEL);
641 if (mal->bd_virt == NULL) { 641 if (mal->bd_virt == NULL) {
642 err = -ENOMEM; 642 err = -ENOMEM;
643 goto fail_unmap; 643 goto fail_unmap;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 31fb76ee9d82..a1246e89aad4 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -159,7 +159,7 @@ config IXGBE
159 tristate "Intel(R) 10GbE PCI Express adapters support" 159 tristate "Intel(R) 10GbE PCI Express adapters support"
160 depends on PCI 160 depends on PCI
161 select MDIO 161 select MDIO
162 select MDIO_DEVICE 162 select PHYLIB
163 imply PTP_1588_CLOCK 163 imply PTP_1588_CLOCK
164 ---help--- 164 ---help---
165 This driver supports Intel(R) 10GbE PCI Express family of 165 This driver supports Intel(R) 10GbE PCI Express family of
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2569a168334c..a41008523c98 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
993 993
994 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 994 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
995 txdr->size = ALIGN(txdr->size, 4096); 995 txdr->size = ALIGN(txdr->size, 4096);
996 txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 996 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
997 GFP_KERNEL); 997 GFP_KERNEL);
998 if (!txdr->desc) { 998 if (!txdr->desc) {
999 ret_val = 2; 999 ret_val = 2;
1000 goto err_nomem; 1000 goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1051 } 1051 }
1052 1052
1053 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1053 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1054 rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1054 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1055 GFP_KERNEL); 1055 GFP_KERNEL);
1056 if (!rxdr->desc) { 1056 if (!rxdr->desc) {
1057 ret_val = 6; 1057 ret_val = 6;
1058 goto err_nomem; 1058 goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 308c006cb41d..189f231075c2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2305{ 2305{
2306 struct pci_dev *pdev = adapter->pdev; 2306 struct pci_dev *pdev = adapter->pdev;
2307 2307
2308 ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, 2308 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2309 GFP_KERNEL); 2309 GFP_KERNEL);
2310 if (!ring->desc) 2310 if (!ring->desc)
2311 return -ENOMEM; 2311 return -ENOMEM;
2312 2312
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4d40878e395a..f52e2c46e6a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
109 struct i40e_pf *pf = (struct i40e_pf *)hw->back; 109 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
110 110
111 mem->size = ALIGN(size, alignment); 111 mem->size = ALIGN(size, alignment);
112 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, 112 mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
113 &mem->pa, GFP_KERNEL); 113 GFP_KERNEL);
114 if (!mem->va) 114 if (!mem->va)
115 return -ENOMEM; 115 return -ENOMEM;
116 116
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index fe1592ae8769..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
515 /* OS defined structs */ 515 /* OS defined structs */
516 struct pci_dev *pdev; 516 struct pci_dev *pdev;
517 517
518 struct mutex stats64_lock; 518 spinlock_t stats64_lock;
519 struct rtnl_link_stats64 stats64; 519 struct rtnl_link_stats64 stats64;
520 520
521 /* structs defined in e1000_hw.h */ 521 /* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426060b678f..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2295 int i, j; 2295 int i, j;
2296 char *p; 2296 char *p;
2297 2297
2298 mutex_lock(&adapter->stats64_lock); 2298 spin_lock(&adapter->stats64_lock);
2299 igb_update_stats(adapter); 2299 igb_update_stats(adapter);
2300 2300
2301 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 2301 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2338 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); 2338 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
2339 i += IGB_RX_QUEUE_STATS_LEN; 2339 i += IGB_RX_QUEUE_STATS_LEN;
2340 } 2340 }
2341 mutex_unlock(&adapter->stats64_lock); 2341 spin_unlock(&adapter->stats64_lock);
2342} 2342}
2343 2343
2344static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2344static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87bdf1604ae2..7137e7f9c7f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
2203 del_timer_sync(&adapter->phy_info_timer); 2203 del_timer_sync(&adapter->phy_info_timer);
2204 2204
2205 /* record the stats before reset*/ 2205 /* record the stats before reset*/
2206 mutex_lock(&adapter->stats64_lock); 2206 spin_lock(&adapter->stats64_lock);
2207 igb_update_stats(adapter); 2207 igb_update_stats(adapter);
2208 mutex_unlock(&adapter->stats64_lock); 2208 spin_unlock(&adapter->stats64_lock);
2209 2209
2210 adapter->link_speed = 0; 2210 adapter->link_speed = 0;
2211 adapter->link_duplex = 0; 2211 adapter->link_duplex = 0;
@@ -3840,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
3840 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 3840 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3841 3841
3842 spin_lock_init(&adapter->nfc_lock); 3842 spin_lock_init(&adapter->nfc_lock);
3843 mutex_init(&adapter->stats64_lock); 3843 spin_lock_init(&adapter->stats64_lock);
3844#ifdef CONFIG_PCI_IOV 3844#ifdef CONFIG_PCI_IOV
3845 switch (hw->mac.type) { 3845 switch (hw->mac.type) {
3846 case e1000_82576: 3846 case e1000_82576:
@@ -5406,9 +5406,9 @@ no_wait:
5406 } 5406 }
5407 } 5407 }
5408 5408
5409 mutex_lock(&adapter->stats64_lock); 5409 spin_lock(&adapter->stats64_lock);
5410 igb_update_stats(adapter); 5410 igb_update_stats(adapter);
5411 mutex_unlock(&adapter->stats64_lock); 5411 spin_unlock(&adapter->stats64_lock);
5412 5412
5413 for (i = 0; i < adapter->num_tx_queues; i++) { 5413 for (i = 0; i < adapter->num_tx_queues; i++) {
5414 struct igb_ring *tx_ring = adapter->tx_ring[i]; 5414 struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6235,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev,
6235{ 6235{
6236 struct igb_adapter *adapter = netdev_priv(netdev); 6236 struct igb_adapter *adapter = netdev_priv(netdev);
6237 6237
6238 mutex_lock(&adapter->stats64_lock); 6238 spin_lock(&adapter->stats64_lock);
6239 igb_update_stats(adapter); 6239 igb_update_stats(adapter);
6240 memcpy(stats, &adapter->stats64, sizeof(*stats)); 6240 memcpy(stats, &adapter->stats64, sizeof(*stats));
6241 mutex_unlock(&adapter->stats64_lock); 6241 spin_unlock(&adapter->stats64_lock);
6242} 6242}
6243 6243
6244/** 6244/**
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1d4d1686909a..e5ac2d3fd816 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
680 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 680 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
681 txdr->size = ALIGN(txdr->size, 4096); 681 txdr->size = ALIGN(txdr->size, 4096);
682 682
683 txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 683 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
684 GFP_KERNEL); 684 GFP_KERNEL);
685 if (!txdr->desc) { 685 if (!txdr->desc) {
686 vfree(txdr->buffer_info); 686 vfree(txdr->buffer_info);
687 return -ENOMEM; 687 return -ENOMEM;
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
764 rxdr->size = ALIGN(rxdr->size, 4096); 764 rxdr->size = ALIGN(rxdr->size, 4096);
765 765
766 rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 766 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
767 GFP_KERNEL); 767 GFP_KERNEL);
768 768
769 if (!rxdr->desc) { 769 if (!rxdr->desc) {
770 vfree(rxdr->buffer_info); 770 vfree(rxdr->buffer_info);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e0875476a780..16066c2d5b3a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2044 u32 txq_dma; 2044 u32 txq_dma;
2045 2045
2046 /* Allocate memory for TX descriptors */ 2046 /* Allocate memory for TX descriptors */
2047 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, 2047 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2048 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, 2048 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2049 &aggr_txq->descs_dma, GFP_KERNEL); 2049 &aggr_txq->descs_dma, GFP_KERNEL);
2050 if (!aggr_txq->descs) 2050 if (!aggr_txq->descs)
2051 return -ENOMEM; 2051 return -ENOMEM;
2052 2052
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 742f0c1f60df..6d55e3d0b7ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -825,7 +825,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
825 if (!cgx->cgx_cmd_workq) { 825 if (!cgx->cgx_cmd_workq) {
826 dev_err(dev, "alloc workqueue failed for cgx cmd"); 826 dev_err(dev, "alloc workqueue failed for cgx cmd");
827 err = -ENOMEM; 827 err = -ENOMEM;
828 goto err_release_regions; 828 goto err_free_irq_vectors;
829 } 829 }
830 830
831 list_add(&cgx->cgx_list, &cgx_list); 831 list_add(&cgx->cgx_list, &cgx_list);
@@ -841,6 +841,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
841err_release_lmac: 841err_release_lmac:
842 cgx_lmac_exit(cgx); 842 cgx_lmac_exit(cgx);
843 list_del(&cgx->cgx_list); 843 list_del(&cgx->cgx_list);
844err_free_irq_vectors:
845 pci_free_irq_vectors(pdev);
844err_release_regions: 846err_release_regions:
845 pci_release_regions(pdev); 847 pci_release_regions(pdev);
846err_disable_device: 848err_disable_device:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index ec50a21c5aaf..e332e82fc066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -64,7 +64,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
64 64
65 qmem->entry_sz = entry_sz; 65 qmem->entry_sz = entry_sz;
66 qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; 66 qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
67 qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz, 67 qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
68 &qmem->iova, GFP_KERNEL); 68 &qmem->iova, GFP_KERNEL);
69 if (!qmem->base) 69 if (!qmem->base)
70 return -ENOMEM; 70 return -ENOMEM;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 0bd4351b2a49..f8a6d6e3cb7a 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
557 * table is full. 557 * table is full.
558 */ 558 */
559 if (!pep->htpr) { 559 if (!pep->htpr) {
560 pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent, 560 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
561 HASH_ADDR_TABLE_SIZE, 561 HASH_ADDR_TABLE_SIZE,
562 &pep->htpr_dma, GFP_KERNEL); 562 &pep->htpr_dma, GFP_KERNEL);
563 if (!pep->htpr) 563 if (!pep->htpr)
564 return -ENOMEM; 564 return -ENOMEM;
565 } else { 565 } else {
@@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev)
1044 pep->rx_desc_count = 0; 1044 pep->rx_desc_count = 0;
1045 size = pep->rx_ring_size * sizeof(struct rx_desc); 1045 size = pep->rx_ring_size * sizeof(struct rx_desc);
1046 pep->rx_desc_area_size = size; 1046 pep->rx_desc_area_size = size;
1047 pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1047 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1048 &pep->rx_desc_dma, 1048 &pep->rx_desc_dma,
1049 GFP_KERNEL); 1049 GFP_KERNEL);
1050 if (!pep->p_rx_desc_area) 1050 if (!pep->p_rx_desc_area)
1051 goto out; 1051 goto out;
1052 1052
@@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev)
1103 pep->tx_desc_count = 0; 1103 pep->tx_desc_count = 0;
1104 size = pep->tx_ring_size * sizeof(struct tx_desc); 1104 size = pep->tx_ring_size * sizeof(struct tx_desc);
1105 pep->tx_desc_area_size = size; 1105 pep->tx_desc_area_size = size;
1106 pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size, 1106 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1107 &pep->tx_desc_dma, 1107 &pep->tx_desc_dma,
1108 GFP_KERNEL); 1108 GFP_KERNEL);
1109 if (!pep->p_tx_desc_area) 1109 if (!pep->p_tx_desc_area)
1110 goto out; 1110 goto out;
1111 /* Initialize the next_desc_ptr links in the Tx descriptors ring */ 1111 /* Initialize the next_desc_ptr links in the Tx descriptors ring */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 399f565dd85a..49f926b7a91c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev)
258 258
259 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); 259 mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
260 260
261 if (dev->phydev->link)
262 netif_carrier_on(dev);
263 else
264 netif_carrier_off(dev);
265
266 if (!of_phy_is_fixed_link(mac->of_node)) 261 if (!of_phy_is_fixed_link(mac->of_node))
267 phy_print_status(dev->phydev); 262 phy_print_status(dev->phydev);
268} 263}
@@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev)
347 if (mtk_phy_connect_node(eth, mac, np)) 342 if (mtk_phy_connect_node(eth, mac, np))
348 goto err_phy; 343 goto err_phy;
349 344
350 dev->phydev->autoneg = AUTONEG_ENABLE;
351 dev->phydev->speed = 0;
352 dev->phydev->duplex = 0;
353
354 phy_set_max_speed(dev->phydev, SPEED_1000);
355 phy_support_asym_pause(dev->phydev);
356 linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
357 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
358 dev->phydev->advertising);
359 phy_start_aneg(dev->phydev);
360
361 of_node_put(np); 345 of_node_put(np);
362 346
363 return 0; 347 return 0;
@@ -598,10 +582,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
598 dma_addr_t dma_addr; 582 dma_addr_t dma_addr;
599 int i; 583 int i;
600 584
601 eth->scratch_ring = dma_zalloc_coherent(eth->dev, 585 eth->scratch_ring = dma_alloc_coherent(eth->dev,
602 cnt * sizeof(struct mtk_tx_dma), 586 cnt * sizeof(struct mtk_tx_dma),
603 &eth->phy_scratch_ring, 587 &eth->phy_scratch_ring,
604 GFP_ATOMIC); 588 GFP_ATOMIC);
605 if (unlikely(!eth->scratch_ring)) 589 if (unlikely(!eth->scratch_ring))
606 return -ENOMEM; 590 return -ENOMEM;
607 591
@@ -1213,8 +1197,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
1213 if (!ring->buf) 1197 if (!ring->buf)
1214 goto no_tx_mem; 1198 goto no_tx_mem;
1215 1199
1216 ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz, 1200 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1217 &ring->phys, GFP_ATOMIC); 1201 &ring->phys, GFP_ATOMIC);
1218 if (!ring->dma) 1202 if (!ring->dma)
1219 goto no_tx_mem; 1203 goto no_tx_mem;
1220 1204
@@ -1310,9 +1294,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1310 return -ENOMEM; 1294 return -ENOMEM;
1311 } 1295 }
1312 1296
1313 ring->dma = dma_zalloc_coherent(eth->dev, 1297 ring->dma = dma_alloc_coherent(eth->dev,
1314 rx_dma_size * sizeof(*ring->dma), 1298 rx_dma_size * sizeof(*ring->dma),
1315 &ring->phys, GFP_ATOMIC); 1299 &ring->phys, GFP_ATOMIC);
1316 if (!ring->dma) 1300 if (!ring->dma)
1317 return -ENOMEM; 1301 return -ENOMEM;
1318 1302
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 9af34e03892c..dbc483e4a2ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
584 buf->npages = 1; 584 buf->npages = 1;
585 buf->page_shift = get_order(size) + PAGE_SHIFT; 585 buf->page_shift = get_order(size) + PAGE_SHIFT;
586 buf->direct.buf = 586 buf->direct.buf =
587 dma_zalloc_coherent(&dev->persist->pdev->dev, 587 dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
588 size, &t, GFP_KERNEL); 588 GFP_KERNEL);
589 if (!buf->direct.buf) 589 if (!buf->direct.buf)
590 return -ENOMEM; 590 return -ENOMEM;
591 591
@@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
624 624
625 for (i = 0; i < buf->nbufs; ++i) { 625 for (i = 0; i < buf->nbufs; ++i) {
626 buf->page_list[i].buf = 626 buf->page_list[i].buf =
627 dma_zalloc_coherent(&dev->persist->pdev->dev, 627 dma_alloc_coherent(&dev->persist->pdev->dev,
628 PAGE_SIZE, &t, GFP_KERNEL); 628 PAGE_SIZE, &t, GFP_KERNEL);
629 if (!buf->page_list[i].buf) 629 if (!buf->page_list[i].buf)
630 goto err_free; 630 goto err_free;
631 631
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 4b4351141b94..d89a3da89e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
57 int i; 57 int i;
58 58
59 if (chunk->nsg > 0) 59 if (chunk->nsg > 0)
60 pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages, 60 dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
61 PCI_DMA_BIDIRECTIONAL); 61 DMA_BIDIRECTIONAL);
62 62
63 for (i = 0; i < chunk->npages; ++i) 63 for (i = 0; i < chunk->npages; ++i)
64 __free_pages(sg_page(&chunk->mem[i]), 64 __free_pages(sg_page(&chunk->sg[i]),
65 get_order(chunk->mem[i].length)); 65 get_order(chunk->sg[i].length));
66} 66}
67 67
68static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) 68static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
71 71
72 for (i = 0; i < chunk->npages; ++i) 72 for (i = 0; i < chunk->npages; ++i)
73 dma_free_coherent(&dev->persist->pdev->dev, 73 dma_free_coherent(&dev->persist->pdev->dev,
74 chunk->mem[i].length, 74 chunk->buf[i].size,
75 lowmem_page_address(sg_page(&chunk->mem[i])), 75 chunk->buf[i].addr,
76 sg_dma_address(&chunk->mem[i])); 76 chunk->buf[i].dma_addr);
77} 77}
78 78
79void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent) 79void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
111 return 0; 111 return 0;
112} 112}
113 113
114static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem, 114static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
115 int order, gfp_t gfp_mask) 115 int order, gfp_t gfp_mask)
116{ 116{
117 void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, 117 buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
118 &sg_dma_address(mem), gfp_mask); 118 &buf->dma_addr, gfp_mask);
119 if (!buf) 119 if (!buf->addr)
120 return -ENOMEM; 120 return -ENOMEM;
121 121
122 if (offset_in_page(buf)) { 122 if (offset_in_page(buf->addr)) {
123 dma_free_coherent(dev, PAGE_SIZE << order, 123 dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
124 buf, sg_dma_address(mem)); 124 buf->dma_addr);
125 return -ENOMEM; 125 return -ENOMEM;
126 } 126 }
127 127
128 sg_set_buf(mem, buf, PAGE_SIZE << order); 128 buf->size = PAGE_SIZE << order;
129 sg_dma_len(mem) = PAGE_SIZE << order;
130 return 0; 129 return 0;
131} 130}
132 131
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
159 158
160 while (npages > 0) { 159 while (npages > 0) {
161 if (!chunk) { 160 if (!chunk) {
162 chunk = kmalloc_node(sizeof(*chunk), 161 chunk = kzalloc_node(sizeof(*chunk),
163 gfp_mask & ~(__GFP_HIGHMEM | 162 gfp_mask & ~(__GFP_HIGHMEM |
164 __GFP_NOWARN), 163 __GFP_NOWARN),
165 dev->numa_node); 164 dev->numa_node);
166 if (!chunk) { 165 if (!chunk) {
167 chunk = kmalloc(sizeof(*chunk), 166 chunk = kzalloc(sizeof(*chunk),
168 gfp_mask & ~(__GFP_HIGHMEM | 167 gfp_mask & ~(__GFP_HIGHMEM |
169 __GFP_NOWARN)); 168 __GFP_NOWARN));
170 if (!chunk) 169 if (!chunk)
171 goto fail; 170 goto fail;
172 } 171 }
172 chunk->coherent = coherent;
173 173
174 sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN); 174 if (!coherent)
175 chunk->npages = 0; 175 sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
176 chunk->nsg = 0;
177 list_add_tail(&chunk->list, &icm->chunk_list); 176 list_add_tail(&chunk->list, &icm->chunk_list);
178 } 177 }
179 178
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
186 185
187 if (coherent) 186 if (coherent)
188 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, 187 ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
189 &chunk->mem[chunk->npages], 188 &chunk->buf[chunk->npages],
190 cur_order, mask); 189 cur_order, mask);
191 else 190 else
192 ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], 191 ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
193 cur_order, mask, 192 cur_order, mask,
194 dev->numa_node); 193 dev->numa_node);
195 194
@@ -205,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
205 if (coherent) 204 if (coherent)
206 ++chunk->nsg; 205 ++chunk->nsg;
207 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) { 206 else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
208 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, 207 chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
209 chunk->npages, 208 chunk->sg, chunk->npages,
210 PCI_DMA_BIDIRECTIONAL); 209 DMA_BIDIRECTIONAL);
211 210
212 if (chunk->nsg <= 0) 211 if (chunk->nsg <= 0)
213 goto fail; 212 goto fail;
@@ -220,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
220 } 219 }
221 220
222 if (!coherent && chunk) { 221 if (!coherent && chunk) {
223 chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem, 222 chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
224 chunk->npages, 223 chunk->npages, DMA_BIDIRECTIONAL);
225 PCI_DMA_BIDIRECTIONAL);
226 224
227 if (chunk->nsg <= 0) 225 if (chunk->nsg <= 0)
228 goto fail; 226 goto fail;
@@ -320,7 +318,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
320 u64 idx; 318 u64 idx;
321 struct mlx4_icm_chunk *chunk; 319 struct mlx4_icm_chunk *chunk;
322 struct mlx4_icm *icm; 320 struct mlx4_icm *icm;
323 struct page *page = NULL; 321 void *addr = NULL;
324 322
325 if (!table->lowmem) 323 if (!table->lowmem)
326 return NULL; 324 return NULL;
@@ -336,28 +334,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
336 334
337 list_for_each_entry(chunk, &icm->chunk_list, list) { 335 list_for_each_entry(chunk, &icm->chunk_list, list) {
338 for (i = 0; i < chunk->npages; ++i) { 336 for (i = 0; i < chunk->npages; ++i) {
337 dma_addr_t dma_addr;
338 size_t len;
339
340 if (table->coherent) {
341 len = chunk->buf[i].size;
342 dma_addr = chunk->buf[i].dma_addr;
343 addr = chunk->buf[i].addr;
344 } else {
345 struct page *page;
346
347 len = sg_dma_len(&chunk->sg[i]);
348 dma_addr = sg_dma_address(&chunk->sg[i]);
349
350 /* XXX: we should never do this for highmem
351 * allocation. This function either needs
352 * to be split, or the kernel virtual address
353 * return needs to be made optional.
354 */
355 page = sg_page(&chunk->sg[i]);
356 addr = lowmem_page_address(page);
357 }
358
339 if (dma_handle && dma_offset >= 0) { 359 if (dma_handle && dma_offset >= 0) {
340 if (sg_dma_len(&chunk->mem[i]) > dma_offset) 360 if (len > dma_offset)
341 *dma_handle = sg_dma_address(&chunk->mem[i]) + 361 *dma_handle = dma_addr + dma_offset;
342 dma_offset; 362 dma_offset -= len;
343 dma_offset -= sg_dma_len(&chunk->mem[i]);
344 } 363 }
364
345 /* 365 /*
346 * DMA mapping can merge pages but not split them, 366 * DMA mapping can merge pages but not split them,
347 * so if we found the page, dma_handle has already 367 * so if we found the page, dma_handle has already
348 * been assigned to. 368 * been assigned to.
349 */ 369 */
350 if (chunk->mem[i].length > offset) { 370 if (len > offset)
351 page = sg_page(&chunk->mem[i]);
352 goto out; 371 goto out;
353 } 372 offset -= len;
354 offset -= chunk->mem[i].length;
355 } 373 }
356 } 374 }
357 375
376 addr = NULL;
358out: 377out:
359 mutex_unlock(&table->mutex); 378 mutex_unlock(&table->mutex);
360 return page ? lowmem_page_address(page) + offset : NULL; 379 return addr ? addr + offset : NULL;
361} 380}
362 381
363int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, 382int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a490557..d199874b1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum {
47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT, 47 MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
48}; 48};
49 49
50struct mlx4_icm_buf {
51 void *addr;
52 size_t size;
53 dma_addr_t dma_addr;
54};
55
50struct mlx4_icm_chunk { 56struct mlx4_icm_chunk {
51 struct list_head list; 57 struct list_head list;
52 int npages; 58 int npages;
53 int nsg; 59 int nsg;
54 struct scatterlist mem[MLX4_ICM_CHUNK_LEN]; 60 bool coherent;
61 union {
62 struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
63 struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
64 };
55}; 65};
56 66
57struct mlx4_icm { 67struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
114 124
115static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter) 125static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
116{ 126{
117 return sg_dma_address(&iter->chunk->mem[iter->page_idx]); 127 if (iter->chunk->coherent)
128 return iter->chunk->buf[iter->page_idx].dma_addr;
129 else
130 return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
118} 131}
119 132
120static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter) 133static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
121{ 134{
122 return sg_dma_len(&iter->chunk->mem[iter->page_idx]); 135 if (iter->chunk->coherent)
136 return iter->chunk->buf[iter->page_idx].size;
137 else
138 return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
123} 139}
124 140
125int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm); 141int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 456f30007ad6..421b9c3c8bf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
63 mutex_lock(&priv->alloc_mutex); 63 mutex_lock(&priv->alloc_mutex);
64 original_node = dev_to_node(&dev->pdev->dev); 64 original_node = dev_to_node(&dev->pdev->dev);
65 set_dev_node(&dev->pdev->dev, node); 65 set_dev_node(&dev->pdev->dev, node);
66 cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size, 66 cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
67 dma_handle, GFP_KERNEL); 67 GFP_KERNEL);
68 set_dev_node(&dev->pdev->dev, original_node); 68 set_dev_node(&dev->pdev->dev, original_node);
69 mutex_unlock(&priv->alloc_mutex); 69 mutex_unlock(&priv->alloc_mutex);
70 return cpu_handle; 70 return cpu_handle;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d3125cdf69db..3e0fa8a8077b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1789,8 +1789,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1789{ 1789{
1790 struct device *ddev = &dev->pdev->dev; 1790 struct device *ddev = &dev->pdev->dev;
1791 1791
1792 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, 1792 cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
1793 &cmd->alloc_dma, GFP_KERNEL); 1793 &cmd->alloc_dma, GFP_KERNEL);
1794 if (!cmd->cmd_alloc_buf) 1794 if (!cmd->cmd_alloc_buf)
1795 return -ENOMEM; 1795 return -ENOMEM;
1796 1796
@@ -1804,9 +1804,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
1804 1804
1805 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, 1805 dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
1806 cmd->alloc_dma); 1806 cmd->alloc_dma);
1807 cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, 1807 cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
1808 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 1808 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
1809 &cmd->alloc_dma, GFP_KERNEL); 1809 &cmd->alloc_dma, GFP_KERNEL);
1810 if (!cmd->cmd_alloc_buf) 1810 if (!cmd->cmd_alloc_buf)
1811 return -ENOMEM; 1811 return -ENOMEM;
1812 1812
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c9df08133718..3bbccead2f63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -844,9 +844,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
844 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, 844 ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
845 Autoneg); 845 Autoneg);
846 846
847 if (get_fec_supported_advertised(mdev, link_ksettings)) 847 err = get_fec_supported_advertised(mdev, link_ksettings);
848 if (err) {
848 netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", 849 netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
849 __func__, err); 850 __func__, err);
851 err = 0; /* don't fail caps query because of FEC error */
852 }
850 853
851 if (!an_disable_admin) 854 if (!an_disable_admin)
852 ethtool_link_ksettings_add_link_mode(link_ksettings, 855 ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 96cc0c6a4014..04736212a21c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv {
58 struct list_head list; 58 struct list_head list;
59}; 59};
60 60
61static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); 61static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
62 struct net_device *netdev);
62 63
63static void mlx5e_rep_get_drvinfo(struct net_device *dev, 64static void mlx5e_rep_get_drvinfo(struct net_device *dev,
64 struct ethtool_drvinfo *drvinfo) 65 struct ethtool_drvinfo *drvinfo)
@@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
179 180
180 s->tx_packets += sq_stats->packets; 181 s->tx_packets += sq_stats->packets;
181 s->tx_bytes += sq_stats->bytes; 182 s->tx_bytes += sq_stats->bytes;
183 s->tx_queue_dropped += sq_stats->dropped;
182 } 184 }
183 } 185 }
184} 186}
@@ -663,7 +665,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
663 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; 665 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
664 666
665 list_for_each_entry_safe(cb_priv, temp, head, list) { 667 list_for_each_entry_safe(cb_priv, temp, head, list) {
666 mlx5e_rep_indr_unregister_block(cb_priv->netdev); 668 mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
667 kfree(cb_priv); 669 kfree(cb_priv);
668 } 670 }
669} 671}
@@ -735,7 +737,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
735 737
736 err = tcf_block_cb_register(f->block, 738 err = tcf_block_cb_register(f->block,
737 mlx5e_rep_indr_setup_block_cb, 739 mlx5e_rep_indr_setup_block_cb,
738 netdev, indr_priv, f->extack); 740 indr_priv, indr_priv, f->extack);
739 if (err) { 741 if (err) {
740 list_del(&indr_priv->list); 742 list_del(&indr_priv->list);
741 kfree(indr_priv); 743 kfree(indr_priv);
@@ -743,14 +745,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
743 745
744 return err; 746 return err;
745 case TC_BLOCK_UNBIND: 747 case TC_BLOCK_UNBIND:
748 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
749 if (!indr_priv)
750 return -ENOENT;
751
746 tcf_block_cb_unregister(f->block, 752 tcf_block_cb_unregister(f->block,
747 mlx5e_rep_indr_setup_block_cb, 753 mlx5e_rep_indr_setup_block_cb,
748 netdev); 754 indr_priv);
749 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); 755 list_del(&indr_priv->list);
750 if (indr_priv) { 756 kfree(indr_priv);
751 list_del(&indr_priv->list);
752 kfree(indr_priv);
753 }
754 757
755 return 0; 758 return 0;
756 default: 759 default:
@@ -779,7 +782,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
779 782
780 err = __tc_indr_block_cb_register(netdev, rpriv, 783 err = __tc_indr_block_cb_register(netdev, rpriv,
781 mlx5e_rep_indr_setup_tc_cb, 784 mlx5e_rep_indr_setup_tc_cb,
782 netdev); 785 rpriv);
783 if (err) { 786 if (err) {
784 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); 787 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
785 788
@@ -789,10 +792,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
789 return err; 792 return err;
790} 793}
791 794
792static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) 795static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
796 struct net_device *netdev)
793{ 797{
794 __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, 798 __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
795 netdev); 799 rpriv);
796} 800}
797 801
798static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, 802static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -811,7 +815,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
811 mlx5e_rep_indr_register_block(rpriv, netdev); 815 mlx5e_rep_indr_register_block(rpriv, netdev);
812 break; 816 break;
813 case NETDEV_UNREGISTER: 817 case NETDEV_UNREGISTER:
814 mlx5e_rep_indr_unregister_block(netdev); 818 mlx5e_rep_indr_unregister_block(rpriv, netdev);
815 break; 819 break;
816 } 820 }
817 return NOTIFY_OK; 821 return NOTIFY_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d0bb5ff8c26..f86e4804e83e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
732 ((struct ipv6hdr *)ip_p)->nexthdr; 732 ((struct ipv6hdr *)ip_p)->nexthdr;
733} 733}
734 734
735#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
736
735static inline void mlx5e_handle_csum(struct net_device *netdev, 737static inline void mlx5e_handle_csum(struct net_device *netdev,
736 struct mlx5_cqe64 *cqe, 738 struct mlx5_cqe64 *cqe,
737 struct mlx5e_rq *rq, 739 struct mlx5e_rq *rq,
@@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
754 if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) 756 if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
755 goto csum_unnecessary; 757 goto csum_unnecessary;
756 758
759 /* CQE csum doesn't cover padding octets in short ethernet
760 * frames. And the pad field is appended prior to calculating
761 * and appending the FCS field.
762 *
763 * Detecting these padded frames requires to verify and parse
764 * IP headers, so we simply force all those small frames to be
765 * CHECKSUM_UNNECESSARY even if they are not padded.
766 */
767 if (short_frame(skb->len))
768 goto csum_unnecessary;
769
757 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { 770 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
758 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) 771 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
759 goto csum_unnecessary; 772 goto csum_unnecessary;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 080ddd1942ec..b9a25aed5d11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
78 depends on IPV6 || IPV6=n 78 depends on IPV6 || IPV6=n
79 depends on NET_IPGRE || NET_IPGRE=n 79 depends on NET_IPGRE || NET_IPGRE=n
80 depends on IPV6_GRE || IPV6_GRE=n 80 depends on IPV6_GRE || IPV6_GRE=n
81 depends on VXLAN || VXLAN=n
81 select GENERIC_ALLOCATOR 82 select GENERIC_ALLOCATOR
82 select PARMAN 83 select PARMAN
83 select OBJAGG 84 select OBJAGG
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 66b8098c6fd2..a2321fe8d6a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
604 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); 604 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
605 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); 605 u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
606 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); 606 u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
607 char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
608
609 memcpy(ncqe, cqe, q->elem_size);
610 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
607 611
608 if (sendq) { 612 if (sendq) {
609 struct mlxsw_pci_queue *sdq; 613 struct mlxsw_pci_queue *sdq;
610 614
611 sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); 615 sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
612 mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, 616 mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
613 wqe_counter, cqe); 617 wqe_counter, ncqe);
614 q->u.cq.comp_sdq_count++; 618 q->u.cq.comp_sdq_count++;
615 } else { 619 } else {
616 struct mlxsw_pci_queue *rdq; 620 struct mlxsw_pci_queue *rdq;
617 621
618 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); 622 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
619 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, 623 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
620 wqe_counter, q->u.cq.v, cqe); 624 wqe_counter, q->u.cq.v, ncqe);
621 q->u.cq.comp_rdq_count++; 625 q->u.cq.comp_rdq_count++;
622 } 626 }
623 if (++items == credits) 627 if (++items == credits)
624 break; 628 break;
625 } 629 }
626 if (items) { 630 if (items)
627 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
628 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); 631 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
629 }
630} 632}
631 633
632static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) 634static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
@@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
1365 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); 1367 u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
1366 1368
1367 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) 1369 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
1368 break; 1370 return 0;
1369 cond_resched(); 1371 cond_resched();
1370 } while (time_before(jiffies, end)); 1372 } while (time_before(jiffies, end));
1371 return 0; 1373 return -EBUSY;
1372} 1374}
1373 1375
1374static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) 1376static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index bb99f6d41fe0..ffee38e36ce8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
27 27
28#define MLXSW_PCI_SW_RESET 0xF0010 28#define MLXSW_PCI_SW_RESET 0xF0010
29#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) 29#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
30#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 30#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
31#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 31#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
32#define MLXSW_PCI_FW_READY 0xA1844 32#define MLXSW_PCI_FW_READY 0xA1844
33#define MLXSW_PCI_FW_READY_MASK 0xFFFF 33#define MLXSW_PCI_FW_READY_MASK 0xFFFF
@@ -53,6 +53,7 @@
53#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ 53#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
54#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ 54#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
55#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ 55#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
56#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE
56#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ 57#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
57#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) 58#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
58#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) 59#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eed1045e4d96..32519c93df17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -5005,12 +5005,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
5005 lower_dev, 5005 lower_dev,
5006 upper_dev); 5006 upper_dev);
5007 } else if (netif_is_lag_master(upper_dev)) { 5007 } else if (netif_is_lag_master(upper_dev)) {
5008 if (info->linking) 5008 if (info->linking) {
5009 err = mlxsw_sp_port_lag_join(mlxsw_sp_port, 5009 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
5010 upper_dev); 5010 upper_dev);
5011 else 5011 } else {
5012 mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
5013 false);
5012 mlxsw_sp_port_lag_leave(mlxsw_sp_port, 5014 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
5013 upper_dev); 5015 upper_dev);
5016 }
5014 } else if (netif_is_ovs_master(upper_dev)) { 5017 } else if (netif_is_ovs_master(upper_dev)) {
5015 if (info->linking) 5018 if (info->linking)
5016 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port); 5019 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index b0f2d8e8ded0..ac222833a5cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -72,7 +72,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
72 act_set = mlxsw_afa_block_first_set(rulei->act_block); 72 act_set = mlxsw_afa_block_first_set(rulei->act_block);
73 mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); 73 mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
74 74
75 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); 75 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
76 if (err)
77 goto err_ptce2_write;
78
79 return 0;
80
81err_ptce2_write:
82 cregion->ops->entry_remove(cregion, centry);
83 return err;
76} 84}
77 85
78static void 86static void
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 1c19feefa5f2..2941967e1cc5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1022,7 +1022,6 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
1022{ 1022{
1023 struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; 1023 struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
1024 1024
1025 ASSERT_RTNL();
1026 objagg_obj_put(aregion->erp_table->objagg, objagg_obj); 1025 objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
1027} 1026}
1028 1027
@@ -1054,7 +1053,6 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
1054 const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); 1053 const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
1055 unsigned int erp_bank; 1054 unsigned int erp_bank;
1056 1055
1057 ASSERT_RTNL();
1058 if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) 1056 if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
1059 return; 1057 return;
1060 1058
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 055cc6943b34..9d9aa28684af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
997static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { 997static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
998 .type = MLXSW_SP_FID_TYPE_DUMMY, 998 .type = MLXSW_SP_FID_TYPE_DUMMY,
999 .fid_size = sizeof(struct mlxsw_sp_fid), 999 .fid_size = sizeof(struct mlxsw_sp_fid),
1000 .start_index = MLXSW_SP_RFID_BASE - 1, 1000 .start_index = VLAN_N_VID - 1,
1001 .end_index = MLXSW_SP_RFID_BASE - 1, 1001 .end_index = VLAN_N_VID - 1,
1002 .ops = &mlxsw_sp_fid_dummy_ops, 1002 .ops = &mlxsw_sp_fid_dummy_ops,
1003}; 1003};
1004 1004
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 0a31fff2516e..fb1c48c698f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -816,14 +816,14 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
816 ops = nve->nve_ops_arr[params->type]; 816 ops = nve->nve_ops_arr[params->type];
817 817
818 if (!ops->can_offload(nve, params->dev, extack)) 818 if (!ops->can_offload(nve, params->dev, extack))
819 return -EOPNOTSUPP; 819 return -EINVAL;
820 820
821 memset(&config, 0, sizeof(config)); 821 memset(&config, 0, sizeof(config));
822 ops->nve_config(nve, params->dev, &config); 822 ops->nve_config(nve, params->dev, &config);
823 if (nve->num_nve_tunnels && 823 if (nve->num_nve_tunnels &&
824 memcmp(&config, &nve->config, sizeof(config))) { 824 memcmp(&config, &nve->config, sizeof(config))) {
825 NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration"); 825 NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
826 return -EOPNOTSUPP; 826 return -EINVAL;
827 } 827 }
828 828
829 err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config); 829 err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1bd2c6e15f8d..c772109b638d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1078,8 +1078,7 @@ static int
1078mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, 1078mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1079 struct mlxsw_sp_bridge_port *bridge_port, 1079 struct mlxsw_sp_bridge_port *bridge_port,
1080 u16 vid, bool is_untagged, bool is_pvid, 1080 u16 vid, bool is_untagged, bool is_pvid,
1081 struct netlink_ext_ack *extack, 1081 struct netlink_ext_ack *extack)
1082 struct switchdev_trans *trans)
1083{ 1082{
1084 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); 1083 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1085 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1084 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -1095,9 +1094,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1095 mlxsw_sp_port_vlan->bridge_port != bridge_port) 1094 mlxsw_sp_port_vlan->bridge_port != bridge_port)
1096 return -EEXIST; 1095 return -EEXIST;
1097 1096
1098 if (switchdev_trans_ph_prepare(trans))
1099 return 0;
1100
1101 if (!mlxsw_sp_port_vlan) { 1097 if (!mlxsw_sp_port_vlan) {
1102 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, 1098 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1103 vid); 1099 vid);
@@ -1188,6 +1184,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1188 return err; 1184 return err;
1189 } 1185 }
1190 1186
1187 if (switchdev_trans_ph_commit(trans))
1188 return 0;
1189
1191 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1190 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1192 if (WARN_ON(!bridge_port)) 1191 if (WARN_ON(!bridge_port))
1193 return -EINVAL; 1192 return -EINVAL;
@@ -1200,7 +1199,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1200 1199
1201 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, 1200 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1202 vid, flag_untagged, 1201 vid, flag_untagged,
1203 flag_pvid, extack, trans); 1202 flag_pvid, extack);
1204 if (err) 1203 if (err)
1205 return err; 1204 return err;
1206 } 1205 }
@@ -1234,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1234static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic) 1233static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1235{ 1234{
1236 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS : 1235 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1237 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY; 1236 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1238} 1237}
1239 1238
1240static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) 1239static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1291,7 +1290,7 @@ out:
1291static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, 1290static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1292 const char *mac, u16 fid, bool adding, 1291 const char *mac, u16 fid, bool adding,
1293 enum mlxsw_reg_sfd_rec_action action, 1292 enum mlxsw_reg_sfd_rec_action action,
1294 bool dynamic) 1293 enum mlxsw_reg_sfd_rec_policy policy)
1295{ 1294{
1296 char *sfd_pl; 1295 char *sfd_pl;
1297 u8 num_rec; 1296 u8 num_rec;
@@ -1302,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1302 return -ENOMEM; 1301 return -ENOMEM;
1303 1302
1304 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); 1303 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1305 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), 1304 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1306 mac, fid, action, local_port);
1307 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl); 1305 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1308 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); 1306 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1309 if (err) 1307 if (err)
@@ -1322,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1322 bool dynamic) 1320 bool dynamic)
1323{ 1321{
1324 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, 1322 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1325 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); 1323 MLXSW_REG_SFD_REC_ACTION_NOP,
1324 mlxsw_sp_sfd_rec_policy(dynamic));
1326} 1325}
1327 1326
1328int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, 1327int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1330,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1330{ 1329{
1331 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, 1330 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1332 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, 1331 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1333 false); 1332 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1334} 1333}
1335 1334
1336static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, 1335static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
@@ -1808,7 +1807,7 @@ static void
1808mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, 1807mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1809 struct mlxsw_sp_bridge_port *bridge_port, u16 vid) 1808 struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1810{ 1809{
1811 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid; 1810 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1812 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 1811 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1813 1812
1814 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); 1813 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -3207,7 +3206,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3207 struct mlxsw_sp_bridge_device *bridge_device, 3206 struct mlxsw_sp_bridge_device *bridge_device,
3208 const struct net_device *vxlan_dev, u16 vid, 3207 const struct net_device *vxlan_dev, u16 vid,
3209 bool flag_untagged, bool flag_pvid, 3208 bool flag_untagged, bool flag_pvid,
3210 struct switchdev_trans *trans,
3211 struct netlink_ext_ack *extack) 3209 struct netlink_ext_ack *extack)
3212{ 3210{
3213 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); 3211 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
@@ -3225,9 +3223,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3225 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) 3223 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
3226 return -EINVAL; 3224 return -EINVAL;
3227 3225
3228 if (switchdev_trans_ph_prepare(trans))
3229 return 0;
3230
3231 if (!netif_running(vxlan_dev)) 3226 if (!netif_running(vxlan_dev))
3232 return 0; 3227 return 0;
3233 3228
@@ -3345,6 +3340,9 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3345 3340
3346 port_obj_info->handled = true; 3341 port_obj_info->handled = true;
3347 3342
3343 if (switchdev_trans_ph_commit(trans))
3344 return 0;
3345
3348 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); 3346 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3349 if (!bridge_device) 3347 if (!bridge_device)
3350 return -EINVAL; 3348 return -EINVAL;
@@ -3358,8 +3356,7 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3358 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, 3356 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3359 vxlan_dev, vid, 3357 vxlan_dev, vid,
3360 flag_untagged, 3358 flag_untagged,
3361 flag_pvid, trans, 3359 flag_pvid, extack);
3362 extack);
3363 if (err) 3360 if (err)
3364 return err; 3361 return err;
3365 } 3362 }
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 20c9377e99cb..310807ef328b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
962 962
963 memset(&ksettings, 0, sizeof(ksettings)); 963 memset(&ksettings, 0, sizeof(ksettings));
964 phy_ethtool_get_link_ksettings(netdev, &ksettings); 964 phy_ethtool_get_link_ksettings(netdev, &ksettings);
965 local_advertisement = phy_read(phydev, MII_ADVERTISE); 965 local_advertisement =
966 if (local_advertisement < 0) 966 linkmode_adv_to_mii_adv_t(phydev->advertising);
967 return; 967 remote_advertisement =
968 968 linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
969 remote_advertisement = phy_read(phydev, MII_LPA);
970 if (remote_advertisement < 0)
971 return;
972 969
973 lan743x_phy_update_flowcontrol(adapter, 970 lan743x_phy_update_flowcontrol(adapter,
974 ksettings.base.duplex, 971 ksettings.base.duplex,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5f384f73007d..19ce0e605096 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3604 for (i = 0; i < mgp->num_slices; i++) { 3604 for (i = 0; i < mgp->num_slices; i++) {
3605 ss = &mgp->ss[i]; 3605 ss = &mgp->ss[i];
3606 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry); 3606 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3607 ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes, 3607 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3608 &ss->rx_done.bus, 3608 &ss->rx_done.bus,
3609 GFP_KERNEL); 3609 GFP_KERNEL);
3610 if (ss->rx_done.entry == NULL) 3610 if (ss->rx_done.entry == NULL)
3611 goto abort; 3611 goto abort;
3612 bytes = sizeof(*ss->fw_stats); 3612 bytes = sizeof(*ss->fw_stats);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e97636d2e6ee..7d2d4241498f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2170 tx_ring->cnt = dp->txd_cnt; 2170 tx_ring->cnt = dp->txd_cnt;
2171 2171
2172 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); 2172 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
2173 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, 2173 tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
2174 &tx_ring->dma, 2174 &tx_ring->dma,
2175 GFP_KERNEL | __GFP_NOWARN); 2175 GFP_KERNEL | __GFP_NOWARN);
2176 if (!tx_ring->txds) { 2176 if (!tx_ring->txds) {
2177 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2177 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2178 tx_ring->cnt); 2178 tx_ring->cnt);
@@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2328 2328
2329 rx_ring->cnt = dp->rxd_cnt; 2329 rx_ring->cnt = dp->rxd_cnt;
2330 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); 2330 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
2331 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, 2331 rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
2332 &rx_ring->dma, 2332 &rx_ring->dma,
2333 GFP_KERNEL | __GFP_NOWARN); 2333 GFP_KERNEL | __GFP_NOWARN);
2334 if (!rx_ring->rxds) { 2334 if (!rx_ring->rxds) {
2335 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", 2335 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2336 rx_ring->cnt); 2336 rx_ring->cnt);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 0611f2335b4a..1e408d1a9b5f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
287 priv->rx_bd_ci = 0; 287 priv->rx_bd_ci = 0;
288 288
289 /* Allocate the Tx and Rx buffer descriptors. */ 289 /* Allocate the Tx and Rx buffer descriptors. */
290 priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 290 priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
291 sizeof(*priv->tx_bd_v) * TX_BD_NUM, 291 sizeof(*priv->tx_bd_v) * TX_BD_NUM,
292 &priv->tx_bd_p, GFP_KERNEL); 292 &priv->tx_bd_p, GFP_KERNEL);
293 if (!priv->tx_bd_v) 293 if (!priv->tx_bd_v)
294 goto out; 294 goto out;
295 295
@@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
299 if (!priv->tx_skb) 299 if (!priv->tx_skb)
300 goto out; 300 goto out;
301 301
302 priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 302 priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
303 sizeof(*priv->rx_bd_v) * RX_BD_NUM, 303 sizeof(*priv->rx_bd_v) * RX_BD_NUM,
304 &priv->rx_bd_p, GFP_KERNEL); 304 &priv->rx_bd_p, GFP_KERNEL);
305 if (!priv->rx_bd_v) 305 if (!priv->rx_bd_v)
306 goto out; 306 goto out;
307 307
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 43c0c10dfeb7..552d930e3940 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1440 1440
1441 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY; 1441 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1442 rx_ring->rx_buff_pool = 1442 rx_ring->rx_buff_pool =
1443 dma_zalloc_coherent(&pdev->dev, size, 1443 dma_alloc_coherent(&pdev->dev, size,
1444 &rx_ring->rx_buff_pool_logic, GFP_KERNEL); 1444 &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
1445 if (!rx_ring->rx_buff_pool) 1445 if (!rx_ring->rx_buff_pool)
1446 return -ENOMEM; 1446 return -ENOMEM;
1447 1447
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1755 1755
1756 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1756 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1757 1757
1758 tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size, 1758 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1759 &tx_ring->dma, GFP_KERNEL); 1759 &tx_ring->dma, GFP_KERNEL);
1760 if (!tx_ring->desc) { 1760 if (!tx_ring->desc) {
1761 vfree(tx_ring->buffer_info); 1761 vfree(tx_ring->buffer_info);
1762 return -ENOMEM; 1762 return -ENOMEM;
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1798 return -ENOMEM; 1798 return -ENOMEM;
1799 1799
1800 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1800 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1801 rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size, 1801 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1802 &rx_ring->dma, GFP_KERNEL); 1802 &rx_ring->dma, GFP_KERNEL);
1803 if (!rx_ring->desc) { 1803 if (!rx_ring->desc) {
1804 vfree(rx_ring->buffer_info); 1804 vfree(rx_ring->buffer_info);
1805 return -ENOMEM; 1805 return -ENOMEM;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 8a31a02c9f47..d21041554507 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
401 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) 401 if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
402 goto out_ring_desc; 402 goto out_ring_desc;
403 403
404 ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev, 404 ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
405 RX_RING_SIZE * sizeof(u64), 405 RX_RING_SIZE * sizeof(u64),
406 &ring->buf_dma, GFP_KERNEL); 406 &ring->buf_dma, GFP_KERNEL);
407 if (!ring->buffers) 407 if (!ring->buffers)
408 goto out_ring_desc; 408 goto out_ring_desc;
409 409
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dc1c1b616084..c2ad405b2f50 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
936 u32 size = min_t(u32, total_size, psz); 936 u32 size = min_t(u32, total_size, psz);
937 void **p_virt = &p_mngr->t2[i].p_virt; 937 void **p_virt = &p_mngr->t2[i].p_virt;
938 938
939 *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 939 *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
940 size, &p_mngr->t2[i].p_phys, 940 &p_mngr->t2[i].p_phys,
941 GFP_KERNEL); 941 GFP_KERNEL);
942 if (!p_mngr->t2[i].p_virt) { 942 if (!p_mngr->t2[i].p_virt) {
943 rc = -ENOMEM; 943 rc = -ENOMEM;
944 goto t2_fail; 944 goto t2_fail;
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
1054 u32 size; 1054 u32 size;
1055 1055
1056 size = min_t(u32, sz_left, p_blk->real_size_in_page); 1056 size = min_t(u32, sz_left, p_blk->real_size_in_page);
1057 p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, 1057 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
1058 &p_phys, GFP_KERNEL); 1058 &p_phys, GFP_KERNEL);
1059 if (!p_virt) 1059 if (!p_virt)
1060 return -ENOMEM; 1060 return -ENOMEM;
1061 1061
@@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
2306 goto out0; 2306 goto out0;
2307 } 2307 }
2308 2308
2309 p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, 2309 p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2310 p_blk->real_size_in_page, &p_phys, 2310 p_blk->real_size_in_page, &p_phys,
2311 GFP_KERNEL); 2311 GFP_KERNEL);
2312 if (!p_virt) { 2312 if (!p_virt) {
2313 rc = -ENOMEM; 2313 rc = -ENOMEM;
2314 goto out1; 2314 goto out1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 90afd514ffe1..d9237c65a838 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1619,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
1619 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); 1619 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1620 rx_prod.bd_prod = cpu_to_le16(bd_prod); 1620 rx_prod.bd_prod = cpu_to_le16(bd_prod);
1621 rx_prod.cqe_prod = cpu_to_le16(cq_prod); 1621 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1622
1623 /* Make sure chain element is updated before ringing the doorbell */
1624 dma_wmb();
1625
1622 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); 1626 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1623} 1627}
1624 1628
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d344e9d43832..af38d3d73291 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
434 *(tx_ring->hw_consumer) = 0; 434 *(tx_ring->hw_consumer) = 0;
435 435
436 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 436 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
437 rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size, 437 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
438 &rq_phys_addr, GFP_KERNEL); 438 &rq_phys_addr, GFP_KERNEL);
439 if (!rq_addr) 439 if (!rq_addr)
440 return -ENOMEM; 440 return -ENOMEM;
441 441
442 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 442 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
443 rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size, 443 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
444 &rsp_phys_addr, GFP_KERNEL); 444 &rsp_phys_addr, GFP_KERNEL);
445 if (!rsp_addr) { 445 if (!rsp_addr) {
446 err = -ENOMEM; 446 err = -ENOMEM;
447 goto out_free_rq; 447 goto out_free_rq;
@@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
855 struct qlcnic_cmd_args cmd; 855 struct qlcnic_cmd_args cmd;
856 size_t nic_size = sizeof(struct qlcnic_info_le); 856 size_t nic_size = sizeof(struct qlcnic_info_le);
857 857
858 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 858 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
859 &nic_dma_t, GFP_KERNEL); 859 &nic_dma_t, GFP_KERNEL);
860 if (!nic_info_addr) 860 if (!nic_info_addr)
861 return -ENOMEM; 861 return -ENOMEM;
862 862
@@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
909 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) 909 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
910 return err; 910 return err;
911 911
912 nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size, 912 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
913 &nic_dma_t, GFP_KERNEL); 913 &nic_dma_t, GFP_KERNEL);
914 if (!nic_info_addr) 914 if (!nic_info_addr)
915 return -ENOMEM; 915 return -ENOMEM;
916 916
@@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
964 void *pci_info_addr; 964 void *pci_info_addr;
965 int err = 0, i; 965 int err = 0, i;
966 966
967 pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size, 967 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
968 &pci_info_dma_t, GFP_KERNEL); 968 &pci_info_dma_t, GFP_KERNEL);
969 if (!pci_info_addr) 969 if (!pci_info_addr)
970 return -ENOMEM; 970 return -ENOMEM;
971 971
@@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
1078 return -EIO; 1078 return -EIO;
1079 } 1079 }
1080 1080
1081 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1081 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
1082 &stats_dma_t, GFP_KERNEL); 1082 &stats_dma_t, GFP_KERNEL);
1083 if (!stats_addr) 1083 if (!stats_addr)
1084 return -ENOMEM; 1084 return -ENOMEM;
1085 1085
@@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1134 if (mac_stats == NULL) 1134 if (mac_stats == NULL)
1135 return -ENOMEM; 1135 return -ENOMEM;
1136 1136
1137 stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size, 1137 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
1138 &stats_dma_t, GFP_KERNEL); 1138 &stats_dma_t, GFP_KERNEL);
1139 if (!stats_addr) 1139 if (!stats_addr)
1140 return -ENOMEM; 1140 return -ENOMEM;
1141 1141
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 031f6e6ee9c1..8d790313ee3d 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
776 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ 776 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
777 777
778 ring_header->used = 0; 778 ring_header->used = 0;
779 ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, 779 ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
780 &ring_header->dma_addr, 780 &ring_header->dma_addr,
781 GFP_KERNEL); 781 GFP_KERNEL);
782 if (!ring_header->v_addr) 782 if (!ring_header->v_addr)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 298930d39b79..abb94c543aa2 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -205,6 +205,8 @@ enum cfg_version {
205}; 205};
206 206
207static const struct pci_device_id rtl8169_pci_tbl[] = { 207static const struct pci_device_id rtl8169_pci_tbl[] = {
208 { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
209 { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
208 { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, 210 { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
209 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, 211 { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
210 { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, 212 { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
@@ -706,6 +708,7 @@ module_param(use_dac, int, 0);
706MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); 708MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
707module_param_named(debug, debug.msg_enable, int, 0); 709module_param_named(debug, debug.msg_enable, int, 0);
708MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 710MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
711MODULE_SOFTDEP("pre: realtek");
709MODULE_LICENSE("GPL"); 712MODULE_LICENSE("GPL");
710MODULE_FIRMWARE(FIRMWARE_8168D_1); 713MODULE_FIRMWARE(FIRMWARE_8168D_1);
711MODULE_FIRMWARE(FIRMWARE_8168D_2); 714MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -1679,11 +1682,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
1679 1682
1680static bool rtl8169_update_counters(struct rtl8169_private *tp) 1683static bool rtl8169_update_counters(struct rtl8169_private *tp)
1681{ 1684{
1685 u8 val = RTL_R8(tp, ChipCmd);
1686
1682 /* 1687 /*
1683 * Some chips are unable to dump tally counters when the receiver 1688 * Some chips are unable to dump tally counters when the receiver
1684 * is disabled. 1689 * is disabled. If 0xff chip may be in a PCI power-save state.
1685 */ 1690 */
1686 if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) 1691 if (!(val & CmdRxEnb) || val == 0xff)
1687 return true; 1692 return true;
1688 1693
1689 return rtl8169_do_counters(tp, CounterDump); 1694 return rtl8169_do_counters(tp, CounterDump);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 690aee88f0eb..6d22dd500790 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
400 } 400 }
401 401
402 /* allocate memory for TX descriptors */ 402 /* allocate memory for TX descriptors */
403 tx_ring->dma_tx = dma_zalloc_coherent(dev, 403 tx_ring->dma_tx = dma_alloc_coherent(dev,
404 tx_rsize * sizeof(struct sxgbe_tx_norm_desc), 404 tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
405 &tx_ring->dma_tx_phy, GFP_KERNEL); 405 &tx_ring->dma_tx_phy, GFP_KERNEL);
406 if (!tx_ring->dma_tx) 406 if (!tx_ring->dma_tx)
407 return -ENOMEM; 407 return -ENOMEM;
408 408
@@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
479 rx_ring->queue_no = queue_no; 479 rx_ring->queue_no = queue_no;
480 480
481 /* allocate memory for RX descriptors */ 481 /* allocate memory for RX descriptors */
482 rx_ring->dma_rx = dma_zalloc_coherent(priv->device, 482 rx_ring->dma_rx = dma_alloc_coherent(priv->device,
483 rx_rsize * sizeof(struct sxgbe_rx_norm_desc), 483 rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
484 &rx_ring->dma_rx_phy, GFP_KERNEL); 484 &rx_ring->dma_rx_phy, GFP_KERNEL);
485 485
486 if (rx_ring->dma_rx == NULL) 486 if (rx_ring->dma_rx == NULL)
487 return -ENOMEM; 487 return -ENOMEM;
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index a8ecb33390da..9c07b5175581 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -33,8 +33,8 @@
33int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer, 33int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
34 unsigned int len, gfp_t gfp_flags) 34 unsigned int len, gfp_t gfp_flags)
35{ 35{
36 buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 36 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
37 &buffer->dma_addr, gfp_flags); 37 &buffer->dma_addr, gfp_flags);
38 if (!buffer->addr) 38 if (!buffer->addr)
39 return -ENOMEM; 39 return -ENOMEM;
40 buffer->len = len; 40 buffer->len = len;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aa1945a858d5..c2d45a40eb48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -34,8 +34,8 @@
34int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 34int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
35 unsigned int len, gfp_t gfp_flags) 35 unsigned int len, gfp_t gfp_flags)
36{ 36{
37 buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len, 37 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
38 &buffer->dma_addr, gfp_flags); 38 &buffer->dma_addr, gfp_flags);
39 if (!buffer->addr) 39 if (!buffer->addr)
40 return -ENOMEM; 40 return -ENOMEM;
41 buffer->len = len; 41 buffer->len = len;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 703fbbefea44..0e1b7e960b98 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -211,8 +211,8 @@ static void meth_check_link(struct net_device *dev)
211static int meth_init_tx_ring(struct meth_private *priv) 211static int meth_init_tx_ring(struct meth_private *priv)
212{ 212{
213 /* Init TX ring */ 213 /* Init TX ring */
214 priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE, 214 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
215 &priv->tx_ring_dma, GFP_ATOMIC); 215 &priv->tx_ring_dma, GFP_ATOMIC);
216 if (!priv->tx_ring) 216 if (!priv->tx_ring)
217 return -ENOMEM; 217 return -ENOMEM;
218 218
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 05a0948ad929..a18149720aa2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
1029 struct netsec_desc_ring *dring = &priv->desc_ring[id]; 1029 struct netsec_desc_ring *dring = &priv->desc_ring[id];
1030 int i; 1030 int i;
1031 1031
1032 dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, 1032 dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
1033 &dring->desc_dma, GFP_KERNEL); 1033 &dring->desc_dma, GFP_KERNEL);
1034 if (!dring->vaddr) 1034 if (!dring->vaddr)
1035 goto err; 1035 goto err;
1036 1036
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 6c5092e7771c..c5e25580a43f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
263 struct stmmac_extra_stats *x, u32 chan) 263 struct stmmac_extra_stats *x, u32 chan)
264{ 264{
265 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); 265 u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
266 u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
266 int ret = 0; 267 int ret = 0;
267 268
268 /* ABNORMAL interrupts */ 269 /* ABNORMAL interrupts */
@@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
282 x->normal_irq_n++; 283 x->normal_irq_n++;
283 284
284 if (likely(intr_status & XGMAC_RI)) { 285 if (likely(intr_status & XGMAC_RI)) {
285 u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); 286 if (likely(intr_en & XGMAC_RIE)) {
286 if (likely(value & XGMAC_RIE)) {
287 x->rx_normal_irq_n++; 287 x->rx_normal_irq_n++;
288 ret |= handle_rx; 288 ret |= handle_rx;
289 } 289 }
@@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
295 } 295 }
296 296
297 /* Clear interrupts */ 297 /* Clear interrupts */
298 writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan)); 298 writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
299 299
300 return ret; 300 return ret;
301} 301}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e0a0789c2ed..5afba69981cf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1549,22 +1549,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1549 goto err_dma; 1549 goto err_dma;
1550 1550
1551 if (priv->extend_desc) { 1551 if (priv->extend_desc) {
1552 rx_q->dma_erx = dma_zalloc_coherent(priv->device, 1552 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1553 DMA_RX_SIZE * 1553 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1554 sizeof(struct 1554 &rx_q->dma_rx_phy,
1555 dma_extended_desc), 1555 GFP_KERNEL);
1556 &rx_q->dma_rx_phy,
1557 GFP_KERNEL);
1558 if (!rx_q->dma_erx) 1556 if (!rx_q->dma_erx)
1559 goto err_dma; 1557 goto err_dma;
1560 1558
1561 } else { 1559 } else {
1562 rx_q->dma_rx = dma_zalloc_coherent(priv->device, 1560 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1563 DMA_RX_SIZE * 1561 DMA_RX_SIZE * sizeof(struct dma_desc),
1564 sizeof(struct 1562 &rx_q->dma_rx_phy,
1565 dma_desc), 1563 GFP_KERNEL);
1566 &rx_q->dma_rx_phy,
1567 GFP_KERNEL);
1568 if (!rx_q->dma_rx) 1564 if (!rx_q->dma_rx)
1569 goto err_dma; 1565 goto err_dma;
1570 } 1566 }
@@ -1612,21 +1608,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1612 goto err_dma; 1608 goto err_dma;
1613 1609
1614 if (priv->extend_desc) { 1610 if (priv->extend_desc) {
1615 tx_q->dma_etx = dma_zalloc_coherent(priv->device, 1611 tx_q->dma_etx = dma_alloc_coherent(priv->device,
1616 DMA_TX_SIZE * 1612 DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1617 sizeof(struct 1613 &tx_q->dma_tx_phy,
1618 dma_extended_desc), 1614 GFP_KERNEL);
1619 &tx_q->dma_tx_phy,
1620 GFP_KERNEL);
1621 if (!tx_q->dma_etx) 1615 if (!tx_q->dma_etx)
1622 goto err_dma; 1616 goto err_dma;
1623 } else { 1617 } else {
1624 tx_q->dma_tx = dma_zalloc_coherent(priv->device, 1618 tx_q->dma_tx = dma_alloc_coherent(priv->device,
1625 DMA_TX_SIZE * 1619 DMA_TX_SIZE * sizeof(struct dma_desc),
1626 sizeof(struct 1620 &tx_q->dma_tx_phy,
1627 dma_desc), 1621 GFP_KERNEL);
1628 &tx_q->dma_tx_phy,
1629 GFP_KERNEL);
1630 if (!tx_q->dma_tx) 1622 if (!tx_q->dma_tx)
1631 goto err_dma; 1623 goto err_dma;
1632 } 1624 }
@@ -3525,27 +3517,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
3525 struct stmmac_channel *ch = 3517 struct stmmac_channel *ch =
3526 container_of(napi, struct stmmac_channel, napi); 3518 container_of(napi, struct stmmac_channel, napi);
3527 struct stmmac_priv *priv = ch->priv_data; 3519 struct stmmac_priv *priv = ch->priv_data;
3528 int work_done = 0, work_rem = budget; 3520 int work_done, rx_done = 0, tx_done = 0;
3529 u32 chan = ch->index; 3521 u32 chan = ch->index;
3530 3522
3531 priv->xstats.napi_poll++; 3523 priv->xstats.napi_poll++;
3532 3524
3533 if (ch->has_tx) { 3525 if (ch->has_tx)
3534 int done = stmmac_tx_clean(priv, work_rem, chan); 3526 tx_done = stmmac_tx_clean(priv, budget, chan);
3535 3527 if (ch->has_rx)
3536 work_done += done; 3528 rx_done = stmmac_rx(priv, budget, chan);
3537 work_rem -= done;
3538 }
3539 3529
3540 if (ch->has_rx) { 3530 work_done = max(rx_done, tx_done);
3541 int done = stmmac_rx(priv, work_rem, chan); 3531 work_done = min(work_done, budget);
3542 3532
3543 work_done += done; 3533 if (work_done < budget && napi_complete_done(napi, work_done)) {
3544 work_rem -= done; 3534 int stat;
3545 }
3546 3535
3547 if (work_done < budget && napi_complete_done(napi, work_done))
3548 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3536 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3537 stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3538 &priv->xstats, chan);
3539 if (stat && napi_reschedule(napi))
3540 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
3541 }
3549 3542
3550 return work_done; 3543 return work_done;
3551} 3544}
@@ -4168,6 +4161,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
4168 return ret; 4161 return ret;
4169 } 4162 }
4170 4163
4164 /* Rx Watchdog is available in the COREs newer than the 3.40.
4165 * In some case, for example on bugged HW this feature
4166 * has to be disable and this can be done by passing the
4167 * riwt_off field from the platform.
4168 */
4169 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4170 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4171 priv->use_riwt = 1;
4172 dev_info(priv->device,
4173 "Enable RX Mitigation via HW Watchdog Timer\n");
4174 }
4175
4171 return 0; 4176 return 0;
4172} 4177}
4173 4178
@@ -4300,18 +4305,6 @@ int stmmac_dvr_probe(struct device *device,
4300 if (flow_ctrl) 4305 if (flow_ctrl)
4301 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 4306 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4302 4307
4303 /* Rx Watchdog is available in the COREs newer than the 3.40.
4304 * In some case, for example on bugged HW this feature
4305 * has to be disable and this can be done by passing the
4306 * riwt_off field from the platform.
4307 */
4308 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4309 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4310 priv->use_riwt = 1;
4311 dev_info(priv->device,
4312 "Enable RX Mitigation via HW Watchdog Timer\n");
4313 }
4314
4315 /* Setup channels NAPI */ 4308 /* Setup channels NAPI */
4316 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 4309 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
4317 4310
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c54a50dbd5ac..d819e8eaba12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
299 */ 299 */
300static void stmmac_pci_remove(struct pci_dev *pdev) 300static void stmmac_pci_remove(struct pci_dev *pdev)
301{ 301{
302 int i;
303
302 stmmac_dvr_remove(&pdev->dev); 304 stmmac_dvr_remove(&pdev->dev);
305
306 for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
307 if (pci_resource_len(pdev, i) == 0)
308 continue;
309 pcim_iounmap_regions(pdev, BIT(i));
310 break;
311 }
312
303 pci_disable_device(pdev); 313 pci_disable_device(pdev);
304} 314}
305 315
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 531294f4978b..58ea18af9813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
301 /* Queue 0 is not AVB capable */ 301 /* Queue 0 is not AVB capable */
302 if (queue <= 0 || queue >= tx_queues_count) 302 if (queue <= 0 || queue >= tx_queues_count)
303 return -EINVAL; 303 return -EINVAL;
304 if (!priv->dma_cap.av)
305 return -EOPNOTSUPP;
304 if (priv->speed != SPEED_100 && priv->speed != SPEED_1000) 306 if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
305 return -EOPNOTSUPP; 307 return -EOPNOTSUPP;
306 308
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index edcd1e60b30d..37925a1d58de 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev)
1311 data->id, dev->irq, dev->name); 1311 data->id, dev->irq, dev->name);
1312 } 1312 }
1313 1313
1314 data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size, 1314 data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
1315 &data->rxdma, GFP_KERNEL); 1315 &data->rxdma, GFP_KERNEL);
1316 if (!data->rxring) 1316 if (!data->rxring)
1317 return -ENOMEM; 1317 return -ENOMEM;
1318 1318
1319 data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size, 1319 data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
1320 &data->txdma, GFP_KERNEL); 1320 &data->txdma, GFP_KERNEL);
1321 if (!data->txring) { 1321 if (!data->txring) {
1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, 1322 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
1323 data->rxdma); 1323 data->rxdma);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2241f9897092..15bb058db392 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
243 243
244 /* allocate the tx and rx ring buffer descriptors. */ 244 /* allocate the tx and rx ring buffer descriptors. */
245 /* returns a virtual address and a physical address. */ 245 /* returns a virtual address and a physical address. */
246 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
247 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 247 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248 &lp->tx_bd_p, GFP_KERNEL); 248 &lp->tx_bd_p, GFP_KERNEL);
249 if (!lp->tx_bd_v) 249 if (!lp->tx_bd_v)
250 goto out; 250 goto out;
251 251
252 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 252 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
253 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 253 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
254 &lp->rx_bd_p, GFP_KERNEL); 254 &lp->rx_bd_p, GFP_KERNEL);
255 if (!lp->rx_bd_v) 255 if (!lp->rx_bd_v)
256 goto out; 256 goto out;
257 257
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 12a14609ec47..0789d8af7d72 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
199 lp->rx_bd_ci = 0; 199 lp->rx_bd_ci = 0;
200 200
201 /* Allocate the Tx and Rx buffer descriptors. */ 201 /* Allocate the Tx and Rx buffer descriptors. */
202 lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 202 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
203 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 203 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
204 &lp->tx_bd_p, GFP_KERNEL); 204 &lp->tx_bd_p, GFP_KERNEL);
205 if (!lp->tx_bd_v) 205 if (!lp->tx_bd_v)
206 goto out; 206 goto out;
207 207
208 lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent, 208 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
209 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 209 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
210 &lp->rx_bd_p, GFP_KERNEL); 210 &lp->rx_bd_p, GFP_KERNEL);
211 if (!lp->rx_bd_v) 211 if (!lp->rx_bd_v)
212 goto out; 212 goto out;
213 213
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 61fceee73c1b..38ac8ef41f5f 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
1139#endif 1139#endif
1140 sizeof(PI_CONSUMER_BLOCK) + 1140 sizeof(PI_CONSUMER_BLOCK) +
1141 (PI_ALIGN_K_DESC_BLK - 1); 1141 (PI_ALIGN_K_DESC_BLK - 1);
1142 bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size, 1142 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1143 &bp->kmalloced_dma, 1143 &bp->kmalloced_dma,
1144 GFP_ATOMIC); 1144 GFP_ATOMIC);
1145 if (top_v == NULL) 1145 if (top_v == NULL)
1146 return DFX_K_FAILURE; 1146 return DFX_K_FAILURE;
1147 1147
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 72433f3efc74..5d661f60b101 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev)
409 if (bp->SharedMemSize > 0) { 409 if (bp->SharedMemSize > 0) {
410 bp->SharedMemSize += 16; // for descriptor alignment 410 bp->SharedMemSize += 16; // for descriptor alignment
411 411
412 bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev, 412 bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
413 bp->SharedMemSize, 413 bp->SharedMemSize,
414 &bp->SharedMemDMA, 414 &bp->SharedMemDMA,
415 GFP_ATOMIC); 415 GFP_ATOMIC);
416 if (!bp->SharedMemAddr) { 416 if (!bp->SharedMemAddr) {
417 printk("could not allocate mem for "); 417 printk("could not allocate mem for ");
418 printk("hardware module: %ld byte\n", 418 printk("hardware module: %ld byte\n",
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fc726ce4c164..6d067176320f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
337 337
338 if (src) 338 if (src)
339 dev_put(src->dev); 339 dev_put(src->dev);
340 kfree_skb(skb); 340 consume_skb(skb);
341 } 341 }
342} 342}
343 343
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 1b350183bffb..a271239748f2 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = {
197 .phy_id = PHY_ID_BCM8706, 197 .phy_id = PHY_ID_BCM8706,
198 .phy_id_mask = 0xffffffff, 198 .phy_id_mask = 0xffffffff,
199 .name = "Broadcom BCM8706", 199 .name = "Broadcom BCM8706",
200 .features = PHY_10GBIT_FEC_FEATURES,
200 .config_init = bcm87xx_config_init, 201 .config_init = bcm87xx_config_init,
201 .config_aneg = bcm87xx_config_aneg, 202 .config_aneg = bcm87xx_config_aneg,
202 .read_status = bcm87xx_read_status, 203 .read_status = bcm87xx_read_status,
@@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = {
208 .phy_id = PHY_ID_BCM8727, 209 .phy_id = PHY_ID_BCM8727,
209 .phy_id_mask = 0xffffffff, 210 .phy_id_mask = 0xffffffff,
210 .name = "Broadcom BCM8727", 211 .name = "Broadcom BCM8727",
212 .features = PHY_10GBIT_FEC_FEATURES,
211 .config_init = bcm87xx_config_init, 213 .config_init = bcm87xx_config_init,
212 .config_aneg = bcm87xx_config_aneg, 214 .config_aneg = bcm87xx_config_aneg,
213 .read_status = bcm87xx_read_status, 215 .read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 8022cd317f62..1a4d04afb7f0 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = {
88 .phy_id = PHY_ID_CS4340, 88 .phy_id = PHY_ID_CS4340,
89 .phy_id_mask = 0xffffffff, 89 .phy_id_mask = 0xffffffff,
90 .name = "Cortina CS4340", 90 .name = "Cortina CS4340",
91 .features = PHY_10GBIT_FEATURES,
91 .config_init = gen10g_config_init, 92 .config_init = gen10g_config_init,
92 .config_aneg = gen10g_config_aneg, 93 .config_aneg = gen10g_config_aneg,
93 .read_status = cortina_read_status, 94 .read_status = cortina_read_status,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a9c7c7f41b0c..2e12f982534f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1046,6 +1046,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
1046 return 0; 1046 return 0;
1047} 1047}
1048 1048
1049/* The VOD can be out of specification on link up. Poke an
1050 * undocumented register, in an undocumented page, with a magic value
1051 * to fix this.
1052 */
1053static int m88e6390_errata(struct phy_device *phydev)
1054{
1055 int err;
1056
1057 err = phy_write(phydev, MII_BMCR,
1058 BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
1059 if (err)
1060 return err;
1061
1062 usleep_range(300, 400);
1063
1064 err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
1065 if (err)
1066 return err;
1067
1068 return genphy_soft_reset(phydev);
1069}
1070
1071static int m88e6390_config_aneg(struct phy_device *phydev)
1072{
1073 int err;
1074
1075 err = m88e6390_errata(phydev);
1076 if (err)
1077 return err;
1078
1079 return m88e1510_config_aneg(phydev);
1080}
1081
1049/** 1082/**
1050 * fiber_lpa_mod_linkmode_lpa_t 1083 * fiber_lpa_mod_linkmode_lpa_t
1051 * @advertising: the linkmode advertisement settings 1084 * @advertising: the linkmode advertisement settings
@@ -1402,7 +1435,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1402 * before enabling it if !phy_interrupt_is_valid() 1435 * before enabling it if !phy_interrupt_is_valid()
1403 */ 1436 */
1404 if (!phy_interrupt_is_valid(phydev)) 1437 if (!phy_interrupt_is_valid(phydev))
1405 phy_read(phydev, MII_M1011_IEVENT); 1438 __phy_read(phydev, MII_M1011_IEVENT);
1406 1439
1407 /* Enable the WOL interrupt */ 1440 /* Enable the WOL interrupt */
1408 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, 1441 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2283,7 +2316,7 @@ static struct phy_driver marvell_drivers[] = {
2283 .features = PHY_GBIT_FEATURES, 2316 .features = PHY_GBIT_FEATURES,
2284 .probe = m88e6390_probe, 2317 .probe = m88e6390_probe,
2285 .config_init = &marvell_config_init, 2318 .config_init = &marvell_config_init,
2286 .config_aneg = &m88e1510_config_aneg, 2319 .config_aneg = &m88e6390_config_aneg,
2287 .read_status = &marvell_read_status, 2320 .read_status = &marvell_read_status,
2288 .ack_interrupt = &marvell_ack_interrupt, 2321 .ack_interrupt = &marvell_ack_interrupt,
2289 .config_intr = &marvell_config_intr, 2322 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e59a8419b17..66b9cfe692fc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
390 if (IS_ERR(gpiod)) { 390 if (IS_ERR(gpiod)) {
391 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", 391 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
392 bus->id); 392 bus->id);
393 device_del(&bus->dev);
393 return PTR_ERR(gpiod); 394 return PTR_ERR(gpiod);
394 } else if (gpiod) { 395 } else if (gpiod) {
395 bus->reset_gpiod = gpiod; 396 bus->reset_gpiod = gpiod;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index b03bcf2c388a..3ddaf9595697 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = {
233 .name = "Meson GXL Internal PHY", 233 .name = "Meson GXL Internal PHY",
234 .features = PHY_BASIC_FEATURES, 234 .features = PHY_BASIC_FEATURES,
235 .flags = PHY_IS_INTERNAL, 235 .flags = PHY_IS_INTERNAL,
236 .soft_reset = genphy_soft_reset,
236 .config_init = meson_gxl_config_init, 237 .config_init = meson_gxl_config_init,
237 .aneg_done = genphy_aneg_done, 238 .aneg_done = genphy_aneg_done,
238 .read_status = meson_gxl_read_status, 239 .read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c33384710d26..b1f959935f50 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1070,6 +1070,7 @@ static struct phy_driver ksphy_driver[] = {
1070 .driver_data = &ksz9021_type, 1070 .driver_data = &ksz9021_type,
1071 .probe = kszphy_probe, 1071 .probe = kszphy_probe,
1072 .config_init = ksz9031_config_init, 1072 .config_init = ksz9031_config_init,
1073 .soft_reset = genphy_soft_reset,
1073 .read_status = ksz9031_read_status, 1074 .read_status = ksz9031_read_status,
1074 .ack_interrupt = kszphy_ack_interrupt, 1075 .ack_interrupt = kszphy_ack_interrupt,
1075 .config_intr = kszphy_config_intr, 1076 .config_intr = kszphy_config_intr,
@@ -1098,6 +1099,7 @@ static struct phy_driver ksphy_driver[] = {
1098 .phy_id = PHY_ID_KSZ8873MLL, 1099 .phy_id = PHY_ID_KSZ8873MLL,
1099 .phy_id_mask = MICREL_PHY_ID_MASK, 1100 .phy_id_mask = MICREL_PHY_ID_MASK,
1100 .name = "Micrel KSZ8873MLL Switch", 1101 .name = "Micrel KSZ8873MLL Switch",
1102 .features = PHY_BASIC_FEATURES,
1101 .config_init = kszphy_config_init, 1103 .config_init = kszphy_config_init,
1102 .config_aneg = ksz8873mll_config_aneg, 1104 .config_aneg = ksz8873mll_config_aneg,
1103 .read_status = ksz8873mll_read_status, 1105 .read_status = ksz8873mll_read_status,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d33e7b3caf03..189cd2048c3a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -543,13 +543,6 @@ int phy_start_aneg(struct phy_device *phydev)
543 543
544 mutex_lock(&phydev->lock); 544 mutex_lock(&phydev->lock);
545 545
546 if (!__phy_is_started(phydev)) {
547 WARN(1, "called from state %s\n",
548 phy_state_to_str(phydev->state));
549 err = -EBUSY;
550 goto out_unlock;
551 }
552
553 if (AUTONEG_DISABLE == phydev->autoneg) 546 if (AUTONEG_DISABLE == phydev->autoneg)
554 phy_sanitize_settings(phydev); 547 phy_sanitize_settings(phydev);
555 548
@@ -560,11 +553,13 @@ int phy_start_aneg(struct phy_device *phydev)
560 if (err < 0) 553 if (err < 0)
561 goto out_unlock; 554 goto out_unlock;
562 555
563 if (phydev->autoneg == AUTONEG_ENABLE) { 556 if (__phy_is_started(phydev)) {
564 err = phy_check_link_status(phydev); 557 if (phydev->autoneg == AUTONEG_ENABLE) {
565 } else { 558 err = phy_check_link_status(phydev);
566 phydev->state = PHY_FORCING; 559 } else {
567 phydev->link_timeout = PHY_FORCE_TIMEOUT; 560 phydev->state = PHY_FORCING;
561 phydev->link_timeout = PHY_FORCE_TIMEOUT;
562 }
568 } 563 }
569 564
570out_unlock: 565out_unlock:
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 51990002d495..46c86725a693 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
61__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 61__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
62EXPORT_SYMBOL_GPL(phy_10gbit_features); 62EXPORT_SYMBOL_GPL(phy_10gbit_features);
63 63
64__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
65EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
66
64static const int phy_basic_ports_array[] = { 67static const int phy_basic_ports_array[] = {
65 ETHTOOL_LINK_MODE_Autoneg_BIT, 68 ETHTOOL_LINK_MODE_Autoneg_BIT,
66 ETHTOOL_LINK_MODE_TP_BIT, 69 ETHTOOL_LINK_MODE_TP_BIT,
@@ -109,6 +112,11 @@ const int phy_10gbit_features_array[1] = {
109}; 112};
110EXPORT_SYMBOL_GPL(phy_10gbit_features_array); 113EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
111 114
115const int phy_10gbit_fec_features_array[1] = {
116 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
117};
118EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
119
112__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 120__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
113EXPORT_SYMBOL_GPL(phy_10gbit_full_features); 121EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
114 122
@@ -191,6 +199,10 @@ static void features_init(void)
191 linkmode_set_bit_array(phy_10gbit_full_features_array, 199 linkmode_set_bit_array(phy_10gbit_full_features_array,
192 ARRAY_SIZE(phy_10gbit_full_features_array), 200 ARRAY_SIZE(phy_10gbit_full_features_array),
193 phy_10gbit_full_features); 201 phy_10gbit_full_features);
202 /* 10G FEC only */
203 linkmode_set_bit_array(phy_10gbit_fec_features_array,
204 ARRAY_SIZE(phy_10gbit_fec_features_array),
205 phy_10gbit_fec_features);
194} 206}
195 207
196void phy_device_free(struct phy_device *phydev) 208void phy_device_free(struct phy_device *phydev)
@@ -2243,6 +2255,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
2243{ 2255{
2244 int retval; 2256 int retval;
2245 2257
2258 if (WARN_ON(!new_driver->features)) {
2259 pr_err("%s: Driver features are missing\n", new_driver->name);
2260 return -EINVAL;
2261 }
2262
2246 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; 2263 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
2247 new_driver->mdiodrv.driver.name = new_driver->name; 2264 new_driver->mdiodrv.driver.name = new_driver->name;
2248 new_driver->mdiodrv.driver.bus = &mdio_bus_type; 2265 new_driver->mdiodrv.driver.bus = &mdio_bus_type;
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 22f3bdd8206c..91247182bc52 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = {
80 .phy_id = PHY_ID_TN2020, 80 .phy_id = PHY_ID_TN2020,
81 .phy_id_mask = 0xffffffff, 81 .phy_id_mask = 0xffffffff,
82 .name = "Teranetics TN2020", 82 .name = "Teranetics TN2020",
83 .features = PHY_10GBIT_FEATURES,
83 .soft_reset = gen10g_no_soft_reset, 84 .soft_reset = gen10g_no_soft_reset,
84 .aneg_done = teranetics_aneg_done, 85 .aneg_done = teranetics_aneg_done,
85 .config_init = gen10g_config_init, 86 .config_init = gen10g_config_init,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564b251d..f22639f0116a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
445 if (pskb_trim_rcsum(skb, len)) 445 if (pskb_trim_rcsum(skb, len))
446 goto drop; 446 goto drop;
447 447
448 ph = pppoe_hdr(skb);
448 pn = pppoe_pernet(dev_net(dev)); 449 pn = pppoe_pernet(dev_net(dev));
449 450
450 /* Note that get_item does a sock_hold(), so sk_pppox(po) 451 /* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a4fdad475594..18656c4094b3 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
856 err = 0; 856 err = 0;
857 } 857 }
858 858
859 rcu_assign_pointer(tfile->tun, tun);
860 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
861 tun->numqueues++;
862
863 if (tfile->detached) { 859 if (tfile->detached) {
864 tun_enable_queue(tfile); 860 tun_enable_queue(tfile);
865 } else { 861 } else {
@@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
876 * refcnt. 872 * refcnt.
877 */ 873 */
878 874
875 /* Publish tfile->tun and tun->tfiles only after we've fully
876 * initialized tfile; otherwise we risk using half-initialized
877 * object.
878 */
879 rcu_assign_pointer(tfile->tun, tun);
880 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
881 tun->numqueues++;
879out: 882out:
880 return err; 883 return err;
881} 884}
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 57f1c94fca0b..820a2fe7d027 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = {
1287 1287
1288#undef ASIX112_DESC 1288#undef ASIX112_DESC
1289 1289
1290static const struct driver_info trendnet_info = {
1291 .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
1292 .bind = aqc111_bind,
1293 .unbind = aqc111_unbind,
1294 .status = aqc111_status,
1295 .link_reset = aqc111_link_reset,
1296 .reset = aqc111_reset,
1297 .stop = aqc111_stop,
1298 .flags = FLAG_ETHER | FLAG_FRAMING_AX |
1299 FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
1300 .rx_fixup = aqc111_rx_fixup,
1301 .tx_fixup = aqc111_tx_fixup,
1302};
1303
1290static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) 1304static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
1291{ 1305{
1292 struct usbnet *dev = usb_get_intfdata(intf); 1306 struct usbnet *dev = usb_get_intfdata(intf);
@@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = {
1440 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, 1454 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
1441 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, 1455 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
1442 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, 1456 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
1457 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
1443 { },/* END */ 1458 { },/* END */
1444}; 1459};
1445MODULE_DEVICE_TABLE(usb, products); 1460MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index b3b3c05903a1..5512a1038721 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -179,10 +179,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
179 * probed with) and a slave/data interface; union 179 * probed with) and a slave/data interface; union
180 * descriptors sort this all out. 180 * descriptors sort this all out.
181 */ 181 */
182 info->control = usb_ifnum_to_if(dev->udev, 182 info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0);
183 info->u->bMasterInterface0); 183 info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0);
184 info->data = usb_ifnum_to_if(dev->udev,
185 info->u->bSlaveInterface0);
186 if (!info->control || !info->data) { 184 if (!info->control || !info->data) {
187 dev_dbg(&intf->dev, 185 dev_dbg(&intf->dev,
188 "master #%u/%p slave #%u/%p\n", 186 "master #%u/%p slave #%u/%p\n",
@@ -216,18 +214,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
216 /* a data interface altsetting does the real i/o */ 214 /* a data interface altsetting does the real i/o */
217 d = &info->data->cur_altsetting->desc; 215 d = &info->data->cur_altsetting->desc;
218 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { 216 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
219 dev_dbg(&intf->dev, "slave class %u\n", 217 dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass);
220 d->bInterfaceClass);
221 goto bad_desc; 218 goto bad_desc;
222 } 219 }
223skip: 220skip:
224 if ( rndis && 221 if (rndis && header.usb_cdc_acm_descriptor &&
225 header.usb_cdc_acm_descriptor && 222 header.usb_cdc_acm_descriptor->bmCapabilities) {
226 header.usb_cdc_acm_descriptor->bmCapabilities) { 223 dev_dbg(&intf->dev,
227 dev_dbg(&intf->dev, 224 "ACM capabilities %02x, not really RNDIS?\n",
228 "ACM capabilities %02x, not really RNDIS?\n", 225 header.usb_cdc_acm_descriptor->bmCapabilities);
229 header.usb_cdc_acm_descriptor->bmCapabilities); 226 goto bad_desc;
230 goto bad_desc;
231 } 227 }
232 228
233 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { 229 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
@@ -238,7 +234,7 @@ skip:
238 } 234 }
239 235
240 if (header.usb_cdc_mdlm_desc && 236 if (header.usb_cdc_mdlm_desc &&
241 memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { 237 memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
242 dev_dbg(&intf->dev, "GUID doesn't match\n"); 238 dev_dbg(&intf->dev, "GUID doesn't match\n");
243 goto bad_desc; 239 goto bad_desc;
244 } 240 }
@@ -302,7 +298,7 @@ skip:
302 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { 298 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
303 struct usb_endpoint_descriptor *desc; 299 struct usb_endpoint_descriptor *desc;
304 300
305 dev->status = &info->control->cur_altsetting->endpoint [0]; 301 dev->status = &info->control->cur_altsetting->endpoint[0];
306 desc = &dev->status->desc; 302 desc = &dev->status->desc;
307 if (!usb_endpoint_is_int_in(desc) || 303 if (!usb_endpoint_is_int_in(desc) ||
308 (le16_to_cpu(desc->wMaxPacketSize) 304 (le16_to_cpu(desc->wMaxPacketSize)
@@ -847,6 +843,14 @@ static const struct usb_device_id products[] = {
847 .driver_info = 0, 843 .driver_info = 0,
848}, 844},
849 845
846/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */
847{
848 USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM,
849 USB_CDC_SUBCLASS_ETHERNET,
850 USB_CDC_PROTO_NONE),
851 .driver_info = 0,
852},
853
850/* WHITELIST!!! 854/* WHITELIST!!!
851 * 855 *
852 * CDC Ether uses two interfaces, not necessarily consecutive. 856 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 774e1ff01c9a..735ad838e2ba 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
123 dev->addr_len = 0; 123 dev->addr_len = 0;
124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
125 dev->netdev_ops = &qmimux_netdev_ops; 125 dev->netdev_ops = &qmimux_netdev_ops;
126 dev->mtu = 1500;
126 dev->needs_free_netdev = true; 127 dev->needs_free_netdev = true;
127} 128}
128 129
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 023725086046..8fadd8eaf601 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1330,7 +1330,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1330 return stats.packets; 1330 return stats.packets;
1331} 1331}
1332 1332
1333static void free_old_xmit_skbs(struct send_queue *sq) 1333static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1334{ 1334{
1335 struct sk_buff *skb; 1335 struct sk_buff *skb;
1336 unsigned int len; 1336 unsigned int len;
@@ -1343,7 +1343,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
1343 bytes += skb->len; 1343 bytes += skb->len;
1344 packets++; 1344 packets++;
1345 1345
1346 dev_consume_skb_any(skb); 1346 napi_consume_skb(skb, in_napi);
1347 } 1347 }
1348 1348
1349 /* Avoid overhead when no packets have been processed 1349 /* Avoid overhead when no packets have been processed
@@ -1369,7 +1369,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
1369 return; 1369 return;
1370 1370
1371 if (__netif_tx_trylock(txq)) { 1371 if (__netif_tx_trylock(txq)) {
1372 free_old_xmit_skbs(sq); 1372 free_old_xmit_skbs(sq, true);
1373 __netif_tx_unlock(txq); 1373 __netif_tx_unlock(txq);
1374 } 1374 }
1375 1375
@@ -1445,7 +1445,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1445 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1445 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
1446 1446
1447 __netif_tx_lock(txq, raw_smp_processor_id()); 1447 __netif_tx_lock(txq, raw_smp_processor_id());
1448 free_old_xmit_skbs(sq); 1448 free_old_xmit_skbs(sq, true);
1449 __netif_tx_unlock(txq); 1449 __netif_tx_unlock(txq);
1450 1450
1451 virtqueue_napi_complete(napi, sq->vq, 0); 1451 virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1514,7 +1514,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1514 bool use_napi = sq->napi.weight; 1514 bool use_napi = sq->napi.weight;
1515 1515
1516 /* Free up any pending old buffers before queueing new ones. */ 1516 /* Free up any pending old buffers before queueing new ones. */
1517 free_old_xmit_skbs(sq); 1517 free_old_xmit_skbs(sq, false);
1518 1518
1519 if (use_napi && kick) 1519 if (use_napi && kick)
1520 virtqueue_enable_cb_delayed(sq->vq); 1520 virtqueue_enable_cb_delayed(sq->vq);
@@ -1557,7 +1557,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1557 if (!use_napi && 1557 if (!use_napi &&
1558 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1558 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1559 /* More just got used, free them then recheck. */ 1559 /* More just got used, free them then recheck. */
1560 free_old_xmit_skbs(sq); 1560 free_old_xmit_skbs(sq, false);
1561 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1561 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1562 netif_start_subqueue(dev, qnum); 1562 netif_start_subqueue(dev, qnum);
1563 virtqueue_disable_cb(sq->vq); 1563 virtqueue_disable_cb(sq->vq);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e454dfc9ad8f..89984fcab01e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
535 } 535 }
536 536
537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
538 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 538 tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
539 &tq->buf_info_pa, GFP_KERNEL); 539 &tq->buf_info_pa, GFP_KERNEL);
540 if (!tq->buf_info) 540 if (!tq->buf_info)
541 goto err; 541 goto err;
542 542
@@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1815 1815
1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1817 rq->rx_ring[1].size); 1817 rq->rx_ring[1].size);
1818 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1818 bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1819 GFP_KERNEL); 1819 GFP_KERNEL);
1820 if (!bi) 1820 if (!bi)
1821 goto err; 1821 goto err;
1822 1822
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 839fa7715709..66d889d54e58 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
280 280
281 /* Get BD buffer */ 281 /* Get BD buffer */
282 bd_buffer = dma_zalloc_coherent(priv->dev, 282 bd_buffer = dma_alloc_coherent(priv->dev,
283 (RX_BD_RING_LEN + TX_BD_RING_LEN) * 283 (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
284 MAX_RX_BUF_LENGTH, 284 &bd_dma_addr, GFP_KERNEL);
285 &bd_dma_addr, GFP_KERNEL);
286 285
287 if (!bd_buffer) { 286 if (!bd_buffer) {
288 dev_err(priv->dev, "Could not allocate buffer descriptors\n"); 287 dev_err(priv->dev, "Could not allocate buffer descriptors\n");
@@ -1057,6 +1056,54 @@ static const struct net_device_ops uhdlc_ops = {
1057 .ndo_tx_timeout = uhdlc_tx_timeout, 1056 .ndo_tx_timeout = uhdlc_tx_timeout,
1058}; 1057};
1059 1058
1059static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
1060{
1061 struct device_node *np;
1062 struct platform_device *pdev;
1063 struct resource *res;
1064 static int siram_init_flag;
1065 int ret = 0;
1066
1067 np = of_find_compatible_node(NULL, NULL, name);
1068 if (!np)
1069 return -EINVAL;
1070
1071 pdev = of_find_device_by_node(np);
1072 if (!pdev) {
1073 pr_err("%pOFn: failed to lookup pdev\n", np);
1074 of_node_put(np);
1075 return -EINVAL;
1076 }
1077
1078 of_node_put(np);
1079 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1080 if (!res) {
1081 ret = -EINVAL;
1082 goto error_put_device;
1083 }
1084 *ptr = ioremap(res->start, resource_size(res));
1085 if (!*ptr) {
1086 ret = -ENOMEM;
1087 goto error_put_device;
1088 }
1089
1090 /* We've remapped the addresses, and we don't need the device any
1091 * more, so we should release it.
1092 */
1093 put_device(&pdev->dev);
1094
1095 if (init_flag && siram_init_flag == 0) {
1096 memset_io(*ptr, 0, resource_size(res));
1097 siram_init_flag = 1;
1098 }
1099 return 0;
1100
1101error_put_device:
1102 put_device(&pdev->dev);
1103
1104 return ret;
1105}
1106
1060static int ucc_hdlc_probe(struct platform_device *pdev) 1107static int ucc_hdlc_probe(struct platform_device *pdev)
1061{ 1108{
1062 struct device_node *np = pdev->dev.of_node; 1109 struct device_node *np = pdev->dev.of_node;
@@ -1151,6 +1198,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1151 ret = ucc_of_parse_tdm(np, utdm, ut_info); 1198 ret = ucc_of_parse_tdm(np, utdm, ut_info);
1152 if (ret) 1199 if (ret)
1153 goto free_utdm; 1200 goto free_utdm;
1201
1202 ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
1203 (void __iomem **)&utdm->si_regs);
1204 if (ret)
1205 goto free_utdm;
1206 ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1207 (void __iomem **)&utdm->siram);
1208 if (ret)
1209 goto unmap_si_regs;
1154 } 1210 }
1155 1211
1156 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) 1212 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
@@ -1159,7 +1215,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1159 ret = uhdlc_init(uhdlc_priv); 1215 ret = uhdlc_init(uhdlc_priv);
1160 if (ret) { 1216 if (ret) {
1161 dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1217 dev_err(&pdev->dev, "Failed to init uhdlc\n");
1162 goto free_utdm; 1218 goto undo_uhdlc_init;
1163 } 1219 }
1164 1220
1165 dev = alloc_hdlcdev(uhdlc_priv); 1221 dev = alloc_hdlcdev(uhdlc_priv);
@@ -1188,6 +1244,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1188free_dev: 1244free_dev:
1189 free_netdev(dev); 1245 free_netdev(dev);
1190undo_uhdlc_init: 1246undo_uhdlc_init:
1247 iounmap(utdm->siram);
1248unmap_si_regs:
1249 iounmap(utdm->si_regs);
1191free_utdm: 1250free_utdm:
1192 if (uhdlc_priv->tsa) 1251 if (uhdlc_priv->tsa)
1193 kfree(utdm); 1252 kfree(utdm);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f6d3ecbdd3a3..2a5668b4f6bc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1553,10 +1553,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1553 * coherent DMA are unsupported 1553 * coherent DMA are unsupported
1554 */ 1554 */
1555 dest_ring->base_addr_owner_space_unaligned = 1555 dest_ring->base_addr_owner_space_unaligned =
1556 dma_zalloc_coherent(ar->dev, 1556 dma_alloc_coherent(ar->dev,
1557 (nentries * sizeof(struct ce_desc) + 1557 (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
1558 CE_DESC_RING_ALIGN), 1558 &base_addr, GFP_KERNEL);
1559 &base_addr, GFP_KERNEL);
1560 if (!dest_ring->base_addr_owner_space_unaligned) { 1559 if (!dest_ring->base_addr_owner_space_unaligned) {
1561 kfree(dest_ring); 1560 kfree(dest_ring);
1562 return ERR_PTR(-ENOMEM); 1561 return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e49b36752ba2..49758490eaba 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5169,10 +5169,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
5169 if (vif->type == NL80211_IFTYPE_ADHOC || 5169 if (vif->type == NL80211_IFTYPE_ADHOC ||
5170 vif->type == NL80211_IFTYPE_MESH_POINT || 5170 vif->type == NL80211_IFTYPE_MESH_POINT ||
5171 vif->type == NL80211_IFTYPE_AP) { 5171 vif->type == NL80211_IFTYPE_AP) {
5172 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5172 arvif->beacon_buf = dma_alloc_coherent(ar->dev,
5173 IEEE80211_MAX_FRAME_LEN, 5173 IEEE80211_MAX_FRAME_LEN,
5174 &arvif->beacon_paddr, 5174 &arvif->beacon_paddr,
5175 GFP_ATOMIC); 5175 GFP_ATOMIC);
5176 if (!arvif->beacon_buf) { 5176 if (!arvif->beacon_buf) {
5177 ret = -ENOMEM; 5177 ret = -ENOMEM;
5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 01b4edb00e9e..39e0b1cc2a12 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -936,8 +936,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
936 */ 936 */
937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
938 938
939 data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, 939 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
940 alloc_nbytes,
941 &ce_data_base, 940 &ce_data_base,
942 GFP_ATOMIC); 941 GFP_ATOMIC);
943 942
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ba837403e266..8e236d158ca6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -5193,7 +5193,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5193 void *vaddr; 5193 void *vaddr;
5194 5194
5195 pool_size = num_units * round_up(unit_len, 4); 5195 pool_size = num_units * round_up(unit_len, 4);
5196 vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5196 vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5197 5197
5198 if (!vaddr) 5198 if (!vaddr)
5199 return -ENOMEM; 5199 return -ENOMEM;
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 5ab3e31c9ffa..bab30f7a443c 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
174 int i; 174 int i;
175 175
176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); 176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
177 wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, 177 wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
178 &wcn_ch->dma_addr, 178 GFP_KERNEL);
179 GFP_KERNEL);
180 if (!wcn_ch->cpu_addr) 179 if (!wcn_ch->cpu_addr)
181 return -ENOMEM; 180 return -ENOMEM;
182 181
@@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
627 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 626 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
628 627
629 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; 628 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
630 cpu_addr = dma_zalloc_coherent(wcn->dev, s, 629 cpu_addr = dma_alloc_coherent(wcn->dev, s,
631 &wcn->mgmt_mem_pool.phy_addr, 630 &wcn->mgmt_mem_pool.phy_addr,
632 GFP_KERNEL); 631 GFP_KERNEL);
633 if (!cpu_addr) 632 if (!cpu_addr)
634 goto out_err; 633 goto out_err;
635 634
@@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
642 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 641 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
643 642
644 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; 643 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
645 cpu_addr = dma_zalloc_coherent(wcn->dev, s, 644 cpu_addr = dma_alloc_coherent(wcn->dev, s,
646 &wcn->data_mem_pool.phy_addr, 645 &wcn->data_mem_pool.phy_addr,
647 GFP_KERNEL); 646 GFP_KERNEL);
648 if (!cpu_addr) 647 if (!cpu_addr)
649 goto out_err; 648 goto out_err;
650 649
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 05a8348bd7b9..3380aaef456c 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil,
99 /* Status messages are allocated and initialized to 0. This is necessary 99 /* Status messages are allocated and initialized to 0. This is necessary
100 * since DR bit should be initialized to 0. 100 * since DR bit should be initialized to 0.
101 */ 101 */
102 sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 102 sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
103 if (!sring->va) 103 if (!sring->va)
104 return -ENOMEM; 104 return -ENOMEM;
105 105
@@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
381 if (!ring->ctx) 381 if (!ring->ctx)
382 goto err; 382 goto err;
383 383
384 ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 384 ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
385 if (!ring->va) 385 if (!ring->va)
386 goto err_free_ctx; 386 goto err_free_ctx;
387 387
388 if (ring->is_rx) { 388 if (ring->is_rx) {
389 sz = sizeof(*ring->edma_rx_swtail.va); 389 sz = sizeof(*ring->edma_rx_swtail.va);
390 ring->edma_rx_swtail.va = 390 ring->edma_rx_swtail.va =
391 dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 391 dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
392 GFP_KERNEL); 392 GFP_KERNEL);
393 if (!ring->edma_rx_swtail.va) 393 if (!ring->edma_rx_swtail.va)
394 goto err_free_va; 394 goto err_free_va;
395 } 395 }
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index dfc4c34298d4..b34e51933257 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? 431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; 432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
433 433
434 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
435 ring_mem_size, &(ring->dmabase), 435 ring_mem_size, &(ring->dmabase),
436 GFP_KERNEL); 436 GFP_KERNEL);
437 if (!ring->descbase) 437 if (!ring->descbase)
438 return -ENOMEM; 438 return -ENOMEM;
439 439
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 1b1da7d83652..2ce1537d983c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
331static int alloc_ringmemory(struct b43legacy_dmaring *ring) 331static int alloc_ringmemory(struct b43legacy_dmaring *ring)
332{ 332{
333 /* GFP flags must match the flags in free_ringmemory()! */ 333 /* GFP flags must match the flags in free_ringmemory()! */
334 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
335 B43legacy_DMA_RINGMEMSIZE, 335 B43legacy_DMA_RINGMEMSIZE,
336 &(ring->dmabase), GFP_KERNEL); 336 &(ring->dmabase), GFP_KERNEL);
337 if (!ring->descbase) 337 if (!ring->descbase)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 16d7dda965d8..0f69b3fa296e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1281,10 +1281,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1281 u32 addr; 1281 u32 addr;
1282 1282
1283 devinfo->shared.scratch = 1283 devinfo->shared.scratch =
1284 dma_zalloc_coherent(&devinfo->pdev->dev, 1284 dma_alloc_coherent(&devinfo->pdev->dev,
1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1286 &devinfo->shared.scratch_dmahandle, 1286 &devinfo->shared.scratch_dmahandle,
1287 GFP_KERNEL); 1287 GFP_KERNEL);
1288 if (!devinfo->shared.scratch) 1288 if (!devinfo->shared.scratch)
1289 goto fail; 1289 goto fail;
1290 1290
@@ -1298,10 +1298,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1299 1299
1300 devinfo->shared.ringupd = 1300 devinfo->shared.ringupd =
1301 dma_zalloc_coherent(&devinfo->pdev->dev, 1301 dma_alloc_coherent(&devinfo->pdev->dev,
1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1303 &devinfo->shared.ringupd_dmahandle, 1303 &devinfo->shared.ringupd_dmahandle,
1304 GFP_KERNEL); 1304 GFP_KERNEL);
1305 if (!devinfo->shared.ringupd) 1305 if (!devinfo->shared.ringupd)
1306 goto fail; 1306 goto fail;
1307 1307
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e965cc588850..9e850c25877b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -711,30 +711,24 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
711 * Allocate the circular buffer of Read Buffer Descriptors 711 * Allocate the circular buffer of Read Buffer Descriptors
712 * (RBDs) 712 * (RBDs)
713 */ 713 */
714 rxq->bd = dma_zalloc_coherent(dev, 714 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
715 free_size * rxq->queue_size, 715 &rxq->bd_dma, GFP_KERNEL);
716 &rxq->bd_dma, GFP_KERNEL);
717 if (!rxq->bd) 716 if (!rxq->bd)
718 goto err; 717 goto err;
719 718
720 if (trans->cfg->mq_rx_supported) { 719 if (trans->cfg->mq_rx_supported) {
721 rxq->used_bd = dma_zalloc_coherent(dev, 720 rxq->used_bd = dma_alloc_coherent(dev,
722 (use_rx_td ? 721 (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
723 sizeof(*rxq->cd) : 722 &rxq->used_bd_dma,
724 sizeof(__le32)) * 723 GFP_KERNEL);
725 rxq->queue_size,
726 &rxq->used_bd_dma,
727 GFP_KERNEL);
728 if (!rxq->used_bd) 724 if (!rxq->used_bd)
729 goto err; 725 goto err;
730 } 726 }
731 727
732 /* Allocate the driver's pointer to receive buffer status */ 728 /* Allocate the driver's pointer to receive buffer status */
733 rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? 729 rxq->rb_stts = dma_alloc_coherent(dev,
734 sizeof(__le16) : 730 use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
735 sizeof(struct iwl_rb_status), 731 &rxq->rb_stts_dma, GFP_KERNEL);
736 &rxq->rb_stts_dma,
737 GFP_KERNEL);
738 if (!rxq->rb_stts) 732 if (!rxq->rb_stts)
739 goto err; 733 goto err;
740 734
@@ -742,16 +736,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
742 return 0; 736 return 0;
743 737
744 /* Allocate the driver's pointer to TR tail */ 738 /* Allocate the driver's pointer to TR tail */
745 rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 739 rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
746 &rxq->tr_tail_dma, 740 &rxq->tr_tail_dma, GFP_KERNEL);
747 GFP_KERNEL);
748 if (!rxq->tr_tail) 741 if (!rxq->tr_tail)
749 goto err; 742 goto err;
750 743
751 /* Allocate the driver's pointer to CR tail */ 744 /* Allocate the driver's pointer to CR tail */
752 rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 745 rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
753 &rxq->cr_tail_dma, 746 &rxq->cr_tail_dma, GFP_KERNEL);
754 GFP_KERNEL);
755 if (!rxq->cr_tail) 747 if (!rxq->cr_tail)
756 goto err; 748 goto err;
757 /* 749 /*
@@ -1947,9 +1939,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1947 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1939 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1948 1940
1949 trans_pcie->ict_tbl = 1941 trans_pcie->ict_tbl =
1950 dma_zalloc_coherent(trans->dev, ICT_SIZE, 1942 dma_alloc_coherent(trans->dev, ICT_SIZE,
1951 &trans_pcie->ict_tbl_dma, 1943 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
1952 GFP_KERNEL);
1953 if (!trans_pcie->ict_tbl) 1944 if (!trans_pcie->ict_tbl)
1954 return -ENOMEM; 1945 return -ENOMEM;
1955 1946
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index 528cb0401df1..4956a54151cb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
119 /* 119 /*
120 * Allocate DMA memory for descriptor and buffer. 120 * Allocate DMA memory for descriptor and buffer.
121 */ 121 */
122 addr = dma_zalloc_coherent(rt2x00dev->dev, 122 addr = dma_alloc_coherent(rt2x00dev->dev,
123 queue->limit * queue->desc_size, &dma, 123 queue->limit * queue->desc_size, &dma,
124 GFP_KERNEL); 124 GFP_KERNEL);
125 if (!addr) 125 if (!addr)
126 return -ENOMEM; 126 return -ENOMEM;
127 127
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 5ee5f40b4dfc..f1eaa3c4d46a 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1339,10 +1339,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1339 int rc; 1339 int rc;
1340 1340
1341 sndev->nr_rsvd_luts++; 1341 sndev->nr_rsvd_luts++;
1342 sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, 1342 sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
1343 LUT_SIZE, 1343 LUT_SIZE,
1344 &sndev->self_shared_dma, 1344 &sndev->self_shared_dma,
1345 GFP_KERNEL); 1345 GFP_KERNEL);
1346 if (!sndev->self_shared) { 1346 if (!sndev->self_shared) {
1347 dev_err(&sndev->stdev->dev, 1347 dev_err(&sndev->stdev->dev,
1348 "unable to allocate memory for shared mw\n"); 1348 "unable to allocate memory for shared mw\n");
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 2b2cf4e554d3..e5ffd5733540 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -54,12 +54,12 @@ struct nvdimm {
54}; 54};
55 55
56static inline enum nvdimm_security_state nvdimm_security_state( 56static inline enum nvdimm_security_state nvdimm_security_state(
57 struct nvdimm *nvdimm, bool master) 57 struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
58{ 58{
59 if (!nvdimm->sec.ops) 59 if (!nvdimm->sec.ops)
60 return -ENXIO; 60 return -ENXIO;
61 61
62 return nvdimm->sec.ops->state(nvdimm, master); 62 return nvdimm->sec.ops->state(nvdimm, ptype);
63} 63}
64int nvdimm_security_freeze(struct nvdimm *nvdimm); 64int nvdimm_security_freeze(struct nvdimm *nvdimm);
65#if IS_ENABLED(CONFIG_NVDIMM_KEYS) 65#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 08f2c92602f4..150e49723c15 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2173,18 +2173,20 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
2173 size_t nqnlen; 2173 size_t nqnlen;
2174 int off; 2174 int off;
2175 2175
2176 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2176 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2177 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2177 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2178 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2178 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2179 return; 2179 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2180 } 2180 return;
2181 }
2181 2182
2182 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2183 if (ctrl->vs >= NVME_VS(1, 2, 1))
2183 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2184 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2185 }
2184 2186
2185 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2187 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2186 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2188 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2187 "nqn.2014.08.org.nvmexpress:%4x%4x", 2189 "nqn.2014.08.org.nvmexpress:%04x%04x",
2188 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2190 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2189 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2191 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2190 off += sizeof(id->sn); 2192 off += sizeof(id->sn);
@@ -2500,7 +2502,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2500 ctrl->oaes = le32_to_cpu(id->oaes); 2502 ctrl->oaes = le32_to_cpu(id->oaes);
2501 atomic_set(&ctrl->abort_limit, id->acl + 1); 2503 atomic_set(&ctrl->abort_limit, id->acl + 1);
2502 ctrl->vwc = id->vwc; 2504 ctrl->vwc = id->vwc;
2503 ctrl->cntlid = le16_to_cpup(&id->cntlid);
2504 if (id->mdts) 2505 if (id->mdts)
2505 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2506 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2506 else 2507 else
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index b2ab213f43de..3eb908c50e1a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -874,6 +874,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
874 if (opts->discovery_nqn) { 874 if (opts->discovery_nqn) {
875 opts->kato = 0; 875 opts->kato = 0;
876 opts->nr_io_queues = 0; 876 opts->nr_io_queues = 0;
877 opts->nr_write_queues = 0;
878 opts->nr_poll_queues = 0;
877 opts->duplicate_connect = true; 879 opts->duplicate_connect = true;
878 } 880 }
879 if (ctrl_loss_tmo < 0) 881 if (ctrl_loss_tmo < 0)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 183ec17ba067..df4b3a6db51b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -570,6 +570,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
570 return 0; 570 return 0;
571out_free_ana_log_buf: 571out_free_ana_log_buf:
572 kfree(ctrl->ana_log_buf); 572 kfree(ctrl->ana_log_buf);
573 ctrl->ana_log_buf = NULL;
573out: 574out:
574 return error; 575 return error;
575} 576}
@@ -577,5 +578,6 @@ out:
577void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 578void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
578{ 579{
579 kfree(ctrl->ana_log_buf); 580 kfree(ctrl->ana_log_buf);
581 ctrl->ana_log_buf = NULL;
580} 582}
581 583
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2b36ac922596..ab961bdeea89 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -90,6 +90,11 @@ enum nvme_quirks {
90 * Set MEDIUM priority on SQ creation 90 * Set MEDIUM priority on SQ creation
91 */ 91 */
92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), 92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
93
94 /*
95 * Ignore device provided subnqn.
96 */
97 NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
93}; 98};
94 99
95/* 100/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a0bf6a24d50..9bc585415d9b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -95,6 +95,7 @@ struct nvme_dev;
95struct nvme_queue; 95struct nvme_queue;
96 96
97static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 97static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
98static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
98 99
99/* 100/*
100 * Represents an NVM Express device. Each nvme_dev is a PCI function. 101 * Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -1019,9 +1020,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
1019 1020
1020static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 1021static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1021{ 1022{
1022 if (++nvmeq->cq_head == nvmeq->q_depth) { 1023 if (nvmeq->cq_head == nvmeq->q_depth - 1) {
1023 nvmeq->cq_head = 0; 1024 nvmeq->cq_head = 0;
1024 nvmeq->cq_phase = !nvmeq->cq_phase; 1025 nvmeq->cq_phase = !nvmeq->cq_phase;
1026 } else {
1027 nvmeq->cq_head++;
1025 } 1028 }
1026} 1029}
1027 1030
@@ -1420,6 +1423,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1420 return 0; 1423 return 0;
1421} 1424}
1422 1425
1426static void nvme_suspend_io_queues(struct nvme_dev *dev)
1427{
1428 int i;
1429
1430 for (i = dev->ctrl.queue_count - 1; i > 0; i--)
1431 nvme_suspend_queue(&dev->queues[i]);
1432}
1433
1423static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 1434static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
1424{ 1435{
1425 struct nvme_queue *nvmeq = &dev->queues[0]; 1436 struct nvme_queue *nvmeq = &dev->queues[0];
@@ -1485,8 +1496,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
1485 if (dev->ctrl.queue_count > qid) 1496 if (dev->ctrl.queue_count > qid)
1486 return 0; 1497 return 0;
1487 1498
1488 nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth), 1499 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
1489 &nvmeq->cq_dma_addr, GFP_KERNEL); 1500 &nvmeq->cq_dma_addr, GFP_KERNEL);
1490 if (!nvmeq->cqes) 1501 if (!nvmeq->cqes)
1491 goto free_nvmeq; 1502 goto free_nvmeq;
1492 1503
@@ -1885,8 +1896,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
1885 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 1896 struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
1886 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size; 1897 size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
1887 1898
1888 dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i], 1899 dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
1889 le64_to_cpu(desc->addr)); 1900 le64_to_cpu(desc->addr),
1901 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1890 } 1902 }
1891 1903
1892 kfree(dev->host_mem_desc_bufs); 1904 kfree(dev->host_mem_desc_bufs);
@@ -1915,8 +1927,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
1915 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1927 if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
1916 max_entries = dev->ctrl.hmmaxd; 1928 max_entries = dev->ctrl.hmmaxd;
1917 1929
1918 descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs), 1930 descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
1919 &descs_dma, GFP_KERNEL); 1931 &descs_dma, GFP_KERNEL);
1920 if (!descs) 1932 if (!descs)
1921 goto out; 1933 goto out;
1922 1934
@@ -1952,8 +1964,9 @@ out_free_bufs:
1952 while (--i >= 0) { 1964 while (--i >= 0) {
1953 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size; 1965 size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
1954 1966
1955 dma_free_coherent(dev->dev, size, bufs[i], 1967 dma_free_attrs(dev->dev, size, bufs[i],
1956 le64_to_cpu(descs[i].addr)); 1968 le64_to_cpu(descs[i].addr),
1969 DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
1957 } 1970 }
1958 1971
1959 kfree(bufs); 1972 kfree(bufs);
@@ -2028,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
2028 return ret; 2041 return ret;
2029} 2042}
2030 2043
2044/* irq_queues covers admin queue */
2031static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) 2045static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
2032{ 2046{
2033 unsigned int this_w_queues = write_queues; 2047 unsigned int this_w_queues = write_queues;
2034 2048
2049 WARN_ON(!irq_queues);
2050
2035 /* 2051 /*
2036 * Setup read/write queue split 2052 * Setup read/write queue split, assign admin queue one independent
2053 * irq vector if irq_queues is > 1.
2037 */ 2054 */
2038 if (irq_queues == 1) { 2055 if (irq_queues <= 2) {
2039 dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2056 dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
2040 dev->io_queues[HCTX_TYPE_READ] = 0; 2057 dev->io_queues[HCTX_TYPE_READ] = 0;
2041 return; 2058 return;
@@ -2043,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
2043 2060
2044 /* 2061 /*
2045 * If 'write_queues' is set, ensure it leaves room for at least 2062 * If 'write_queues' is set, ensure it leaves room for at least
2046 * one read queue 2063 * one read queue and one admin queue
2047 */ 2064 */
2048 if (this_w_queues >= irq_queues) 2065 if (this_w_queues >= irq_queues)
2049 this_w_queues = irq_queues - 1; 2066 this_w_queues = irq_queues - 2;
2050 2067
2051 /* 2068 /*
2052 * If 'write_queues' is set to zero, reads and writes will share 2069 * If 'write_queues' is set to zero, reads and writes will share
2053 * a queue set. 2070 * a queue set.
2054 */ 2071 */
2055 if (!this_w_queues) { 2072 if (!this_w_queues) {
2056 dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues; 2073 dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
2057 dev->io_queues[HCTX_TYPE_READ] = 0; 2074 dev->io_queues[HCTX_TYPE_READ] = 0;
2058 } else { 2075 } else {
2059 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; 2076 dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
2060 dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues; 2077 dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
2061 } 2078 }
2062} 2079}
2063 2080
@@ -2082,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2082 this_p_queues = nr_io_queues - 1; 2099 this_p_queues = nr_io_queues - 1;
2083 irq_queues = 1; 2100 irq_queues = 1;
2084 } else { 2101 } else {
2085 irq_queues = nr_io_queues - this_p_queues; 2102 irq_queues = nr_io_queues - this_p_queues + 1;
2086 } 2103 }
2087 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; 2104 dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
2088 2105
@@ -2102,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2102 * If we got a failure and we're down to asking for just 2119 * If we got a failure and we're down to asking for just
2103 * 1 + 1 queues, just ask for a single vector. We'll share 2120 * 1 + 1 queues, just ask for a single vector. We'll share
2104 * that between the single IO queue and the admin queue. 2121 * that between the single IO queue and the admin queue.
2122 * Otherwise, we assign one independent vector to admin queue.
2105 */ 2123 */
2106 if (result >= 0 && irq_queues > 1) 2124 if (irq_queues > 1)
2107 irq_queues = irq_sets[0] + irq_sets[1] + 1; 2125 irq_queues = irq_sets[0] + irq_sets[1] + 1;
2108 2126
2109 result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, 2127 result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
@@ -2132,6 +2150,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
2132 return result; 2150 return result;
2133} 2151}
2134 2152
2153static void nvme_disable_io_queues(struct nvme_dev *dev)
2154{
2155 if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
2156 __nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2157}
2158
2135static int nvme_setup_io_queues(struct nvme_dev *dev) 2159static int nvme_setup_io_queues(struct nvme_dev *dev)
2136{ 2160{
2137 struct nvme_queue *adminq = &dev->queues[0]; 2161 struct nvme_queue *adminq = &dev->queues[0];
@@ -2168,6 +2192,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2168 } while (1); 2192 } while (1);
2169 adminq->q_db = dev->dbs; 2193 adminq->q_db = dev->dbs;
2170 2194
2195 retry:
2171 /* Deregister the admin queue's interrupt */ 2196 /* Deregister the admin queue's interrupt */
2172 pci_free_irq(pdev, 0, adminq); 2197 pci_free_irq(pdev, 0, adminq);
2173 2198
@@ -2185,25 +2210,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
2185 result = max(result - 1, 1); 2210 result = max(result - 1, 1);
2186 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 2211 dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
2187 2212
2188 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2189 dev->io_queues[HCTX_TYPE_DEFAULT],
2190 dev->io_queues[HCTX_TYPE_READ],
2191 dev->io_queues[HCTX_TYPE_POLL]);
2192
2193 /* 2213 /*
2194 * Should investigate if there's a performance win from allocating 2214 * Should investigate if there's a performance win from allocating
2195 * more queues than interrupt vectors; it might allow the submission 2215 * more queues than interrupt vectors; it might allow the submission
2196 * path to scale better, even if the receive path is limited by the 2216 * path to scale better, even if the receive path is limited by the
2197 * number of interrupts. 2217 * number of interrupts.
2198 */ 2218 */
2199
2200 result = queue_request_irq(adminq); 2219 result = queue_request_irq(adminq);
2201 if (result) { 2220 if (result) {
2202 adminq->cq_vector = -1; 2221 adminq->cq_vector = -1;
2203 return result; 2222 return result;
2204 } 2223 }
2205 set_bit(NVMEQ_ENABLED, &adminq->flags); 2224 set_bit(NVMEQ_ENABLED, &adminq->flags);
2206 return nvme_create_io_queues(dev); 2225
2226 result = nvme_create_io_queues(dev);
2227 if (result || dev->online_queues < 2)
2228 return result;
2229
2230 if (dev->online_queues - 1 < dev->max_qid) {
2231 nr_io_queues = dev->online_queues - 1;
2232 nvme_disable_io_queues(dev);
2233 nvme_suspend_io_queues(dev);
2234 goto retry;
2235 }
2236 dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
2237 dev->io_queues[HCTX_TYPE_DEFAULT],
2238 dev->io_queues[HCTX_TYPE_READ],
2239 dev->io_queues[HCTX_TYPE_POLL]);
2240 return 0;
2207} 2241}
2208 2242
2209static void nvme_del_queue_end(struct request *req, blk_status_t error) 2243static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2248,7 +2282,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2248 return 0; 2282 return 0;
2249} 2283}
2250 2284
2251static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2285static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
2252{ 2286{
2253 int nr_queues = dev->online_queues - 1, sent = 0; 2287 int nr_queues = dev->online_queues - 1, sent = 0;
2254 unsigned long timeout; 2288 unsigned long timeout;
@@ -2294,7 +2328,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
2294 dev->tagset.nr_maps = 2; /* default + read */ 2328 dev->tagset.nr_maps = 2; /* default + read */
2295 if (dev->io_queues[HCTX_TYPE_POLL]) 2329 if (dev->io_queues[HCTX_TYPE_POLL])
2296 dev->tagset.nr_maps++; 2330 dev->tagset.nr_maps++;
2297 dev->tagset.nr_maps = HCTX_MAX_TYPES;
2298 dev->tagset.timeout = NVME_IO_TIMEOUT; 2331 dev->tagset.timeout = NVME_IO_TIMEOUT;
2299 dev->tagset.numa_node = dev_to_node(dev->dev); 2332 dev->tagset.numa_node = dev_to_node(dev->dev);
2300 dev->tagset.queue_depth = 2333 dev->tagset.queue_depth =
@@ -2410,7 +2443,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
2410 2443
2411static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 2444static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2412{ 2445{
2413 int i;
2414 bool dead = true; 2446 bool dead = true;
2415 struct pci_dev *pdev = to_pci_dev(dev->dev); 2447 struct pci_dev *pdev = to_pci_dev(dev->dev);
2416 2448
@@ -2437,13 +2469,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2437 nvme_stop_queues(&dev->ctrl); 2469 nvme_stop_queues(&dev->ctrl);
2438 2470
2439 if (!dead && dev->ctrl.queue_count > 0) { 2471 if (!dead && dev->ctrl.queue_count > 0) {
2440 if (nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 2472 nvme_disable_io_queues(dev);
2441 nvme_disable_io_queues(dev, nvme_admin_delete_cq);
2442 nvme_disable_admin_queue(dev, shutdown); 2473 nvme_disable_admin_queue(dev, shutdown);
2443 } 2474 }
2444 for (i = dev->ctrl.queue_count - 1; i >= 0; i--) 2475 nvme_suspend_io_queues(dev);
2445 nvme_suspend_queue(&dev->queues[i]); 2476 nvme_suspend_queue(&dev->queues[0]);
2446
2447 nvme_pci_disable(dev); 2477 nvme_pci_disable(dev);
2448 2478
2449 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2479 blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2946,6 +2976,8 @@ static const struct pci_device_id nvme_id_table[] = {
2946 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 2976 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
2947 .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 2977 .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
2948 NVME_QUIRK_MEDIUM_PRIO_SQ }, 2978 NVME_QUIRK_MEDIUM_PRIO_SQ },
2979 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
2980 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
2949 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2981 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2950 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2982 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
2951 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 2983 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index de174912445e..265a0543b381 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1565,8 +1565,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1565{ 1565{
1566 nvme_tcp_stop_io_queues(ctrl); 1566 nvme_tcp_stop_io_queues(ctrl);
1567 if (remove) { 1567 if (remove) {
1568 if (ctrl->ops->flags & NVME_F_FABRICS) 1568 blk_cleanup_queue(ctrl->connect_q);
1569 blk_cleanup_queue(ctrl->connect_q);
1570 blk_mq_free_tag_set(ctrl->tagset); 1569 blk_mq_free_tag_set(ctrl->tagset);
1571 } 1570 }
1572 nvme_tcp_free_io_queues(ctrl); 1571 nvme_tcp_free_io_queues(ctrl);
@@ -1587,12 +1586,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1587 goto out_free_io_queues; 1586 goto out_free_io_queues;
1588 } 1587 }
1589 1588
1590 if (ctrl->ops->flags & NVME_F_FABRICS) { 1589 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1591 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); 1590 if (IS_ERR(ctrl->connect_q)) {
1592 if (IS_ERR(ctrl->connect_q)) { 1591 ret = PTR_ERR(ctrl->connect_q);
1593 ret = PTR_ERR(ctrl->connect_q); 1592 goto out_free_tag_set;
1594 goto out_free_tag_set;
1595 }
1596 } 1593 }
1597 } else { 1594 } else {
1598 blk_mq_update_nr_hw_queues(ctrl->tagset, 1595 blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1606,7 +1603,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1606 return 0; 1603 return 0;
1607 1604
1608out_cleanup_connect_q: 1605out_cleanup_connect_q:
1609 if (new && (ctrl->ops->flags & NVME_F_FABRICS)) 1606 if (new)
1610 blk_cleanup_queue(ctrl->connect_q); 1607 blk_cleanup_queue(ctrl->connect_q);
1611out_free_tag_set: 1608out_free_tag_set:
1612 if (new) 1609 if (new)
@@ -1620,7 +1617,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1620{ 1617{
1621 nvme_tcp_stop_queue(ctrl, 0); 1618 nvme_tcp_stop_queue(ctrl, 0);
1622 if (remove) { 1619 if (remove) {
1623 free_opal_dev(ctrl->opal_dev);
1624 blk_cleanup_queue(ctrl->admin_q); 1620 blk_cleanup_queue(ctrl->admin_q);
1625 blk_mq_free_tag_set(ctrl->admin_tagset); 1621 blk_mq_free_tag_set(ctrl->admin_tagset);
1626 } 1622 }
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 44b37b202e39..ad0df786fe93 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1089,7 +1089,7 @@ out:
1089 1089
1090static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) 1090static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1091{ 1091{
1092 int result; 1092 int result = 0;
1093 1093
1094 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) 1094 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1095 return 0; 1095 return 0;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index a09c1c3cf831..49b16f76d78e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np)
207 207
208 if (!of_node_check_flag(np, OF_OVERLAY)) { 208 if (!of_node_check_flag(np, OF_OVERLAY)) {
209 np->name = __of_get_property(np, "name", NULL); 209 np->name = __of_get_property(np, "name", NULL);
210 np->type = __of_get_property(np, "device_type", NULL);
211 if (!np->name) 210 if (!np->name)
212 np->name = "<NULL>"; 211 np->name = "<NULL>";
213 if (!np->type)
214 np->type = "<NULL>";
215 212
216 phandle = __of_get_property(np, "phandle", &sz); 213 phandle = __of_get_property(np, "phandle", &sz);
217 if (!phandle) 214 if (!phandle)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7099c652c6a5..9cc1461aac7d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,12 +314,8 @@ static bool populate_node(const void *blob,
314 populate_properties(blob, offset, mem, np, pathp, dryrun); 314 populate_properties(blob, offset, mem, np, pathp, dryrun);
315 if (!dryrun) { 315 if (!dryrun) {
316 np->name = of_get_property(np, "name", NULL); 316 np->name = of_get_property(np, "name", NULL);
317 np->type = of_get_property(np, "device_type", NULL);
318
319 if (!np->name) 317 if (!np->name)
320 np->name = "<NULL>"; 318 np->name = "<NULL>";
321 if (!np->type)
322 np->type = "<NULL>";
323 } 319 }
324 320
325 *pnp = np; 321 *pnp = np;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2b5ac43a5690..c423e94baf0f 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
423 423
424 tchild->parent = target->np; 424 tchild->parent = target->np;
425 tchild->name = __of_get_property(node, "name", NULL); 425 tchild->name = __of_get_property(node, "name", NULL);
426 tchild->type = __of_get_property(node, "device_type", NULL);
427 426
428 if (!tchild->name) 427 if (!tchild->name)
429 tchild->name = "<NULL>"; 428 tchild->name = "<NULL>";
430 if (!tchild->type)
431 tchild->type = "<NULL>";
432 429
433 /* ignore obsolete "linux,phandle" */ 430 /* ignore obsolete "linux,phandle" */
434 phandle = __of_get_property(node, "phandle", &size); 431 phandle = __of_get_property(node, "phandle", &size);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index d3185063d369..7eda43c66c91 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
155 dp->parent = parent; 155 dp->parent = parent;
156 156
157 dp->name = of_pdt_get_one_property(node, "name"); 157 dp->name = of_pdt_get_one_property(node, "name");
158 dp->type = of_pdt_get_one_property(node, "device_type");
159 dp->phandle = node; 158 dp->phandle = node;
160 159
161 dp->properties = of_pdt_build_prop_list(node); 160 dp->properties = of_pdt_build_prop_list(node);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 08430031bd28..8631efa1daa1 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
806 806
807 if (!of_device_is_available(remote)) { 807 if (!of_device_is_available(remote)) {
808 pr_debug("not available for remote node\n"); 808 pr_debug("not available for remote node\n");
809 of_node_put(remote);
809 return NULL; 810 return NULL;
810 } 811 }
811 812
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e5507add8f04..18f1639dbc4a 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -988,11 +988,9 @@ void _opp_free(struct dev_pm_opp *opp)
988 kfree(opp); 988 kfree(opp);
989} 989}
990 990
991static void _opp_kref_release(struct kref *kref) 991static void _opp_kref_release(struct dev_pm_opp *opp,
992 struct opp_table *opp_table)
992{ 993{
993 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
994 struct opp_table *opp_table = opp->opp_table;
995
996 /* 994 /*
997 * Notify the changes in the availability of the operable 995 * Notify the changes in the availability of the operable
998 * frequency/voltage list. 996 * frequency/voltage list.
@@ -1002,7 +1000,22 @@ static void _opp_kref_release(struct kref *kref)
1002 opp_debug_remove_one(opp); 1000 opp_debug_remove_one(opp);
1003 list_del(&opp->node); 1001 list_del(&opp->node);
1004 kfree(opp); 1002 kfree(opp);
1003}
1005 1004
1005static void _opp_kref_release_unlocked(struct kref *kref)
1006{
1007 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1008 struct opp_table *opp_table = opp->opp_table;
1009
1010 _opp_kref_release(opp, opp_table);
1011}
1012
1013static void _opp_kref_release_locked(struct kref *kref)
1014{
1015 struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
1016 struct opp_table *opp_table = opp->opp_table;
1017
1018 _opp_kref_release(opp, opp_table);
1006 mutex_unlock(&opp_table->lock); 1019 mutex_unlock(&opp_table->lock);
1007} 1020}
1008 1021
@@ -1013,10 +1026,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
1013 1026
1014void dev_pm_opp_put(struct dev_pm_opp *opp) 1027void dev_pm_opp_put(struct dev_pm_opp *opp)
1015{ 1028{
1016 kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); 1029 kref_put_mutex(&opp->kref, _opp_kref_release_locked,
1030 &opp->opp_table->lock);
1017} 1031}
1018EXPORT_SYMBOL_GPL(dev_pm_opp_put); 1032EXPORT_SYMBOL_GPL(dev_pm_opp_put);
1019 1033
1034static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
1035{
1036 kref_put(&opp->kref, _opp_kref_release_unlocked);
1037}
1038
1020/** 1039/**
1021 * dev_pm_opp_remove() - Remove an OPP from OPP table 1040 * dev_pm_opp_remove() - Remove an OPP from OPP table
1022 * @dev: device for which we do this operation 1041 * @dev: device for which we do this operation
@@ -1060,6 +1079,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
1060} 1079}
1061EXPORT_SYMBOL_GPL(dev_pm_opp_remove); 1080EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
1062 1081
1082/**
1083 * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
1084 * @dev: device for which we do this operation
1085 *
1086 * This function removes all dynamically created OPPs from the opp table.
1087 */
1088void dev_pm_opp_remove_all_dynamic(struct device *dev)
1089{
1090 struct opp_table *opp_table;
1091 struct dev_pm_opp *opp, *temp;
1092 int count = 0;
1093
1094 opp_table = _find_opp_table(dev);
1095 if (IS_ERR(opp_table))
1096 return;
1097
1098 mutex_lock(&opp_table->lock);
1099 list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
1100 if (opp->dynamic) {
1101 dev_pm_opp_put_unlocked(opp);
1102 count++;
1103 }
1104 }
1105 mutex_unlock(&opp_table->lock);
1106
1107 /* Drop the references taken by dev_pm_opp_add() */
1108 while (count--)
1109 dev_pm_opp_put_opp_table(opp_table);
1110
1111 /* Drop the reference taken by _find_opp_table() */
1112 dev_pm_opp_put_opp_table(opp_table);
1113}
1114EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
1115
1063struct dev_pm_opp *_opp_allocate(struct opp_table *table) 1116struct dev_pm_opp *_opp_allocate(struct opp_table *table)
1064{ 1117{
1065 struct dev_pm_opp *opp; 1118 struct dev_pm_opp *opp;
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4310c7a4212e..2ab92409210a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,13 +21,14 @@ menuconfig PCI
21 support for PCI-X and the foundations for PCI Express support. 21 support for PCI-X and the foundations for PCI Express support.
22 Say 'Y' here unless you know what you are doing. 22 Say 'Y' here unless you know what you are doing.
23 23
24if PCI
25
24config PCI_DOMAINS 26config PCI_DOMAINS
25 bool 27 bool
26 depends on PCI 28 depends on PCI
27 29
28config PCI_DOMAINS_GENERIC 30config PCI_DOMAINS_GENERIC
29 bool 31 bool
30 depends on PCI
31 select PCI_DOMAINS 32 select PCI_DOMAINS
32 33
33config PCI_SYSCALL 34config PCI_SYSCALL
@@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig"
37 38
38config PCI_MSI 39config PCI_MSI
39 bool "Message Signaled Interrupts (MSI and MSI-X)" 40 bool "Message Signaled Interrupts (MSI and MSI-X)"
40 depends on PCI
41 select GENERIC_MSI_IRQ 41 select GENERIC_MSI_IRQ
42 help 42 help
43 This allows device drivers to enable MSI (Message Signaled 43 This allows device drivers to enable MSI (Message Signaled
@@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN
59config PCI_QUIRKS 59config PCI_QUIRKS
60 default y 60 default y
61 bool "Enable PCI quirk workarounds" if EXPERT 61 bool "Enable PCI quirk workarounds" if EXPERT
62 depends on PCI
63 help 62 help
64 This enables workarounds for various PCI chipset bugs/quirks. 63 This enables workarounds for various PCI chipset bugs/quirks.
65 Disable this only if your target machine is unaffected by PCI 64 Disable this only if your target machine is unaffected by PCI
@@ -67,7 +66,7 @@ config PCI_QUIRKS
67 66
68config PCI_DEBUG 67config PCI_DEBUG
69 bool "PCI Debugging" 68 bool "PCI Debugging"
70 depends on PCI && DEBUG_KERNEL 69 depends on DEBUG_KERNEL
71 help 70 help
72 Say Y here if you want the PCI core to produce a bunch of debug 71 Say Y here if you want the PCI core to produce a bunch of debug
73 messages to the system log. Select this if you are having a 72 messages to the system log. Select this if you are having a
@@ -77,7 +76,6 @@ config PCI_DEBUG
77 76
78config PCI_REALLOC_ENABLE_AUTO 77config PCI_REALLOC_ENABLE_AUTO
79 bool "Enable PCI resource re-allocation detection" 78 bool "Enable PCI resource re-allocation detection"
80 depends on PCI
81 depends on PCI_IOV 79 depends on PCI_IOV
82 help 80 help
83 Say Y here if you want the PCI core to detect if PCI resource 81 Say Y here if you want the PCI core to detect if PCI resource
@@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO
90 88
91config PCI_STUB 89config PCI_STUB
92 tristate "PCI Stub driver" 90 tristate "PCI Stub driver"
93 depends on PCI
94 help 91 help
95 Say Y or M here if you want be able to reserve a PCI device 92 Say Y or M here if you want be able to reserve a PCI device
96 when it is going to be assigned to a guest operating system. 93 when it is going to be assigned to a guest operating system.
@@ -99,7 +96,6 @@ config PCI_STUB
99 96
100config PCI_PF_STUB 97config PCI_PF_STUB
101 tristate "PCI PF Stub driver" 98 tristate "PCI PF Stub driver"
102 depends on PCI
103 depends on PCI_IOV 99 depends on PCI_IOV
104 help 100 help
105 Say Y or M here if you want to enable support for devices that 101 Say Y or M here if you want to enable support for devices that
@@ -111,7 +107,7 @@ config PCI_PF_STUB
111 107
112config XEN_PCIDEV_FRONTEND 108config XEN_PCIDEV_FRONTEND
113 tristate "Xen PCI Frontend" 109 tristate "Xen PCI Frontend"
114 depends on PCI && X86 && XEN 110 depends on X86 && XEN
115 select PCI_XEN 111 select PCI_XEN
116 select XEN_XENBUS_FRONTEND 112 select XEN_XENBUS_FRONTEND
117 default y 113 default y
@@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL
133 129
134config PCI_IOV 130config PCI_IOV
135 bool "PCI IOV support" 131 bool "PCI IOV support"
136 depends on PCI
137 select PCI_ATS 132 select PCI_ATS
138 help 133 help
139 I/O Virtualization is a PCI feature supported by some devices 134 I/O Virtualization is a PCI feature supported by some devices
@@ -144,7 +139,6 @@ config PCI_IOV
144 139
145config PCI_PRI 140config PCI_PRI
146 bool "PCI PRI support" 141 bool "PCI PRI support"
147 depends on PCI
148 select PCI_ATS 142 select PCI_ATS
149 help 143 help
150 PRI is the PCI Page Request Interface. It allows PCI devices that are 144 PRI is the PCI Page Request Interface. It allows PCI devices that are
@@ -154,7 +148,6 @@ config PCI_PRI
154 148
155config PCI_PASID 149config PCI_PASID
156 bool "PCI PASID support" 150 bool "PCI PASID support"
157 depends on PCI
158 select PCI_ATS 151 select PCI_ATS
159 help 152 help
160 Process Address Space Identifiers (PASIDs) can be used by PCI devices 153 Process Address Space Identifiers (PASIDs) can be used by PCI devices
@@ -167,7 +160,7 @@ config PCI_PASID
167 160
168config PCI_P2PDMA 161config PCI_P2PDMA
169 bool "PCI peer-to-peer transfer support" 162 bool "PCI peer-to-peer transfer support"
170 depends on PCI && ZONE_DEVICE 163 depends on ZONE_DEVICE
171 select GENERIC_ALLOCATOR 164 select GENERIC_ALLOCATOR
172 help 165 help
173 Enableѕ drivers to do PCI peer-to-peer transactions to and from 166 Enableѕ drivers to do PCI peer-to-peer transactions to and from
@@ -184,12 +177,11 @@ config PCI_P2PDMA
184 177
185config PCI_LABEL 178config PCI_LABEL
186 def_bool y if (DMI || ACPI) 179 def_bool y if (DMI || ACPI)
187 depends on PCI
188 select NLS 180 select NLS
189 181
190config PCI_HYPERV 182config PCI_HYPERV
191 tristate "Hyper-V PCI Frontend" 183 tristate "Hyper-V PCI Frontend"
192 depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64 184 depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
193 help 185 help
194 The PCI device frontend driver allows the kernel to import arbitrary 186 The PCI device frontend driver allows the kernel to import arbitrary
195 PCI devices from a PCI backend to support PCI driver domains. 187 PCI devices from a PCI backend to support PCI driver domains.
@@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig"
198source "drivers/pci/controller/Kconfig" 190source "drivers/pci/controller/Kconfig"
199source "drivers/pci/endpoint/Kconfig" 191source "drivers/pci/endpoint/Kconfig"
200source "drivers/pci/switch/Kconfig" 192source "drivers/pci/switch/Kconfig"
193
194endif
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 241ebe0c4505..e35e9eaa50ee 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/gpio/consumer.h>
11#include <linux/of_device.h> 12#include <linux/of_device.h>
12#include <linux/of_gpio.h> 13#include <linux/of_gpio.h>
13#include <linux/pci.h> 14#include <linux/pci.h>
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 9deb56989d72..cb3401a931f8 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
602 } 602 }
603 603
604 /* Reserve memory for event queue and make sure memories are zeroed */ 604 /* Reserve memory for event queue and make sure memories are zeroed */
605 msi->eq_cpu = dma_zalloc_coherent(pcie->dev, 605 msi->eq_cpu = dma_alloc_coherent(pcie->dev,
606 msi->nr_eq_region * EQ_MEM_REGION_SIZE, 606 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
607 &msi->eq_dma, GFP_KERNEL); 607 &msi->eq_dma, GFP_KERNEL);
608 if (!msi->eq_cpu) { 608 if (!msi->eq_cpu) {
609 ret = -ENOMEM; 609 ret = -ENOMEM;
610 goto free_irqs; 610 goto free_irqs;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a1c8a09efa5..4c0b47867258 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1168 const struct irq_affinity *affd) 1168 const struct irq_affinity *affd)
1169{ 1169{
1170 static const struct irq_affinity msi_default_affd; 1170 static const struct irq_affinity msi_default_affd;
1171 int vecs = -ENOSPC; 1171 int msix_vecs = -ENOSPC;
1172 int msi_vecs = -ENOSPC;
1172 1173
1173 if (flags & PCI_IRQ_AFFINITY) { 1174 if (flags & PCI_IRQ_AFFINITY) {
1174 if (!affd) 1175 if (!affd)
@@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1179 } 1180 }
1180 1181
1181 if (flags & PCI_IRQ_MSIX) { 1182 if (flags & PCI_IRQ_MSIX) {
1182 vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, 1183 msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
1183 affd); 1184 max_vecs, affd);
1184 if (vecs > 0) 1185 if (msix_vecs > 0)
1185 return vecs; 1186 return msix_vecs;
1186 } 1187 }
1187 1188
1188 if (flags & PCI_IRQ_MSI) { 1189 if (flags & PCI_IRQ_MSI) {
1189 vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); 1190 msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
1190 if (vecs > 0) 1191 affd);
1191 return vecs; 1192 if (msi_vecs > 0)
1193 return msi_vecs;
1192 } 1194 }
1193 1195
1194 /* use legacy irq if allowed */ 1196 /* use legacy irq if allowed */
@@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1199 } 1201 }
1200 } 1202 }
1201 1203
1202 return vecs; 1204 if (msix_vecs == -ENOSPC)
1205 return -ENOSPC;
1206 return msi_vecs;
1203} 1207}
1204EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); 1208EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
1205 1209
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9d8e3c837de..c25acace7d91 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
6195 } else if (!strncmp(str, "pcie_scan_all", 13)) { 6195 } else if (!strncmp(str, "pcie_scan_all", 13)) {
6196 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); 6196 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6197 } else if (!strncmp(str, "disable_acs_redir=", 18)) { 6197 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
6198 disable_acs_redir_param = str + 18; 6198 disable_acs_redir_param =
6199 kstrdup(str + 18, GFP_KERNEL);
6199 } else { 6200 } else {
6200 printk(KERN_ERR "PCI: Unknown option `%s'\n", 6201 printk(KERN_ERR "PCI: Unknown option `%s'\n",
6201 str); 6202 str);
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 6c5536d3d42a..e22766c79fe9 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1373,10 +1373,10 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
1373 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0) 1373 if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
1374 return 0; 1374 return 0;
1375 1375
1376 stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev, 1376 stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
1377 sizeof(*stdev->dma_mrpc), 1377 sizeof(*stdev->dma_mrpc),
1378 &stdev->dma_mrpc_dma_addr, 1378 &stdev->dma_mrpc_dma_addr,
1379 GFP_KERNEL); 1379 GFP_KERNEL);
1380 if (stdev->dma_mrpc == NULL) 1380 if (stdev->dma_mrpc == NULL)
1381 return -ENOMEM; 1381 return -ENOMEM;
1382 1382
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index a91fc67fc4e0..d70ba9bc42d9 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -32,7 +32,7 @@
32 32
33/* register 0x01 */ 33/* register 0x01 */
34#define REF_FREF_SEL_25 BIT(0) 34#define REF_FREF_SEL_25 BIT(0)
35#define PHY_MODE_SATA (0x0 << 5) 35#define PHY_BERLIN_MODE_SATA (0x0 << 5)
36 36
37/* register 0x02 */ 37/* register 0x02 */
38#define USE_MAX_PLL_RATE BIT(12) 38#define USE_MAX_PLL_RATE BIT(12)
@@ -102,7 +102,8 @@ static int phy_berlin_sata_power_on(struct phy *phy)
102 102
103 /* set PHY mode and ref freq to 25 MHz */ 103 /* set PHY mode and ref freq to 25 MHz */
104 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01, 104 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01,
105 0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA); 105 0x00ff,
106 REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA);
106 107
107 /* set PHY up to 6 Gbps */ 108 /* set PHY up to 6 Gbps */
108 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25, 109 phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index f137e0107764..c4709ed7fb0e 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -82,6 +82,7 @@ config PHY_TI_GMII_SEL
82 default y if TI_CPSW=y 82 default y if TI_CPSW=y
83 depends on TI_CPSW || COMPILE_TEST 83 depends on TI_CPSW || COMPILE_TEST
84 select GENERIC_PHY 84 select GENERIC_PHY
85 select REGMAP
85 default m 86 default m
86 help 87 help
87 This driver supports configuring of the TI CPSW Port mode depending on 88 This driver supports configuring of the TI CPSW Port mode depending on
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e3b62c2ee8d1..5e2109c54c7c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -1009,7 +1009,7 @@ config INTEL_MFLD_THERMAL
1009 1009
1010config INTEL_IPS 1010config INTEL_IPS
1011 tristate "Intel Intelligent Power Sharing" 1011 tristate "Intel Intelligent Power Sharing"
1012 depends on ACPI 1012 depends on ACPI && PCI
1013 ---help--- 1013 ---help---
1014 Intel Calpella platforms support dynamic power sharing between the 1014 Intel Calpella platforms support dynamic power sharing between the
1015 CPU and GPU, maximizing performance in a given TDP. This driver, 1015 CPU and GPU, maximizing performance in a given TDP. This driver,
@@ -1135,7 +1135,7 @@ config SAMSUNG_Q10
1135 1135
1136config APPLE_GMUX 1136config APPLE_GMUX
1137 tristate "Apple Gmux Driver" 1137 tristate "Apple Gmux Driver"
1138 depends on ACPI 1138 depends on ACPI && PCI
1139 depends on PNP 1139 depends on PNP
1140 depends on BACKLIGHT_CLASS_DEVICE 1140 depends on BACKLIGHT_CLASS_DEVICE
1141 depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE 1141 depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
@@ -1174,7 +1174,7 @@ config INTEL_SMARTCONNECT
1174 1174
1175config INTEL_PMC_IPC 1175config INTEL_PMC_IPC
1176 tristate "Intel PMC IPC Driver" 1176 tristate "Intel PMC IPC Driver"
1177 depends on ACPI 1177 depends on ACPI && PCI
1178 ---help--- 1178 ---help---
1179 This driver provides support for PMC control on some Intel platforms. 1179 This driver provides support for PMC control on some Intel platforms.
1180 The PMC is an ARC processor which defines IPC commands for communication 1180 The PMC is an ARC processor which defines IPC commands for communication
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 797fab33bb98..7cbea796652a 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -224,7 +224,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
224 extoff = NULL; 224 extoff = NULL;
225 break; 225 break;
226 } 226 }
227 if (extoff->n_samples > PTP_MAX_SAMPLES) { 227 if (extoff->n_samples > PTP_MAX_SAMPLES
228 || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
228 err = -EINVAL; 229 err = -EINVAL;
229 break; 230 break;
230 } 231 }
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index bb655854713d..b64c56c33c3b 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
1382 INIT_WORK(&priv->idb_work, tsi721_db_dpc); 1382 INIT_WORK(&priv->idb_work, tsi721_db_dpc);
1383 1383
1384 /* Allocate buffer for inbound doorbells queue */ 1384 /* Allocate buffer for inbound doorbells queue */
1385 priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, 1385 priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
1386 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, 1386 IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
1387 &priv->idb_dma, GFP_KERNEL); 1387 &priv->idb_dma, GFP_KERNEL);
1388 if (!priv->idb_base) 1388 if (!priv->idb_base)
1389 return -ENOMEM; 1389 return -ENOMEM;
1390 1390
@@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1447 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); 1447 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
1448 1448
1449 /* Allocate space for DMA descriptors */ 1449 /* Allocate space for DMA descriptors */
1450 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1450 bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
1451 bd_num * sizeof(struct tsi721_dma_desc), 1451 bd_num * sizeof(struct tsi721_dma_desc),
1452 &bd_phys, GFP_KERNEL); 1452 &bd_phys, GFP_KERNEL);
1453 if (!bd_ptr) 1453 if (!bd_ptr)
1454 return -ENOMEM; 1454 return -ENOMEM;
1455 1455
@@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
1464 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? 1464 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
1465 bd_num : TSI721_DMA_MINSTSSZ; 1465 bd_num : TSI721_DMA_MINSTSSZ;
1466 sts_size = roundup_pow_of_two(sts_size); 1466 sts_size = roundup_pow_of_two(sts_size);
1467 sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, 1467 sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
1468 sts_size * sizeof(struct tsi721_dma_sts), 1468 sts_size * sizeof(struct tsi721_dma_sts),
1469 &sts_phys, GFP_KERNEL); 1469 &sts_phys, GFP_KERNEL);
1470 if (!sts_ptr) { 1470 if (!sts_ptr) {
@@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1939 1939
1940 /* Outbound message descriptor status FIFO allocation */ 1940 /* Outbound message descriptor status FIFO allocation */
1941 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); 1941 priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1942 priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, 1942 priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
1943 priv->omsg_ring[mbox].sts_size * 1943 priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1944 sizeof(struct tsi721_dma_sts), 1944 &priv->omsg_ring[mbox].sts_phys,
1945 &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); 1945 GFP_KERNEL);
1946 if (priv->omsg_ring[mbox].sts_base == NULL) { 1946 if (priv->omsg_ring[mbox].sts_base == NULL) {
1947 tsi_debug(OMSG, &priv->pdev->dev, 1947 tsi_debug(OMSG, &priv->pdev->dev,
1948 "ENOMEM for OB_MSG_%d status FIFO", mbox); 1948 "ENOMEM for OB_MSG_%d status FIFO", mbox);
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 006ea5a45020..7f5d4436f594 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
90 * Allocate space for DMA descriptors 90 * Allocate space for DMA descriptors
91 * (add an extra element for link descriptor) 91 * (add an extra element for link descriptor)
92 */ 92 */
93 bd_ptr = dma_zalloc_coherent(dev, 93 bd_ptr = dma_alloc_coherent(dev,
94 (bd_num + 1) * sizeof(struct tsi721_dma_desc), 94 (bd_num + 1) * sizeof(struct tsi721_dma_desc),
95 &bd_phys, GFP_ATOMIC); 95 &bd_phys, GFP_ATOMIC);
96 if (!bd_ptr) 96 if (!bd_ptr)
97 return -ENOMEM; 97 return -ENOMEM;
98 98
@@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
108 sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? 108 sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
109 (bd_num + 1) : TSI721_DMA_MINSTSSZ; 109 (bd_num + 1) : TSI721_DMA_MINSTSSZ;
110 sts_size = roundup_pow_of_two(sts_size); 110 sts_size = roundup_pow_of_two(sts_size);
111 sts_ptr = dma_zalloc_coherent(dev, 111 sts_ptr = dma_alloc_coherent(dev,
112 sts_size * sizeof(struct tsi721_dma_sts), 112 sts_size * sizeof(struct tsi721_dma_sts),
113 &sts_phys, GFP_ATOMIC); 113 &sts_phys, GFP_ATOMIC);
114 if (!sts_ptr) { 114 if (!sts_ptr) {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 183fc42a510a..2d7cd344f3bf 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
153 const bool * ctx, 153 const bool * ctx,
154 struct irq_affinity *desc) 154 struct irq_affinity *desc)
155{ 155{
156 int i, ret; 156 int i, ret, queue_idx = 0;
157 157
158 for (i = 0; i < nvqs; ++i) { 158 for (i = 0; i < nvqs; ++i) {
159 vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i], 159 if (!names[i]) {
160 vqs[i] = NULL;
161 continue;
162 }
163
164 vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
160 ctx ? ctx[i] : false); 165 ctx ? ctx[i] : false);
161 if (IS_ERR(vqs[i])) { 166 if (IS_ERR(vqs[i])) {
162 ret = PTR_ERR(vqs[i]); 167 ret = PTR_ERR(vqs[i]);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index c21da9fe51ec..2e01bd833ffd 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -109,7 +109,7 @@ config RESET_QCOM_PDC
109 109
110config RESET_SIMPLE 110config RESET_SIMPLE
111 bool "Simple Reset Controller Driver" if COMPILE_TEST 111 bool "Simple Reset Controller Driver" if COMPILE_TEST
112 default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED 112 default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
113 help 113 help
114 This enables a simple reset controller driver for reset lines that 114 This enables a simple reset controller driver for reset lines that
115 that can be asserted and deasserted by toggling bits in a contiguous, 115 that can be asserted and deasserted by toggling bits in a contiguous,
@@ -128,6 +128,14 @@ config RESET_STM32MP157
128 help 128 help
129 This enables the RCC reset controller driver for STM32 MPUs. 129 This enables the RCC reset controller driver for STM32 MPUs.
130 130
131config RESET_SOCFPGA
132 bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA
133 default ARCH_SOCFPGA
134 select RESET_SIMPLE
135 help
136 This enables the reset driver for the SoCFPGA ARMv7 platforms. This
137 driver gets initialized early during platform init calls.
138
131config RESET_SUNXI 139config RESET_SUNXI
132 bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI 140 bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
133 default ARCH_SUNXI 141 default ARCH_SUNXI
@@ -163,15 +171,15 @@ config RESET_UNIPHIER
163 Say Y if you want to control reset signals provided by System Control 171 Say Y if you want to control reset signals provided by System Control
164 block, Media I/O block, Peripheral Block. 172 block, Media I/O block, Peripheral Block.
165 173
166config RESET_UNIPHIER_USB3 174config RESET_UNIPHIER_GLUE
167 tristate "USB3 reset driver for UniPhier SoCs" 175 tristate "Reset driver in glue layer for UniPhier SoCs"
168 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF 176 depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
169 default ARCH_UNIPHIER 177 default ARCH_UNIPHIER
170 select RESET_SIMPLE 178 select RESET_SIMPLE
171 help 179 help
172 Support for the USB3 core reset on UniPhier SoCs. 180 Support for peripheral core reset included in its own glue layer
173 Say Y if you want to control reset signals provided by 181 on UniPhier SoCs. Say Y if you want to control reset signals
174 USB3 glue layer. 182 provided by the glue layer.
175 183
176config RESET_ZYNQ 184config RESET_ZYNQ
177 bool "ZYNQ Reset Driver" if COMPILE_TEST 185 bool "ZYNQ Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d08e8b90046a..dc7874df78d9 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -19,10 +19,11 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
19obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o 19obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
20obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o 20obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
21obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o 21obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
22obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
22obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o 23obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
23obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o 24obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
24obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o 25obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
25obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o 26obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
26obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o 27obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
27obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o 28obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
28 29
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index d1887c0ed5d3..9582efb70025 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -795,3 +795,45 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
795 return rstc; 795 return rstc;
796} 796}
797EXPORT_SYMBOL_GPL(devm_reset_control_array_get); 797EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
798
799static int reset_control_get_count_from_lookup(struct device *dev)
800{
801 const struct reset_control_lookup *lookup;
802 const char *dev_id;
803 int count = 0;
804
805 if (!dev)
806 return -EINVAL;
807
808 dev_id = dev_name(dev);
809 mutex_lock(&reset_lookup_mutex);
810
811 list_for_each_entry(lookup, &reset_lookup_list, list) {
812 if (!strcmp(lookup->dev_id, dev_id))
813 count++;
814 }
815
816 mutex_unlock(&reset_lookup_mutex);
817
818 if (count == 0)
819 count = -ENOENT;
820
821 return count;
822}
823
824/**
825 * reset_control_get_count - Count number of resets available with a device
826 *
827 * @dev: device for which to return the number of resets
828 *
829 * Returns positive reset count on success, or error number on failure and
830 * on count being zero.
831 */
832int reset_control_get_count(struct device *dev)
833{
834 if (dev->of_node)
835 return of_reset_control_get_count(dev->of_node);
836
837 return reset_control_get_count_from_lookup(dev);
838}
839EXPORT_SYMBOL_GPL(reset_control_get_count);
diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c
index 8bce391c6943..4c7b8647b49c 100644
--- a/drivers/reset/reset-hsdk.c
+++ b/drivers/reset/reset-hsdk.c
@@ -86,6 +86,7 @@ static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
86 86
87static const struct reset_control_ops hsdk_reset_ops = { 87static const struct reset_control_ops hsdk_reset_ops = {
88 .reset = hsdk_reset_reset, 88 .reset = hsdk_reset_reset,
89 .deassert = hsdk_reset_reset,
89}; 90};
90 91
91static int hsdk_reset_probe(struct platform_device *pdev) 92static int hsdk_reset_probe(struct platform_device *pdev)
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index a91107fc9e27..77fbba3100c8 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -109,7 +109,7 @@ struct reset_simple_devdata {
109#define SOCFPGA_NR_BANKS 8 109#define SOCFPGA_NR_BANKS 8
110 110
111static const struct reset_simple_devdata reset_simple_socfpga = { 111static const struct reset_simple_devdata reset_simple_socfpga = {
112 .reg_offset = 0x10, 112 .reg_offset = 0x20,
113 .nr_resets = SOCFPGA_NR_BANKS * 32, 113 .nr_resets = SOCFPGA_NR_BANKS * 32,
114 .status_active_low = true, 114 .status_active_low = true,
115}; 115};
@@ -120,7 +120,8 @@ static const struct reset_simple_devdata reset_simple_active_low = {
120}; 120};
121 121
122static const struct of_device_id reset_simple_dt_ids[] = { 122static const struct of_device_id reset_simple_dt_ids[] = {
123 { .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga }, 123 { .compatible = "altr,stratix10-rst-mgr",
124 .data = &reset_simple_socfpga },
124 { .compatible = "st,stm32-rcc", }, 125 { .compatible = "st,stm32-rcc", },
125 { .compatible = "allwinner,sun6i-a31-clock-reset", 126 { .compatible = "allwinner,sun6i-a31-clock-reset",
126 .data = &reset_simple_active_low }, 127 .data = &reset_simple_active_low },
@@ -166,14 +167,6 @@ static int reset_simple_probe(struct platform_device *pdev)
166 data->status_active_low = devdata->status_active_low; 167 data->status_active_low = devdata->status_active_low;
167 } 168 }
168 169
169 if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
170 of_property_read_u32(dev->of_node, "altr,modrst-offset",
171 &reg_offset)) {
172 dev_warn(dev,
173 "missing altr,modrst-offset property, assuming 0x%x!\n",
174 reg_offset);
175 }
176
177 data->membase += reg_offset; 170 data->membase += reg_offset;
178 171
179 return devm_reset_controller_register(dev, &data->rcdev); 172 return devm_reset_controller_register(dev, &data->rcdev);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 000000000000..318cfc51c441
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,88 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018, Intel Corporation
4 * Copied from reset-sunxi.c
5 */
6
7#include <linux/err.h>
8#include <linux/io.h>
9#include <linux/init.h>
10#include <linux/of.h>
11#include <linux/of_address.h>
12#include <linux/platform_device.h>
13#include <linux/reset-controller.h>
14#include <linux/slab.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17
18#include "reset-simple.h"
19
20#define SOCFPGA_NR_BANKS 8
21void __init socfpga_reset_init(void);
22
23static int a10_reset_init(struct device_node *np)
24{
25 struct reset_simple_data *data;
26 struct resource res;
27 resource_size_t size;
28 int ret;
29 u32 reg_offset = 0x10;
30
31 data = kzalloc(sizeof(*data), GFP_KERNEL);
32 if (!data)
33 return -ENOMEM;
34
35 ret = of_address_to_resource(np, 0, &res);
36 if (ret)
37 goto err_alloc;
38
39 size = resource_size(&res);
40 if (!request_mem_region(res.start, size, np->name)) {
41 ret = -EBUSY;
42 goto err_alloc;
43 }
44
45 data->membase = ioremap(res.start, size);
46 if (!data->membase) {
47 ret = -ENOMEM;
48 goto err_alloc;
49 }
50
51 if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
52 pr_warn("missing altr,modrst-offset property, assuming 0x10\n");
53 data->membase += reg_offset;
54
55 spin_lock_init(&data->lock);
56
57 data->rcdev.owner = THIS_MODULE;
58 data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32;
59 data->rcdev.ops = &reset_simple_ops;
60 data->rcdev.of_node = np;
61 data->status_active_low = true;
62
63 return reset_controller_register(&data->rcdev);
64
65err_alloc:
66 kfree(data);
67 return ret;
68};
69
70/*
71 * These are the reset controller we need to initialize early on in
72 * our system, before we can even think of using a regular device
73 * driver for it.
74 * The controllers that we can register through the regular device
75 * model are handled by the simple reset driver directly.
76 */
77static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = {
78 { .compatible = "altr,rst-mgr", },
79 { /* sentinel */ },
80};
81
82void __init socfpga_reset_init(void)
83{
84 struct device_node *np;
85
86 for_each_matching_node(np, socfpga_early_reset_dt_ids)
87 a10_reset_init(np);
88}
diff --git a/drivers/reset/reset-uniphier-usb3.c b/drivers/reset/reset-uniphier-glue.c
index ffa1b19b594d..a45923f4df6d 100644
--- a/drivers/reset/reset-uniphier-usb3.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2// 2//
3// reset-uniphier-usb3.c - USB3 reset driver for UniPhier 3// reset-uniphier-glue.c - Glue layer reset driver for UniPhier
4// Copyright 2018 Socionext Inc. 4// Copyright 2018 Socionext Inc.
5// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> 5// Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
6 6
@@ -15,24 +15,24 @@
15#define MAX_CLKS 2 15#define MAX_CLKS 2
16#define MAX_RSTS 2 16#define MAX_RSTS 2
17 17
18struct uniphier_usb3_reset_soc_data { 18struct uniphier_glue_reset_soc_data {
19 int nclks; 19 int nclks;
20 const char * const *clock_names; 20 const char * const *clock_names;
21 int nrsts; 21 int nrsts;
22 const char * const *reset_names; 22 const char * const *reset_names;
23}; 23};
24 24
25struct uniphier_usb3_reset_priv { 25struct uniphier_glue_reset_priv {
26 struct clk_bulk_data clk[MAX_CLKS]; 26 struct clk_bulk_data clk[MAX_CLKS];
27 struct reset_control *rst[MAX_RSTS]; 27 struct reset_control *rst[MAX_RSTS];
28 struct reset_simple_data rdata; 28 struct reset_simple_data rdata;
29 const struct uniphier_usb3_reset_soc_data *data; 29 const struct uniphier_glue_reset_soc_data *data;
30}; 30};
31 31
32static int uniphier_usb3_reset_probe(struct platform_device *pdev) 32static int uniphier_glue_reset_probe(struct platform_device *pdev)
33{ 33{
34 struct device *dev = &pdev->dev; 34 struct device *dev = &pdev->dev;
35 struct uniphier_usb3_reset_priv *priv; 35 struct uniphier_glue_reset_priv *priv;
36 struct resource *res; 36 struct resource *res;
37 resource_size_t size; 37 resource_size_t size;
38 const char *name; 38 const char *name;
@@ -100,9 +100,9 @@ out_rst_assert:
100 return ret; 100 return ret;
101} 101}
102 102
103static int uniphier_usb3_reset_remove(struct platform_device *pdev) 103static int uniphier_glue_reset_remove(struct platform_device *pdev)
104{ 104{
105 struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev); 105 struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
106 int i; 106 int i;
107 107
108 for (i = 0; i < priv->data->nrsts; i++) 108 for (i = 0; i < priv->data->nrsts; i++)
@@ -117,7 +117,7 @@ static const char * const uniphier_pro4_clock_reset_names[] = {
117 "gio", "link", 117 "gio", "link",
118}; 118};
119 119
120static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = { 120static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = {
121 .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names), 121 .nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
122 .clock_names = uniphier_pro4_clock_reset_names, 122 .clock_names = uniphier_pro4_clock_reset_names,
123 .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names), 123 .nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
@@ -128,14 +128,14 @@ static const char * const uniphier_pxs2_clock_reset_names[] = {
128 "link", 128 "link",
129}; 129};
130 130
131static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = { 131static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = {
132 .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), 132 .nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
133 .clock_names = uniphier_pxs2_clock_reset_names, 133 .clock_names = uniphier_pxs2_clock_reset_names,
134 .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names), 134 .nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
135 .reset_names = uniphier_pxs2_clock_reset_names, 135 .reset_names = uniphier_pxs2_clock_reset_names,
136}; 136};
137 137
138static const struct of_device_id uniphier_usb3_reset_match[] = { 138static const struct of_device_id uniphier_glue_reset_match[] = {
139 { 139 {
140 .compatible = "socionext,uniphier-pro4-usb3-reset", 140 .compatible = "socionext,uniphier-pro4-usb3-reset",
141 .data = &uniphier_pro4_data, 141 .data = &uniphier_pro4_data,
@@ -152,20 +152,32 @@ static const struct of_device_id uniphier_usb3_reset_match[] = {
152 .compatible = "socionext,uniphier-pxs3-usb3-reset", 152 .compatible = "socionext,uniphier-pxs3-usb3-reset",
153 .data = &uniphier_pxs2_data, 153 .data = &uniphier_pxs2_data,
154 }, 154 },
155 {
156 .compatible = "socionext,uniphier-pro4-ahci-reset",
157 .data = &uniphier_pro4_data,
158 },
159 {
160 .compatible = "socionext,uniphier-pxs2-ahci-reset",
161 .data = &uniphier_pxs2_data,
162 },
163 {
164 .compatible = "socionext,uniphier-pxs3-ahci-reset",
165 .data = &uniphier_pxs2_data,
166 },
155 { /* Sentinel */ } 167 { /* Sentinel */ }
156}; 168};
157MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match); 169MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
158 170
159static struct platform_driver uniphier_usb3_reset_driver = { 171static struct platform_driver uniphier_glue_reset_driver = {
160 .probe = uniphier_usb3_reset_probe, 172 .probe = uniphier_glue_reset_probe,
161 .remove = uniphier_usb3_reset_remove, 173 .remove = uniphier_glue_reset_remove,
162 .driver = { 174 .driver = {
163 .name = "uniphier-usb3-reset", 175 .name = "uniphier-glue-reset",
164 .of_match_table = uniphier_usb3_reset_match, 176 .of_match_table = uniphier_glue_reset_match,
165 }, 177 },
166}; 178};
167module_platform_driver(uniphier_usb3_reset_driver); 179module_platform_driver(uniphier_glue_reset_driver);
168 180
169MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); 181MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
170MODULE_DESCRIPTION("UniPhier USB3 Reset Driver"); 182MODULE_DESCRIPTION("UniPhier Glue layer reset driver");
171MODULE_LICENSE("GPL"); 183MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dcbf5c857743..ed8e58f09054 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism)
89 dma_addr_t dma_handle; 89 dma_addr_t dma_handle;
90 struct ism_sba *sba; 90 struct ism_sba *sba;
91 91
92 sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 92 sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
93 &dma_handle, GFP_KERNEL); 93 GFP_KERNEL);
94 if (!sba) 94 if (!sba)
95 return -ENOMEM; 95 return -ENOMEM;
96 96
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism)
116 dma_addr_t dma_handle; 116 dma_addr_t dma_handle;
117 struct ism_eq *ieq; 117 struct ism_eq *ieq;
118 118
119 ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, 119 ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
120 &dma_handle, GFP_KERNEL); 120 GFP_KERNEL);
121 if (!ieq) 121 if (!ieq)
122 return -ENOMEM; 122 return -ENOMEM;
123 123
@@ -234,10 +234,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
234 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) 234 test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
235 return -EINVAL; 235 return -EINVAL;
236 236
237 dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, 237 dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
238 &dmb->dma_addr, GFP_KERNEL | 238 &dmb->dma_addr,
239 __GFP_NOWARN | __GFP_NOMEMALLOC | 239 GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
240 __GFP_COMP | __GFP_NORETRY);
241 if (!dmb->cpu_addr) 240 if (!dmb->cpu_addr)
242 clear_bit(dmb->sba_idx, ism->sba_bitmap); 241 clear_bit(dmb->sba_idx, ism->sba_bitmap);
243 242
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index fc9dbad476c0..ae1d56da671d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
635{ 635{
636 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 636 struct virtio_ccw_device *vcdev = to_vc_device(vdev);
637 unsigned long *indicatorp = NULL; 637 unsigned long *indicatorp = NULL;
638 int ret, i; 638 int ret, i, queue_idx = 0;
639 struct ccw1 *ccw; 639 struct ccw1 *ccw;
640 640
641 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 641 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
643 return -ENOMEM; 643 return -ENOMEM;
644 644
645 for (i = 0; i < nvqs; ++i) { 645 for (i = 0; i < nvqs; ++i) {
646 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], 646 if (!names[i]) {
647 ctx ? ctx[i] : false, ccw); 647 vqs[i] = NULL;
648 continue;
649 }
650
651 vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
652 names[i], ctx ? ctx[i] : false,
653 ccw);
648 if (IS_ERR(vqs[i])) { 654 if (IS_ERR(vqs[i])) {
649 ret = PTR_ERR(vqs[i]); 655 ret = PTR_ERR(vqs[i]);
650 vqs[i] = NULL; 656 vqs[i] = NULL;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index e8f5f7c63190..cd096104bcec 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
646 unsigned long *cpu_addr; 646 unsigned long *cpu_addr;
647 int retval = 1; 647 int retval = 1;
648 648
649 cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev, 649 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
650 size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL); 650 size * TW_Q_LENGTH, &dma_handle,
651 GFP_KERNEL);
651 if (!cpu_addr) { 652 if (!cpu_addr) {
652 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); 653 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
653 goto out; 654 goto out;
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ff53fd0d12f2..66c514310f3c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
1123 1123
1124 /* Get total memory needed for SCB */ 1124 /* Get total memory needed for SCB */
1125 sz = ORC_MAXQUEUE * sizeof(struct orc_scb); 1125 sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
1126 host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys, 1126 host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
1127 GFP_KERNEL); 1127 GFP_KERNEL);
1128 if (!host->scb_virt) { 1128 if (!host->scb_virt) {
1129 printk("inia100: SCB memory allocation error\n"); 1129 printk("inia100: SCB memory allocation error\n");
1130 goto out_host_put; 1130 goto out_host_put;
@@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
1132 1132
1133 /* Get total memory needed for ESCB */ 1133 /* Get total memory needed for ESCB */
1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb); 1134 sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
1135 host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys, 1135 host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
1136 GFP_KERNEL); 1136 GFP_KERNEL);
1137 if (!host->escb_virt) { 1137 if (!host->escb_virt) {
1138 printk("inia100: ESCB memory allocation error\n"); 1138 printk("inia100: ESCB memory allocation error\n");
1139 goto out_free_scb_array; 1139 goto out_free_scb_array;
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0f6751b0a633..57c6fa388bf6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
587 case ACB_ADAPTER_TYPE_B: { 587 case ACB_ADAPTER_TYPE_B: {
588 struct MessageUnit_B *reg; 588 struct MessageUnit_B *reg;
589 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); 589 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
590 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 590 dma_coherent = dma_alloc_coherent(&pdev->dev,
591 &dma_coherent_handle, GFP_KERNEL); 591 acb->roundup_ccbsize,
592 &dma_coherent_handle,
593 GFP_KERNEL);
592 if (!dma_coherent) { 594 if (!dma_coherent) {
593 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 595 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
594 return false; 596 return false;
@@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
617 struct MessageUnit_D *reg; 619 struct MessageUnit_D *reg;
618 620
619 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); 621 acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
620 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 622 dma_coherent = dma_alloc_coherent(&pdev->dev,
621 &dma_coherent_handle, GFP_KERNEL); 623 acb->roundup_ccbsize,
624 &dma_coherent_handle,
625 GFP_KERNEL);
622 if (!dma_coherent) { 626 if (!dma_coherent) {
623 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 627 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
624 return false; 628 return false;
@@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
659 uint32_t completeQ_size; 663 uint32_t completeQ_size;
660 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; 664 completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
661 acb->roundup_ccbsize = roundup(completeQ_size, 32); 665 acb->roundup_ccbsize = roundup(completeQ_size, 32);
662 dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize, 666 dma_coherent = dma_alloc_coherent(&pdev->dev,
663 &dma_coherent_handle, GFP_KERNEL); 667 acb->roundup_ccbsize,
668 &dma_coherent_handle,
669 GFP_KERNEL);
664 if (!dma_coherent){ 670 if (!dma_coherent){
665 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); 671 pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
666 return false; 672 return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 39f3820572b4..74e260027c7d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3321 q->len = len; 3321 q->len = len;
3322 q->entry_size = entry_size; 3322 q->entry_size = entry_size;
3323 mem->size = len * entry_size; 3323 mem->size = len * entry_size;
3324 mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3324 mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
3325 GFP_KERNEL); 3325 GFP_KERNEL);
3326 if (!mem->va) 3326 if (!mem->va)
3327 return -ENOMEM; 3327 return -ENOMEM;
3328 return 0; 3328 return 0;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ca7b7bbc8371..d4febaadfaa3 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
293 struct be_dma_mem *cmd, 293 struct be_dma_mem *cmd,
294 u8 subsystem, u8 opcode, u32 size) 294 u8 subsystem, u8 opcode, u32 size)
295{ 295{
296 cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma, 296 cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
297 GFP_KERNEL); 297 GFP_KERNEL);
298 if (!cmd->va) { 298 if (!cmd->va) {
299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
300 "BG_%d : Failed to allocate memory for if info\n"); 300 "BG_%d : Failed to allocate memory for if info\n");
@@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
1510 return -EINVAL; 1510 return -EINVAL;
1511 1511
1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params); 1512 nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
1513 nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, 1513 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
1514 nonemb_cmd.size, 1514 nonemb_cmd.size, &nonemb_cmd.dma,
1515 &nonemb_cmd.dma, 1515 GFP_KERNEL);
1516 GFP_KERNEL);
1517 if (!nonemb_cmd.va) { 1516 if (!nonemb_cmd.va) {
1518 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 1517 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
1519 "BM_%d : invldt_cmds_params alloc failed\n"); 1518 "BM_%d : invldt_cmds_params alloc failed\n");
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 5d163ca1b366..d8e6d7480f35 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
3264 /* Allocate dma coherent memory */ 3264 /* Allocate dma coherent memory */
3265 buf_info = buf_base; 3265 buf_info = buf_base;
3266 buf_info->size = payload_len; 3266 buf_info->size = payload_len;
3267 buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev, 3267 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
3268 buf_info->size, &buf_info->phys, 3268 buf_info->size, &buf_info->phys,
3269 GFP_KERNEL); 3269 GFP_KERNEL);
3270 if (!buf_info->virt) 3270 if (!buf_info->virt)
3271 goto out_free_mem; 3271 goto out_free_mem;
3272 3272
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e8ae4d671d23..039328d9ef13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1857 * entries. Hence the limit with one page is 8192 task context 1857 * entries. Hence the limit with one page is 8192 task context
1858 * entries. 1858 * entries.
1859 */ 1859 */
1860 hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev, 1860 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1861 PAGE_SIZE, 1861 PAGE_SIZE,
1862 &hba->task_ctx_bd_dma, 1862 &hba->task_ctx_bd_dma,
1863 GFP_KERNEL); 1863 GFP_KERNEL);
1864 if (!hba->task_ctx_bd_tbl) { 1864 if (!hba->task_ctx_bd_tbl) {
1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n"); 1865 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1866 rc = -1; 1866 rc = -1;
@@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; 1894 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1895 for (i = 0; i < task_ctx_arr_sz; i++) { 1895 for (i = 0; i < task_ctx_arr_sz; i++) {
1896 1896
1897 hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev, 1897 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1898 PAGE_SIZE, 1898 PAGE_SIZE,
1899 &hba->task_ctx_dma[i], 1899 &hba->task_ctx_dma[i],
1900 GFP_KERNEL); 1900 GFP_KERNEL);
1901 if (!hba->task_ctx[i]) { 1901 if (!hba->task_ctx[i]) {
1902 printk(KERN_ERR PFX "unable to alloc task context\n"); 1902 printk(KERN_ERR PFX "unable to alloc task context\n");
1903 rc = -1; 1903 rc = -1;
@@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
2031 } 2031 }
2032 2032
2033 for (i = 0; i < segment_count; ++i) { 2033 for (i = 0; i < segment_count; ++i) {
2034 hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev, 2034 hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
2035 BNX2FC_HASH_TBL_CHUNK_SIZE, 2035 BNX2FC_HASH_TBL_CHUNK_SIZE,
2036 &dma_segment_array[i], 2036 &dma_segment_array[i],
2037 GFP_KERNEL); 2037 GFP_KERNEL);
2038 if (!hba->hash_tbl_segments[i]) { 2038 if (!hba->hash_tbl_segments[i]) {
2039 printk(KERN_ERR PFX "hash segment alloc failed\n"); 2039 printk(KERN_ERR PFX "hash segment alloc failed\n");
2040 goto cleanup_dma; 2040 goto cleanup_dma;
2041 } 2041 }
2042 } 2042 }
2043 2043
2044 hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2044 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2045 &hba->hash_tbl_pbl_dma, 2045 &hba->hash_tbl_pbl_dma,
2046 GFP_KERNEL); 2046 GFP_KERNEL);
2047 if (!hba->hash_tbl_pbl) { 2047 if (!hba->hash_tbl_pbl) {
2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n"); 2048 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
2049 goto cleanup_dma; 2049 goto cleanup_dma;
@@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2104 return -ENOMEM; 2104 return -ENOMEM;
2105 2105
2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); 2106 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
2107 hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev, 2107 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2108 mem_size, 2108 &hba->t2_hash_tbl_ptr_dma,
2109 &hba->t2_hash_tbl_ptr_dma, 2109 GFP_KERNEL);
2110 GFP_KERNEL);
2111 if (!hba->t2_hash_tbl_ptr) { 2110 if (!hba->t2_hash_tbl_ptr) {
2112 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); 2111 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
2113 bnx2fc_free_fw_resc(hba); 2112 bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2116 2115
2117 mem_size = BNX2FC_NUM_MAX_SESS * 2116 mem_size = BNX2FC_NUM_MAX_SESS *
2118 sizeof(struct fcoe_t2_hash_table_entry); 2117 sizeof(struct fcoe_t2_hash_table_entry);
2119 hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size, 2118 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
2120 &hba->t2_hash_tbl_dma, 2119 &hba->t2_hash_tbl_dma,
2121 GFP_KERNEL); 2120 GFP_KERNEL);
2122 if (!hba->t2_hash_tbl) { 2121 if (!hba->t2_hash_tbl) {
2123 printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); 2122 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
2124 bnx2fc_free_fw_resc(hba); 2123 bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2139,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
2140 return -ENOMEM; 2139 return -ENOMEM;
2141 } 2140 }
2142 2141
2143 hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 2142 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
2144 &hba->stats_buf_dma, 2143 &hba->stats_buf_dma,
2145 GFP_KERNEL); 2144 GFP_KERNEL);
2146 if (!hba->stats_buffer) { 2145 if (!hba->stats_buffer) {
2147 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); 2146 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
2148 bnx2fc_free_fw_resc(hba); 2147 bnx2fc_free_fw_resc(hba);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index e3d1c7c440c8..d735e87e416a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & 672 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
673 CNIC_PAGE_MASK; 673 CNIC_PAGE_MASK;
674 674
675 tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 675 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
676 &tgt->sq_dma, GFP_KERNEL); 676 &tgt->sq_dma, GFP_KERNEL);
677 if (!tgt->sq) { 677 if (!tgt->sq) {
678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", 678 printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
679 tgt->sq_mem_size); 679 tgt->sq_mem_size);
@@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & 685 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
686 CNIC_PAGE_MASK; 686 CNIC_PAGE_MASK;
687 687
688 tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 688 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
689 &tgt->cq_dma, GFP_KERNEL); 689 &tgt->cq_dma, GFP_KERNEL);
690 if (!tgt->cq) { 690 if (!tgt->cq) {
691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", 691 printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
692 tgt->cq_mem_size); 692 tgt->cq_mem_size);
@@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & 698 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
699 CNIC_PAGE_MASK; 699 CNIC_PAGE_MASK;
700 700
701 tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 701 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
702 &tgt->rq_dma, GFP_KERNEL); 702 &tgt->rq_dma, GFP_KERNEL);
703 if (!tgt->rq) { 703 if (!tgt->rq) {
704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", 704 printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
705 tgt->rq_mem_size); 705 tgt->rq_mem_size);
@@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & 710 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
711 CNIC_PAGE_MASK; 711 CNIC_PAGE_MASK;
712 712
713 tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 713 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
714 &tgt->rq_pbl_dma, GFP_KERNEL); 714 &tgt->rq_pbl_dma, GFP_KERNEL);
715 if (!tgt->rq_pbl) { 715 if (!tgt->rq_pbl) {
716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", 716 printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
717 tgt->rq_pbl_size); 717 tgt->rq_pbl_size);
@@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & 735 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
736 CNIC_PAGE_MASK; 736 CNIC_PAGE_MASK;
737 737
738 tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev, 738 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
739 tgt->xferq_mem_size, &tgt->xferq_dma, 739 tgt->xferq_mem_size, &tgt->xferq_dma,
740 GFP_KERNEL); 740 GFP_KERNEL);
741 if (!tgt->xferq) { 741 if (!tgt->xferq) {
742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", 742 printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
743 tgt->xferq_mem_size); 743 tgt->xferq_mem_size);
@@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & 749 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
750 CNIC_PAGE_MASK; 750 CNIC_PAGE_MASK;
751 751
752 tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev, 752 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
753 tgt->confq_mem_size, &tgt->confq_dma, 753 tgt->confq_mem_size, &tgt->confq_dma,
754 GFP_KERNEL); 754 GFP_KERNEL);
755 if (!tgt->confq) { 755 if (!tgt->confq) {
756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", 756 printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
757 tgt->confq_mem_size); 757 tgt->confq_mem_size);
@@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
763 tgt->confq_pbl_size = 763 tgt->confq_pbl_size =
764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; 764 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
765 765
766 tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, 766 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
767 tgt->confq_pbl_size, 767 tgt->confq_pbl_size,
768 &tgt->confq_pbl_dma, GFP_KERNEL); 768 &tgt->confq_pbl_dma, GFP_KERNEL);
769 if (!tgt->confq_pbl) { 769 if (!tgt->confq_pbl) {
770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", 770 printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
771 tgt->confq_pbl_size); 771 tgt->confq_pbl_size);
@@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
787 /* Allocate and map ConnDB */ 787 /* Allocate and map ConnDB */
788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); 788 tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
789 789
790 tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev, 790 tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
791 tgt->conn_db_mem_size, 791 tgt->conn_db_mem_size,
792 &tgt->conn_db_dma, GFP_KERNEL); 792 &tgt->conn_db_dma, GFP_KERNEL);
793 if (!tgt->conn_db) { 793 if (!tgt->conn_db) {
794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n", 794 printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
795 tgt->conn_db_mem_size); 795 tgt->conn_db_mem_size);
@@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & 802 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
803 CNIC_PAGE_MASK; 803 CNIC_PAGE_MASK;
804 804
805 tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 805 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
806 &tgt->lcq_dma, GFP_KERNEL); 806 &tgt->lcq_dma, GFP_KERNEL);
807 807
808 if (!tgt->lcq) { 808 if (!tgt->lcq) {
809 printk(KERN_ERR PFX "unable to allocate lcq %d\n", 809 printk(KERN_ERR PFX "unable to allocate lcq %d\n",
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91f5316aa3ab..fae6f71e677d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1070 1070
1071 /* Allocate memory area for actual SQ element */ 1071 /* Allocate memory area for actual SQ element */
1072 ep->qp.sq_virt = 1072 ep->qp.sq_virt =
1073 dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, 1073 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
1074 &ep->qp.sq_phys, GFP_KERNEL); 1074 &ep->qp.sq_phys, GFP_KERNEL);
1075 if (!ep->qp.sq_virt) { 1075 if (!ep->qp.sq_virt) {
1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", 1076 printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
1077 ep->qp.sq_mem_size); 1077 ep->qp.sq_mem_size);
@@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1106 1106
1107 /* Allocate memory area for actual CQ element */ 1107 /* Allocate memory area for actual CQ element */
1108 ep->qp.cq_virt = 1108 ep->qp.cq_virt =
1109 dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, 1109 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
1110 &ep->qp.cq_phys, GFP_KERNEL); 1110 &ep->qp.cq_phys, GFP_KERNEL);
1111 if (!ep->qp.cq_virt) { 1111 if (!ep->qp.cq_virt) {
1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", 1112 printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
1113 ep->qp.cq_mem_size); 1113 ep->qp.cq_mem_size);
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index dc12933533d5..66bbd21819ae 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
233 233
234 q = wrm->q_arr[free_idx]; 234 q = wrm->q_arr[free_idx];
235 235
236 q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart, 236 q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
237 GFP_KERNEL); 237 GFP_KERNEL);
238 if (!q->vstart) { 238 if (!q->vstart) {
239 csio_err(hw, 239 csio_err(hw,
240 "Failed to allocate DMA memory for " 240 "Failed to allocate DMA memory for "
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 8a20411699d9..75e1273a44b3 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
1144} 1144}
1145 1145
1146static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, 1146static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1147 unsigned int tid, int pg_idx, bool reply) 1147 unsigned int tid, int pg_idx)
1148{ 1148{
1149 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, 1149 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
1150 GFP_KERNEL); 1150 GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1160 req = (struct cpl_set_tcb_field *)skb->head; 1160 req = (struct cpl_set_tcb_field *)skb->head;
1161 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1161 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1162 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1162 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1163 req->reply = V_NO_REPLY(reply ? 0 : 1); 1163 req->reply = V_NO_REPLY(1);
1164 req->cpu_idx = 0; 1164 req->cpu_idx = 0;
1165 req->word = htons(31); 1165 req->word = htons(31);
1166 req->mask = cpu_to_be64(0xF0000000); 1166 req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
1177 * @tid: connection id 1177 * @tid: connection id
1178 * @hcrc: header digest enabled 1178 * @hcrc: header digest enabled
1179 * @dcrc: data digest enabled 1179 * @dcrc: data digest enabled
1180 * @reply: request reply from h/w
1181 * set up the iscsi digest settings for a connection identified by tid 1180 * set up the iscsi digest settings for a connection identified by tid
1182 */ 1181 */
1183static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 1182static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1184 int hcrc, int dcrc, int reply) 1183 int hcrc, int dcrc)
1185{ 1184{
1186 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0, 1185 struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
1187 GFP_KERNEL); 1186 GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1197 req = (struct cpl_set_tcb_field *)skb->head; 1196 req = (struct cpl_set_tcb_field *)skb->head;
1198 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); 1197 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1199 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 1198 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1200 req->reply = V_NO_REPLY(reply ? 0 : 1); 1199 req->reply = V_NO_REPLY(1);
1201 req->cpu_idx = 0; 1200 req->cpu_idx = 0;
1202 req->word = htons(31); 1201 req->word = htons(31);
1203 req->mask = cpu_to_be64(0x0F000000); 1202 req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 49f8028ac524..d26f50af00ea 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1548 struct cxgbi_sock *csk; 1548 struct cxgbi_sock *csk;
1549 1549
1550 csk = lookup_tid(t, tid); 1550 csk = lookup_tid(t, tid);
1551 if (!csk) 1551 if (!csk) {
1552 pr_err("can't find conn. for tid %u.\n", tid); 1552 pr_err("can't find conn. for tid %u.\n", tid);
1553 return;
1554 }
1553 1555
1554 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 1556 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1555 "csk 0x%p,%u,%lx,%u, status 0x%x.\n", 1557 "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1556 csk, csk->state, csk->flags, csk->tid, rpl->status); 1558 csk, csk->state, csk->flags, csk->tid, rpl->status);
1557 1559
1558 if (rpl->status != CPL_ERR_NONE) 1560 if (rpl->status != CPL_ERR_NONE) {
1559 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n", 1561 pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1560 csk, tid, rpl->status); 1562 csk, tid, rpl->status);
1563 csk->err = -EINVAL;
1564 }
1565
1566 complete(&csk->cmpl);
1561 1567
1562 __kfree_skb(skb); 1568 __kfree_skb(skb);
1563} 1569}
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1983} 1989}
1984 1990
1985static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, 1991static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1986 int pg_idx, bool reply) 1992 int pg_idx)
1987{ 1993{
1988 struct sk_buff *skb; 1994 struct sk_buff *skb;
1989 struct cpl_set_tcb_field *req; 1995 struct cpl_set_tcb_field *req;
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1999 req = (struct cpl_set_tcb_field *)skb->head; 2005 req = (struct cpl_set_tcb_field *)skb->head;
2000 INIT_TP_WR(req, csk->tid); 2006 INIT_TP_WR(req, csk->tid);
2001 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); 2007 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2002 req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 2008 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2003 req->word_cookie = htons(0); 2009 req->word_cookie = htons(0);
2004 req->mask = cpu_to_be64(0x3 << 8); 2010 req->mask = cpu_to_be64(0x3 << 8);
2005 req->val = cpu_to_be64(pg_idx << 8); 2011 req->val = cpu_to_be64(pg_idx << 8);
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
2008 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 2014 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2009 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); 2015 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2010 2016
2017 reinit_completion(&csk->cmpl);
2011 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 2018 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2012 return 0; 2019 wait_for_completion(&csk->cmpl);
2020
2021 return csk->err;
2013} 2022}
2014 2023
2015static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, 2024static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2016 int hcrc, int dcrc, int reply) 2025 int hcrc, int dcrc)
2017{ 2026{
2018 struct sk_buff *skb; 2027 struct sk_buff *skb;
2019 struct cpl_set_tcb_field *req; 2028 struct cpl_set_tcb_field *req;
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2031 req = (struct cpl_set_tcb_field *)skb->head; 2040 req = (struct cpl_set_tcb_field *)skb->head;
2032 INIT_TP_WR(req, tid); 2041 INIT_TP_WR(req, tid);
2033 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); 2042 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2034 req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid)); 2043 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2035 req->word_cookie = htons(0); 2044 req->word_cookie = htons(0);
2036 req->mask = cpu_to_be64(0x3 << 4); 2045 req->mask = cpu_to_be64(0x3 << 4);
2037 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) | 2046 req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2041 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, 2050 log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2042 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); 2051 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2043 2052
2053 reinit_completion(&csk->cmpl);
2044 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); 2054 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2045 return 0; 2055 wait_for_completion(&csk->cmpl);
2056
2057 return csk->err;
2046} 2058}
2047 2059
2048static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev) 2060static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 75f876409fb9..245742557c03 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
573 skb_queue_head_init(&csk->receive_queue); 573 skb_queue_head_init(&csk->receive_queue);
574 skb_queue_head_init(&csk->write_queue); 574 skb_queue_head_init(&csk->write_queue);
575 timer_setup(&csk->retry_timer, NULL, 0); 575 timer_setup(&csk->retry_timer, NULL, 0);
576 init_completion(&csk->cmpl);
576 rwlock_init(&csk->callback_lock); 577 rwlock_init(&csk->callback_lock);
577 csk->cdev = cdev; 578 csk->cdev = cdev;
578 csk->flags = 0; 579 csk->flags = 0;
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2251 if (!err && conn->hdrdgst_en) 2252 if (!err && conn->hdrdgst_en)
2252 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2253 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2253 conn->hdrdgst_en, 2254 conn->hdrdgst_en,
2254 conn->datadgst_en, 0); 2255 conn->datadgst_en);
2255 break; 2256 break;
2256 case ISCSI_PARAM_DATADGST_EN: 2257 case ISCSI_PARAM_DATADGST_EN:
2257 err = iscsi_set_param(cls_conn, param, buf, buflen); 2258 err = iscsi_set_param(cls_conn, param, buf, buflen);
2258 if (!err && conn->datadgst_en) 2259 if (!err && conn->datadgst_en)
2259 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, 2260 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
2260 conn->hdrdgst_en, 2261 conn->hdrdgst_en,
2261 conn->datadgst_en, 0); 2262 conn->datadgst_en);
2262 break; 2263 break;
2263 case ISCSI_PARAM_MAX_R2T: 2264 case ISCSI_PARAM_MAX_R2T:
2264 return iscsi_tcp_set_max_r2t(conn, buf); 2265 return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
2384 2385
2385 ppm = csk->cdev->cdev2ppm(csk->cdev); 2386 ppm = csk->cdev->cdev2ppm(csk->cdev);
2386 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, 2387 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
2387 ppm->tformat.pgsz_idx_dflt, 0); 2388 ppm->tformat.pgsz_idx_dflt);
2388 if (err < 0) 2389 if (err < 0)
2389 return err; 2390 return err;
2390 2391
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 5d5d8b50d842..1917ff57651d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -149,6 +149,7 @@ struct cxgbi_sock {
149 struct sk_buff_head receive_queue; 149 struct sk_buff_head receive_queue;
150 struct sk_buff_head write_queue; 150 struct sk_buff_head write_queue;
151 struct timer_list retry_timer; 151 struct timer_list retry_timer;
152 struct completion cmpl;
152 int err; 153 int err;
153 rwlock_t callback_lock; 154 rwlock_t callback_lock;
154 void *user_data; 155 void *user_data;
@@ -490,9 +491,9 @@ struct cxgbi_device {
490 struct cxgbi_ppm *, 491 struct cxgbi_ppm *,
491 struct cxgbi_task_tag_info *); 492 struct cxgbi_task_tag_info *);
492 int (*csk_ddp_setup_digest)(struct cxgbi_sock *, 493 int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
493 unsigned int, int, int, int); 494 unsigned int, int, int);
494 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *, 495 int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
495 unsigned int, int, bool); 496 unsigned int, int);
496 497
497 void (*csk_release_offload_resources)(struct cxgbi_sock *); 498 void (*csk_release_offload_resources)(struct cxgbi_sock *);
498 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *); 499 int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e2420a810e99..c92b3822c408 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2507 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2507 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
2508 } 2508 }
2509 2509
2510 if (hisi_hba->prot_mask) {
2511 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
2512 prot_mask);
2513 scsi_host_set_prot(hisi_hba->shost, prot_mask);
2514 }
2515
2510 rc = scsi_add_host(shost, dev); 2516 rc = scsi_add_host(shost, dev);
2511 if (rc) 2517 if (rc)
2512 goto err_out_ha; 2518 goto err_out_ha;
@@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2519 if (rc) 2525 if (rc)
2520 goto err_out_register_ha; 2526 goto err_out_register_ha;
2521 2527
2522 if (hisi_hba->prot_mask) {
2523 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
2524 prot_mask);
2525 scsi_host_set_prot(hisi_hba->shost, prot_mask);
2526 }
2527
2528 scsi_scan_host(shost); 2528 scsi_scan_host(shost);
2529 2529
2530 return 0; 2530 return 0;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 68b90c4f79a3..1727d0c71b12 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
576 shost->max_lun = ~0; 576 shost->max_lun = ~0;
577 shost->max_cmd_len = MAX_COMMAND_SIZE; 577 shost->max_cmd_len = MAX_COMMAND_SIZE;
578 578
579 /* turn on DIF support */
580 scsi_host_set_prot(shost,
581 SHOST_DIF_TYPE1_PROTECTION |
582 SHOST_DIF_TYPE2_PROTECTION |
583 SHOST_DIF_TYPE3_PROTECTION);
584 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
585
579 err = scsi_add_host(shost, &pdev->dev); 586 err = scsi_add_host(shost, &pdev->dev);
580 if (err) 587 if (err)
581 goto err_shost; 588 goto err_shost;
@@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
663 goto err_host_alloc; 670 goto err_host_alloc;
664 } 671 }
665 pci_info->hosts[i] = h; 672 pci_info->hosts[i] = h;
666
667 /* turn on DIF support */
668 scsi_host_set_prot(to_shost(h),
669 SHOST_DIF_TYPE1_PROTECTION |
670 SHOST_DIF_TYPE2_PROTECTION |
671 SHOST_DIF_TYPE3_PROTECTION);
672 scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
673 } 673 }
674 674
675 err = isci_setup_interrupts(pdev); 675 err = isci_setup_interrupts(pdev);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 8698af86485d..2dc564e59430 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2730 INIT_LIST_HEAD(&dmabuf->list); 2730 INIT_LIST_HEAD(&dmabuf->list);
2731 2731
2732 /* now, allocate dma buffer */ 2732 /* now, allocate dma buffer */
2733 dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2733 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2734 &(dmabuf->phys), GFP_KERNEL); 2734 &(dmabuf->phys), GFP_KERNEL);
2735 2735
2736 if (!dmabuf->virt) { 2736 if (!dmabuf->virt) {
2737 kfree(dmabuf); 2737 kfree(dmabuf);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c1c36812c3d2..bede11e16349 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
6973 if (!dmabuf) 6973 if (!dmabuf)
6974 return NULL; 6974 return NULL;
6975 6975
6976 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6976 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6977 LPFC_HDR_TEMPLATE_SIZE, 6977 LPFC_HDR_TEMPLATE_SIZE,
6978 &dmabuf->phys, GFP_KERNEL); 6978 &dmabuf->phys, GFP_KERNEL);
6979 if (!dmabuf->virt) { 6979 if (!dmabuf->virt) {
6980 rpi_hdr = NULL; 6980 rpi_hdr = NULL;
6981 goto err_free_dmabuf; 6981 goto err_free_dmabuf;
@@ -7397,8 +7397,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
7397 } 7397 }
7398 7398
7399 /* Allocate memory for SLI-2 structures */ 7399 /* Allocate memory for SLI-2 structures */
7400 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE, 7400 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
7401 &phba->slim2p.phys, GFP_KERNEL); 7401 &phba->slim2p.phys, GFP_KERNEL);
7402 if (!phba->slim2p.virt) 7402 if (!phba->slim2p.virt)
7403 goto out_iounmap; 7403 goto out_iounmap;
7404 7404
@@ -7816,8 +7816,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
7816 * plus an alignment restriction of 16 bytes. 7816 * plus an alignment restriction of 16 bytes.
7817 */ 7817 */
7818 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 7818 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
7819 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size, 7819 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
7820 &dmabuf->phys, GFP_KERNEL); 7820 &dmabuf->phys, GFP_KERNEL);
7821 if (!dmabuf->virt) { 7821 if (!dmabuf->virt) {
7822 kfree(dmabuf); 7822 kfree(dmabuf);
7823 return -ENOMEM; 7823 return -ENOMEM;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f6a5083a621e..4d3b94317515 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1827 * page, this is used as a priori size of SLI4_PAGE_SIZE for 1827 * page, this is used as a priori size of SLI4_PAGE_SIZE for
1828 * the later DMA memory free. 1828 * the later DMA memory free.
1829 */ 1829 */
1830 viraddr = dma_zalloc_coherent(&phba->pcidev->dev, 1830 viraddr = dma_alloc_coherent(&phba->pcidev->dev,
1831 SLI4_PAGE_SIZE, &phyaddr, 1831 SLI4_PAGE_SIZE, &phyaddr,
1832 GFP_KERNEL); 1832 GFP_KERNEL);
1833 /* In case of malloc fails, proceed with whatever we have */ 1833 /* In case of malloc fails, proceed with whatever we have */
1834 if (!viraddr) 1834 if (!viraddr)
1835 break; 1835 break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 30734caf77e1..2242e9b3ca12 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5362 * mailbox command. 5362 * mailbox command.
5363 */ 5363 */
5364 dma_size = *vpd_size; 5364 dma_size = *vpd_size;
5365 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5365 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5366 &dmabuf->phys, GFP_KERNEL); 5366 &dmabuf->phys, GFP_KERNEL);
5367 if (!dmabuf->virt) { 5367 if (!dmabuf->virt) {
5368 kfree(dmabuf); 5368 kfree(dmabuf);
5369 return -ENOMEM; 5369 return -ENOMEM;
@@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6300 goto free_mem; 6300 goto free_mem;
6301 } 6301 }
6302 6302
6303 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 6303 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6304 LPFC_RAS_MAX_ENTRY_SIZE, 6304 LPFC_RAS_MAX_ENTRY_SIZE,
6305 &dmabuf->phys, 6305 &dmabuf->phys, GFP_KERNEL);
6306 GFP_KERNEL);
6307 if (!dmabuf->virt) { 6306 if (!dmabuf->virt) {
6308 kfree(dmabuf); 6307 kfree(dmabuf);
6309 rc = -ENOMEM; 6308 rc = -ENOMEM;
@@ -9408,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9408 cmnd = CMD_XMIT_SEQUENCE64_CR; 9407 cmnd = CMD_XMIT_SEQUENCE64_CR;
9409 if (phba->link_flag & LS_LOOPBACK_MODE) 9408 if (phba->link_flag & LS_LOOPBACK_MODE)
9410 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9409 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9410 /* fall through */
9411 case CMD_XMIT_SEQUENCE64_CR: 9411 case CMD_XMIT_SEQUENCE64_CR:
9412 /* word3 iocb=io_tag32 wqe=reserved */ 9412 /* word3 iocb=io_tag32 wqe=reserved */
9413 wqe->xmit_sequence.rsvd3 = 0; 9413 wqe->xmit_sequence.rsvd3 = 0;
@@ -13529,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13529 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13529 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13530 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13531 "2537 Receive Frame Truncated!!\n"); 13531 "2537 Receive Frame Truncated!!\n");
13532 /* fall through */
13532 case FC_STATUS_RQ_SUCCESS: 13533 case FC_STATUS_RQ_SUCCESS:
13533 spin_lock_irqsave(&phba->hbalock, iflags); 13534 spin_lock_irqsave(&phba->hbalock, iflags);
13534 lpfc_sli4_rq_release(hrq, drq); 13535 lpfc_sli4_rq_release(hrq, drq);
@@ -13938,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13938 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13939 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13939 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13940 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13940 "6126 Receive Frame Truncated!!\n"); 13941 "6126 Receive Frame Truncated!!\n");
13941 /* Drop thru */ 13942 /* fall through */
13942 case FC_STATUS_RQ_SUCCESS: 13943 case FC_STATUS_RQ_SUCCESS:
13943 spin_lock_irqsave(&phba->hbalock, iflags); 13944 spin_lock_irqsave(&phba->hbalock, iflags);
13944 lpfc_sli4_rq_release(hrq, drq); 13945 lpfc_sli4_rq_release(hrq, drq);
@@ -14613,9 +14614,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14613 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14614 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
14614 if (!dmabuf) 14615 if (!dmabuf)
14615 goto out_fail; 14616 goto out_fail;
14616 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 14617 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14617 hw_page_size, &dmabuf->phys, 14618 hw_page_size, &dmabuf->phys,
14618 GFP_KERNEL); 14619 GFP_KERNEL);
14619 if (!dmabuf->virt) { 14620 if (!dmabuf->virt) {
14620 kfree(dmabuf); 14621 kfree(dmabuf);
14621 goto out_fail; 14622 goto out_fail;
@@ -14850,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14850 eq->entry_count); 14851 eq->entry_count);
14851 if (eq->entry_count < 256) 14852 if (eq->entry_count < 256)
14852 return -EINVAL; 14853 return -EINVAL;
14853 /* otherwise default to smallest count (drop through) */ 14854 /* fall through - otherwise default to smallest count */
14854 case 256: 14855 case 256:
14855 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14856 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14856 LPFC_EQ_CNT_256); 14857 LPFC_EQ_CNT_256);
@@ -14981,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14981 LPFC_CQ_CNT_WORD7); 14982 LPFC_CQ_CNT_WORD7);
14982 break; 14983 break;
14983 } 14984 }
14984 /* Fall Thru */ 14985 /* fall through */
14985 default: 14986 default:
14986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14987 "0361 Unsupported CQ count: " 14988 "0361 Unsupported CQ count: "
@@ -14992,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14992 status = -EINVAL; 14993 status = -EINVAL;
14993 goto out; 14994 goto out;
14994 } 14995 }
14995 /* otherwise default to smallest count (drop through) */ 14996 /* fall through - otherwise default to smallest count */
14996 case 256: 14997 case 256:
14997 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14998 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14998 LPFC_CQ_CNT_256); 14999 LPFC_CQ_CNT_256);
@@ -15152,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15152 LPFC_CQ_CNT_WORD7); 15153 LPFC_CQ_CNT_WORD7);
15153 break; 15154 break;
15154 } 15155 }
15155 /* Fall Thru */ 15156 /* fall through */
15156 default: 15157 default:
15157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15158 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15158 "3118 Bad CQ count. (%d)\n", 15159 "3118 Bad CQ count. (%d)\n",
@@ -15161,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15161 status = -EINVAL; 15162 status = -EINVAL;
15162 goto out; 15163 goto out;
15163 } 15164 }
15164 /* otherwise default to smallest (drop thru) */ 15165 /* fall through - otherwise default to smallest */
15165 case 256: 15166 case 256:
15166 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15167 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15167 &cq_set->u.request, LPFC_CQ_CNT_256); 15168 &cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15433,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15433 status = -EINVAL; 15434 status = -EINVAL;
15434 goto out; 15435 goto out;
15435 } 15436 }
15436 /* otherwise default to smallest count (drop through) */ 15437 /* fall through - otherwise default to smallest count */
15437 case 16: 15438 case 16:
15438 bf_set(lpfc_mq_context_ring_size, 15439 bf_set(lpfc_mq_context_ring_size,
15439 &mq_create_ext->u.request.context, 15440 &mq_create_ext->u.request.context,
@@ -15852,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15852 status = -EINVAL; 15853 status = -EINVAL;
15853 goto out; 15854 goto out;
15854 } 15855 }
15855 /* otherwise default to smallest count (drop through) */ 15856 /* fall through - otherwise default to smallest count */
15856 case 512: 15857 case 512:
15857 bf_set(lpfc_rq_context_rqe_count, 15858 bf_set(lpfc_rq_context_rqe_count,
15858 &rq_create->u.request.context, 15859 &rq_create->u.request.context,
@@ -15989,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15989 status = -EINVAL; 15990 status = -EINVAL;
15990 goto out; 15991 goto out;
15991 } 15992 }
15992 /* otherwise default to smallest count (drop through) */ 15993 /* fall through - otherwise default to smallest count */
15993 case 512: 15994 case 512:
15994 bf_set(lpfc_rq_context_rqe_count, 15995 bf_set(lpfc_rq_context_rqe_count,
15995 &rq_create->u.request.context, 15996 &rq_create->u.request.context,
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e836392b75e8..f112458023ff 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
967 * Allocate the common 16-byte aligned memory for the handshake 967 * Allocate the common 16-byte aligned memory for the handshake
968 * mailbox. 968 * mailbox.
969 */ 969 */
970 raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev, 970 raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
971 sizeof(mbox64_t), &raid_dev->una_mbox64_dma, 971 sizeof(mbox64_t),
972 GFP_KERNEL); 972 &raid_dev->una_mbox64_dma,
973 GFP_KERNEL);
973 974
974 if (!raid_dev->una_mbox64) { 975 if (!raid_dev->una_mbox64) {
975 con_log(CL_ANN, (KERN_WARNING 976 con_log(CL_ANN, (KERN_WARNING
@@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
995 align; 996 align;
996 997
997 // Allocate memory for commands issued internally 998 // Allocate memory for commands issued internally
998 adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE, 999 adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
999 &adapter->ibuf_dma_h, GFP_KERNEL); 1000 &adapter->ibuf_dma_h, GFP_KERNEL);
1000 if (!adapter->ibuf) { 1001 if (!adapter->ibuf) {
1001 1002
1002 con_log(CL_ANN, (KERN_WARNING 1003 con_log(CL_ANN, (KERN_WARNING
@@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
2897 * Issue an ENQUIRY3 command to find out certain adapter parameters, 2898 * Issue an ENQUIRY3 command to find out certain adapter parameters,
2898 * e.g., max channels, max commands etc. 2899 * e.g., max channels, max commands etc.
2899 */ 2900 */
2900 pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), 2901 pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
2901 &pinfo_dma_h, GFP_KERNEL); 2902 &pinfo_dma_h, GFP_KERNEL);
2902 if (pinfo == NULL) { 2903 if (pinfo == NULL) {
2903 con_log(CL_ANN, (KERN_WARNING 2904 con_log(CL_ANN, (KERN_WARNING
2904 "megaraid: out of memory, %s %d\n", __func__, 2905 "megaraid: out of memory, %s %d\n", __func__,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f7bdd783360a..fcbff83c0097 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2273 sizeof(struct MR_LD_VF_AFFILIATION_111)); 2273 sizeof(struct MR_LD_VF_AFFILIATION_111));
2274 else { 2274 else {
2275 new_affiliation_111 = 2275 new_affiliation_111 =
2276 dma_zalloc_coherent(&instance->pdev->dev, 2276 dma_alloc_coherent(&instance->pdev->dev,
2277 sizeof(struct MR_LD_VF_AFFILIATION_111), 2277 sizeof(struct MR_LD_VF_AFFILIATION_111),
2278 &new_affiliation_111_h, GFP_KERNEL); 2278 &new_affiliation_111_h, GFP_KERNEL);
2279 if (!new_affiliation_111) { 2279 if (!new_affiliation_111) {
2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2280 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2281 "memory for new affiliation for scsi%d\n", 2281 "memory for new affiliation for scsi%d\n",
@@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2380 sizeof(struct MR_LD_VF_AFFILIATION)); 2380 sizeof(struct MR_LD_VF_AFFILIATION));
2381 else { 2381 else {
2382 new_affiliation = 2382 new_affiliation =
2383 dma_zalloc_coherent(&instance->pdev->dev, 2383 dma_alloc_coherent(&instance->pdev->dev,
2384 (MAX_LOGICAL_DRIVES + 1) * 2384 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2385 sizeof(struct MR_LD_VF_AFFILIATION), 2385 &new_affiliation_h, GFP_KERNEL);
2386 &new_affiliation_h, GFP_KERNEL);
2387 if (!new_affiliation) { 2386 if (!new_affiliation) {
2388 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate " 2387 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2389 "memory for new affiliation for scsi%d\n", 2388 "memory for new affiliation for scsi%d\n",
@@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2546 2545
2547 if (initial) { 2546 if (initial) {
2548 instance->hb_host_mem = 2547 instance->hb_host_mem =
2549 dma_zalloc_coherent(&instance->pdev->dev, 2548 dma_alloc_coherent(&instance->pdev->dev,
2550 sizeof(struct MR_CTRL_HB_HOST_MEM), 2549 sizeof(struct MR_CTRL_HB_HOST_MEM),
2551 &instance->hb_host_mem_h, GFP_KERNEL); 2550 &instance->hb_host_mem_h,
2551 GFP_KERNEL);
2552 if (!instance->hb_host_mem) { 2552 if (!instance->hb_host_mem) {
2553 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate" 2553 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2554 " memory for heartbeat host memory for scsi%d\n", 2554 " memory for heartbeat host memory for scsi%d\n",
@@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
5816 } 5816 }
5817 5817
5818 dcmd = &cmd->frame->dcmd; 5818 dcmd = &cmd->frame->dcmd;
5819 el_info = dma_zalloc_coherent(&instance->pdev->dev, 5819 el_info = dma_alloc_coherent(&instance->pdev->dev,
5820 sizeof(struct megasas_evt_log_info), &el_info_h, 5820 sizeof(struct megasas_evt_log_info),
5821 GFP_KERNEL); 5821 &el_info_h, GFP_KERNEL);
5822 if (!el_info) { 5822 if (!el_info) {
5823 megasas_return_cmd(instance, cmd); 5823 megasas_return_cmd(instance, cmd);
5824 return -ENOMEM; 5824 return -ENOMEM;
@@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
6236 instance->consistent_mask_64bit = true; 6236 instance->consistent_mask_64bit = true;
6237 6237
6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n", 6238 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6239 ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"), 6239 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6240 (instance->consistent_mask_64bit ? "63" : "32")); 6240 (instance->consistent_mask_64bit ? "63" : "32"));
6241 6241
6242 return 0; 6242 return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 211c17c33aa0..647f48a28f85 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
175 /* 175 /*
176 * Check if it is our interrupt 176 * Check if it is our interrupt
177 */ 177 */
178 status = readl(&regs->outbound_intr_status); 178 status = megasas_readl(instance,
179 &regs->outbound_intr_status);
179 180
180 if (status & 1) { 181 if (status & 1) {
181 writel(status, &regs->outbound_intr_status); 182 writel(status, &regs->outbound_intr_status);
@@ -689,8 +690,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
689 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * 690 array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
690 MAX_MSIX_QUEUES_FUSION; 691 MAX_MSIX_QUEUES_FUSION;
691 692
692 fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev, 693 fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
693 array_size, &fusion->rdpq_phys, GFP_KERNEL); 694 array_size, &fusion->rdpq_phys,
695 GFP_KERNEL);
694 if (!fusion->rdpq_virt) { 696 if (!fusion->rdpq_virt) {
695 dev_err(&instance->pdev->dev, 697 dev_err(&instance->pdev->dev,
696 "Failed from %s %d\n", __func__, __LINE__); 698 "Failed from %s %d\n", __func__, __LINE__);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index f3e182eb0970..c9dc7740e9e7 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
1915 /* We use the PCI APIs for now until the generic one gets fixed 1915 /* We use the PCI APIs for now until the generic one gets fixed
1916 * enough or until we get some macio-specific versions 1916 * enough or until we get some macio-specific versions
1917 */ 1917 */
1918 dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev, 1918 dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
1919 ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL); 1919 ms->dma_cmd_size, &dma_cmd_bus,
1920 GFP_KERNEL);
1920 if (dma_cmd_space == NULL) { 1921 if (dma_cmd_space == NULL) {
1921 printk(KERN_ERR "mesh: can't allocate DMA table\n"); 1922 printk(KERN_ERR "mesh: can't allocate DMA table\n");
1922 goto out_unmap; 1923 goto out_unmap;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index dbe753fba486..36f64205ecfa 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
143 143
144 case RESOURCE_UNCACHED_MEMORY: 144 case RESOURCE_UNCACHED_MEMORY:
145 size = round_up(size, 8); 145 size = round_up(size, 8);
146 res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, 146 res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
147 &res->bus_addr, GFP_KERNEL); 147 &res->bus_addr,
148 GFP_KERNEL);
148 if (!res->virt_addr) { 149 if (!res->virt_addr) {
149 dev_err(&mhba->pdev->dev, 150 dev_err(&mhba->pdev->dev,
150 "unable to allocate consistent mem," 151 "unable to allocate consistent mem,"
@@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
246 if (size == 0) 247 if (size == 0)
247 return 0; 248 return 0;
248 249
249 virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr, 250 virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
250 GFP_KERNEL); 251 GFP_KERNEL);
251 if (!virt_addr) 252 if (!virt_addr)
252 return -1; 253 return -1;
253 254
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b3be49d41375..084f2fcced0a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
116 u64 align_offset = 0; 116 u64 align_offset = 0;
117 if (align) 117 if (align)
118 align_offset = (dma_addr_t)align - 1; 118 align_offset = (dma_addr_t)align - 1;
119 mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align, 119 mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
120 &mem_dma_handle, GFP_KERNEL); 120 &mem_dma_handle, GFP_KERNEL);
121 if (!mem_virt_alloc) { 121 if (!mem_virt_alloc) {
122 pm8001_printk("memory allocation error\n"); 122 pm8001_printk("memory allocation error\n");
123 return -1; 123 return -1;
@@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
657 if (dev->dev_type == SAS_SATA_DEV) { 657 if (dev->dev_type == SAS_SATA_DEV) {
658 pm8001_device->attached_phy = 658 pm8001_device->attached_phy =
659 dev->rphy->identify.phy_identifier; 659 dev->rphy->identify.phy_identifier;
660 flag = 1; /* directly sata*/ 660 flag = 1; /* directly sata */
661 } 661 }
662 } /*register this device to HBA*/ 662 } /*register this device to HBA*/
663 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); 663 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index edcaf4b0cb0b..9bbc19fc190b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1050 sizeof(void *); 1050 sizeof(void *);
1051 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; 1051 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
1052 1052
1053 fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev, 1053 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1054 fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL); 1054 &fcport->sq_dma, GFP_KERNEL);
1055 if (!fcport->sq) { 1055 if (!fcport->sq) {
1056 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); 1056 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
1057 rval = 1; 1057 rval = 1;
1058 goto out; 1058 goto out;
1059 } 1059 }
1060 1060
1061 fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev, 1061 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
1062 fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL); 1062 fcport->sq_pbl_size,
1063 &fcport->sq_pbl_dma, GFP_KERNEL);
1063 if (!fcport->sq_pbl) { 1064 if (!fcport->sq_pbl) {
1064 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); 1065 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
1065 rval = 1; 1066 rval = 1;
@@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2680 } 2681 }
2681 2682
2682 /* Allocate list of PBL pages */ 2683 /* Allocate list of PBL pages */
2683 qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev, 2684 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
2684 QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL); 2685 QEDF_PAGE_SIZE,
2686 &qedf->bdq_pbl_list_dma,
2687 GFP_KERNEL);
2685 if (!qedf->bdq_pbl_list) { 2688 if (!qedf->bdq_pbl_list) {
2686 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); 2689 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
2687 return -ENOMEM; 2690 return -ENOMEM;
@@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
2770 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); 2773 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
2771 2774
2772 qedf->global_queues[i]->cq = 2775 qedf->global_queues[i]->cq =
2773 dma_zalloc_coherent(&qedf->pdev->dev, 2776 dma_alloc_coherent(&qedf->pdev->dev,
2774 qedf->global_queues[i]->cq_mem_size, 2777 qedf->global_queues[i]->cq_mem_size,
2775 &qedf->global_queues[i]->cq_dma, GFP_KERNEL); 2778 &qedf->global_queues[i]->cq_dma,
2779 GFP_KERNEL);
2776 2780
2777 if (!qedf->global_queues[i]->cq) { 2781 if (!qedf->global_queues[i]->cq) {
2778 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); 2782 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
2781 } 2785 }
2782 2786
2783 qedf->global_queues[i]->cq_pbl = 2787 qedf->global_queues[i]->cq_pbl =
2784 dma_zalloc_coherent(&qedf->pdev->dev, 2788 dma_alloc_coherent(&qedf->pdev->dev,
2785 qedf->global_queues[i]->cq_pbl_size, 2789 qedf->global_queues[i]->cq_pbl_size,
2786 &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL); 2790 &qedf->global_queues[i]->cq_pbl_dma,
2791 GFP_KERNEL);
2787 2792
2788 if (!qedf->global_queues[i]->cq_pbl) { 2793 if (!qedf->global_queues[i]->cq_pbl) {
2789 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); 2794 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4da660c1c431..6d6d6013e35b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
953 953
954 qedi_ep = ep->dd_data; 954 qedi_ep = ep->dd_data;
955 if (qedi_ep->state == EP_STATE_IDLE || 955 if (qedi_ep->state == EP_STATE_IDLE ||
956 qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
956 qedi_ep->state == EP_STATE_OFLDCONN_FAILED) 957 qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
957 return -1; 958 return -1;
958 959
@@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
1035 1036
1036 switch (qedi_ep->state) { 1037 switch (qedi_ep->state) {
1037 case EP_STATE_OFLDCONN_START: 1038 case EP_STATE_OFLDCONN_START:
1039 case EP_STATE_OFLDCONN_NONE:
1038 goto ep_release_conn; 1040 goto ep_release_conn;
1039 case EP_STATE_OFLDCONN_FAILED: 1041 case EP_STATE_OFLDCONN_FAILED:
1040 break; 1042 break;
@@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
1225 1227
1226 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1228 if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
1227 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); 1229 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
1230 qedi_ep->state = EP_STATE_OFLDCONN_NONE;
1228 ret = -EIO; 1231 ret = -EIO;
1229 goto set_path_exit; 1232 goto set_path_exit;
1230 } 1233 }
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 11260776212f..892d70d54553 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@ enum {
59 EP_STATE_OFLDCONN_FAILED = 0x2000, 59 EP_STATE_OFLDCONN_FAILED = 0x2000,
60 EP_STATE_CONNECT_FAILED = 0x4000, 60 EP_STATE_CONNECT_FAILED = 0x4000,
61 EP_STATE_DISCONN_TIMEDOUT = 0x8000, 61 EP_STATE_DISCONN_TIMEDOUT = 0x8000,
62 EP_STATE_OFLDCONN_NONE = 0x10000,
62}; 63};
63 64
64struct qedi_conn; 65struct qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5c53409a8cea..e74a62448ba4 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1394{ 1394{
1395 struct qedi_nvm_iscsi_image nvm_image; 1395 struct qedi_nvm_iscsi_image nvm_image;
1396 1396
1397 qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev, 1397 qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
1398 sizeof(nvm_image), 1398 sizeof(nvm_image),
1399 &qedi->nvm_buf_dma, 1399 &qedi->nvm_buf_dma, GFP_KERNEL);
1400 GFP_KERNEL);
1401 if (!qedi->iscsi_image) { 1400 if (!qedi->iscsi_image) {
1402 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1401 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1403 return -ENOMEM; 1402 return -ENOMEM;
@@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
1510 } 1509 }
1511 1510
1512 /* Allocate list of PBL pages */ 1511 /* Allocate list of PBL pages */
1513 qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, 1512 qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
1514 QEDI_PAGE_SIZE, 1513 QEDI_PAGE_SIZE,
1515 &qedi->bdq_pbl_list_dma, 1514 &qedi->bdq_pbl_list_dma,
1516 GFP_KERNEL); 1515 GFP_KERNEL);
1517 if (!qedi->bdq_pbl_list) { 1516 if (!qedi->bdq_pbl_list) {
1518 QEDI_ERR(&qedi->dbg_ctx, 1517 QEDI_ERR(&qedi->dbg_ctx,
1519 "Could not allocate list of PBL pages.\n"); 1518 "Could not allocate list of PBL pages.\n");
@@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1609 (qedi->global_queues[i]->cq_pbl_size + 1608 (qedi->global_queues[i]->cq_pbl_size +
1610 (QEDI_PAGE_SIZE - 1)); 1609 (QEDI_PAGE_SIZE - 1));
1611 1610
1612 qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev, 1611 qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
1613 qedi->global_queues[i]->cq_mem_size, 1612 qedi->global_queues[i]->cq_mem_size,
1614 &qedi->global_queues[i]->cq_dma, 1613 &qedi->global_queues[i]->cq_dma,
1615 GFP_KERNEL); 1614 GFP_KERNEL);
1616 1615
1617 if (!qedi->global_queues[i]->cq) { 1616 if (!qedi->global_queues[i]->cq) {
1618 QEDI_WARN(&qedi->dbg_ctx, 1617 QEDI_WARN(&qedi->dbg_ctx,
@@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1620 status = -ENOMEM; 1619 status = -ENOMEM;
1621 goto mem_alloc_failure; 1620 goto mem_alloc_failure;
1622 } 1621 }
1623 qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, 1622 qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
1624 qedi->global_queues[i]->cq_pbl_size, 1623 qedi->global_queues[i]->cq_pbl_size,
1625 &qedi->global_queues[i]->cq_pbl_dma, 1624 &qedi->global_queues[i]->cq_pbl_dma,
1626 GFP_KERNEL); 1625 GFP_KERNEL);
1627 1626
1628 if (!qedi->global_queues[i]->cq_pbl) { 1627 if (!qedi->global_queues[i]->cq_pbl) {
1629 QEDI_WARN(&qedi->dbg_ctx, 1628 QEDI_WARN(&qedi->dbg_ctx,
@@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
1691 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *); 1690 ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
1692 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE; 1691 ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
1693 1692
1694 ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size, 1693 ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
1695 &ep->sq_dma, GFP_KERNEL); 1694 &ep->sq_dma, GFP_KERNEL);
1696 if (!ep->sq) { 1695 if (!ep->sq) {
1697 QEDI_WARN(&qedi->dbg_ctx, 1696 QEDI_WARN(&qedi->dbg_ctx,
1698 "Could not allocate send queue.\n"); 1697 "Could not allocate send queue.\n");
1699 rval = -ENOMEM; 1698 rval = -ENOMEM;
1700 goto out; 1699 goto out;
1701 } 1700 }
1702 ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size, 1701 ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
1703 &ep->sq_pbl_dma, GFP_KERNEL); 1702 &ep->sq_pbl_dma, GFP_KERNEL);
1704 if (!ep->sq_pbl) { 1703 if (!ep->sq_pbl) {
1705 QEDI_WARN(&qedi->dbg_ctx, 1704 QEDI_WARN(&qedi->dbg_ctx,
1706 "Could not allocate send queue PBL.\n"); 1705 "Could not allocate send queue PBL.\n");
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index a414f51302b7..6856dfdfa473 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4248 ha->devnum = devnum; /* specifies microcode load address */ 4248 ha->devnum = devnum; /* specifies microcode load address */
4249 4249
4250#ifdef QLA_64BIT_PTR 4250#ifdef QLA_64BIT_PTR
4251 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { 4251 if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
4252 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) { 4252 if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
4253 printk(KERN_WARNING "scsi(%li): Unable to set a " 4253 printk(KERN_WARNING "scsi(%li): Unable to set a "
4254 "suitable DMA mask - aborting\n", ha->host_no); 4254 "suitable DMA mask - aborting\n", ha->host_no);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 00444dc79756..ac504a1ff0ff 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
2415 if (qla2x00_chip_is_down(vha)) 2415 if (qla2x00_chip_is_down(vha))
2416 goto done; 2416 goto done;
2417 2417
2418 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2418 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2419 &stats_dma, GFP_KERNEL); 2419 GFP_KERNEL);
2420 if (!stats) { 2420 if (!stats) {
2421 ql_log(ql_log_warn, vha, 0x707d, 2421 ql_log(ql_log_warn, vha, 0x707d,
2422 "Failed to allocate memory for stats.\n"); 2422 "Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a9fd8d944d6..17d42658ad9a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
2312 if (!IS_FWI2_CAPABLE(ha)) 2312 if (!IS_FWI2_CAPABLE(ha))
2313 return -EPERM; 2313 return -EPERM;
2314 2314
2315 stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), 2315 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2316 &stats_dma, GFP_KERNEL); 2316 GFP_KERNEL);
2317 if (!stats) { 2317 if (!stats) {
2318 ql_log(ql_log_warn, vha, 0x70e2, 2318 ql_log(ql_log_warn, vha, 0x70e2,
2319 "Failed to allocate memory for stats.\n"); 2319 "Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 26b93c563f92..d1fc4958222a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host {
4394 uint16_t n2n_id; 4394 uint16_t n2n_id;
4395 struct list_head gpnid_list; 4395 struct list_head gpnid_list;
4396 struct fab_scan scan; 4396 struct fab_scan scan;
4397
4398 unsigned int irq_offset;
4397} scsi_qla_host_t; 4399} scsi_qla_host_t;
4398 4400
4399struct qla27xx_image_status { 4401struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 90cfa394f942..cbc3bc49d4d1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4147 return rval; 4147 return rval;
4148 } 4148 }
4149 4149
4150 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent( 4150 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
4151 &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), 4151 sizeof(struct ct_sns_pkt),
4152 &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL); 4152 &sp->u.iocb_cmd.u.ctarg.req_dma,
4153 GFP_KERNEL);
4153 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); 4154 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4154 if (!sp->u.iocb_cmd.u.ctarg.req) { 4155 if (!sp->u.iocb_cmd.u.ctarg.req) {
4155 ql_log(ql_log_warn, vha, 0xffff, 4156 ql_log(ql_log_warn, vha, 0xffff,
@@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4165 ((vha->hw->max_fibre_devices - 1) * 4166 ((vha->hw->max_fibre_devices - 1) *
4166 sizeof(struct ct_sns_gpn_ft_data)); 4167 sizeof(struct ct_sns_gpn_ft_data));
4167 4168
4168 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent( 4169 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
4169 &vha->hw->pdev->dev, rspsz, 4170 rspsz,
4170 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); 4171 &sp->u.iocb_cmd.u.ctarg.rsp_dma,
4172 GFP_KERNEL);
4171 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); 4173 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
4172 if (!sp->u.iocb_cmd.u.ctarg.rsp) { 4174 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4173 ql_log(ql_log_warn, vha, 0xffff, 4175 ql_log(ql_log_warn, vha, 0xffff,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 364bb52ed2a6..aeeb0144bd55 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
3099 FCE_SIZE, ha->fce, ha->fce_dma); 3099 FCE_SIZE, ha->fce, ha->fce_dma);
3100 3100
3101 /* Allocate memory for Fibre Channel Event Buffer. */ 3101 /* Allocate memory for Fibre Channel Event Buffer. */
3102 tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 3102 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3103 GFP_KERNEL); 3103 GFP_KERNEL);
3104 if (!tc) { 3104 if (!tc) {
3105 ql_log(ql_log_warn, vha, 0x00be, 3105 ql_log(ql_log_warn, vha, 0x00be,
3106 "Unable to allocate (%d KB) for FCE.\n", 3106 "Unable to allocate (%d KB) for FCE.\n",
@@ -3131,8 +3131,8 @@ try_eft:
3131 EFT_SIZE, ha->eft, ha->eft_dma); 3131 EFT_SIZE, ha->eft, ha->eft_dma);
3132 3132
3133 /* Allocate memory for Extended Trace Buffer. */ 3133 /* Allocate memory for Extended Trace Buffer. */
3134 tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, 3134 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3135 GFP_KERNEL); 3135 GFP_KERNEL);
3136 if (!tc) { 3136 if (!tc) {
3137 ql_log(ql_log_warn, vha, 0x00c1, 3137 ql_log(ql_log_warn, vha, 0x00c1,
3138 "Unable to allocate (%d KB) for EFT.\n", 3138 "Unable to allocate (%d KB) for EFT.\n",
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 30d3090842f8..8507c43b918c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3447 } 3447 }
3448 } 3448 }
3449 vha->irq_offset = desc.pre_vectors;
3449 ha->msix_entries = kcalloc(ha->msix_count, 3450 ha->msix_entries = kcalloc(ha->msix_count,
3450 sizeof(struct qla_msix_entry), 3451 sizeof(struct qla_msix_entry),
3451 GFP_KERNEL); 3452 GFP_KERNEL);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ea69dafc9774..c6ef83d0d99b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
6939 if (USER_CTRL_IRQ(vha->hw)) 6939 if (USER_CTRL_IRQ(vha->hw))
6940 rc = blk_mq_map_queues(qmap); 6940 rc = blk_mq_map_queues(qmap);
6941 else 6941 else
6942 rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0); 6942 rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
6943 return rc; 6943 return rc;
6944} 6944}
6945 6945
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1ef74aa2d00a..2bf5e3e639e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
153 dma_addr_t sys_info_dma; 153 dma_addr_t sys_info_dma;
154 int status = QLA_ERROR; 154 int status = QLA_ERROR;
155 155
156 sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 156 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
157 &sys_info_dma, GFP_KERNEL); 157 &sys_info_dma, GFP_KERNEL);
158 if (sys_info == NULL) { 158 if (sys_info == NULL) {
159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 159 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
160 ha->host_no, __func__)); 160 ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 5d56904687b9..dac9a7013208 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
625 uint32_t mbox_sts[MBOX_REG_COUNT]; 625 uint32_t mbox_sts[MBOX_REG_COUNT];
626 int status = QLA_ERROR; 626 int status = QLA_ERROR;
627 627
628 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 628 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
629 sizeof(struct addr_ctrl_blk), 629 sizeof(struct addr_ctrl_blk),
630 &init_fw_cb_dma, GFP_KERNEL); 630 &init_fw_cb_dma, GFP_KERNEL);
631 if (init_fw_cb == NULL) { 631 if (init_fw_cb == NULL) {
632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n", 632 DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
633 ha->host_no, __func__)); 633 ha->host_no, __func__));
@@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
709 uint32_t mbox_cmd[MBOX_REG_COUNT]; 709 uint32_t mbox_cmd[MBOX_REG_COUNT];
710 uint32_t mbox_sts[MBOX_REG_COUNT]; 710 uint32_t mbox_sts[MBOX_REG_COUNT];
711 711
712 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 712 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
713 sizeof(struct addr_ctrl_blk), 713 sizeof(struct addr_ctrl_blk),
714 &init_fw_cb_dma, GFP_KERNEL); 714 &init_fw_cb_dma, GFP_KERNEL);
715 if (init_fw_cb == NULL) { 715 if (init_fw_cb == NULL) {
716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, 716 printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
717 __func__); 717 __func__);
@@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
1340 uint32_t mbox_sts[MBOX_REG_COUNT]; 1340 uint32_t mbox_sts[MBOX_REG_COUNT];
1341 int status = QLA_ERROR; 1341 int status = QLA_ERROR;
1342 1342
1343 about_fw = dma_zalloc_coherent(&ha->pdev->dev, 1343 about_fw = dma_alloc_coherent(&ha->pdev->dev,
1344 sizeof(struct about_fw_info), 1344 sizeof(struct about_fw_info),
1345 &about_fw_dma, GFP_KERNEL); 1345 &about_fw_dma, GFP_KERNEL);
1346 if (!about_fw) { 1346 if (!about_fw) {
1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory " 1347 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
1348 "for about_fw\n", __func__)); 1348 "for about_fw\n", __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d2b333d629be..5a31877c9d04 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
4052 dma_addr_t sys_info_dma; 4052 dma_addr_t sys_info_dma;
4053 int status = QLA_ERROR; 4053 int status = QLA_ERROR;
4054 4054
4055 sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info), 4055 sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
4056 &sys_info_dma, GFP_KERNEL); 4056 &sys_info_dma, GFP_KERNEL);
4057 if (sys_info == NULL) { 4057 if (sys_info == NULL) {
4058 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 4058 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
4059 ha->host_no, __func__)); 4059 ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 949e186cc5d7..a77bfb224248 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
2704 uint32_t rem = len; 2704 uint32_t rem = len;
2705 struct nlattr *attr; 2705 struct nlattr *attr;
2706 2706
2707 init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev, 2707 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
2708 sizeof(struct addr_ctrl_blk), 2708 sizeof(struct addr_ctrl_blk),
2709 &init_fw_cb_dma, GFP_KERNEL); 2709 &init_fw_cb_dma, GFP_KERNEL);
2710 if (!init_fw_cb) { 2710 if (!init_fw_cb) {
2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", 2711 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
2712 __func__); 2712 __func__);
@@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
4206 sizeof(struct shadow_regs) + 4206 sizeof(struct shadow_regs) +
4207 MEM_ALIGN_VALUE + 4207 MEM_ALIGN_VALUE +
4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4208 (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
4209 ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len, 4209 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
4210 &ha->queues_dma, GFP_KERNEL); 4210 &ha->queues_dma, GFP_KERNEL);
4211 if (ha->queues == NULL) { 4211 if (ha->queues == NULL) {
4212 ql4_printk(KERN_WARNING, ha, 4212 ql4_printk(KERN_WARNING, ha,
4213 "Memory Allocation failed - queues.\n"); 4213 "Memory Allocation failed - queues.\n");
@@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
7232 7232
7233 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, 7233 rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
7234 fw_ddb_entry); 7234 fw_ddb_entry);
7235 if (rc)
7236 goto free_sess;
7235 7237
7236 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n", 7238 ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
7237 __func__, fnode_sess->dev.kobj.name); 7239 __func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index a2b4179bfdf7..7639df91b110 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
80 80
81 if (err == 0) { 81 if (err == 0) {
82 pm_runtime_disable(dev); 82 pm_runtime_disable(dev);
83 pm_runtime_set_active(dev); 83 err = pm_runtime_set_active(dev);
84 pm_runtime_enable(dev); 84 pm_runtime_enable(dev);
85
86 /*
87 * Forcibly set runtime PM status of request queue to "active"
88 * to make sure we can again get requests from the queue
89 * (see also blk_pm_peek_request()).
90 *
91 * The resume hook will correct runtime PM status of the disk.
92 */
93 if (!err && scsi_is_sdev_device(dev)) {
94 struct scsi_device *sdev = to_scsi_device(dev);
95
96 if (sdev->request_queue->dev)
97 blk_set_runtime_active(sdev->request_queue);
98 }
85 } 99 }
86 100
87 return err; 101 return err;
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
140 else 154 else
141 fn = NULL; 155 fn = NULL;
142 156
143 /*
144 * Forcibly set runtime PM status of request queue to "active" to
145 * make sure we can again get requests from the queue (see also
146 * blk_pm_peek_request()).
147 *
148 * The resume hook will correct runtime PM status of the disk.
149 */
150 if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
151 blk_set_runtime_active(to_scsi_device(dev)->request_queue);
152
153 if (fn) { 157 if (fn) {
154 async_schedule_domain(fn, dev, &scsi_sd_pm_domain); 158 async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
155 159
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a44f52e0e8..b2da8a00ec33 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
206 sp = buffer_data[0] & 0x80 ? 1 : 0; 206 sp = buffer_data[0] & 0x80 ? 1 : 0;
207 buffer_data[0] &= ~0x80; 207 buffer_data[0] &= ~0x80;
208 208
209 /*
210 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
211 * received mode parameter buffer before doing MODE SELECT.
212 */
213 data.device_specific = 0;
214
209 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT, 215 if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
210 SD_MAX_RETRIES, &data, &sshdr)) { 216 SD_MAX_RETRIES, &data, &sshdr)) {
211 if (scsi_sense_valid(&sshdr)) 217 if (scsi_sense_valid(&sshdr))
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index e2fa3f476227..f564af8949e8 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
323static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, 323static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
324 struct pqi_scsi_dev *device) 324 struct pqi_scsi_dev *device)
325{ 325{
326 return device->in_remove & !ctrl_info->in_shutdown; 326 return device->in_remove && !ctrl_info->in_shutdown;
327} 327}
328 328
329static inline void pqi_schedule_rescan_worker_with_delay( 329static inline void pqi_schedule_rescan_worker_with_delay(
@@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3576 alloc_length += PQI_EXTRA_SGL_MEMORY; 3576 alloc_length += PQI_EXTRA_SGL_MEMORY;
3577 3577
3578 ctrl_info->queue_memory_base = 3578 ctrl_info->queue_memory_base =
3579 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3579 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3580 alloc_length, 3580 &ctrl_info->queue_memory_base_dma_handle,
3581 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3581 GFP_KERNEL);
3582 3582
3583 if (!ctrl_info->queue_memory_base) 3583 if (!ctrl_info->queue_memory_base)
3584 return -ENOMEM; 3584 return -ENOMEM;
@@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3715 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3716 3716
3717 ctrl_info->admin_queue_memory_base = 3717 ctrl_info->admin_queue_memory_base =
3718 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3718 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
3719 alloc_length, 3719 &ctrl_info->admin_queue_memory_base_dma_handle,
3720 &ctrl_info->admin_queue_memory_base_dma_handle, 3720 GFP_KERNEL);
3721 GFP_KERNEL);
3722 3721
3723 if (!ctrl_info->admin_queue_memory_base) 3722 if (!ctrl_info->admin_queue_memory_base)
3724 return -ENOMEM; 3723 return -ENOMEM;
@@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4602 4601
4603static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4602static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4604{ 4603{
4605 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4604 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
4606 ctrl_info->error_buffer_length, 4605 ctrl_info->error_buffer_length,
4607 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4606 &ctrl_info->error_buffer_dma_handle,
4607 GFP_KERNEL);
4608 4608
4609 if (!ctrl_info->error_buffer) 4609 if (!ctrl_info->error_buffer)
4610 return -ENOMEM; 4610 return -ENOMEM;
@@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
7487 dma_addr_t dma_handle; 7487 dma_addr_t dma_handle;
7488 7488
7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 7489 ctrl_info->pqi_ofa_chunk_virt_addr[i] =
7490 dma_zalloc_coherent(dev, chunk_size, &dma_handle, 7490 dma_alloc_coherent(dev, chunk_size, &dma_handle,
7491 GFP_KERNEL); 7491 GFP_KERNEL);
7492 7492
7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 7493 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
7494 break; 7494 break;
@@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
7545 struct device *dev; 7545 struct device *dev;
7546 7546
7547 dev = &ctrl_info->pci_dev->dev; 7547 dev = &ctrl_info->pci_dev->dev;
7548 pqi_ofa_memory = dma_zalloc_coherent(dev, 7548 pqi_ofa_memory = dma_alloc_coherent(dev,
7549 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, 7549 PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
7550 &ctrl_info->pqi_ofa_mem_dma_handle, 7550 &ctrl_info->pqi_ofa_mem_dma_handle,
7551 GFP_KERNEL); 7551 GFP_KERNEL);
7552 7552
7553 if (!pqi_ofa_memory) 7553 if (!pqi_ofa_memory)
7554 return; 7554 return;
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index dd65fea07687..6d176815e6ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
195 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, 195 QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
196 QUERY_DESC_UNIT_DEF_SIZE = 0x23, 196 QUERY_DESC_UNIT_DEF_SIZE = 0x23,
197 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, 197 QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
198 QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, 198 QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
199 QUERY_DESC_POWER_DEF_SIZE = 0x62, 199 QUERY_DESC_POWER_DEF_SIZE = 0x62,
200 QUERY_DESC_HEALTH_DEF_SIZE = 0x25, 200 QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
201}; 201};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9ba7671b84f8..71334aaf1447 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8001,6 +8001,8 @@ out:
8001 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 8001 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8002 ktime_to_us(ktime_sub(ktime_get(), start)), 8002 ktime_to_us(ktime_sub(ktime_get(), start)),
8003 hba->curr_dev_pwr_mode, hba->uic_link_state); 8003 hba->curr_dev_pwr_mode, hba->uic_link_state);
8004 if (!ret)
8005 hba->is_sys_suspended = false;
8004 return ret; 8006 return ret;
8005} 8007}
8006EXPORT_SYMBOL(ufshcd_system_resume); 8008EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 9436aa83ff1b..e6d48dccb8d5 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
62 return -ENODEV; 62 return -ENODEV;
63 } 63 }
64 64
65 if (!dma_zalloc_coherent(dev, *size, addr, 0)) { 65 if (!dma_alloc_coherent(dev, *size, addr, 0)) {
66 dev_err(dev, "DMA Alloc memory failed\n"); 66 dev_err(dev, "DMA Alloc memory failed\n");
67 return -ENODEV; 67 return -ENODEV;
68 } 68 }
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f78c34647ca2..76480df195a8 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
44 const char *sprop; 44 const char *sprop;
45 int ret = 0; 45 int ret = 0;
46 u32 val; 46 u32 val;
47 struct resource *res;
48 struct device_node *np2;
49 static int siram_init_flag;
50 struct platform_device *pdev;
51 47
52 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); 48 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
53 if (sprop) { 49 if (sprop) {
@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
124 utdm->siram_entry_id = val; 120 utdm->siram_entry_id = val;
125 121
126 set_si_param(utdm, ut_info); 122 set_si_param(utdm, ut_info);
127
128 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
129 if (!np2)
130 return -EINVAL;
131
132 pdev = of_find_device_by_node(np2);
133 if (!pdev) {
134 pr_err("%pOFn: failed to lookup pdev\n", np2);
135 of_node_put(np2);
136 return -EINVAL;
137 }
138
139 of_node_put(np2);
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
142 if (IS_ERR(utdm->si_regs)) {
143 ret = PTR_ERR(utdm->si_regs);
144 goto err_miss_siram_property;
145 }
146
147 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
148 if (!np2) {
149 ret = -EINVAL;
150 goto err_miss_siram_property;
151 }
152
153 pdev = of_find_device_by_node(np2);
154 if (!pdev) {
155 ret = -EINVAL;
156 pr_err("%pOFn: failed to lookup pdev\n", np2);
157 of_node_put(np2);
158 goto err_miss_siram_property;
159 }
160
161 of_node_put(np2);
162 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
163 utdm->siram = devm_ioremap_resource(&pdev->dev, res);
164 if (IS_ERR(utdm->siram)) {
165 ret = PTR_ERR(utdm->siram);
166 goto err_miss_siram_property;
167 }
168
169 if (siram_init_flag == 0) {
170 memset_io(utdm->siram, 0, resource_size(res));
171 siram_init_flag = 1;
172 }
173
174 return ret;
175
176err_miss_siram_property:
177 devm_iounmap(&pdev->dev, utdm->si_regs);
178 return ret; 123 return ret;
179} 124}
180EXPORT_SYMBOL(ucc_of_parse_tdm); 125EXPORT_SYMBOL(ucc_of_parse_tdm);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 4d8012e1205c..68bfca6f20dd 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,7 +44,7 @@ config ARCH_RZN1
44 bool 44 bool
45 select ARM_AMBA 45 select ARM_AMBA
46 46
47if ARM 47if ARM && ARCH_RENESAS
48 48
49#comment "Renesas ARM SoCs System Type" 49#comment "Renesas ARM SoCs System Type"
50 50
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
index e1ac4c0f6640..11050e17ea81 100644
--- a/drivers/soc/renesas/r8a774c0-sysc.c
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
@@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = {
28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A }, 28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
29}; 29};
30 30
31static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas,
32 unsigned int num_areas, u8 id,
33 int new_parent)
34{
35 unsigned int i;
36
37 for (i = 0; i < num_areas; i++)
38 if (areas[i].isr_bit == id) {
39 areas[i].parent = new_parent;
40 return;
41 }
42}
43
44/* Fixups for RZ/G2E ES1.0 revision */ 31/* Fixups for RZ/G2E ES1.0 revision */
45static const struct soc_device_attribute r8a774c0[] __initconst = { 32static const struct soc_device_attribute r8a774c0[] __initconst = {
46 { .soc_id = "r8a774c0", .revision = "ES1.0" }, 33 { .soc_id = "r8a774c0", .revision = "ES1.0" },
@@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a774c0[] __initconst = {
50static int __init r8a774c0_sysc_init(void) 37static int __init r8a774c0_sysc_init(void)
51{ 38{
52 if (soc_device_match(r8a774c0)) { 39 if (soc_device_match(r8a774c0)) {
53 rcar_sysc_fix_parent(r8a774c0_areas, 40 /* Fix incorrect 3DG hierarchy */
54 ARRAY_SIZE(r8a774c0_areas), 41 swap(r8a774c0_areas[6], r8a774c0_areas[7]);
55 R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B); 42 r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
56 rcar_sysc_fix_parent(r8a774c0_areas, 43 r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
57 ARRAY_SIZE(r8a774c0_areas),
58 R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON);
59 } 44 }
60 45
61 return 0; 46 return 0;
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index d7e4e18ec3df..1ae9af5f17ec 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
466 int i; 466 int i;
467 467
468 /* allocate coherent DMAable memory for hardware buffer descriptors. */ 468 /* allocate coherent DMAable memory for hardware buffer descriptors. */
469 sqi->bd = dma_zalloc_coherent(&sqi->master->dev, 469 sqi->bd = dma_alloc_coherent(&sqi->master->dev,
470 sizeof(*bd) * PESQI_BD_COUNT, 470 sizeof(*bd) * PESQI_BD_COUNT,
471 &sqi->bd_dma, GFP_KERNEL); 471 &sqi->bd_dma, GFP_KERNEL);
472 if (!sqi->bd) { 472 if (!sqi->bd) {
473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); 473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
474 return -ENOMEM; 474 return -ENOMEM;
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 21a76a8ccc26..6027b19f7bc2 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
1396 if (!ring->tx_buf) 1396 if (!ring->tx_buf)
1397 goto no_tx_mem; 1397 goto no_tx_mem;
1398 1398
1399 ring->tx_dma = dma_zalloc_coherent(eth->dev, 1399 ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
1400 ring->tx_ring_size * sz,
1401 &ring->tx_phys, 1400 &ring->tx_phys,
1402 GFP_ATOMIC | __GFP_ZERO); 1401 GFP_ATOMIC | __GFP_ZERO);
1403 if (!ring->tx_dma) 1402 if (!ring->tx_dma)
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 364d6ea14bf8..2f90f60f1681 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
154 154
155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; 155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
156 156
157 crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 157 crypto_ops = lib80211_get_crypto_ops("WEP");
158 158
159 if (!crypto_ops) 159 if (!crypto_ops)
160 return; 160 return;
@@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
210 void *crypto_private = NULL; 210 void *crypto_private = NULL;
211 int status = _SUCCESS; 211 int status = _SUCCESS;
212 const int keyindex = prxattrib->key_index; 212 const int keyindex = prxattrib->key_index;
213 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 213 struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
214 char iv[4], icv[4]; 214 char iv[4], icv[4];
215 215
216 if (!crypto_ops) { 216 if (!crypto_ops) {
@@ -1291,7 +1291,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; 1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
1292 void *crypto_private = NULL; 1292 void *crypto_private = NULL;
1293 u8 *key, *pframe = skb->data; 1293 u8 *key, *pframe = skb->data;
1294 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp"); 1294 struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
1295 struct security_priv *psecuritypriv = &padapter->securitypriv; 1295 struct security_priv *psecuritypriv = &padapter->securitypriv;
1296 char iv[8], icv[8]; 1296 char iv[8], icv[8];
1297 1297
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 7c03b69b8ed3..6d02904de63f 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -22,7 +22,7 @@ static const struct sdio_device_id sdio_ids[] =
22 { SDIO_DEVICE(0x024c, 0xb723), }, 22 { SDIO_DEVICE(0x024c, 0xb723), },
23 { /* end: all zeroes */ }, 23 { /* end: all zeroes */ },
24}; 24};
25static const struct acpi_device_id acpi_ids[] __used = { 25static const struct acpi_device_id acpi_ids[] = {
26 {"OBDA8723", 0x0000}, 26 {"OBDA8723", 0x0000},
27 {} 27 {}
28}; 28};
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 338b6e952515..dd4898861b83 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
407 /* Allocate enough storage to hold the page pointers and the page 407 /* Allocate enough storage to hold the page pointers and the page
408 * list 408 * list
409 */ 409 */
410 pagelist = dma_zalloc_coherent(g_dev, 410 pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
411 pagelist_size, 411 GFP_KERNEL);
412 &dma_addr,
413 GFP_KERNEL);
414 412
415 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); 413 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
416 414
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1ab0e8562d40..c9097e7367d8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv)
440 void *vir_pool; 440 void *vir_pool;
441 441
442 /*allocate all RD/TD rings a single pool*/ 442 /*allocate all RD/TD rings a single pool*/
443 vir_pool = dma_zalloc_coherent(&priv->pcid->dev, 443 vir_pool = dma_alloc_coherent(&priv->pcid->dev,
444 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + 444 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
445 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + 445 &priv->pool_dma, GFP_ATOMIC);
446 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
447 priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
448 &priv->pool_dma, GFP_ATOMIC);
449 if (!vir_pool) { 446 if (!vir_pool) {
450 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); 447 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
451 return false; 448 return false;
@@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv)
459 priv->rd1_pool_dma = priv->rd0_pool_dma + 456 priv->rd1_pool_dma = priv->rd0_pool_dma +
460 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); 457 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
461 458
462 priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, 459 priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
463 priv->opts.tx_descs[0] * PKT_BUF_SZ + 460 priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE,
464 priv->opts.tx_descs[1] * PKT_BUF_SZ + 461 &priv->tx_bufs_dma0, GFP_ATOMIC);
465 CB_BEACON_BUF_SIZE +
466 CB_MAX_BUF_SIZE,
467 &priv->tx_bufs_dma0,
468 GFP_ATOMIC);
469 if (!priv->tx0_bufs) { 462 if (!priv->tx0_bufs) {
470 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); 463 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
471 464
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 984941e036c8..bd15a564fe24 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void)
714 sizeof(struct iscsi_queue_req), 714 sizeof(struct iscsi_queue_req),
715 __alignof__(struct iscsi_queue_req), 0, NULL); 715 __alignof__(struct iscsi_queue_req), 0, NULL);
716 if (!lio_qr_cache) { 716 if (!lio_qr_cache) {
717 pr_err("nable to kmem_cache_create() for" 717 pr_err("Unable to kmem_cache_create() for"
718 " lio_qr_cache\n"); 718 " lio_qr_cache\n");
719 goto bitmap_out; 719 goto bitmap_out;
720 } 720 }
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1e6d24943565..c34c88ef3319 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@ struct tcmu_dev {
148 size_t ring_size; 148 size_t ring_size;
149 149
150 struct mutex cmdr_lock; 150 struct mutex cmdr_lock;
151 struct list_head cmdr_queue; 151 struct list_head qfull_queue;
152 152
153 uint32_t dbi_max; 153 uint32_t dbi_max;
154 uint32_t dbi_thresh; 154 uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
159 159
160 struct timer_list cmd_timer; 160 struct timer_list cmd_timer;
161 unsigned int cmd_time_out; 161 unsigned int cmd_time_out;
162 struct list_head inflight_queue;
162 163
163 struct timer_list qfull_timer; 164 struct timer_list qfull_timer;
164 int qfull_time_out; 165 int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
179struct tcmu_cmd { 180struct tcmu_cmd {
180 struct se_cmd *se_cmd; 181 struct se_cmd *se_cmd;
181 struct tcmu_dev *tcmu_dev; 182 struct tcmu_dev *tcmu_dev;
182 struct list_head cmdr_queue_entry; 183 struct list_head queue_entry;
183 184
184 uint16_t cmd_id; 185 uint16_t cmd_id;
185 186
@@ -192,6 +193,7 @@ struct tcmu_cmd {
192 unsigned long deadline; 193 unsigned long deadline;
193 194
194#define TCMU_CMD_BIT_EXPIRED 0 195#define TCMU_CMD_BIT_EXPIRED 0
196#define TCMU_CMD_BIT_INFLIGHT 1
195 unsigned long flags; 197 unsigned long flags;
196}; 198};
197/* 199/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
586 if (!tcmu_cmd) 588 if (!tcmu_cmd)
587 return NULL; 589 return NULL;
588 590
589 INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); 591 INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
590 tcmu_cmd->se_cmd = se_cmd; 592 tcmu_cmd->se_cmd = se_cmd;
591 tcmu_cmd->tcmu_dev = udev; 593 tcmu_cmd->tcmu_dev = udev;
592 594
@@ -915,11 +917,13 @@ setup_timer:
915 return 0; 917 return 0;
916 918
917 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 919 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
918 mod_timer(timer, tcmu_cmd->deadline); 920 if (!timer_pending(timer))
921 mod_timer(timer, tcmu_cmd->deadline);
922
919 return 0; 923 return 0;
920} 924}
921 925
922static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) 926static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
923{ 927{
924 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 928 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
925 unsigned int tmo; 929 unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
942 if (ret) 946 if (ret)
943 return ret; 947 return ret;
944 948
945 list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); 949 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
946 pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 950 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
947 tcmu_cmd->cmd_id, udev->name); 951 tcmu_cmd->cmd_id, udev->name);
948 return 0; 952 return 0;
@@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
999 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 1003 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
1000 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1004 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1001 1005
1002 if (!list_empty(&udev->cmdr_queue)) 1006 if (!list_empty(&udev->qfull_queue))
1003 goto queue; 1007 goto queue;
1004 1008
1005 mb = udev->mb_addr; 1009 mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
1096 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1100 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1097 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1101 tcmu_flush_dcache_range(mb, sizeof(*mb));
1098 1102
1103 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1104 set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1105
1099 /* TODO: only if FLUSH and FUA? */ 1106 /* TODO: only if FLUSH and FUA? */
1100 uio_event_notify(&udev->uio_info); 1107 uio_event_notify(&udev->uio_info);
1101 1108
1102 return 0; 1109 return 0;
1103 1110
1104queue: 1111queue:
1105 if (add_to_cmdr_queue(tcmu_cmd)) { 1112 if (add_to_qfull_queue(tcmu_cmd)) {
1106 *scsi_err = TCM_OUT_OF_RESOURCES; 1113 *scsi_err = TCM_OUT_OF_RESOURCES;
1107 return -1; 1114 return -1;
1108 } 1115 }
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1145 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1152 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1146 goto out; 1153 goto out;
1147 1154
1155 list_del_init(&cmd->queue_entry);
1156
1148 tcmu_cmd_reset_dbi_cur(cmd); 1157 tcmu_cmd_reset_dbi_cur(cmd);
1149 1158
1150 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 1159 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ out:
1194 tcmu_free_cmd(cmd); 1203 tcmu_free_cmd(cmd);
1195} 1204}
1196 1205
1206static void tcmu_set_next_deadline(struct list_head *queue,
1207 struct timer_list *timer)
1208{
1209 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1210 unsigned long deadline = 0;
1211
1212 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1213 if (!time_after(jiffies, tcmu_cmd->deadline)) {
1214 deadline = tcmu_cmd->deadline;
1215 break;
1216 }
1217 }
1218
1219 if (deadline)
1220 mod_timer(timer, deadline);
1221 else
1222 del_timer(timer);
1223}
1224
1197static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1225static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1198{ 1226{
1199 struct tcmu_mailbox *mb; 1227 struct tcmu_mailbox *mb;
1228 struct tcmu_cmd *cmd;
1200 int handled = 0; 1229 int handled = 0;
1201 1230
1202 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1231 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1210 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1239 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1211 1240
1212 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 1241 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1213 struct tcmu_cmd *cmd;
1214 1242
1215 tcmu_flush_dcache_range(entry, sizeof(*entry)); 1243 tcmu_flush_dcache_range(entry, sizeof(*entry));
1216 1244
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1243 /* no more pending commands */ 1271 /* no more pending commands */
1244 del_timer(&udev->cmd_timer); 1272 del_timer(&udev->cmd_timer);
1245 1273
1246 if (list_empty(&udev->cmdr_queue)) { 1274 if (list_empty(&udev->qfull_queue)) {
1247 /* 1275 /*
1248 * no more pending or waiting commands so try to 1276 * no more pending or waiting commands so try to
1249 * reclaim blocks if needed. 1277 * reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1252 tcmu_global_max_blocks) 1280 tcmu_global_max_blocks)
1253 schedule_delayed_work(&tcmu_unmap_work, 0); 1281 schedule_delayed_work(&tcmu_unmap_work, 0);
1254 } 1282 }
1283 } else if (udev->cmd_time_out) {
1284 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1255 } 1285 }
1256 1286
1257 return handled; 1287 return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1271 if (!time_after(jiffies, cmd->deadline)) 1301 if (!time_after(jiffies, cmd->deadline))
1272 return 0; 1302 return 0;
1273 1303
1274 is_running = list_empty(&cmd->cmdr_queue_entry); 1304 is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
1275 se_cmd = cmd->se_cmd; 1305 se_cmd = cmd->se_cmd;
1276 1306
1277 if (is_running) { 1307 if (is_running) {
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1288 */ 1318 */
1289 scsi_status = SAM_STAT_CHECK_CONDITION; 1319 scsi_status = SAM_STAT_CHECK_CONDITION;
1290 } else { 1320 } else {
1291 list_del_init(&cmd->cmdr_queue_entry);
1292
1293 idr_remove(&udev->commands, id); 1321 idr_remove(&udev->commands, id);
1294 tcmu_free_cmd(cmd); 1322 tcmu_free_cmd(cmd);
1295 scsi_status = SAM_STAT_TASK_SET_FULL; 1323 scsi_status = SAM_STAT_TASK_SET_FULL;
1296 } 1324 }
1325 list_del_init(&cmd->queue_entry);
1297 1326
1298 pr_debug("Timing out cmd %u on dev %s that is %s.\n", 1327 pr_debug("Timing out cmd %u on dev %s that is %s.\n",
1299 id, udev->name, is_running ? "inflight" : "queued"); 1328 id, udev->name, is_running ? "inflight" : "queued");
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1372 1401
1373 INIT_LIST_HEAD(&udev->node); 1402 INIT_LIST_HEAD(&udev->node);
1374 INIT_LIST_HEAD(&udev->timedout_entry); 1403 INIT_LIST_HEAD(&udev->timedout_entry);
1375 INIT_LIST_HEAD(&udev->cmdr_queue); 1404 INIT_LIST_HEAD(&udev->qfull_queue);
1405 INIT_LIST_HEAD(&udev->inflight_queue);
1376 idr_init(&udev->commands); 1406 idr_init(&udev->commands);
1377 1407
1378 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1408 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1383 return &udev->se_dev; 1413 return &udev->se_dev;
1384} 1414}
1385 1415
1386static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) 1416static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
1387{ 1417{
1388 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1418 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1389 LIST_HEAD(cmds); 1419 LIST_HEAD(cmds);
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1391 sense_reason_t scsi_ret; 1421 sense_reason_t scsi_ret;
1392 int ret; 1422 int ret;
1393 1423
1394 if (list_empty(&udev->cmdr_queue)) 1424 if (list_empty(&udev->qfull_queue))
1395 return true; 1425 return true;
1396 1426
1397 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1427 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1398 1428
1399 list_splice_init(&udev->cmdr_queue, &cmds); 1429 list_splice_init(&udev->qfull_queue, &cmds);
1400 1430
1401 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { 1431 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1402 list_del_init(&tcmu_cmd->cmdr_queue_entry); 1432 list_del_init(&tcmu_cmd->queue_entry);
1403 1433
1404 pr_debug("removing cmd %u on dev %s from queue\n", 1434 pr_debug("removing cmd %u on dev %s from queue\n",
1405 tcmu_cmd->cmd_id, udev->name); 1435 tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1437 * cmd was requeued, so just put all cmds back in 1467 * cmd was requeued, so just put all cmds back in
1438 * the queue 1468 * the queue
1439 */ 1469 */
1440 list_splice_tail(&cmds, &udev->cmdr_queue); 1470 list_splice_tail(&cmds, &udev->qfull_queue);
1441 drained = false; 1471 drained = false;
1442 goto done; 1472 break;
1443 } 1473 }
1444 } 1474 }
1445 if (list_empty(&udev->cmdr_queue)) 1475
1446 del_timer(&udev->qfull_timer); 1476 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1447done:
1448 return drained; 1477 return drained;
1449} 1478}
1450 1479
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1454 1483
1455 mutex_lock(&udev->cmdr_lock); 1484 mutex_lock(&udev->cmdr_lock);
1456 tcmu_handle_completions(udev); 1485 tcmu_handle_completions(udev);
1457 run_cmdr_queue(udev, false); 1486 run_qfull_queue(udev, false);
1458 mutex_unlock(&udev->cmdr_lock); 1487 mutex_unlock(&udev->cmdr_lock);
1459 1488
1460 return 0; 1489 return 0;
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
1982 /* complete IO that has executed successfully */ 2011 /* complete IO that has executed successfully */
1983 tcmu_handle_completions(udev); 2012 tcmu_handle_completions(udev);
1984 /* fail IO waiting to be queued */ 2013 /* fail IO waiting to be queued */
1985 run_cmdr_queue(udev, true); 2014 run_qfull_queue(udev, true);
1986 2015
1987unlock: 2016unlock:
1988 mutex_unlock(&udev->cmdr_lock); 2017 mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
1997 mutex_lock(&udev->cmdr_lock); 2026 mutex_lock(&udev->cmdr_lock);
1998 2027
1999 idr_for_each_entry(&udev->commands, cmd, i) { 2028 idr_for_each_entry(&udev->commands, cmd, i) {
2000 if (!list_empty(&cmd->cmdr_queue_entry)) 2029 if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
2001 continue; 2030 continue;
2002 2031
2003 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 2032 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2006 2035
2007 idr_remove(&udev->commands, i); 2036 idr_remove(&udev->commands, i);
2008 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2037 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2038 list_del_init(&cmd->queue_entry);
2009 if (err_level == 1) { 2039 if (err_level == 1) {
2010 /* 2040 /*
2011 * Userspace was not able to start the 2041 * Userspace was not able to start the
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
2666 2696
2667 mutex_lock(&udev->cmdr_lock); 2697 mutex_lock(&udev->cmdr_lock);
2668 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 2698 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
2699
2700 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
2701 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2702
2669 mutex_unlock(&udev->cmdr_lock); 2703 mutex_unlock(&udev->cmdr_lock);
2670 2704
2671 spin_lock_bh(&timed_out_udevs_lock); 2705 spin_lock_bh(&timed_out_udevs_lock);
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 0582bd12a239..0ca908d12750 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -4,7 +4,7 @@
4 4
5config INT340X_THERMAL 5config INT340X_THERMAL
6 tristate "ACPI INT340X thermal drivers" 6 tristate "ACPI INT340X thermal drivers"
7 depends on X86 && ACPI 7 depends on X86 && ACPI && PCI
8 select THERMAL_GOV_USER_SPACE 8 select THERMAL_GOV_USER_SPACE
9 select ACPI_THERMAL_REL 9 select ACPI_THERMAL_REL
10 select ACPI_FAN 10 select ACPI_FAN
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67b9bf3b500e..089a6f285d5e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -85,6 +85,18 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
85 with "earlycon=smh" on the kernel command line. The console is 85 with "earlycon=smh" on the kernel command line. The console is
86 enabled when early_param is processed. 86 enabled when early_param is processed.
87 87
88config SERIAL_EARLYCON_RISCV_SBI
89 bool "Early console using RISC-V SBI"
90 depends on RISCV
91 select SERIAL_CORE
92 select SERIAL_CORE_CONSOLE
93 select SERIAL_EARLYCON
94 help
95 Support for early debug console using RISC-V SBI. This enables
96 the console before standard serial driver is probed. This is enabled
97 with "earlycon=sbi" on the kernel command line. The console is
98 enabled when early_param is processed.
99
88config SERIAL_SB1250_DUART 100config SERIAL_SB1250_DUART
89 tristate "BCM1xxx on-chip DUART serial support" 101 tristate "BCM1xxx on-chip DUART serial support"
90 depends on SIBYTE_SB1xxx_SOC=y 102 depends on SIBYTE_SB1xxx_SOC=y
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8c303736b7e8..1511e8a9f856 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
7 7
8obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o 8obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
9obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o 9obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
10obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o
10 11
11# These Sparc drivers have to appear before others such as 8250 12# These Sparc drivers have to appear before others such as 8250
12# which share ttySx minor node space. Otherwise console device 13# which share ttySx minor node space. Otherwise console device
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
new file mode 100644
index 000000000000..e1a551aae336
--- /dev/null
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -0,0 +1,28 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * RISC-V SBI based earlycon
4 *
5 * Copyright (C) 2018 Anup Patel <anup@brainfault.org>
6 */
7#include <linux/kernel.h>
8#include <linux/console.h>
9#include <linux/init.h>
10#include <linux/serial_core.h>
11#include <asm/sbi.h>
12
13static void sbi_console_write(struct console *con,
14 const char *s, unsigned int n)
15{
16 int i;
17
18 for (i = 0; i < n; ++i)
19 sbi_console_putchar(s[i]);
20}
21
22static int __init early_sbi_setup(struct earlycon_device *device,
23 const char *opt)
24{
25 device->con->write = sbi_console_write;
26 return 0;
27}
28EARLYCON_DECLARE(sbi, early_sbi_setup);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index e052b69ceb98..9de9f0f239a1 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -114,9 +114,9 @@ struct ltq_uart_port {
114 114
115static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg) 115static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg)
116{ 116{
117 u32 tmp = readl(reg); 117 u32 tmp = __raw_readl(reg);
118 118
119 writel((tmp & ~clear) | set, reg); 119 __raw_writel((tmp & ~clear) | set, reg);
120} 120}
121 121
122static inline struct 122static inline struct
@@ -144,7 +144,7 @@ lqasc_start_tx(struct uart_port *port)
144static void 144static void
145lqasc_stop_rx(struct uart_port *port) 145lqasc_stop_rx(struct uart_port *port)
146{ 146{
147 writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); 147 __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
148} 148}
149 149
150static int 150static int
@@ -153,11 +153,12 @@ lqasc_rx_chars(struct uart_port *port)
153 struct tty_port *tport = &port->state->port; 153 struct tty_port *tport = &port->state->port;
154 unsigned int ch = 0, rsr = 0, fifocnt; 154 unsigned int ch = 0, rsr = 0, fifocnt;
155 155
156 fifocnt = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; 156 fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
157 ASCFSTAT_RXFFLMASK;
157 while (fifocnt--) { 158 while (fifocnt--) {
158 u8 flag = TTY_NORMAL; 159 u8 flag = TTY_NORMAL;
159 ch = readb(port->membase + LTQ_ASC_RBUF); 160 ch = readb(port->membase + LTQ_ASC_RBUF);
160 rsr = (readl(port->membase + LTQ_ASC_STATE) 161 rsr = (__raw_readl(port->membase + LTQ_ASC_STATE)
161 & ASCSTATE_ANY) | UART_DUMMY_UER_RX; 162 & ASCSTATE_ANY) | UART_DUMMY_UER_RX;
162 tty_flip_buffer_push(tport); 163 tty_flip_buffer_push(tport);
163 port->icount.rx++; 164 port->icount.rx++;
@@ -217,7 +218,7 @@ lqasc_tx_chars(struct uart_port *port)
217 return; 218 return;
218 } 219 }
219 220
220 while (((readl(port->membase + LTQ_ASC_FSTAT) & 221 while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
221 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { 222 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
222 if (port->x_char) { 223 if (port->x_char) {
223 writeb(port->x_char, port->membase + LTQ_ASC_TBUF); 224 writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
@@ -245,7 +246,7 @@ lqasc_tx_int(int irq, void *_port)
245 unsigned long flags; 246 unsigned long flags;
246 struct uart_port *port = (struct uart_port *)_port; 247 struct uart_port *port = (struct uart_port *)_port;
247 spin_lock_irqsave(&ltq_asc_lock, flags); 248 spin_lock_irqsave(&ltq_asc_lock, flags);
248 writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); 249 __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
249 spin_unlock_irqrestore(&ltq_asc_lock, flags); 250 spin_unlock_irqrestore(&ltq_asc_lock, flags);
250 lqasc_start_tx(port); 251 lqasc_start_tx(port);
251 return IRQ_HANDLED; 252 return IRQ_HANDLED;
@@ -270,7 +271,7 @@ lqasc_rx_int(int irq, void *_port)
270 unsigned long flags; 271 unsigned long flags;
271 struct uart_port *port = (struct uart_port *)_port; 272 struct uart_port *port = (struct uart_port *)_port;
272 spin_lock_irqsave(&ltq_asc_lock, flags); 273 spin_lock_irqsave(&ltq_asc_lock, flags);
273 writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); 274 __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
274 lqasc_rx_chars(port); 275 lqasc_rx_chars(port);
275 spin_unlock_irqrestore(&ltq_asc_lock, flags); 276 spin_unlock_irqrestore(&ltq_asc_lock, flags);
276 return IRQ_HANDLED; 277 return IRQ_HANDLED;
@@ -280,7 +281,8 @@ static unsigned int
280lqasc_tx_empty(struct uart_port *port) 281lqasc_tx_empty(struct uart_port *port)
281{ 282{
282 int status; 283 int status;
283 status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; 284 status = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
285 ASCFSTAT_TXFFLMASK;
284 return status ? 0 : TIOCSER_TEMT; 286 return status ? 0 : TIOCSER_TEMT;
285} 287}
286 288
@@ -313,12 +315,12 @@ lqasc_startup(struct uart_port *port)
313 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), 315 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
314 port->membase + LTQ_ASC_CLC); 316 port->membase + LTQ_ASC_CLC);
315 317
316 writel(0, port->membase + LTQ_ASC_PISEL); 318 __raw_writel(0, port->membase + LTQ_ASC_PISEL);
317 writel( 319 __raw_writel(
318 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | 320 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
319 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, 321 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
320 port->membase + LTQ_ASC_TXFCON); 322 port->membase + LTQ_ASC_TXFCON);
321 writel( 323 __raw_writel(
322 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) 324 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
323 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, 325 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
324 port->membase + LTQ_ASC_RXFCON); 326 port->membase + LTQ_ASC_RXFCON);
@@ -350,7 +352,7 @@ lqasc_startup(struct uart_port *port)
350 goto err2; 352 goto err2;
351 } 353 }
352 354
353 writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, 355 __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
354 port->membase + LTQ_ASC_IRNREN); 356 port->membase + LTQ_ASC_IRNREN);
355 return 0; 357 return 0;
356 358
@@ -369,7 +371,7 @@ lqasc_shutdown(struct uart_port *port)
369 free_irq(ltq_port->rx_irq, port); 371 free_irq(ltq_port->rx_irq, port);
370 free_irq(ltq_port->err_irq, port); 372 free_irq(ltq_port->err_irq, port);
371 373
372 writel(0, port->membase + LTQ_ASC_CON); 374 __raw_writel(0, port->membase + LTQ_ASC_CON);
373 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, 375 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
374 port->membase + LTQ_ASC_RXFCON); 376 port->membase + LTQ_ASC_RXFCON);
375 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, 377 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
@@ -461,13 +463,13 @@ lqasc_set_termios(struct uart_port *port,
461 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); 463 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
462 464
463 /* now we can write the new baudrate into the register */ 465 /* now we can write the new baudrate into the register */
464 writel(divisor, port->membase + LTQ_ASC_BG); 466 __raw_writel(divisor, port->membase + LTQ_ASC_BG);
465 467
466 /* turn the baudrate generator back on */ 468 /* turn the baudrate generator back on */
467 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON); 469 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON);
468 470
469 /* enable rx */ 471 /* enable rx */
470 writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); 472 __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
471 473
472 spin_unlock_irqrestore(&ltq_asc_lock, flags); 474 spin_unlock_irqrestore(&ltq_asc_lock, flags);
473 475
@@ -578,7 +580,7 @@ lqasc_console_putchar(struct uart_port *port, int ch)
578 return; 580 return;
579 581
580 do { 582 do {
581 fifofree = (readl(port->membase + LTQ_ASC_FSTAT) 583 fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
582 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; 584 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
583 } while (fifofree == 0); 585 } while (fifofree == 0);
584 writeb(ch, port->membase + LTQ_ASC_TBUF); 586 writeb(ch, port->membase + LTQ_ASC_TBUF);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index bfe9ad85b362..23c6fd238422 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1256static int tty_reopen(struct tty_struct *tty) 1256static int tty_reopen(struct tty_struct *tty)
1257{ 1257{
1258 struct tty_driver *driver = tty->driver; 1258 struct tty_driver *driver = tty->driver;
1259 int retval; 1259 struct tty_ldisc *ld;
1260 int retval = 0;
1260 1261
1261 if (driver->type == TTY_DRIVER_TYPE_PTY && 1262 if (driver->type == TTY_DRIVER_TYPE_PTY &&
1262 driver->subtype == PTY_TYPE_MASTER) 1263 driver->subtype == PTY_TYPE_MASTER)
@@ -1268,13 +1269,18 @@ static int tty_reopen(struct tty_struct *tty)
1268 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) 1269 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
1269 return -EBUSY; 1270 return -EBUSY;
1270 1271
1271 retval = tty_ldisc_lock(tty, 5 * HZ); 1272 ld = tty_ldisc_ref_wait(tty);
1272 if (retval) 1273 if (ld) {
1273 return retval; 1274 tty_ldisc_deref(ld);
1275 } else {
1276 retval = tty_ldisc_lock(tty, 5 * HZ);
1277 if (retval)
1278 return retval;
1274 1279
1275 if (!tty->ldisc) 1280 if (!tty->ldisc)
1276 retval = tty_ldisc_reinit(tty, tty->termios.c_line); 1281 retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1277 tty_ldisc_unlock(tty); 1282 tty_ldisc_unlock(tty);
1283 }
1278 1284
1279 if (retval == 0) 1285 if (retval == 0)
1280 tty->count++; 1286 tty->count++;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ed8c62b2d9d1..739f8960811a 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1865,6 +1865,13 @@ static const struct usb_device_id acm_ids[] = {
1865 .driver_info = IGNORE_DEVICE, 1865 .driver_info = IGNORE_DEVICE,
1866 }, 1866 },
1867 1867
1868 { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
1869 .driver_info = SEND_ZERO_PACKET,
1870 },
1871 { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
1872 .driver_info = SEND_ZERO_PACKET,
1873 },
1874
1868 /* control interfaces without any protocol set */ 1875 /* control interfaces without any protocol set */
1869 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1876 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1870 USB_CDC_PROTO_NONE) }, 1877 USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 356b05c82dbc..f713cecc1f41 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -143,9 +143,12 @@ int usb_choose_configuration(struct usb_device *udev)
143 continue; 143 continue;
144 } 144 }
145 145
146 if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) { 146 if (i > 0 && desc && is_audio(desc)) {
147 best = c; 147 if (is_uac3_config(desc)) {
148 break; 148 best = c;
149 break;
150 }
151 continue;
149 } 152 }
150 153
151 /* From the remaining configs, choose the first one whose 154 /* From the remaining configs, choose the first one whose
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 514c5214ddb2..8bc35d53408b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = {
394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, 394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
395 395
396 /* Corsair K70 RGB */ 396 /* Corsair K70 RGB */
397 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 397 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
398 USB_QUIRK_DELAY_CTRL_MSG },
398 399
399 /* Corsair Strafe */ 400 /* Corsair Strafe */
400 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | 401 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 01b44e159623..ccbd1d34eb2a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc)
172 /* Refer to BDC spec, Table 4 for description of SPB */ 172 /* Refer to BDC spec, Table 4 for description of SPB */
173 sp_buff_size = 1 << (sp_buff_size + 5); 173 sp_buff_size = 1 << (sp_buff_size + 5);
174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); 174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
175 bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, 175 bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
176 &bdc->scratchpad.sp_dma, GFP_KERNEL); 176 &bdc->scratchpad.sp_dma,
177 GFP_KERNEL);
177 178
178 if (!bdc->scratchpad.buff) 179 if (!bdc->scratchpad.buff)
179 goto fail; 180 goto fail;
@@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter)
202 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); 203 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
203 bdc->srr.dqp_index = 0; 204 bdc->srr.dqp_index = 0;
204 /* allocate the status report descriptors */ 205 /* allocate the status report descriptors */
205 bdc->srr.sr_bds = dma_zalloc_coherent( 206 bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
206 bdc->dev, 207 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
207 NUM_SR_ENTRIES * sizeof(struct bdc_bd), 208 &bdc->srr.dma_addr, GFP_KERNEL);
208 &bdc->srr.dma_addr,
209 GFP_KERNEL);
210 if (!bdc->srr.sr_bds) 209 if (!bdc->srr.sr_bds)
211 return -ENOMEM; 210 return -ENOMEM;
212 211
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6218bfe54f52..98deb5f64268 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd)
596 &uhci_debug_operations); 596 &uhci_debug_operations);
597#endif 597#endif
598 598
599 uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), 599 uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
600 UHCI_NUMFRAMES * sizeof(*uhci->frame), 600 UHCI_NUMFRAMES * sizeof(*uhci->frame),
601 &uhci->frame_dma_handle, GFP_KERNEL); 601 &uhci->frame_dma_handle, GFP_KERNEL);
602 if (!uhci->frame) { 602 if (!uhci->frame) {
603 dev_err(uhci_dev(uhci), 603 dev_err(uhci_dev(uhci),
604 "unable to allocate consistent memory for frame list\n"); 604 "unable to allocate consistent memory for frame list\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 36a3eb8849f1..8067f178fa84 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1673 for (i = 0; i < num_sp; i++) { 1673 for (i = 0; i < num_sp; i++) {
1674 dma_addr_t dma; 1674 dma_addr_t dma;
1675 void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, 1675 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1676 flags); 1676 flags);
1677 if (!buf) 1677 if (!buf)
1678 goto fail_sp4; 1678 goto fail_sp4;
1679 1679
@@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
1799 struct xhci_erst_entry *entry; 1799 struct xhci_erst_entry *entry;
1800 1800
1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; 1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1802 erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1802 erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1803 size, &erst->erst_dma_addr, flags); 1803 size, &erst->erst_dma_addr, flags);
1804 if (!erst->entries) 1804 if (!erst->entries)
1805 return -ENOMEM; 1805 return -ENOMEM;
1806 1806
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index fde2e71a6ade..a73ea495d5a7 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev)
235 if (!(us->fflags & US_FL_NEEDS_CAP16)) 235 if (!(us->fflags & US_FL_NEEDS_CAP16))
236 sdev->try_rc_10_first = 1; 236 sdev->try_rc_10_first = 1;
237 237
238 /* assume SPC3 or latter devices support sense size > 18 */ 238 /*
239 if (sdev->scsi_level > SCSI_SPC_2) 239 * assume SPC3 or latter devices support sense size > 18
240 * unless US_FL_BAD_SENSE quirk is specified.
241 */
242 if (sdev->scsi_level > SCSI_SPC_2 &&
243 !(us->fflags & US_FL_BAD_SENSE))
240 us->fflags |= US_FL_SANE_SENSE; 244 us->fflags |= US_FL_SANE_SENSE;
241 245
242 /* 246 /*
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f7f83b21dc74..ea0d27a94afe 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1266,6 +1266,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1266 US_FL_FIX_CAPACITY ), 1266 US_FL_FIX_CAPACITY ),
1267 1267
1268/* 1268/*
1269 * Reported by Icenowy Zheng <icenowy@aosc.io>
1270 * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
1271 * that do not process read/write command if a long sense is requested,
1272 * so force to use 18-byte sense.
1273 */
1274UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
1275 "SMI",
1276 "SM3350 UFS-to-USB-Mass-Storage bridge",
1277 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1278 US_FL_BAD_SENSE ),
1279
1280/*
1269 * Reported by Paul Hartman <paul.hartman+linux@gmail.com> 1281 * Reported by Paul Hartman <paul.hartman+linux@gmail.com>
1270 * This card reader returns "Illegal Request, Logical Block Address 1282 * This card reader returns "Illegal Request, Logical Block Address
1271 * Out of Range" for the first READ(10) after a new card is inserted. 1283 * Out of Range" for the first READ(10) after a new card is inserted.
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
index 228ccdb8d1c8..4d13e510590e 100644
--- a/drivers/vfio/pci/trace.h
+++ b/drivers/vfio/pci/trace.h
@@ -94,7 +94,7 @@ TRACE_EVENT(vfio_pci_npu2_mmap,
94#endif /* _TRACE_VFIO_PCI_H */ 94#endif /* _TRACE_VFIO_PCI_H */
95 95
96#undef TRACE_INCLUDE_PATH 96#undef TRACE_INCLUDE_PATH
97#define TRACE_INCLUDE_PATH . 97#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
98#undef TRACE_INCLUDE_FILE 98#undef TRACE_INCLUDE_FILE
99#define TRACE_INCLUDE_FILE trace 99#define TRACE_INCLUDE_FILE trace
100 100
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7651cfb14836..73652e21efec 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
878 return -EINVAL; 878 return -EINVAL;
879 if (!unmap->size || unmap->size & mask) 879 if (!unmap->size || unmap->size & mask)
880 return -EINVAL; 880 return -EINVAL;
881 if (unmap->iova + unmap->size < unmap->iova || 881 if (unmap->iova + unmap->size - 1 < unmap->iova ||
882 unmap->size > SIZE_MAX) 882 unmap->size > SIZE_MAX)
883 return -EINVAL; 883 return -EINVAL;
884 884
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 36f3d0f49e60..bca86bf7189f 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net)
1236 if (nvq->done_idx > VHOST_NET_BATCH) 1236 if (nvq->done_idx > VHOST_NET_BATCH)
1237 vhost_net_signal_used(nvq); 1237 vhost_net_signal_used(nvq);
1238 if (unlikely(vq_log)) 1238 if (unlikely(vq_log))
1239 vhost_log_write(vq, vq_log, log, vhost_len); 1239 vhost_log_write(vq, vq_log, log, vhost_len,
1240 vq->iov, in);
1240 total_len += vhost_len; 1241 total_len += vhost_len;
1241 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { 1242 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
1242 vhost_poll_queue(&vq->poll); 1243 vhost_poll_queue(&vq->poll);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8e10ab436d1f..344684f3e2e4 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1127 struct vhost_virtqueue *vq, 1127 struct vhost_virtqueue *vq,
1128 struct vhost_scsi_ctx *vc) 1128 struct vhost_scsi_ctx *vc)
1129{ 1129{
1130 struct virtio_scsi_ctrl_tmf_resp __user *resp;
1131 struct virtio_scsi_ctrl_tmf_resp rsp; 1130 struct virtio_scsi_ctrl_tmf_resp rsp;
1131 struct iov_iter iov_iter;
1132 int ret; 1132 int ret;
1133 1133
1134 pr_debug("%s\n", __func__); 1134 pr_debug("%s\n", __func__);
1135 memset(&rsp, 0, sizeof(rsp)); 1135 memset(&rsp, 0, sizeof(rsp));
1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; 1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1137 resp = vq->iov[vc->out].iov_base; 1137
1138 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1138 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1139 if (!ret) 1139
1140 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1141 if (likely(ret == sizeof(rsp)))
1140 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1142 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1141 else 1143 else
1142 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); 1144 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1147 struct vhost_virtqueue *vq, 1149 struct vhost_virtqueue *vq,
1148 struct vhost_scsi_ctx *vc) 1150 struct vhost_scsi_ctx *vc)
1149{ 1151{
1150 struct virtio_scsi_ctrl_an_resp __user *resp;
1151 struct virtio_scsi_ctrl_an_resp rsp; 1152 struct virtio_scsi_ctrl_an_resp rsp;
1153 struct iov_iter iov_iter;
1152 int ret; 1154 int ret;
1153 1155
1154 pr_debug("%s\n", __func__); 1156 pr_debug("%s\n", __func__);
1155 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1157 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
1156 rsp.response = VIRTIO_SCSI_S_OK; 1158 rsp.response = VIRTIO_SCSI_S_OK;
1157 resp = vq->iov[vc->out].iov_base; 1159
1158 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1160 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1159 if (!ret) 1161
1162 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1163 if (likely(ret == sizeof(rsp)))
1160 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1164 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1161 else 1165 else
1162 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); 1166 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9f7942cbcbb2..15a216cdd507 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1034,8 +1034,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1034 int type, ret; 1034 int type, ret;
1035 1035
1036 ret = copy_from_iter(&type, sizeof(type), from); 1036 ret = copy_from_iter(&type, sizeof(type), from);
1037 if (ret != sizeof(type)) 1037 if (ret != sizeof(type)) {
1038 ret = -EINVAL;
1038 goto done; 1039 goto done;
1040 }
1039 1041
1040 switch (type) { 1042 switch (type) {
1041 case VHOST_IOTLB_MSG: 1043 case VHOST_IOTLB_MSG:
@@ -1054,8 +1056,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1054 1056
1055 iov_iter_advance(from, offset); 1057 iov_iter_advance(from, offset);
1056 ret = copy_from_iter(&msg, sizeof(msg), from); 1058 ret = copy_from_iter(&msg, sizeof(msg), from);
1057 if (ret != sizeof(msg)) 1059 if (ret != sizeof(msg)) {
1060 ret = -EINVAL;
1058 goto done; 1061 goto done;
1062 }
1059 if (vhost_process_iotlb_msg(dev, &msg)) { 1063 if (vhost_process_iotlb_msg(dev, &msg)) {
1060 ret = -EFAULT; 1064 ret = -EFAULT;
1061 goto done; 1065 goto done;
@@ -1733,13 +1737,87 @@ static int log_write(void __user *log_base,
1733 return r; 1737 return r;
1734} 1738}
1735 1739
1740static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1741{
1742 struct vhost_umem *umem = vq->umem;
1743 struct vhost_umem_node *u;
1744 u64 start, end, l, min;
1745 int r;
1746 bool hit = false;
1747
1748 while (len) {
1749 min = len;
1750 /* More than one GPAs can be mapped into a single HVA. So
1751 * iterate all possible umems here to be safe.
1752 */
1753 list_for_each_entry(u, &umem->umem_list, link) {
1754 if (u->userspace_addr > hva - 1 + len ||
1755 u->userspace_addr - 1 + u->size < hva)
1756 continue;
1757 start = max(u->userspace_addr, hva);
1758 end = min(u->userspace_addr - 1 + u->size,
1759 hva - 1 + len);
1760 l = end - start + 1;
1761 r = log_write(vq->log_base,
1762 u->start + start - u->userspace_addr,
1763 l);
1764 if (r < 0)
1765 return r;
1766 hit = true;
1767 min = min(l, min);
1768 }
1769
1770 if (!hit)
1771 return -EFAULT;
1772
1773 len -= min;
1774 hva += min;
1775 }
1776
1777 return 0;
1778}
1779
1780static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1781{
1782 struct iovec iov[64];
1783 int i, ret;
1784
1785 if (!vq->iotlb)
1786 return log_write(vq->log_base, vq->log_addr + used_offset, len);
1787
1788 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1789 len, iov, 64, VHOST_ACCESS_WO);
1790 if (ret)
1791 return ret;
1792
1793 for (i = 0; i < ret; i++) {
1794 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1795 iov[i].iov_len);
1796 if (ret)
1797 return ret;
1798 }
1799
1800 return 0;
1801}
1802
1736int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 1803int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1737 unsigned int log_num, u64 len) 1804 unsigned int log_num, u64 len, struct iovec *iov, int count)
1738{ 1805{
1739 int i, r; 1806 int i, r;
1740 1807
1741 /* Make sure data written is seen before log. */ 1808 /* Make sure data written is seen before log. */
1742 smp_wmb(); 1809 smp_wmb();
1810
1811 if (vq->iotlb) {
1812 for (i = 0; i < count; i++) {
1813 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1814 iov[i].iov_len);
1815 if (r < 0)
1816 return r;
1817 }
1818 return 0;
1819 }
1820
1743 for (i = 0; i < log_num; ++i) { 1821 for (i = 0; i < log_num; ++i) {
1744 u64 l = min(log[i].len, len); 1822 u64 l = min(log[i].len, len);
1745 r = log_write(vq->log_base, log[i].addr, l); 1823 r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1847,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1769 smp_wmb(); 1847 smp_wmb();
1770 /* Log used flag write. */ 1848 /* Log used flag write. */
1771 used = &vq->used->flags; 1849 used = &vq->used->flags;
1772 log_write(vq->log_base, vq->log_addr + 1850 log_used(vq, (used - (void __user *)vq->used),
1773 (used - (void __user *)vq->used), 1851 sizeof vq->used->flags);
1774 sizeof vq->used->flags);
1775 if (vq->log_ctx) 1852 if (vq->log_ctx)
1776 eventfd_signal(vq->log_ctx, 1); 1853 eventfd_signal(vq->log_ctx, 1);
1777 } 1854 }
@@ -1789,9 +1866,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1789 smp_wmb(); 1866 smp_wmb();
1790 /* Log avail event write */ 1867 /* Log avail event write */
1791 used = vhost_avail_event(vq); 1868 used = vhost_avail_event(vq);
1792 log_write(vq->log_base, vq->log_addr + 1869 log_used(vq, (used - (void __user *)vq->used),
1793 (used - (void __user *)vq->used), 1870 sizeof *vhost_avail_event(vq));
1794 sizeof *vhost_avail_event(vq));
1795 if (vq->log_ctx) 1871 if (vq->log_ctx)
1796 eventfd_signal(vq->log_ctx, 1); 1872 eventfd_signal(vq->log_ctx, 1);
1797 } 1873 }
@@ -2191,10 +2267,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2191 /* Make sure data is seen before log. */ 2267 /* Make sure data is seen before log. */
2192 smp_wmb(); 2268 smp_wmb();
2193 /* Log used ring entry write. */ 2269 /* Log used ring entry write. */
2194 log_write(vq->log_base, 2270 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2195 vq->log_addr + 2271 count * sizeof *used);
2196 ((void __user *)used - (void __user *)vq->used),
2197 count * sizeof *used);
2198 } 2272 }
2199 old = vq->last_used_idx; 2273 old = vq->last_used_idx;
2200 new = (vq->last_used_idx += count); 2274 new = (vq->last_used_idx += count);
@@ -2236,9 +2310,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2236 /* Make sure used idx is seen before log. */ 2310 /* Make sure used idx is seen before log. */
2237 smp_wmb(); 2311 smp_wmb();
2238 /* Log used index update. */ 2312 /* Log used index update. */
2239 log_write(vq->log_base, 2313 log_used(vq, offsetof(struct vring_used, idx),
2240 vq->log_addr + offsetof(struct vring_used, idx), 2314 sizeof vq->used->idx);
2241 sizeof vq->used->idx);
2242 if (vq->log_ctx) 2315 if (vq->log_ctx)
2243 eventfd_signal(vq->log_ctx, 1); 2316 eventfd_signal(vq->log_ctx, 1);
2244 } 2317 }
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef7542291..1b675dad5e05 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
205bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); 205bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
206 206
207int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 207int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
208 unsigned int log_num, u64 len); 208 unsigned int log_num, u64 len,
209 struct iovec *iov, int count);
209int vq_iotlb_prefetch(struct vhost_virtqueue *vq); 210int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
210 211
211struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); 212struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bc42d38ae031..3fbc068eaa9b 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
642 hash_del_rcu(&vsock->hash); 642 hash_del_rcu(&vsock->hash);
643 643
644 vsock->guest_cid = guest_cid; 644 vsock->guest_cid = guest_cid;
645 hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); 645 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
646 mutex_unlock(&vhost_vsock_mutex); 646 mutex_unlock(&vhost_vsock_mutex);
647 647
648 return 0; 648 return 0;
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 6d8dc2c77520..51e0c4be08df 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -174,7 +174,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
174 return -ENODEV; 174 return -ENODEV;
175 } 175 }
176 for_each_child_of_node(nproot, np) { 176 for_each_child_of_node(nproot, np) {
177 if (!of_node_cmp(np->name, name)) { 177 if (of_node_name_eq(np, name)) {
178 of_property_read_u32(np, "marvell,88pm860x-iset", 178 of_property_read_u32(np, "marvell,88pm860x-iset",
179 &iset); 179 &iset);
180 data->iset = PM8606_WLED_CURRENT(iset); 180 data->iset = PM8606_WLED_CURRENT(iset);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index f9ef0673a083..feb90764a811 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -30,6 +30,7 @@ struct pwm_bl_data {
30 struct device *dev; 30 struct device *dev;
31 unsigned int lth_brightness; 31 unsigned int lth_brightness;
32 unsigned int *levels; 32 unsigned int *levels;
33 bool enabled;
33 struct regulator *power_supply; 34 struct regulator *power_supply;
34 struct gpio_desc *enable_gpio; 35 struct gpio_desc *enable_gpio;
35 unsigned int scale; 36 unsigned int scale;
@@ -50,7 +51,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
50 int err; 51 int err;
51 52
52 pwm_get_state(pb->pwm, &state); 53 pwm_get_state(pb->pwm, &state);
53 if (state.enabled) 54 if (pb->enabled)
54 return; 55 return;
55 56
56 err = regulator_enable(pb->power_supply); 57 err = regulator_enable(pb->power_supply);
@@ -65,6 +66,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
65 66
66 if (pb->enable_gpio) 67 if (pb->enable_gpio)
67 gpiod_set_value_cansleep(pb->enable_gpio, 1); 68 gpiod_set_value_cansleep(pb->enable_gpio, 1);
69
70 pb->enabled = true;
68} 71}
69 72
70static void pwm_backlight_power_off(struct pwm_bl_data *pb) 73static void pwm_backlight_power_off(struct pwm_bl_data *pb)
@@ -72,7 +75,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
72 struct pwm_state state; 75 struct pwm_state state;
73 76
74 pwm_get_state(pb->pwm, &state); 77 pwm_get_state(pb->pwm, &state);
75 if (!state.enabled) 78 if (!pb->enabled)
76 return; 79 return;
77 80
78 if (pb->enable_gpio) 81 if (pb->enable_gpio)
@@ -86,6 +89,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
86 pwm_apply_state(pb->pwm, &state); 89 pwm_apply_state(pb->pwm, &state);
87 90
88 regulator_disable(pb->power_supply); 91 regulator_disable(pb->power_supply);
92 pb->enabled = false;
89} 93}
90 94
91static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) 95static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
@@ -269,6 +273,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
269 memset(data, 0, sizeof(*data)); 273 memset(data, 0, sizeof(*data));
270 274
271 /* 275 /*
276 * These values are optional and set as 0 by default, the out values
277 * are modified only if a valid u32 value can be decoded.
278 */
279 of_property_read_u32(node, "post-pwm-on-delay-ms",
280 &data->post_pwm_on_delay);
281 of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
282
283 data->enable_gpio = -EINVAL;
284
285 /*
272 * Determine the number of brightness levels, if this property is not 286 * Determine the number of brightness levels, if this property is not
273 * set a default table of brightness levels will be used. 287 * set a default table of brightness levels will be used.
274 */ 288 */
@@ -380,15 +394,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
380 data->max_brightness--; 394 data->max_brightness--;
381 } 395 }
382 396
383 /*
384 * These values are optional and set as 0 by default, the out values
385 * are modified only if a valid u32 value can be decoded.
386 */
387 of_property_read_u32(node, "post-pwm-on-delay-ms",
388 &data->post_pwm_on_delay);
389 of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
390
391 data->enable_gpio = -EINVAL;
392 return 0; 397 return 0;
393} 398}
394 399
@@ -483,6 +488,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
483 pb->check_fb = data->check_fb; 488 pb->check_fb = data->check_fb;
484 pb->exit = data->exit; 489 pb->exit = data->exit;
485 pb->dev = &pdev->dev; 490 pb->dev = &pdev->dev;
491 pb->enabled = false;
486 pb->post_pwm_on_delay = data->post_pwm_on_delay; 492 pb->post_pwm_on_delay = data->post_pwm_on_delay;
487 pb->pwm_off_delay = data->pwm_off_delay; 493 pb->pwm_off_delay = data->pwm_off_delay;
488 494
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8976190b6c1f..bfa1360ec750 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt)
510 continue; 510 continue;
511 } 511 }
512#endif 512#endif
513
514 if (!strncmp(options, "logo-pos:", 9)) {
515 options += 9;
516 if (!strcmp(options, "center"))
517 fb_center_logo = true;
518 continue;
519 }
513 } 520 }
514 return 1; 521 return 1;
515} 522}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 558ed2ed3124..cb43a2258c51 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb);
53int num_registered_fb __read_mostly; 53int num_registered_fb __read_mostly;
54EXPORT_SYMBOL(num_registered_fb); 54EXPORT_SYMBOL(num_registered_fb);
55 55
56bool fb_center_logo __read_mostly;
57EXPORT_SYMBOL(fb_center_logo);
58
56static struct fb_info *get_fb_info(unsigned int idx) 59static struct fb_info *get_fb_info(unsigned int idx)
57{ 60{
58 struct fb_info *fb_info; 61 struct fb_info *fb_info;
@@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
506 fb_set_logo(info, logo, logo_new, fb_logo.depth); 509 fb_set_logo(info, logo, logo_new, fb_logo.depth);
507 } 510 }
508 511
509#ifdef CONFIG_FB_LOGO_CENTER 512 if (fb_center_logo) {
510 {
511 int xres = info->var.xres; 513 int xres = info->var.xres;
512 int yres = info->var.yres; 514 int yres = info->var.yres;
513 515
@@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
520 --n; 522 --n;
521 image.dx = (xres - n * (logo->width + 8) - 8) / 2; 523 image.dx = (xres - n * (logo->width + 8) - 8) / 2;
522 image.dy = y ?: (yres - logo->height) / 2; 524 image.dy = y ?: (yres - logo->height) / 2;
525 } else {
526 image.dx = 0;
527 image.dy = y;
523 } 528 }
524#else 529
525 image.dx = 0;
526 image.dy = y;
527#endif
528 image.width = logo->width; 530 image.width = logo->width;
529 image.height = logo->height; 531 image.height = logo->height;
530 532
@@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
684 } 686 }
685 687
686 height = fb_logo.logo->height; 688 height = fb_logo.logo->height;
687#ifdef CONFIG_FB_LOGO_CENTER 689 if (fb_center_logo)
688 height += (yres - fb_logo.logo->height) / 2; 690 height += (yres - fb_logo.logo->height) / 2;
689#endif
690 691
691 return fb_prepare_extra_logos(info, height, yres); 692 return fb_prepare_extra_logos(info, height, yres);
692} 693}
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index a74096c53cb5..43f2a4816860 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device)
1446 da8xx_fb_fix.line_length - 1; 1446 da8xx_fb_fix.line_length - 1;
1447 1447
1448 /* allocate palette buffer */ 1448 /* allocate palette buffer */
1449 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1449 par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
1450 &par->p_palette_base, 1450 &par->p_palette_base,
1451 GFP_KERNEL | GFP_DMA); 1451 GFP_KERNEL | GFP_DMA);
1452 if (!par->v_palette_base) { 1452 if (!par->v_palette_base) {
1453 dev_err(&device->dev, 1453 dev_err(&device->dev,
1454 "GLCD: kmalloc for palette buffer failed\n"); 1454 "GLCD: kmalloc for palette buffer failed\n");
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 31f769d67195..057d3cdef92e 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
318} 318}
319 319
320static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, 320static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
321 const char *name, unsigned long address) 321 unsigned long address)
322{ 322{
323 struct offb_par *par = (struct offb_par *) info->par; 323 struct offb_par *par = (struct offb_par *) info->par;
324 324
325 if (dp && !strncmp(name, "ATY,Rage128", 11)) { 325 if (of_node_name_prefix(dp, "ATY,Rage128")) {
326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
327 if (par->cmap_adr) 327 if (par->cmap_adr)
328 par->cmap_type = cmap_r128; 328 par->cmap_type = cmap_r128;
329 } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) 329 } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
330 || !strncmp(name, "ATY,RageM3p12A", 14))) { 330 of_node_name_prefix(dp, "ATY,RageM3p12A")) {
331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
332 if (par->cmap_adr) 332 if (par->cmap_adr)
333 par->cmap_type = cmap_M3A; 333 par->cmap_type = cmap_M3A;
334 } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { 334 } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
336 if (par->cmap_adr) 336 if (par->cmap_adr)
337 par->cmap_type = cmap_M3B; 337 par->cmap_type = cmap_M3B;
338 } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { 338 } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); 339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
340 if (par->cmap_adr) 340 if (par->cmap_adr)
341 par->cmap_type = cmap_radeon; 341 par->cmap_type = cmap_radeon;
342 } else if (!strncmp(name, "ATY,", 4)) { 342 } else if (of_node_name_prefix(dp, "ATY,")) {
343 unsigned long base = address & 0xff000000UL; 343 unsigned long base = address & 0xff000000UL;
344 par->cmap_adr = 344 par->cmap_adr =
345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0; 345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); 350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
351 if (par->cmap_adr) 351 if (par->cmap_adr)
352 par->cmap_type = cmap_gxt2000; 352 par->cmap_type = cmap_gxt2000;
353 } else if (dp && !strncmp(name, "vga,Display-", 12)) { 353 } else if (of_node_name_prefix(dp, "vga,Display-")) {
354 /* Look for AVIVO initialized by SLOF */ 354 /* Look for AVIVO initialized by SLOF */
355 struct device_node *pciparent = of_get_parent(dp); 355 struct device_node *pciparent = of_get_parent(dp);
356 const u32 *vid, *did; 356 const u32 *vid, *did;
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
438 438
439 par->cmap_type = cmap_unknown; 439 par->cmap_type = cmap_unknown;
440 if (depth == 8) 440 if (depth == 8)
441 offb_init_palette_hacks(info, dp, name, address); 441 offb_init_palette_hacks(info, dp, address);
442 else 442 else
443 fix->visual = FB_VISUAL_TRUECOLOR; 443 fix->visual = FB_VISUAL_TRUECOLOR;
444 444
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 53f93616c671..8e23160ec59f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
609 609
610 int r = 0; 610 int r = 0;
611 611
612 memset(&p, 0, sizeof(p));
613
612 switch (cmd) { 614 switch (cmd) {
613 case OMAPFB_SYNC_GFX: 615 case OMAPFB_SYNC_GFX:
614 DBG("ioctl SYNC_GFX\n"); 616 DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 1e972c4e88b1..d1f6196c8b9a 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,15 +10,6 @@ menuconfig LOGO
10 10
11if LOGO 11if LOGO
12 12
13config FB_LOGO_CENTER
14 bool "Center the logo"
15 depends on FB=y
16 help
17 When this option is selected, the bootup logo is centered both
18 horizontally and vertically. If more than one logo is displayed
19 due to multiple CPUs, the collected line of logos is centered
20 as a whole.
21
22config FB_LOGO_EXTRA 13config FB_LOGO_EXTRA
23 bool 14 bool
24 depends on FB=y 15 depends on FB=y
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 728ecd1eea30..fb12fe205f86 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
61 VIRTIO_BALLOON_VQ_MAX 61 VIRTIO_BALLOON_VQ_MAX
62}; 62};
63 63
64enum virtio_balloon_config_read {
65 VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
66};
67
64struct virtio_balloon { 68struct virtio_balloon {
65 struct virtio_device *vdev; 69 struct virtio_device *vdev;
66 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; 70 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
@@ -77,14 +81,20 @@ struct virtio_balloon {
77 /* Prevent updating balloon when it is being canceled. */ 81 /* Prevent updating balloon when it is being canceled. */
78 spinlock_t stop_update_lock; 82 spinlock_t stop_update_lock;
79 bool stop_update; 83 bool stop_update;
84 /* Bitmap to indicate if reading the related config fields are needed */
85 unsigned long config_read_bitmap;
80 86
81 /* The list of allocated free pages, waiting to be given back to mm */ 87 /* The list of allocated free pages, waiting to be given back to mm */
82 struct list_head free_page_list; 88 struct list_head free_page_list;
83 spinlock_t free_page_list_lock; 89 spinlock_t free_page_list_lock;
84 /* The number of free page blocks on the above list */ 90 /* The number of free page blocks on the above list */
85 unsigned long num_free_page_blocks; 91 unsigned long num_free_page_blocks;
86 /* The cmd id received from host */ 92 /*
87 u32 cmd_id_received; 93 * The cmd id received from host.
94 * Read it via virtio_balloon_cmd_id_received to get the latest value
95 * sent from host.
96 */
97 u32 cmd_id_received_cache;
88 /* The cmd id that is actively in use */ 98 /* The cmd id that is actively in use */
89 __virtio32 cmd_id_active; 99 __virtio32 cmd_id_active;
90 /* Buffer to store the stop sign */ 100 /* Buffer to store the stop sign */
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
390 return num_returned; 400 return num_returned;
391} 401}
392 402
403static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
404{
405 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
406 return;
407
408 /* No need to queue the work if the bit was already set. */
409 if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
410 &vb->config_read_bitmap))
411 return;
412
413 queue_work(vb->balloon_wq, &vb->report_free_page_work);
414}
415
393static void virtballoon_changed(struct virtio_device *vdev) 416static void virtballoon_changed(struct virtio_device *vdev)
394{ 417{
395 struct virtio_balloon *vb = vdev->priv; 418 struct virtio_balloon *vb = vdev->priv;
396 unsigned long flags; 419 unsigned long flags;
397 s64 diff = towards_target(vb);
398
399 if (diff) {
400 spin_lock_irqsave(&vb->stop_update_lock, flags);
401 if (!vb->stop_update)
402 queue_work(system_freezable_wq,
403 &vb->update_balloon_size_work);
404 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
405 }
406 420
407 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { 421 spin_lock_irqsave(&vb->stop_update_lock, flags);
408 virtio_cread(vdev, struct virtio_balloon_config, 422 if (!vb->stop_update) {
409 free_page_report_cmd_id, &vb->cmd_id_received); 423 queue_work(system_freezable_wq,
410 if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { 424 &vb->update_balloon_size_work);
411 /* Pass ULONG_MAX to give back all the free pages */ 425 virtio_balloon_queue_free_page_work(vb);
412 return_free_pages_to_mm(vb, ULONG_MAX);
413 } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
414 vb->cmd_id_received !=
415 virtio32_to_cpu(vdev, vb->cmd_id_active)) {
416 spin_lock_irqsave(&vb->stop_update_lock, flags);
417 if (!vb->stop_update) {
418 queue_work(vb->balloon_wq,
419 &vb->report_free_page_work);
420 }
421 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
422 }
423 } 426 }
427 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
424} 428}
425 429
426static void update_balloon_size(struct virtio_balloon *vb) 430static void update_balloon_size(struct virtio_balloon *vb)
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
527 return 0; 531 return 0;
528} 532}
529 533
534static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
535{
536 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
537 &vb->config_read_bitmap))
538 virtio_cread(vb->vdev, struct virtio_balloon_config,
539 free_page_report_cmd_id,
540 &vb->cmd_id_received_cache);
541
542 return vb->cmd_id_received_cache;
543}
544
530static int send_cmd_id_start(struct virtio_balloon *vb) 545static int send_cmd_id_start(struct virtio_balloon *vb)
531{ 546{
532 struct scatterlist sg; 547 struct scatterlist sg;
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
537 while (virtqueue_get_buf(vq, &unused)) 552 while (virtqueue_get_buf(vq, &unused))
538 ; 553 ;
539 554
540 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); 555 vb->cmd_id_active = virtio32_to_cpu(vb->vdev,
556 virtio_balloon_cmd_id_received(vb));
541 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); 557 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
542 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); 558 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
543 if (!err) 559 if (!err)
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
620 * stop the reporting. 636 * stop the reporting.
621 */ 637 */
622 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); 638 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
623 if (cmd_id_active != vb->cmd_id_received) 639 if (unlikely(cmd_id_active !=
640 virtio_balloon_cmd_id_received(vb)))
624 break; 641 break;
625 642
626 /* 643 /*
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
637 return 0; 654 return 0;
638} 655}
639 656
640static void report_free_page_func(struct work_struct *work) 657static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
641{ 658{
642 int err; 659 int err;
643 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
644 report_free_page_work);
645 struct device *dev = &vb->vdev->dev; 660 struct device *dev = &vb->vdev->dev;
646 661
647 /* Start by sending the received cmd id to host with an outbuf. */ 662 /* Start by sending the received cmd id to host with an outbuf. */
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
659 dev_err(dev, "Failed to send a stop id, err = %d\n", err); 674 dev_err(dev, "Failed to send a stop id, err = %d\n", err);
660} 675}
661 676
677static void report_free_page_func(struct work_struct *work)
678{
679 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
680 report_free_page_work);
681 u32 cmd_id_received;
682
683 cmd_id_received = virtio_balloon_cmd_id_received(vb);
684 if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
685 /* Pass ULONG_MAX to give back all the free pages */
686 return_free_pages_to_mm(vb, ULONG_MAX);
687 } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
688 cmd_id_received !=
689 virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
690 virtio_balloon_report_free_page(vb);
691 }
692}
693
662#ifdef CONFIG_BALLOON_COMPACTION 694#ifdef CONFIG_BALLOON_COMPACTION
663/* 695/*
664 * virtballoon_migratepage - perform the balloon page migration on behalf of 696 * virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
885 goto out_del_vqs; 917 goto out_del_vqs;
886 } 918 }
887 INIT_WORK(&vb->report_free_page_work, report_free_page_func); 919 INIT_WORK(&vb->report_free_page_work, report_free_page_func);
888 vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; 920 vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
889 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, 921 vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
890 VIRTIO_BALLOON_CMD_ID_STOP); 922 VIRTIO_BALLOON_CMD_ID_STOP);
891 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, 923 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 4cd9ea5c75be..d9dd0f789279 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
468{ 468{
469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0); 470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
471 int i, err; 471 int i, err, queue_idx = 0;
472 472
473 err = request_irq(irq, vm_interrupt, IRQF_SHARED, 473 err = request_irq(irq, vm_interrupt, IRQF_SHARED,
474 dev_name(&vdev->dev), vm_dev); 474 dev_name(&vdev->dev), vm_dev);
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
476 return err; 476 return err;
477 477
478 for (i = 0; i < nvqs; ++i) { 478 for (i = 0; i < nvqs; ++i) {
479 vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], 479 if (!names[i]) {
480 vqs[i] = NULL;
481 continue;
482 }
483
484 vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
480 ctx ? ctx[i] : false); 485 ctx ? ctx[i] : false);
481 if (IS_ERR(vqs[i])) { 486 if (IS_ERR(vqs[i])) {
482 vm_del_vqs(vdev); 487 vm_del_vqs(vdev);
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 465a6f5142cc..d0584c040c60 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
285{ 285{
286 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 286 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
287 u16 msix_vec; 287 u16 msix_vec;
288 int i, err, nvectors, allocated_vectors; 288 int i, err, nvectors, allocated_vectors, queue_idx = 0;
289 289
290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
291 if (!vp_dev->vqs) 291 if (!vp_dev->vqs)
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
321 msix_vec = allocated_vectors++; 321 msix_vec = allocated_vectors++;
322 else 322 else
323 msix_vec = VP_MSIX_VQ_VECTOR; 323 msix_vec = VP_MSIX_VQ_VECTOR;
324 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 324 vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
325 ctx ? ctx[i] : false, 325 ctx ? ctx[i] : false,
326 msix_vec); 326 msix_vec);
327 if (IS_ERR(vqs[i])) { 327 if (IS_ERR(vqs[i])) {
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
356 const char * const names[], const bool *ctx) 356 const char * const names[], const bool *ctx)
357{ 357{
358 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 358 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
359 int i, err; 359 int i, err, queue_idx = 0;
360 360
361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
362 if (!vp_dev->vqs) 362 if (!vp_dev->vqs)
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
374 vqs[i] = NULL; 374 vqs[i] = NULL;
375 continue; 375 continue;
376 } 376 }
377 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 377 vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
378 ctx ? ctx[i] : false, 378 ctx ? ctx[i] : false,
379 VIRTIO_MSI_NO_VECTOR); 379 VIRTIO_MSI_NO_VECTOR);
380 if (IS_ERR(vqs[i])) { 380 if (IS_ERR(vqs[i])) {
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764717c4..81208cd3f4ec 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
17#include <linux/watchdog.h> 17#include <linux/watchdog.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/mod_devicetable.h>
20 21
21#include <asm/mach-ralink/ralink_regs.h> 22#include <asm/mach-ralink/ralink_regs.h>
22 23
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0a7d10..db7c57d82cfd 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
18#include <linux/watchdog.h> 18#include <linux/watchdog.h>
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mod_devicetable.h>
21 22
22#include <asm/mach-ralink/ralink_regs.h> 23#include <asm/mach-ralink/ralink_regs.h>
23 24
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 0d3a0fbbd7a5..52941207a12a 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
79 return -ENOMEM; 79 return -ENOMEM;
80 80
81 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 81 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
82 if (IS_ERR(res)) 82 if (!res)
83 return PTR_ERR(res); 83 return -ENODEV;
84 84
85 priv->io_base = devm_ioport_map(&pdev->dev, res->start, 85 priv->io_base = devm_ioport_map(&pdev->dev, res->start,
86 resource_size(res)); 86 resource_size(res));
87 if (IS_ERR(priv->io_base)) 87 if (!priv->io_base)
88 return PTR_ERR(priv->io_base); 88 return -ENOMEM;
89 89
90 watchdog_set_drvdata(&priv->wdd, priv); 90 watchdog_set_drvdata(&priv->wdd, priv);
91 91
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 93194f3e7540..117e76b2f939 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
1650 xen_have_vector_callback = 0; 1650 xen_have_vector_callback = 0;
1651 return; 1651 return;
1652 } 1652 }
1653 pr_info("Xen HVM callback vector for event delivery is enabled\n"); 1653 pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, 1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1655 xen_hvm_callback_vector); 1655 xen_hvm_callback_vector);
1656 } 1656 }
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 2e5d845b5091..7aa64d1b119c 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
160 160
161 /* write the data, then modify the indexes */ 161 /* write the data, then modify the indexes */
162 virt_wmb(); 162 virt_wmb();
163 if (ret < 0) 163 if (ret < 0) {
164 atomic_set(&map->read, 0);
164 intf->in_error = ret; 165 intf->in_error = ret;
165 else 166 } else
166 intf->in_prod = prod + ret; 167 intf->in_prod = prod + ret;
167 /* update the indexes, then notify the other end */ 168 /* update the indexes, then notify the other end */
168 virt_wmb(); 169 virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
282static void pvcalls_sk_state_change(struct sock *sock) 283static void pvcalls_sk_state_change(struct sock *sock)
283{ 284{
284 struct sock_mapping *map = sock->sk_user_data; 285 struct sock_mapping *map = sock->sk_user_data;
285 struct pvcalls_data_intf *intf;
286 286
287 if (map == NULL) 287 if (map == NULL)
288 return; 288 return;
289 289
290 intf = map->ring; 290 atomic_inc(&map->read);
291 intf->in_error = -ENOTCONN;
292 notify_remote_via_irq(map->irq); 291 notify_remote_via_irq(map->irq);
293} 292}
294 293
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..8a249c95c193 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
31#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) 31#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
32#define PVCALLS_FRONT_MAX_SPIN 5000 32#define PVCALLS_FRONT_MAX_SPIN 5000
33 33
34static struct proto pvcalls_proto = {
35 .name = "PVCalls",
36 .owner = THIS_MODULE,
37 .obj_size = sizeof(struct sock),
38};
39
34struct pvcalls_bedata { 40struct pvcalls_bedata {
35 struct xen_pvcalls_front_ring ring; 41 struct xen_pvcalls_front_ring ring;
36 grant_ref_t ref; 42 grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
335 return ret; 341 return ret;
336} 342}
337 343
344static void free_active_ring(struct sock_mapping *map)
345{
346 if (!map->active.ring)
347 return;
348
349 free_pages((unsigned long)map->active.data.in,
350 map->active.ring->ring_order);
351 free_page((unsigned long)map->active.ring);
352}
353
354static int alloc_active_ring(struct sock_mapping *map)
355{
356 void *bytes;
357
358 map->active.ring = (struct pvcalls_data_intf *)
359 get_zeroed_page(GFP_KERNEL);
360 if (!map->active.ring)
361 goto out;
362
363 map->active.ring->ring_order = PVCALLS_RING_ORDER;
364 bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
365 PVCALLS_RING_ORDER);
366 if (!bytes)
367 goto out;
368
369 map->active.data.in = bytes;
370 map->active.data.out = bytes +
371 XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
372
373 return 0;
374
375out:
376 free_active_ring(map);
377 return -ENOMEM;
378}
379
338static int create_active(struct sock_mapping *map, int *evtchn) 380static int create_active(struct sock_mapping *map, int *evtchn)
339{ 381{
340 void *bytes; 382 void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
343 *evtchn = -1; 385 *evtchn = -1;
344 init_waitqueue_head(&map->active.inflight_conn_req); 386 init_waitqueue_head(&map->active.inflight_conn_req);
345 387
346 map->active.ring = (struct pvcalls_data_intf *) 388 bytes = map->active.data.in;
347 __get_free_page(GFP_KERNEL | __GFP_ZERO);
348 if (map->active.ring == NULL)
349 goto out_error;
350 map->active.ring->ring_order = PVCALLS_RING_ORDER;
351 bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
352 PVCALLS_RING_ORDER);
353 if (bytes == NULL)
354 goto out_error;
355 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 389 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
356 map->active.ring->ref[i] = gnttab_grant_foreign_access( 390 map->active.ring->ref[i] = gnttab_grant_foreign_access(
357 pvcalls_front_dev->otherend_id, 391 pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
361 pvcalls_front_dev->otherend_id, 395 pvcalls_front_dev->otherend_id,
362 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); 396 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
363 397
364 map->active.data.in = bytes;
365 map->active.data.out = bytes +
366 XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
367
368 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); 398 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
369 if (ret) 399 if (ret)
370 goto out_error; 400 goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
385out_error: 415out_error:
386 if (*evtchn >= 0) 416 if (*evtchn >= 0)
387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 417 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
388 free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
389 free_page((unsigned long)map->active.ring);
390 return ret; 418 return ret;
391} 419}
392 420
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
406 return PTR_ERR(map); 434 return PTR_ERR(map);
407 435
408 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 436 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
437 ret = alloc_active_ring(map);
438 if (ret < 0) {
439 pvcalls_exit_sock(sock);
440 return ret;
441 }
409 442
410 spin_lock(&bedata->socket_lock); 443 spin_lock(&bedata->socket_lock);
411 ret = get_request(bedata, &req_id); 444 ret = get_request(bedata, &req_id);
412 if (ret < 0) { 445 if (ret < 0) {
413 spin_unlock(&bedata->socket_lock); 446 spin_unlock(&bedata->socket_lock);
447 free_active_ring(map);
414 pvcalls_exit_sock(sock); 448 pvcalls_exit_sock(sock);
415 return ret; 449 return ret;
416 } 450 }
417 ret = create_active(map, &evtchn); 451 ret = create_active(map, &evtchn);
418 if (ret < 0) { 452 if (ret < 0) {
419 spin_unlock(&bedata->socket_lock); 453 spin_unlock(&bedata->socket_lock);
454 free_active_ring(map);
420 pvcalls_exit_sock(sock); 455 pvcalls_exit_sock(sock);
421 return ret; 456 return ret;
422 } 457 }
@@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf,
469 virt_mb(); 504 virt_mb();
470 505
471 size = pvcalls_queued(prod, cons, array_size); 506 size = pvcalls_queued(prod, cons, array_size);
472 if (size >= array_size) 507 if (size > array_size)
473 return -EINVAL; 508 return -EINVAL;
509 if (size == array_size)
510 return 0;
474 if (len > array_size - size) 511 if (len > array_size - size)
475 len = array_size - size; 512 len = array_size - size;
476 513
@@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
560 error = intf->in_error; 597 error = intf->in_error;
561 /* get pointers before reading from the ring */ 598 /* get pointers before reading from the ring */
562 virt_rmb(); 599 virt_rmb();
563 if (error < 0)
564 return error;
565 600
566 size = pvcalls_queued(prod, cons, array_size); 601 size = pvcalls_queued(prod, cons, array_size);
567 masked_prod = pvcalls_mask(prod, array_size); 602 masked_prod = pvcalls_mask(prod, array_size);
568 masked_cons = pvcalls_mask(cons, array_size); 603 masked_cons = pvcalls_mask(cons, array_size);
569 604
570 if (size == 0) 605 if (size == 0)
571 return 0; 606 return error ?: size;
572 607
573 if (len > size) 608 if (len > size)
574 len = size; 609 len = size;
@@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
780 } 815 }
781 } 816 }
782 817
783 spin_lock(&bedata->socket_lock); 818 map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
784 ret = get_request(bedata, &req_id); 819 if (map2 == NULL) {
785 if (ret < 0) {
786 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 820 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
787 (void *)&map->passive.flags); 821 (void *)&map->passive.flags);
788 spin_unlock(&bedata->socket_lock); 822 pvcalls_exit_sock(sock);
823 return -ENOMEM;
824 }
825 ret = alloc_active_ring(map2);
826 if (ret < 0) {
827 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
828 (void *)&map->passive.flags);
829 kfree(map2);
789 pvcalls_exit_sock(sock); 830 pvcalls_exit_sock(sock);
790 return ret; 831 return ret;
791 } 832 }
792 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); 833 spin_lock(&bedata->socket_lock);
793 if (map2 == NULL) { 834 ret = get_request(bedata, &req_id);
835 if (ret < 0) {
794 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 836 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
795 (void *)&map->passive.flags); 837 (void *)&map->passive.flags);
796 spin_unlock(&bedata->socket_lock); 838 spin_unlock(&bedata->socket_lock);
839 free_active_ring(map2);
840 kfree(map2);
797 pvcalls_exit_sock(sock); 841 pvcalls_exit_sock(sock);
798 return -ENOMEM; 842 return ret;
799 } 843 }
844
800 ret = create_active(map2, &evtchn); 845 ret = create_active(map2, &evtchn);
801 if (ret < 0) { 846 if (ret < 0) {
847 free_active_ring(map2);
802 kfree(map2); 848 kfree(map2);
803 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 849 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
804 (void *)&map->passive.flags); 850 (void *)&map->passive.flags);
@@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
839 885
840received: 886received:
841 map2->sock = newsock; 887 map2->sock = newsock;
842 newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); 888 newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
843 if (!newsock->sk) { 889 if (!newsock->sk) {
844 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 890 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
845 map->passive.inflight_req_id = PVCALLS_INVALID_ID; 891 map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock)
1032 spin_lock(&bedata->socket_lock); 1078 spin_lock(&bedata->socket_lock);
1033 list_del(&map->list); 1079 list_del(&map->list);
1034 spin_unlock(&bedata->socket_lock); 1080 spin_unlock(&bedata->socket_lock);
1035 if (READ_ONCE(map->passive.inflight_req_id) != 1081 if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
1036 PVCALLS_INVALID_ID) { 1082 READ_ONCE(map->passive.inflight_req_id) != 0) {
1037 pvcalls_front_free_map(bedata, 1083 pvcalls_front_free_map(bedata,
1038 map->passive.accept_map); 1084 map->passive.accept_map);
1039 } 1085 }
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0568fd986821..e432bd27a2e7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -208,7 +208,7 @@ again:
208 /* The new front of the queue now owns the state variables. */ 208 /* The new front of the queue now owns the state variables. */
209 next = list_entry(vnode->pending_locks.next, 209 next = list_entry(vnode->pending_locks.next,
210 struct file_lock, fl_u.afs.link); 210 struct file_lock, fl_u.afs.link);
211 vnode->lock_key = afs_file_key(next->fl_file); 211 vnode->lock_key = key_get(afs_file_key(next->fl_file));
212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 212 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 213 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
214 goto again; 214 goto again;
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
413 /* The new front of the queue now owns the state variables. */ 413 /* The new front of the queue now owns the state variables. */
414 next = list_entry(vnode->pending_locks.next, 414 next = list_entry(vnode->pending_locks.next,
415 struct file_lock, fl_u.afs.link); 415 struct file_lock, fl_u.afs.link);
416 vnode->lock_key = afs_file_key(next->fl_file); 416 vnode->lock_key = key_get(afs_file_key(next->fl_file));
417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE; 417 vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB; 418 vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
419 afs_lock_may_be_available(vnode); 419 afs_lock_may_be_available(vnode);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 6b17d3620414..1a4ce07fb406 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -414,7 +414,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { 414 } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
415 valid = true; 415 valid = true;
416 } else { 416 } else {
417 vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
418 vnode->cb_v_break = vnode->volume->cb_v_break; 417 vnode->cb_v_break = vnode->volume->cb_v_break;
419 valid = false; 418 valid = false;
420 } 419 }
@@ -546,6 +545,8 @@ void afs_evict_inode(struct inode *inode)
546#endif 545#endif
547 546
548 afs_put_permits(rcu_access_pointer(vnode->permit_cache)); 547 afs_put_permits(rcu_access_pointer(vnode->permit_cache));
548 key_put(vnode->lock_key);
549 vnode->lock_key = NULL;
549 _leave(""); 550 _leave("");
550} 551}
551 552
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index 07bc10f076aa..d443e2bfa094 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -161,3 +161,14 @@ struct yfs_xdr_YFSStoreVolumeStatus {
161 struct yfs_xdr_u64 max_quota; 161 struct yfs_xdr_u64 max_quota;
162 struct yfs_xdr_u64 file_quota; 162 struct yfs_xdr_u64 file_quota;
163} __packed; 163} __packed;
164
165enum yfs_lock_type {
166 yfs_LockNone = -1,
167 yfs_LockRead = 0,
168 yfs_LockWrite = 1,
169 yfs_LockExtend = 2,
170 yfs_LockRelease = 3,
171 yfs_LockMandatoryRead = 0x100,
172 yfs_LockMandatoryWrite = 0x101,
173 yfs_LockMandatoryExtend = 0x102,
174};
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index a7b44863d502..2c588f9bbbda 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -23,6 +23,7 @@ struct workqueue_struct *afs_async_calls;
23static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long); 23static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
24static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *); 24static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
25static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long); 25static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
26static void afs_delete_async_call(struct work_struct *);
26static void afs_process_async_call(struct work_struct *); 27static void afs_process_async_call(struct work_struct *);
27static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long); 28static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
28static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long); 29static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
@@ -203,20 +204,26 @@ void afs_put_call(struct afs_call *call)
203 } 204 }
204} 205}
205 206
207static struct afs_call *afs_get_call(struct afs_call *call,
208 enum afs_call_trace why)
209{
210 int u = atomic_inc_return(&call->usage);
211
212 trace_afs_call(call, why, u,
213 atomic_read(&call->net->nr_outstanding_calls),
214 __builtin_return_address(0));
215 return call;
216}
217
206/* 218/*
207 * Queue the call for actual work. 219 * Queue the call for actual work.
208 */ 220 */
209static void afs_queue_call_work(struct afs_call *call) 221static void afs_queue_call_work(struct afs_call *call)
210{ 222{
211 if (call->type->work) { 223 if (call->type->work) {
212 int u = atomic_inc_return(&call->usage);
213
214 trace_afs_call(call, afs_call_trace_work, u,
215 atomic_read(&call->net->nr_outstanding_calls),
216 __builtin_return_address(0));
217
218 INIT_WORK(&call->work, call->type->work); 224 INIT_WORK(&call->work, call->type->work);
219 225
226 afs_get_call(call, afs_call_trace_work);
220 if (!queue_work(afs_wq, &call->work)) 227 if (!queue_work(afs_wq, &call->work))
221 afs_put_call(call); 228 afs_put_call(call);
222 } 229 }
@@ -398,6 +405,12 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
398 } 405 }
399 } 406 }
400 407
408 /* If the call is going to be asynchronous, we need an extra ref for
409 * the call to hold itself so the caller need not hang on to its ref.
410 */
411 if (call->async)
412 afs_get_call(call, afs_call_trace_get);
413
401 /* create a call */ 414 /* create a call */
402 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, 415 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
403 (unsigned long)call, 416 (unsigned long)call,
@@ -438,15 +451,17 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
438 goto error_do_abort; 451 goto error_do_abort;
439 } 452 }
440 453
441 /* at this point, an async call may no longer exist as it may have 454 /* Note that at this point, we may have received the reply or an abort
442 * already completed */ 455 * - and an asynchronous call may already have completed.
443 if (call->async) 456 */
457 if (call->async) {
458 afs_put_call(call);
444 return -EINPROGRESS; 459 return -EINPROGRESS;
460 }
445 461
446 return afs_wait_for_call_to_complete(call, ac); 462 return afs_wait_for_call_to_complete(call, ac);
447 463
448error_do_abort: 464error_do_abort:
449 call->state = AFS_CALL_COMPLETE;
450 if (ret != -ECONNABORTED) { 465 if (ret != -ECONNABORTED) {
451 rxrpc_kernel_abort_call(call->net->socket, rxcall, 466 rxrpc_kernel_abort_call(call->net->socket, rxcall,
452 RX_USER_ABORT, ret, "KSD"); 467 RX_USER_ABORT, ret, "KSD");
@@ -463,8 +478,24 @@ error_do_abort:
463error_kill_call: 478error_kill_call:
464 if (call->type->done) 479 if (call->type->done)
465 call->type->done(call); 480 call->type->done(call);
466 afs_put_call(call); 481
482 /* We need to dispose of the extra ref we grabbed for an async call.
483 * The call, however, might be queued on afs_async_calls and we need to
484 * make sure we don't get any more notifications that might requeue it.
485 */
486 if (call->rxcall) {
487 rxrpc_kernel_end_call(call->net->socket, call->rxcall);
488 call->rxcall = NULL;
489 }
490 if (call->async) {
491 if (cancel_work_sync(&call->async_work))
492 afs_put_call(call);
493 afs_put_call(call);
494 }
495
467 ac->error = ret; 496 ac->error = ret;
497 call->state = AFS_CALL_COMPLETE;
498 afs_put_call(call);
468 _leave(" = %d", ret); 499 _leave(" = %d", ret);
469 return ret; 500 return ret;
470} 501}
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 95d0761cdb34..155dc14caef9 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -42,9 +42,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
42 if (vldb->fs_mask[i] & type_mask) 42 if (vldb->fs_mask[i] & type_mask)
43 nr_servers++; 43 nr_servers++;
44 44
45 slist = kzalloc(sizeof(struct afs_server_list) + 45 slist = kzalloc(struct_size(slist, servers, nr_servers), GFP_KERNEL);
46 sizeof(struct afs_server_entry) * nr_servers,
47 GFP_KERNEL);
48 if (!slist) 46 if (!slist)
49 goto error; 47 goto error;
50 48
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 12658c1363ae..5aa57929e8c2 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -803,7 +803,7 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
803 bp = xdr_encode_YFSFid(bp, &vnode->fid); 803 bp = xdr_encode_YFSFid(bp, &vnode->fid);
804 bp = xdr_encode_string(bp, name, namesz); 804 bp = xdr_encode_string(bp, name, namesz);
805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode); 805 bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
806 bp = xdr_encode_u32(bp, 0); /* ViceLockType */ 806 bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
807 yfs_check_req(call, bp); 807 yfs_check_req(call, bp);
808 808
809 afs_use_fs_server(call, fc->cbi); 809 afs_use_fs_server(call, fc->cbi);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c546cdce77e6..58a4c1217fa8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
104} 104}
105EXPORT_SYMBOL(invalidate_bdev); 105EXPORT_SYMBOL(invalidate_bdev);
106 106
107static void set_init_blocksize(struct block_device *bdev)
108{
109 unsigned bsize = bdev_logical_block_size(bdev);
110 loff_t size = i_size_read(bdev->bd_inode);
111
112 while (bsize < PAGE_SIZE) {
113 if (size & bsize)
114 break;
115 bsize <<= 1;
116 }
117 bdev->bd_block_size = bsize;
118 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
119}
120
107int set_blocksize(struct block_device *bdev, int size) 121int set_blocksize(struct block_device *bdev, int size)
108{ 122{
109 /* Size must be a power of two, and between 512 and PAGE_SIZE */ 123 /* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change);
1431 1445
1432void bd_set_size(struct block_device *bdev, loff_t size) 1446void bd_set_size(struct block_device *bdev, loff_t size)
1433{ 1447{
1434 unsigned bsize = bdev_logical_block_size(bdev);
1435
1436 inode_lock(bdev->bd_inode); 1448 inode_lock(bdev->bd_inode);
1437 i_size_write(bdev->bd_inode, size); 1449 i_size_write(bdev->bd_inode, size);
1438 inode_unlock(bdev->bd_inode); 1450 inode_unlock(bdev->bd_inode);
1439 while (bsize < PAGE_SIZE) {
1440 if (size & bsize)
1441 break;
1442 bsize <<= 1;
1443 }
1444 bdev->bd_block_size = bsize;
1445 bdev->bd_inode->i_blkbits = blksize_bits(bsize);
1446} 1451}
1447EXPORT_SYMBOL(bd_set_size); 1452EXPORT_SYMBOL(bd_set_size);
1448 1453
@@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1519 } 1524 }
1520 } 1525 }
1521 1526
1522 if (!ret) 1527 if (!ret) {
1523 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); 1528 bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
1529 set_init_blocksize(bdev);
1530 }
1524 1531
1525 /* 1532 /*
1526 * If the device is invalidated, rescan partition 1533 * If the device is invalidated, rescan partition
@@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
1555 goto out_clear; 1562 goto out_clear;
1556 } 1563 }
1557 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9); 1564 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
1565 set_init_blocksize(bdev);
1558 } 1566 }
1559 1567
1560 if (bdev->bd_bdi == &noop_backing_dev_info) 1568 if (bdev->bd_bdi == &noop_backing_dev_info)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d92462fe66c8..f64aad613727 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1016,19 +1016,21 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1016 parent_start = parent->start; 1016 parent_start = parent->start;
1017 1017
1018 /* 1018 /*
1019 * If we are COWing a node/leaf from the extent, chunk or device trees, 1019 * If we are COWing a node/leaf from the extent, chunk, device or free
1020 * make sure that we do not finish block group creation of pending block 1020 * space trees, make sure that we do not finish block group creation of
1021 * groups. We do this to avoid a deadlock. 1021 * pending block groups. We do this to avoid a deadlock.
1022 * COWing can result in allocation of a new chunk, and flushing pending 1022 * COWing can result in allocation of a new chunk, and flushing pending
1023 * block groups (btrfs_create_pending_block_groups()) can be triggered 1023 * block groups (btrfs_create_pending_block_groups()) can be triggered
1024 * when finishing allocation of a new chunk. Creation of a pending block 1024 * when finishing allocation of a new chunk. Creation of a pending block
1025 * group modifies the extent, chunk and device trees, therefore we could 1025 * group modifies the extent, chunk, device and free space trees,
1026 * deadlock with ourselves since we are holding a lock on an extent 1026 * therefore we could deadlock with ourselves since we are holding a
1027 * buffer that btrfs_create_pending_block_groups() may try to COW later. 1027 * lock on an extent buffer that btrfs_create_pending_block_groups() may
1028 * try to COW later.
1028 */ 1029 */
1029 if (root == fs_info->extent_root || 1030 if (root == fs_info->extent_root ||
1030 root == fs_info->chunk_root || 1031 root == fs_info->chunk_root ||
1031 root == fs_info->dev_root) 1032 root == fs_info->dev_root ||
1033 root == fs_info->free_space_root)
1032 trans->can_flush_pending_bgs = false; 1034 trans->can_flush_pending_bgs = false;
1033 1035
1034 cow = btrfs_alloc_tree_block(trans, root, parent_start, 1036 cow = btrfs_alloc_tree_block(trans, root, parent_start,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 0a68cf7032f5..7a2a2621f0d9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
35struct btrfs_trans_handle; 35struct btrfs_trans_handle;
36struct btrfs_transaction; 36struct btrfs_transaction;
37struct btrfs_pending_snapshot; 37struct btrfs_pending_snapshot;
38struct btrfs_delayed_ref_root;
38extern struct kmem_cache *btrfs_trans_handle_cachep; 39extern struct kmem_cache *btrfs_trans_handle_cachep;
39extern struct kmem_cache *btrfs_bit_radix_cachep; 40extern struct kmem_cache *btrfs_bit_radix_cachep;
40extern struct kmem_cache *btrfs_path_cachep; 41extern struct kmem_cache *btrfs_path_cachep;
@@ -786,6 +787,9 @@ enum {
786 * main phase. The fs_info::balance_ctl is initialized. 787 * main phase. The fs_info::balance_ctl is initialized.
787 */ 788 */
788 BTRFS_FS_BALANCE_RUNNING, 789 BTRFS_FS_BALANCE_RUNNING,
790
791 /* Indicate that the cleaner thread is awake and doing something. */
792 BTRFS_FS_CLEANER_RUNNING,
789}; 793};
790 794
791struct btrfs_fs_info { 795struct btrfs_fs_info {
@@ -2661,6 +2665,9 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2661 unsigned long count); 2665 unsigned long count);
2662int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, 2666int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
2663 unsigned long count, u64 transid, int wait); 2667 unsigned long count, u64 transid, int wait);
2668void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
2669 struct btrfs_delayed_ref_root *delayed_refs,
2670 struct btrfs_delayed_ref_head *head);
2664int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); 2671int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
2665int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 2672int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
2666 struct btrfs_fs_info *fs_info, u64 bytenr, 2673 struct btrfs_fs_info *fs_info, u64 bytenr,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8da2f380d3c0..6a2a2a951705 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1682,6 +1682,8 @@ static int cleaner_kthread(void *arg)
1682 while (1) { 1682 while (1) {
1683 again = 0; 1683 again = 0;
1684 1684
1685 set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1686
1685 /* Make the cleaner go to sleep early. */ 1687 /* Make the cleaner go to sleep early. */
1686 if (btrfs_need_cleaner_sleep(fs_info)) 1688 if (btrfs_need_cleaner_sleep(fs_info))
1687 goto sleep; 1689 goto sleep;
@@ -1728,6 +1730,7 @@ static int cleaner_kthread(void *arg)
1728 */ 1730 */
1729 btrfs_delete_unused_bgs(fs_info); 1731 btrfs_delete_unused_bgs(fs_info);
1730sleep: 1732sleep:
1733 clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
1731 if (kthread_should_park()) 1734 if (kthread_should_park())
1732 kthread_parkme(); 1735 kthread_parkme();
1733 if (kthread_should_stop()) 1736 if (kthread_should_stop())
@@ -4201,6 +4204,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4201 spin_lock(&fs_info->ordered_root_lock); 4204 spin_lock(&fs_info->ordered_root_lock);
4202 } 4205 }
4203 spin_unlock(&fs_info->ordered_root_lock); 4206 spin_unlock(&fs_info->ordered_root_lock);
4207
4208 /*
4209 * We need this here because if we've been flipped read-only we won't
4210 * get sync() from the umount, so we need to make sure any ordered
4211 * extents that haven't had their dirty pages IO start writeout yet
4212 * actually get run and error out properly.
4213 */
4214 btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
4204} 4215}
4205 4216
4206static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 4217static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -4265,6 +4276,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4265 if (pin_bytes) 4276 if (pin_bytes)
4266 btrfs_pin_extent(fs_info, head->bytenr, 4277 btrfs_pin_extent(fs_info, head->bytenr,
4267 head->num_bytes, 1); 4278 head->num_bytes, 1);
4279 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4268 btrfs_put_delayed_ref_head(head); 4280 btrfs_put_delayed_ref_head(head);
4269 cond_resched(); 4281 cond_resched();
4270 spin_lock(&delayed_refs->lock); 4282 spin_lock(&delayed_refs->lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b15afeae16df..d81035b7ea7d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2456,12 +2456,10 @@ static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
2456 return ret ? ret : 1; 2456 return ret ? ret : 1;
2457} 2457}
2458 2458
2459static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans, 2459void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
2460 struct btrfs_delayed_ref_head *head) 2460 struct btrfs_delayed_ref_root *delayed_refs,
2461 struct btrfs_delayed_ref_head *head)
2461{ 2462{
2462 struct btrfs_fs_info *fs_info = trans->fs_info;
2463 struct btrfs_delayed_ref_root *delayed_refs =
2464 &trans->transaction->delayed_refs;
2465 int nr_items = 1; /* Dropping this ref head update. */ 2463 int nr_items = 1; /* Dropping this ref head update. */
2466 2464
2467 if (head->total_ref_mod < 0) { 2465 if (head->total_ref_mod < 0) {
@@ -2544,7 +2542,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
2544 } 2542 }
2545 } 2543 }
2546 2544
2547 cleanup_ref_head_accounting(trans, head); 2545 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
2548 2546
2549 trace_run_delayed_ref_head(fs_info, head, 0); 2547 trace_run_delayed_ref_head(fs_info, head, 0);
2550 btrfs_delayed_ref_unlock(head); 2548 btrfs_delayed_ref_unlock(head);
@@ -4954,6 +4952,15 @@ static void flush_space(struct btrfs_fs_info *fs_info,
4954 ret = 0; 4952 ret = 0;
4955 break; 4953 break;
4956 case COMMIT_TRANS: 4954 case COMMIT_TRANS:
4955 /*
4956 * If we have pending delayed iputs then we could free up a
4957 * bunch of pinned space, so make sure we run the iputs before
4958 * we do our pinned bytes check below.
4959 */
4960 mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
4961 btrfs_run_delayed_iputs(fs_info);
4962 mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
4963
4957 ret = may_commit_transaction(fs_info, space_info); 4964 ret = may_commit_transaction(fs_info, space_info);
4958 break; 4965 break;
4959 default: 4966 default:
@@ -7188,7 +7195,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
7188 if (head->must_insert_reserved) 7195 if (head->must_insert_reserved)
7189 ret = 1; 7196 ret = 1;
7190 7197
7191 cleanup_ref_head_accounting(trans, head); 7198 btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
7192 mutex_unlock(&head->mutex); 7199 mutex_unlock(&head->mutex);
7193 btrfs_put_delayed_ref_head(head); 7200 btrfs_put_delayed_ref_head(head);
7194 return ret; 7201 return ret;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 43eb4535319d..5c349667c761 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3129,9 +3129,6 @@ out:
3129 /* once for the tree */ 3129 /* once for the tree */
3130 btrfs_put_ordered_extent(ordered_extent); 3130 btrfs_put_ordered_extent(ordered_extent);
3131 3131
3132 /* Try to release some metadata so we don't get an OOM but don't wait */
3133 btrfs_btree_balance_dirty_nodelay(fs_info);
3134
3135 return ret; 3132 return ret;
3136} 3133}
3137 3134
@@ -3254,6 +3251,8 @@ void btrfs_add_delayed_iput(struct inode *inode)
3254 ASSERT(list_empty(&binode->delayed_iput)); 3251 ASSERT(list_empty(&binode->delayed_iput));
3255 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); 3252 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
3256 spin_unlock(&fs_info->delayed_iput_lock); 3253 spin_unlock(&fs_info->delayed_iput_lock);
3254 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
3255 wake_up_process(fs_info->cleaner_kthread);
3257} 3256}
3258 3257
3259void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info) 3258void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fab9443f6a42..9c8e1734429c 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3221,6 +3221,26 @@ static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
3221 inode_lock_nested(inode2, I_MUTEX_CHILD); 3221 inode_lock_nested(inode2, I_MUTEX_CHILD);
3222} 3222}
3223 3223
3224static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
3225 struct inode *inode2, u64 loff2, u64 len)
3226{
3227 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3228 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3229}
3230
3231static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
3232 struct inode *inode2, u64 loff2, u64 len)
3233{
3234 if (inode1 < inode2) {
3235 swap(inode1, inode2);
3236 swap(loff1, loff2);
3237 } else if (inode1 == inode2 && loff2 < loff1) {
3238 swap(loff1, loff2);
3239 }
3240 lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
3241 lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
3242}
3243
3224static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen, 3244static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3225 struct inode *dst, u64 dst_loff) 3245 struct inode *dst, u64 dst_loff)
3226{ 3246{
@@ -3242,11 +3262,12 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3242 return -EINVAL; 3262 return -EINVAL;
3243 3263
3244 /* 3264 /*
3245 * Lock destination range to serialize with concurrent readpages(). 3265 * Lock destination range to serialize with concurrent readpages() and
3266 * source range to serialize with relocation.
3246 */ 3267 */
3247 lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); 3268 btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
3248 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1); 3269 ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
3249 unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1); 3270 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
3250 3271
3251 return ret; 3272 return ret;
3252} 3273}
@@ -3905,17 +3926,33 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
3905 len = ALIGN(src->i_size, bs) - off; 3926 len = ALIGN(src->i_size, bs) - off;
3906 3927
3907 if (destoff > inode->i_size) { 3928 if (destoff > inode->i_size) {
3929 const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);
3930
3908 ret = btrfs_cont_expand(inode, inode->i_size, destoff); 3931 ret = btrfs_cont_expand(inode, inode->i_size, destoff);
3909 if (ret) 3932 if (ret)
3910 return ret; 3933 return ret;
3934 /*
3935 * We may have truncated the last block if the inode's size is
3936 * not sector size aligned, so we need to wait for writeback to
3937 * complete before proceeding further, otherwise we can race
3938 * with cloning and attempt to increment a reference to an
3939 * extent that no longer exists (writeback completed right after
3940 * we found the previous extent covering eof and before we
3941 * attempted to increment its reference count).
3942 */
3943 ret = btrfs_wait_ordered_range(inode, wb_start,
3944 destoff - wb_start);
3945 if (ret)
3946 return ret;
3911 } 3947 }
3912 3948
3913 /* 3949 /*
3914 * Lock destination range to serialize with concurrent readpages(). 3950 * Lock destination range to serialize with concurrent readpages() and
3951 * source range to serialize with relocation.
3915 */ 3952 */
3916 lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); 3953 btrfs_double_extent_lock(src, off, inode, destoff, len);
3917 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); 3954 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
3918 unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1); 3955 btrfs_double_extent_unlock(src, off, inode, destoff, len);
3919 /* 3956 /*
3920 * Truncate page cache pages so that future reads will see the cloned 3957 * Truncate page cache pages so that future reads will see the cloned
3921 * data immediately and not the previous data. 3958 * data immediately and not the previous data.
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2576b1a379c9..3e4f8f88353e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7825,6 +7825,18 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7825 ret = -EUCLEAN; 7825 ret = -EUCLEAN;
7826 goto out; 7826 goto out;
7827 } 7827 }
7828
7829 /* It's possible this device is a dummy for seed device */
7830 if (dev->disk_total_bytes == 0) {
7831 dev = find_device(fs_info->fs_devices->seed, devid, NULL);
7832 if (!dev) {
7833 btrfs_err(fs_info, "failed to find seed devid %llu",
7834 devid);
7835 ret = -EUCLEAN;
7836 goto out;
7837 }
7838 }
7839
7828 if (physical_offset + physical_len > dev->disk_total_bytes) { 7840 if (physical_offset + physical_len > dev->disk_total_bytes) {
7829 btrfs_err(fs_info, 7841 btrfs_err(fs_info,
7830"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu", 7842"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 5d0c05e288cc..a47c541f8006 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1494,10 +1494,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
1494 if (err < 0 || off >= i_size_read(inode)) { 1494 if (err < 0 || off >= i_size_read(inode)) {
1495 unlock_page(page); 1495 unlock_page(page);
1496 put_page(page); 1496 put_page(page);
1497 if (err == -ENOMEM) 1497 ret = vmf_error(err);
1498 ret = VM_FAULT_OOM;
1499 else
1500 ret = VM_FAULT_SIGBUS;
1501 goto out_inline; 1498 goto out_inline;
1502 } 1499 }
1503 if (err < PAGE_SIZE) 1500 if (err < PAGE_SIZE)
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 4e9a7cc488da..da2cd8e89062 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -530,7 +530,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
530 seq_putc(m, ','); 530 seq_putc(m, ',');
531 pos = m->count; 531 pos = m->count;
532 532
533 ret = ceph_print_client_options(m, fsc->client); 533 ret = ceph_print_client_options(m, fsc->client, false);
534 if (ret) 534 if (ret)
535 return ret; 535 return ret;
536 536
@@ -640,7 +640,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
640 opt = NULL; /* fsc->client now owns this */ 640 opt = NULL; /* fsc->client now owns this */
641 641
642 fsc->client->extra_mon_dispatch = extra_mon_dispatch; 642 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
643 fsc->client->osdc.abort_on_full = true; 643 ceph_set_opt(fsc->client, ABORT_ON_FULL);
644 644
645 if (!fsopt->mds_namespace) { 645 if (!fsopt->mds_namespace) {
646 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 646 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 26776eddd85d..d1f9c2f3f575 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
150extern const struct export_operations cifs_export_ops; 150extern const struct export_operations cifs_export_ops;
151#endif /* CONFIG_CIFS_NFSD_EXPORT */ 151#endif /* CONFIG_CIFS_NFSD_EXPORT */
152 152
153#define CIFS_VERSION "2.15" 153#define CIFS_VERSION "2.16"
154#endif /* _CIFSFS_H */ 154#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 01ded7038b19..94dbdbe5be34 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1438,6 +1438,7 @@ struct mid_q_entry {
1438 int mid_state; /* wish this were enum but can not pass to wait_event */ 1438 int mid_state; /* wish this were enum but can not pass to wait_event */
1439 unsigned int mid_flags; 1439 unsigned int mid_flags;
1440 __le16 command; /* smb command code */ 1440 __le16 command; /* smb command code */
1441 unsigned int optype; /* operation type */
1441 bool large_buf:1; /* if valid response, is pointer to large buf */ 1442 bool large_buf:1; /* if valid response, is pointer to large buf */
1442 bool multiRsp:1; /* multiple trans2 responses for one request */ 1443 bool multiRsp:1; /* multiple trans2 responses for one request */
1443 bool multiEnd:1; /* both received */ 1444 bool multiEnd:1; /* both received */
@@ -1574,6 +1575,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
1574 kfree(param); 1575 kfree(param);
1575} 1576}
1576 1577
1578static inline bool is_interrupt_error(int error)
1579{
1580 switch (error) {
1581 case -EINTR:
1582 case -ERESTARTSYS:
1583 case -ERESTARTNOHAND:
1584 case -ERESTARTNOINTR:
1585 return true;
1586 }
1587 return false;
1588}
1589
1590static inline bool is_retryable_error(int error)
1591{
1592 if (is_interrupt_error(error) || error == -EAGAIN)
1593 return true;
1594 return false;
1595}
1596
1577#define MID_FREE 0 1597#define MID_FREE 0
1578#define MID_REQUEST_ALLOCATED 1 1598#define MID_REQUEST_ALLOCATED 1
1579#define MID_REQUEST_SUBMITTED 2 1599#define MID_REQUEST_SUBMITTED 2
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b1f49c1c543a..e18915415e13 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -128,24 +128,31 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
128 int rc; 128 int rc;
129 struct dfs_cache_tgt_list tl; 129 struct dfs_cache_tgt_list tl;
130 struct dfs_cache_tgt_iterator *it = NULL; 130 struct dfs_cache_tgt_iterator *it = NULL;
131 char tree[MAX_TREE_SIZE + 1]; 131 char *tree;
132 const char *tcp_host; 132 const char *tcp_host;
133 size_t tcp_host_len; 133 size_t tcp_host_len;
134 const char *dfs_host; 134 const char *dfs_host;
135 size_t dfs_host_len; 135 size_t dfs_host_len;
136 136
137 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
138 if (!tree)
139 return -ENOMEM;
140
137 if (tcon->ipc) { 141 if (tcon->ipc) {
138 snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", 142 snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
139 tcon->ses->server->hostname); 143 tcon->ses->server->hostname);
140 return CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 144 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
145 goto out;
141 } 146 }
142 147
143 if (!tcon->dfs_path) 148 if (!tcon->dfs_path) {
144 return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); 149 rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
150 goto out;
151 }
145 152
146 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); 153 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
147 if (rc) 154 if (rc)
148 return rc; 155 goto out;
149 156
150 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, 157 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
151 &tcp_host_len); 158 &tcp_host_len);
@@ -165,7 +172,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
165 continue; 172 continue;
166 } 173 }
167 174
168 snprintf(tree, sizeof(tree), "\\%s", tgt); 175 snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
169 176
170 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); 177 rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
171 if (!rc) 178 if (!rc)
@@ -182,6 +189,8 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
182 rc = -ENOENT; 189 rc = -ENOENT;
183 } 190 }
184 dfs_cache_free_tgts(&tl); 191 dfs_cache_free_tgts(&tl);
192out:
193 kfree(tree);
185 return rc; 194 return rc;
186} 195}
187#else 196#else
@@ -2114,7 +2123,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2114 2123
2115 for (j = 0; j < nr_pages; j++) { 2124 for (j = 0; j < nr_pages; j++) {
2116 unlock_page(wdata2->pages[j]); 2125 unlock_page(wdata2->pages[j]);
2117 if (rc != 0 && rc != -EAGAIN) { 2126 if (rc != 0 && !is_retryable_error(rc)) {
2118 SetPageError(wdata2->pages[j]); 2127 SetPageError(wdata2->pages[j]);
2119 end_page_writeback(wdata2->pages[j]); 2128 end_page_writeback(wdata2->pages[j]);
2120 put_page(wdata2->pages[j]); 2129 put_page(wdata2->pages[j]);
@@ -2123,7 +2132,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2123 2132
2124 if (rc) { 2133 if (rc) {
2125 kref_put(&wdata2->refcount, cifs_writedata_release); 2134 kref_put(&wdata2->refcount, cifs_writedata_release);
2126 if (rc == -EAGAIN) 2135 if (is_retryable_error(rc))
2127 continue; 2136 continue;
2128 break; 2137 break;
2129 } 2138 }
@@ -2132,7 +2141,8 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
2132 i += nr_pages; 2141 i += nr_pages;
2133 } while (i < wdata->nr_pages); 2142 } while (i < wdata->nr_pages);
2134 2143
2135 mapping_set_error(inode->i_mapping, rc); 2144 if (rc != 0 && !is_retryable_error(rc))
2145 mapping_set_error(inode->i_mapping, rc);
2136 kref_put(&wdata->refcount, cifs_writedata_release); 2146 kref_put(&wdata->refcount, cifs_writedata_release);
2137} 2147}
2138 2148
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f66529679ca2..683310f26171 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -433,9 +433,10 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
433 kfree(server->hostname); 433 kfree(server->hostname);
434 434
435 server->hostname = extract_hostname(name); 435 server->hostname = extract_hostname(name);
436 if (!server->hostname) { 436 if (IS_ERR(server->hostname)) {
437 cifs_dbg(FYI, "%s: failed to extract hostname from target: %d\n", 437 cifs_dbg(FYI,
438 __func__, -ENOMEM); 438 "%s: failed to extract hostname from target: %ld\n",
439 __func__, PTR_ERR(server->hostname));
439 } 440 }
440} 441}
441 442
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index cd63c4a70875..09b7d0d4f6e4 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -776,6 +776,7 @@ static int get_tgt_list(const struct dfs_cache_entry *ce,
776 it->it_name = kstrndup(t->t_name, strlen(t->t_name), 776 it->it_name = kstrndup(t->t_name, strlen(t->t_name),
777 GFP_KERNEL); 777 GFP_KERNEL);
778 if (!it->it_name) { 778 if (!it->it_name) {
779 kfree(it);
779 rc = -ENOMEM; 780 rc = -ENOMEM;
780 goto err_free_it; 781 goto err_free_it;
781 } 782 }
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index e3e3a7550205..2c7689f3998d 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -733,7 +733,8 @@ reopen_success:
733 733
734 if (can_flush) { 734 if (can_flush) {
735 rc = filemap_write_and_wait(inode->i_mapping); 735 rc = filemap_write_and_wait(inode->i_mapping);
736 mapping_set_error(inode->i_mapping, rc); 736 if (!is_interrupt_error(rc))
737 mapping_set_error(inode->i_mapping, rc);
737 738
738 if (tcon->unix_ext) 739 if (tcon->unix_ext)
739 rc = cifs_get_inode_info_unix(&inode, full_path, 740 rc = cifs_get_inode_info_unix(&inode, full_path,
@@ -1132,14 +1133,18 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
1132 1133
1133 /* 1134 /*
1134 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1135 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1135 * and check it for zero before using. 1136 * and check it before using.
1136 */ 1137 */
1137 max_buf = tcon->ses->server->maxBuf; 1138 max_buf = tcon->ses->server->maxBuf;
1138 if (!max_buf) { 1139 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
1139 free_xid(xid); 1140 free_xid(xid);
1140 return -EINVAL; 1141 return -EINVAL;
1141 } 1142 }
1142 1143
1144 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1145 PAGE_SIZE);
1146 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1147 PAGE_SIZE);
1143 max_num = (max_buf - sizeof(struct smb_hdr)) / 1148 max_num = (max_buf - sizeof(struct smb_hdr)) /
1144 sizeof(LOCKING_ANDX_RANGE); 1149 sizeof(LOCKING_ANDX_RANGE);
1145 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1150 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1472,12 +1477,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1472 1477
1473 /* 1478 /*
1474 * Accessing maxBuf is racy with cifs_reconnect - need to store value 1479 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1475 * and check it for zero before using. 1480 * and check it before using.
1476 */ 1481 */
1477 max_buf = tcon->ses->server->maxBuf; 1482 max_buf = tcon->ses->server->maxBuf;
1478 if (!max_buf) 1483 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
1479 return -EINVAL; 1484 return -EINVAL;
1480 1485
1486 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1487 PAGE_SIZE);
1488 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1489 PAGE_SIZE);
1481 max_num = (max_buf - sizeof(struct smb_hdr)) / 1490 max_num = (max_buf - sizeof(struct smb_hdr)) /
1482 sizeof(LOCKING_ANDX_RANGE); 1491 sizeof(LOCKING_ANDX_RANGE);
1483 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1492 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -2110,6 +2119,7 @@ static int cifs_writepages(struct address_space *mapping,
2110 pgoff_t end, index; 2119 pgoff_t end, index;
2111 struct cifs_writedata *wdata; 2120 struct cifs_writedata *wdata;
2112 int rc = 0; 2121 int rc = 0;
2122 int saved_rc = 0;
2113 unsigned int xid; 2123 unsigned int xid;
2114 2124
2115 /* 2125 /*
@@ -2138,8 +2148,10 @@ retry:
2138 2148
2139 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize, 2149 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2140 &wsize, &credits); 2150 &wsize, &credits);
2141 if (rc) 2151 if (rc != 0) {
2152 done = true;
2142 break; 2153 break;
2154 }
2143 2155
2144 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; 2156 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2145 2157
@@ -2147,6 +2159,7 @@ retry:
2147 &found_pages); 2159 &found_pages);
2148 if (!wdata) { 2160 if (!wdata) {
2149 rc = -ENOMEM; 2161 rc = -ENOMEM;
2162 done = true;
2150 add_credits_and_wake_if(server, credits, 0); 2163 add_credits_and_wake_if(server, credits, 0);
2151 break; 2164 break;
2152 } 2165 }
@@ -2175,7 +2188,7 @@ retry:
2175 if (rc != 0) { 2188 if (rc != 0) {
2176 add_credits_and_wake_if(server, wdata->credits, 0); 2189 add_credits_and_wake_if(server, wdata->credits, 0);
2177 for (i = 0; i < nr_pages; ++i) { 2190 for (i = 0; i < nr_pages; ++i) {
2178 if (rc == -EAGAIN) 2191 if (is_retryable_error(rc))
2179 redirty_page_for_writepage(wbc, 2192 redirty_page_for_writepage(wbc,
2180 wdata->pages[i]); 2193 wdata->pages[i]);
2181 else 2194 else
@@ -2183,7 +2196,7 @@ retry:
2183 end_page_writeback(wdata->pages[i]); 2196 end_page_writeback(wdata->pages[i]);
2184 put_page(wdata->pages[i]); 2197 put_page(wdata->pages[i]);
2185 } 2198 }
2186 if (rc != -EAGAIN) 2199 if (!is_retryable_error(rc))
2187 mapping_set_error(mapping, rc); 2200 mapping_set_error(mapping, rc);
2188 } 2201 }
2189 kref_put(&wdata->refcount, cifs_writedata_release); 2202 kref_put(&wdata->refcount, cifs_writedata_release);
@@ -2193,6 +2206,15 @@ retry:
2193 continue; 2206 continue;
2194 } 2207 }
2195 2208
2209 /* Return immediately if we received a signal during writing */
2210 if (is_interrupt_error(rc)) {
2211 done = true;
2212 break;
2213 }
2214
2215 if (rc != 0 && saved_rc == 0)
2216 saved_rc = rc;
2217
2196 wbc->nr_to_write -= nr_pages; 2218 wbc->nr_to_write -= nr_pages;
2197 if (wbc->nr_to_write <= 0) 2219 if (wbc->nr_to_write <= 0)
2198 done = true; 2220 done = true;
@@ -2210,6 +2232,9 @@ retry:
2210 goto retry; 2232 goto retry;
2211 } 2233 }
2212 2234
2235 if (saved_rc != 0)
2236 rc = saved_rc;
2237
2213 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) 2238 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2214 mapping->writeback_index = index; 2239 mapping->writeback_index = index;
2215 2240
@@ -2242,8 +2267,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2242 set_page_writeback(page); 2267 set_page_writeback(page);
2243retry_write: 2268retry_write:
2244 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); 2269 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2245 if (rc == -EAGAIN) { 2270 if (is_retryable_error(rc)) {
2246 if (wbc->sync_mode == WB_SYNC_ALL) 2271 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
2247 goto retry_write; 2272 goto retry_write;
2248 redirty_page_for_writepage(wbc, page); 2273 redirty_page_for_writepage(wbc, page);
2249 } else if (rc != 0) { 2274 } else if (rc != 0) {
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 13fb59aadebc..478003644916 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2257,6 +2257,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
2257 * the flush returns error? 2257 * the flush returns error?
2258 */ 2258 */
2259 rc = filemap_write_and_wait(inode->i_mapping); 2259 rc = filemap_write_and_wait(inode->i_mapping);
2260 if (is_interrupt_error(rc)) {
2261 rc = -ERESTARTSYS;
2262 goto out;
2263 }
2264
2260 mapping_set_error(inode->i_mapping, rc); 2265 mapping_set_error(inode->i_mapping, rc);
2261 rc = 0; 2266 rc = 0;
2262 2267
@@ -2400,6 +2405,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2400 * the flush returns error? 2405 * the flush returns error?
2401 */ 2406 */
2402 rc = filemap_write_and_wait(inode->i_mapping); 2407 rc = filemap_write_and_wait(inode->i_mapping);
2408 if (is_interrupt_error(rc)) {
2409 rc = -ERESTARTSYS;
2410 goto cifs_setattr_exit;
2411 }
2412
2403 mapping_set_error(inode->i_mapping, rc); 2413 mapping_set_error(inode->i_mapping, rc);
2404 rc = 0; 2414 rc = 0;
2405 2415
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 4ed10dd086e6..b204e84b87fb 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -122,12 +122,14 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
122 122
123 /* 123 /*
124 * Accessing maxBuf is racy with cifs_reconnect - need to store value 124 * Accessing maxBuf is racy with cifs_reconnect - need to store value
125 * and check it for zero before using. 125 * and check it before using.
126 */ 126 */
127 max_buf = tcon->ses->server->maxBuf; 127 max_buf = tcon->ses->server->maxBuf;
128 if (!max_buf) 128 if (max_buf < sizeof(struct smb2_lock_element))
129 return -EINVAL; 129 return -EINVAL;
130 130
131 BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
132 max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
131 max_num = max_buf / sizeof(struct smb2_lock_element); 133 max_num = max_buf / sizeof(struct smb2_lock_element);
132 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); 134 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
133 if (!buf) 135 if (!buf)
@@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
264 return -EINVAL; 266 return -EINVAL;
265 } 267 }
266 268
269 BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
270 max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
267 max_num = max_buf / sizeof(struct smb2_lock_element); 271 max_num = max_buf / sizeof(struct smb2_lock_element);
268 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL); 272 buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
269 if (!buf) { 273 if (!buf) {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index e57f6aa1d638..50811a7dc0e0 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -162,24 +162,31 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
162 int rc; 162 int rc;
163 struct dfs_cache_tgt_list tl; 163 struct dfs_cache_tgt_list tl;
164 struct dfs_cache_tgt_iterator *it = NULL; 164 struct dfs_cache_tgt_iterator *it = NULL;
165 char tree[MAX_TREE_SIZE + 1]; 165 char *tree;
166 const char *tcp_host; 166 const char *tcp_host;
167 size_t tcp_host_len; 167 size_t tcp_host_len;
168 const char *dfs_host; 168 const char *dfs_host;
169 size_t dfs_host_len; 169 size_t dfs_host_len;
170 170
171 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
172 if (!tree)
173 return -ENOMEM;
174
171 if (tcon->ipc) { 175 if (tcon->ipc) {
172 snprintf(tree, sizeof(tree), "\\\\%s\\IPC$", 176 snprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
173 tcon->ses->server->hostname); 177 tcon->ses->server->hostname);
174 return SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 178 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
179 goto out;
175 } 180 }
176 181
177 if (!tcon->dfs_path) 182 if (!tcon->dfs_path) {
178 return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); 183 rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
184 goto out;
185 }
179 186
180 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); 187 rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl);
181 if (rc) 188 if (rc)
182 return rc; 189 goto out;
183 190
184 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host, 191 extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
185 &tcp_host_len); 192 &tcp_host_len);
@@ -199,7 +206,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
199 continue; 206 continue;
200 } 207 }
201 208
202 snprintf(tree, sizeof(tree), "\\%s", tgt); 209 snprintf(tree, MAX_TREE_SIZE, "\\%s", tgt);
203 210
204 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); 211 rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
205 if (!rc) 212 if (!rc)
@@ -216,6 +223,8 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
216 rc = -ENOENT; 223 rc = -ENOENT;
217 } 224 }
218 dfs_cache_free_tgts(&tl); 225 dfs_cache_free_tgts(&tl);
226out:
227 kfree(tree);
219 return rc; 228 return rc;
220} 229}
221#else 230#else
@@ -3278,12 +3287,14 @@ smb2_async_readv(struct cifs_readdata *rdata)
3278 if (rdata->credits) { 3287 if (rdata->credits) {
3279 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes, 3288 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
3280 SMB2_MAX_BUFFER_SIZE)); 3289 SMB2_MAX_BUFFER_SIZE));
3281 shdr->CreditRequest = shdr->CreditCharge; 3290 shdr->CreditRequest =
3291 cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
3282 spin_lock(&server->req_lock); 3292 spin_lock(&server->req_lock);
3283 server->credits += rdata->credits - 3293 server->credits += rdata->credits -
3284 le16_to_cpu(shdr->CreditCharge); 3294 le16_to_cpu(shdr->CreditCharge);
3285 spin_unlock(&server->req_lock); 3295 spin_unlock(&server->req_lock);
3286 wake_up(&server->request_q); 3296 wake_up(&server->request_q);
3297 rdata->credits = le16_to_cpu(shdr->CreditCharge);
3287 flags |= CIFS_HAS_CREDITS; 3298 flags |= CIFS_HAS_CREDITS;
3288 } 3299 }
3289 3300
@@ -3555,12 +3566,14 @@ smb2_async_writev(struct cifs_writedata *wdata,
3555 if (wdata->credits) { 3566 if (wdata->credits) {
3556 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, 3567 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
3557 SMB2_MAX_BUFFER_SIZE)); 3568 SMB2_MAX_BUFFER_SIZE));
3558 shdr->CreditRequest = shdr->CreditCharge; 3569 shdr->CreditRequest =
3570 cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 1);
3559 spin_lock(&server->req_lock); 3571 spin_lock(&server->req_lock);
3560 server->credits += wdata->credits - 3572 server->credits += wdata->credits -
3561 le16_to_cpu(shdr->CreditCharge); 3573 le16_to_cpu(shdr->CreditCharge);
3562 spin_unlock(&server->req_lock); 3574 spin_unlock(&server->req_lock);
3563 wake_up(&server->request_q); 3575 wake_up(&server->request_q);
3576 wdata->credits = le16_to_cpu(shdr->CreditCharge);
3564 flags |= CIFS_HAS_CREDITS; 3577 flags |= CIFS_HAS_CREDITS;
3565 } 3578 }
3566 3579
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 5be7302853b6..202e0e84efdd 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -387,7 +387,7 @@ smbd_done:
387 if (rc < 0 && rc != -EINTR) 387 if (rc < 0 && rc != -EINTR)
388 cifs_dbg(VFS, "Error %d sending data on socket to server\n", 388 cifs_dbg(VFS, "Error %d sending data on socket to server\n",
389 rc); 389 rc);
390 else 390 else if (rc > 0)
391 rc = 0; 391 rc = 0;
392 392
393 return rc; 393 return rc;
@@ -783,8 +783,34 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
783} 783}
784 784
785static void 785static void
786cifs_noop_callback(struct mid_q_entry *mid) 786cifs_compound_callback(struct mid_q_entry *mid)
787{ 787{
788 struct TCP_Server_Info *server = mid->server;
789 unsigned int optype = mid->optype;
790 unsigned int credits_received = 0;
791
792 if (mid->mid_state == MID_RESPONSE_RECEIVED) {
793 if (mid->resp_buf)
794 credits_received = server->ops->get_credits(mid);
795 else
796 cifs_dbg(FYI, "Bad state for cancelled MID\n");
797 }
798
799 add_credits(server, credits_received, optype);
800}
801
802static void
803cifs_compound_last_callback(struct mid_q_entry *mid)
804{
805 cifs_compound_callback(mid);
806 cifs_wake_up_task(mid);
807}
808
809static void
810cifs_cancelled_callback(struct mid_q_entry *mid)
811{
812 cifs_compound_callback(mid);
813 DeleteMidQEntry(mid);
788} 814}
789 815
790int 816int
@@ -795,7 +821,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
795 int i, j, rc = 0; 821 int i, j, rc = 0;
796 int timeout, optype; 822 int timeout, optype;
797 struct mid_q_entry *midQ[MAX_COMPOUND]; 823 struct mid_q_entry *midQ[MAX_COMPOUND];
798 unsigned int credits = 0; 824 bool cancelled_mid[MAX_COMPOUND] = {false};
825 unsigned int credits[MAX_COMPOUND] = {0};
799 char *buf; 826 char *buf;
800 827
801 timeout = flags & CIFS_TIMEOUT_MASK; 828 timeout = flags & CIFS_TIMEOUT_MASK;
@@ -813,13 +840,31 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
813 return -ENOENT; 840 return -ENOENT;
814 841
815 /* 842 /*
816 * Ensure that we do not send more than 50 overlapping requests 843 * Ensure we obtain 1 credit per request in the compound chain.
817 * to the same server. We may make this configurable later or 844 * It can be optimized further by waiting for all the credits
818 * use ses->maxReq. 845 * at once but this can wait long enough if we don't have enough
846 * credits due to some heavy operations in progress or the server
847 * not granting us much, so a fallback to the current approach is
848 * needed anyway.
819 */ 849 */
820 rc = wait_for_free_request(ses->server, timeout, optype); 850 for (i = 0; i < num_rqst; i++) {
821 if (rc) 851 rc = wait_for_free_request(ses->server, timeout, optype);
822 return rc; 852 if (rc) {
853 /*
854 * We haven't sent an SMB packet to the server yet but
855 * we already obtained credits for i requests in the
856 * compound chain - need to return those credits back
857 * for future use. Note that we need to call add_credits
858 * multiple times to match the way we obtained credits
859 * in the first place and to account for in flight
860 * requests correctly.
861 */
862 for (j = 0; j < i; j++)
863 add_credits(ses->server, 1, optype);
864 return rc;
865 }
866 credits[i] = 1;
867 }
823 868
824 /* 869 /*
825 * Make sure that we sign in the same order that we send on this socket 870 * Make sure that we sign in the same order that we send on this socket
@@ -835,18 +880,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
835 for (j = 0; j < i; j++) 880 for (j = 0; j < i; j++)
836 cifs_delete_mid(midQ[j]); 881 cifs_delete_mid(midQ[j]);
837 mutex_unlock(&ses->server->srv_mutex); 882 mutex_unlock(&ses->server->srv_mutex);
883
838 /* Update # of requests on wire to server */ 884 /* Update # of requests on wire to server */
839 add_credits(ses->server, 1, optype); 885 for (j = 0; j < num_rqst; j++)
886 add_credits(ses->server, credits[j], optype);
840 return PTR_ERR(midQ[i]); 887 return PTR_ERR(midQ[i]);
841 } 888 }
842 889
843 midQ[i]->mid_state = MID_REQUEST_SUBMITTED; 890 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
891 midQ[i]->optype = optype;
844 /* 892 /*
845 * We don't invoke the callback compounds unless it is the last 893 * Invoke callback for every part of the compound chain
846 * request. 894 * to calculate credits properly. Wake up this thread only when
895 * the last element is received.
847 */ 896 */
848 if (i < num_rqst - 1) 897 if (i < num_rqst - 1)
849 midQ[i]->callback = cifs_noop_callback; 898 midQ[i]->callback = cifs_compound_callback;
899 else
900 midQ[i]->callback = cifs_compound_last_callback;
850 } 901 }
851 cifs_in_send_inc(ses->server); 902 cifs_in_send_inc(ses->server);
852 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags); 903 rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
@@ -860,8 +911,20 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
860 911
861 mutex_unlock(&ses->server->srv_mutex); 912 mutex_unlock(&ses->server->srv_mutex);
862 913
863 if (rc < 0) 914 if (rc < 0) {
915 /* Sending failed for some reason - return credits back */
916 for (i = 0; i < num_rqst; i++)
917 add_credits(ses->server, credits[i], optype);
864 goto out; 918 goto out;
919 }
920
921 /*
922 * At this point the request is passed to the network stack - we assume
923 * that any credits taken from the server structure on the client have
924 * been spent and we can't return them back. Once we receive responses
925 * we will collect credits granted by the server in the mid callbacks
926 * and add those credits to the server structure.
927 */
865 928
866 /* 929 /*
867 * Compounding is never used during session establish. 930 * Compounding is never used during session establish.
@@ -875,36 +938,34 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
875 938
876 for (i = 0; i < num_rqst; i++) { 939 for (i = 0; i < num_rqst; i++) {
877 rc = wait_for_response(ses->server, midQ[i]); 940 rc = wait_for_response(ses->server, midQ[i]);
878 if (rc != 0) { 941 if (rc != 0)
942 break;
943 }
944 if (rc != 0) {
945 for (; i < num_rqst; i++) {
879 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n", 946 cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
880 midQ[i]->mid, le16_to_cpu(midQ[i]->command)); 947 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
881 send_cancel(ses->server, &rqst[i], midQ[i]); 948 send_cancel(ses->server, &rqst[i], midQ[i]);
882 spin_lock(&GlobalMid_Lock); 949 spin_lock(&GlobalMid_Lock);
883 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { 950 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
884 midQ[i]->mid_flags |= MID_WAIT_CANCELLED; 951 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
885 midQ[i]->callback = DeleteMidQEntry; 952 midQ[i]->callback = cifs_cancelled_callback;
886 spin_unlock(&GlobalMid_Lock); 953 cancelled_mid[i] = true;
887 add_credits(ses->server, 1, optype); 954 credits[i] = 0;
888 return rc;
889 } 955 }
890 spin_unlock(&GlobalMid_Lock); 956 spin_unlock(&GlobalMid_Lock);
891 } 957 }
892 } 958 }
893 959
894 for (i = 0; i < num_rqst; i++)
895 if (midQ[i]->resp_buf)
896 credits += ses->server->ops->get_credits(midQ[i]);
897 if (!credits)
898 credits = 1;
899
900 for (i = 0; i < num_rqst; i++) { 960 for (i = 0; i < num_rqst; i++) {
901 if (rc < 0) 961 if (rc < 0)
902 goto out; 962 goto out;
903 963
904 rc = cifs_sync_mid_result(midQ[i], ses->server); 964 rc = cifs_sync_mid_result(midQ[i], ses->server);
905 if (rc != 0) { 965 if (rc != 0) {
906 add_credits(ses->server, credits, optype); 966 /* mark this mid as cancelled to not free it below */
907 return rc; 967 cancelled_mid[i] = true;
968 goto out;
908 } 969 }
909 970
910 if (!midQ[i]->resp_buf || 971 if (!midQ[i]->resp_buf ||
@@ -951,9 +1012,10 @@ out:
951 * This is prevented above by using a noop callback that will not 1012 * This is prevented above by using a noop callback that will not
952 * wake this thread except for the very last PDU. 1013 * wake this thread except for the very last PDU.
953 */ 1014 */
954 for (i = 0; i < num_rqst; i++) 1015 for (i = 0; i < num_rqst; i++) {
955 cifs_delete_mid(midQ[i]); 1016 if (!cancelled_mid[i])
956 add_credits(ses->server, credits, optype); 1017 cifs_delete_mid(midQ[i]);
1018 }
957 1019
958 return rc; 1020 return rc;
959} 1021}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a2fcea5f8225..32920a10100e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -383,16 +383,17 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
383 * truncation is indicated by end of range being LLONG_MAX 383 * truncation is indicated by end of range being LLONG_MAX
384 * In this case, we first scan the range and release found pages. 384 * In this case, we first scan the range and release found pages.
385 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv 385 * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
386 * maps and global counts. 386 * maps and global counts. Page faults can not race with truncation
387 * in this routine. hugetlb_no_page() prevents page faults in the
388 * truncated range. It checks i_size before allocation, and again after
389 * with the page table lock for the page held. The same lock must be
390 * acquired to unmap a page.
387 * hole punch is indicated if end is not LLONG_MAX 391 * hole punch is indicated if end is not LLONG_MAX
388 * In the hole punch case we scan the range and release found pages. 392 * In the hole punch case we scan the range and release found pages.
389 * Only when releasing a page is the associated region/reserv map 393 * Only when releasing a page is the associated region/reserv map
390 * deleted. The region/reserv map for ranges without associated 394 * deleted. The region/reserv map for ranges without associated
391 * pages are not modified. 395 * pages are not modified. Page faults can race with hole punch.
392 * 396 * This is indicated if we find a mapped page.
393 * Callers of this routine must hold the i_mmap_rwsem in write mode to prevent
394 * races with page faults.
395 *
396 * Note: If the passed end of range value is beyond the end of file, but 397 * Note: If the passed end of range value is beyond the end of file, but
397 * not LLONG_MAX this routine still performs a hole punch operation. 398 * not LLONG_MAX this routine still performs a hole punch operation.
398 */ 399 */
@@ -422,14 +423,32 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
422 423
423 for (i = 0; i < pagevec_count(&pvec); ++i) { 424 for (i = 0; i < pagevec_count(&pvec); ++i) {
424 struct page *page = pvec.pages[i]; 425 struct page *page = pvec.pages[i];
426 u32 hash;
425 427
426 index = page->index; 428 index = page->index;
429 hash = hugetlb_fault_mutex_hash(h, current->mm,
430 &pseudo_vma,
431 mapping, index, 0);
432 mutex_lock(&hugetlb_fault_mutex_table[hash]);
433
427 /* 434 /*
428 * A mapped page is impossible as callers should unmap 435 * If page is mapped, it was faulted in after being
429 * all references before calling. And, i_mmap_rwsem 436 * unmapped in caller. Unmap (again) now after taking
430 * prevents the creation of additional mappings. 437 * the fault mutex. The mutex will prevent faults
438 * until we finish removing the page.
439 *
440 * This race can only happen in the hole punch case.
441 * Getting here in a truncate operation is a bug.
431 */ 442 */
432 VM_BUG_ON(page_mapped(page)); 443 if (unlikely(page_mapped(page))) {
444 BUG_ON(truncate_op);
445
446 i_mmap_lock_write(mapping);
447 hugetlb_vmdelete_list(&mapping->i_mmap,
448 index * pages_per_huge_page(h),
449 (index + 1) * pages_per_huge_page(h));
450 i_mmap_unlock_write(mapping);
451 }
433 452
434 lock_page(page); 453 lock_page(page);
435 /* 454 /*
@@ -451,6 +470,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
451 } 470 }
452 471
453 unlock_page(page); 472 unlock_page(page);
473 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
454 } 474 }
455 huge_pagevec_release(&pvec); 475 huge_pagevec_release(&pvec);
456 cond_resched(); 476 cond_resched();
@@ -462,20 +482,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
462 482
463static void hugetlbfs_evict_inode(struct inode *inode) 483static void hugetlbfs_evict_inode(struct inode *inode)
464{ 484{
465 struct address_space *mapping = inode->i_mapping;
466 struct resv_map *resv_map; 485 struct resv_map *resv_map;
467 486
468 /*
469 * The vfs layer guarantees that there are no other users of this
470 * inode. Therefore, it would be safe to call remove_inode_hugepages
471 * without holding i_mmap_rwsem. We acquire and hold here to be
472 * consistent with other callers. Since there will be no contention
473 * on the semaphore, overhead is negligible.
474 */
475 i_mmap_lock_write(mapping);
476 remove_inode_hugepages(inode, 0, LLONG_MAX); 487 remove_inode_hugepages(inode, 0, LLONG_MAX);
477 i_mmap_unlock_write(mapping);
478
479 resv_map = (struct resv_map *)inode->i_mapping->private_data; 488 resv_map = (struct resv_map *)inode->i_mapping->private_data;
480 /* root inode doesn't have the resv_map, so we should check it */ 489 /* root inode doesn't have the resv_map, so we should check it */
481 if (resv_map) 490 if (resv_map)
@@ -496,8 +505,8 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
496 i_mmap_lock_write(mapping); 505 i_mmap_lock_write(mapping);
497 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) 506 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
498 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); 507 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
499 remove_inode_hugepages(inode, offset, LLONG_MAX);
500 i_mmap_unlock_write(mapping); 508 i_mmap_unlock_write(mapping);
509 remove_inode_hugepages(inode, offset, LLONG_MAX);
501 return 0; 510 return 0;
502} 511}
503 512
@@ -531,8 +540,8 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
531 hugetlb_vmdelete_list(&mapping->i_mmap, 540 hugetlb_vmdelete_list(&mapping->i_mmap,
532 hole_start >> PAGE_SHIFT, 541 hole_start >> PAGE_SHIFT,
533 hole_end >> PAGE_SHIFT); 542 hole_end >> PAGE_SHIFT);
534 remove_inode_hugepages(inode, hole_start, hole_end);
535 i_mmap_unlock_write(mapping); 543 i_mmap_unlock_write(mapping);
544 remove_inode_hugepages(inode, hole_start, hole_end);
536 inode_unlock(inode); 545 inode_unlock(inode);
537 } 546 }
538 547
@@ -615,11 +624,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
615 /* addr is the offset within the file (zero based) */ 624 /* addr is the offset within the file (zero based) */
616 addr = index * hpage_size; 625 addr = index * hpage_size;
617 626
618 /* 627 /* mutex taken here, fault path and hole punch */
619 * fault mutex taken here, protects against fault path
620 * and hole punch. inode_lock previously taken protects
621 * against truncation.
622 */
623 hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping, 628 hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
624 index, addr); 629 index, addr);
625 mutex_lock(&hugetlb_fault_mutex_table[hash]); 630 mutex_lock(&hugetlb_fault_mutex_table[hash]);
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 46d691ba04bc..45b2322e092d 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -133,15 +133,9 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
133 struct file *file_out, loff_t pos_out, 133 struct file *file_out, loff_t pos_out,
134 size_t count, unsigned int flags) 134 size_t count, unsigned int flags)
135{ 135{
136 ssize_t ret;
137
138 if (file_inode(file_in) == file_inode(file_out)) 136 if (file_inode(file_in) == file_inode(file_out))
139 return -EINVAL; 137 return -EINVAL;
140retry: 138 return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
141 ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
142 if (ret == -EAGAIN)
143 goto retry;
144 return ret;
145} 139}
146 140
147static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) 141static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 96f7d32cd184..898c8321b343 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -128,7 +128,6 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
128 struct pstore_record *record) 128 struct pstore_record *record)
129{ 129{
130 struct persistent_ram_zone *prz; 130 struct persistent_ram_zone *prz;
131 bool update = (record->type == PSTORE_TYPE_DMESG);
132 131
133 /* Give up if we never existed or have hit the end. */ 132 /* Give up if we never existed or have hit the end. */
134 if (!przs) 133 if (!przs)
@@ -139,7 +138,7 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
139 return NULL; 138 return NULL;
140 139
141 /* Update old/shadowed buffer. */ 140 /* Update old/shadowed buffer. */
142 if (update) 141 if (prz->type == PSTORE_TYPE_DMESG)
143 persistent_ram_save_old(prz); 142 persistent_ram_save_old(prz);
144 143
145 if (!persistent_ram_old_size(prz)) 144 if (!persistent_ram_old_size(prz))
@@ -711,18 +710,15 @@ static int ramoops_probe(struct platform_device *pdev)
711{ 710{
712 struct device *dev = &pdev->dev; 711 struct device *dev = &pdev->dev;
713 struct ramoops_platform_data *pdata = dev->platform_data; 712 struct ramoops_platform_data *pdata = dev->platform_data;
713 struct ramoops_platform_data pdata_local;
714 struct ramoops_context *cxt = &oops_cxt; 714 struct ramoops_context *cxt = &oops_cxt;
715 size_t dump_mem_sz; 715 size_t dump_mem_sz;
716 phys_addr_t paddr; 716 phys_addr_t paddr;
717 int err = -EINVAL; 717 int err = -EINVAL;
718 718
719 if (dev_of_node(dev) && !pdata) { 719 if (dev_of_node(dev) && !pdata) {
720 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 720 pdata = &pdata_local;
721 if (!pdata) { 721 memset(pdata, 0, sizeof(*pdata));
722 pr_err("cannot allocate platform data buffer\n");
723 err = -ENOMEM;
724 goto fail_out;
725 }
726 722
727 err = ramoops_parse_dt(pdev, pdata); 723 err = ramoops_parse_dt(pdev, pdata);
728 if (err < 0) 724 if (err < 0)
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index feeae8081c22..aa85f2874a9f 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -43,7 +43,8 @@ int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
43 kuid_t uid; 43 kuid_t uid;
44 kgid_t gid; 44 kgid_t gid;
45 45
46 BUG_ON(!kobj); 46 if (WARN_ON(!kobj))
47 return -EINVAL;
47 48
48 if (kobj->parent) 49 if (kobj->parent)
49 parent = kobj->parent->sd; 50 parent = kobj->parent->sd;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index bb71db63c99c..51398457fe00 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -325,7 +325,8 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
325 kuid_t uid; 325 kuid_t uid;
326 kgid_t gid; 326 kgid_t gid;
327 327
328 BUG_ON(!kobj || !kobj->sd || !attr); 328 if (WARN_ON(!kobj || !kobj->sd || !attr))
329 return -EINVAL;
329 330
330 kobject_get_ownership(kobj, &uid, &gid); 331 kobject_get_ownership(kobj, &uid, &gid);
331 return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode, 332 return sysfs_add_file_mode_ns(kobj->sd, attr, false, attr->mode,
@@ -537,7 +538,8 @@ int sysfs_create_bin_file(struct kobject *kobj,
537 kuid_t uid; 538 kuid_t uid;
538 kgid_t gid; 539 kgid_t gid;
539 540
540 BUG_ON(!kobj || !kobj->sd || !attr); 541 if (WARN_ON(!kobj || !kobj->sd || !attr))
542 return -EINVAL;
541 543
542 kobject_get_ownership(kobj, &uid, &gid); 544 kobject_get_ownership(kobj, &uid, &gid);
543 return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true, 545 return sysfs_add_file_mode_ns(kobj->sd, &attr->attr, true,
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 1eb2d6307663..57038604d4a8 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -112,7 +112,8 @@ static int internal_create_group(struct kobject *kobj, int update,
112 kgid_t gid; 112 kgid_t gid;
113 int error; 113 int error;
114 114
115 BUG_ON(!kobj || (!update && !kobj->sd)); 115 if (WARN_ON(!kobj || (!update && !kobj->sd)))
116 return -EINVAL;
116 117
117 /* Updates may happen before the object has been instantiated */ 118 /* Updates may happen before the object has been instantiated */
118 if (unlikely(update && !kobj->sd)) 119 if (unlikely(update && !kobj->sd))
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 215c225b2ca1..c4deecc80f67 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -23,7 +23,8 @@ static int sysfs_do_create_link_sd(struct kernfs_node *parent,
23{ 23{
24 struct kernfs_node *kn, *target = NULL; 24 struct kernfs_node *kn, *target = NULL;
25 25
26 BUG_ON(!name || !parent); 26 if (WARN_ON(!name || !parent))
27 return -EINVAL;
27 28
28 /* 29 /*
29 * We don't own @target_kobj and it may be removed at any time. 30 * We don't own @target_kobj and it may be removed at any time.
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 5736c942c85b..2d4fc2d33810 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1365,6 +1365,13 @@ enum drm_dp_quirk {
1365 * to 16 bits. So will give a constant value (0x8000) for compatability. 1365 * to 16 bits. So will give a constant value (0x8000) for compatability.
1366 */ 1366 */
1367 DP_DPCD_QUIRK_CONSTANT_N, 1367 DP_DPCD_QUIRK_CONSTANT_N,
1368 /**
1369 * @DP_DPCD_QUIRK_NO_PSR:
1370 *
1371 * The device does not support PSR even if reports that it supports or
1372 * driver still need to implement proper handling for such device.
1373 */
1374 DP_DPCD_QUIRK_NO_PSR,
1368}; 1375};
1369 1376
1370/** 1377/**
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 59f005b419cf..727af08e5ea6 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -616,7 +616,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
616 struct drm_dp_mst_topology_mgr *mgr); 616 struct drm_dp_mst_topology_mgr *mgr);
617 617
618void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); 618void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
619int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr); 619int __must_check
620drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
620struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, 621struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
621 struct drm_dp_mst_topology_mgr *mgr); 622 struct drm_dp_mst_topology_mgr *mgr);
622int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, 623int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-reset.h b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
index ad6f55dabd6d..0f2e0fe45ca4 100644
--- a/include/dt-bindings/reset/amlogic,meson-axg-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-axg-reset.h
@@ -1,12 +1,11 @@
1/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
1/* 2/*
2 *
3 * Copyright (c) 2016 BayLibre, SAS. 3 * Copyright (c) 2016 BayLibre, SAS.
4 * Author: Neil Armstrong <narmstrong@baylibre.com> 4 * Author: Neil Armstrong <narmstrong@baylibre.com>
5 * 5 *
6 * Copyright (c) 2017 Amlogic, inc. 6 * Copyright (c) 2017 Amlogic, inc.
7 * Author: Yixun Lan <yixun.lan@amlogic.com> 7 * Author: Yixun Lan <yixun.lan@amlogic.com>
8 * 8 *
9 * SPDX-License-Identifier: (GPL-2.0+ OR BSD)
10 */ 9 */
11 10
12#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H 11#ifndef _DT_BINDINGS_AMLOGIC_MESON_AXG_RESET_H
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 7cca5f859a90..f3c43519baa7 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
6 6
7struct bcma_soc { 7struct bcma_soc {
8 struct bcma_bus bus; 8 struct bcma_bus bus;
9 struct device *dev;
9}; 10};
10 11
11int __init bcma_host_soc_register(struct bcma_soc *soc); 12int __init bcma_host_soc_register(struct bcma_soc *soc);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 27b74947cd2b..573cca00a0e6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -172,6 +172,7 @@ struct bpf_verifier_state_list {
172#define BPF_ALU_SANITIZE_SRC 1U 172#define BPF_ALU_SANITIZE_SRC 1U
173#define BPF_ALU_SANITIZE_DST 2U 173#define BPF_ALU_SANITIZE_DST 2U
174#define BPF_ALU_NEG_VALUE (1U << 2) 174#define BPF_ALU_NEG_VALUE (1U << 2)
175#define BPF_ALU_NON_POINTER (1U << 3)
175#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ 176#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
176 BPF_ALU_SANITIZE_DST) 177 BPF_ALU_SANITIZE_DST)
177 178
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index f02cee0225d4..d815622cd31e 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -3,13 +3,22 @@
3#define _LINUX_BPFILTER_H 3#define _LINUX_BPFILTER_H
4 4
5#include <uapi/linux/bpfilter.h> 5#include <uapi/linux/bpfilter.h>
6#include <linux/umh.h>
6 7
7struct sock; 8struct sock;
8int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, 9int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
9 unsigned int optlen); 10 unsigned int optlen);
10int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, 11int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
11 int __user *optlen); 12 int __user *optlen);
12extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 13struct bpfilter_umh_ops {
13 char __user *optval, 14 struct umh_info info;
14 unsigned int optlen, bool is_set); 15 /* since ip_getsockopt() can run in parallel, serialize access to umh */
16 struct mutex lock;
17 int (*sockopt)(struct sock *sk, int optname,
18 char __user *optval,
19 unsigned int optlen, bool is_set);
20 int (*start)(void);
21 bool stop;
22};
23extern struct bpfilter_umh_ops bpfilter_ops;
15#endif 24#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 68bb09c29ce8..a420c07904bc 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -35,6 +35,7 @@
35#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ 35#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */
36#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ 36#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
37#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ 37#define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */
38#define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */
38 39
39#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) 40#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
40 41
@@ -53,7 +54,7 @@ struct ceph_options {
53 unsigned long osd_request_timeout; /* jiffies */ 54 unsigned long osd_request_timeout; /* jiffies */
54 55
55 /* 56 /*
56 * any type that can't be simply compared or doesn't need need 57 * any type that can't be simply compared or doesn't need
57 * to be compared should go beyond this point, 58 * to be compared should go beyond this point,
58 * ceph_compare_options() should be updated accordingly 59 * ceph_compare_options() should be updated accordingly
59 */ 60 */
@@ -281,7 +282,8 @@ extern struct ceph_options *ceph_parse_options(char *options,
281 const char *dev_name, const char *dev_name_end, 282 const char *dev_name, const char *dev_name_end,
282 int (*parse_extra_token)(char *c, void *private), 283 int (*parse_extra_token)(char *c, void *private),
283 void *private); 284 void *private);
284int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); 285int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
286 bool show_all);
285extern void ceph_destroy_options(struct ceph_options *opt); 287extern void ceph_destroy_options(struct ceph_options *opt);
286extern int ceph_compare_options(struct ceph_options *new_opt, 288extern int ceph_compare_options(struct ceph_options *new_opt,
287 struct ceph_client *client); 289 struct ceph_client *client);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 7a2af5034278..2294f963dab7 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -354,7 +354,6 @@ struct ceph_osd_client {
354 struct rb_root linger_map_checks; 354 struct rb_root linger_map_checks;
355 atomic_t num_requests; 355 atomic_t num_requests;
356 atomic_t num_homeless; 356 atomic_t num_homeless;
357 bool abort_on_full; /* abort w/ ENOSPC when full */
358 int abort_err; 357 int abort_err;
359 struct delayed_work timeout_work; 358 struct delayed_work timeout_work;
360 struct delayed_work osds_timeout_work; 359 struct delayed_work osds_timeout_work;
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 39f668d5066b..333a6695a918 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,9 +3,8 @@
3#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." 3#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
4#endif 4#endif
5 5
6/* Some compiler specific definitions are overwritten here 6/* Compiler specific definitions for Clang compiler */
7 * for Clang compiler 7
8 */
9#define uninitialized_var(x) x = *(&(x)) 8#define uninitialized_var(x) x = *(&(x))
10 9
11/* same as gcc, this was present in clang-2.6 so we can assume it works 10/* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 5776da43da97..e8579412ad21 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -58,17 +58,13 @@
58 (typeof(ptr)) (__ptr + (off)); \ 58 (typeof(ptr)) (__ptr + (off)); \
59}) 59})
60 60
61/* Make the optimizer believe the variable can be manipulated arbitrarily. */
62#define OPTIMIZER_HIDE_VAR(var) \
63 __asm__ ("" : "=r" (var) : "0" (var))
64
65/* 61/*
66 * A trick to suppress uninitialized variable warning without generating any 62 * A trick to suppress uninitialized variable warning without generating any
67 * code 63 * code
68 */ 64 */
69#define uninitialized_var(x) x = x 65#define uninitialized_var(x) x = x
70 66
71#ifdef RETPOLINE 67#ifdef CONFIG_RETPOLINE
72#define __noretpoline __attribute__((__indirect_branch__("keep"))) 68#define __noretpoline __attribute__((__indirect_branch__("keep")))
73#endif 69#endif
74 70
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 517bd14e1222..b17f3cd18334 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -5,9 +5,7 @@
5 5
6#ifdef __ECC 6#ifdef __ECC
7 7
8/* Some compiler specific definitions are overwritten here 8/* Compiler specific definitions for Intel ECC compiler */
9 * for Intel ECC compiler
10 */
11 9
12#include <asm/intrinsics.h> 10#include <asm/intrinsics.h>
13 11
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index fc5004a4b07d..445348facea9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
161#endif 161#endif
162 162
163#ifndef OPTIMIZER_HIDE_VAR 163#ifndef OPTIMIZER_HIDE_VAR
164#define OPTIMIZER_HIDE_VAR(var) barrier() 164/* Make the optimizer believe the variable can be manipulated arbitrarily. */
165#define OPTIMIZER_HIDE_VAR(var) \
166 __asm__ ("" : "=r" (var) : "0" (var))
165#endif 167#endif
166 168
167/* Not-quite-unique ID. */ 169/* Not-quite-unique ID. */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index cef2127e1d70..f6ded992c183 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -717,15 +717,6 @@ static inline unsigned long dma_max_pfn(struct device *dev)
717} 717}
718#endif 718#endif
719 719
720/*
721 * Please always use dma_alloc_coherent instead as it already zeroes the memory!
722 */
723static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
724 dma_addr_t *dma_handle, gfp_t flag)
725{
726 return dma_alloc_coherent(dev, size, dma_handle, flag);
727}
728
729static inline int dma_get_cache_alignment(void) 720static inline int dma_get_cache_alignment(void)
730{ 721{
731#ifdef ARCH_DMA_MINALIGN 722#ifdef ARCH_DMA_MINALIGN
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 7cdd31a69719..f52ef0ad6781 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info);
653 653
654extern struct fb_info *registered_fb[FB_MAX]; 654extern struct fb_info *registered_fb[FB_MAX];
655extern int num_registered_fb; 655extern int num_registered_fb;
656extern bool fb_center_logo;
656extern struct class *fb_class; 657extern struct class *fb_class;
657 658
658#define for_each_registered_fb(i) \ 659#define for_each_registered_fb(i) \
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 5440f11b0907..7315977b64da 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -160,6 +160,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
160} 160}
161 161
162enum nvdimm_security_state { 162enum nvdimm_security_state {
163 NVDIMM_SECURITY_ERROR = -1,
163 NVDIMM_SECURITY_DISABLED, 164 NVDIMM_SECURITY_DISABLED,
164 NVDIMM_SECURITY_UNLOCKED, 165 NVDIMM_SECURITY_UNLOCKED,
165 NVDIMM_SECURITY_LOCKED, 166 NVDIMM_SECURITY_LOCKED,
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 9a9631f0559e..fc91082d4c35 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2791,6 +2791,100 @@ struct ec_response_battery_vendor_param {
2791} __packed; 2791} __packed;
2792 2792
2793/*****************************************************************************/ 2793/*****************************************************************************/
2794/* Commands for I2S recording on audio codec. */
2795
2796#define EC_CMD_CODEC_I2S 0x00BC
2797
2798enum ec_codec_i2s_subcmd {
2799 EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
2800 EC_CODEC_SET_GAIN = 0x1,
2801 EC_CODEC_GET_GAIN = 0x2,
2802 EC_CODEC_I2S_ENABLE = 0x3,
2803 EC_CODEC_I2S_SET_CONFIG = 0x4,
2804 EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
2805 EC_CODEC_I2S_SET_BCLK = 0x6,
2806};
2807
2808enum ec_sample_depth_value {
2809 EC_CODEC_SAMPLE_DEPTH_16 = 0,
2810 EC_CODEC_SAMPLE_DEPTH_24 = 1,
2811};
2812
2813enum ec_i2s_config {
2814 EC_DAI_FMT_I2S = 0,
2815 EC_DAI_FMT_RIGHT_J = 1,
2816 EC_DAI_FMT_LEFT_J = 2,
2817 EC_DAI_FMT_PCM_A = 3,
2818 EC_DAI_FMT_PCM_B = 4,
2819 EC_DAI_FMT_PCM_TDM = 5,
2820};
2821
2822struct ec_param_codec_i2s {
2823 /*
2824 * enum ec_codec_i2s_subcmd
2825 */
2826 uint8_t cmd;
2827 union {
2828 /*
2829 * EC_CODEC_SET_SAMPLE_DEPTH
2830 * Value should be one of ec_sample_depth_value.
2831 */
2832 uint8_t depth;
2833
2834 /*
2835 * EC_CODEC_SET_GAIN
2836 * Value should be 0~43 for both channels.
2837 */
2838 struct ec_param_codec_i2s_set_gain {
2839 uint8_t left;
2840 uint8_t right;
2841 } __packed gain;
2842
2843 /*
2844 * EC_CODEC_I2S_ENABLE
2845 * 1 to enable, 0 to disable.
2846 */
2847 uint8_t i2s_enable;
2848
2849 /*
2850 * EC_CODEC_I2S_SET_COFNIG
2851 * Value should be one of ec_i2s_config.
2852 */
2853 uint8_t i2s_config;
2854
2855 /*
2856 * EC_CODEC_I2S_SET_TDM_CONFIG
2857 * Value should be one of ec_i2s_config.
2858 */
2859 struct ec_param_codec_i2s_tdm {
2860 /*
2861 * 0 to 496
2862 */
2863 int16_t ch0_delay;
2864 /*
2865 * -1 to 496
2866 */
2867 int16_t ch1_delay;
2868 uint8_t adjacent_to_ch0;
2869 uint8_t adjacent_to_ch1;
2870 } __packed tdm_param;
2871
2872 /*
2873 * EC_CODEC_I2S_SET_BCLK
2874 */
2875 uint32_t bclk;
2876 };
2877} __packed;
2878
2879/*
2880 * For subcommand EC_CODEC_GET_GAIN.
2881 */
2882struct ec_response_codec_gain {
2883 uint8_t left;
2884 uint8_t right;
2885} __packed;
2886
2887/*****************************************************************************/
2794/* System commands */ 2888/* System commands */
2795 2889
2796/* 2890/*
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
index ab16ad283def..2083fa20821d 100644
--- a/include/linux/mfd/ingenic-tcu.h
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -41,7 +41,7 @@
41#define TCU_TCSR_PRESCALE_LSB 3 41#define TCU_TCSR_PRESCALE_LSB 3
42#define TCU_TCSR_PRESCALE_MASK 0x38 42#define TCU_TCSR_PRESCALE_MASK 0x38
43 43
44#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ 44#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */
45#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ 45#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
46#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ 46#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
47 47
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index fe69c0f4398f..4d5d51a9c8a6 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -15,6 +15,7 @@
15#include <linux/gpio/consumer.h> 15#include <linux/gpio/consumer.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/mfd/madera/pdata.h> 17#include <linux/mfd/madera/pdata.h>
18#include <linux/mutex.h>
18#include <linux/notifier.h> 19#include <linux/notifier.h>
19#include <linux/regmap.h> 20#include <linux/regmap.h>
20#include <linux/regulator/consumer.h> 21#include <linux/regulator/consumer.h>
@@ -37,6 +38,8 @@ enum madera_type {
37 38
38#define MADERA_MAX_MICBIAS 4 39#define MADERA_MAX_MICBIAS 4
39 40
41#define MADERA_MAX_HP_OUTPUT 3
42
40/* Notifier events */ 43/* Notifier events */
41#define MADERA_NOTIFY_VOICE_TRIGGER 0x1 44#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
42#define MADERA_NOTIFY_HPDET 0x2 45#define MADERA_NOTIFY_HPDET 0x2
@@ -183,6 +186,10 @@ struct madera {
183 unsigned int num_childbias[MADERA_MAX_MICBIAS]; 186 unsigned int num_childbias[MADERA_MAX_MICBIAS];
184 187
185 struct snd_soc_dapm_context *dapm; 188 struct snd_soc_dapm_context *dapm;
189 struct mutex dapm_ptr_lock;
190 unsigned int hp_ena;
191 bool out_clamp[MADERA_MAX_HP_OUTPUT];
192 bool out_shorted[MADERA_MAX_HP_OUTPUT];
186 193
187 struct blocking_notifier_head notifier; 194 struct blocking_notifier_head notifier;
188}; 195};
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index b9a53e013bff..483168403ae5 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -78,6 +78,8 @@
78#define STEPCONFIG_YNN BIT(8) 78#define STEPCONFIG_YNN BIT(8)
79#define STEPCONFIG_XNP BIT(9) 79#define STEPCONFIG_XNP BIT(9)
80#define STEPCONFIG_YPN BIT(10) 80#define STEPCONFIG_YPN BIT(10)
81#define STEPCONFIG_RFP(val) ((val) << 12)
82#define STEPCONFIG_RFP_VREFP (0x3 << 12)
81#define STEPCONFIG_INM_MASK (0xF << 15) 83#define STEPCONFIG_INM_MASK (0xF << 15)
82#define STEPCONFIG_INM(val) ((val) << 15) 84#define STEPCONFIG_INM(val) ((val) << 15)
83#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) 85#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
@@ -86,6 +88,8 @@
86#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) 88#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
87#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) 89#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
88#define STEPCONFIG_FIFO1 BIT(26) 90#define STEPCONFIG_FIFO1 BIT(26)
91#define STEPCONFIG_RFM(val) ((val) << 23)
92#define STEPCONFIG_RFM_VREFN (0x3 << 23)
89 93
90/* Delay register */ 94/* Delay register */
91#define STEPDELAY_OPEN_MASK (0x3FFFF << 0) 95#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index e2687a30e5a1..739b7bf37eaa 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -79,7 +79,7 @@
79/* Some controllers have a CBSY bit */ 79/* Some controllers have a CBSY bit */
80#define TMIO_MMC_HAVE_CBSY BIT(11) 80#define TMIO_MMC_HAVE_CBSY BIT(11)
81 81
82/* Some controllers that support HS400 use use 4 taps while others use 8. */ 82/* Some controllers that support HS400 use 4 taps while others use 8. */
83#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) 83#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
84 84
85int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 85int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cc4a507d7ca4..842f9189537b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -520,6 +520,12 @@ enum pgdat_flags {
520 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 520 PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */
521}; 521};
522 522
523enum zone_flags {
524 ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks.
525 * Cleared when kswapd is woken.
526 */
527};
528
523static inline unsigned long zone_managed_pages(struct zone *zone) 529static inline unsigned long zone_managed_pages(struct zone *zone)
524{ 530{
525 return (unsigned long)atomic_long_read(&zone->managed_pages); 531 return (unsigned long)atomic_long_read(&zone->managed_pages);
diff --git a/include/linux/module.h b/include/linux/module.h
index 9a21fe3509af..8fa38d3e7538 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -828,7 +828,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr,
828static inline void module_bug_cleanup(struct module *mod) {} 828static inline void module_bug_cleanup(struct module *mod) {}
829#endif /* CONFIG_GENERIC_BUG */ 829#endif /* CONFIG_GENERIC_BUG */
830 830
831#ifdef RETPOLINE 831#ifdef CONFIG_RETPOLINE
832extern bool retpoline_module_ok(bool has_retpoline); 832extern bool retpoline_module_ok(bool has_retpoline);
833#else 833#else
834static inline bool retpoline_module_ok(bool has_retpoline) 834static inline bool retpoline_module_ok(bool has_retpoline)
diff --git a/include/linux/of.h b/include/linux/of.h
index fe472e5195a9..e240992e5cb6 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -50,7 +50,6 @@ struct of_irq_controller;
50 50
51struct device_node { 51struct device_node {
52 const char *name; 52 const char *name;
53 const char *type;
54 phandle phandle; 53 phandle phandle;
55 const char *full_name; 54 const char *full_name;
56 struct fwnode_handle fwnode; 55 struct fwnode_handle fwnode;
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index cb1adf0b78a9..249d4d7fbf18 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -24,7 +24,7 @@ static inline void *
24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, 24pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
25 dma_addr_t *dma_handle) 25 dma_addr_t *dma_handle)
26{ 26{
27 return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); 27 return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
28} 28}
29 29
30static inline void 30static inline void
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3b051f761450..ef20aeea10cc 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
48extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; 48extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init;
49extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; 49extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init;
50extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 50extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
51extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
51extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 52extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
52 53
53#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) 54#define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features)
@@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
56#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) 57#define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features)
57#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) 58#define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features)
58#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) 59#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
60#define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features)
59#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) 61#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
60 62
61extern const int phy_10_100_features_array[4]; 63extern const int phy_10_100_features_array[4];
@@ -467,8 +469,8 @@ struct phy_device {
467 * only works for PHYs with IDs which match this field 469 * only works for PHYs with IDs which match this field
468 * name: The friendly name of this PHY type 470 * name: The friendly name of this PHY type
469 * phy_id_mask: Defines the important bits of the phy_id 471 * phy_id_mask: Defines the important bits of the phy_id
470 * features: A list of features (speed, duplex, etc) supported 472 * features: A mandatory list of features (speed, duplex, etc)
471 * by this PHY 473 * supported by this PHY
472 * flags: A bitfield defining certain other features this PHY 474 * flags: A bitfield defining certain other features this PHY
473 * supports (like interrupts) 475 * supports (like interrupts)
474 * 476 *
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e8e118d70fd7..3f350e2749fe 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -42,6 +42,7 @@ enum phy_mode {
42 PHY_MODE_PCIE, 42 PHY_MODE_PCIE,
43 PHY_MODE_ETHERNET, 43 PHY_MODE_ETHERNET,
44 PHY_MODE_MIPI_DPHY, 44 PHY_MODE_MIPI_DPHY,
45 PHY_MODE_SATA
45}; 46};
46 47
47/** 48/**
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0a2a88e5a383..b895f4e79868 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -108,6 +108,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp);
108int dev_pm_opp_add(struct device *dev, unsigned long freq, 108int dev_pm_opp_add(struct device *dev, unsigned long freq,
109 unsigned long u_volt); 109 unsigned long u_volt);
110void dev_pm_opp_remove(struct device *dev, unsigned long freq); 110void dev_pm_opp_remove(struct device *dev, unsigned long freq);
111void dev_pm_opp_remove_all_dynamic(struct device *dev);
111 112
112int dev_pm_opp_enable(struct device *dev, unsigned long freq); 113int dev_pm_opp_enable(struct device *dev, unsigned long freq);
113 114
@@ -217,6 +218,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
217{ 218{
218} 219}
219 220
221static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
222{
223}
224
220static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) 225static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
221{ 226{
222 return 0; 227 return 0;
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 1637385bcc17..d0aecc04c54b 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -13,6 +13,7 @@
13#ifndef __QCOM_SCM_H 13#ifndef __QCOM_SCM_H
14#define __QCOM_SCM_H 14#define __QCOM_SCM_H
15 15
16#include <linux/err.h>
16#include <linux/types.h> 17#include <linux/types.h>
17#include <linux/cpumask.h> 18#include <linux/cpumask.h>
18 19
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 59ddf9af909e..2dd0a9ed5b36 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -663,6 +663,37 @@ out:
663static inline void qed_chain_set_prod(struct qed_chain *p_chain, 663static inline void qed_chain_set_prod(struct qed_chain *p_chain,
664 u32 prod_idx, void *p_prod_elem) 664 u32 prod_idx, void *p_prod_elem)
665{ 665{
666 if (p_chain->mode == QED_CHAIN_MODE_PBL) {
667 u32 cur_prod, page_mask, page_cnt, page_diff;
668
669 cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
670 p_chain->u.chain32.prod_idx;
671
672 /* Assume that number of elements in a page is power of 2 */
673 page_mask = ~p_chain->elem_per_page_mask;
674
675 /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
676 * reaches the first element of next page before the page index
677 * is incremented. See qed_chain_produce().
678 * Index wrap around is not a problem because the difference
679 * between current and given producer indices is always
680 * positive and lower than the chain's capacity.
681 */
682 page_diff = (((cur_prod - 1) & page_mask) -
683 ((prod_idx - 1) & page_mask)) /
684 p_chain->elem_per_page;
685
686 page_cnt = qed_chain_get_page_cnt(p_chain);
687 if (is_chain_u16(p_chain))
688 p_chain->pbl.c.u16.prod_page_idx =
689 (p_chain->pbl.c.u16.prod_page_idx -
690 page_diff + page_cnt) % page_cnt;
691 else
692 p_chain->pbl.c.u32.prod_page_idx =
693 (p_chain->pbl.c.u32.prod_page_idx -
694 page_diff + page_cnt) % page_cnt;
695 }
696
666 if (is_chain_u16(p_chain)) 697 if (is_chain_u16(p_chain))
667 p_chain->u.chain16.prod_idx = (u16) prod_idx; 698 p_chain->u.chain16.prod_idx = (u16) prod_idx;
668 else 699 else
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 29af6d6b2f4b..c1901b61ca30 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -32,6 +32,8 @@ struct reset_control *devm_reset_control_array_get(struct device *dev,
32struct reset_control *of_reset_control_array_get(struct device_node *np, 32struct reset_control *of_reset_control_array_get(struct device_node *np,
33 bool shared, bool optional); 33 bool shared, bool optional);
34 34
35int reset_control_get_count(struct device *dev);
36
35#else 37#else
36 38
37static inline int reset_control_reset(struct reset_control *rstc) 39static inline int reset_control_reset(struct reset_control *rstc)
@@ -97,6 +99,11 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
97 return optional ? NULL : ERR_PTR(-ENOTSUPP); 99 return optional ? NULL : ERR_PTR(-ENOTSUPP);
98} 100}
99 101
102static inline int reset_control_get_count(struct device *dev)
103{
104 return -ENOENT;
105}
106
100#endif /* CONFIG_RESET_CONTROLLER */ 107#endif /* CONFIG_RESET_CONTROLLER */
101 108
102static inline int __must_check device_reset(struct device *dev) 109static inline int __must_check device_reset(struct device *dev)
@@ -138,7 +145,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
138 * 145 *
139 * Returns a struct reset_control or IS_ERR() condition containing errno. 146 * Returns a struct reset_control or IS_ERR() condition containing errno.
140 * This function is intended for use with reset-controls which are shared 147 * This function is intended for use with reset-controls which are shared
141 * between hardware-blocks. 148 * between hardware blocks.
142 * 149 *
143 * When a reset-control is shared, the behavior of reset_control_assert / 150 * When a reset-control is shared, the behavior of reset_control_assert /
144 * deassert is changed, the reset-core will keep track of a deassert_count 151 * deassert is changed, the reset-core will keep track of a deassert_count
@@ -187,7 +194,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
187} 194}
188 195
189/** 196/**
190 * of_reset_control_get_shared - Lookup and obtain an shared reference 197 * of_reset_control_get_shared - Lookup and obtain a shared reference
191 * to a reset controller. 198 * to a reset controller.
192 * @node: device to be reset by the controller 199 * @node: device to be reset by the controller
193 * @id: reset line name 200 * @id: reset line name
@@ -229,7 +236,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
229} 236}
230 237
231/** 238/**
232 * of_reset_control_get_shared_by_index - Lookup and obtain an shared 239 * of_reset_control_get_shared_by_index - Lookup and obtain a shared
233 * reference to a reset controller 240 * reference to a reset controller
234 * by index. 241 * by index.
235 * @node: device to be reset by the controller 242 * @node: device to be reset by the controller
@@ -322,7 +329,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
322 329
323/** 330/**
324 * devm_reset_control_get_shared_by_index - resource managed 331 * devm_reset_control_get_shared_by_index - resource managed
325 * reset_control_get_shared 332 * reset_control_get_shared
326 * @dev: device to be reset by the controller 333 * @dev: device to be reset by the controller
327 * @index: index of the reset controller 334 * @index: index of the reset controller
328 * 335 *
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89541d248893..d2f90fa92468 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -995,7 +995,7 @@ struct task_struct {
995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */ 995 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
996 struct list_head cg_list; 996 struct list_head cg_list;
997#endif 997#endif
998#ifdef CONFIG_RESCTRL 998#ifdef CONFIG_X86_RESCTRL
999 u32 closid; 999 u32 closid;
1000 u32 rmid; 1000 u32 rmid;
1001#endif 1001#endif
@@ -1406,6 +1406,7 @@ extern struct pid *cad_pid;
1406#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ 1406#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
1407#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ 1407#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1408#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ 1408#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
1409#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
1409#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ 1410#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1410#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1411#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1411#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1412#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
@@ -1904,6 +1905,14 @@ static inline void rseq_execve(struct task_struct *t)
1904 1905
1905#endif 1906#endif
1906 1907
1908void __exit_umh(struct task_struct *tsk);
1909
1910static inline void exit_umh(struct task_struct *tsk)
1911{
1912 if (unlikely(tsk->flags & PF_UMH))
1913 __exit_umh(tsk);
1914}
1915
1907#ifdef CONFIG_DEBUG_RSEQ 1916#ifdef CONFIG_DEBUG_RSEQ
1908 1917
1909void rseq_syscall(struct pt_regs *regs); 1918void rseq_syscall(struct pt_regs *regs);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93f56fddd92a..95d25b010a25 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3218,6 +3218,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3218 * 3218 *
3219 * This is exactly the same as pskb_trim except that it ensures the 3219 * This is exactly the same as pskb_trim except that it ensures the
3220 * checksum of received packets are still valid after the operation. 3220 * checksum of received packets are still valid after the operation.
3221 * It can change skb pointers.
3221 */ 3222 */
3222 3223
3223static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) 3224static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
diff --git a/include/linux/umh.h b/include/linux/umh.h
index 235f51b62c71..0c08de356d0d 100644
--- a/include/linux/umh.h
+++ b/include/linux/umh.h
@@ -47,6 +47,8 @@ struct umh_info {
47 const char *cmdline; 47 const char *cmdline;
48 struct file *pipe_to_umh; 48 struct file *pipe_to_umh;
49 struct file *pipe_from_umh; 49 struct file *pipe_from_umh;
50 struct list_head list;
51 void (*cleanup)(struct umh_info *info);
50 pid_t pid; 52 pid_t pid;
51}; 53};
52int fork_usermode_blob(void *data, size_t len, struct umh_info *info); 54int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 32baf8e26735..987b6491b946 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -12,6 +12,11 @@ struct irq_affinity;
12 12
13/** 13/**
14 * virtio_config_ops - operations for configuring a virtio device 14 * virtio_config_ops - operations for configuring a virtio device
15 * Note: Do not assume that a transport implements all of the operations
16 * getting/setting a value as a simple read/write! Generally speaking,
17 * any of @get/@set, @get_status/@set_status, or @get_features/
18 * @finalize_features are NOT safe to be called from an atomic
19 * context.
15 * @get: read the value of a configuration field 20 * @get: read the value of a configuration field
16 * vdev: the virtio_device 21 * vdev: the virtio_device
17 * offset: the offset of the configuration field 22 * offset: the offset of the configuration field
@@ -22,7 +27,7 @@ struct irq_affinity;
22 * offset: the offset of the configuration field 27 * offset: the offset of the configuration field
23 * buf: the buffer to read the field value from. 28 * buf: the buffer to read the field value from.
24 * len: the length of the buffer 29 * len: the length of the buffer
25 * @generation: config generation counter 30 * @generation: config generation counter (optional)
26 * vdev: the virtio_device 31 * vdev: the virtio_device
27 * Returns the config generation counter 32 * Returns the config generation counter
28 * @get_status: read the status byte 33 * @get_status: read the status byte
@@ -48,17 +53,17 @@ struct irq_affinity;
48 * @del_vqs: free virtqueues found by find_vqs(). 53 * @del_vqs: free virtqueues found by find_vqs().
49 * @get_features: get the array of feature bits for this device. 54 * @get_features: get the array of feature bits for this device.
50 * vdev: the virtio_device 55 * vdev: the virtio_device
51 * Returns the first 32 feature bits (all we currently need). 56 * Returns the first 64 feature bits (all we currently need).
52 * @finalize_features: confirm what device features we'll be using. 57 * @finalize_features: confirm what device features we'll be using.
53 * vdev: the virtio_device 58 * vdev: the virtio_device
54 * This gives the final feature bits for the device: it can change 59 * This gives the final feature bits for the device: it can change
55 * the dev->feature bits if it wants. 60 * the dev->feature bits if it wants.
56 * Returns 0 on success or error status 61 * Returns 0 on success or error status
57 * @bus_name: return the bus name associated with the device 62 * @bus_name: return the bus name associated with the device (optional)
58 * vdev: the virtio_device 63 * vdev: the virtio_device
59 * This returns a pointer to the bus name a la pci_name from which 64 * This returns a pointer to the bus name a la pci_name from which
60 * the caller can then copy. 65 * the caller can then copy.
61 * @set_vq_affinity: set the affinity for a virtqueue. 66 * @set_vq_affinity: set the affinity for a virtqueue (optional).
62 * @get_vq_affinity: get the affinity for a virtqueue (optional). 67 * @get_vq_affinity: get the affinity for a virtqueue (optional).
63 */ 68 */
64typedef void vq_callback_t(struct virtqueue *); 69typedef void vq_callback_t(struct virtqueue *);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1adefe42c0a6..2bfb87eb98ce 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -21,18 +21,6 @@ struct socket;
21struct rxrpc_call; 21struct rxrpc_call;
22 22
23/* 23/*
24 * Call completion condition (state == RXRPC_CALL_COMPLETE).
25 */
26enum rxrpc_call_completion {
27 RXRPC_CALL_SUCCEEDED, /* - Normal termination */
28 RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
29 RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
30 RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
31 RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
32 NR__RXRPC_CALL_COMPLETIONS
33};
34
35/*
36 * Debug ID counter for tracing. 24 * Debug ID counter for tracing.
37 */ 25 */
38extern atomic_t rxrpc_debug_id; 26extern atomic_t rxrpc_debug_id;
@@ -73,10 +61,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
73 rxrpc_user_attach_call_t, unsigned long, gfp_t, 61 rxrpc_user_attach_call_t, unsigned long, gfp_t,
74 unsigned int); 62 unsigned int);
75void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64); 63void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
76int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
77 struct sockaddr_rxrpc *, struct key *);
78int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
79 enum rxrpc_call_completion *, u32 *);
80u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *); 64u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
81void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *); 65void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
82u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *); 66u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index c5969762a8f4..9c8214d2116d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -241,7 +241,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *,
241 struct netlink_ext_ack *extack); 241 struct netlink_ext_ack *extack);
242int fib_table_dump(struct fib_table *table, struct sk_buff *skb, 242int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
243 struct netlink_callback *cb, struct fib_dump_filter *filter); 243 struct netlink_callback *cb, struct fib_dump_filter *filter);
244int fib_table_flush(struct net *net, struct fib_table *table); 244int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); 245struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
246void fib_table_flush_external(struct fib_table *table); 246void fib_table_flush_external(struct fib_table *table);
247void fib_free_table(struct fib_table *tb); 247void fib_free_table(struct fib_table *tb);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 7d5cda7ce32a..3e370cb36263 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -84,7 +84,6 @@ struct flow_offload {
84struct nf_flow_route { 84struct nf_flow_route {
85 struct { 85 struct {
86 struct dst_entry *dst; 86 struct dst_entry *dst;
87 int ifindex;
88 } tuple[FLOW_OFFLOAD_DIR_MAX]; 87 } tuple[FLOW_OFFLOAD_DIR_MAX];
89}; 88};
90 89
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 33d291888ba9..e3f005eae1f7 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -25,6 +25,7 @@
25enum afs_call_trace { 25enum afs_call_trace {
26 afs_call_trace_alloc, 26 afs_call_trace_alloc,
27 afs_call_trace_free, 27 afs_call_trace_free,
28 afs_call_trace_get,
28 afs_call_trace_put, 29 afs_call_trace_put,
29 afs_call_trace_wake, 30 afs_call_trace_wake,
30 afs_call_trace_work, 31 afs_call_trace_work,
@@ -159,6 +160,7 @@ enum afs_file_error {
159#define afs_call_traces \ 160#define afs_call_traces \
160 EM(afs_call_trace_alloc, "ALLOC") \ 161 EM(afs_call_trace_alloc, "ALLOC") \
161 EM(afs_call_trace_free, "FREE ") \ 162 EM(afs_call_trace_free, "FREE ") \
163 EM(afs_call_trace_get, "GET ") \
162 EM(afs_call_trace_put, "PUT ") \ 164 EM(afs_call_trace_put, "PUT ") \
163 EM(afs_call_trace_wake, "WAKE ") \ 165 EM(afs_call_trace_wake, "WAKE ") \
164 E_(afs_call_trace_work, "WORK ") 166 E_(afs_call_trace_work, "WORK ")
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index 36a7e3f18e69..f28acd952d03 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -400,6 +400,8 @@ enum {
400/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */ 400/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
401#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) 401#define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT)
402#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) 402#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
403#define AUDIT_ARCH_RISCV32 (EM_RISCV|__AUDIT_ARCH_LE)
404#define AUDIT_ARCH_RISCV64 (EM_RISCV|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
403#define AUDIT_ARCH_S390 (EM_S390) 405#define AUDIT_ARCH_S390 (EM_S390)
404#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) 406#define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT)
405#define AUDIT_ARCH_SH (EM_SH) 407#define AUDIT_ARCH_SH (EM_SH)
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index f6052e70bf40..a55cb8b10165 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -268,7 +268,7 @@ struct sockaddr_in {
268#define IN_MULTICAST(a) IN_CLASSD(a) 268#define IN_MULTICAST(a) IN_CLASSD(a)
269#define IN_MULTICAST_NET 0xe0000000 269#define IN_MULTICAST_NET 0xe0000000
270 270
271#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) 271#define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff)
272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) 272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
273 273
274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index d73d83950265..1bc794ad957a 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -147,7 +147,7 @@ struct ptp_pin_desc {
147#define PTP_SYS_OFFSET_PRECISE \ 147#define PTP_SYS_OFFSET_PRECISE \
148 _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) 148 _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
149#define PTP_SYS_OFFSET_EXTENDED \ 149#define PTP_SYS_OFFSET_EXTENDED \
150 _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) 150 _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
151 151
152struct ptp_extts_event { 152struct ptp_extts_event {
153 struct ptp_clock_time t; /* Time event occured. */ 153 struct ptp_clock_time t; /* Time event occured. */
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index d13fd490b66d..6e73f0274e41 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
78 PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD, 78 PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
79 PVRDMA_WR_BIND_MW, 79 PVRDMA_WR_BIND_MW,
80 PVRDMA_WR_REG_SIG_MR, 80 PVRDMA_WR_REG_SIG_MR,
81 PVRDMA_WR_ERROR,
81}; 82};
82 83
83enum pvrdma_wc_status { 84enum pvrdma_wc_status {
diff --git a/init/Kconfig b/init/Kconfig
index d47cb77a220e..513fa544a134 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1124,6 +1124,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
1124 bool "Dead code and data elimination (EXPERIMENTAL)" 1124 bool "Dead code and data elimination (EXPERIMENTAL)"
1125 depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION 1125 depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
1126 depends on EXPERT 1126 depends on EXPERT
1127 depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
1127 depends on $(cc-option,-ffunction-sections -fdata-sections) 1128 depends on $(cc-option,-ffunction-sections -fdata-sections)
1128 depends on $(ld-option,--gc-sections) 1129 depends on $(ld-option,--gc-sections)
1129 help 1130 help
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 715f9fcf4712..befe570be5ba 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -467,7 +467,7 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
467 return kind_ops[BTF_INFO_KIND(t->info)]; 467 return kind_ops[BTF_INFO_KIND(t->info)];
468} 468}
469 469
470bool btf_name_offset_valid(const struct btf *btf, u32 offset) 470static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
471{ 471{
472 return BTF_STR_OFFSET_VALID(offset) && 472 return BTF_STR_OFFSET_VALID(offset) &&
473 offset < btf->hdr.str_len; 473 offset < btf->hdr.str_len;
@@ -1219,8 +1219,6 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1219 u8 nr_copy_bits; 1219 u8 nr_copy_bits;
1220 u64 print_num; 1220 u64 print_num;
1221 1221
1222 data += BITS_ROUNDDOWN_BYTES(bits_offset);
1223 bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
1224 nr_copy_bits = nr_bits + bits_offset; 1222 nr_copy_bits = nr_bits + bits_offset;
1225 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); 1223 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1226 1224
@@ -1255,7 +1253,9 @@ static void btf_int_bits_seq_show(const struct btf *btf,
1255 * BTF_INT_OFFSET() cannot exceed 64 bits. 1253 * BTF_INT_OFFSET() cannot exceed 64 bits.
1256 */ 1254 */
1257 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); 1255 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1258 btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m); 1256 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1257 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1258 btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1259} 1259}
1260 1260
1261static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, 1261static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -2001,12 +2001,12 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2001 2001
2002 member_offset = btf_member_bit_offset(t, member); 2002 member_offset = btf_member_bit_offset(t, member);
2003 bitfield_size = btf_member_bitfield_size(t, member); 2003 bitfield_size = btf_member_bitfield_size(t, member);
2004 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2005 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2004 if (bitfield_size) { 2006 if (bitfield_size) {
2005 btf_bitfield_seq_show(data, member_offset, 2007 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2006 bitfield_size, m); 2008 bitfield_size, m);
2007 } else { 2009 } else {
2008 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2009 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2010 ops = btf_type_ops(member_type); 2010 ops = btf_type_ops(member_type);
2011 ops->seq_show(btf, member_type, member->type, 2011 ops->seq_show(btf, member_type, member->type,
2012 data + bytes_offset, bits8_offset, m); 2012 data + bytes_offset, bits8_offset, m);
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 9425c2fb872f..ab612fe9862f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -718,6 +718,7 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
718 case BPF_FUNC_trace_printk: 718 case BPF_FUNC_trace_printk:
719 if (capable(CAP_SYS_ADMIN)) 719 if (capable(CAP_SYS_ADMIN))
720 return bpf_get_trace_printk_proto(); 720 return bpf_get_trace_printk_proto();
721 /* fall through */
721 default: 722 default:
722 return NULL; 723 return NULL;
723 } 724 }
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 99d243e1ad6e..52378d3e34b3 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -12,6 +12,7 @@
12struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) 12struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
13{ 13{
14 struct bpf_map *inner_map, *inner_map_meta; 14 struct bpf_map *inner_map, *inner_map_meta;
15 u32 inner_map_meta_size;
15 struct fd f; 16 struct fd f;
16 17
17 f = fdget(inner_map_ufd); 18 f = fdget(inner_map_ufd);
@@ -36,7 +37,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
36 return ERR_PTR(-EINVAL); 37 return ERR_PTR(-EINVAL);
37 } 38 }
38 39
39 inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); 40 inner_map_meta_size = sizeof(*inner_map_meta);
41 /* In some cases verifier needs to access beyond just base map. */
42 if (inner_map->ops == &array_map_ops)
43 inner_map_meta_size = sizeof(struct bpf_array);
44
45 inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
40 if (!inner_map_meta) { 46 if (!inner_map_meta) {
41 fdput(f); 47 fdput(f);
42 return ERR_PTR(-ENOMEM); 48 return ERR_PTR(-ENOMEM);
@@ -46,9 +52,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
46 inner_map_meta->key_size = inner_map->key_size; 52 inner_map_meta->key_size = inner_map->key_size;
47 inner_map_meta->value_size = inner_map->value_size; 53 inner_map_meta->value_size = inner_map->value_size;
48 inner_map_meta->map_flags = inner_map->map_flags; 54 inner_map_meta->map_flags = inner_map->map_flags;
49 inner_map_meta->ops = inner_map->ops;
50 inner_map_meta->max_entries = inner_map->max_entries; 55 inner_map_meta->max_entries = inner_map->max_entries;
51 56
57 /* Misc members not needed in bpf_map_meta_equal() check. */
58 inner_map_meta->ops = inner_map->ops;
59 if (inner_map->ops == &array_map_ops) {
60 inner_map_meta->unpriv_array = inner_map->unpriv_array;
61 container_of(inner_map_meta, struct bpf_array, map)->index_mask =
62 container_of(inner_map, struct bpf_array, map)->index_mask;
63 }
64
52 fdput(f); 65 fdput(f);
53 return inner_map_meta; 66 return inner_map_meta;
54} 67}
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 90daf285de03..d43b14535827 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
180 180
181 if (nhdr->n_type == BPF_BUILD_ID && 181 if (nhdr->n_type == BPF_BUILD_ID &&
182 nhdr->n_namesz == sizeof("GNU") && 182 nhdr->n_namesz == sizeof("GNU") &&
183 nhdr->n_descsz == BPF_BUILD_ID_SIZE) { 183 nhdr->n_descsz > 0 &&
184 nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
184 memcpy(build_id, 185 memcpy(build_id,
185 note_start + note_offs + 186 note_start + note_offs +
186 ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), 187 ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
187 BPF_BUILD_ID_SIZE); 188 nhdr->n_descsz);
189 memset(build_id + nhdr->n_descsz, 0,
190 BPF_BUILD_ID_SIZE - nhdr->n_descsz);
188 return 0; 191 return 0;
189 } 192 }
190 new_offs = note_offs + sizeof(Elf32_Nhdr) + 193 new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
260 return -EFAULT; /* page not mapped */ 263 return -EFAULT; /* page not mapped */
261 264
262 ret = -EINVAL; 265 ret = -EINVAL;
263 page_addr = page_address(page); 266 page_addr = kmap_atomic(page);
264 ehdr = (Elf32_Ehdr *)page_addr; 267 ehdr = (Elf32_Ehdr *)page_addr;
265 268
266 /* compare magic x7f "ELF" */ 269 /* compare magic x7f "ELF" */
@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
276 else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) 279 else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
277 ret = stack_map_get_build_id_64(page_addr, build_id); 280 ret = stack_map_get_build_id_64(page_addr, build_id);
278out: 281out:
282 kunmap_atomic(page_addr);
279 put_page(page); 283 put_page(page);
280 return ret; 284 return ret;
281} 285}
@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
310 for (i = 0; i < trace_nr; i++) { 314 for (i = 0; i < trace_nr; i++) {
311 id_offs[i].status = BPF_STACK_BUILD_ID_IP; 315 id_offs[i].status = BPF_STACK_BUILD_ID_IP;
312 id_offs[i].ip = ips[i]; 316 id_offs[i].ip = ips[i];
317 memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
313 } 318 }
314 return; 319 return;
315 } 320 }
@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
320 /* per entry fall back to ips */ 325 /* per entry fall back to ips */
321 id_offs[i].status = BPF_STACK_BUILD_ID_IP; 326 id_offs[i].status = BPF_STACK_BUILD_ID_IP;
322 id_offs[i].ip = ips[i]; 327 id_offs[i].ip = ips[i];
328 memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
323 continue; 329 continue;
324 } 330 }
325 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] 331 id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f6bc62a9ee8e..56674a7c3778 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3103,6 +3103,40 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
3103 } 3103 }
3104} 3104}
3105 3105
3106static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
3107 const struct bpf_insn *insn)
3108{
3109 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
3110}
3111
3112static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
3113 u32 alu_state, u32 alu_limit)
3114{
3115 /* If we arrived here from different branches with different
3116 * state or limits to sanitize, then this won't work.
3117 */
3118 if (aux->alu_state &&
3119 (aux->alu_state != alu_state ||
3120 aux->alu_limit != alu_limit))
3121 return -EACCES;
3122
3123 /* Corresponding fixup done in fixup_bpf_calls(). */
3124 aux->alu_state = alu_state;
3125 aux->alu_limit = alu_limit;
3126 return 0;
3127}
3128
3129static int sanitize_val_alu(struct bpf_verifier_env *env,
3130 struct bpf_insn *insn)
3131{
3132 struct bpf_insn_aux_data *aux = cur_aux(env);
3133
3134 if (can_skip_alu_sanitation(env, insn))
3135 return 0;
3136
3137 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
3138}
3139
3106static int sanitize_ptr_alu(struct bpf_verifier_env *env, 3140static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3107 struct bpf_insn *insn, 3141 struct bpf_insn *insn,
3108 const struct bpf_reg_state *ptr_reg, 3142 const struct bpf_reg_state *ptr_reg,
@@ -3117,7 +3151,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3117 struct bpf_reg_state tmp; 3151 struct bpf_reg_state tmp;
3118 bool ret; 3152 bool ret;
3119 3153
3120 if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K) 3154 if (can_skip_alu_sanitation(env, insn))
3121 return 0; 3155 return 0;
3122 3156
3123 /* We already marked aux for masking from non-speculative 3157 /* We already marked aux for masking from non-speculative
@@ -3133,19 +3167,8 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3133 3167
3134 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) 3168 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
3135 return 0; 3169 return 0;
3136 3170 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
3137 /* If we arrived here from different branches with different
3138 * limits to sanitize, then this won't work.
3139 */
3140 if (aux->alu_state &&
3141 (aux->alu_state != alu_state ||
3142 aux->alu_limit != alu_limit))
3143 return -EACCES; 3171 return -EACCES;
3144
3145 /* Corresponding fixup done in fixup_bpf_calls(). */
3146 aux->alu_state = alu_state;
3147 aux->alu_limit = alu_limit;
3148
3149do_sim: 3172do_sim:
3150 /* Simulate and find potential out-of-bounds access under 3173 /* Simulate and find potential out-of-bounds access under
3151 * speculative execution from truncation as a result of 3174 * speculative execution from truncation as a result of
@@ -3418,6 +3441,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3418 s64 smin_val, smax_val; 3441 s64 smin_val, smax_val;
3419 u64 umin_val, umax_val; 3442 u64 umin_val, umax_val;
3420 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; 3443 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3444 u32 dst = insn->dst_reg;
3445 int ret;
3421 3446
3422 if (insn_bitness == 32) { 3447 if (insn_bitness == 32) {
3423 /* Relevant for 32-bit RSH: Information can propagate towards 3448 /* Relevant for 32-bit RSH: Information can propagate towards
@@ -3452,6 +3477,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3452 3477
3453 switch (opcode) { 3478 switch (opcode) {
3454 case BPF_ADD: 3479 case BPF_ADD:
3480 ret = sanitize_val_alu(env, insn);
3481 if (ret < 0) {
3482 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
3483 return ret;
3484 }
3455 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 3485 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
3456 signed_add_overflows(dst_reg->smax_value, smax_val)) { 3486 signed_add_overflows(dst_reg->smax_value, smax_val)) {
3457 dst_reg->smin_value = S64_MIN; 3487 dst_reg->smin_value = S64_MIN;
@@ -3471,6 +3501,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
3471 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 3501 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
3472 break; 3502 break;
3473 case BPF_SUB: 3503 case BPF_SUB:
3504 ret = sanitize_val_alu(env, insn);
3505 if (ret < 0) {
3506 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
3507 return ret;
3508 }
3474 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 3509 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
3475 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 3510 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
3476 /* Overflow possible, we know nothing */ 3511 /* Overflow possible, we know nothing */
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d6361776dc5c..1fb6fd68b9c7 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -378,6 +378,8 @@ void __init swiotlb_exit(void)
378 memblock_free_late(io_tlb_start, 378 memblock_free_late(io_tlb_start,
379 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT)); 379 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
380 } 380 }
381 io_tlb_start = 0;
382 io_tlb_end = 0;
381 io_tlb_nslabs = 0; 383 io_tlb_nslabs = 0;
382 max_segment = 0; 384 max_segment = 0;
383} 385}
diff --git a/kernel/exit.c b/kernel/exit.c
index 2d14979577ee..284f2fe9a293 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -866,6 +866,7 @@ void __noreturn do_exit(long code)
866 exit_task_namespaces(tsk); 866 exit_task_namespaces(tsk);
867 exit_task_work(tsk); 867 exit_task_work(tsk);
868 exit_thread(tsk); 868 exit_thread(tsk);
869 exit_umh(tsk);
869 870
870 /* 871 /*
871 * Flush inherited counters to the parent - before the parent 872 * Flush inherited counters to the parent - before the parent
diff --git a/kernel/fork.c b/kernel/fork.c
index a60459947f18..b69248e6f0e0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -217,6 +217,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
217 memset(s->addr, 0, THREAD_SIZE); 217 memset(s->addr, 0, THREAD_SIZE);
218 218
219 tsk->stack_vm_area = s; 219 tsk->stack_vm_area = s;
220 tsk->stack = s->addr;
220 return s->addr; 221 return s->addr;
221 } 222 }
222 223
@@ -1833,8 +1834,6 @@ static __latent_entropy struct task_struct *copy_process(
1833 1834
1834 posix_cpu_timers_init(p); 1835 posix_cpu_timers_init(p);
1835 1836
1836 p->start_time = ktime_get_ns();
1837 p->real_start_time = ktime_get_boot_ns();
1838 p->io_context = NULL; 1837 p->io_context = NULL;
1839 audit_set_context(p, NULL); 1838 audit_set_context(p, NULL);
1840 cgroup_fork(p); 1839 cgroup_fork(p);
@@ -2001,6 +2000,17 @@ static __latent_entropy struct task_struct *copy_process(
2001 goto bad_fork_free_pid; 2000 goto bad_fork_free_pid;
2002 2001
2003 /* 2002 /*
2003 * From this point on we must avoid any synchronous user-space
2004 * communication until we take the tasklist-lock. In particular, we do
2005 * not want user-space to be able to predict the process start-time by
2006 * stalling fork(2) after we recorded the start_time but before it is
2007 * visible to the system.
2008 */
2009
2010 p->start_time = ktime_get_ns();
2011 p->real_start_time = ktime_get_boot_ns();
2012
2013 /*
2004 * Make it visible to the rest of the system, but dont wake it up yet. 2014 * Make it visible to the rest of the system, but dont wake it up yet.
2005 * Need tasklist lock for parent etc handling! 2015 * Need tasklist lock for parent etc handling!
2006 */ 2016 */
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index d7f538847b84..e815781ed751 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -976,6 +976,9 @@ static int seccomp_notify_release(struct inode *inode, struct file *file)
976 struct seccomp_filter *filter = file->private_data; 976 struct seccomp_filter *filter = file->private_data;
977 struct seccomp_knotif *knotif; 977 struct seccomp_knotif *knotif;
978 978
979 if (!filter)
980 return 0;
981
979 mutex_lock(&filter->notify_lock); 982 mutex_lock(&filter->notify_lock);
980 983
981 /* 984 /*
@@ -1300,6 +1303,7 @@ out:
1300out_put_fd: 1303out_put_fd:
1301 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) { 1304 if (flags & SECCOMP_FILTER_FLAG_NEW_LISTENER) {
1302 if (ret < 0) { 1305 if (ret < 0) {
1306 listener_f->private_data = NULL;
1303 fput(listener_f); 1307 fput(listener_f);
1304 put_unused_fd(listener); 1308 put_unused_fd(listener);
1305 } else { 1309 } else {
diff --git a/kernel/sys.c b/kernel/sys.c
index a48cbf1414b8..f7eb62eceb24 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1207,7 +1207,8 @@ DECLARE_RWSEM(uts_sem);
1207/* 1207/*
1208 * Work around broken programs that cannot handle "Linux 3.0". 1208 * Work around broken programs that cannot handle "Linux 3.0".
1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
1210 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60. 1210 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1211 * 2.6.60.
1211 */ 1212 */
1212static int override_release(char __user *release, size_t len) 1213static int override_release(char __user *release, size_t len)
1213{ 1214{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 5c19b8c41c7e..d5fb09ebba8b 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -607,11 +607,17 @@ static int trace_kprobe_create(int argc, const char *argv[])
607 char buf[MAX_EVENT_NAME_LEN]; 607 char buf[MAX_EVENT_NAME_LEN];
608 unsigned int flags = TPARG_FL_KERNEL; 608 unsigned int flags = TPARG_FL_KERNEL;
609 609
610 /* argc must be >= 1 */ 610 switch (argv[0][0]) {
611 if (argv[0][0] == 'r') { 611 case 'r':
612 is_return = true; 612 is_return = true;
613 flags |= TPARG_FL_RETURN; 613 flags |= TPARG_FL_RETURN;
614 } else if (argv[0][0] != 'p' || argc < 2) 614 break;
615 case 'p':
616 break;
617 default:
618 return -ECANCELED;
619 }
620 if (argc < 2)
615 return -ECANCELED; 621 return -ECANCELED;
616 622
617 event = strchr(&argv[0][1], ':'); 623 event = strchr(&argv[0][1], ':');
diff --git a/kernel/umh.c b/kernel/umh.c
index 0baa672e023c..d937cbad903a 100644
--- a/kernel/umh.c
+++ b/kernel/umh.c
@@ -37,6 +37,8 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
37static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET; 37static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
38static DEFINE_SPINLOCK(umh_sysctl_lock); 38static DEFINE_SPINLOCK(umh_sysctl_lock);
39static DECLARE_RWSEM(umhelper_sem); 39static DECLARE_RWSEM(umhelper_sem);
40static LIST_HEAD(umh_list);
41static DEFINE_MUTEX(umh_list_lock);
40 42
41static void call_usermodehelper_freeinfo(struct subprocess_info *info) 43static void call_usermodehelper_freeinfo(struct subprocess_info *info)
42{ 44{
@@ -100,10 +102,12 @@ static int call_usermodehelper_exec_async(void *data)
100 commit_creds(new); 102 commit_creds(new);
101 103
102 sub_info->pid = task_pid_nr(current); 104 sub_info->pid = task_pid_nr(current);
103 if (sub_info->file) 105 if (sub_info->file) {
104 retval = do_execve_file(sub_info->file, 106 retval = do_execve_file(sub_info->file,
105 sub_info->argv, sub_info->envp); 107 sub_info->argv, sub_info->envp);
106 else 108 if (!retval)
109 current->flags |= PF_UMH;
110 } else
107 retval = do_execve(getname_kernel(sub_info->path), 111 retval = do_execve(getname_kernel(sub_info->path),
108 (const char __user *const __user *)sub_info->argv, 112 (const char __user *const __user *)sub_info->argv,
109 (const char __user *const __user *)sub_info->envp); 113 (const char __user *const __user *)sub_info->envp);
@@ -517,6 +521,11 @@ int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
517 goto out; 521 goto out;
518 522
519 err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); 523 err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
524 if (!err) {
525 mutex_lock(&umh_list_lock);
526 list_add(&info->list, &umh_list);
527 mutex_unlock(&umh_list_lock);
528 }
520out: 529out:
521 fput(file); 530 fput(file);
522 return err; 531 return err;
@@ -679,6 +688,26 @@ static int proc_cap_handler(struct ctl_table *table, int write,
679 return 0; 688 return 0;
680} 689}
681 690
691void __exit_umh(struct task_struct *tsk)
692{
693 struct umh_info *info;
694 pid_t pid = tsk->pid;
695
696 mutex_lock(&umh_list_lock);
697 list_for_each_entry(info, &umh_list, list) {
698 if (info->pid == pid) {
699 list_del(&info->list);
700 mutex_unlock(&umh_list_lock);
701 goto out;
702 }
703 }
704 mutex_unlock(&umh_list_lock);
705 return;
706out:
707 if (info->cleanup)
708 info->cleanup(info);
709}
710
682struct ctl_table usermodehelper_table[] = { 711struct ctl_table usermodehelper_table[] = {
683 { 712 {
684 .procname = "bset", 713 .procname = "bset",
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4ca6bd..30e0f9770f88 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
52 if (x <= ULONG_MAX) 52 if (x <= ULONG_MAX)
53 return int_sqrt((unsigned long) x); 53 return int_sqrt((unsigned long) x);
54 54
55 m = 1ULL << (fls64(x) & ~1ULL); 55 m = 1ULL << ((fls64(x) - 1) & ~1ULL);
56 while (m != 0) { 56 while (m != 0) {
57 b = y + m; 57 b = y + m;
58 y >>= 1; 58 y >>= 1;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 65c2d06250a6..5b382c1244ed 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -26,14 +26,10 @@
26static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index) 26static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
27{ 27{
28 unsigned long mask, val; 28 unsigned long mask, val;
29 unsigned long __maybe_unused flags;
30 bool ret = false; 29 bool ret = false;
30 unsigned long flags;
31 31
32 /* Silence bogus lockdep warning */ 32 spin_lock_irqsave(&sb->map[index].swap_lock, flags);
33#if defined(CONFIG_LOCKDEP)
34 local_irq_save(flags);
35#endif
36 spin_lock(&sb->map[index].swap_lock);
37 33
38 if (!sb->map[index].cleared) 34 if (!sb->map[index].cleared)
39 goto out_unlock; 35 goto out_unlock;
@@ -54,10 +50,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
54 50
55 ret = true; 51 ret = true;
56out_unlock: 52out_unlock:
57 spin_unlock(&sb->map[index].swap_lock); 53 spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
58#if defined(CONFIG_LOCKDEP)
59 local_irq_restore(flags);
60#endif
61 return ret; 54 return ret;
62} 55}
63 56
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 745088810965..df2e7dd5ff17 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3238,7 +3238,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3238 struct page *ptepage; 3238 struct page *ptepage;
3239 unsigned long addr; 3239 unsigned long addr;
3240 int cow; 3240 int cow;
3241 struct address_space *mapping = vma->vm_file->f_mapping;
3242 struct hstate *h = hstate_vma(vma); 3241 struct hstate *h = hstate_vma(vma);
3243 unsigned long sz = huge_page_size(h); 3242 unsigned long sz = huge_page_size(h);
3244 struct mmu_notifier_range range; 3243 struct mmu_notifier_range range;
@@ -3250,23 +3249,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3250 mmu_notifier_range_init(&range, src, vma->vm_start, 3249 mmu_notifier_range_init(&range, src, vma->vm_start,
3251 vma->vm_end); 3250 vma->vm_end);
3252 mmu_notifier_invalidate_range_start(&range); 3251 mmu_notifier_invalidate_range_start(&range);
3253 } else {
3254 /*
3255 * For shared mappings i_mmap_rwsem must be held to call
3256 * huge_pte_alloc, otherwise the returned ptep could go
3257 * away if part of a shared pmd and another thread calls
3258 * huge_pmd_unshare.
3259 */
3260 i_mmap_lock_read(mapping);
3261 } 3252 }
3262 3253
3263 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { 3254 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3264 spinlock_t *src_ptl, *dst_ptl; 3255 spinlock_t *src_ptl, *dst_ptl;
3265
3266 src_pte = huge_pte_offset(src, addr, sz); 3256 src_pte = huge_pte_offset(src, addr, sz);
3267 if (!src_pte) 3257 if (!src_pte)
3268 continue; 3258 continue;
3269
3270 dst_pte = huge_pte_alloc(dst, addr, sz); 3259 dst_pte = huge_pte_alloc(dst, addr, sz);
3271 if (!dst_pte) { 3260 if (!dst_pte) {
3272 ret = -ENOMEM; 3261 ret = -ENOMEM;
@@ -3337,8 +3326,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3337 3326
3338 if (cow) 3327 if (cow)
3339 mmu_notifier_invalidate_range_end(&range); 3328 mmu_notifier_invalidate_range_end(&range);
3340 else
3341 i_mmap_unlock_read(mapping);
3342 3329
3343 return ret; 3330 return ret;
3344} 3331}
@@ -3755,16 +3742,16 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3755 } 3742 }
3756 3743
3757 /* 3744 /*
3758 * We can not race with truncation due to holding i_mmap_rwsem. 3745 * Use page lock to guard against racing truncation
3759 * Check once here for faults beyond end of file. 3746 * before we get page_table_lock.
3760 */ 3747 */
3761 size = i_size_read(mapping->host) >> huge_page_shift(h);
3762 if (idx >= size)
3763 goto out;
3764
3765retry: 3748retry:
3766 page = find_lock_page(mapping, idx); 3749 page = find_lock_page(mapping, idx);
3767 if (!page) { 3750 if (!page) {
3751 size = i_size_read(mapping->host) >> huge_page_shift(h);
3752 if (idx >= size)
3753 goto out;
3754
3768 /* 3755 /*
3769 * Check for page in userfault range 3756 * Check for page in userfault range
3770 */ 3757 */
@@ -3784,18 +3771,14 @@ retry:
3784 }; 3771 };
3785 3772
3786 /* 3773 /*
3787 * hugetlb_fault_mutex and i_mmap_rwsem must be 3774 * hugetlb_fault_mutex must be dropped before
3788 * dropped before handling userfault. Reacquire 3775 * handling userfault. Reacquire after handling
3789 * after handling fault to make calling code simpler. 3776 * fault to make calling code simpler.
3790 */ 3777 */
3791 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, 3778 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3792 idx, haddr); 3779 idx, haddr);
3793 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 3780 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3794 i_mmap_unlock_read(mapping);
3795
3796 ret = handle_userfault(&vmf, VM_UFFD_MISSING); 3781 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3797
3798 i_mmap_lock_read(mapping);
3799 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3782 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3800 goto out; 3783 goto out;
3801 } 3784 }
@@ -3854,6 +3837,9 @@ retry:
3854 } 3837 }
3855 3838
3856 ptl = huge_pte_lock(h, mm, ptep); 3839 ptl = huge_pte_lock(h, mm, ptep);
3840 size = i_size_read(mapping->host) >> huge_page_shift(h);
3841 if (idx >= size)
3842 goto backout;
3857 3843
3858 ret = 0; 3844 ret = 0;
3859 if (!huge_pte_none(huge_ptep_get(ptep))) 3845 if (!huge_pte_none(huge_ptep_get(ptep)))
@@ -3940,11 +3926,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3940 3926
3941 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 3927 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3942 if (ptep) { 3928 if (ptep) {
3943 /*
3944 * Since we hold no locks, ptep could be stale. That is
3945 * OK as we are only making decisions based on content and
3946 * not actually modifying content here.
3947 */
3948 entry = huge_ptep_get(ptep); 3929 entry = huge_ptep_get(ptep);
3949 if (unlikely(is_hugetlb_entry_migration(entry))) { 3930 if (unlikely(is_hugetlb_entry_migration(entry))) {
3950 migration_entry_wait_huge(vma, mm, ptep); 3931 migration_entry_wait_huge(vma, mm, ptep);
@@ -3952,33 +3933,20 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3952 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 3933 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3953 return VM_FAULT_HWPOISON_LARGE | 3934 return VM_FAULT_HWPOISON_LARGE |
3954 VM_FAULT_SET_HINDEX(hstate_index(h)); 3935 VM_FAULT_SET_HINDEX(hstate_index(h));
3936 } else {
3937 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3938 if (!ptep)
3939 return VM_FAULT_OOM;
3955 } 3940 }
3956 3941
3957 /*
3958 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
3959 * until finished with ptep. This serves two purposes:
3960 * 1) It prevents huge_pmd_unshare from being called elsewhere
3961 * and making the ptep no longer valid.
3962 * 2) It synchronizes us with file truncation.
3963 *
3964 * ptep could have already be assigned via huge_pte_offset. That
3965 * is OK, as huge_pte_alloc will return the same value unless
3966 * something changed.
3967 */
3968 mapping = vma->vm_file->f_mapping; 3942 mapping = vma->vm_file->f_mapping;
3969 i_mmap_lock_read(mapping); 3943 idx = vma_hugecache_offset(h, vma, haddr);
3970 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3971 if (!ptep) {
3972 i_mmap_unlock_read(mapping);
3973 return VM_FAULT_OOM;
3974 }
3975 3944
3976 /* 3945 /*
3977 * Serialize hugepage allocation and instantiation, so that we don't 3946 * Serialize hugepage allocation and instantiation, so that we don't
3978 * get spurious allocation failures if two CPUs race to instantiate 3947 * get spurious allocation failures if two CPUs race to instantiate
3979 * the same page in the page cache. 3948 * the same page in the page cache.
3980 */ 3949 */
3981 idx = vma_hugecache_offset(h, vma, haddr);
3982 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr); 3950 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3983 mutex_lock(&hugetlb_fault_mutex_table[hash]); 3951 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3984 3952
@@ -4066,7 +4034,6 @@ out_ptl:
4066 } 4034 }
4067out_mutex: 4035out_mutex:
4068 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 4036 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4069 i_mmap_unlock_read(mapping);
4070 /* 4037 /*
4071 * Generally it's safe to hold refcount during waiting page lock. But 4038 * Generally it's safe to hold refcount during waiting page lock. But
4072 * here we just wait to defer the next page fault to avoid busy loop and 4039 * here we just wait to defer the next page fault to avoid busy loop and
@@ -4671,12 +4638,10 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4671 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 4638 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4672 * and returns the corresponding pte. While this is not necessary for the 4639 * and returns the corresponding pte. While this is not necessary for the
4673 * !shared pmd case because we can allocate the pmd later as well, it makes the 4640 * !shared pmd case because we can allocate the pmd later as well, it makes the
4674 * code much cleaner. 4641 * code much cleaner. pmd allocation is essential for the shared case because
4675 * 4642 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4676 * This routine must be called with i_mmap_rwsem held in at least read mode. 4643 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4677 * For hugetlbfs, this prevents removal of any page table entries associated 4644 * bad pmd for sharing.
4678 * with the address space. This is important as we are setting up sharing
4679 * based on existing page table entries (mappings).
4680 */ 4645 */
4681pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) 4646pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4682{ 4647{
@@ -4693,6 +4658,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4693 if (!vma_shareable(vma, addr)) 4658 if (!vma_shareable(vma, addr))
4694 return (pte_t *)pmd_alloc(mm, pud, addr); 4659 return (pte_t *)pmd_alloc(mm, pud, addr);
4695 4660
4661 i_mmap_lock_write(mapping);
4696 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 4662 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4697 if (svma == vma) 4663 if (svma == vma)
4698 continue; 4664 continue;
@@ -4722,6 +4688,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4722 spin_unlock(ptl); 4688 spin_unlock(ptl);
4723out: 4689out:
4724 pte = (pte_t *)pmd_alloc(mm, pud, addr); 4690 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4691 i_mmap_unlock_write(mapping);
4725 return pte; 4692 return pte;
4726} 4693}
4727 4694
@@ -4732,7 +4699,7 @@ out:
4732 * indicated by page_count > 1, unmap is achieved by clearing pud and 4699 * indicated by page_count > 1, unmap is achieved by clearing pud and
4733 * decrementing the ref count. If count == 1, the pte page is not shared. 4700 * decrementing the ref count. If count == 1, the pte page is not shared.
4734 * 4701 *
4735 * Called with page table lock held and i_mmap_rwsem held in write mode. 4702 * called with page table lock held.
4736 * 4703 *
4737 * returns: 1 successfully unmapped a shared pte page 4704 * returns: 1 successfully unmapped a shared pte page
4738 * 0 the underlying pte page is not shared, or it is the last user 4705 * 0 the underlying pte page is not shared, or it is the last user
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 03d5d1374ca7..73c9cbfdedf4 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -298,8 +298,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
298 return; 298 return;
299 } 299 }
300 300
301 cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);
302
303 *flags |= SLAB_KASAN; 301 *flags |= SLAB_KASAN;
304} 302}
305 303
@@ -349,28 +347,43 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
349} 347}
350 348
351/* 349/*
352 * Since it's desirable to only call object contructors once during slab 350 * This function assigns a tag to an object considering the following:
353 * allocation, we preassign tags to all such objects. Also preassign tags for 351 * 1. A cache might have a constructor, which might save a pointer to a slab
354 * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports. 352 * object somewhere (e.g. in the object itself). We preassign a tag for
355 * For SLAB allocator we can't preassign tags randomly since the freelist is 353 * each object in caches with constructors during slab creation and reuse
356 * stored as an array of indexes instead of a linked list. Assign tags based 354 * the same tag each time a particular object is allocated.
357 * on objects indexes, so that objects that are next to each other get 355 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
358 * different tags. 356 * accessed after being freed. We preassign tags for objects in these
359 * After a tag is assigned, the object always gets allocated with the same tag. 357 * caches as well.
360 * The reason is that we can't change tags for objects with constructors on 358 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
361 * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor 359 * is stored as an array of indexes instead of a linked list. Assign tags
362 * code can save the pointer to the object somewhere (e.g. in the object 360 * based on objects indexes, so that objects that are next to each other
363 * itself). Then if we retag it, the old saved pointer will become invalid. 361 * get different tags.
364 */ 362 */
365static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new) 363static u8 assign_tag(struct kmem_cache *cache, const void *object,
364 bool init, bool krealloc)
366{ 365{
366 /* Reuse the same tag for krealloc'ed objects. */
367 if (krealloc)
368 return get_tag(object);
369
370 /*
371 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
372 * set, assign a tag when the object is being allocated (init == false).
373 */
367 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU)) 374 if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
368 return new ? KASAN_TAG_KERNEL : random_tag(); 375 return init ? KASAN_TAG_KERNEL : random_tag();
369 376
377 /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
370#ifdef CONFIG_SLAB 378#ifdef CONFIG_SLAB
379 /* For SLAB assign tags based on the object index in the freelist. */
371 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object); 380 return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
372#else 381#else
373 return new ? random_tag() : get_tag(object); 382 /*
383 * For SLUB assign a random tag during slab creation, otherwise reuse
384 * the already assigned tag.
385 */
386 return init ? random_tag() : get_tag(object);
374#endif 387#endif
375} 388}
376 389
@@ -386,7 +399,8 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
386 __memset(alloc_info, 0, sizeof(*alloc_info)); 399 __memset(alloc_info, 0, sizeof(*alloc_info));
387 400
388 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 401 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
389 object = set_tag(object, assign_tag(cache, object, true)); 402 object = set_tag(object,
403 assign_tag(cache, object, true, false));
390 404
391 return (void *)object; 405 return (void *)object;
392} 406}
@@ -452,8 +466,8 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
452 return __kasan_slab_free(cache, object, ip, true); 466 return __kasan_slab_free(cache, object, ip, true);
453} 467}
454 468
455void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, 469static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
456 size_t size, gfp_t flags) 470 size_t size, gfp_t flags, bool krealloc)
457{ 471{
458 unsigned long redzone_start; 472 unsigned long redzone_start;
459 unsigned long redzone_end; 473 unsigned long redzone_end;
@@ -471,7 +485,7 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
471 KASAN_SHADOW_SCALE_SIZE); 485 KASAN_SHADOW_SCALE_SIZE);
472 486
473 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 487 if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
474 tag = assign_tag(cache, object, false); 488 tag = assign_tag(cache, object, false, krealloc);
475 489
476 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ 490 /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
477 kasan_unpoison_shadow(set_tag(object, tag), size); 491 kasan_unpoison_shadow(set_tag(object, tag), size);
@@ -483,6 +497,12 @@ void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
483 497
484 return set_tag(object, tag); 498 return set_tag(object, tag);
485} 499}
500
501void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
502 size_t size, gfp_t flags)
503{
504 return __kasan_kmalloc(cache, object, size, flags, false);
505}
486EXPORT_SYMBOL(kasan_kmalloc); 506EXPORT_SYMBOL(kasan_kmalloc);
487 507
488void * __must_check kasan_kmalloc_large(const void *ptr, size_t size, 508void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
@@ -522,7 +542,8 @@ void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
522 if (unlikely(!PageSlab(page))) 542 if (unlikely(!PageSlab(page)))
523 return kasan_kmalloc_large(object, size, flags); 543 return kasan_kmalloc_large(object, size, flags);
524 else 544 else
525 return kasan_kmalloc(page->slab_cache, object, size, flags); 545 return __kasan_kmalloc(page->slab_cache, object, size,
546 flags, true);
526} 547}
527 548
528void kasan_poison_kfree(void *ptr, unsigned long ip) 549void kasan_poison_kfree(void *ptr, unsigned long ip)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6379fff1a5ff..7c72f2a95785 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
966 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 966 enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
967 struct address_space *mapping; 967 struct address_space *mapping;
968 LIST_HEAD(tokill); 968 LIST_HEAD(tokill);
969 bool unmap_success = true; 969 bool unmap_success;
970 int kill = 1, forcekill; 970 int kill = 1, forcekill;
971 struct page *hpage = *hpagep; 971 struct page *hpage = *hpagep;
972 bool mlocked = PageMlocked(hpage); 972 bool mlocked = PageMlocked(hpage);
@@ -1028,19 +1028,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1028 if (kill) 1028 if (kill)
1029 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED); 1029 collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
1030 1030
1031 if (!PageHuge(hpage)) { 1031 unmap_success = try_to_unmap(hpage, ttu);
1032 unmap_success = try_to_unmap(hpage, ttu);
1033 } else if (mapping) {
1034 /*
1035 * For hugetlb pages, try_to_unmap could potentially call
1036 * huge_pmd_unshare. Because of this, take semaphore in
1037 * write mode here and set TTU_RMAP_LOCKED to indicate we
1038 * have taken the lock at this higer level.
1039 */
1040 i_mmap_lock_write(mapping);
1041 unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
1042 i_mmap_unlock_write(mapping);
1043 }
1044 if (!unmap_success) 1032 if (!unmap_success)
1045 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n", 1033 pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
1046 pfn, page_mapcount(hpage)); 1034 pfn, page_mapcount(hpage));
diff --git a/mm/memory.c b/mm/memory.c
index a52663c0612d..e11ca9dd823f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2994,6 +2994,28 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
2994 struct vm_area_struct *vma = vmf->vma; 2994 struct vm_area_struct *vma = vmf->vma;
2995 vm_fault_t ret; 2995 vm_fault_t ret;
2996 2996
2997 /*
2998 * Preallocate pte before we take page_lock because this might lead to
2999 * deadlocks for memcg reclaim which waits for pages under writeback:
3000 * lock_page(A)
3001 * SetPageWriteback(A)
3002 * unlock_page(A)
3003 * lock_page(B)
3004 * lock_page(B)
3005 * pte_alloc_pne
3006 * shrink_page_list
3007 * wait_on_page_writeback(A)
3008 * SetPageWriteback(B)
3009 * unlock_page(B)
3010 * # flush A, B to clear the writeback
3011 */
3012 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3013 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
3014 if (!vmf->prealloc_pte)
3015 return VM_FAULT_OOM;
3016 smp_wmb(); /* See comment in __pte_alloc() */
3017 }
3018
2997 ret = vma->vm_ops->fault(vmf); 3019 ret = vma->vm_ops->fault(vmf);
2998 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY | 3020 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
2999 VM_FAULT_DONE_COW))) 3021 VM_FAULT_DONE_COW)))
@@ -4077,8 +4099,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
4077 goto out; 4099 goto out;
4078 4100
4079 if (range) { 4101 if (range) {
4080 range->start = address & PAGE_MASK; 4102 mmu_notifier_range_init(range, mm, address & PAGE_MASK,
4081 range->end = range->start + PAGE_SIZE; 4103 (address & PAGE_MASK) + PAGE_SIZE);
4082 mmu_notifier_invalidate_range_start(range); 4104 mmu_notifier_invalidate_range_start(range);
4083 } 4105 }
4084 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); 4106 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
diff --git a/mm/migrate.c b/mm/migrate.c
index ccf8966caf6f..a16b15090df3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1324,19 +1324,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
1324 goto put_anon; 1324 goto put_anon;
1325 1325
1326 if (page_mapped(hpage)) { 1326 if (page_mapped(hpage)) {
1327 struct address_space *mapping = page_mapping(hpage);
1328
1329 /*
1330 * try_to_unmap could potentially call huge_pmd_unshare.
1331 * Because of this, take semaphore in write mode here and
1332 * set TTU_RMAP_LOCKED to let lower levels know we have
1333 * taken the lock.
1334 */
1335 i_mmap_lock_write(mapping);
1336 try_to_unmap(hpage, 1327 try_to_unmap(hpage,
1337 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS| 1328 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1338 TTU_RMAP_LOCKED);
1339 i_mmap_unlock_write(mapping);
1340 page_was_mapped = 1; 1329 page_was_mapped = 1;
1341 } 1330 }
1342 1331
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cde5dac6229a..d295c9bc01a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2214,7 +2214,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
2214 */ 2214 */
2215 boost_watermark(zone); 2215 boost_watermark(zone);
2216 if (alloc_flags & ALLOC_KSWAPD) 2216 if (alloc_flags & ALLOC_KSWAPD)
2217 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2217 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2218 2218
2219 /* We are not allowed to try stealing from the whole block */ 2219 /* We are not allowed to try stealing from the whole block */
2220 if (!whole_block) 2220 if (!whole_block)
@@ -3102,6 +3102,12 @@ struct page *rmqueue(struct zone *preferred_zone,
3102 local_irq_restore(flags); 3102 local_irq_restore(flags);
3103 3103
3104out: 3104out:
3105 /* Separate test+clear to avoid unnecessary atomics */
3106 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3107 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3108 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3109 }
3110
3105 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3111 VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3106 return page; 3112 return page;
3107 3113
diff --git a/mm/rmap.c b/mm/rmap.c
index 21a26cf51114..0454ecc29537 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,7 +25,6 @@
25 * page->flags PG_locked (lock_page) 25 * page->flags PG_locked (lock_page)
26 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share) 26 * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
27 * mapping->i_mmap_rwsem 27 * mapping->i_mmap_rwsem
28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
29 * anon_vma->rwsem 28 * anon_vma->rwsem
30 * mm->page_table_lock or pte_lock 29 * mm->page_table_lock or pte_lock
31 * zone_lru_lock (in mark_page_accessed, isolate_lru_page) 30 * zone_lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -1372,16 +1371,13 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1372 * Note that the page can not be free in this function as call of 1371 * Note that the page can not be free in this function as call of
1373 * try_to_unmap() must hold a reference on the page. 1372 * try_to_unmap() must hold a reference on the page.
1374 */ 1373 */
1375 mmu_notifier_range_init(&range, vma->vm_mm, vma->vm_start, 1374 mmu_notifier_range_init(&range, vma->vm_mm, address,
1376 min(vma->vm_end, vma->vm_start + 1375 min(vma->vm_end, address +
1377 (PAGE_SIZE << compound_order(page)))); 1376 (PAGE_SIZE << compound_order(page))));
1378 if (PageHuge(page)) { 1377 if (PageHuge(page)) {
1379 /* 1378 /*
1380 * If sharing is possible, start and end will be adjusted 1379 * If sharing is possible, start and end will be adjusted
1381 * accordingly. 1380 * accordingly.
1382 *
1383 * If called for a huge page, caller must hold i_mmap_rwsem
1384 * in write mode as it is possible to call huge_pmd_unshare.
1385 */ 1381 */
1386 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1382 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1387 &range.end); 1383 &range.end);
diff --git a/mm/slab.c b/mm/slab.c
index 73fe23e649c9..78eb8c5bf4e4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -666,8 +666,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
666 struct alien_cache *alc = NULL; 666 struct alien_cache *alc = NULL;
667 667
668 alc = kmalloc_node(memsize, gfp, node); 668 alc = kmalloc_node(memsize, gfp, node);
669 init_arraycache(&alc->ac, entries, batch); 669 if (alc) {
670 spin_lock_init(&alc->lock); 670 init_arraycache(&alc->ac, entries, batch);
671 spin_lock_init(&alc->lock);
672 }
671 return alc; 673 return alc;
672} 674}
673 675
diff --git a/mm/slub.c b/mm/slub.c
index 36c0befeebd8..1e3d0ec4e200 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3846,6 +3846,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
3846 unsigned int offset; 3846 unsigned int offset;
3847 size_t object_size; 3847 size_t object_size;
3848 3848
3849 ptr = kasan_reset_tag(ptr);
3850
3849 /* Find object and usable object size. */ 3851 /* Find object and usable object size. */
3850 s = page->slab_cache; 3852 s = page->slab_cache;
3851 3853
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 852eb4e53f06..14faadcedd06 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -247,7 +247,8 @@ static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);
247/* 247/*
248 * Validates that the given object is: 248 * Validates that the given object is:
249 * - not bogus address 249 * - not bogus address
250 * - known-safe heap or stack object 250 * - fully contained by stack (or stack frame, when available)
251 * - fully within SLAB object (or object whitelist area, when available)
251 * - not in kernel text 252 * - not in kernel text
252 */ 253 */
253void __check_object_size(const void *ptr, unsigned long n, bool to_user) 254void __check_object_size(const void *ptr, unsigned long n, bool to_user)
@@ -262,9 +263,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
262 /* Check for invalid addresses. */ 263 /* Check for invalid addresses. */
263 check_bogus_address((const unsigned long)ptr, n, to_user); 264 check_bogus_address((const unsigned long)ptr, n, to_user);
264 265
265 /* Check for bad heap object. */
266 check_heap_object(ptr, n, to_user);
267
268 /* Check for bad stack object. */ 266 /* Check for bad stack object. */
269 switch (check_stack_object(ptr, n)) { 267 switch (check_stack_object(ptr, n)) {
270 case NOT_STACK: 268 case NOT_STACK:
@@ -282,6 +280,9 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
282 usercopy_abort("process stack", NULL, to_user, 0, n); 280 usercopy_abort("process stack", NULL, to_user, 0, n);
283 } 281 }
284 282
283 /* Check for bad heap object. */
284 check_heap_object(ptr, n, to_user);
285
285 /* Check for object in kernel to avoid text exposure. */ 286 /* Check for object in kernel to avoid text exposure. */
286 check_kernel_text_object((const unsigned long)ptr, n, to_user); 287 check_kernel_text_object((const unsigned long)ptr, n, to_user);
287} 288}
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 065c1ce191c4..d59b5a73dfb3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -267,14 +267,10 @@ retry:
267 VM_BUG_ON(dst_addr & ~huge_page_mask(h)); 267 VM_BUG_ON(dst_addr & ~huge_page_mask(h));
268 268
269 /* 269 /*
270 * Serialize via i_mmap_rwsem and hugetlb_fault_mutex. 270 * Serialize via hugetlb_fault_mutex
271 * i_mmap_rwsem ensures the dst_pte remains valid even
272 * in the case of shared pmds. fault mutex prevents
273 * races with other faulting threads.
274 */ 271 */
275 mapping = dst_vma->vm_file->f_mapping;
276 i_mmap_lock_read(mapping);
277 idx = linear_page_index(dst_vma, dst_addr); 272 idx = linear_page_index(dst_vma, dst_addr);
273 mapping = dst_vma->vm_file->f_mapping;
278 hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping, 274 hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
279 idx, dst_addr); 275 idx, dst_addr);
280 mutex_lock(&hugetlb_fault_mutex_table[hash]); 276 mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -283,7 +279,6 @@ retry:
283 dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h)); 279 dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
284 if (!dst_pte) { 280 if (!dst_pte) {
285 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 281 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
286 i_mmap_unlock_read(mapping);
287 goto out_unlock; 282 goto out_unlock;
288 } 283 }
289 284
@@ -291,7 +286,6 @@ retry:
291 dst_pteval = huge_ptep_get(dst_pte); 286 dst_pteval = huge_ptep_get(dst_pte);
292 if (!huge_pte_none(dst_pteval)) { 287 if (!huge_pte_none(dst_pteval)) {
293 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 288 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
294 i_mmap_unlock_read(mapping);
295 goto out_unlock; 289 goto out_unlock;
296 } 290 }
297 291
@@ -299,7 +293,6 @@ retry:
299 dst_addr, src_addr, &page); 293 dst_addr, src_addr, &page);
300 294
301 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 295 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
302 i_mmap_unlock_read(mapping);
303 vm_alloc_shared = vm_shared; 296 vm_alloc_shared = vm_shared;
304 297
305 cond_resched(); 298 cond_resched();
diff --git a/mm/util.c b/mm/util.c
index 4df23d64aac7..1ea055138043 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -478,7 +478,7 @@ bool page_mapped(struct page *page)
478 return true; 478 return true;
479 if (PageHuge(page)) 479 if (PageHuge(page))
480 return false; 480 return false;
481 for (i = 0; i < hpage_nr_pages(page); i++) { 481 for (i = 0; i < (1 << compound_order(page)); i++) {
482 if (atomic_read(&page[i]._mapcount) >= 0) 482 if (atomic_read(&page[i]._mapcount) >= 0)
483 return true; 483 return true;
484 } 484 }
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index 7acfc83087d5..7ee4fea93637 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -13,39 +13,24 @@
13extern char bpfilter_umh_start; 13extern char bpfilter_umh_start;
14extern char bpfilter_umh_end; 14extern char bpfilter_umh_end;
15 15
16static struct umh_info info; 16static void shutdown_umh(void)
17/* since ip_getsockopt() can run in parallel, serialize access to umh */
18static DEFINE_MUTEX(bpfilter_lock);
19
20static void shutdown_umh(struct umh_info *info)
21{ 17{
22 struct task_struct *tsk; 18 struct task_struct *tsk;
23 19
24 if (!info->pid) 20 if (bpfilter_ops.stop)
25 return; 21 return;
26 tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); 22
23 tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID);
27 if (tsk) { 24 if (tsk) {
28 force_sig(SIGKILL, tsk); 25 force_sig(SIGKILL, tsk);
29 put_task_struct(tsk); 26 put_task_struct(tsk);
30 } 27 }
31 fput(info->pipe_to_umh);
32 fput(info->pipe_from_umh);
33 info->pid = 0;
34} 28}
35 29
36static void __stop_umh(void) 30static void __stop_umh(void)
37{ 31{
38 if (IS_ENABLED(CONFIG_INET)) { 32 if (IS_ENABLED(CONFIG_INET))
39 bpfilter_process_sockopt = NULL; 33 shutdown_umh();
40 shutdown_umh(&info);
41 }
42}
43
44static void stop_umh(void)
45{
46 mutex_lock(&bpfilter_lock);
47 __stop_umh();
48 mutex_unlock(&bpfilter_lock);
49} 34}
50 35
51static int __bpfilter_process_sockopt(struct sock *sk, int optname, 36static int __bpfilter_process_sockopt(struct sock *sk, int optname,
@@ -63,10 +48,10 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
63 req.cmd = optname; 48 req.cmd = optname;
64 req.addr = (long __force __user)optval; 49 req.addr = (long __force __user)optval;
65 req.len = optlen; 50 req.len = optlen;
66 mutex_lock(&bpfilter_lock); 51 if (!bpfilter_ops.info.pid)
67 if (!info.pid)
68 goto out; 52 goto out;
69 n = __kernel_write(info.pipe_to_umh, &req, sizeof(req), &pos); 53 n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
54 &pos);
70 if (n != sizeof(req)) { 55 if (n != sizeof(req)) {
71 pr_err("write fail %zd\n", n); 56 pr_err("write fail %zd\n", n);
72 __stop_umh(); 57 __stop_umh();
@@ -74,7 +59,8 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
74 goto out; 59 goto out;
75 } 60 }
76 pos = 0; 61 pos = 0;
77 n = kernel_read(info.pipe_from_umh, &reply, sizeof(reply), &pos); 62 n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply),
63 &pos);
78 if (n != sizeof(reply)) { 64 if (n != sizeof(reply)) {
79 pr_err("read fail %zd\n", n); 65 pr_err("read fail %zd\n", n);
80 __stop_umh(); 66 __stop_umh();
@@ -83,37 +69,59 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
83 } 69 }
84 ret = reply.status; 70 ret = reply.status;
85out: 71out:
86 mutex_unlock(&bpfilter_lock);
87 return ret; 72 return ret;
88} 73}
89 74
90static int __init load_umh(void) 75static int start_umh(void)
91{ 76{
92 int err; 77 int err;
93 78
94 /* fork usermode process */ 79 /* fork usermode process */
95 info.cmdline = "bpfilter_umh";
96 err = fork_usermode_blob(&bpfilter_umh_start, 80 err = fork_usermode_blob(&bpfilter_umh_start,
97 &bpfilter_umh_end - &bpfilter_umh_start, 81 &bpfilter_umh_end - &bpfilter_umh_start,
98 &info); 82 &bpfilter_ops.info);
99 if (err) 83 if (err)
100 return err; 84 return err;
101 pr_info("Loaded bpfilter_umh pid %d\n", info.pid); 85 bpfilter_ops.stop = false;
86 pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid);
102 87
103 /* health check that usermode process started correctly */ 88 /* health check that usermode process started correctly */
104 if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { 89 if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
105 stop_umh(); 90 shutdown_umh();
106 return -EFAULT; 91 return -EFAULT;
107 } 92 }
108 if (IS_ENABLED(CONFIG_INET))
109 bpfilter_process_sockopt = &__bpfilter_process_sockopt;
110 93
111 return 0; 94 return 0;
112} 95}
113 96
97static int __init load_umh(void)
98{
99 int err;
100
101 mutex_lock(&bpfilter_ops.lock);
102 if (!bpfilter_ops.stop) {
103 err = -EFAULT;
104 goto out;
105 }
106 err = start_umh();
107 if (!err && IS_ENABLED(CONFIG_INET)) {
108 bpfilter_ops.sockopt = &__bpfilter_process_sockopt;
109 bpfilter_ops.start = &start_umh;
110 }
111out:
112 mutex_unlock(&bpfilter_ops.lock);
113 return err;
114}
115
114static void __exit fini_umh(void) 116static void __exit fini_umh(void)
115{ 117{
116 stop_umh(); 118 mutex_lock(&bpfilter_ops.lock);
119 if (IS_ENABLED(CONFIG_INET)) {
120 shutdown_umh();
121 bpfilter_ops.start = NULL;
122 bpfilter_ops.sockopt = NULL;
123 }
124 mutex_unlock(&bpfilter_ops.lock);
117} 125}
118module_init(load_umh); 126module_init(load_umh);
119module_exit(fini_umh); 127module_exit(fini_umh);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
index 40311d10d2f2..9ea6100dca87 100644
--- a/net/bpfilter/bpfilter_umh_blob.S
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -1,5 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2 .section .init.rodata, "a" 2 .section .rodata, "a"
3 .global bpfilter_umh_start 3 .global bpfilter_umh_start
4bpfilter_umh_start: 4bpfilter_umh_start:
5 .incbin "net/bpfilter/bpfilter_umh" 5 .incbin "net/bpfilter/bpfilter_umh"
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index fe3c758791ca..9e14767500ea 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1128,6 +1128,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1128 err = -ENOMEM; 1128 err = -ENOMEM;
1129 goto err_unlock; 1129 goto err_unlock;
1130 } 1130 }
1131 if (swdev_notify)
1132 fdb->added_by_user = 1;
1131 fdb->added_by_external_learn = 1; 1133 fdb->added_by_external_learn = 1;
1132 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); 1134 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1133 } else { 1135 } else {
@@ -1147,6 +1149,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1147 modified = true; 1149 modified = true;
1148 } 1150 }
1149 1151
1152 if (swdev_notify)
1153 fdb->added_by_user = 1;
1154
1150 if (modified) 1155 if (modified)
1151 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); 1156 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1152 } 1157 }
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 5372e2042adf..48ddc60b4fbd 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
36 36
37int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) 37int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
38{ 38{
39 skb_push(skb, ETH_HLEN);
39 if (!is_skb_forwardable(skb->dev, skb)) 40 if (!is_skb_forwardable(skb->dev, skb))
40 goto drop; 41 goto drop;
41 42
42 skb_push(skb, ETH_HLEN);
43 br_drop_fake_rtable(skb); 43 br_drop_fake_rtable(skb);
44 44
45 if (skb->ip_summed == CHECKSUM_PARTIAL && 45 if (skb->ip_summed == CHECKSUM_PARTIAL &&
@@ -65,6 +65,7 @@ EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
65 65
66int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 66int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
67{ 67{
68 skb->tstamp = 0;
68 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, 69 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
69 net, sk, skb, NULL, skb->dev, 70 net, sk, skb, NULL, skb->dev,
70 br_dev_queue_push_xmit); 71 br_dev_queue_push_xmit);
@@ -97,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to,
97 net = dev_net(indev); 98 net = dev_net(indev);
98 } else { 99 } else {
99 if (unlikely(netpoll_tx_running(to->br->dev))) { 100 if (unlikely(netpoll_tx_running(to->br->dev))) {
100 if (!is_skb_forwardable(skb->dev, skb)) { 101 skb_push(skb, ETH_HLEN);
102 if (!is_skb_forwardable(skb->dev, skb))
101 kfree_skb(skb); 103 kfree_skb(skb);
102 } else { 104 else
103 skb_push(skb, ETH_HLEN);
104 br_netpoll_send_skb(to, skb); 105 br_netpoll_send_skb(to, skb);
105 }
106 return; 106 return;
107 } 107 }
108 br_hook = NF_BR_LOCAL_OUT; 108 br_hook = NF_BR_LOCAL_OUT;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d21a23698410..c93c35bb73dd 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -265,7 +265,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
265 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); 265 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
266 int ret; 266 int ret;
267 267
268 if (neigh->hh.hh_len) { 268 if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
269 neigh_hh_bridge(&neigh->hh, skb); 269 neigh_hh_bridge(&neigh->hh, skb);
270 skb->dev = nf_bridge->physindev; 270 skb->dev = nf_bridge->physindev;
271 ret = br_handle_frame_finish(net, sk, skb); 271 ret = br_handle_frame_finish(net, sk, skb);
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 94039f588f1d..564710f88f93 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
131 IPSTATS_MIB_INDISCARDS); 131 IPSTATS_MIB_INDISCARDS);
132 goto drop; 132 goto drop;
133 } 133 }
134 hdr = ipv6_hdr(skb);
134 } 135 }
135 if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) 136 if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
136 goto drop; 137 goto drop;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d240b3e7919f..eabf8bf28a3f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -107,6 +107,7 @@ struct br_tunnel_info {
107/* private vlan flags */ 107/* private vlan flags */
108enum { 108enum {
109 BR_VLFLAG_PER_PORT_STATS = BIT(0), 109 BR_VLFLAG_PER_PORT_STATS = BIT(0),
110 BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1),
110}; 111};
111 112
112/** 113/**
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4a2f31157ef5..96abf8feb9dc 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -80,16 +80,18 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
80} 80}
81 81
82static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br, 82static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
83 u16 vid, u16 flags, struct netlink_ext_ack *extack) 83 struct net_bridge_vlan *v, u16 flags,
84 struct netlink_ext_ack *extack)
84{ 85{
85 int err; 86 int err;
86 87
87 /* Try switchdev op first. In case it is not supported, fallback to 88 /* Try switchdev op first. In case it is not supported, fallback to
88 * 8021q add. 89 * 8021q add.
89 */ 90 */
90 err = br_switchdev_port_vlan_add(dev, vid, flags, extack); 91 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
91 if (err == -EOPNOTSUPP) 92 if (err == -EOPNOTSUPP)
92 return vlan_vid_add(dev, br->vlan_proto, vid); 93 return vlan_vid_add(dev, br->vlan_proto, v->vid);
94 v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
93 return err; 95 return err;
94} 96}
95 97
@@ -121,19 +123,17 @@ static void __vlan_del_list(struct net_bridge_vlan *v)
121} 123}
122 124
123static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br, 125static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
124 u16 vid) 126 const struct net_bridge_vlan *v)
125{ 127{
126 int err; 128 int err;
127 129
128 /* Try switchdev op first. In case it is not supported, fallback to 130 /* Try switchdev op first. In case it is not supported, fallback to
129 * 8021q del. 131 * 8021q del.
130 */ 132 */
131 err = br_switchdev_port_vlan_del(dev, vid); 133 err = br_switchdev_port_vlan_del(dev, v->vid);
132 if (err == -EOPNOTSUPP) { 134 if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
133 vlan_vid_del(dev, br->vlan_proto, vid); 135 vlan_vid_del(dev, br->vlan_proto, v->vid);
134 return 0; 136 return err == -EOPNOTSUPP ? 0 : err;
135 }
136 return err;
137} 137}
138 138
139/* Returns a master vlan, if it didn't exist it gets created. In all cases a 139/* Returns a master vlan, if it didn't exist it gets created. In all cases a
@@ -242,7 +242,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
242 * This ensures tagged traffic enters the bridge when 242 * This ensures tagged traffic enters the bridge when
243 * promiscuous mode is disabled by br_manage_promisc(). 243 * promiscuous mode is disabled by br_manage_promisc().
244 */ 244 */
245 err = __vlan_vid_add(dev, br, v->vid, flags, extack); 245 err = __vlan_vid_add(dev, br, v, flags, extack);
246 if (err) 246 if (err)
247 goto out; 247 goto out;
248 248
@@ -305,7 +305,7 @@ out_fdb_insert:
305 305
306out_filt: 306out_filt:
307 if (p) { 307 if (p) {
308 __vlan_vid_del(dev, br, v->vid); 308 __vlan_vid_del(dev, br, v);
309 if (masterv) { 309 if (masterv) {
310 if (v->stats && masterv->stats != v->stats) 310 if (v->stats && masterv->stats != v->stats)
311 free_percpu(v->stats); 311 free_percpu(v->stats);
@@ -338,7 +338,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
338 338
339 __vlan_delete_pvid(vg, v->vid); 339 __vlan_delete_pvid(vg, v->vid);
340 if (p) { 340 if (p) {
341 err = __vlan_vid_del(p->dev, p->br, v->vid); 341 err = __vlan_vid_del(p->dev, p->br, v);
342 if (err) 342 if (err)
343 goto out; 343 goto out;
344 } else { 344 } else {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 491828713e0b..5e55cef0cec3 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
1137 tmp.name[sizeof(tmp.name) - 1] = 0; 1137 tmp.name[sizeof(tmp.name) - 1] = 0;
1138 1138
1139 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1139 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1140 newinfo = vmalloc(sizeof(*newinfo) + countersize); 1140 newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
1141 PAGE_KERNEL);
1141 if (!newinfo) 1142 if (!newinfo)
1142 return -ENOMEM; 1143 return -ENOMEM;
1143 1144
1144 if (countersize) 1145 if (countersize)
1145 memset(newinfo->counters, 0, countersize); 1146 memset(newinfo->counters, 0, countersize);
1146 1147
1147 newinfo->entries = vmalloc(tmp.entries_size); 1148 newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
1149 PAGE_KERNEL);
1148 if (!newinfo->entries) { 1150 if (!newinfo->entries) {
1149 ret = -ENOMEM; 1151 ret = -ENOMEM;
1150 goto free_newinfo; 1152 goto free_newinfo;
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 08cbed7d940e..419e8edf23ba 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
229 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) 229 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
230 return false; 230 return false;
231 231
232 ip6h = ipv6_hdr(skb);
232 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); 233 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
233 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) 234 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
234 return false; 235 return false;
diff --git a/net/can/gw.c b/net/can/gw.c
index faa3da88a127..53859346dc9a 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
416 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) 416 while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
417 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); 417 (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
418 418
419 /* check for checksum updates when the CAN frame has been modified */ 419 /* Has the CAN frame been modified? */
420 if (modidx) { 420 if (modidx) {
421 if (gwj->mod.csumfunc.crc8) 421 /* get available space for the processed CAN frame type */
422 int max_len = nskb->len - offsetof(struct can_frame, data);
423
424 /* dlc may have changed, make sure it fits to the CAN frame */
425 if (cf->can_dlc > max_len)
426 goto out_delete;
427
428 /* check for checksum updates in classic CAN length only */
429 if (gwj->mod.csumfunc.crc8) {
430 if (cf->can_dlc > 8)
431 goto out_delete;
432
422 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); 433 (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
434 }
435
436 if (gwj->mod.csumfunc.xor) {
437 if (cf->can_dlc > 8)
438 goto out_delete;
423 439
424 if (gwj->mod.csumfunc.xor)
425 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); 440 (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
441 }
426 } 442 }
427 443
428 /* clear the skb timestamp if not configured the other way */ 444 /* clear the skb timestamp if not configured the other way */
@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
434 gwj->dropped_frames++; 450 gwj->dropped_frames++;
435 else 451 else
436 gwj->handled_frames++; 452 gwj->handled_frames++;
453
454 return;
455
456 out_delete:
457 /* delete frame due to misconfiguration */
458 gwj->deleted_frames++;
459 kfree_skb(nskb);
460 return;
437} 461}
438 462
439static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) 463static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 87afb9ec4c68..9cab80207ced 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -255,6 +255,7 @@ enum {
255 Opt_nocephx_sign_messages, 255 Opt_nocephx_sign_messages,
256 Opt_tcp_nodelay, 256 Opt_tcp_nodelay,
257 Opt_notcp_nodelay, 257 Opt_notcp_nodelay,
258 Opt_abort_on_full,
258}; 259};
259 260
260static match_table_t opt_tokens = { 261static match_table_t opt_tokens = {
@@ -280,6 +281,7 @@ static match_table_t opt_tokens = {
280 {Opt_nocephx_sign_messages, "nocephx_sign_messages"}, 281 {Opt_nocephx_sign_messages, "nocephx_sign_messages"},
281 {Opt_tcp_nodelay, "tcp_nodelay"}, 282 {Opt_tcp_nodelay, "tcp_nodelay"},
282 {Opt_notcp_nodelay, "notcp_nodelay"}, 283 {Opt_notcp_nodelay, "notcp_nodelay"},
284 {Opt_abort_on_full, "abort_on_full"},
283 {-1, NULL} 285 {-1, NULL}
284}; 286};
285 287
@@ -535,6 +537,10 @@ ceph_parse_options(char *options, const char *dev_name,
535 opt->flags &= ~CEPH_OPT_TCP_NODELAY; 537 opt->flags &= ~CEPH_OPT_TCP_NODELAY;
536 break; 538 break;
537 539
540 case Opt_abort_on_full:
541 opt->flags |= CEPH_OPT_ABORT_ON_FULL;
542 break;
543
538 default: 544 default:
539 BUG_ON(token); 545 BUG_ON(token);
540 } 546 }
@@ -549,7 +555,8 @@ out:
549} 555}
550EXPORT_SYMBOL(ceph_parse_options); 556EXPORT_SYMBOL(ceph_parse_options);
551 557
552int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) 558int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
559 bool show_all)
553{ 560{
554 struct ceph_options *opt = client->options; 561 struct ceph_options *opt = client->options;
555 size_t pos = m->count; 562 size_t pos = m->count;
@@ -574,6 +581,8 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
574 seq_puts(m, "nocephx_sign_messages,"); 581 seq_puts(m, "nocephx_sign_messages,");
575 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) 582 if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
576 seq_puts(m, "notcp_nodelay,"); 583 seq_puts(m, "notcp_nodelay,");
584 if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
585 seq_puts(m, "abort_on_full,");
577 586
578 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) 587 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
579 seq_printf(m, "mount_timeout=%d,", 588 seq_printf(m, "mount_timeout=%d,",
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 02952605d121..46f65709a6ff 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -375,7 +375,7 @@ static int client_options_show(struct seq_file *s, void *p)
375 struct ceph_client *client = s->private; 375 struct ceph_client *client = s->private;
376 int ret; 376 int ret;
377 377
378 ret = ceph_print_client_options(s, client); 378 ret = ceph_print_client_options(s, client, true);
379 if (ret) 379 if (ret)
380 return ret; 380 return ret;
381 381
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index d23a9f81f3d7..fa9530dd876e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2315,7 +2315,7 @@ again:
2315 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || 2315 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2316 pool_full(osdc, req->r_t.base_oloc.pool))) { 2316 pool_full(osdc, req->r_t.base_oloc.pool))) {
2317 dout("req %p full/pool_full\n", req); 2317 dout("req %p full/pool_full\n", req);
2318 if (osdc->abort_on_full) { 2318 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2319 err = -ENOSPC; 2319 err = -ENOSPC;
2320 } else { 2320 } else {
2321 pr_warn_ratelimited("FULL or reached pool quota\n"); 2321 pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2545,7 +2545,7 @@ static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2545{ 2545{
2546 bool victims = false; 2546 bool victims = false;
2547 2547
2548 if (osdc->abort_on_full && 2548 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2549 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))) 2549 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2550 for_each_request(osdc, abort_on_full_fn, &victims); 2550 for_each_request(osdc, abort_on_full_fn, &victims);
2551} 2551}
diff --git a/net/core/filter.c b/net/core/filter.c
index 447dd1bad31f..7559d6835ecb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2020,18 +2020,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2020static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, 2020static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2021 u32 flags) 2021 u32 flags)
2022{ 2022{
2023 /* skb->mac_len is not set on normal egress */ 2023 unsigned int mlen = skb_network_offset(skb);
2024 unsigned int mlen = skb->network_header - skb->mac_header;
2025 2024
2026 __skb_pull(skb, mlen); 2025 if (mlen) {
2026 __skb_pull(skb, mlen);
2027 2027
2028 /* At ingress, the mac header has already been pulled once. 2028 /* At ingress, the mac header has already been pulled once.
2029 * At egress, skb_pospull_rcsum has to be done in case that 2029 * At egress, skb_pospull_rcsum has to be done in case that
2030 * the skb is originated from ingress (i.e. a forwarded skb) 2030 * the skb is originated from ingress (i.e. a forwarded skb)
2031 * to ensure that rcsum starts at net header. 2031 * to ensure that rcsum starts at net header.
2032 */ 2032 */
2033 if (!skb_at_tc_ingress(skb)) 2033 if (!skb_at_tc_ingress(skb))
2034 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); 2034 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2035 }
2035 skb_pop_mac_header(skb); 2036 skb_pop_mac_header(skb);
2036 skb_reset_mac_len(skb); 2037 skb_reset_mac_len(skb);
2037 return flags & BPF_F_INGRESS ? 2038 return flags & BPF_F_INGRESS ?
@@ -4119,6 +4120,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4119 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); 4120 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4120 break; 4121 break;
4121 case SO_MAX_PACING_RATE: /* 32bit version */ 4122 case SO_MAX_PACING_RATE: /* 32bit version */
4123 if (val != ~0U)
4124 cmpxchg(&sk->sk_pacing_status,
4125 SK_PACING_NONE,
4126 SK_PACING_NEEDED);
4122 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; 4127 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4123 sk->sk_pacing_rate = min(sk->sk_pacing_rate, 4128 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4124 sk->sk_max_pacing_rate); 4129 sk->sk_max_pacing_rate);
@@ -4132,7 +4137,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4132 sk->sk_rcvlowat = val ? : 1; 4137 sk->sk_rcvlowat = val ? : 1;
4133 break; 4138 break;
4134 case SO_MARK: 4139 case SO_MARK:
4135 sk->sk_mark = val; 4140 if (sk->sk_mark != val) {
4141 sk->sk_mark = val;
4142 sk_dst_reset(sk);
4143 }
4136 break; 4144 break;
4137 default: 4145 default:
4138 ret = -EINVAL; 4146 ret = -EINVAL;
@@ -4203,7 +4211,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4203 /* Only some options are supported */ 4211 /* Only some options are supported */
4204 switch (optname) { 4212 switch (optname) {
4205 case TCP_BPF_IW: 4213 case TCP_BPF_IW:
4206 if (val <= 0 || tp->data_segs_out > 0) 4214 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4207 ret = -EINVAL; 4215 ret = -EINVAL;
4208 else 4216 else
4209 tp->snd_cwnd = val; 4217 tp->snd_cwnd = val;
@@ -5309,7 +5317,7 @@ bpf_base_func_proto(enum bpf_func_id func_id)
5309 case BPF_FUNC_trace_printk: 5317 case BPF_FUNC_trace_printk:
5310 if (capable(CAP_SYS_ADMIN)) 5318 if (capable(CAP_SYS_ADMIN))
5311 return bpf_get_trace_printk_proto(); 5319 return bpf_get_trace_printk_proto();
5312 /* else: fall through */ 5320 /* else, fall through */
5313 default: 5321 default:
5314 return NULL; 5322 return NULL;
5315 } 5323 }
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 3e85437f7106..a648568c5e8f 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
63 lwt->name ? : "<unknown>"); 63 lwt->name ? : "<unknown>");
64 ret = BPF_OK; 64 ret = BPF_OK;
65 } else { 65 } else {
66 skb_reset_mac_header(skb);
66 ret = skb_do_redirect(skb); 67 ret = skb_do_redirect(skb);
67 if (ret == 0) 68 if (ret == 0)
68 ret = BPF_REDIRECT; 69 ret = BPF_REDIRECT;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 763a7b08df67..4230400b9a30 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -18,6 +18,7 @@
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 19
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/kmemleak.h>
21#include <linux/types.h> 22#include <linux/types.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -443,12 +444,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
443 ret = kmalloc(sizeof(*ret), GFP_ATOMIC); 444 ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
444 if (!ret) 445 if (!ret)
445 return NULL; 446 return NULL;
446 if (size <= PAGE_SIZE) 447 if (size <= PAGE_SIZE) {
447 buckets = kzalloc(size, GFP_ATOMIC); 448 buckets = kzalloc(size, GFP_ATOMIC);
448 else 449 } else {
449 buckets = (struct neighbour __rcu **) 450 buckets = (struct neighbour __rcu **)
450 __get_free_pages(GFP_ATOMIC | __GFP_ZERO, 451 __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
451 get_order(size)); 452 get_order(size));
453 kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
454 }
452 if (!buckets) { 455 if (!buckets) {
453 kfree(ret); 456 kfree(ret);
454 return NULL; 457 return NULL;
@@ -468,10 +471,12 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
468 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *); 471 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
469 struct neighbour __rcu **buckets = nht->hash_buckets; 472 struct neighbour __rcu **buckets = nht->hash_buckets;
470 473
471 if (size <= PAGE_SIZE) 474 if (size <= PAGE_SIZE) {
472 kfree(buckets); 475 kfree(buckets);
473 else 476 } else {
477 kmemleak_free(buckets);
474 free_pages((unsigned long)buckets, get_order(size)); 478 free_pages((unsigned long)buckets, get_order(size));
479 }
475 kfree(nht); 480 kfree(nht);
476} 481}
477 482
@@ -1002,7 +1007,7 @@ static void neigh_probe(struct neighbour *neigh)
1002 if (neigh->ops->solicit) 1007 if (neigh->ops->solicit)
1003 neigh->ops->solicit(neigh, skb); 1008 neigh->ops->solicit(neigh, skb);
1004 atomic_inc(&neigh->probes); 1009 atomic_inc(&neigh->probes);
1005 kfree_skb(skb); 1010 consume_skb(skb);
1006} 1011}
1007 1012
1008/* Called when a timer expires for a neighbour entry. */ 1013/* Called when a timer expires for a neighbour entry. */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 37317ffec146..26d848484912 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5270,7 +5270,6 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5270 unsigned long chunk; 5270 unsigned long chunk;
5271 struct sk_buff *skb; 5271 struct sk_buff *skb;
5272 struct page *page; 5272 struct page *page;
5273 gfp_t gfp_head;
5274 int i; 5273 int i;
5275 5274
5276 *errcode = -EMSGSIZE; 5275 *errcode = -EMSGSIZE;
@@ -5280,12 +5279,8 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5280 if (npages > MAX_SKB_FRAGS) 5279 if (npages > MAX_SKB_FRAGS)
5281 return NULL; 5280 return NULL;
5282 5281
5283 gfp_head = gfp_mask;
5284 if (gfp_head & __GFP_DIRECT_RECLAIM)
5285 gfp_head |= __GFP_RETRY_MAYFAIL;
5286
5287 *errcode = -ENOBUFS; 5282 *errcode = -ENOBUFS;
5288 skb = alloc_skb(header_len, gfp_head); 5283 skb = alloc_skb(header_len, gfp_mask);
5289 if (!skb) 5284 if (!skb)
5290 return NULL; 5285 return NULL;
5291 5286
diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c
index 5e04ed25bc0e..1e976bb93d99 100644
--- a/net/ipv4/bpfilter/sockopt.c
+++ b/net/ipv4/bpfilter/sockopt.c
@@ -1,28 +1,54 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2#include <linux/init.h>
3#include <linux/module.h>
2#include <linux/uaccess.h> 4#include <linux/uaccess.h>
3#include <linux/bpfilter.h> 5#include <linux/bpfilter.h>
4#include <uapi/linux/bpf.h> 6#include <uapi/linux/bpf.h>
5#include <linux/wait.h> 7#include <linux/wait.h>
6#include <linux/kmod.h> 8#include <linux/kmod.h>
9#include <linux/fs.h>
10#include <linux/file.h>
7 11
8int (*bpfilter_process_sockopt)(struct sock *sk, int optname, 12struct bpfilter_umh_ops bpfilter_ops;
9 char __user *optval, 13EXPORT_SYMBOL_GPL(bpfilter_ops);
10 unsigned int optlen, bool is_set); 14
11EXPORT_SYMBOL_GPL(bpfilter_process_sockopt); 15static void bpfilter_umh_cleanup(struct umh_info *info)
16{
17 mutex_lock(&bpfilter_ops.lock);
18 bpfilter_ops.stop = true;
19 fput(info->pipe_to_umh);
20 fput(info->pipe_from_umh);
21 info->pid = 0;
22 mutex_unlock(&bpfilter_ops.lock);
23}
12 24
13static int bpfilter_mbox_request(struct sock *sk, int optname, 25static int bpfilter_mbox_request(struct sock *sk, int optname,
14 char __user *optval, 26 char __user *optval,
15 unsigned int optlen, bool is_set) 27 unsigned int optlen, bool is_set)
16{ 28{
17 if (!bpfilter_process_sockopt) { 29 int err;
18 int err = request_module("bpfilter"); 30 mutex_lock(&bpfilter_ops.lock);
31 if (!bpfilter_ops.sockopt) {
32 mutex_unlock(&bpfilter_ops.lock);
33 err = request_module("bpfilter");
34 mutex_lock(&bpfilter_ops.lock);
19 35
20 if (err) 36 if (err)
21 return err; 37 goto out;
22 if (!bpfilter_process_sockopt) 38 if (!bpfilter_ops.sockopt) {
23 return -ECHILD; 39 err = -ECHILD;
40 goto out;
41 }
42 }
43 if (bpfilter_ops.stop) {
44 err = bpfilter_ops.start();
45 if (err)
46 goto out;
24 } 47 }
25 return bpfilter_process_sockopt(sk, optname, optval, optlen, is_set); 48 err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
49out:
50 mutex_unlock(&bpfilter_ops.lock);
51 return err;
26} 52}
27 53
28int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, 54int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
@@ -41,3 +67,15 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
41 67
42 return bpfilter_mbox_request(sk, optname, optval, len, false); 68 return bpfilter_mbox_request(sk, optname, optval, len, false);
43} 69}
70
71static int __init bpfilter_sockopt_init(void)
72{
73 mutex_init(&bpfilter_ops.lock);
74 bpfilter_ops.stop = true;
75 bpfilter_ops.info.cmdline = "bpfilter_umh";
76 bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
77
78 return 0;
79}
80
81module_init(bpfilter_sockopt_init);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 04ba321ae5ce..e258a00b4a3d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1826,7 +1826,7 @@ put_tgt_net:
1826 if (fillargs.netnsid >= 0) 1826 if (fillargs.netnsid >= 0)
1827 put_net(tgt_net); 1827 put_net(tgt_net);
1828 1828
1829 return err < 0 ? err : skb->len; 1829 return skb->len ? : err;
1830} 1830}
1831 1831
1832static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, 1832static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6df95be96311..fe4f6a624238 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -203,7 +203,7 @@ static void fib_flush(struct net *net)
203 struct fib_table *tb; 203 struct fib_table *tb;
204 204
205 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) 205 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
206 flushed += fib_table_flush(net, tb); 206 flushed += fib_table_flush(net, tb, false);
207 } 207 }
208 208
209 if (flushed) 209 if (flushed)
@@ -1463,7 +1463,7 @@ static void ip_fib_net_exit(struct net *net)
1463 1463
1464 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { 1464 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
1465 hlist_del(&tb->tb_hlist); 1465 hlist_del(&tb->tb_hlist);
1466 fib_table_flush(net, tb); 1466 fib_table_flush(net, tb, true);
1467 fib_free_table(tb); 1467 fib_free_table(tb);
1468 } 1468 }
1469 } 1469 }
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 237c9f72b265..a573e37e0615 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb)
1856} 1856}
1857 1857
1858/* Caller must hold RTNL. */ 1858/* Caller must hold RTNL. */
1859int fib_table_flush(struct net *net, struct fib_table *tb) 1859int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
1860{ 1860{
1861 struct trie *t = (struct trie *)tb->tb_data; 1861 struct trie *t = (struct trie *)tb->tb_data;
1862 struct key_vector *pn = t->kv; 1862 struct key_vector *pn = t->kv;
@@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
1904 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { 1904 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1905 struct fib_info *fi = fa->fa_info; 1905 struct fib_info *fi = fa->fa_info;
1906 1906
1907 if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || 1907 if (!fi || tb->tb_id != fa->tb_id ||
1908 tb->tb_id != fa->tb_id) { 1908 (!(fi->fib_flags & RTNH_F_DEAD) &&
1909 !fib_props[fa->fa_type].error)) {
1910 slen = fa->fa_slen;
1911 continue;
1912 }
1913
1914 /* Do not flush error routes if network namespace is
1915 * not being dismantled
1916 */
1917 if (!flush_all && fib_props[fa->fa_type].error) {
1909 slen = fa->fa_slen; 1918 slen = fa->fa_slen;
1910 continue; 1919 continue;
1911 } 1920 }
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 0c9f171fb085..437070d1ffb1 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1020,10 +1020,11 @@ static int gue_err(struct sk_buff *skb, u32 info)
1020{ 1020{
1021 int transport_offset = skb_transport_offset(skb); 1021 int transport_offset = skb_transport_offset(skb);
1022 struct guehdr *guehdr; 1022 struct guehdr *guehdr;
1023 size_t optlen; 1023 size_t len, optlen;
1024 int ret; 1024 int ret;
1025 1025
1026 if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) 1026 len = sizeof(struct udphdr) + sizeof(struct guehdr);
1027 if (!pskb_may_pull(skb, len))
1027 return -EINVAL; 1028 return -EINVAL;
1028 1029
1029 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 1030 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -1058,6 +1059,10 @@ static int gue_err(struct sk_buff *skb, u32 info)
1058 1059
1059 optlen = guehdr->hlen << 2; 1060 optlen = guehdr->hlen << 2;
1060 1061
1062 if (!pskb_may_pull(skb, len + optlen))
1063 return -EINVAL;
1064
1065 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
1061 if (validate_gue_flags(guehdr, optlen)) 1066 if (validate_gue_flags(guehdr, optlen))
1062 return -EINVAL; 1067 return -EINVAL;
1063 1068
@@ -1065,7 +1070,8 @@ static int gue_err(struct sk_buff *skb, u32 info)
1065 * recursion. Besides, this kind of encapsulation can't even be 1070 * recursion. Besides, this kind of encapsulation can't even be
1066 * configured currently. Discard this. 1071 * configured currently. Discard this.
1067 */ 1072 */
1068 if (guehdr->proto_ctype == IPPROTO_UDP) 1073 if (guehdr->proto_ctype == IPPROTO_UDP ||
1074 guehdr->proto_ctype == IPPROTO_UDPLITE)
1069 return -EOPNOTSUPP; 1075 return -EOPNOTSUPP;
1070 1076
1071 skb_set_transport_header(skb, -(int)sizeof(struct icmphdr)); 1077 skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d1d09f3e5f9e..b1a74d80d868 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -569,8 +569,7 @@ err_free_skb:
569 dev->stats.tx_dropped++; 569 dev->stats.tx_dropped++;
570} 570}
571 571
572static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, 572static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
573 __be16 proto)
574{ 573{
575 struct ip_tunnel *tunnel = netdev_priv(dev); 574 struct ip_tunnel *tunnel = netdev_priv(dev);
576 struct ip_tunnel_info *tun_info; 575 struct ip_tunnel_info *tun_info;
@@ -578,10 +577,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
578 struct erspan_metadata *md; 577 struct erspan_metadata *md;
579 struct rtable *rt = NULL; 578 struct rtable *rt = NULL;
580 bool truncate = false; 579 bool truncate = false;
580 __be16 df, proto;
581 struct flowi4 fl; 581 struct flowi4 fl;
582 int tunnel_hlen; 582 int tunnel_hlen;
583 int version; 583 int version;
584 __be16 df;
585 int nhoff; 584 int nhoff;
586 int thoff; 585 int thoff;
587 586
@@ -626,18 +625,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
626 if (version == 1) { 625 if (version == 1) {
627 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), 626 erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
628 ntohl(md->u.index), truncate, true); 627 ntohl(md->u.index), truncate, true);
628 proto = htons(ETH_P_ERSPAN);
629 } else if (version == 2) { 629 } else if (version == 2) {
630 erspan_build_header_v2(skb, 630 erspan_build_header_v2(skb,
631 ntohl(tunnel_id_to_key32(key->tun_id)), 631 ntohl(tunnel_id_to_key32(key->tun_id)),
632 md->u.md2.dir, 632 md->u.md2.dir,
633 get_hwid(&md->u.md2), 633 get_hwid(&md->u.md2),
634 truncate, true); 634 truncate, true);
635 proto = htons(ETH_P_ERSPAN2);
635 } else { 636 } else {
636 goto err_free_rt; 637 goto err_free_rt;
637 } 638 }
638 639
639 gre_build_header(skb, 8, TUNNEL_SEQ, 640 gre_build_header(skb, 8, TUNNEL_SEQ,
640 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); 641 proto, 0, htonl(tunnel->o_seqno++));
641 642
642 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; 643 df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
643 644
@@ -721,12 +722,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
721{ 722{
722 struct ip_tunnel *tunnel = netdev_priv(dev); 723 struct ip_tunnel *tunnel = netdev_priv(dev);
723 bool truncate = false; 724 bool truncate = false;
725 __be16 proto;
724 726
725 if (!pskb_inet_may_pull(skb)) 727 if (!pskb_inet_may_pull(skb))
726 goto free_skb; 728 goto free_skb;
727 729
728 if (tunnel->collect_md) { 730 if (tunnel->collect_md) {
729 erspan_fb_xmit(skb, dev, skb->protocol); 731 erspan_fb_xmit(skb, dev);
730 return NETDEV_TX_OK; 732 return NETDEV_TX_OK;
731 } 733 }
732 734
@@ -742,19 +744,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
742 } 744 }
743 745
744 /* Push ERSPAN header */ 746 /* Push ERSPAN header */
745 if (tunnel->erspan_ver == 1) 747 if (tunnel->erspan_ver == 1) {
746 erspan_build_header(skb, ntohl(tunnel->parms.o_key), 748 erspan_build_header(skb, ntohl(tunnel->parms.o_key),
747 tunnel->index, 749 tunnel->index,
748 truncate, true); 750 truncate, true);
749 else if (tunnel->erspan_ver == 2) 751 proto = htons(ETH_P_ERSPAN);
752 } else if (tunnel->erspan_ver == 2) {
750 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), 753 erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
751 tunnel->dir, tunnel->hwid, 754 tunnel->dir, tunnel->hwid,
752 truncate, true); 755 truncate, true);
753 else 756 proto = htons(ETH_P_ERSPAN2);
757 } else {
754 goto free_skb; 758 goto free_skb;
759 }
755 760
756 tunnel->parms.o_flags &= ~TUNNEL_KEY; 761 tunnel->parms.o_flags &= ~TUNNEL_KEY;
757 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); 762 __gre_xmit(skb, dev, &tunnel->parms.iph, proto);
758 return NETDEV_TX_OK; 763 return NETDEV_TX_OK;
759 764
760free_skb: 765free_skb:
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 26921f6b3b92..51d8efba6de2 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -488,6 +488,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
488 goto drop; 488 goto drop;
489 } 489 }
490 490
491 iph = ip_hdr(skb);
491 skb->transport_header = skb->network_header + iph->ihl*4; 492 skb->transport_header = skb->network_header + iph->ihl*4;
492 493
493 /* Remove any debris in the socket control block */ 494 /* Remove any debris in the socket control block */
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fffcc130900e..82f341e84fae 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
148 148
149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 149static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
150{ 150{
151 __be16 _ports[2], *ports;
151 struct sockaddr_in sin; 152 struct sockaddr_in sin;
152 __be16 *ports;
153 int end;
154
155 end = skb_transport_offset(skb) + 4;
156 if (end > 0 && !pskb_may_pull(skb, end))
157 return;
158 153
159 /* All current transport protocols have the port numbers in the 154 /* All current transport protocols have the port numbers in the
160 * first four bytes of the transport header and this function is 155 * first four bytes of the transport header and this function is
161 * written with this assumption in mind. 156 * written with this assumption in mind.
162 */ 157 */
163 ports = (__be16 *)skb_transport_header(skb); 158 ports = skb_header_pointer(skb, skb_transport_offset(skb),
159 sizeof(_ports), &_ports);
160 if (!ports)
161 return;
164 162
165 sin.sin_family = AF_INET; 163 sin.sin_family = AF_INET;
166 sin.sin_addr.s_addr = ip_hdr(skb)->daddr; 164 sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 27e2f6837062..2079145a3b7c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1186 flags = msg->msg_flags; 1186 flags = msg->msg_flags;
1187 1187
1188 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { 1188 if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
1189 if (sk->sk_state != TCP_ESTABLISHED) { 1189 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
1190 err = -EINVAL; 1190 err = -EINVAL;
1191 goto out_err; 1191 goto out_err;
1192 } 1192 }
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index f87dbc78b6bc..71a29e9c0620 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -226,7 +226,7 @@ static int tcp_write_timeout(struct sock *sk)
226 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 226 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
227 if (icsk->icsk_retransmits) { 227 if (icsk->icsk_retransmits) {
228 dst_negative_advice(sk); 228 dst_negative_advice(sk);
229 } else if (!tp->syn_data && !tp->syn_fastopen) { 229 } else {
230 sk_rethink_txhash(sk); 230 sk_rethink_txhash(sk);
231 } 231 }
232 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; 232 retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 3fb0ed5e4789..5c3cd5d84a6f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -847,15 +847,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
847 const int hlen = skb_network_header_len(skb) + 847 const int hlen = skb_network_header_len(skb) +
848 sizeof(struct udphdr); 848 sizeof(struct udphdr);
849 849
850 if (hlen + cork->gso_size > cork->fragsize) 850 if (hlen + cork->gso_size > cork->fragsize) {
851 kfree_skb(skb);
851 return -EINVAL; 852 return -EINVAL;
852 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 853 }
854 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
855 kfree_skb(skb);
853 return -EINVAL; 856 return -EINVAL;
854 if (sk->sk_no_check_tx) 857 }
858 if (sk->sk_no_check_tx) {
859 kfree_skb(skb);
855 return -EINVAL; 860 return -EINVAL;
861 }
856 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 862 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
857 dst_xfrm(skb_dst(skb))) 863 dst_xfrm(skb_dst(skb))) {
864 kfree_skb(skb);
858 return -EIO; 865 return -EIO;
866 }
859 867
860 skb_shinfo(skb)->gso_size = cork->gso_size; 868 skb_shinfo(skb)->gso_size = cork->gso_size;
861 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 869 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1918,7 +1926,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash)
1918} 1926}
1919EXPORT_SYMBOL(udp_lib_rehash); 1927EXPORT_SYMBOL(udp_lib_rehash);
1920 1928
1921static void udp_v4_rehash(struct sock *sk) 1929void udp_v4_rehash(struct sock *sk)
1922{ 1930{
1923 u16 new_hash = ipv4_portaddr_hash(sock_net(sk), 1931 u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
1924 inet_sk(sk)->inet_rcv_saddr, 1932 inet_sk(sk)->inet_rcv_saddr,
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 322672655419..6b2fa77eeb1c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -10,6 +10,7 @@ int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
10int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); 10int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
11 11
12int udp_v4_get_port(struct sock *sk, unsigned short snum); 12int udp_v4_get_port(struct sock *sk, unsigned short snum);
13void udp_v4_rehash(struct sock *sk);
13 14
14int udp_setsockopt(struct sock *sk, int level, int optname, 15int udp_setsockopt(struct sock *sk, int level, int optname,
15 char __user *optval, unsigned int optlen); 16 char __user *optval, unsigned int optlen);
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 39c7f17d916f..3c94b8f0ff27 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -53,6 +53,7 @@ struct proto udplite_prot = {
53 .sendpage = udp_sendpage, 53 .sendpage = udp_sendpage,
54 .hash = udp_lib_hash, 54 .hash = udp_lib_hash,
55 .unhash = udp_lib_unhash, 55 .unhash = udp_lib_unhash,
56 .rehash = udp_v4_rehash,
56 .get_port = udp_v4_get_port, 57 .get_port = udp_v4_get_port,
57 .memory_allocated = &udp_memory_allocated, 58 .memory_allocated = &udp_memory_allocated,
58 .sysctl_mem = sysctl_udp_mem, 59 .sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8eeec6eb2bd3..93d5ad2b1a69 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5154,7 +5154,7 @@ put_tgt_net:
5154 if (fillargs.netnsid >= 0) 5154 if (fillargs.netnsid >= 0)
5155 put_net(tgt_net); 5155 put_net(tgt_net);
5156 5156
5157 return err < 0 ? err : skb->len; 5157 return skb->len ? : err;
5158} 5158}
5159 5159
5160static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) 5160static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0bfb6cc0a30a..d99753b5e39b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -310,6 +310,7 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
310 310
311 /* Check if the address belongs to the host. */ 311 /* Check if the address belongs to the host. */
312 if (addr_type == IPV6_ADDR_MAPPED) { 312 if (addr_type == IPV6_ADDR_MAPPED) {
313 struct net_device *dev = NULL;
313 int chk_addr_ret; 314 int chk_addr_ret;
314 315
315 /* Binding to v4-mapped address on a v6-only socket 316 /* Binding to v4-mapped address on a v6-only socket
@@ -320,9 +321,20 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
320 goto out; 321 goto out;
321 } 322 }
322 323
324 rcu_read_lock();
325 if (sk->sk_bound_dev_if) {
326 dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
327 if (!dev) {
328 err = -ENODEV;
329 goto out_unlock;
330 }
331 }
332
323 /* Reproduce AF_INET checks to make the bindings consistent */ 333 /* Reproduce AF_INET checks to make the bindings consistent */
324 v4addr = addr->sin6_addr.s6_addr32[3]; 334 v4addr = addr->sin6_addr.s6_addr32[3];
325 chk_addr_ret = inet_addr_type(net, v4addr); 335 chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
336 rcu_read_unlock();
337
326 if (!inet_can_nonlocal_bind(net, inet) && 338 if (!inet_can_nonlocal_bind(net, inet) &&
327 v4addr != htonl(INADDR_ANY) && 339 v4addr != htonl(INADDR_ANY) &&
328 chk_addr_ret != RTN_LOCAL && 340 chk_addr_ret != RTN_LOCAL &&
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index bde08aa549f3..ee4a4e54d016 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
341 skb_reset_network_header(skb); 341 skb_reset_network_header(skb);
342 iph = ipv6_hdr(skb); 342 iph = ipv6_hdr(skb);
343 iph->daddr = fl6->daddr; 343 iph->daddr = fl6->daddr;
344 ip6_flow_hdr(iph, 0, 0);
344 345
345 serr = SKB_EXT_ERR(skb); 346 serr = SKB_EXT_ERR(skb);
346 serr->ee.ee_errno = err; 347 serr->ee.ee_errno = err;
@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
700 } 701 }
701 if (np->rxopt.bits.rxorigdstaddr) { 702 if (np->rxopt.bits.rxorigdstaddr) {
702 struct sockaddr_in6 sin6; 703 struct sockaddr_in6 sin6;
703 __be16 *ports; 704 __be16 _ports[2], *ports;
704 int end;
705 705
706 end = skb_transport_offset(skb) + 4; 706 ports = skb_header_pointer(skb, skb_transport_offset(skb),
707 if (end <= 0 || pskb_may_pull(skb, end)) { 707 sizeof(_ports), &_ports);
708 if (ports) {
708 /* All current transport protocols have the port numbers in the 709 /* All current transport protocols have the port numbers in the
709 * first four bytes of the transport header and this function is 710 * first four bytes of the transport header and this function is
710 * written with this assumption in mind. 711 * written with this assumption in mind.
711 */ 712 */
712 ports = (__be16 *)skb_transport_header(skb);
713
714 sin6.sin6_family = AF_INET6; 713 sin6.sin6_family = AF_INET6;
715 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 714 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
716 sin6.sin6_port = ports[1]; 715 sin6.sin6_port = ports[1];
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index bd675c61deb1..b858bd5280bf 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -90,10 +90,11 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
90{ 90{
91 int transport_offset = skb_transport_offset(skb); 91 int transport_offset = skb_transport_offset(skb);
92 struct guehdr *guehdr; 92 struct guehdr *guehdr;
93 size_t optlen; 93 size_t len, optlen;
94 int ret; 94 int ret;
95 95
96 if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr)) 96 len = sizeof(struct udphdr) + sizeof(struct guehdr);
97 if (!pskb_may_pull(skb, len))
97 return -EINVAL; 98 return -EINVAL;
98 99
99 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 100 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
@@ -128,9 +129,21 @@ static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
128 129
129 optlen = guehdr->hlen << 2; 130 optlen = guehdr->hlen << 2;
130 131
132 if (!pskb_may_pull(skb, len + optlen))
133 return -EINVAL;
134
135 guehdr = (struct guehdr *)&udp_hdr(skb)[1];
131 if (validate_gue_flags(guehdr, optlen)) 136 if (validate_gue_flags(guehdr, optlen))
132 return -EINVAL; 137 return -EINVAL;
133 138
139 /* Handling exceptions for direct UDP encapsulation in GUE would lead to
140 * recursion. Besides, this kind of encapsulation can't even be
141 * configured currently. Discard this.
142 */
143 if (guehdr->proto_ctype == IPPROTO_UDP ||
144 guehdr->proto_ctype == IPPROTO_UDPLITE)
145 return -EOPNOTSUPP;
146
134 skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr)); 147 skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
135 ret = gue6_err_proto_handler(guehdr->proto_ctype, skb, 148 ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
136 opt, type, code, offset, info); 149 opt, type, code, offset, info);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 5d7aa2c2770c..bbcdfd299692 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -423,10 +423,10 @@ static int icmp6_iif(const struct sk_buff *skb)
423static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, 423static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
424 const struct in6_addr *force_saddr) 424 const struct in6_addr *force_saddr)
425{ 425{
426 struct net *net = dev_net(skb->dev);
427 struct inet6_dev *idev = NULL; 426 struct inet6_dev *idev = NULL;
428 struct ipv6hdr *hdr = ipv6_hdr(skb); 427 struct ipv6hdr *hdr = ipv6_hdr(skb);
429 struct sock *sk; 428 struct sock *sk;
429 struct net *net;
430 struct ipv6_pinfo *np; 430 struct ipv6_pinfo *np;
431 const struct in6_addr *saddr = NULL; 431 const struct in6_addr *saddr = NULL;
432 struct dst_entry *dst; 432 struct dst_entry *dst;
@@ -437,12 +437,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
437 int iif = 0; 437 int iif = 0;
438 int addr_type = 0; 438 int addr_type = 0;
439 int len; 439 int len;
440 u32 mark = IP6_REPLY_MARK(net, skb->mark); 440 u32 mark;
441 441
442 if ((u8 *)hdr < skb->head || 442 if ((u8 *)hdr < skb->head ||
443 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb)) 443 (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
444 return; 444 return;
445 445
446 if (!skb->dev)
447 return;
448 net = dev_net(skb->dev);
449 mark = IP6_REPLY_MARK(net, skb->mark);
446 /* 450 /*
447 * Make sure we respect the rules 451 * Make sure we respect the rules
448 * i.e. RFC 1885 2.4(e) 452 * i.e. RFC 1885 2.4(e)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 09d0826742f8..b1be67ca6768 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -922,6 +922,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
922 __u8 dsfield = false; 922 __u8 dsfield = false;
923 struct flowi6 fl6; 923 struct flowi6 fl6;
924 int err = -EINVAL; 924 int err = -EINVAL;
925 __be16 proto;
925 __u32 mtu; 926 __u32 mtu;
926 int nhoff; 927 int nhoff;
927 int thoff; 928 int thoff;
@@ -1035,8 +1036,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
1035 } 1036 }
1036 1037
1037 /* Push GRE header. */ 1038 /* Push GRE header. */
1038 gre_build_header(skb, 8, TUNNEL_SEQ, 1039 proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
1039 htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); 1040 : htons(ETH_P_ERSPAN2);
1041 gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++));
1040 1042
1041 /* TooBig packet may have updated dst->dev's mtu */ 1043 /* TooBig packet may have updated dst->dev's mtu */
1042 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) 1044 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu)
@@ -1169,6 +1171,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
1169 t->parms.i_flags = p->i_flags; 1171 t->parms.i_flags = p->i_flags;
1170 t->parms.o_flags = p->o_flags; 1172 t->parms.o_flags = p->o_flags;
1171 t->parms.fwmark = p->fwmark; 1173 t->parms.fwmark = p->fwmark;
1174 t->parms.erspan_ver = p->erspan_ver;
1175 t->parms.index = p->index;
1176 t->parms.dir = p->dir;
1177 t->parms.hwid = p->hwid;
1172 dst_cache_reset(&t->dst_cache); 1178 dst_cache_reset(&t->dst_cache);
1173} 1179}
1174 1180
@@ -2025,9 +2031,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2025 struct nlattr *data[], 2031 struct nlattr *data[],
2026 struct netlink_ext_ack *extack) 2032 struct netlink_ext_ack *extack)
2027{ 2033{
2028 struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); 2034 struct ip6_tnl *t = netdev_priv(dev);
2035 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2029 struct __ip6_tnl_parm p; 2036 struct __ip6_tnl_parm p;
2030 struct ip6_tnl *t;
2031 2037
2032 t = ip6gre_changelink_common(dev, tb, data, &p, extack); 2038 t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2033 if (IS_ERR(t)) 2039 if (IS_ERR(t))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 40b225f87d5e..964491cf3672 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4251,17 +4251,6 @@ struct rt6_nh {
4251 struct list_head next; 4251 struct list_head next;
4252}; 4252};
4253 4253
4254static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
4255{
4256 struct rt6_nh *nh;
4257
4258 list_for_each_entry(nh, rt6_nh_list, next) {
4259 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
4260 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
4261 nh->r_cfg.fc_ifindex);
4262 }
4263}
4264
4265static int ip6_route_info_append(struct net *net, 4254static int ip6_route_info_append(struct net *net,
4266 struct list_head *rt6_nh_list, 4255 struct list_head *rt6_nh_list,
4267 struct fib6_info *rt, 4256 struct fib6_info *rt,
@@ -4407,7 +4396,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
4407 nh->fib6_info = NULL; 4396 nh->fib6_info = NULL;
4408 if (err) { 4397 if (err) {
4409 if (replace && nhn) 4398 if (replace && nhn)
4410 ip6_print_replace_route_err(&rt6_nh_list); 4399 NL_SET_ERR_MSG_MOD(extack,
4400 "multipath route replace failed (check consistency of installed routes)");
4411 err_nh = nh; 4401 err_nh = nh;
4412 goto add_errout; 4402 goto add_errout;
4413 } 4403 }
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9cbf363172bd..2596ffdeebea 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -102,7 +102,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
102 return udp_lib_get_port(sk, snum, hash2_nulladdr); 102 return udp_lib_get_port(sk, snum, hash2_nulladdr);
103} 103}
104 104
105static void udp_v6_rehash(struct sock *sk) 105void udp_v6_rehash(struct sock *sk)
106{ 106{
107 u16 new_hash = ipv6_portaddr_hash(sock_net(sk), 107 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
108 &sk->sk_v6_rcv_saddr, 108 &sk->sk_v6_rcv_saddr,
@@ -1132,15 +1132,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1132 const int hlen = skb_network_header_len(skb) + 1132 const int hlen = skb_network_header_len(skb) +
1133 sizeof(struct udphdr); 1133 sizeof(struct udphdr);
1134 1134
1135 if (hlen + cork->gso_size > cork->fragsize) 1135 if (hlen + cork->gso_size > cork->fragsize) {
1136 kfree_skb(skb);
1136 return -EINVAL; 1137 return -EINVAL;
1137 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) 1138 }
1139 if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
1140 kfree_skb(skb);
1138 return -EINVAL; 1141 return -EINVAL;
1139 if (udp_sk(sk)->no_check6_tx) 1142 }
1143 if (udp_sk(sk)->no_check6_tx) {
1144 kfree_skb(skb);
1140 return -EINVAL; 1145 return -EINVAL;
1146 }
1141 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || 1147 if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1142 dst_xfrm(skb_dst(skb))) 1148 dst_xfrm(skb_dst(skb))) {
1149 kfree_skb(skb);
1143 return -EIO; 1150 return -EIO;
1151 }
1144 1152
1145 skb_shinfo(skb)->gso_size = cork->gso_size; 1153 skb_shinfo(skb)->gso_size = cork->gso_size;
1146 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; 1154 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
@@ -1390,10 +1398,7 @@ do_udp_sendmsg:
1390 ipc6.opt = opt; 1398 ipc6.opt = opt;
1391 1399
1392 fl6.flowi6_proto = sk->sk_protocol; 1400 fl6.flowi6_proto = sk->sk_protocol;
1393 if (!ipv6_addr_any(daddr)) 1401 fl6.daddr = *daddr;
1394 fl6.daddr = *daddr;
1395 else
1396 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1397 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) 1402 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1398 fl6.saddr = np->saddr; 1403 fl6.saddr = np->saddr;
1399 fl6.fl6_sport = inet->inet_sport; 1404 fl6.fl6_sport = inet->inet_sport;
@@ -1421,6 +1426,9 @@ do_udp_sendmsg:
1421 } 1426 }
1422 } 1427 }
1423 1428
1429 if (ipv6_addr_any(&fl6.daddr))
1430 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1431
1424 final_p = fl6_update_dst(&fl6, opt, &final); 1432 final_p = fl6_update_dst(&fl6, opt, &final);
1425 if (final_p) 1433 if (final_p)
1426 connected = false; 1434 connected = false;
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 5730e6503cb4..20e324b6f358 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -13,6 +13,7 @@ int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
13 __be32, struct udp_table *); 13 __be32, struct udp_table *);
14 14
15int udp_v6_get_port(struct sock *sk, unsigned short snum); 15int udp_v6_get_port(struct sock *sk, unsigned short snum);
16void udp_v6_rehash(struct sock *sk);
16 17
17int udpv6_getsockopt(struct sock *sk, int level, int optname, 18int udpv6_getsockopt(struct sock *sk, int level, int optname,
18 char __user *optval, int __user *optlen); 19 char __user *optval, int __user *optlen);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index a125aebc29e5..f35907836444 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -49,6 +49,7 @@ struct proto udplitev6_prot = {
49 .recvmsg = udpv6_recvmsg, 49 .recvmsg = udpv6_recvmsg,
50 .hash = udp_lib_hash, 50 .hash = udp_lib_hash,
51 .unhash = udp_lib_unhash, 51 .unhash = udp_lib_unhash,
52 .rehash = udp_v6_rehash,
52 .get_port = udp_v6_get_port, 53 .get_port = udp_v6_get_port,
53 .memory_allocated = &udp_memory_allocated, 54 .memory_allocated = &udp_memory_allocated,
54 .sysctl_mem = sysctl_udp_mem, 55 .sysctl_mem = sysctl_udp_mem,
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index fa0844e2a68d..c0c72ae9df42 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
28{ 28{
29 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; 29 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
30 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple; 30 struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
31 struct dst_entry *other_dst = route->tuple[!dir].dst;
31 struct dst_entry *dst = route->tuple[dir].dst; 32 struct dst_entry *dst = route->tuple[dir].dst;
32 33
33 ft->dir = dir; 34 ft->dir = dir;
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
50 ft->src_port = ctt->src.u.tcp.port; 51 ft->src_port = ctt->src.u.tcp.port;
51 ft->dst_port = ctt->dst.u.tcp.port; 52 ft->dst_port = ctt->dst.u.tcp.port;
52 53
53 ft->iifidx = route->tuple[dir].ifindex; 54 ft->iifidx = other_dst->dev->ifindex;
54 ft->oifidx = route->tuple[!dir].ifindex; 55 ft->oifidx = dst->dev->ifindex;
55 ft->dst_cache = dst; 56 ft->dst_cache = dst;
56} 57}
57 58
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2b0a93300dd7..fb07f6cfc719 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2304,7 +2304,6 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
2304 struct net *net = sock_net(skb->sk); 2304 struct net *net = sock_net(skb->sk);
2305 unsigned int s_idx = cb->args[0]; 2305 unsigned int s_idx = cb->args[0];
2306 const struct nft_rule *rule; 2306 const struct nft_rule *rule;
2307 int rc = 1;
2308 2307
2309 list_for_each_entry_rcu(rule, &chain->rules, list) { 2308 list_for_each_entry_rcu(rule, &chain->rules, list) {
2310 if (!nft_is_active(net, rule)) 2309 if (!nft_is_active(net, rule))
@@ -2321,16 +2320,13 @@ static int __nf_tables_dump_rules(struct sk_buff *skb,
2321 NLM_F_MULTI | NLM_F_APPEND, 2320 NLM_F_MULTI | NLM_F_APPEND,
2322 table->family, 2321 table->family,
2323 table, chain, rule) < 0) 2322 table, chain, rule) < 0)
2324 goto out_unfinished; 2323 return 1;
2325 2324
2326 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 2325 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2327cont: 2326cont:
2328 (*idx)++; 2327 (*idx)++;
2329 } 2328 }
2330 rc = 0; 2329 return 0;
2331out_unfinished:
2332 cb->args[0] = *idx;
2333 return rc;
2334} 2330}
2335 2331
2336static int nf_tables_dump_rules(struct sk_buff *skb, 2332static int nf_tables_dump_rules(struct sk_buff *skb,
@@ -2354,7 +2350,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2354 if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) 2350 if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
2355 continue; 2351 continue;
2356 2352
2357 if (ctx && ctx->chain) { 2353 if (ctx && ctx->table && ctx->chain) {
2358 struct rhlist_head *list, *tmp; 2354 struct rhlist_head *list, *tmp;
2359 2355
2360 list = rhltable_lookup(&table->chains_ht, ctx->chain, 2356 list = rhltable_lookup(&table->chains_ht, ctx->chain,
@@ -2382,6 +2378,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
2382 } 2378 }
2383done: 2379done:
2384 rcu_read_unlock(); 2380 rcu_read_unlock();
2381
2382 cb->args[0] = idx;
2385 return skb->len; 2383 return skb->len;
2386} 2384}
2387 2385
@@ -4508,6 +4506,8 @@ err6:
4508err5: 4506err5:
4509 kfree(trans); 4507 kfree(trans);
4510err4: 4508err4:
4509 if (obj)
4510 obj->use--;
4511 kfree(elem.priv); 4511 kfree(elem.priv);
4512err3: 4512err3:
4513 if (nla[NFTA_SET_ELEM_DATA] != NULL) 4513 if (nla[NFTA_SET_ELEM_DATA] != NULL)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 974525eb92df..6e6b9adf7d38 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
12#include <net/netfilter/nf_conntrack_core.h> 12#include <net/netfilter/nf_conntrack_core.h>
13#include <linux/netfilter/nf_conntrack_common.h> 13#include <linux/netfilter/nf_conntrack_common.h>
14#include <net/netfilter/nf_flow_table.h> 14#include <net/netfilter/nf_flow_table.h>
15#include <net/netfilter/nf_conntrack_helper.h>
15 16
16struct nft_flow_offload { 17struct nft_flow_offload {
17 struct nft_flowtable *flowtable; 18 struct nft_flowtable *flowtable;
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
29 memset(&fl, 0, sizeof(fl)); 30 memset(&fl, 0, sizeof(fl));
30 switch (nft_pf(pkt)) { 31 switch (nft_pf(pkt)) {
31 case NFPROTO_IPV4: 32 case NFPROTO_IPV4:
32 fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; 33 fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
34 fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
33 break; 35 break;
34 case NFPROTO_IPV6: 36 case NFPROTO_IPV6:
35 fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; 37 fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
38 fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
36 break; 39 break;
37 } 40 }
38 41
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
41 return -ENOENT; 44 return -ENOENT;
42 45
43 route->tuple[dir].dst = this_dst; 46 route->tuple[dir].dst = this_dst;
44 route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
45 route->tuple[!dir].dst = other_dst; 47 route->tuple[!dir].dst = other_dst;
46 route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
47 48
48 return 0; 49 return 0;
49} 50}
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
66{ 67{
67 struct nft_flow_offload *priv = nft_expr_priv(expr); 68 struct nft_flow_offload *priv = nft_expr_priv(expr);
68 struct nf_flowtable *flowtable = &priv->flowtable->data; 69 struct nf_flowtable *flowtable = &priv->flowtable->data;
70 const struct nf_conn_help *help;
69 enum ip_conntrack_info ctinfo; 71 enum ip_conntrack_info ctinfo;
70 struct nf_flow_route route; 72 struct nf_flow_route route;
71 struct flow_offload *flow; 73 struct flow_offload *flow;
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
88 goto out; 90 goto out;
89 } 91 }
90 92
91 if (test_bit(IPS_HELPER_BIT, &ct->status)) 93 help = nfct_help(ct);
94 if (help)
92 goto out; 95 goto out;
93 96
94 if (ctinfo == IP_CT_NEW || 97 if (ctinfo == IP_CT_NEW ||
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 57e07768c9d1..f54cf17ef7a8 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -276,10 +276,12 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
276 276
277 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); 277 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
278 if (flags & IP6_FH_F_FRAG) { 278 if (flags & IP6_FH_F_FRAG) {
279 if (frag_off) 279 if (frag_off) {
280 key->ip.frag = OVS_FRAG_TYPE_LATER; 280 key->ip.frag = OVS_FRAG_TYPE_LATER;
281 else 281 key->ip.proto = nexthdr;
282 key->ip.frag = OVS_FRAG_TYPE_FIRST; 282 return 0;
283 }
284 key->ip.frag = OVS_FRAG_TYPE_FIRST;
283 } else { 285 } else {
284 key->ip.frag = OVS_FRAG_TYPE_NONE; 286 key->ip.frag = OVS_FRAG_TYPE_NONE;
285 } 287 }
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 435a4bdf8f89..691da853bef5 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
500 return -EINVAL; 500 return -EINVAL;
501 } 501 }
502 502
503 if (!nz || !is_all_zero(nla_data(nla), expected_len)) { 503 if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
504 attrs |= 1 << type; 504 attrs |= 1 << type;
505 a[type] = nla; 505 a[type] = nla;
506 } 506 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index eedacdebcd4c..3b1a78906bc0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2628 addr = saddr->sll_halen ? saddr->sll_addr : NULL; 2628 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
2629 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2629 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2630 if (addr && dev && saddr->sll_halen < dev->addr_len) 2630 if (addr && dev && saddr->sll_halen < dev->addr_len)
2631 goto out; 2631 goto out_put;
2632 } 2632 }
2633 2633
2634 err = -ENXIO; 2634 err = -ENXIO;
@@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2828 addr = saddr->sll_halen ? saddr->sll_addr : NULL; 2828 addr = saddr->sll_halen ? saddr->sll_addr : NULL;
2829 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex); 2829 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2830 if (addr && dev && saddr->sll_halen < dev->addr_len) 2830 if (addr && dev && saddr->sll_halen < dev->addr_len)
2831 goto out; 2831 goto out_unlock;
2832 } 2832 }
2833 2833
2834 err = -ENXIO; 2834 err = -ENXIO;
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2887 goto out_free; 2887 goto out_free;
2888 } else if (reserve) { 2888 } else if (reserve) {
2889 skb_reserve(skb, -reserve); 2889 skb_reserve(skb, -reserve);
2890 if (len < reserve) 2890 if (len < reserve + sizeof(struct ipv6hdr) &&
2891 dev->min_header_len != dev->hard_header_len)
2891 skb_reset_network_header(skb); 2892 skb_reset_network_header(skb);
2892 } 2893 }
2893 2894
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 2dcb555e6350..4e0c36acf866 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -522,7 +522,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
522 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) 522 if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
523 i = 1; 523 i = 1;
524 else 524 else
525 i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE); 525 i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
526 526
527 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 527 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
528 if (work_alloc == 0) { 528 if (work_alloc == 0) {
@@ -879,7 +879,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
879 * Instead of knowing how to return a partial rdma read/write we insist that there 879 * Instead of knowing how to return a partial rdma read/write we insist that there
880 * be enough work requests to send the entire message. 880 * be enough work requests to send the entire message.
881 */ 881 */
882 i = ceil(op->op_count, max_sge); 882 i = DIV_ROUND_UP(op->op_count, max_sge);
883 883
884 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); 884 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
885 if (work_alloc != i) { 885 if (work_alloc != i) {
diff --git a/net/rds/message.c b/net/rds/message.c
index f139420ba1f6..50f13f1d4ae0 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -341,7 +341,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
341{ 341{
342 struct rds_message *rm; 342 struct rds_message *rm;
343 unsigned int i; 343 unsigned int i;
344 int num_sgs = ceil(total_len, PAGE_SIZE); 344 int num_sgs = DIV_ROUND_UP(total_len, PAGE_SIZE);
345 int extra_bytes = num_sgs * sizeof(struct scatterlist); 345 int extra_bytes = num_sgs * sizeof(struct scatterlist);
346 int ret; 346 int ret;
347 347
@@ -351,7 +351,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
351 351
352 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags); 352 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
353 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); 353 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
354 rm->data.op_nents = ceil(total_len, PAGE_SIZE); 354 rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
355 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret); 355 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
356 if (!rm->data.op_sg) { 356 if (!rm->data.op_sg) {
357 rds_message_put(rm); 357 rds_message_put(rm);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 02ec4a3b2799..4ffe100ff5e6 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -48,10 +48,6 @@ void rdsdebug(char *fmt, ...)
48} 48}
49#endif 49#endif
50 50
51/* XXX is there one of these somewhere? */
52#define ceil(x, y) \
53 ({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
54
55#define RDS_FRAG_SHIFT 12 51#define RDS_FRAG_SHIFT 12
56#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT)) 52#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
57 53
diff --git a/net/rds/send.c b/net/rds/send.c
index 3d822bad7de9..fd8b687d5c05 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1107,7 +1107,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
1107 size_t total_payload_len = payload_len, rdma_payload_len = 0; 1107 size_t total_payload_len = payload_len, rdma_payload_len = 0;
1108 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && 1108 bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) &&
1109 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); 1109 sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
1110 int num_sgs = ceil(payload_len, PAGE_SIZE); 1110 int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE);
1111 int namelen; 1111 int namelen;
1112 struct rds_iov_vector_arr vct; 1112 struct rds_iov_vector_arr vct;
1113 int ind; 1113 int ind;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index a2522f9d71e2..96f2952bbdfd 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -419,76 +419,6 @@ u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
419EXPORT_SYMBOL(rxrpc_kernel_get_epoch); 419EXPORT_SYMBOL(rxrpc_kernel_get_epoch);
420 420
421/** 421/**
422 * rxrpc_kernel_check_call - Check a call's state
423 * @sock: The socket the call is on
424 * @call: The call to check
425 * @_compl: Where to store the completion state
426 * @_abort_code: Where to store any abort code
427 *
428 * Allow a kernel service to query the state of a call and find out the manner
429 * of its termination if it has completed. Returns -EINPROGRESS if the call is
430 * still going, 0 if the call finished successfully, -ECONNABORTED if the call
431 * was aborted and an appropriate error if the call failed in some other way.
432 */
433int rxrpc_kernel_check_call(struct socket *sock, struct rxrpc_call *call,
434 enum rxrpc_call_completion *_compl, u32 *_abort_code)
435{
436 if (call->state != RXRPC_CALL_COMPLETE)
437 return -EINPROGRESS;
438 smp_rmb();
439 *_compl = call->completion;
440 *_abort_code = call->abort_code;
441 return call->error;
442}
443EXPORT_SYMBOL(rxrpc_kernel_check_call);
444
445/**
446 * rxrpc_kernel_retry_call - Allow a kernel service to retry a call
447 * @sock: The socket the call is on
448 * @call: The call to retry
449 * @srx: The address of the peer to contact
450 * @key: The security context to use (defaults to socket setting)
451 *
452 * Allow a kernel service to try resending a client call that failed due to a
453 * network error to a new address. The Tx queue is maintained intact, thereby
454 * relieving the need to re-encrypt any request data that has already been
455 * buffered.
456 */
457int rxrpc_kernel_retry_call(struct socket *sock, struct rxrpc_call *call,
458 struct sockaddr_rxrpc *srx, struct key *key)
459{
460 struct rxrpc_conn_parameters cp;
461 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
462 int ret;
463
464 _enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
465
466 if (!key)
467 key = rx->key;
468 if (key && !key->payload.data[0])
469 key = NULL; /* a no-security key */
470
471 memset(&cp, 0, sizeof(cp));
472 cp.local = rx->local;
473 cp.key = key;
474 cp.security_level = 0;
475 cp.exclusive = false;
476 cp.service_id = srx->srx_service;
477
478 mutex_lock(&call->user_mutex);
479
480 ret = rxrpc_prepare_call_for_retry(rx, call);
481 if (ret == 0)
482 ret = rxrpc_retry_client_call(rx, call, &cp, srx, GFP_KERNEL);
483
484 mutex_unlock(&call->user_mutex);
485 rxrpc_put_peer(cp.peer);
486 _leave(" = %d", ret);
487 return ret;
488}
489EXPORT_SYMBOL(rxrpc_kernel_retry_call);
490
491/**
492 * rxrpc_kernel_new_call_notification - Get notifications of new calls 422 * rxrpc_kernel_new_call_notification - Get notifications of new calls
493 * @sock: The socket to intercept received messages on 423 * @sock: The socket to intercept received messages on
494 * @notify_new_call: Function to be called when new calls appear 424 * @notify_new_call: Function to be called when new calls appear
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index bc628acf4f4f..4b1a534d290a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -476,7 +476,6 @@ enum rxrpc_call_flag {
476 RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ 476 RXRPC_CALL_EXPOSED, /* The call was exposed to the world */
477 RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ 477 RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
478 RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ 478 RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
479 RXRPC_CALL_TX_LASTQ, /* Last packet has been queued */
480 RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ 479 RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
481 RXRPC_CALL_PINGING, /* Ping in process */ 480 RXRPC_CALL_PINGING, /* Ping in process */
482 RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ 481 RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
@@ -518,6 +517,18 @@ enum rxrpc_call_state {
518}; 517};
519 518
520/* 519/*
520 * Call completion condition (state == RXRPC_CALL_COMPLETE).
521 */
522enum rxrpc_call_completion {
523 RXRPC_CALL_SUCCEEDED, /* - Normal termination */
524 RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */
525 RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */
526 RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */
527 RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */
528 NR__RXRPC_CALL_COMPLETIONS
529};
530
531/*
521 * Call Tx congestion management modes. 532 * Call Tx congestion management modes.
522 */ 533 */
523enum rxrpc_congest_mode { 534enum rxrpc_congest_mode {
@@ -761,15 +772,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
761 struct sockaddr_rxrpc *, 772 struct sockaddr_rxrpc *,
762 struct rxrpc_call_params *, gfp_t, 773 struct rxrpc_call_params *, gfp_t,
763 unsigned int); 774 unsigned int);
764int rxrpc_retry_client_call(struct rxrpc_sock *,
765 struct rxrpc_call *,
766 struct rxrpc_conn_parameters *,
767 struct sockaddr_rxrpc *,
768 gfp_t);
769void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, 775void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
770 struct sk_buff *); 776 struct sk_buff *);
771void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); 777void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
772int rxrpc_prepare_call_for_retry(struct rxrpc_sock *, struct rxrpc_call *);
773void rxrpc_release_calls_on_socket(struct rxrpc_sock *); 778void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
774bool __rxrpc_queue_call(struct rxrpc_call *); 779bool __rxrpc_queue_call(struct rxrpc_call *);
775bool rxrpc_queue_call(struct rxrpc_call *); 780bool rxrpc_queue_call(struct rxrpc_call *);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 8f1a8f85b1f9..8aa2937b069f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -325,48 +325,6 @@ error:
325} 325}
326 326
327/* 327/*
328 * Retry a call to a new address. It is expected that the Tx queue of the call
329 * will contain data previously packaged for an old call.
330 */
331int rxrpc_retry_client_call(struct rxrpc_sock *rx,
332 struct rxrpc_call *call,
333 struct rxrpc_conn_parameters *cp,
334 struct sockaddr_rxrpc *srx,
335 gfp_t gfp)
336{
337 const void *here = __builtin_return_address(0);
338 int ret;
339
340 /* Set up or get a connection record and set the protocol parameters,
341 * including channel number and call ID.
342 */
343 ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
344 if (ret < 0)
345 goto error;
346
347 trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
348 here, NULL);
349
350 rxrpc_start_call_timer(call);
351
352 _net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
353
354 if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
355 rxrpc_queue_call(call);
356
357 _leave(" = 0");
358 return 0;
359
360error:
361 rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
362 RX_CALL_DEAD, ret);
363 trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
364 here, ERR_PTR(ret));
365 _leave(" = %d", ret);
366 return ret;
367}
368
369/*
370 * Set up an incoming call. call->conn points to the connection. 328 * Set up an incoming call. call->conn points to the connection.
371 * This is called in BH context and isn't allowed to fail. 329 * This is called in BH context and isn't allowed to fail.
372 */ 330 */
@@ -534,61 +492,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
534} 492}
535 493
536/* 494/*
537 * Prepare a kernel service call for retry.
538 */
539int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
540{
541 const void *here = __builtin_return_address(0);
542 int i;
543 u8 last = 0;
544
545 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
546
547 trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
548 here, (const void *)call->flags);
549
550 ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
551 ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
552 ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
553 ASSERT(list_empty(&call->recvmsg_link));
554
555 del_timer_sync(&call->timer);
556
557 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);
558
559 if (call->conn)
560 rxrpc_disconnect_call(call);
561
562 if (rxrpc_is_service_call(call) ||
563 !call->tx_phase ||
564 call->tx_hard_ack != 0 ||
565 call->rx_hard_ack != 0 ||
566 call->rx_top != 0)
567 return -EINVAL;
568
569 call->state = RXRPC_CALL_UNINITIALISED;
570 call->completion = RXRPC_CALL_SUCCEEDED;
571 call->call_id = 0;
572 call->cid = 0;
573 call->cong_cwnd = 0;
574 call->cong_extra = 0;
575 call->cong_ssthresh = 0;
576 call->cong_mode = 0;
577 call->cong_dup_acks = 0;
578 call->cong_cumul_acks = 0;
579 call->acks_lowest_nak = 0;
580
581 for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
582 last |= call->rxtx_annotations[i];
583 call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
584 call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
585 }
586
587 _leave(" = 0");
588 return 0;
589}
590
591/*
592 * release all the calls associated with a socket 495 * release all the calls associated with a socket
593 */ 496 */
594void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx) 497void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f4b666..b2adfa825363 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -562,10 +562,7 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
562 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); 562 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
563 563
564 write_lock_bh(&call->state_lock); 564 write_lock_bh(&call->state_lock);
565 if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) 565 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
566 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
567 else
568 call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
569 write_unlock_bh(&call->state_lock); 566 write_unlock_bh(&call->state_lock);
570 567
571 rxrpc_see_call(call); 568 rxrpc_see_call(call);
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c5d963..46c9312085b1 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -169,10 +169,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
169 169
170 ASSERTCMP(seq, ==, call->tx_top + 1); 170 ASSERTCMP(seq, ==, call->tx_top + 1);
171 171
172 if (last) { 172 if (last)
173 annotation |= RXRPC_TX_ANNO_LAST; 173 annotation |= RXRPC_TX_ANNO_LAST;
174 set_bit(RXRPC_CALL_TX_LASTQ, &call->flags);
175 }
176 174
177 /* We have to set the timestamp before queueing as the retransmit 175 /* We have to set the timestamp before queueing as the retransmit
178 * algorithm can see the packet as soon as we queue it. 176 * algorithm can see the packet as soon as we queue it.
@@ -386,6 +384,11 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
386 call->tx_total_len -= copy; 384 call->tx_total_len -= copy;
387 } 385 }
388 386
387 /* check for the far side aborting the call or a network error
388 * occurring */
389 if (call->state == RXRPC_CALL_COMPLETE)
390 goto call_terminated;
391
389 /* add the packet to the send queue if it's now full */ 392 /* add the packet to the send queue if it's now full */
390 if (sp->remain <= 0 || 393 if (sp->remain <= 0 ||
391 (msg_data_left(msg) == 0 && !more)) { 394 (msg_data_left(msg) == 0 && !more)) {
@@ -425,16 +428,6 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
425 notify_end_tx); 428 notify_end_tx);
426 skb = NULL; 429 skb = NULL;
427 } 430 }
428
429 /* Check for the far side aborting the call or a network error
430 * occurring. If this happens, save any packet that was under
431 * construction so that in the case of a network error, the
432 * call can be retried or redirected.
433 */
434 if (call->state == RXRPC_CALL_COMPLETE) {
435 ret = call->error;
436 goto out;
437 }
438 } while (msg_data_left(msg) > 0); 431 } while (msg_data_left(msg) > 0);
439 432
440success: 433success:
@@ -444,6 +437,11 @@ out:
444 _leave(" = %d", ret); 437 _leave(" = %d", ret);
445 return ret; 438 return ret;
446 439
440call_terminated:
441 rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
442 _leave(" = %d", call->error);
443 return call->error;
444
447maybe_error: 445maybe_error:
448 if (copied) 446 if (copied)
449 goto success; 447 goto success;
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index c3b90fadaff6..8b43fe0130f7 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = {
197 [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, 197 [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 },
198}; 198};
199 199
200static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
201{
202 if (!p)
203 return;
204 if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
205 dst_release(&p->tcft_enc_metadata->dst);
206 kfree_rcu(p, rcu);
207}
208
200static int tunnel_key_init(struct net *net, struct nlattr *nla, 209static int tunnel_key_init(struct net *net, struct nlattr *nla,
201 struct nlattr *est, struct tc_action **a, 210 struct nlattr *est, struct tc_action **a,
202 int ovr, int bind, bool rtnl_held, 211 int ovr, int bind, bool rtnl_held,
@@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
360 rcu_swap_protected(t->params, params_new, 369 rcu_swap_protected(t->params, params_new,
361 lockdep_is_held(&t->tcf_lock)); 370 lockdep_is_held(&t->tcf_lock));
362 spin_unlock_bh(&t->tcf_lock); 371 spin_unlock_bh(&t->tcf_lock);
363 if (params_new) 372 tunnel_key_release_params(params_new);
364 kfree_rcu(params_new, rcu);
365 373
366 if (ret == ACT_P_CREATED) 374 if (ret == ACT_P_CREATED)
367 tcf_idr_insert(tn, *a); 375 tcf_idr_insert(tn, *a);
@@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a)
385 struct tcf_tunnel_key_params *params; 393 struct tcf_tunnel_key_params *params;
386 394
387 params = rcu_dereference_protected(t->params, 1); 395 params = rcu_dereference_protected(t->params, 1);
388 if (params) { 396 tunnel_key_release_params(params);
389 if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
390 dst_release(&params->tcft_enc_metadata->dst);
391
392 kfree_rcu(params, rcu);
393 }
394} 397}
395 398
396static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, 399static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8ce2a0507970..e2b5cb2eb34e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1277,7 +1277,6 @@ EXPORT_SYMBOL(tcf_block_cb_unregister);
1277int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, 1277int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1278 struct tcf_result *res, bool compat_mode) 1278 struct tcf_result *res, bool compat_mode)
1279{ 1279{
1280 __be16 protocol = tc_skb_protocol(skb);
1281#ifdef CONFIG_NET_CLS_ACT 1280#ifdef CONFIG_NET_CLS_ACT
1282 const int max_reclassify_loop = 4; 1281 const int max_reclassify_loop = 4;
1283 const struct tcf_proto *orig_tp = tp; 1282 const struct tcf_proto *orig_tp = tp;
@@ -1287,6 +1286,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1287reclassify: 1286reclassify:
1288#endif 1287#endif
1289 for (; tp; tp = rcu_dereference_bh(tp->next)) { 1288 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1289 __be16 protocol = tc_skb_protocol(skb);
1290 int err; 1290 int err;
1291 1291
1292 if (tp->protocol != protocol && 1292 if (tp->protocol != protocol &&
@@ -1319,7 +1319,6 @@ reset:
1319 } 1319 }
1320 1320
1321 tp = first_tp; 1321 tp = first_tp;
1322 protocol = tc_skb_protocol(skb);
1323 goto reclassify; 1322 goto reclassify;
1324#endif 1323#endif
1325} 1324}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dad04e710493..f6aa57fbbbaf 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1290,17 +1290,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1290 struct cls_fl_head *head = rtnl_dereference(tp->root); 1290 struct cls_fl_head *head = rtnl_dereference(tp->root);
1291 struct cls_fl_filter *fold = *arg; 1291 struct cls_fl_filter *fold = *arg;
1292 struct cls_fl_filter *fnew; 1292 struct cls_fl_filter *fnew;
1293 struct fl_flow_mask *mask;
1293 struct nlattr **tb; 1294 struct nlattr **tb;
1294 struct fl_flow_mask mask = {};
1295 int err; 1295 int err;
1296 1296
1297 if (!tca[TCA_OPTIONS]) 1297 if (!tca[TCA_OPTIONS])
1298 return -EINVAL; 1298 return -EINVAL;
1299 1299
1300 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 1300 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1301 if (!tb) 1301 if (!mask)
1302 return -ENOBUFS; 1302 return -ENOBUFS;
1303 1303
1304 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1305 if (!tb) {
1306 err = -ENOBUFS;
1307 goto errout_mask_alloc;
1308 }
1309
1304 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], 1310 err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1305 fl_policy, NULL); 1311 fl_policy, NULL);
1306 if (err < 0) 1312 if (err < 0)
@@ -1343,12 +1349,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1343 } 1349 }
1344 } 1350 }
1345 1351
1346 err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, 1352 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1347 tp->chain->tmplt_priv, extack); 1353 tp->chain->tmplt_priv, extack);
1348 if (err) 1354 if (err)
1349 goto errout_idr; 1355 goto errout_idr;
1350 1356
1351 err = fl_check_assign_mask(head, fnew, fold, &mask); 1357 err = fl_check_assign_mask(head, fnew, fold, mask);
1352 if (err) 1358 if (err)
1353 goto errout_idr; 1359 goto errout_idr;
1354 1360
@@ -1392,6 +1398,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1392 } 1398 }
1393 1399
1394 kfree(tb); 1400 kfree(tb);
1401 kfree(mask);
1395 return 0; 1402 return 0;
1396 1403
1397errout_mask: 1404errout_mask:
@@ -1405,6 +1412,8 @@ errout:
1405 kfree(fnew); 1412 kfree(fnew);
1406errout_tb: 1413errout_tb:
1407 kfree(tb); 1414 kfree(tb);
1415errout_mask_alloc:
1416 kfree(mask);
1408 return err; 1417 return err;
1409} 1418}
1410 1419
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index b910cd5c56f7..73940293700d 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1667,7 +1667,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1667 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { 1667 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
1668 struct sk_buff *segs, *nskb; 1668 struct sk_buff *segs, *nskb;
1669 netdev_features_t features = netif_skb_features(skb); 1669 netdev_features_t features = netif_skb_features(skb);
1670 unsigned int slen = 0; 1670 unsigned int slen = 0, numsegs = 0;
1671 1671
1672 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 1672 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
1673 if (IS_ERR_OR_NULL(segs)) 1673 if (IS_ERR_OR_NULL(segs))
@@ -1683,6 +1683,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1683 flow_queue_add(flow, segs); 1683 flow_queue_add(flow, segs);
1684 1684
1685 sch->q.qlen++; 1685 sch->q.qlen++;
1686 numsegs++;
1686 slen += segs->len; 1687 slen += segs->len;
1687 q->buffer_used += segs->truesize; 1688 q->buffer_used += segs->truesize;
1688 b->packets++; 1689 b->packets++;
@@ -1696,7 +1697,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1696 sch->qstats.backlog += slen; 1697 sch->qstats.backlog += slen;
1697 q->avg_window_bytes += slen; 1698 q->avg_window_bytes += slen;
1698 1699
1699 qdisc_tree_reduce_backlog(sch, 1, len); 1700 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen);
1700 consume_skb(skb); 1701 consume_skb(skb);
1701 } else { 1702 } else {
1702 /* not splitting */ 1703 /* not splitting */
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index e689e11b6d0f..c6a502933fe7 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -88,13 +88,14 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
88 struct Qdisc *child, 88 struct Qdisc *child,
89 struct sk_buff **to_free) 89 struct sk_buff **to_free)
90{ 90{
91 unsigned int len = qdisc_pkt_len(skb);
91 int err; 92 int err;
92 93
93 err = child->ops->enqueue(skb, child, to_free); 94 err = child->ops->enqueue(skb, child, to_free);
94 if (err != NET_XMIT_SUCCESS) 95 if (err != NET_XMIT_SUCCESS)
95 return err; 96 return err;
96 97
97 qdisc_qstats_backlog_inc(sch, skb); 98 sch->qstats.backlog += len;
98 sch->q.qlen++; 99 sch->q.qlen++;
99 100
100 return NET_XMIT_SUCCESS; 101 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index cdebaed0f8cf..09b800991065 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -350,9 +350,11 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
350static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, 350static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
351 struct sk_buff **to_free) 351 struct sk_buff **to_free)
352{ 352{
353 unsigned int len = qdisc_pkt_len(skb);
353 struct drr_sched *q = qdisc_priv(sch); 354 struct drr_sched *q = qdisc_priv(sch);
354 struct drr_class *cl; 355 struct drr_class *cl;
355 int err = 0; 356 int err = 0;
357 bool first;
356 358
357 cl = drr_classify(skb, sch, &err); 359 cl = drr_classify(skb, sch, &err);
358 if (cl == NULL) { 360 if (cl == NULL) {
@@ -362,6 +364,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
362 return err; 364 return err;
363 } 365 }
364 366
367 first = !cl->qdisc->q.qlen;
365 err = qdisc_enqueue(skb, cl->qdisc, to_free); 368 err = qdisc_enqueue(skb, cl->qdisc, to_free);
366 if (unlikely(err != NET_XMIT_SUCCESS)) { 369 if (unlikely(err != NET_XMIT_SUCCESS)) {
367 if (net_xmit_drop_count(err)) { 370 if (net_xmit_drop_count(err)) {
@@ -371,12 +374,12 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
371 return err; 374 return err;
372 } 375 }
373 376
374 if (cl->qdisc->q.qlen == 1) { 377 if (first) {
375 list_add_tail(&cl->alist, &q->active); 378 list_add_tail(&cl->alist, &q->active);
376 cl->deficit = cl->quantum; 379 cl->deficit = cl->quantum;
377 } 380 }
378 381
379 qdisc_qstats_backlog_inc(sch, skb); 382 sch->qstats.backlog += len;
380 sch->q.qlen++; 383 sch->q.qlen++;
381 return err; 384 return err;
382} 385}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index f6f480784bc6..42471464ded3 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -199,6 +199,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
199static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, 199static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
200 struct sk_buff **to_free) 200 struct sk_buff **to_free)
201{ 201{
202 unsigned int len = qdisc_pkt_len(skb);
202 struct dsmark_qdisc_data *p = qdisc_priv(sch); 203 struct dsmark_qdisc_data *p = qdisc_priv(sch);
203 int err; 204 int err;
204 205
@@ -271,7 +272,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
271 return err; 272 return err;
272 } 273 }
273 274
274 qdisc_qstats_backlog_inc(sch, skb); 275 sch->qstats.backlog += len;
275 sch->q.qlen++; 276 sch->q.qlen++;
276 277
277 return NET_XMIT_SUCCESS; 278 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b18ec1f6de60..24cc220a3218 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1539,8 +1539,10 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1539static int 1539static int
1540hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) 1540hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1541{ 1541{
1542 unsigned int len = qdisc_pkt_len(skb);
1542 struct hfsc_class *cl; 1543 struct hfsc_class *cl;
1543 int uninitialized_var(err); 1544 int uninitialized_var(err);
1545 bool first;
1544 1546
1545 cl = hfsc_classify(skb, sch, &err); 1547 cl = hfsc_classify(skb, sch, &err);
1546 if (cl == NULL) { 1548 if (cl == NULL) {
@@ -1550,6 +1552,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1550 return err; 1552 return err;
1551 } 1553 }
1552 1554
1555 first = !cl->qdisc->q.qlen;
1553 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1556 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1554 if (unlikely(err != NET_XMIT_SUCCESS)) { 1557 if (unlikely(err != NET_XMIT_SUCCESS)) {
1555 if (net_xmit_drop_count(err)) { 1558 if (net_xmit_drop_count(err)) {
@@ -1559,9 +1562,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1559 return err; 1562 return err;
1560 } 1563 }
1561 1564
1562 if (cl->qdisc->q.qlen == 1) { 1565 if (first) {
1563 unsigned int len = qdisc_pkt_len(skb);
1564
1565 if (cl->cl_flags & HFSC_RSC) 1566 if (cl->cl_flags & HFSC_RSC)
1566 init_ed(cl, len); 1567 init_ed(cl, len);
1567 if (cl->cl_flags & HFSC_FSC) 1568 if (cl->cl_flags & HFSC_FSC)
@@ -1576,7 +1577,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
1576 1577
1577 } 1578 }
1578 1579
1579 qdisc_qstats_backlog_inc(sch, skb); 1580 sch->qstats.backlog += len;
1580 sch->q.qlen++; 1581 sch->q.qlen++;
1581 1582
1582 return NET_XMIT_SUCCESS; 1583 return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 58b449490757..30f9da7e1076 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -581,6 +581,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
581 struct sk_buff **to_free) 581 struct sk_buff **to_free)
582{ 582{
583 int uninitialized_var(ret); 583 int uninitialized_var(ret);
584 unsigned int len = qdisc_pkt_len(skb);
584 struct htb_sched *q = qdisc_priv(sch); 585 struct htb_sched *q = qdisc_priv(sch);
585 struct htb_class *cl = htb_classify(skb, sch, &ret); 586 struct htb_class *cl = htb_classify(skb, sch, &ret);
586 587
@@ -610,7 +611,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
610 htb_activate(q, cl); 611 htb_activate(q, cl);
611 } 612 }
612 613
613 qdisc_qstats_backlog_inc(sch, skb); 614 sch->qstats.backlog += len;
614 sch->q.qlen++; 615 sch->q.qlen++;
615 return NET_XMIT_SUCCESS; 616 return NET_XMIT_SUCCESS;
616} 617}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cdf68706e40f..847141cd900f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -72,6 +72,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
72static int 72static int
73prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) 73prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
74{ 74{
75 unsigned int len = qdisc_pkt_len(skb);
75 struct Qdisc *qdisc; 76 struct Qdisc *qdisc;
76 int ret; 77 int ret;
77 78
@@ -88,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
88 89
89 ret = qdisc_enqueue(skb, qdisc, to_free); 90 ret = qdisc_enqueue(skb, qdisc, to_free);
90 if (ret == NET_XMIT_SUCCESS) { 91 if (ret == NET_XMIT_SUCCESS) {
91 qdisc_qstats_backlog_inc(sch, skb); 92 sch->qstats.backlog += len;
92 sch->q.qlen++; 93 sch->q.qlen++;
93 return NET_XMIT_SUCCESS; 94 return NET_XMIT_SUCCESS;
94 } 95 }
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index dc37c4ead439..29f5c4a24688 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1210,10 +1210,12 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
1210static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, 1210static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1211 struct sk_buff **to_free) 1211 struct sk_buff **to_free)
1212{ 1212{
1213 unsigned int len = qdisc_pkt_len(skb), gso_segs;
1213 struct qfq_sched *q = qdisc_priv(sch); 1214 struct qfq_sched *q = qdisc_priv(sch);
1214 struct qfq_class *cl; 1215 struct qfq_class *cl;
1215 struct qfq_aggregate *agg; 1216 struct qfq_aggregate *agg;
1216 int err = 0; 1217 int err = 0;
1218 bool first;
1217 1219
1218 cl = qfq_classify(skb, sch, &err); 1220 cl = qfq_classify(skb, sch, &err);
1219 if (cl == NULL) { 1221 if (cl == NULL) {
@@ -1224,17 +1226,18 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1224 } 1226 }
1225 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid); 1227 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
1226 1228
1227 if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) { 1229 if (unlikely(cl->agg->lmax < len)) {
1228 pr_debug("qfq: increasing maxpkt from %u to %u for class %u", 1230 pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
1229 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); 1231 cl->agg->lmax, len, cl->common.classid);
1230 err = qfq_change_agg(sch, cl, cl->agg->class_weight, 1232 err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
1231 qdisc_pkt_len(skb));
1232 if (err) { 1233 if (err) {
1233 cl->qstats.drops++; 1234 cl->qstats.drops++;
1234 return qdisc_drop(skb, sch, to_free); 1235 return qdisc_drop(skb, sch, to_free);
1235 } 1236 }
1236 } 1237 }
1237 1238
1239 gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
1240 first = !cl->qdisc->q.qlen;
1238 err = qdisc_enqueue(skb, cl->qdisc, to_free); 1241 err = qdisc_enqueue(skb, cl->qdisc, to_free);
1239 if (unlikely(err != NET_XMIT_SUCCESS)) { 1242 if (unlikely(err != NET_XMIT_SUCCESS)) {
1240 pr_debug("qfq_enqueue: enqueue failed %d\n", err); 1243 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1245,16 +1248,17 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1245 return err; 1248 return err;
1246 } 1249 }
1247 1250
1248 bstats_update(&cl->bstats, skb); 1251 cl->bstats.bytes += len;
1249 qdisc_qstats_backlog_inc(sch, skb); 1252 cl->bstats.packets += gso_segs;
1253 sch->qstats.backlog += len;
1250 ++sch->q.qlen; 1254 ++sch->q.qlen;
1251 1255
1252 agg = cl->agg; 1256 agg = cl->agg;
1253 /* if the queue was not empty, then done here */ 1257 /* if the queue was not empty, then done here */
1254 if (cl->qdisc->q.qlen != 1) { 1258 if (!first) {
1255 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && 1259 if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
1256 list_first_entry(&agg->active, struct qfq_class, alist) 1260 list_first_entry(&agg->active, struct qfq_class, alist)
1257 == cl && cl->deficit < qdisc_pkt_len(skb)) 1261 == cl && cl->deficit < len)
1258 list_move_tail(&cl->alist, &agg->active); 1262 list_move_tail(&cl->alist, &agg->active);
1259 1263
1260 return err; 1264 return err;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 942dcca09cf2..7f272a9070c5 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -185,6 +185,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
185 struct sk_buff **to_free) 185 struct sk_buff **to_free)
186{ 186{
187 struct tbf_sched_data *q = qdisc_priv(sch); 187 struct tbf_sched_data *q = qdisc_priv(sch);
188 unsigned int len = qdisc_pkt_len(skb);
188 int ret; 189 int ret;
189 190
190 if (qdisc_pkt_len(skb) > q->max_size) { 191 if (qdisc_pkt_len(skb) > q->max_size) {
@@ -200,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
200 return ret; 201 return ret;
201 } 202 }
202 203
203 qdisc_qstats_backlog_inc(sch, skb); 204 sch->qstats.backlog += len;
204 sch->q.qlen++; 205 sch->q.qlen++;
205 return NET_XMIT_SUCCESS; 206 return NET_XMIT_SUCCESS;
206} 207}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index b9ed271b7ef7..ed8e006dae85 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
97 97
98 switch (ev) { 98 switch (ev) {
99 case NETDEV_UP: 99 case NETDEV_UP:
100 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 100 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
101 if (addr) { 101 if (addr) {
102 addr->a.v6.sin6_family = AF_INET6; 102 addr->a.v6.sin6_family = AF_INET6;
103 addr->a.v6.sin6_port = 0;
104 addr->a.v6.sin6_flowinfo = 0;
105 addr->a.v6.sin6_addr = ifa->addr; 103 addr->a.v6.sin6_addr = ifa->addr;
106 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; 104 addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
107 addr->valid = 1; 105 addr->valid = 1;
@@ -434,7 +432,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
434 addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 432 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
435 if (addr) { 433 if (addr) {
436 addr->a.v6.sin6_family = AF_INET6; 434 addr->a.v6.sin6_family = AF_INET6;
437 addr->a.v6.sin6_port = 0;
438 addr->a.v6.sin6_addr = ifp->addr; 435 addr->a.v6.sin6_addr = ifp->addr;
439 addr->a.v6.sin6_scope_id = dev->ifindex; 436 addr->a.v6.sin6_scope_id = dev->ifindex;
440 addr->valid = 1; 437 addr->valid = 1;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d5878ae55840..4e0eeb113ef5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
101 addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 101 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
102 if (addr) { 102 if (addr) {
103 addr->a.v4.sin_family = AF_INET; 103 addr->a.v4.sin_family = AF_INET;
104 addr->a.v4.sin_port = 0;
105 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 104 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
106 addr->valid = 1; 105 addr->valid = 1;
107 INIT_LIST_HEAD(&addr->list); 106 INIT_LIST_HEAD(&addr->list);
@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
776 775
777 switch (ev) { 776 switch (ev) {
778 case NETDEV_UP: 777 case NETDEV_UP:
779 addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); 778 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
780 if (addr) { 779 if (addr) {
781 addr->a.v4.sin_family = AF_INET; 780 addr->a.v4.sin_family = AF_INET;
782 addr->a.v4.sin_port = 0;
783 addr->a.v4.sin_addr.s_addr = ifa->ifa_local; 781 addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
784 addr->valid = 1; 782 addr->valid = 1;
785 spin_lock_bh(&net->sctp.local_addr_lock); 783 spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index c4da4a78d369..c4e56602e0c6 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -146,6 +146,9 @@ static int smc_release(struct socket *sock)
146 sock_set_flag(sk, SOCK_DEAD); 146 sock_set_flag(sk, SOCK_DEAD);
147 sk->sk_shutdown |= SHUTDOWN_MASK; 147 sk->sk_shutdown |= SHUTDOWN_MASK;
148 } 148 }
149
150 sk->sk_prot->unhash(sk);
151
149 if (smc->clcsock) { 152 if (smc->clcsock) {
150 if (smc->use_fallback && sk->sk_state == SMC_LISTEN) { 153 if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
151 /* wake up clcsock accept */ 154 /* wake up clcsock accept */
@@ -170,7 +173,6 @@ static int smc_release(struct socket *sock)
170 smc_conn_free(&smc->conn); 173 smc_conn_free(&smc->conn);
171 release_sock(sk); 174 release_sock(sk);
172 175
173 sk->sk_prot->unhash(sk);
174 sock_put(sk); /* final sock_put */ 176 sock_put(sk); /* final sock_put */
175out: 177out:
176 return rc; 178 return rc;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 1ff9768f5456..f3023bbc0b7f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -41,6 +41,9 @@ static unsigned long number_cred_unused;
41 41
42static struct cred machine_cred = { 42static struct cred machine_cred = {
43 .usage = ATOMIC_INIT(1), 43 .usage = ATOMIC_INIT(1),
44#ifdef CONFIG_DEBUG_CREDENTIALS
45 .magic = CRED_MAGIC,
46#endif
44}; 47};
45 48
46/* 49/*
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index dc86713b32b6..1531b0219344 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1549,8 +1549,10 @@ gss_marshal(struct rpc_task *task, __be32 *p)
1549 cred_len = p++; 1549 cred_len = p++;
1550 1550
1551 spin_lock(&ctx->gc_seq_lock); 1551 spin_lock(&ctx->gc_seq_lock);
1552 req->rq_seqno = ctx->gc_seq++; 1552 req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1553 spin_unlock(&ctx->gc_seq_lock); 1553 spin_unlock(&ctx->gc_seq_lock);
1554 if (req->rq_seqno == MAXSEQ)
1555 goto out_expired;
1554 1556
1555 *p++ = htonl((u32) RPC_GSS_VERSION); 1557 *p++ = htonl((u32) RPC_GSS_VERSION);
1556 *p++ = htonl((u32) ctx->gc_proc); 1558 *p++ = htonl((u32) ctx->gc_proc);
@@ -1572,14 +1574,18 @@ gss_marshal(struct rpc_task *task, __be32 *p)
1572 mic.data = (u8 *)(p + 1); 1574 mic.data = (u8 *)(p + 1);
1573 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); 1575 maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1574 if (maj_stat == GSS_S_CONTEXT_EXPIRED) { 1576 if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
1575 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 1577 goto out_expired;
1576 } else if (maj_stat != 0) { 1578 } else if (maj_stat != 0) {
1577 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat); 1579 pr_warn("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
1580 task->tk_status = -EIO;
1578 goto out_put_ctx; 1581 goto out_put_ctx;
1579 } 1582 }
1580 p = xdr_encode_opaque(p, NULL, mic.len); 1583 p = xdr_encode_opaque(p, NULL, mic.len);
1581 gss_put_ctx(ctx); 1584 gss_put_ctx(ctx);
1582 return p; 1585 return p;
1586out_expired:
1587 clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1588 task->tk_status = -EKEYEXPIRED;
1583out_put_ctx: 1589out_put_ctx:
1584 gss_put_ctx(ctx); 1590 gss_put_ctx(ctx);
1585 return NULL; 1591 return NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 71d9599b5816..d7ec6132c046 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1739,14 +1739,10 @@ rpc_xdr_encode(struct rpc_task *task)
1739 xdr_buf_init(&req->rq_rcv_buf, 1739 xdr_buf_init(&req->rq_rcv_buf,
1740 req->rq_rbuffer, 1740 req->rq_rbuffer,
1741 req->rq_rcvsize); 1741 req->rq_rcvsize);
1742 req->rq_bytes_sent = 0;
1743 1742
1744 p = rpc_encode_header(task); 1743 p = rpc_encode_header(task);
1745 if (p == NULL) { 1744 if (p == NULL)
1746 printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
1747 rpc_exit(task, -EIO);
1748 return; 1745 return;
1749 }
1750 1746
1751 encode = task->tk_msg.rpc_proc->p_encode; 1747 encode = task->tk_msg.rpc_proc->p_encode;
1752 if (encode == NULL) 1748 if (encode == NULL)
@@ -1771,10 +1767,17 @@ call_encode(struct rpc_task *task)
1771 /* Did the encode result in an error condition? */ 1767 /* Did the encode result in an error condition? */
1772 if (task->tk_status != 0) { 1768 if (task->tk_status != 0) {
1773 /* Was the error nonfatal? */ 1769 /* Was the error nonfatal? */
1774 if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM) 1770 switch (task->tk_status) {
1771 case -EAGAIN:
1772 case -ENOMEM:
1775 rpc_delay(task, HZ >> 4); 1773 rpc_delay(task, HZ >> 4);
1776 else 1774 break;
1775 case -EKEYEXPIRED:
1776 task->tk_action = call_refresh;
1777 break;
1778 default:
1777 rpc_exit(task, task->tk_status); 1779 rpc_exit(task, task->tk_status);
1780 }
1778 return; 1781 return;
1779 } 1782 }
1780 1783
@@ -2336,7 +2339,8 @@ rpc_encode_header(struct rpc_task *task)
2336 *p++ = htonl(clnt->cl_vers); /* program version */ 2339 *p++ = htonl(clnt->cl_vers); /* program version */
2337 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ 2340 *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */
2338 p = rpcauth_marshcred(task, p); 2341 p = rpcauth_marshcred(task, p);
2339 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); 2342 if (p)
2343 req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
2340 return p; 2344 return p;
2341} 2345}
2342 2346
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 73547d17d3c6..f1ec2110efeb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1151,6 +1151,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
1151 struct rpc_xprt *xprt = req->rq_xprt; 1151 struct rpc_xprt *xprt = req->rq_xprt;
1152 1152
1153 if (xprt_request_need_enqueue_transmit(task, req)) { 1153 if (xprt_request_need_enqueue_transmit(task, req)) {
1154 req->rq_bytes_sent = 0;
1154 spin_lock(&xprt->queue_lock); 1155 spin_lock(&xprt->queue_lock);
1155 /* 1156 /*
1156 * Requests that carry congestion control credits are added 1157 * Requests that carry congestion control credits are added
@@ -1177,7 +1178,7 @@ xprt_request_enqueue_transmit(struct rpc_task *task)
1177 INIT_LIST_HEAD(&req->rq_xmit2); 1178 INIT_LIST_HEAD(&req->rq_xmit2);
1178 goto out; 1179 goto out;
1179 } 1180 }
1180 } else { 1181 } else if (!req->rq_seqno) {
1181 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) { 1182 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1182 if (pos->rq_task->tk_owner != task->tk_owner) 1183 if (pos->rq_task->tk_owner != task->tk_owner)
1183 continue; 1184 continue;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 7749a2bf6887..4994e75945b8 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -845,17 +845,13 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
845 for (i = 0; i <= buf->rb_sc_last; i++) { 845 for (i = 0; i <= buf->rb_sc_last; i++) {
846 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia); 846 sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
847 if (!sc) 847 if (!sc)
848 goto out_destroy; 848 return -ENOMEM;
849 849
850 sc->sc_xprt = r_xprt; 850 sc->sc_xprt = r_xprt;
851 buf->rb_sc_ctxs[i] = sc; 851 buf->rb_sc_ctxs[i] = sc;
852 } 852 }
853 853
854 return 0; 854 return 0;
855
856out_destroy:
857 rpcrdma_sendctxs_destroy(buf);
858 return -ENOMEM;
859} 855}
860 856
861/* The sendctx queue is not guaranteed to have a size that is a 857/* The sendctx queue is not guaranteed to have a size that is a
@@ -1113,8 +1109,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
1113 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1109 WQ_MEM_RECLAIM | WQ_HIGHPRI,
1114 0, 1110 0,
1115 r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]); 1111 r_xprt->rx_xprt.address_strings[RPC_DISPLAY_ADDR]);
1116 if (!buf->rb_completion_wq) 1112 if (!buf->rb_completion_wq) {
1113 rc = -ENOMEM;
1117 goto out; 1114 goto out;
1115 }
1118 1116
1119 return 0; 1117 return 0;
1120out: 1118out:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 13559e6a460b..7754aa3e434f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -48,6 +48,7 @@
48#include <net/udp.h> 48#include <net/udp.h>
49#include <net/tcp.h> 49#include <net/tcp.h>
50#include <linux/bvec.h> 50#include <linux/bvec.h>
51#include <linux/highmem.h>
51#include <linux/uio.h> 52#include <linux/uio.h>
52 53
53#include <trace/events/sunrpc.h> 54#include <trace/events/sunrpc.h>
@@ -376,6 +377,26 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
376 return sock_recvmsg(sock, msg, flags); 377 return sock_recvmsg(sock, msg, flags);
377} 378}
378 379
380#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
381static void
382xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
383{
384 struct bvec_iter bi = {
385 .bi_size = count,
386 };
387 struct bio_vec bv;
388
389 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
390 for_each_bvec(bv, bvec, bi, bi)
391 flush_dcache_page(bv.bv_page);
392}
393#else
394static inline void
395xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
396{
397}
398#endif
399
379static ssize_t 400static ssize_t
380xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags, 401xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
381 struct xdr_buf *buf, size_t count, size_t seek, size_t *read) 402 struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
@@ -409,6 +430,7 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
409 seek + buf->page_base); 430 seek + buf->page_base);
410 if (ret <= 0) 431 if (ret <= 0)
411 goto sock_err; 432 goto sock_err;
433 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
412 offset += ret - buf->page_base; 434 offset += ret - buf->page_base;
413 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC)) 435 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
414 goto out; 436 goto out;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 40f5cae623a7..4ad3586da8f0 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
87 return limit; 87 return limit;
88} 88}
89 89
90static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
91{
92 return TLV_GET_LEN(tlv) - TLV_SPACE(0);
93}
94
90static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) 95static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
91{ 96{
92 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); 97 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
166 return buf; 171 return buf;
167} 172}
168 173
174static inline bool string_is_valid(char *s, int len)
175{
176 return memchr(s, '\0', len) ? true : false;
177}
178
169static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, 179static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
170 struct tipc_nl_compat_msg *msg, 180 struct tipc_nl_compat_msg *msg,
171 struct sk_buff *arg) 181 struct sk_buff *arg)
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
379 struct nlattr *prop; 389 struct nlattr *prop;
380 struct nlattr *bearer; 390 struct nlattr *bearer;
381 struct tipc_bearer_config *b; 391 struct tipc_bearer_config *b;
392 int len;
382 393
383 b = (struct tipc_bearer_config *)TLV_DATA(msg->req); 394 b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
384 395
@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
386 if (!bearer) 397 if (!bearer)
387 return -EMSGSIZE; 398 return -EMSGSIZE;
388 399
400 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
401 if (!string_is_valid(b->name, len))
402 return -EINVAL;
403
389 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) 404 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
390 return -EMSGSIZE; 405 return -EMSGSIZE;
391 406
@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
411{ 426{
412 char *name; 427 char *name;
413 struct nlattr *bearer; 428 struct nlattr *bearer;
429 int len;
414 430
415 name = (char *)TLV_DATA(msg->req); 431 name = (char *)TLV_DATA(msg->req);
416 432
@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
418 if (!bearer) 434 if (!bearer)
419 return -EMSGSIZE; 435 return -EMSGSIZE;
420 436
437 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
438 if (!string_is_valid(name, len))
439 return -EINVAL;
440
421 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) 441 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
422 return -EMSGSIZE; 442 return -EMSGSIZE;
423 443
@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
478 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; 498 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
479 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; 499 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
480 int err; 500 int err;
501 int len;
481 502
482 if (!attrs[TIPC_NLA_LINK]) 503 if (!attrs[TIPC_NLA_LINK])
483 return -EINVAL; 504 return -EINVAL;
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
504 return err; 525 return err;
505 526
506 name = (char *)TLV_DATA(msg->req); 527 name = (char *)TLV_DATA(msg->req);
528
529 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
530 if (!string_is_valid(name, len))
531 return -EINVAL;
532
507 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) 533 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
508 return 0; 534 return 0;
509 535
@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
644 struct nlattr *prop; 670 struct nlattr *prop;
645 struct nlattr *media; 671 struct nlattr *media;
646 struct tipc_link_config *lc; 672 struct tipc_link_config *lc;
673 int len;
647 674
648 lc = (struct tipc_link_config *)TLV_DATA(msg->req); 675 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
649 676
@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
651 if (!media) 678 if (!media)
652 return -EMSGSIZE; 679 return -EMSGSIZE;
653 680
681 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
682 if (!string_is_valid(lc->name, len))
683 return -EINVAL;
684
654 if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) 685 if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
655 return -EMSGSIZE; 686 return -EMSGSIZE;
656 687
@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
671 struct nlattr *prop; 702 struct nlattr *prop;
672 struct nlattr *bearer; 703 struct nlattr *bearer;
673 struct tipc_link_config *lc; 704 struct tipc_link_config *lc;
705 int len;
674 706
675 lc = (struct tipc_link_config *)TLV_DATA(msg->req); 707 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
676 708
@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
678 if (!bearer) 710 if (!bearer)
679 return -EMSGSIZE; 711 return -EMSGSIZE;
680 712
713 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
714 if (!string_is_valid(lc->name, len))
715 return -EINVAL;
716
681 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) 717 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
682 return -EMSGSIZE; 718 return -EMSGSIZE;
683 719
@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
726 struct tipc_link_config *lc; 762 struct tipc_link_config *lc;
727 struct tipc_bearer *bearer; 763 struct tipc_bearer *bearer;
728 struct tipc_media *media; 764 struct tipc_media *media;
765 int len;
729 766
730 lc = (struct tipc_link_config *)TLV_DATA(msg->req); 767 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
731 768
769 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
770 if (!string_is_valid(lc->name, len))
771 return -EINVAL;
772
732 media = tipc_media_find(lc->name); 773 media = tipc_media_find(lc->name);
733 if (media) { 774 if (media) {
734 cmd->doit = &__tipc_nl_media_set; 775 cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
750{ 791{
751 char *name; 792 char *name;
752 struct nlattr *link; 793 struct nlattr *link;
794 int len;
753 795
754 name = (char *)TLV_DATA(msg->req); 796 name = (char *)TLV_DATA(msg->req);
755 797
@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
757 if (!link) 799 if (!link)
758 return -EMSGSIZE; 800 return -EMSGSIZE;
759 801
802 len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
803 if (!string_is_valid(name, len))
804 return -EINVAL;
805
760 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) 806 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
761 return -EMSGSIZE; 807 return -EMSGSIZE;
762 808
@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
778 }; 824 };
779 825
780 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); 826 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
827 if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
828 return -EINVAL;
781 829
782 depth = ntohl(ntq->depth); 830 depth = ntohl(ntq->depth);
783 831
@@ -904,8 +952,10 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
904 952
905 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, 953 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
906 TIPC_NL_PUBL_GET); 954 TIPC_NL_PUBL_GET);
907 if (!hdr) 955 if (!hdr) {
956 kfree_skb(args);
908 return -EMSGSIZE; 957 return -EMSGSIZE;
958 }
909 959
910 nest = nla_nest_start(args, TIPC_NLA_SOCK); 960 nest = nla_nest_start(args, TIPC_NLA_SOCK);
911 if (!nest) { 961 if (!nest) {
@@ -1206,7 +1256,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1206 } 1256 }
1207 1257
1208 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); 1258 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1209 if (len && !TLV_OK(msg.req, len)) { 1259 if (!len || !TLV_OK(msg.req, len)) {
1210 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); 1260 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1211 err = -EOPNOTSUPP; 1261 err = -EOPNOTSUPP;
1212 goto send; 1262 goto send;
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index efb16f69bd2c..a457c0fbbef1 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -398,7 +398,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT); 398 ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
399 if (ret == -EWOULDBLOCK) 399 if (ret == -EWOULDBLOCK)
400 return -EWOULDBLOCK; 400 return -EWOULDBLOCK;
401 if (ret > 0) { 401 if (ret == sizeof(s)) {
402 read_lock_bh(&sk->sk_callback_lock); 402 read_lock_bh(&sk->sk_callback_lock);
403 ret = tipc_conn_rcv_sub(srv, con, &s); 403 ret = tipc_conn_rcv_sub(srv, con, &s);
404 read_unlock_bh(&sk->sk_callback_lock); 404 read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index a264cf2accd0..d4de871e7d4d 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -41,13 +41,20 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
41 * not know if the device has more tx queues than rx, or the opposite. 41 * not know if the device has more tx queues than rx, or the opposite.
42 * This might also change during run time. 42 * This might also change during run time.
43 */ 43 */
44static void xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem, 44static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
45 u16 queue_id) 45 u16 queue_id)
46{ 46{
47 if (queue_id >= max_t(unsigned int,
48 dev->real_num_rx_queues,
49 dev->real_num_tx_queues))
50 return -EINVAL;
51
47 if (queue_id < dev->real_num_rx_queues) 52 if (queue_id < dev->real_num_rx_queues)
48 dev->_rx[queue_id].umem = umem; 53 dev->_rx[queue_id].umem = umem;
49 if (queue_id < dev->real_num_tx_queues) 54 if (queue_id < dev->real_num_tx_queues)
50 dev->_tx[queue_id].umem = umem; 55 dev->_tx[queue_id].umem = umem;
56
57 return 0;
51} 58}
52 59
53struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, 60struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
@@ -88,7 +95,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
88 goto out_rtnl_unlock; 95 goto out_rtnl_unlock;
89 } 96 }
90 97
91 xdp_reg_umem_at_qid(dev, umem, queue_id); 98 err = xdp_reg_umem_at_qid(dev, umem, queue_id);
99 if (err)
100 goto out_rtnl_unlock;
101
92 umem->dev = dev; 102 umem->dev = dev;
93 umem->queue_id = queue_id; 103 umem->queue_id = queue_id;
94 if (force_copy) 104 if (force_copy)
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 66ae15f27c70..db1a91dfa702 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -279,6 +279,7 @@ $(obj)/%.o: $(src)/%.c
279 -Wno-gnu-variable-sized-type-not-at-end \ 279 -Wno-gnu-variable-sized-type-not-at-end \
280 -Wno-address-of-packed-member -Wno-tautological-compare \ 280 -Wno-address-of-packed-member -Wno-tautological-compare \
281 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \ 281 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
282 -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
282 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@ 283 -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
283ifeq ($(DWARF2BTF),y) 284ifeq ($(DWARF2BTF),y)
284 $(BTF_PAHOLE) -J $@ 285 $(BTF_PAHOLE) -J $@
diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h
new file mode 100644
index 000000000000..5cd7c1d1a5d5
--- /dev/null
+++ b/samples/bpf/asm_goto_workaround.h
@@ -0,0 +1,16 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2019 Facebook */
3#ifndef __ASM_GOTO_WORKAROUND_H
4#define __ASM_GOTO_WORKAROUND_H
5
6/* this will bring in asm_volatile_goto macro definition
7 * if enabled by compiler and config options.
8 */
9#include <linux/types.h>
10
11#ifdef asm_volatile_goto
12#undef asm_volatile_goto
13#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
14#endif
15
16#endif
diff --git a/samples/bpf/test_cgrp2_attach2.c b/samples/bpf/test_cgrp2_attach2.c
index d7b68ef5ba79..0bb6507256b7 100644
--- a/samples/bpf/test_cgrp2_attach2.c
+++ b/samples/bpf/test_cgrp2_attach2.c
@@ -77,7 +77,7 @@ static int test_foo_bar(void)
77 77
78 /* Create cgroup /foo, get fd, and join it */ 78 /* Create cgroup /foo, get fd, and join it */
79 foo = create_and_get_cgroup(FOO); 79 foo = create_and_get_cgroup(FOO);
80 if (!foo) 80 if (foo < 0)
81 goto err; 81 goto err;
82 82
83 if (join_cgroup(FOO)) 83 if (join_cgroup(FOO))
@@ -94,7 +94,7 @@ static int test_foo_bar(void)
94 94
95 /* Create cgroup /foo/bar, get fd, and join it */ 95 /* Create cgroup /foo/bar, get fd, and join it */
96 bar = create_and_get_cgroup(BAR); 96 bar = create_and_get_cgroup(BAR);
97 if (!bar) 97 if (bar < 0)
98 goto err; 98 goto err;
99 99
100 if (join_cgroup(BAR)) 100 if (join_cgroup(BAR))
@@ -298,19 +298,19 @@ static int test_multiprog(void)
298 goto err; 298 goto err;
299 299
300 cg1 = create_and_get_cgroup("/cg1"); 300 cg1 = create_and_get_cgroup("/cg1");
301 if (!cg1) 301 if (cg1 < 0)
302 goto err; 302 goto err;
303 cg2 = create_and_get_cgroup("/cg1/cg2"); 303 cg2 = create_and_get_cgroup("/cg1/cg2");
304 if (!cg2) 304 if (cg2 < 0)
305 goto err; 305 goto err;
306 cg3 = create_and_get_cgroup("/cg1/cg2/cg3"); 306 cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
307 if (!cg3) 307 if (cg3 < 0)
308 goto err; 308 goto err;
309 cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4"); 309 cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
310 if (!cg4) 310 if (cg4 < 0)
311 goto err; 311 goto err;
312 cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5"); 312 cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
313 if (!cg5) 313 if (cg5 < 0)
314 goto err; 314 goto err;
315 315
316 if (join_cgroup("/cg1/cg2/cg3/cg4/cg5")) 316 if (join_cgroup("/cg1/cg2/cg3/cg4/cg5"))
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index 2259f997a26c..f082d6ac59f0 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -32,7 +32,7 @@ int main(int argc, char **argv)
32 32
33 cg2 = create_and_get_cgroup(CGROUP_PATH); 33 cg2 = create_and_get_cgroup(CGROUP_PATH);
34 34
35 if (!cg2) 35 if (cg2 < 0)
36 goto err; 36 goto err;
37 37
38 if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) { 38 if (bpf_map_update_elem(map_fd[0], &idx, &cg2, BPF_ANY)) {
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index 0a197f86ac43..8bfda95c77ad 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
103 return 1; 103 return 1;
104 } 104 }
105 105
106 ifindex = if_nametoindex(argv[1]); 106 ifindex = if_nametoindex(argv[optind]);
107 if (!ifindex) { 107 if (!ifindex) {
108 perror("if_nametoindex"); 108 perror("if_nametoindex");
109 return 1; 109 return 1;
diff --git a/samples/seccomp/Makefile b/samples/seccomp/Makefile
index 4920903c8009..fb43a814d4c0 100644
--- a/samples/seccomp/Makefile
+++ b/samples/seccomp/Makefile
@@ -34,6 +34,7 @@ HOSTCFLAGS_bpf-direct.o += $(MFLAG)
34HOSTCFLAGS_dropper.o += $(MFLAG) 34HOSTCFLAGS_dropper.o += $(MFLAG)
35HOSTCFLAGS_bpf-helper.o += $(MFLAG) 35HOSTCFLAGS_bpf-helper.o += $(MFLAG)
36HOSTCFLAGS_bpf-fancy.o += $(MFLAG) 36HOSTCFLAGS_bpf-fancy.o += $(MFLAG)
37HOSTCFLAGS_user-trap.o += $(MFLAG)
37HOSTLDLIBS_bpf-direct += $(MFLAG) 38HOSTLDLIBS_bpf-direct += $(MFLAG)
38HOSTLDLIBS_bpf-fancy += $(MFLAG) 39HOSTLDLIBS_bpf-fancy += $(MFLAG)
39HOSTLDLIBS_dropper += $(MFLAG) 40HOSTLDLIBS_dropper += $(MFLAG)
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 525bff667a52..30816037036e 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -24,10 +24,6 @@ depfile = $(subst $(comma),_,$(dot-target).d)
24basetarget = $(basename $(notdir $@)) 24basetarget = $(basename $(notdir $@))
25 25
26### 26###
27# filename of first prerequisite with directory and extension stripped
28baseprereq = $(basename $(notdir $<))
29
30###
31# Escape single quote for use in echo statements 27# Escape single quote for use in echo statements
32escsq = $(subst $(squote),'\$(squote)',$1) 28escsq = $(subst $(squote),'\$(squote)',$1)
33 29
diff --git a/scripts/coccinelle/api/alloc/alloc_cast.cocci b/scripts/coccinelle/api/alloc/alloc_cast.cocci
index 408ee3879f9b..18fedf7c60ed 100644
--- a/scripts/coccinelle/api/alloc/alloc_cast.cocci
+++ b/scripts/coccinelle/api/alloc/alloc_cast.cocci
@@ -32,7 +32,7 @@ type T;
32 (T *) 32 (T *)
33 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 33 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
34 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 34 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
35 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 35 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
36 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 36 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
37 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 37 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
38 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 38 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -55,7 +55,7 @@ type r1.T;
55* (T *) 55* (T *)
56 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 56 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
57 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 57 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
58 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 58 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
59 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 59 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
60 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 60 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
61 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 61 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -78,7 +78,7 @@ type r1.T;
78- (T *) 78- (T *)
79 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 79 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
80 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 80 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
81 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 81 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
82 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 82 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
83 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 83 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
84 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 84 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
@@ -95,7 +95,7 @@ position p;
95 (T@p *) 95 (T@p *)
96 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\| 96 \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|
97 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\| 97 kmem_cache_alloc_node\|kmalloc_node\|kzalloc_node\|vmalloc\|vzalloc\|
98 dma_alloc_coherent\|dma_zalloc_coherent\|devm_kmalloc\|devm_kzalloc\| 98 dma_alloc_coherent\|devm_kmalloc\|devm_kzalloc\|
99 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\| 99 kvmalloc\|kvzalloc\|kvmalloc_node\|kvzalloc_node\|pci_alloc_consistent\|
100 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\| 100 pci_zalloc_consistent\|kmem_alloc\|kmem_zalloc\|kmem_zone_alloc\|
101 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...) 101 kmem_zone_zalloc\|vmalloc_node\|vzalloc_node\)(...)
diff --git a/scripts/coccinelle/api/alloc/zalloc-simple.cocci b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
index d819275b7fde..5cd1991c582e 100644
--- a/scripts/coccinelle/api/alloc/zalloc-simple.cocci
+++ b/scripts/coccinelle/api/alloc/zalloc-simple.cocci
@@ -69,15 +69,6 @@ statement S;
69- x = (T)vmalloc(E1); 69- x = (T)vmalloc(E1);
70+ x = (T)vzalloc(E1); 70+ x = (T)vzalloc(E1);
71| 71|
72- x = dma_alloc_coherent(E2,E1,E3,E4);
73+ x = dma_zalloc_coherent(E2,E1,E3,E4);
74|
75- x = (T *)dma_alloc_coherent(E2,E1,E3,E4);
76+ x = dma_zalloc_coherent(E2,E1,E3,E4);
77|
78- x = (T)dma_alloc_coherent(E2,E1,E3,E4);
79+ x = (T)dma_zalloc_coherent(E2,E1,E3,E4);
80|
81- x = kmalloc_node(E1,E2,E3); 72- x = kmalloc_node(E1,E2,E3);
82+ x = kzalloc_node(E1,E2,E3); 73+ x = kzalloc_node(E1,E2,E3);
83| 74|
@@ -225,7 +216,7 @@ p << r2.p;
225x << r2.x; 216x << r2.x;
226@@ 217@@
227 218
228msg="WARNING: dma_zalloc_coherent should be used for %s, instead of dma_alloc_coherent/memset" % (x) 219msg="WARNING: dma_alloc_coherent use in %s already zeroes out memory, so memset is not needed" % (x)
229coccilib.report.print_report(p[0], msg) 220coccilib.report.print_report(p[0], msg)
230 221
231//----------------------------------------------------------------- 222//-----------------------------------------------------------------
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
index de70b8470971..89c47f57d1ce 100644
--- a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
+++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
@@ -13,7 +13,7 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
13 for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { 13 for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
14 const char *sym; 14 const char *sym;
15 rtx body; 15 rtx body;
16 rtx masked_sp; 16 rtx mask, masked_sp;
17 17
18 /* 18 /*
19 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard 19 * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
@@ -33,12 +33,13 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
33 * produces the address of the copy of the stack canary value 33 * produces the address of the copy of the stack canary value
34 * stored in struct thread_info 34 * stored in struct thread_info
35 */ 35 */
36 mask = GEN_INT(sext_hwi(sp_mask, GET_MODE_PRECISION(Pmode)));
36 masked_sp = gen_reg_rtx(Pmode); 37 masked_sp = gen_reg_rtx(Pmode);
37 38
38 emit_insn_before(gen_rtx_SET(masked_sp, 39 emit_insn_before(gen_rtx_SET(masked_sp,
39 gen_rtx_AND(Pmode, 40 gen_rtx_AND(Pmode,
40 stack_pointer_rtx, 41 stack_pointer_rtx,
41 GEN_INT(sp_mask))), 42 mask)),
42 insn); 43 insn);
43 44
44 SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp, 45 SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
@@ -52,6 +53,19 @@ static unsigned int arm_pertask_ssp_rtl_execute(void)
52#define NO_GATE 53#define NO_GATE
53#include "gcc-generate-rtl-pass.h" 54#include "gcc-generate-rtl-pass.h"
54 55
56#if BUILDING_GCC_VERSION >= 9000
57static bool no(void)
58{
59 return false;
60}
61
62static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data)
63{
64 targetm.have_stack_protect_combined_set = no;
65 targetm.have_stack_protect_combined_test = no;
66}
67#endif
68
55__visible int plugin_init(struct plugin_name_args *plugin_info, 69__visible int plugin_init(struct plugin_name_args *plugin_info,
56 struct plugin_gcc_version *version) 70 struct plugin_gcc_version *version)
57{ 71{
@@ -99,5 +113,10 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
99 register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, 113 register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
100 NULL, &arm_pertask_ssp_rtl_pass_info); 114 NULL, &arm_pertask_ssp_rtl_pass_info);
101 115
116#if BUILDING_GCC_VERSION >= 9000
117 register_callback(plugin_info->base_name, PLUGIN_START_UNIT,
118 arm_pertask_ssp_start_unit, NULL);
119#endif
120
102 return 0; 121 return 0;
103} 122}
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index c05ab001b54c..181973509a05 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -206,4 +206,4 @@ filechk_conf_cfg = $(CONFIG_SHELL) $<
206$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE 206$(obj)/%conf-cfg: $(src)/%conf-cfg.sh FORCE
207 $(call filechk,conf_cfg) 207 $(call filechk,conf_cfg)
208 208
209clean-files += conf-cfg 209clean-files += *conf-cfg
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0de2fb236640..26bf886bd168 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -2185,7 +2185,7 @@ static void add_intree_flag(struct buffer *b, int is_intree)
2185/* Cannot check for assembler */ 2185/* Cannot check for assembler */
2186static void add_retpoline(struct buffer *b) 2186static void add_retpoline(struct buffer *b)
2187{ 2187{
2188 buf_printf(b, "\n#ifdef RETPOLINE\n"); 2188 buf_printf(b, "\n#ifdef CONFIG_RETPOLINE\n");
2189 buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); 2189 buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n");
2190 buf_printf(b, "#endif\n"); 2190 buf_printf(b, "#endif\n");
2191} 2191}
diff --git a/security/security.c b/security/security.c
index f1b8d2587639..55bc49027ba9 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1027,6 +1027,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
1027 1027
1028void security_cred_free(struct cred *cred) 1028void security_cred_free(struct cred *cred)
1029{ 1029{
1030 /*
1031 * There is a failure case in prepare_creds() that
1032 * may result in a call here with ->security being NULL.
1033 */
1034 if (unlikely(cred->security == NULL))
1035 return;
1036
1030 call_void_hook(cred_free, cred); 1037 call_void_hook(cred_free, cred);
1031} 1038}
1032 1039
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a50d625e7946..c1c31e33657a 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
732 kfree(key); 732 kfree(key);
733 if (datum) { 733 if (datum) {
734 levdatum = datum; 734 levdatum = datum;
735 ebitmap_destroy(&levdatum->level->cat); 735 if (levdatum->level)
736 ebitmap_destroy(&levdatum->level->cat);
736 kfree(levdatum->level); 737 kfree(levdatum->level);
737 } 738 }
738 kfree(datum); 739 kfree(datum);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index ffda91a4a1aa..02514fe558b4 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
368 break; 368 break;
369 case YAMA_SCOPE_RELATIONAL: 369 case YAMA_SCOPE_RELATIONAL:
370 rcu_read_lock(); 370 rcu_read_lock();
371 if (!task_is_descendant(current, child) && 371 if (!pid_alive(child))
372 rc = -EPERM;
373 if (!rc && !task_is_descendant(current, child) &&
372 !ptracer_exception_found(current, child) && 374 !ptracer_exception_found(current, child) &&
373 !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) 375 !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
374 rc = -EPERM; 376 rc = -EPERM;
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index c3f57a3fb1a5..40ebde2e1ab1 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -47,8 +47,8 @@ static int alloc_dbdma_descriptor_ring(struct i2sbus_dev *i2sdev,
47 /* We use the PCI APIs for now until the generic one gets fixed 47 /* We use the PCI APIs for now until the generic one gets fixed
48 * enough or until we get some macio-specific versions 48 * enough or until we get some macio-specific versions
49 */ 49 */
50 r->space = dma_zalloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev, 50 r->space = dma_alloc_coherent(&macio_get_pci_dev(i2sdev->macio)->dev,
51 r->size, &r->bus_addr, GFP_KERNEL); 51 r->size, &r->bus_addr, GFP_KERNEL);
52 if (!r->space) 52 if (!r->space)
53 return -ENOMEM; 53 return -ENOMEM;
54 54
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 598d140bb7cb..5fc497c6d738 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -903,6 +903,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
903 struct dsp_spos_instance * ins = chip->dsp_spos_instance; 903 struct dsp_spos_instance * ins = chip->dsp_spos_instance;
904 int i; 904 int i;
905 905
906 if (!ins)
907 return 0;
908
906 snd_info_free_entry(ins->proc_sym_info_entry); 909 snd_info_free_entry(ins->proc_sym_info_entry);
907 ins->proc_sym_info_entry = NULL; 910 ins->proc_sym_info_entry = NULL;
908 911
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index aee4cbd29d53..0b3e7a18ca78 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4102,6 +4102,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
4102 case 0x10ec0295: 4102 case 0x10ec0295:
4103 case 0x10ec0289: 4103 case 0x10ec0289:
4104 case 0x10ec0299: 4104 case 0x10ec0299:
4105 alc_process_coef_fw(codec, alc225_pre_hsmode);
4105 alc_process_coef_fw(codec, coef0225); 4106 alc_process_coef_fw(codec, coef0225);
4106 break; 4107 break;
4107 case 0x10ec0867: 4108 case 0x10ec0867:
@@ -5440,6 +5441,13 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
5440 } 5441 }
5441} 5442}
5442 5443
5444static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
5445 const struct hda_fixup *fix, int action)
5446{
5447 if (action == HDA_FIXUP_ACT_PRE_PROBE)
5448 snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
5449}
5450
5443/* for hda_fixup_thinkpad_acpi() */ 5451/* for hda_fixup_thinkpad_acpi() */
5444#include "thinkpad_helper.c" 5452#include "thinkpad_helper.c"
5445 5453
@@ -5549,6 +5557,7 @@ enum {
5549 ALC293_FIXUP_LENOVO_SPK_NOISE, 5557 ALC293_FIXUP_LENOVO_SPK_NOISE,
5550 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, 5558 ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
5551 ALC255_FIXUP_DELL_SPK_NOISE, 5559 ALC255_FIXUP_DELL_SPK_NOISE,
5560 ALC225_FIXUP_DISABLE_MIC_VREF,
5552 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, 5561 ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5553 ALC295_FIXUP_DISABLE_DAC3, 5562 ALC295_FIXUP_DISABLE_DAC3,
5554 ALC280_FIXUP_HP_HEADSET_MIC, 5563 ALC280_FIXUP_HP_HEADSET_MIC,
@@ -6268,6 +6277,12 @@ static const struct hda_fixup alc269_fixups[] = {
6268 .chained = true, 6277 .chained = true,
6269 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE 6278 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
6270 }, 6279 },
6280 [ALC225_FIXUP_DISABLE_MIC_VREF] = {
6281 .type = HDA_FIXUP_FUNC,
6282 .v.func = alc_fixup_disable_mic_vref,
6283 .chained = true,
6284 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
6285 },
6271 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = { 6286 [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
6272 .type = HDA_FIXUP_VERBS, 6287 .type = HDA_FIXUP_VERBS,
6273 .v.verbs = (const struct hda_verb[]) { 6288 .v.verbs = (const struct hda_verb[]) {
@@ -6277,7 +6292,7 @@ static const struct hda_fixup alc269_fixups[] = {
6277 {} 6292 {}
6278 }, 6293 },
6279 .chained = true, 6294 .chained = true,
6280 .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE 6295 .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
6281 }, 6296 },
6282 [ALC280_FIXUP_HP_HEADSET_MIC] = { 6297 [ALC280_FIXUP_HP_HEADSET_MIC] = {
6283 .type = HDA_FIXUP_FUNC, 6298 .type = HDA_FIXUP_FUNC,
@@ -6584,6 +6599,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6584 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), 6599 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
6585 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC), 6600 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
6586 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB), 6601 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
6602 SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
6587 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6603 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6588 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 6604 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
6589 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 6605 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c
index 7609eceba1a2..9e71d7cda999 100644
--- a/sound/sparc/dbri.c
+++ b/sound/sparc/dbri.c
@@ -2541,8 +2541,8 @@ static int snd_dbri_create(struct snd_card *card,
2541 dbri->op = op; 2541 dbri->op = op;
2542 dbri->irq = irq; 2542 dbri->irq = irq;
2543 2543
2544 dbri->dma = dma_zalloc_coherent(&op->dev, sizeof(struct dbri_dma), 2544 dbri->dma = dma_alloc_coherent(&op->dev, sizeof(struct dbri_dma),
2545 &dbri->dma_dvma, GFP_KERNEL); 2545 &dbri->dma_dvma, GFP_KERNEL);
2546 if (!dbri->dma) 2546 if (!dbri->dma)
2547 return -ENOMEM; 2547 return -ENOMEM;
2548 2548
diff --git a/sound/usb/card.c b/sound/usb/card.c
index a105947eaf55..746a72e23cf9 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -246,7 +246,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
246 h1 = snd_usb_find_csint_desc(host_iface->extra, 246 h1 = snd_usb_find_csint_desc(host_iface->extra,
247 host_iface->extralen, 247 host_iface->extralen,
248 NULL, UAC_HEADER); 248 NULL, UAC_HEADER);
249 if (!h1) { 249 if (!h1 || h1->bLength < sizeof(*h1)) {
250 dev_err(&dev->dev, "cannot find UAC_HEADER\n"); 250 dev_err(&dev->dev, "cannot find UAC_HEADER\n");
251 return -EINVAL; 251 return -EINVAL;
252 } 252 }
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index c63c84b54969..e7d441d0e839 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -753,8 +753,9 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
753 struct uac_mixer_unit_descriptor *desc) 753 struct uac_mixer_unit_descriptor *desc)
754{ 754{
755 int mu_channels; 755 int mu_channels;
756 void *c;
756 757
757 if (desc->bLength < 11) 758 if (desc->bLength < sizeof(*desc))
758 return -EINVAL; 759 return -EINVAL;
759 if (!desc->bNrInPins) 760 if (!desc->bNrInPins)
760 return -EINVAL; 761 return -EINVAL;
@@ -763,6 +764,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
763 case UAC_VERSION_1: 764 case UAC_VERSION_1:
764 case UAC_VERSION_2: 765 case UAC_VERSION_2:
765 default: 766 default:
767 if (desc->bLength < sizeof(*desc) + desc->bNrInPins + 1)
768 return 0; /* no bmControls -> skip */
766 mu_channels = uac_mixer_unit_bNrChannels(desc); 769 mu_channels = uac_mixer_unit_bNrChannels(desc);
767 break; 770 break;
768 case UAC_VERSION_3: 771 case UAC_VERSION_3:
@@ -772,7 +775,11 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
772 } 775 }
773 776
774 if (!mu_channels) 777 if (!mu_channels)
775 return -EINVAL; 778 return 0;
779
780 c = uac_mixer_unit_bmControls(desc, state->mixer->protocol);
781 if (c - (void *)desc + (mu_channels - 1) / 8 >= desc->bLength)
782 return 0; /* no bmControls -> skip */
776 783
777 return mu_channels; 784 return mu_channels;
778} 785}
@@ -944,7 +951,7 @@ static int check_input_term(struct mixer_build *state, int id,
944 struct uac_mixer_unit_descriptor *d = p1; 951 struct uac_mixer_unit_descriptor *d = p1;
945 952
946 err = uac_mixer_unit_get_channels(state, d); 953 err = uac_mixer_unit_get_channels(state, d);
947 if (err < 0) 954 if (err <= 0)
948 return err; 955 return err;
949 956
950 term->channels = err; 957 term->channels = err;
@@ -2068,11 +2075,15 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
2068 2075
2069 if (state->mixer->protocol == UAC_VERSION_2) { 2076 if (state->mixer->protocol == UAC_VERSION_2) {
2070 struct uac2_input_terminal_descriptor *d_v2 = raw_desc; 2077 struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
2078 if (d_v2->bLength < sizeof(*d_v2))
2079 return -EINVAL;
2071 control = UAC2_TE_CONNECTOR; 2080 control = UAC2_TE_CONNECTOR;
2072 term_id = d_v2->bTerminalID; 2081 term_id = d_v2->bTerminalID;
2073 bmctls = le16_to_cpu(d_v2->bmControls); 2082 bmctls = le16_to_cpu(d_v2->bmControls);
2074 } else if (state->mixer->protocol == UAC_VERSION_3) { 2083 } else if (state->mixer->protocol == UAC_VERSION_3) {
2075 struct uac3_input_terminal_descriptor *d_v3 = raw_desc; 2084 struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
2085 if (d_v3->bLength < sizeof(*d_v3))
2086 return -EINVAL;
2076 control = UAC3_TE_INSERTION; 2087 control = UAC3_TE_INSERTION;
2077 term_id = d_v3->bTerminalID; 2088 term_id = d_v3->bTerminalID;
2078 bmctls = le32_to_cpu(d_v3->bmControls); 2089 bmctls = le32_to_cpu(d_v3->bmControls);
@@ -2118,7 +2129,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2118 if (err < 0) 2129 if (err < 0)
2119 continue; 2130 continue;
2120 /* no bmControls field (e.g. Maya44) -> ignore */ 2131 /* no bmControls field (e.g. Maya44) -> ignore */
2121 if (desc->bLength <= 10 + input_pins) 2132 if (!num_outs)
2122 continue; 2133 continue;
2123 err = check_input_term(state, desc->baSourceID[pin], &iterm); 2134 err = check_input_term(state, desc->baSourceID[pin], &iterm);
2124 if (err < 0) 2135 if (err < 0)
@@ -2314,7 +2325,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
2314 char *name) 2325 char *name)
2315{ 2326{
2316 struct uac_processing_unit_descriptor *desc = raw_desc; 2327 struct uac_processing_unit_descriptor *desc = raw_desc;
2317 int num_ins = desc->bNrInPins; 2328 int num_ins;
2318 struct usb_mixer_elem_info *cval; 2329 struct usb_mixer_elem_info *cval;
2319 struct snd_kcontrol *kctl; 2330 struct snd_kcontrol *kctl;
2320 int i, err, nameid, type, len; 2331 int i, err, nameid, type, len;
@@ -2329,7 +2340,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
2329 0, NULL, default_value_info 2340 0, NULL, default_value_info
2330 }; 2341 };
2331 2342
2332 if (desc->bLength < 13 || desc->bLength < 13 + num_ins || 2343 if (desc->bLength < 13) {
2344 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
2345 return -EINVAL;
2346 }
2347
2348 num_ins = desc->bNrInPins;
2349 if (desc->bLength < 13 + num_ins ||
2333 desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) { 2350 desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
2334 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid); 2351 usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
2335 return -EINVAL; 2352 return -EINVAL;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 37fc0447c071..b345beb447bd 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3326,6 +3326,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3326 } 3326 }
3327 } 3327 }
3328 }, 3328 },
3329 {
3330 .ifnum = -1
3331 },
3329 } 3332 }
3330 } 3333 }
3331}, 3334},
@@ -3369,6 +3372,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
3369 } 3372 }
3370 } 3373 }
3371 }, 3374 },
3375 {
3376 .ifnum = -1
3377 },
3372 } 3378 }
3373 } 3379 }
3374}, 3380},
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 96340f23f86d..ebbadb3a7094 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -768,7 +768,7 @@ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
768 * REG1: PLL binary search enable, soft mute enable. 768 * REG1: PLL binary search enable, soft mute enable.
769 */ 769 */
770 CM6206_REG1_PLLBIN_EN | 770 CM6206_REG1_PLLBIN_EN |
771 CM6206_REG1_SOFT_MUTE_EN | 771 CM6206_REG1_SOFT_MUTE_EN,
772 /* 772 /*
773 * REG2: enable output drivers, 773 * REG2: enable output drivers,
774 * select front channels to the headphone output, 774 * select front channels to the headphone output,
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 67cf849aa16b..d9e3de495c16 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -596,12 +596,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
596 csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT); 596 csep = snd_usb_find_desc(alts->extra, alts->extralen, NULL, USB_DT_CS_ENDPOINT);
597 597
598 if (!csep || csep->bLength < 7 || 598 if (!csep || csep->bLength < 7 ||
599 csep->bDescriptorSubtype != UAC_EP_GENERAL) { 599 csep->bDescriptorSubtype != UAC_EP_GENERAL)
600 usb_audio_warn(chip, 600 goto error;
601 "%u:%d : no or invalid class specific endpoint descriptor\n",
602 iface_no, altsd->bAlternateSetting);
603 return 0;
604 }
605 601
606 if (protocol == UAC_VERSION_1) { 602 if (protocol == UAC_VERSION_1) {
607 attributes = csep->bmAttributes; 603 attributes = csep->bmAttributes;
@@ -609,6 +605,8 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
609 struct uac2_iso_endpoint_descriptor *csep2 = 605 struct uac2_iso_endpoint_descriptor *csep2 =
610 (struct uac2_iso_endpoint_descriptor *) csep; 606 (struct uac2_iso_endpoint_descriptor *) csep;
611 607
608 if (csep2->bLength < sizeof(*csep2))
609 goto error;
612 attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX; 610 attributes = csep->bmAttributes & UAC_EP_CS_ATTR_FILL_MAX;
613 611
614 /* emulate the endpoint attributes of a v1 device */ 612 /* emulate the endpoint attributes of a v1 device */
@@ -618,12 +616,20 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
618 struct uac3_iso_endpoint_descriptor *csep3 = 616 struct uac3_iso_endpoint_descriptor *csep3 =
619 (struct uac3_iso_endpoint_descriptor *) csep; 617 (struct uac3_iso_endpoint_descriptor *) csep;
620 618
619 if (csep3->bLength < sizeof(*csep3))
620 goto error;
621 /* emulate the endpoint attributes of a v1 device */ 621 /* emulate the endpoint attributes of a v1 device */
622 if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH) 622 if (le32_to_cpu(csep3->bmControls) & UAC2_CONTROL_PITCH)
623 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL; 623 attributes |= UAC_EP_CS_ATTR_PITCH_CONTROL;
624 } 624 }
625 625
626 return attributes; 626 return attributes;
627
628 error:
629 usb_audio_warn(chip,
630 "%u:%d : no or invalid class specific endpoint descriptor\n",
631 iface_no, altsd->bAlternateSetting);
632 return 0;
627} 633}
628 634
629/* find an input terminal descriptor (either UAC1 or UAC2) with the given 635/* find an input terminal descriptor (either UAC1 or UAC2) with the given
@@ -631,13 +637,17 @@ static int parse_uac_endpoint_attributes(struct snd_usb_audio *chip,
631 */ 637 */
632static void * 638static void *
633snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface, 639snd_usb_find_input_terminal_descriptor(struct usb_host_interface *ctrl_iface,
634 int terminal_id) 640 int terminal_id, bool uac23)
635{ 641{
636 struct uac2_input_terminal_descriptor *term = NULL; 642 struct uac2_input_terminal_descriptor *term = NULL;
643 size_t minlen = uac23 ? sizeof(struct uac2_input_terminal_descriptor) :
644 sizeof(struct uac_input_terminal_descriptor);
637 645
638 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, 646 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
639 ctrl_iface->extralen, 647 ctrl_iface->extralen,
640 term, UAC_INPUT_TERMINAL))) { 648 term, UAC_INPUT_TERMINAL))) {
649 if (term->bLength < minlen)
650 continue;
641 if (term->bTerminalID == terminal_id) 651 if (term->bTerminalID == terminal_id)
642 return term; 652 return term;
643 } 653 }
@@ -655,7 +665,8 @@ snd_usb_find_output_terminal_descriptor(struct usb_host_interface *ctrl_iface,
655 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra, 665 while ((term = snd_usb_find_csint_desc(ctrl_iface->extra,
656 ctrl_iface->extralen, 666 ctrl_iface->extralen,
657 term, UAC_OUTPUT_TERMINAL))) { 667 term, UAC_OUTPUT_TERMINAL))) {
658 if (term->bTerminalID == terminal_id) 668 if (term->bLength >= sizeof(*term) &&
669 term->bTerminalID == terminal_id)
659 return term; 670 return term;
660 } 671 }
661 672
@@ -729,7 +740,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
729 format = le16_to_cpu(as->wFormatTag); /* remember the format value */ 740 format = le16_to_cpu(as->wFormatTag); /* remember the format value */
730 741
731 iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 742 iterm = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
732 as->bTerminalLink); 743 as->bTerminalLink,
744 false);
733 if (iterm) { 745 if (iterm) {
734 num_channels = iterm->bNrChannels; 746 num_channels = iterm->bNrChannels;
735 chconfig = le16_to_cpu(iterm->wChannelConfig); 747 chconfig = le16_to_cpu(iterm->wChannelConfig);
@@ -764,7 +776,8 @@ snd_usb_get_audioformat_uac12(struct snd_usb_audio *chip,
764 * to extract the clock 776 * to extract the clock
765 */ 777 */
766 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 778 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
767 as->bTerminalLink); 779 as->bTerminalLink,
780 true);
768 if (input_term) { 781 if (input_term) {
769 clock = input_term->bCSourceID; 782 clock = input_term->bCSourceID;
770 if (!chconfig && (num_channels == input_term->bNrChannels)) 783 if (!chconfig && (num_channels == input_term->bNrChannels))
@@ -998,7 +1011,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
998 * to extract the clock 1011 * to extract the clock
999 */ 1012 */
1000 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf, 1013 input_term = snd_usb_find_input_terminal_descriptor(chip->ctrl_intf,
1001 as->bTerminalLink); 1014 as->bTerminalLink,
1015 true);
1002 if (input_term) { 1016 if (input_term) {
1003 clock = input_term->bCSourceID; 1017 clock = input_term->bCSourceID;
1004 goto found_clock; 1018 goto found_clock;
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index ff91192407d1..f599064dd8dc 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -47,6 +47,7 @@ enum perf_event_powerpc_regs {
47 PERF_REG_POWERPC_DAR, 47 PERF_REG_POWERPC_DAR,
48 PERF_REG_POWERPC_DSISR, 48 PERF_REG_POWERPC_DSISR,
49 PERF_REG_POWERPC_SIER, 49 PERF_REG_POWERPC_SIER,
50 PERF_REG_POWERPC_MMCRA,
50 PERF_REG_POWERPC_MAX, 51 PERF_REG_POWERPC_MAX,
51}; 52};
52#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */ 53#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h
deleted file mode 100644
index 985534d0b448..000000000000
--- a/tools/arch/powerpc/include/uapi/asm/unistd.h
+++ /dev/null
@@ -1,404 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
2/*
3 * This file contains the system call numbers.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
11#define _UAPI_ASM_POWERPC_UNISTD_H_
12
13
14#define __NR_restart_syscall 0
15#define __NR_exit 1
16#define __NR_fork 2
17#define __NR_read 3
18#define __NR_write 4
19#define __NR_open 5
20#define __NR_close 6
21#define __NR_waitpid 7
22#define __NR_creat 8
23#define __NR_link 9
24#define __NR_unlink 10
25#define __NR_execve 11
26#define __NR_chdir 12
27#define __NR_time 13
28#define __NR_mknod 14
29#define __NR_chmod 15
30#define __NR_lchown 16
31#define __NR_break 17
32#define __NR_oldstat 18
33#define __NR_lseek 19
34#define __NR_getpid 20
35#define __NR_mount 21
36#define __NR_umount 22
37#define __NR_setuid 23
38#define __NR_getuid 24
39#define __NR_stime 25
40#define __NR_ptrace 26
41#define __NR_alarm 27
42#define __NR_oldfstat 28
43#define __NR_pause 29
44#define __NR_utime 30
45#define __NR_stty 31
46#define __NR_gtty 32
47#define __NR_access 33
48#define __NR_nice 34
49#define __NR_ftime 35
50#define __NR_sync 36
51#define __NR_kill 37
52#define __NR_rename 38
53#define __NR_mkdir 39
54#define __NR_rmdir 40
55#define __NR_dup 41
56#define __NR_pipe 42
57#define __NR_times 43
58#define __NR_prof 44
59#define __NR_brk 45
60#define __NR_setgid 46
61#define __NR_getgid 47
62#define __NR_signal 48
63#define __NR_geteuid 49
64#define __NR_getegid 50
65#define __NR_acct 51
66#define __NR_umount2 52
67#define __NR_lock 53
68#define __NR_ioctl 54
69#define __NR_fcntl 55
70#define __NR_mpx 56
71#define __NR_setpgid 57
72#define __NR_ulimit 58
73#define __NR_oldolduname 59
74#define __NR_umask 60
75#define __NR_chroot 61
76#define __NR_ustat 62
77#define __NR_dup2 63
78#define __NR_getppid 64
79#define __NR_getpgrp 65
80#define __NR_setsid 66
81#define __NR_sigaction 67
82#define __NR_sgetmask 68
83#define __NR_ssetmask 69
84#define __NR_setreuid 70
85#define __NR_setregid 71
86#define __NR_sigsuspend 72
87#define __NR_sigpending 73
88#define __NR_sethostname 74
89#define __NR_setrlimit 75
90#define __NR_getrlimit 76
91#define __NR_getrusage 77
92#define __NR_gettimeofday 78
93#define __NR_settimeofday 79
94#define __NR_getgroups 80
95#define __NR_setgroups 81
96#define __NR_select 82
97#define __NR_symlink 83
98#define __NR_oldlstat 84
99#define __NR_readlink 85
100#define __NR_uselib 86
101#define __NR_swapon 87
102#define __NR_reboot 88
103#define __NR_readdir 89
104#define __NR_mmap 90
105#define __NR_munmap 91
106#define __NR_truncate 92
107#define __NR_ftruncate 93
108#define __NR_fchmod 94
109#define __NR_fchown 95
110#define __NR_getpriority 96
111#define __NR_setpriority 97
112#define __NR_profil 98
113#define __NR_statfs 99
114#define __NR_fstatfs 100
115#define __NR_ioperm 101
116#define __NR_socketcall 102
117#define __NR_syslog 103
118#define __NR_setitimer 104
119#define __NR_getitimer 105
120#define __NR_stat 106
121#define __NR_lstat 107
122#define __NR_fstat 108
123#define __NR_olduname 109
124#define __NR_iopl 110
125#define __NR_vhangup 111
126#define __NR_idle 112
127#define __NR_vm86 113
128#define __NR_wait4 114
129#define __NR_swapoff 115
130#define __NR_sysinfo 116
131#define __NR_ipc 117
132#define __NR_fsync 118
133#define __NR_sigreturn 119
134#define __NR_clone 120
135#define __NR_setdomainname 121
136#define __NR_uname 122
137#define __NR_modify_ldt 123
138#define __NR_adjtimex 124
139#define __NR_mprotect 125
140#define __NR_sigprocmask 126
141#define __NR_create_module 127
142#define __NR_init_module 128
143#define __NR_delete_module 129
144#define __NR_get_kernel_syms 130
145#define __NR_quotactl 131
146#define __NR_getpgid 132
147#define __NR_fchdir 133
148#define __NR_bdflush 134
149#define __NR_sysfs 135
150#define __NR_personality 136
151#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
152#define __NR_setfsuid 138
153#define __NR_setfsgid 139
154#define __NR__llseek 140
155#define __NR_getdents 141
156#define __NR__newselect 142
157#define __NR_flock 143
158#define __NR_msync 144
159#define __NR_readv 145
160#define __NR_writev 146
161#define __NR_getsid 147
162#define __NR_fdatasync 148
163#define __NR__sysctl 149
164#define __NR_mlock 150
165#define __NR_munlock 151
166#define __NR_mlockall 152
167#define __NR_munlockall 153
168#define __NR_sched_setparam 154
169#define __NR_sched_getparam 155
170#define __NR_sched_setscheduler 156
171#define __NR_sched_getscheduler 157
172#define __NR_sched_yield 158
173#define __NR_sched_get_priority_max 159
174#define __NR_sched_get_priority_min 160
175#define __NR_sched_rr_get_interval 161
176#define __NR_nanosleep 162
177#define __NR_mremap 163
178#define __NR_setresuid 164
179#define __NR_getresuid 165
180#define __NR_query_module 166
181#define __NR_poll 167
182#define __NR_nfsservctl 168
183#define __NR_setresgid 169
184#define __NR_getresgid 170
185#define __NR_prctl 171
186#define __NR_rt_sigreturn 172
187#define __NR_rt_sigaction 173
188#define __NR_rt_sigprocmask 174
189#define __NR_rt_sigpending 175
190#define __NR_rt_sigtimedwait 176
191#define __NR_rt_sigqueueinfo 177
192#define __NR_rt_sigsuspend 178
193#define __NR_pread64 179
194#define __NR_pwrite64 180
195#define __NR_chown 181
196#define __NR_getcwd 182
197#define __NR_capget 183
198#define __NR_capset 184
199#define __NR_sigaltstack 185
200#define __NR_sendfile 186
201#define __NR_getpmsg 187 /* some people actually want streams */
202#define __NR_putpmsg 188 /* some people actually want streams */
203#define __NR_vfork 189
204#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
205#define __NR_readahead 191
206#ifndef __powerpc64__ /* these are 32-bit only */
207#define __NR_mmap2 192
208#define __NR_truncate64 193
209#define __NR_ftruncate64 194
210#define __NR_stat64 195
211#define __NR_lstat64 196
212#define __NR_fstat64 197
213#endif
214#define __NR_pciconfig_read 198
215#define __NR_pciconfig_write 199
216#define __NR_pciconfig_iobase 200
217#define __NR_multiplexer 201
218#define __NR_getdents64 202
219#define __NR_pivot_root 203
220#ifndef __powerpc64__
221#define __NR_fcntl64 204
222#endif
223#define __NR_madvise 205
224#define __NR_mincore 206
225#define __NR_gettid 207
226#define __NR_tkill 208
227#define __NR_setxattr 209
228#define __NR_lsetxattr 210
229#define __NR_fsetxattr 211
230#define __NR_getxattr 212
231#define __NR_lgetxattr 213
232#define __NR_fgetxattr 214
233#define __NR_listxattr 215
234#define __NR_llistxattr 216
235#define __NR_flistxattr 217
236#define __NR_removexattr 218
237#define __NR_lremovexattr 219
238#define __NR_fremovexattr 220
239#define __NR_futex 221
240#define __NR_sched_setaffinity 222
241#define __NR_sched_getaffinity 223
242/* 224 currently unused */
243#define __NR_tuxcall 225
244#ifndef __powerpc64__
245#define __NR_sendfile64 226
246#endif
247#define __NR_io_setup 227
248#define __NR_io_destroy 228
249#define __NR_io_getevents 229
250#define __NR_io_submit 230
251#define __NR_io_cancel 231
252#define __NR_set_tid_address 232
253#define __NR_fadvise64 233
254#define __NR_exit_group 234
255#define __NR_lookup_dcookie 235
256#define __NR_epoll_create 236
257#define __NR_epoll_ctl 237
258#define __NR_epoll_wait 238
259#define __NR_remap_file_pages 239
260#define __NR_timer_create 240
261#define __NR_timer_settime 241
262#define __NR_timer_gettime 242
263#define __NR_timer_getoverrun 243
264#define __NR_timer_delete 244
265#define __NR_clock_settime 245
266#define __NR_clock_gettime 246
267#define __NR_clock_getres 247
268#define __NR_clock_nanosleep 248
269#define __NR_swapcontext 249
270#define __NR_tgkill 250
271#define __NR_utimes 251
272#define __NR_statfs64 252
273#define __NR_fstatfs64 253
274#ifndef __powerpc64__
275#define __NR_fadvise64_64 254
276#endif
277#define __NR_rtas 255
278#define __NR_sys_debug_setcontext 256
279/* Number 257 is reserved for vserver */
280#define __NR_migrate_pages 258
281#define __NR_mbind 259
282#define __NR_get_mempolicy 260
283#define __NR_set_mempolicy 261
284#define __NR_mq_open 262
285#define __NR_mq_unlink 263
286#define __NR_mq_timedsend 264
287#define __NR_mq_timedreceive 265
288#define __NR_mq_notify 266
289#define __NR_mq_getsetattr 267
290#define __NR_kexec_load 268
291#define __NR_add_key 269
292#define __NR_request_key 270
293#define __NR_keyctl 271
294#define __NR_waitid 272
295#define __NR_ioprio_set 273
296#define __NR_ioprio_get 274
297#define __NR_inotify_init 275
298#define __NR_inotify_add_watch 276
299#define __NR_inotify_rm_watch 277
300#define __NR_spu_run 278
301#define __NR_spu_create 279
302#define __NR_pselect6 280
303#define __NR_ppoll 281
304#define __NR_unshare 282
305#define __NR_splice 283
306#define __NR_tee 284
307#define __NR_vmsplice 285
308#define __NR_openat 286
309#define __NR_mkdirat 287
310#define __NR_mknodat 288
311#define __NR_fchownat 289
312#define __NR_futimesat 290
313#ifdef __powerpc64__
314#define __NR_newfstatat 291
315#else
316#define __NR_fstatat64 291
317#endif
318#define __NR_unlinkat 292
319#define __NR_renameat 293
320#define __NR_linkat 294
321#define __NR_symlinkat 295
322#define __NR_readlinkat 296
323#define __NR_fchmodat 297
324#define __NR_faccessat 298
325#define __NR_get_robust_list 299
326#define __NR_set_robust_list 300
327#define __NR_move_pages 301
328#define __NR_getcpu 302
329#define __NR_epoll_pwait 303
330#define __NR_utimensat 304
331#define __NR_signalfd 305
332#define __NR_timerfd_create 306
333#define __NR_eventfd 307
334#define __NR_sync_file_range2 308
335#define __NR_fallocate 309
336#define __NR_subpage_prot 310
337#define __NR_timerfd_settime 311
338#define __NR_timerfd_gettime 312
339#define __NR_signalfd4 313
340#define __NR_eventfd2 314
341#define __NR_epoll_create1 315
342#define __NR_dup3 316
343#define __NR_pipe2 317
344#define __NR_inotify_init1 318
345#define __NR_perf_event_open 319
346#define __NR_preadv 320
347#define __NR_pwritev 321
348#define __NR_rt_tgsigqueueinfo 322
349#define __NR_fanotify_init 323
350#define __NR_fanotify_mark 324
351#define __NR_prlimit64 325
352#define __NR_socket 326
353#define __NR_bind 327
354#define __NR_connect 328
355#define __NR_listen 329
356#define __NR_accept 330
357#define __NR_getsockname 331
358#define __NR_getpeername 332
359#define __NR_socketpair 333
360#define __NR_send 334
361#define __NR_sendto 335
362#define __NR_recv 336
363#define __NR_recvfrom 337
364#define __NR_shutdown 338
365#define __NR_setsockopt 339
366#define __NR_getsockopt 340
367#define __NR_sendmsg 341
368#define __NR_recvmsg 342
369#define __NR_recvmmsg 343
370#define __NR_accept4 344
371#define __NR_name_to_handle_at 345
372#define __NR_open_by_handle_at 346
373#define __NR_clock_adjtime 347
374#define __NR_syncfs 348
375#define __NR_sendmmsg 349
376#define __NR_setns 350
377#define __NR_process_vm_readv 351
378#define __NR_process_vm_writev 352
379#define __NR_finit_module 353
380#define __NR_kcmp 354
381#define __NR_sched_setattr 355
382#define __NR_sched_getattr 356
383#define __NR_renameat2 357
384#define __NR_seccomp 358
385#define __NR_getrandom 359
386#define __NR_memfd_create 360
387#define __NR_bpf 361
388#define __NR_execveat 362
389#define __NR_switch_endian 363
390#define __NR_userfaultfd 364
391#define __NR_membarrier 365
392#define __NR_mlock2 378
393#define __NR_copy_file_range 379
394#define __NR_preadv2 380
395#define __NR_pwritev2 381
396#define __NR_kexec_file_load 382
397#define __NR_statx 383
398#define __NR_pkey_alloc 384
399#define __NR_pkey_free 385
400#define __NR_pkey_mprotect 386
401#define __NR_rseq 387
402#define __NR_io_pgetevents 388
403
404#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..0b3cb52fd29d
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Copyright (C) 2015 Regents of the University of California
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
19#define _UAPI_ASM_RISCV_BITSPERLONG_H
20
21#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
22
23#include <asm-generic/bitsperlong.h>
24
25#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 28c4a502b419..6d6122524711 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -281,9 +281,11 @@
281#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ 281#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
282#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ 282#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
283#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ 283#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
284#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
284#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */ 285#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
285#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */ 286#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
286#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */ 287#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
288#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
287#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */ 289#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
288#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */ 290#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
289#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */ 291#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 33833d1909af..a5ea841cc6d2 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31)) 16# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
17#endif 17#endif
18 18
19#ifdef CONFIG_X86_SMAP
20# define DISABLE_SMAP 0
21#else
22# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
23#endif
24
19#ifdef CONFIG_X86_INTEL_UMIP 25#ifdef CONFIG_X86_INTEL_UMIP
20# define DISABLE_UMIP 0 26# define DISABLE_UMIP 0
21#else 27#else
@@ -68,7 +74,7 @@
68#define DISABLED_MASK6 0 74#define DISABLED_MASK6 0
69#define DISABLED_MASK7 (DISABLE_PTI) 75#define DISABLED_MASK7 (DISABLE_PTI)
70#define DISABLED_MASK8 0 76#define DISABLED_MASK8 0
71#define DISABLED_MASK9 (DISABLE_MPX) 77#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
72#define DISABLED_MASK10 0 78#define DISABLED_MASK10 0
73#define DISABLED_MASK11 0 79#define DISABLED_MASK11 0
74#define DISABLED_MASK12 0 80#define DISABLED_MASK12 0
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 492f0f24e2d3..4ad1f0894d53 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -93,9 +93,16 @@ BFD_SRCS = jit_disasm.c
93SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c)) 93SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
94 94
95ifeq ($(feature-libbfd),1) 95ifeq ($(feature-libbfd),1)
96 LIBS += -lbfd -ldl -lopcodes
97else ifeq ($(feature-libbfd-liberty),1)
98 LIBS += -lbfd -ldl -lopcodes -liberty
99else ifeq ($(feature-libbfd-liberty-z),1)
100 LIBS += -lbfd -ldl -lopcodes -liberty -lz
101endif
102
103ifneq ($(filter -lbfd,$(LIBS)),)
96CFLAGS += -DHAVE_LIBBFD_SUPPORT 104CFLAGS += -DHAVE_LIBBFD_SUPPORT
97SRCS += $(BFD_SRCS) 105SRCS += $(BFD_SRCS)
98LIBS += -lbfd -lopcodes
99endif 106endif
100 107
101OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o 108OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 3f0629edbca5..6ba5f567a9d8 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -82,8 +82,6 @@ static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
82 int bits_to_copy; 82 int bits_to_copy;
83 __u64 print_num; 83 __u64 print_num;
84 84
85 data += BITS_ROUNDDOWN_BYTES(bit_offset);
86 bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
87 bits_to_copy = bit_offset + nr_bits; 85 bits_to_copy = bit_offset + nr_bits;
88 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy); 86 bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
89 87
@@ -118,7 +116,9 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
118 * BTF_INT_OFFSET() cannot exceed 64 bits. 116 * BTF_INT_OFFSET() cannot exceed 64 bits.
119 */ 117 */
120 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type); 118 total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
121 btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw, 119 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
120 bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
121 btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
122 is_plain_text); 122 is_plain_text);
123} 123}
124 124
@@ -216,11 +216,12 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
216 } 216 }
217 217
218 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off)); 218 jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
219 data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
219 if (bitfield_size) { 220 if (bitfield_size) {
220 btf_dumper_bitfield(bitfield_size, bit_offset, 221 btf_dumper_bitfield(bitfield_size,
221 data, d->jw, d->is_plain_text); 222 BITS_PER_BYTE_MASKED(bit_offset),
223 data_off, d->jw, d->is_plain_text);
222 } else { 224 } else {
223 data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
224 ret = btf_dumper_do_type(d, m[i].type, 225 ret = btf_dumper_do_type(d, m[i].type,
225 BITS_PER_BYTE_MASKED(bit_offset), 226 BITS_PER_BYTE_MASKED(bit_offset),
226 data_off); 227 data_off);
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index bff7ee026680..6046dcab51cc 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,15 +1,10 @@
1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 1// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
2/* 2/*
3 * Simple streaming JSON writer 3 * Simple streaming JSON writer
4 * 4 *
5 * This takes care of the annoying bits of JSON syntax like the commas 5 * This takes care of the annoying bits of JSON syntax like the commas
6 * after elements 6 * after elements
7 * 7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Authors: Stephen Hemminger <stephen@networkplumber.org> 8 * Authors: Stephen Hemminger <stephen@networkplumber.org>
14 */ 9 */
15 10
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index c1ab51aed99c..cb9a1993681c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -5,11 +5,6 @@
5 * This takes care of the annoying bits of JSON syntax like the commas 5 * This takes care of the annoying bits of JSON syntax like the commas
6 * after elements 6 * after elements
7 * 7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Authors: Stephen Hemminger <stephen@networkplumber.org> 8 * Authors: Stephen Hemminger <stephen@networkplumber.org>
14 */ 9 */
15 10
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index c7f3321fbe43..d90127298f12 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx)
738__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents) 738__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
739#define __NR_rseq 293 739#define __NR_rseq 293
740__SYSCALL(__NR_rseq, sys_rseq) 740__SYSCALL(__NR_rseq, sys_rseq)
741#define __NR_kexec_file_load 294
742__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
741 743
742#undef __NR_syscalls 744#undef __NR_syscalls
743#define __NR_syscalls 294 745#define __NR_syscalls 295
744 746
745/* 747/*
746 * 32 bit systems traditionally used different 748 * 32 bit systems traditionally used different
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 8dd6aefdafa4..fd92ce8388fc 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -13,6 +13,8 @@
13#include "../../arch/mips/include/uapi/asm/bitsperlong.h" 13#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
14#elif defined(__ia64__) 14#elif defined(__ia64__)
15#include "../../arch/ia64/include/uapi/asm/bitsperlong.h" 15#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
16#elif defined(__riscv)
17#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
16#else 18#else
17#include <asm-generic/bitsperlong.h> 19#include <asm-generic/bitsperlong.h>
18#endif 20#endif
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
412 int irq_seq; 412 int irq_seq;
413} drm_i915_irq_wait_t; 413} drm_i915_irq_wait_t;
414 414
415/*
416 * Different modes of per-process Graphics Translation Table,
417 * see I915_PARAM_HAS_ALIASING_PPGTT
418 */
419#define I915_GEM_PPGTT_NONE 0
420#define I915_GEM_PPGTT_ALIASING 1
421#define I915_GEM_PPGTT_FULL 2
422
415/* Ioctl to query kernel params: 423/* Ioctl to query kernel params:
416 */ 424 */
417#define I915_PARAM_IRQ_ACTIVE 1 425#define I915_PARAM_IRQ_ACTIVE 1
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index a441ea1bfe6d..121e82ce296b 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -14,6 +14,11 @@
14#include <linux/ioctl.h> 14#include <linux/ioctl.h>
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17/* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
18#if !defined(__KERNEL__)
19#include <linux/mount.h>
20#endif
21
17/* 22/*
18 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change 23 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
19 * the file limit at runtime and only root can increase the per-process 24 * the file limit at runtime and only root can increase the per-process
@@ -101,57 +106,6 @@ struct inodes_stat_t {
101 106
102#define NR_FILE 8192 /* this can well be larger on a larger system */ 107#define NR_FILE 8192 /* this can well be larger on a larger system */
103 108
104
105/*
106 * These are the fs-independent mount-flags: up to 32 flags are supported
107 */
108#define MS_RDONLY 1 /* Mount read-only */
109#define MS_NOSUID 2 /* Ignore suid and sgid bits */
110#define MS_NODEV 4 /* Disallow access to device special files */
111#define MS_NOEXEC 8 /* Disallow program execution */
112#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
113#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
114#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
115#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
116#define MS_NOATIME 1024 /* Do not update access times. */
117#define MS_NODIRATIME 2048 /* Do not update directory access times */
118#define MS_BIND 4096
119#define MS_MOVE 8192
120#define MS_REC 16384
121#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
122 MS_VERBOSE is deprecated. */
123#define MS_SILENT 32768
124#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
125#define MS_UNBINDABLE (1<<17) /* change to unbindable */
126#define MS_PRIVATE (1<<18) /* change to private */
127#define MS_SLAVE (1<<19) /* change to slave */
128#define MS_SHARED (1<<20) /* change to shared */
129#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
130#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
131#define MS_I_VERSION (1<<23) /* Update inode I_version field */
132#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
133#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
134
135/* These sb flags are internal to the kernel */
136#define MS_SUBMOUNT (1<<26)
137#define MS_NOREMOTELOCK (1<<27)
138#define MS_NOSEC (1<<28)
139#define MS_BORN (1<<29)
140#define MS_ACTIVE (1<<30)
141#define MS_NOUSER (1<<31)
142
143/*
144 * Superblock flags that can be altered by MS_REMOUNT
145 */
146#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
147 MS_LAZYTIME)
148
149/*
150 * Old magic mount flag and mask
151 */
152#define MS_MGC_VAL 0xC0ED0000
153#define MS_MGC_MSK 0xffff0000
154
155/* 109/*
156 * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR. 110 * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
157 */ 111 */
@@ -269,7 +223,8 @@ struct fsxattr {
269#define FS_POLICY_FLAGS_PAD_16 0x02 223#define FS_POLICY_FLAGS_PAD_16 0x02
270#define FS_POLICY_FLAGS_PAD_32 0x03 224#define FS_POLICY_FLAGS_PAD_32 0x03
271#define FS_POLICY_FLAGS_PAD_MASK 0x03 225#define FS_POLICY_FLAGS_PAD_MASK 0x03
272#define FS_POLICY_FLAGS_VALID 0x03 226#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */
227#define FS_POLICY_FLAGS_VALID 0x07
273 228
274/* Encryption algorithms */ 229/* Encryption algorithms */
275#define FS_ENCRYPTION_MODE_INVALID 0 230#define FS_ENCRYPTION_MODE_INVALID 0
@@ -281,6 +236,7 @@ struct fsxattr {
281#define FS_ENCRYPTION_MODE_AES_128_CTS 6 236#define FS_ENCRYPTION_MODE_AES_128_CTS 6
282#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ 237#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
283#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ 238#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
239#define FS_ENCRYPTION_MODE_ADIANTUM 9
284 240
285struct fscrypt_policy { 241struct fscrypt_policy {
286 __u8 version; 242 __u8 version;
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 1debfa42cba1..d6533828123a 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -288,6 +288,7 @@ enum {
288 IFLA_BR_MCAST_IGMP_VERSION, 288 IFLA_BR_MCAST_IGMP_VERSION,
289 IFLA_BR_MCAST_MLD_VERSION, 289 IFLA_BR_MCAST_MLD_VERSION,
290 IFLA_BR_VLAN_STATS_PER_PORT, 290 IFLA_BR_VLAN_STATS_PER_PORT,
291 IFLA_BR_MULTI_BOOLOPT,
291 __IFLA_BR_MAX, 292 __IFLA_BR_MAX,
292}; 293};
293 294
@@ -533,6 +534,7 @@ enum {
533 IFLA_VXLAN_LABEL, 534 IFLA_VXLAN_LABEL,
534 IFLA_VXLAN_GPE, 535 IFLA_VXLAN_GPE,
535 IFLA_VXLAN_TTL_INHERIT, 536 IFLA_VXLAN_TTL_INHERIT,
537 IFLA_VXLAN_DF,
536 __IFLA_VXLAN_MAX 538 __IFLA_VXLAN_MAX
537}; 539};
538#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) 540#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -542,6 +544,14 @@ struct ifla_vxlan_port_range {
542 __be16 high; 544 __be16 high;
543}; 545};
544 546
547enum ifla_vxlan_df {
548 VXLAN_DF_UNSET = 0,
549 VXLAN_DF_SET,
550 VXLAN_DF_INHERIT,
551 __VXLAN_DF_END,
552 VXLAN_DF_MAX = __VXLAN_DF_END - 1,
553};
554
545/* GENEVE section */ 555/* GENEVE section */
546enum { 556enum {
547 IFLA_GENEVE_UNSPEC, 557 IFLA_GENEVE_UNSPEC,
@@ -557,10 +567,19 @@ enum {
557 IFLA_GENEVE_UDP_ZERO_CSUM6_RX, 567 IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
558 IFLA_GENEVE_LABEL, 568 IFLA_GENEVE_LABEL,
559 IFLA_GENEVE_TTL_INHERIT, 569 IFLA_GENEVE_TTL_INHERIT,
570 IFLA_GENEVE_DF,
560 __IFLA_GENEVE_MAX 571 __IFLA_GENEVE_MAX
561}; 572};
562#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) 573#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
563 574
575enum ifla_geneve_df {
576 GENEVE_DF_UNSET = 0,
577 GENEVE_DF_SET,
578 GENEVE_DF_INHERIT,
579 __GENEVE_DF_END,
580 GENEVE_DF_MAX = __GENEVE_DF_END - 1,
581};
582
564/* PPP section */ 583/* PPP section */
565enum { 584enum {
566 IFLA_PPP_UNSPEC, 585 IFLA_PPP_UNSPEC,
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 48e8a225b985..f6052e70bf40 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
266 266
267#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000) 267#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
268#define IN_MULTICAST(a) IN_CLASSD(a) 268#define IN_MULTICAST(a) IN_CLASSD(a)
269#define IN_MULTICAST_NET 0xF0000000 269#define IN_MULTICAST_NET 0xe0000000
270 270
271#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) 271#define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff)
272#define IN_BADCLASS(a) IN_EXPERIMENTAL((a)) 272#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
273
274#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
275#define IN_CLASSE_NET 0xffffffff
276#define IN_CLASSE_NSHIFT 0
273 277
274/* Address to accept any incoming messages. */ 278/* Address to accept any incoming messages. */
275#define INADDR_ANY ((unsigned long int) 0x00000000) 279#define INADDR_ANY ((unsigned long int) 0x00000000)
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 2b7a652c9fa4..6d4ea4b6c922 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -492,6 +492,17 @@ struct kvm_dirty_log {
492 }; 492 };
493}; 493};
494 494
495/* for KVM_CLEAR_DIRTY_LOG */
496struct kvm_clear_dirty_log {
497 __u32 slot;
498 __u32 num_pages;
499 __u64 first_page;
500 union {
501 void __user *dirty_bitmap; /* one bit per page */
502 __u64 padding2;
503 };
504};
505
495/* for KVM_SET_SIGNAL_MASK */ 506/* for KVM_SET_SIGNAL_MASK */
496struct kvm_signal_mask { 507struct kvm_signal_mask {
497 __u32 len; 508 __u32 len;
@@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt {
975#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 986#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
976#define KVM_CAP_EXCEPTION_PAYLOAD 164 987#define KVM_CAP_EXCEPTION_PAYLOAD 164
977#define KVM_CAP_ARM_VM_IPA_SIZE 165 988#define KVM_CAP_ARM_VM_IPA_SIZE 165
989#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
990#define KVM_CAP_HYPERV_CPUID 167
978 991
979#ifdef KVM_CAP_IRQ_ROUTING 992#ifdef KVM_CAP_IRQ_ROUTING
980 993
@@ -1421,6 +1434,12 @@ struct kvm_enc_region {
1421#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state) 1434#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
1422#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) 1435#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
1423 1436
1437/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
1438#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
1439
1440/* Available with KVM_CAP_HYPERV_CPUID */
1441#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
1442
1424/* Secure Encrypted Virtualization command */ 1443/* Secure Encrypted Virtualization command */
1425enum sev_cmd_id { 1444enum sev_cmd_id {
1426 /* Guest initialization commands */ 1445 /* Guest initialization commands */
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
new file mode 100644
index 000000000000..3f9ec42510b0
--- /dev/null
+++ b/tools/include/uapi/linux/mount.h
@@ -0,0 +1,58 @@
1#ifndef _UAPI_LINUX_MOUNT_H
2#define _UAPI_LINUX_MOUNT_H
3
4/*
5 * These are the fs-independent mount-flags: up to 32 flags are supported
6 *
7 * Usage of these is restricted within the kernel to core mount(2) code and
8 * callers of sys_mount() only. Filesystems should be using the SB_*
9 * equivalent instead.
10 */
11#define MS_RDONLY 1 /* Mount read-only */
12#define MS_NOSUID 2 /* Ignore suid and sgid bits */
13#define MS_NODEV 4 /* Disallow access to device special files */
14#define MS_NOEXEC 8 /* Disallow program execution */
15#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
16#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
17#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
18#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
19#define MS_NOATIME 1024 /* Do not update access times. */
20#define MS_NODIRATIME 2048 /* Do not update directory access times */
21#define MS_BIND 4096
22#define MS_MOVE 8192
23#define MS_REC 16384
24#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
25 MS_VERBOSE is deprecated. */
26#define MS_SILENT 32768
27#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
28#define MS_UNBINDABLE (1<<17) /* change to unbindable */
29#define MS_PRIVATE (1<<18) /* change to private */
30#define MS_SLAVE (1<<19) /* change to slave */
31#define MS_SHARED (1<<20) /* change to shared */
32#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
33#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
34#define MS_I_VERSION (1<<23) /* Update inode I_version field */
35#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
36#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */
37
38/* These sb flags are internal to the kernel */
39#define MS_SUBMOUNT (1<<26)
40#define MS_NOREMOTELOCK (1<<27)
41#define MS_NOSEC (1<<28)
42#define MS_BORN (1<<29)
43#define MS_ACTIVE (1<<30)
44#define MS_NOUSER (1<<31)
45
46/*
47 * Superblock flags that can be altered by MS_REMOUNT
48 */
49#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
50 MS_LAZYTIME)
51
52/*
53 * Old magic mount flag and mask
54 */
55#define MS_MGC_VAL 0xC0ED0000
56#define MS_MGC_MSK 0xffff0000
57
58#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
new file mode 100644
index 000000000000..0d18b1d1fbbc
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_sched.h
@@ -0,0 +1,1163 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2#ifndef __LINUX_PKT_SCHED_H
3#define __LINUX_PKT_SCHED_H
4
5#include <linux/types.h>
6
7/* Logical priority bands not depending on specific packet scheduler.
8 Every scheduler will map them to real traffic classes, if it has
9 no more precise mechanism to classify packets.
10
11 These numbers have no special meaning, though their coincidence
12 with obsolete IPv6 values is not occasional :-). New IPv6 drafts
13 preferred full anarchy inspired by diffserv group.
14
15 Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
16 class, actually, as rule it will be handled with more care than
17 filler or even bulk.
18 */
19
20#define TC_PRIO_BESTEFFORT 0
21#define TC_PRIO_FILLER 1
22#define TC_PRIO_BULK 2
23#define TC_PRIO_INTERACTIVE_BULK 4
24#define TC_PRIO_INTERACTIVE 6
25#define TC_PRIO_CONTROL 7
26
27#define TC_PRIO_MAX 15
28
29/* Generic queue statistics, available for all the elements.
30 Particular schedulers may have also their private records.
31 */
32
33struct tc_stats {
34 __u64 bytes; /* Number of enqueued bytes */
35 __u32 packets; /* Number of enqueued packets */
36 __u32 drops; /* Packets dropped because of lack of resources */
37 __u32 overlimits; /* Number of throttle events when this
38 * flow goes out of allocated bandwidth */
39 __u32 bps; /* Current flow byte rate */
40 __u32 pps; /* Current flow packet rate */
41 __u32 qlen;
42 __u32 backlog;
43};
44
45struct tc_estimator {
46 signed char interval;
47 unsigned char ewma_log;
48};
49
50/* "Handles"
51 ---------
52
53 All the traffic control objects have 32bit identifiers, or "handles".
54
55 They can be considered as opaque numbers from user API viewpoint,
56 but actually they always consist of two fields: major and
57 minor numbers, which are interpreted by kernel specially,
58 that may be used by applications, though not recommended.
59
60 F.e. qdisc handles always have minor number equal to zero,
61 classes (or flows) have major equal to parent qdisc major, and
62 minor uniquely identifying class inside qdisc.
63
64 Macros to manipulate handles:
65 */
66
67#define TC_H_MAJ_MASK (0xFFFF0000U)
68#define TC_H_MIN_MASK (0x0000FFFFU)
69#define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
70#define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
71#define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
72
73#define TC_H_UNSPEC (0U)
74#define TC_H_ROOT (0xFFFFFFFFU)
75#define TC_H_INGRESS (0xFFFFFFF1U)
76#define TC_H_CLSACT TC_H_INGRESS
77
78#define TC_H_MIN_PRIORITY 0xFFE0U
79#define TC_H_MIN_INGRESS 0xFFF2U
80#define TC_H_MIN_EGRESS 0xFFF3U
81
82/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
83enum tc_link_layer {
84 TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
85 TC_LINKLAYER_ETHERNET,
86 TC_LINKLAYER_ATM,
87};
88#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
89
90struct tc_ratespec {
91 unsigned char cell_log;
92 __u8 linklayer; /* lower 4 bits */
93 unsigned short overhead;
94 short cell_align;
95 unsigned short mpu;
96 __u32 rate;
97};
98
99#define TC_RTAB_SIZE 1024
100
101struct tc_sizespec {
102 unsigned char cell_log;
103 unsigned char size_log;
104 short cell_align;
105 int overhead;
106 unsigned int linklayer;
107 unsigned int mpu;
108 unsigned int mtu;
109 unsigned int tsize;
110};
111
112enum {
113 TCA_STAB_UNSPEC,
114 TCA_STAB_BASE,
115 TCA_STAB_DATA,
116 __TCA_STAB_MAX
117};
118
119#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
120
121/* FIFO section */
122
123struct tc_fifo_qopt {
124 __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
125};
126
127/* SKBPRIO section */
128
129/*
130 * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
131 * SKBPRIO_MAX_PRIORITY should be at least 64 in order for skbprio to be able
132 * to map one to one the DS field of IPV4 and IPV6 headers.
133 * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
134 */
135
136#define SKBPRIO_MAX_PRIORITY 64
137
138struct tc_skbprio_qopt {
139 __u32 limit; /* Queue length in packets. */
140};
141
142/* PRIO section */
143
144#define TCQ_PRIO_BANDS 16
145#define TCQ_MIN_PRIO_BANDS 2
146
147struct tc_prio_qopt {
148 int bands; /* Number of bands */
149 __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */
150};
151
152/* MULTIQ section */
153
154struct tc_multiq_qopt {
155 __u16 bands; /* Number of bands */
156 __u16 max_bands; /* Maximum number of queues */
157};
158
159/* PLUG section */
160
161#define TCQ_PLUG_BUFFER 0
162#define TCQ_PLUG_RELEASE_ONE 1
163#define TCQ_PLUG_RELEASE_INDEFINITE 2
164#define TCQ_PLUG_LIMIT 3
165
166struct tc_plug_qopt {
167 /* TCQ_PLUG_BUFFER: Inset a plug into the queue and
168 * buffer any incoming packets
169 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
170 * to beginning of the next plug.
171 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
172 * Stop buffering packets until the next TCQ_PLUG_BUFFER
173 * command is received (just act as a pass-thru queue).
174 * TCQ_PLUG_LIMIT: Increase/decrease queue size
175 */
176 int action;
177 __u32 limit;
178};
179
180/* TBF section */
181
182struct tc_tbf_qopt {
183 struct tc_ratespec rate;
184 struct tc_ratespec peakrate;
185 __u32 limit;
186 __u32 buffer;
187 __u32 mtu;
188};
189
190enum {
191 TCA_TBF_UNSPEC,
192 TCA_TBF_PARMS,
193 TCA_TBF_RTAB,
194 TCA_TBF_PTAB,
195 TCA_TBF_RATE64,
196 TCA_TBF_PRATE64,
197 TCA_TBF_BURST,
198 TCA_TBF_PBURST,
199 TCA_TBF_PAD,
200 __TCA_TBF_MAX,
201};
202
203#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
204
205
206/* TEQL section */
207
208/* TEQL does not require any parameters */
209
210/* SFQ section */
211
212struct tc_sfq_qopt {
213 unsigned quantum; /* Bytes per round allocated to flow */
214 int perturb_period; /* Period of hash perturbation */
215 __u32 limit; /* Maximal packets in queue */
216 unsigned divisor; /* Hash divisor */
217 unsigned flows; /* Maximal number of flows */
218};
219
220struct tc_sfqred_stats {
221 __u32 prob_drop; /* Early drops, below max threshold */
222 __u32 forced_drop; /* Early drops, after max threshold */
223 __u32 prob_mark; /* Marked packets, below max threshold */
224 __u32 forced_mark; /* Marked packets, after max threshold */
225 __u32 prob_mark_head; /* Marked packets, below max threshold */
226 __u32 forced_mark_head;/* Marked packets, after max threshold */
227};
228
229struct tc_sfq_qopt_v1 {
230 struct tc_sfq_qopt v0;
231 unsigned int depth; /* max number of packets per flow */
232 unsigned int headdrop;
233/* SFQRED parameters */
234 __u32 limit; /* HARD maximal flow queue length (bytes) */
235 __u32 qth_min; /* Min average length threshold (bytes) */
236 __u32 qth_max; /* Max average length threshold (bytes) */
237 unsigned char Wlog; /* log(W) */
238 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
239 unsigned char Scell_log; /* cell size for idle damping */
240 unsigned char flags;
241 __u32 max_P; /* probability, high resolution */
242/* SFQRED stats */
243 struct tc_sfqred_stats stats;
244};
245
246
247struct tc_sfq_xstats {
248 __s32 allot;
249};
250
251/* RED section */
252
253enum {
254 TCA_RED_UNSPEC,
255 TCA_RED_PARMS,
256 TCA_RED_STAB,
257 TCA_RED_MAX_P,
258 __TCA_RED_MAX,
259};
260
261#define TCA_RED_MAX (__TCA_RED_MAX - 1)
262
263struct tc_red_qopt {
264 __u32 limit; /* HARD maximal queue length (bytes) */
265 __u32 qth_min; /* Min average length threshold (bytes) */
266 __u32 qth_max; /* Max average length threshold (bytes) */
267 unsigned char Wlog; /* log(W) */
268 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
269 unsigned char Scell_log; /* cell size for idle damping */
270 unsigned char flags;
271#define TC_RED_ECN 1
272#define TC_RED_HARDDROP 2
273#define TC_RED_ADAPTATIVE 4
274};
275
276struct tc_red_xstats {
277 __u32 early; /* Early drops */
278 __u32 pdrop; /* Drops due to queue limits */
279 __u32 other; /* Drops due to drop() calls */
280 __u32 marked; /* Marked packets */
281};
282
283/* GRED section */
284
285#define MAX_DPs 16
286
287enum {
288 TCA_GRED_UNSPEC,
289 TCA_GRED_PARMS,
290 TCA_GRED_STAB,
291 TCA_GRED_DPS,
292 TCA_GRED_MAX_P,
293 TCA_GRED_LIMIT,
294 TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
295 __TCA_GRED_MAX,
296};
297
298#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
299
300enum {
301 TCA_GRED_VQ_ENTRY_UNSPEC,
302 TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
303 __TCA_GRED_VQ_ENTRY_MAX,
304};
305#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
306
307enum {
308 TCA_GRED_VQ_UNSPEC,
309 TCA_GRED_VQ_PAD,
310 TCA_GRED_VQ_DP, /* u32 */
311 TCA_GRED_VQ_STAT_BYTES, /* u64 */
312 TCA_GRED_VQ_STAT_PACKETS, /* u32 */
313 TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
314 TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
315 TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
316 TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
317 TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
318 TCA_GRED_VQ_STAT_PDROP, /* u32 */
319 TCA_GRED_VQ_STAT_OTHER, /* u32 */
320 TCA_GRED_VQ_FLAGS, /* u32 */
321 __TCA_GRED_VQ_MAX
322};
323
324#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
325
326struct tc_gred_qopt {
327 __u32 limit; /* HARD maximal queue length (bytes) */
328 __u32 qth_min; /* Min average length threshold (bytes) */
329 __u32 qth_max; /* Max average length threshold (bytes) */
330 __u32 DP; /* up to 2^32 DPs */
331 __u32 backlog;
332 __u32 qave;
333 __u32 forced;
334 __u32 early;
335 __u32 other;
336 __u32 pdrop;
337 __u8 Wlog; /* log(W) */
338 __u8 Plog; /* log(P_max/(qth_max-qth_min)) */
339 __u8 Scell_log; /* cell size for idle damping */
340 __u8 prio; /* prio of this VQ */
341 __u32 packets;
342 __u32 bytesin;
343};
344
345/* gred setup */
346struct tc_gred_sopt {
347 __u32 DPs;
348 __u32 def_DP;
349 __u8 grio;
350 __u8 flags;
351 __u16 pad1;
352};
353
354/* CHOKe section */
355
356enum {
357 TCA_CHOKE_UNSPEC,
358 TCA_CHOKE_PARMS,
359 TCA_CHOKE_STAB,
360 TCA_CHOKE_MAX_P,
361 __TCA_CHOKE_MAX,
362};
363
364#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
365
366struct tc_choke_qopt {
367 __u32 limit; /* Hard queue length (packets) */
368 __u32 qth_min; /* Min average threshold (packets) */
369 __u32 qth_max; /* Max average threshold (packets) */
370 unsigned char Wlog; /* log(W) */
371 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
372 unsigned char Scell_log; /* cell size for idle damping */
373 unsigned char flags; /* see RED flags */
374};
375
376struct tc_choke_xstats {
377 __u32 early; /* Early drops */
378 __u32 pdrop; /* Drops due to queue limits */
379 __u32 other; /* Drops due to drop() calls */
380 __u32 marked; /* Marked packets */
381 __u32 matched; /* Drops due to flow match */
382};
383
384/* HTB section */
385#define TC_HTB_NUMPRIO 8
386#define TC_HTB_MAXDEPTH 8
387#define TC_HTB_PROTOVER 3 /* the same as HTB and TC's major */
388
389struct tc_htb_opt {
390 struct tc_ratespec rate;
391 struct tc_ratespec ceil;
392 __u32 buffer;
393 __u32 cbuffer;
394 __u32 quantum;
395 __u32 level; /* out only */
396 __u32 prio;
397};
398struct tc_htb_glob {
399 __u32 version; /* to match HTB/TC */
400 __u32 rate2quantum; /* bps->quantum divisor */
401 __u32 defcls; /* default class number */
402 __u32 debug; /* debug flags */
403
404 /* stats */
405 __u32 direct_pkts; /* count of non shaped packets */
406};
407enum {
408 TCA_HTB_UNSPEC,
409 TCA_HTB_PARMS,
410 TCA_HTB_INIT,
411 TCA_HTB_CTAB,
412 TCA_HTB_RTAB,
413 TCA_HTB_DIRECT_QLEN,
414 TCA_HTB_RATE64,
415 TCA_HTB_CEIL64,
416 TCA_HTB_PAD,
417 __TCA_HTB_MAX,
418};
419
420#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)
421
422struct tc_htb_xstats {
423 __u32 lends;
424 __u32 borrows;
425 __u32 giants; /* unused since 'Make HTB scheduler work with TSO.' */
426 __s32 tokens;
427 __s32 ctokens;
428};
429
430/* HFSC section */
431
432struct tc_hfsc_qopt {
433 __u16 defcls; /* default class */
434};
435
436struct tc_service_curve {
437 __u32 m1; /* slope of the first segment in bps */
438 __u32 d; /* x-projection of the first segment in us */
439 __u32 m2; /* slope of the second segment in bps */
440};
441
442struct tc_hfsc_stats {
443 __u64 work; /* total work done */
444 __u64 rtwork; /* work done by real-time criteria */
445 __u32 period; /* current period */
446 __u32 level; /* class level in hierarchy */
447};
448
449enum {
450 TCA_HFSC_UNSPEC,
451 TCA_HFSC_RSC,
452 TCA_HFSC_FSC,
453 TCA_HFSC_USC,
454 __TCA_HFSC_MAX,
455};
456
457#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
458
459
460/* CBQ section */
461
462#define TC_CBQ_MAXPRIO 8
463#define TC_CBQ_MAXLEVEL 8
464#define TC_CBQ_DEF_EWMA 5
465
466struct tc_cbq_lssopt {
467 unsigned char change;
468 unsigned char flags;
469#define TCF_CBQ_LSS_BOUNDED 1
470#define TCF_CBQ_LSS_ISOLATED 2
471 unsigned char ewma_log;
472 unsigned char level;
473#define TCF_CBQ_LSS_FLAGS 1
474#define TCF_CBQ_LSS_EWMA 2
475#define TCF_CBQ_LSS_MAXIDLE 4
476#define TCF_CBQ_LSS_MINIDLE 8
477#define TCF_CBQ_LSS_OFFTIME 0x10
478#define TCF_CBQ_LSS_AVPKT 0x20
479 __u32 maxidle;
480 __u32 minidle;
481 __u32 offtime;
482 __u32 avpkt;
483};
484
485struct tc_cbq_wrropt {
486 unsigned char flags;
487 unsigned char priority;
488 unsigned char cpriority;
489 unsigned char __reserved;
490 __u32 allot;
491 __u32 weight;
492};
493
494struct tc_cbq_ovl {
495 unsigned char strategy;
496#define TC_CBQ_OVL_CLASSIC 0
497#define TC_CBQ_OVL_DELAY 1
498#define TC_CBQ_OVL_LOWPRIO 2
499#define TC_CBQ_OVL_DROP 3
500#define TC_CBQ_OVL_RCLASSIC 4
501 unsigned char priority2;
502 __u16 pad;
503 __u32 penalty;
504};
505
506struct tc_cbq_police {
507 unsigned char police;
508 unsigned char __res1;
509 unsigned short __res2;
510};
511
512struct tc_cbq_fopt {
513 __u32 split;
514 __u32 defmap;
515 __u32 defchange;
516};
517
518struct tc_cbq_xstats {
519 __u32 borrows;
520 __u32 overactions;
521 __s32 avgidle;
522 __s32 undertime;
523};
524
525enum {
526 TCA_CBQ_UNSPEC,
527 TCA_CBQ_LSSOPT,
528 TCA_CBQ_WRROPT,
529 TCA_CBQ_FOPT,
530 TCA_CBQ_OVL_STRATEGY,
531 TCA_CBQ_RATE,
532 TCA_CBQ_RTAB,
533 TCA_CBQ_POLICE,
534 __TCA_CBQ_MAX,
535};
536
537#define TCA_CBQ_MAX (__TCA_CBQ_MAX - 1)
538
539/* dsmark section */
540
541enum {
542 TCA_DSMARK_UNSPEC,
543 TCA_DSMARK_INDICES,
544 TCA_DSMARK_DEFAULT_INDEX,
545 TCA_DSMARK_SET_TC_INDEX,
546 TCA_DSMARK_MASK,
547 TCA_DSMARK_VALUE,
548 __TCA_DSMARK_MAX,
549};
550
551#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)
552
553/* ATM section */
554
555enum {
556 TCA_ATM_UNSPEC,
557 TCA_ATM_FD, /* file/socket descriptor */
558 TCA_ATM_PTR, /* pointer to descriptor - later */
559 TCA_ATM_HDR, /* LL header */
560 TCA_ATM_EXCESS, /* excess traffic class (0 for CLP) */
561 TCA_ATM_ADDR, /* PVC address (for output only) */
562 TCA_ATM_STATE, /* VC state (ATM_VS_*; for output only) */
563 __TCA_ATM_MAX,
564};
565
566#define TCA_ATM_MAX (__TCA_ATM_MAX - 1)
567
568/* Network emulator */
569
570enum {
571 TCA_NETEM_UNSPEC,
572 TCA_NETEM_CORR,
573 TCA_NETEM_DELAY_DIST,
574 TCA_NETEM_REORDER,
575 TCA_NETEM_CORRUPT,
576 TCA_NETEM_LOSS,
577 TCA_NETEM_RATE,
578 TCA_NETEM_ECN,
579 TCA_NETEM_RATE64,
580 TCA_NETEM_PAD,
581 TCA_NETEM_LATENCY64,
582 TCA_NETEM_JITTER64,
583 TCA_NETEM_SLOT,
584 TCA_NETEM_SLOT_DIST,
585 __TCA_NETEM_MAX,
586};
587
588#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)
589
590struct tc_netem_qopt {
591 __u32 latency; /* added delay (us) */
592 __u32 limit; /* fifo limit (packets) */
593 __u32 loss; /* random packet loss (0=none ~0=100%) */
594 __u32 gap; /* re-ordering gap (0 for none) */
595 __u32 duplicate; /* random packet dup (0=none ~0=100%) */
596 __u32 jitter; /* random jitter in latency (us) */
597};
598
599struct tc_netem_corr {
600 __u32 delay_corr; /* delay correlation */
601 __u32 loss_corr; /* packet loss correlation */
602 __u32 dup_corr; /* duplicate correlation */
603};
604
605struct tc_netem_reorder {
606 __u32 probability;
607 __u32 correlation;
608};
609
610struct tc_netem_corrupt {
611 __u32 probability;
612 __u32 correlation;
613};
614
615struct tc_netem_rate {
616 __u32 rate; /* byte/s */
617 __s32 packet_overhead;
618 __u32 cell_size;
619 __s32 cell_overhead;
620};
621
622struct tc_netem_slot {
623 __s64 min_delay; /* nsec */
624 __s64 max_delay;
625 __s32 max_packets;
626 __s32 max_bytes;
627 __s64 dist_delay; /* nsec */
628 __s64 dist_jitter; /* nsec */
629};
630
631enum {
632 NETEM_LOSS_UNSPEC,
633 NETEM_LOSS_GI, /* General Intuitive - 4 state model */
634 NETEM_LOSS_GE, /* Gilbert Elliot models */
635 __NETEM_LOSS_MAX
636};
637#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
638
639/* State transition probabilities for 4 state model */
640struct tc_netem_gimodel {
641 __u32 p13;
642 __u32 p31;
643 __u32 p32;
644 __u32 p14;
645 __u32 p23;
646};
647
648/* Gilbert-Elliot models */
649struct tc_netem_gemodel {
650 __u32 p;
651 __u32 r;
652 __u32 h;
653 __u32 k1;
654};
655
656#define NETEM_DIST_SCALE 8192
657#define NETEM_DIST_MAX 16384
658
659/* DRR */
660
661enum {
662 TCA_DRR_UNSPEC,
663 TCA_DRR_QUANTUM,
664 __TCA_DRR_MAX
665};
666
667#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
668
669struct tc_drr_stats {
670 __u32 deficit;
671};
672
/* MQPRIO */
#define TC_QOPT_BITMASK		15
#define TC_QOPT_MAX_QUEUE	16

enum {
	TC_MQPRIO_HW_OFFLOAD_NONE,	/* no offload requested */
	TC_MQPRIO_HW_OFFLOAD_TCS,	/* offload TCs, no queue counts */
	__TC_MQPRIO_HW_OFFLOAD_MAX
};

#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)

enum {
	TC_MQPRIO_MODE_DCB,
	TC_MQPRIO_MODE_CHANNEL,
	__TC_MQPRIO_MODE_MAX
};

/*
 * Historical quirk, kept verbatim for source compatibility: this macro
 * shadows the enumerator of the same name.  A self-referential macro is
 * not re-expanded (C11 6.10.3.4), so it still evaluates to "last valid
 * mode".  TC_MQPRIO_MODE_MAX below is the conventionally-named alias
 * matching the TC_MQPRIO_HW_OFFLOAD_MAX pattern above.
 */
#define __TC_MQPRIO_MODE_MAX (__TC_MQPRIO_MODE_MAX - 1)
#define TC_MQPRIO_MODE_MAX __TC_MQPRIO_MODE_MAX

enum {
	TC_MQPRIO_SHAPER_DCB,
	TC_MQPRIO_SHAPER_BW_RATE,	/* Add new shapers below */
	__TC_MQPRIO_SHAPER_MAX
};

/* Same self-shadowing quirk as __TC_MQPRIO_MODE_MAX; alias added. */
#define __TC_MQPRIO_SHAPER_MAX (__TC_MQPRIO_SHAPER_MAX - 1)
#define TC_MQPRIO_SHAPER_MAX __TC_MQPRIO_SHAPER_MAX

struct tc_mqprio_qopt {
	__u8	num_tc;
	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
	__u8	hw;
	__u16	count[TC_QOPT_MAX_QUEUE];
	__u16	offset[TC_QOPT_MAX_QUEUE];
};

#define TC_MQPRIO_F_MODE		0x1
#define TC_MQPRIO_F_SHAPER		0x2
#define TC_MQPRIO_F_MIN_RATE		0x4
#define TC_MQPRIO_F_MAX_RATE		0x8

enum {
	TCA_MQPRIO_UNSPEC,
	TCA_MQPRIO_MODE,
	TCA_MQPRIO_SHAPER,
	TCA_MQPRIO_MIN_RATE64,
	TCA_MQPRIO_MAX_RATE64,
	__TCA_MQPRIO_MAX,
};

#define TCA_MQPRIO_MAX (__TCA_MQPRIO_MAX - 1)
724
/* SFB (stochastic fair blue) */

enum {
	TCA_SFB_UNSPEC,
	TCA_SFB_PARMS,
	__TCA_SFB_MAX,
};
#define TCA_SFB_MAX	(__TCA_SFB_MAX - 1)

/*
 * Note: increment, decrement are Q0.16 fixed-point values.
 */
struct tc_sfb_qopt {
	__u32	rehash_interval;	/* delay between hash move, in ms */
	__u32	warmup_time;	/* double buffering warmup time in ms (warmup_time < rehash_interval) */
	__u32	max;		/* max len of qlen_min */
	__u32	bin_size;	/* maximum queue length per bin */
	__u32	increment;	/* probability increment, (d1 in Blue) */
	__u32	decrement;	/* probability decrement, (d2 in Blue) */
	__u32	limit;		/* max SFB queue length */
	__u32	penalty_rate;	/* inelastic flows are rate limited to 'rate' pps */
	__u32	penalty_burst;
};

struct tc_sfb_xstats {
	__u32	earlydrop;
	__u32	penaltydrop;
	__u32	bucketdrop;
	__u32	queuedrop;
	__u32	childdrop;	/* drops in child qdisc */
	__u32	marked;
	__u32	maxqlen;
	__u32	maxprob;
	__u32	avgprob;
};

#define SFB_MAX_PROB 0xFFFF
763
/* QFQ (quick fair queueing) */
enum {
	TCA_QFQ_UNSPEC,
	TCA_QFQ_WEIGHT,
	TCA_QFQ_LMAX,
	__TCA_QFQ_MAX
};
#define TCA_QFQ_MAX	(__TCA_QFQ_MAX - 1)

struct tc_qfq_stats {
	__u32	weight;
	__u32	lmax;
};
778
/* CODEL */

enum {
	TCA_CODEL_UNSPEC,
	TCA_CODEL_TARGET,
	TCA_CODEL_LIMIT,
	TCA_CODEL_INTERVAL,
	TCA_CODEL_ECN,
	TCA_CODEL_CE_THRESHOLD,
	__TCA_CODEL_MAX
};
#define TCA_CODEL_MAX	(__TCA_CODEL_MAX - 1)

struct tc_codel_xstats {
	__u32	maxpacket;	/* largest packet we've seen so far */
	__u32	count;		/* how many drops we've done since the last time we
				 * entered dropping state
				 */
	__u32	lastcount;	/* count at entry to dropping state */
	__u32	ldelay;		/* in-queue delay seen by most recently dequeued packet */
	__s32	drop_next;	/* time to drop next packet */
	__u32	drop_overlimit;	/* number of time max qdisc packet limit was hit */
	__u32	ecn_mark;	/* number of packets we ECN marked instead of dropped */
	__u32	dropping;	/* are we in dropping state ? */
	__u32	ce_mark;	/* number of CE marked packets because of ce_threshold */
};
806
/* FQ_CODEL */

enum {
	TCA_FQ_CODEL_UNSPEC,
	TCA_FQ_CODEL_TARGET,
	TCA_FQ_CODEL_LIMIT,
	TCA_FQ_CODEL_INTERVAL,
	TCA_FQ_CODEL_ECN,
	TCA_FQ_CODEL_FLOWS,
	TCA_FQ_CODEL_QUANTUM,
	TCA_FQ_CODEL_CE_THRESHOLD,
	TCA_FQ_CODEL_DROP_BATCH_SIZE,
	TCA_FQ_CODEL_MEMORY_LIMIT,
	__TCA_FQ_CODEL_MAX
};
#define TCA_FQ_CODEL_MAX	(__TCA_FQ_CODEL_MAX - 1)

/* discriminator for the union inside tc_fq_codel_xstats */
enum {
	TCA_FQ_CODEL_XSTATS_QDISC,
	TCA_FQ_CODEL_XSTATS_CLASS,
};

struct tc_fq_codel_qd_stats {
	__u32	maxpacket;	/* largest packet we've seen so far */
	__u32	drop_overlimit;	/* number of time max qdisc
				 * packet limit was hit
				 */
	__u32	ecn_mark;	/* number of packets we ECN marked
				 * instead of being dropped
				 */
	__u32	new_flow_count;	/* number of time packets
				 * created a 'new flow'
				 */
	__u32	new_flows_len;	/* count of flows in new list */
	__u32	old_flows_len;	/* count of flows in old list */
	__u32	ce_mark;	/* packets above ce_threshold */
	__u32	memory_usage;	/* in bytes */
	__u32	drop_overmemory;
};

struct tc_fq_codel_cl_stats {
	__s32	deficit;
	__u32	ldelay;		/* in-queue delay seen by most recently
				 * dequeued packet
				 */
	__u32	count;
	__u32	lastcount;
	__u32	dropping;
	__s32	drop_next;
};

struct tc_fq_codel_xstats {
	__u32	type;		/* TCA_FQ_CODEL_XSTATS_QDISC or _CLASS */
	union {
		struct tc_fq_codel_qd_stats qdisc_stats;
		struct tc_fq_codel_cl_stats class_stats;
	};
};
866
867/* FQ */
868
869enum {
870 TCA_FQ_UNSPEC,
871
872 TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
873
874 TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
875
876 TCA_FQ_QUANTUM, /* RR quantum */
877
878 TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
879
880 TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
881
882 TCA_FQ_FLOW_DEFAULT_RATE,/* obsolete, do not use */
883
884 TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
885
886 TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
887
888 TCA_FQ_FLOW_REFILL_DELAY, /* flow credit refill delay in usec */
889
890 TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
891
892 TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
893
894 TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
895
896 __TCA_FQ_MAX
897};
898
899#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
900
901struct tc_fq_qd_stats {
902 __u64 gc_flows;
903 __u64 highprio_packets;
904 __u64 tcp_retrans;
905 __u64 throttled;
906 __u64 flows_plimit;
907 __u64 pkts_too_long;
908 __u64 allocation_errors;
909 __s64 time_next_delayed_flow;
910 __u32 flows;
911 __u32 inactive_flows;
912 __u32 throttled_flows;
913 __u32 unthrottle_latency_ns;
914 __u64 ce_mark; /* packets above ce_threshold */
915};
916
/* Heavy-Hitter Filter */

enum {
	TCA_HHF_UNSPEC,
	TCA_HHF_BACKLOG_LIMIT,
	TCA_HHF_QUANTUM,
	TCA_HHF_HH_FLOWS_LIMIT,
	TCA_HHF_RESET_TIMEOUT,
	TCA_HHF_ADMIT_BYTES,
	TCA_HHF_EVICT_TIMEOUT,
	TCA_HHF_NON_HH_WEIGHT,
	__TCA_HHF_MAX
};
#define TCA_HHF_MAX	(__TCA_HHF_MAX - 1)

struct tc_hhf_xstats {
	__u32	drop_overlimit;	/* number of times max qdisc packet limit
				 * was hit
				 */
	__u32	hh_overlimit;	/* number of times max heavy-hitters was hit */
	__u32	hh_tot_count;	/* number of captured heavy-hitters so far */
	__u32	hh_cur_count;	/* number of current heavy-hitters */
};
941
/* PIE (proportional integral controller enhanced) */
enum {
	TCA_PIE_UNSPEC,
	TCA_PIE_TARGET,
	TCA_PIE_LIMIT,
	TCA_PIE_TUPDATE,
	TCA_PIE_ALPHA,
	TCA_PIE_BETA,
	TCA_PIE_ECN,
	TCA_PIE_BYTEMODE,
	__TCA_PIE_MAX
};
#define TCA_PIE_MAX	(__TCA_PIE_MAX - 1)

struct tc_pie_xstats {
	__u32	prob;		/* current probability */
	__u32	delay;		/* current delay in ms */
	__u32	avg_dq_rate;	/* current average dq_rate in bits/pie_time */
	__u32	packets_in;	/* total number of packets enqueued */
	__u32	dropped;	/* packets dropped due to pie_action */
	__u32	overlimit;	/* dropped due to lack of space in queue */
	__u32	maxq;		/* maximum queue size */
	__u32	ecn_mark;	/* packets marked with ecn */
};
966
/* CBS (credit based shaper) */
struct tc_cbs_qopt {
	__u8	offload;
	__u8	_pad[3];	/* explicit padding to align the __s32 fields */
	__s32	hicredit;
	__s32	locredit;
	__s32	idleslope;
	__s32	sendslope;
};

enum {
	TCA_CBS_UNSPEC,
	TCA_CBS_PARMS,
	__TCA_CBS_MAX,
};
#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
984
985
/* ETF (earliest txtime first) */
struct tc_etf_qopt {
	__s32	delta;
	__s32	clockid;
	__u32	flags;
/* NOTE(review): BIT() is not defined by this header; presumably supplied
 * by the including environment (mainline uses _BITUL here) — TODO confirm. */
#define TC_ETF_DEADLINE_MODE_ON	BIT(0)
#define TC_ETF_OFFLOAD_ON	BIT(1)
};

enum {
	TCA_ETF_UNSPEC,
	TCA_ETF_PARMS,
	__TCA_ETF_MAX,
};
#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
1002
1003
/* CAKE: netlink configuration attributes */
enum {
	TCA_CAKE_UNSPEC,
	TCA_CAKE_PAD,
	TCA_CAKE_BASE_RATE64,
	TCA_CAKE_DIFFSERV_MODE,
	TCA_CAKE_ATM,
	TCA_CAKE_FLOW_MODE,
	TCA_CAKE_OVERHEAD,
	TCA_CAKE_RTT,
	TCA_CAKE_TARGET,
	TCA_CAKE_AUTORATE,
	TCA_CAKE_MEMORY,
	TCA_CAKE_NAT,
	TCA_CAKE_RAW,
	TCA_CAKE_WASH,
	TCA_CAKE_MPU,
	TCA_CAKE_INGRESS,
	TCA_CAKE_ACK_FILTER,
	TCA_CAKE_SPLIT_GSO,
	__TCA_CAKE_MAX
};
#define TCA_CAKE_MAX	(__TCA_CAKE_MAX - 1)
1027
/* CAKE: per-qdisc statistics attributes */
enum {
	__TCA_CAKE_STATS_INVALID,
	TCA_CAKE_STATS_PAD,
	TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
	TCA_CAKE_STATS_MEMORY_LIMIT,
	TCA_CAKE_STATS_MEMORY_USED,
	TCA_CAKE_STATS_AVG_NETOFF,
	TCA_CAKE_STATS_MIN_NETLEN,
	TCA_CAKE_STATS_MAX_NETLEN,
	TCA_CAKE_STATS_MIN_ADJLEN,
	TCA_CAKE_STATS_MAX_ADJLEN,
	TCA_CAKE_STATS_TIN_STATS,
	TCA_CAKE_STATS_DEFICIT,
	TCA_CAKE_STATS_COBALT_COUNT,
	TCA_CAKE_STATS_DROPPING,
	TCA_CAKE_STATS_DROP_NEXT_US,
	TCA_CAKE_STATS_P_DROP,
	TCA_CAKE_STATS_BLUE_TIMER_US,
	__TCA_CAKE_STATS_MAX
};
#define TCA_CAKE_STATS_MAX	(__TCA_CAKE_STATS_MAX - 1)
1049
/* CAKE: per-tin statistics attributes (nested under TCA_CAKE_STATS_TIN_STATS) */
enum {
	__TCA_CAKE_TIN_STATS_INVALID,
	TCA_CAKE_TIN_STATS_PAD,
	TCA_CAKE_TIN_STATS_SENT_PACKETS,
	TCA_CAKE_TIN_STATS_SENT_BYTES64,
	TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
	TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
	TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
	TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
	TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
	TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
	TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
	TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
	TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
	TCA_CAKE_TIN_STATS_TARGET_US,
	TCA_CAKE_TIN_STATS_INTERVAL_US,
	TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
	TCA_CAKE_TIN_STATS_WAY_MISSES,
	TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
	TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
	TCA_CAKE_TIN_STATS_AVG_DELAY_US,
	TCA_CAKE_TIN_STATS_BASE_DELAY_US,
	TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
	TCA_CAKE_TIN_STATS_BULK_FLOWS,
	TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
	TCA_CAKE_TIN_STATS_MAX_SKBLEN,
	TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
	__TCA_CAKE_TIN_STATS_MAX
};
#define TCA_CAKE_TIN_STATS_MAX	(__TCA_CAKE_TIN_STATS_MAX - 1)
#define TC_CAKE_MAX_TINS	(8)
1081
/* CAKE: flow isolation modes (bit-combinable, see inline equivalences) */
enum {
	CAKE_FLOW_NONE = 0,
	CAKE_FLOW_SRC_IP,
	CAKE_FLOW_DST_IP,
	CAKE_FLOW_HOSTS,	/* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
	CAKE_FLOW_FLOWS,
	CAKE_FLOW_DUAL_SRC,	/* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
	CAKE_FLOW_DUAL_DST,	/* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
	CAKE_FLOW_TRIPLE,	/* = CAKE_FLOW_HOSTS  | CAKE_FLOW_FLOWS */
	CAKE_FLOW_MAX,
};

/* CAKE: diffserv handling modes */
enum {
	CAKE_DIFFSERV_DIFFSERV3 = 0,
	CAKE_DIFFSERV_DIFFSERV4,
	CAKE_DIFFSERV_DIFFSERV8,
	CAKE_DIFFSERV_BESTEFFORT,
	CAKE_DIFFSERV_PRECEDENCE,
	CAKE_DIFFSERV_MAX
};

/* CAKE: ACK filter modes */
enum {
	CAKE_ACK_NONE = 0,
	CAKE_ACK_FILTER,
	CAKE_ACK_AGGRESSIVE,
	CAKE_ACK_MAX
};

/* CAKE: ATM/PTM cell-framing compensation modes */
enum {
	CAKE_ATM_NONE = 0,
	CAKE_ATM_ATM,
	CAKE_ATM_PTM,
	CAKE_ATM_MAX
};
1116
1117
/* TAPRIO (time aware priority shaper) */
enum {
	TC_TAPRIO_CMD_SET_GATES = 0x00,
	TC_TAPRIO_CMD_SET_AND_HOLD = 0x01,
	TC_TAPRIO_CMD_SET_AND_RELEASE = 0x02,
};

enum {
	TCA_TAPRIO_SCHED_ENTRY_UNSPEC,
	TCA_TAPRIO_SCHED_ENTRY_INDEX,		/* u32 */
	TCA_TAPRIO_SCHED_ENTRY_CMD,		/* u8 */
	TCA_TAPRIO_SCHED_ENTRY_GATE_MASK,	/* u32 */
	TCA_TAPRIO_SCHED_ENTRY_INTERVAL,	/* u32 */
	__TCA_TAPRIO_SCHED_ENTRY_MAX,
};
#define TCA_TAPRIO_SCHED_ENTRY_MAX (__TCA_TAPRIO_SCHED_ENTRY_MAX - 1)

/* The format for schedule entry list is:
 * [TCA_TAPRIO_SCHED_ENTRY_LIST]
 *   [TCA_TAPRIO_SCHED_ENTRY]
 *     [TCA_TAPRIO_SCHED_ENTRY_CMD]
 *     [TCA_TAPRIO_SCHED_ENTRY_GATES]
 *     [TCA_TAPRIO_SCHED_ENTRY_INTERVAL]
 */
enum {
	TCA_TAPRIO_SCHED_UNSPEC,
	TCA_TAPRIO_SCHED_ENTRY,
	__TCA_TAPRIO_SCHED_MAX,
};
#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)

enum {
	TCA_TAPRIO_ATTR_UNSPEC,
	TCA_TAPRIO_ATTR_PRIOMAP,		/* struct tc_mqprio_qopt */
	TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST,	/* nested of entry */
	TCA_TAPRIO_ATTR_SCHED_BASE_TIME,	/* s64 */
	TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY,	/* single entry */
	TCA_TAPRIO_ATTR_SCHED_CLOCKID,		/* s32 */
	TCA_TAPRIO_PAD,
	__TCA_TAPRIO_ATTR_MAX,
};
#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
1162
1163#endif
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index b17201edfa09..b4875a93363a 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -220,4 +220,12 @@ struct prctl_mm_map {
220# define PR_SPEC_DISABLE (1UL << 2) 220# define PR_SPEC_DISABLE (1UL << 2)
221# define PR_SPEC_FORCE_DISABLE (1UL << 3) 221# define PR_SPEC_FORCE_DISABLE (1UL << 3)
222 222
223/* Reset arm64 pointer authentication keys */
224#define PR_PAC_RESET_KEYS 54
225# define PR_PAC_APIAKEY (1UL << 0)
226# define PR_PAC_APIBKEY (1UL << 1)
227# define PR_PAC_APDAKEY (1UL << 2)
228# define PR_PAC_APDBKEY (1UL << 3)
229# define PR_PAC_APGAKEY (1UL << 4)
230
223#endif /* _LINUX_PRCTL_H */ 231#endif /* _LINUX_PRCTL_H */
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index 84c3de89696a..40d028eed645 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -11,94 +11,9 @@
11 * device configuration. 11 * device configuration.
12 */ 12 */
13 13
14#include <linux/vhost_types.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/compiler.h>
16#include <linux/ioctl.h> 16#include <linux/ioctl.h>
17#include <linux/virtio_config.h>
18#include <linux/virtio_ring.h>
19
20struct vhost_vring_state {
21 unsigned int index;
22 unsigned int num;
23};
24
25struct vhost_vring_file {
26 unsigned int index;
27 int fd; /* Pass -1 to unbind from file. */
28
29};
30
31struct vhost_vring_addr {
32 unsigned int index;
33 /* Option flags. */
34 unsigned int flags;
35 /* Flag values: */
36 /* Whether log address is valid. If set enables logging. */
37#define VHOST_VRING_F_LOG 0
38
39 /* Start of array of descriptors (virtually contiguous) */
40 __u64 desc_user_addr;
41 /* Used structure address. Must be 32 bit aligned */
42 __u64 used_user_addr;
43 /* Available structure address. Must be 16 bit aligned */
44 __u64 avail_user_addr;
45 /* Logging support. */
46 /* Log writes to used structure, at offset calculated from specified
47 * address. Address must be 32 bit aligned. */
48 __u64 log_guest_addr;
49};
50
51/* no alignment requirement */
52struct vhost_iotlb_msg {
53 __u64 iova;
54 __u64 size;
55 __u64 uaddr;
56#define VHOST_ACCESS_RO 0x1
57#define VHOST_ACCESS_WO 0x2
58#define VHOST_ACCESS_RW 0x3
59 __u8 perm;
60#define VHOST_IOTLB_MISS 1
61#define VHOST_IOTLB_UPDATE 2
62#define VHOST_IOTLB_INVALIDATE 3
63#define VHOST_IOTLB_ACCESS_FAIL 4
64 __u8 type;
65};
66
67#define VHOST_IOTLB_MSG 0x1
68#define VHOST_IOTLB_MSG_V2 0x2
69
70struct vhost_msg {
71 int type;
72 union {
73 struct vhost_iotlb_msg iotlb;
74 __u8 padding[64];
75 };
76};
77
78struct vhost_msg_v2 {
79 __u32 type;
80 __u32 reserved;
81 union {
82 struct vhost_iotlb_msg iotlb;
83 __u8 padding[64];
84 };
85};
86
87struct vhost_memory_region {
88 __u64 guest_phys_addr;
89 __u64 memory_size; /* bytes */
90 __u64 userspace_addr;
91 __u64 flags_padding; /* No flags are currently specified. */
92};
93
94/* All region addresses and sizes must be 4K aligned. */
95#define VHOST_PAGE_SIZE 0x1000
96
97struct vhost_memory {
98 __u32 nregions;
99 __u32 padding;
100 struct vhost_memory_region regions[0];
101};
102 17
103/* ioctls */ 18/* ioctls */
104 19
@@ -186,31 +101,7 @@ struct vhost_memory {
186 * device. This can be used to stop the ring (e.g. for migration). */ 101 * device. This can be used to stop the ring (e.g. for migration). */
187#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file) 102#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
188 103
189/* Feature bits */ 104/* VHOST_SCSI specific defines */
190/* Log all write descriptors. Can be changed while device is active. */
191#define VHOST_F_LOG_ALL 26
192/* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
193#define VHOST_NET_F_VIRTIO_NET_HDR 27
194
195/* VHOST_SCSI specific definitions */
196
197/*
198 * Used by QEMU userspace to ensure a consistent vhost-scsi ABI.
199 *
200 * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate +
201 * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage
202 * ABI Rev 1: January 2013. Ignore vhost_tpgt filed in struct vhost_scsi_target.
203 * All the targets under vhost_wwpn can be seen and used by guset.
204 */
205
206#define VHOST_SCSI_ABI_VERSION 1
207
208struct vhost_scsi_target {
209 int abi_version;
210 char vhost_wwpn[224]; /* TRANSPORT_IQN_LEN */
211 unsigned short vhost_tpgt;
212 unsigned short reserved;
213};
214 105
215#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) 106#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
216#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) 107#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index f81e549ddfdb..4db74758c674 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,2 +1,3 @@
1libbpf_version.h 1libbpf_version.h
2FEATURE-DUMP.libbpf 2FEATURE-DUMP.libbpf
3test_libbpf
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
index 056f38310722..607aae40f4ed 100644
--- a/tools/lib/bpf/README.rst
+++ b/tools/lib/bpf/README.rst
@@ -132,6 +132,20 @@ For example, if current state of ``libbpf.map`` is:
132Format of version script and ways to handle ABI changes, including 132Format of version script and ways to handle ABI changes, including
133incompatible ones, described in details in [1]. 133incompatible ones, described in details in [1].
134 134
135Stand-alone build
136=================
137
138Under https://github.com/libbpf/libbpf there is a (semi-)automated
139mirror of the mainline's version of libbpf for a stand-alone build.
140
141However, all changes to libbpf's code base must be upstreamed through
142the mainline kernel tree.
143
144License
145=======
146
147libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
148
135Links 149Links
136===== 150=====
137 151
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 3caaa3428774..88cbd110ae58 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -65,6 +65,17 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
65 return syscall(__NR_bpf, cmd, attr, size); 65 return syscall(__NR_bpf, cmd, attr, size);
66} 66}
67 67
68static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
69{
70 int fd;
71
72 do {
73 fd = sys_bpf(BPF_PROG_LOAD, attr, size);
74 } while (fd < 0 && errno == EAGAIN);
75
76 return fd;
77}
78
68int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) 79int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
69{ 80{
70 __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0; 81 __u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
@@ -232,7 +243,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
232 memcpy(attr.prog_name, load_attr->name, 243 memcpy(attr.prog_name, load_attr->name,
233 min(name_len, BPF_OBJ_NAME_LEN - 1)); 244 min(name_len, BPF_OBJ_NAME_LEN - 1));
234 245
235 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 246 fd = sys_bpf_prog_load(&attr, sizeof(attr));
236 if (fd >= 0) 247 if (fd >= 0)
237 return fd; 248 return fd;
238 249
@@ -269,7 +280,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
269 break; 280 break;
270 } 281 }
271 282
272 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 283 fd = sys_bpf_prog_load(&attr, sizeof(attr));
273 284
274 if (fd >= 0) 285 if (fd >= 0)
275 goto done; 286 goto done;
@@ -283,7 +294,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
283 attr.log_size = log_buf_sz; 294 attr.log_size = log_buf_sz;
284 attr.log_level = 1; 295 attr.log_level = 1;
285 log_buf[0] = 0; 296 log_buf[0] = 0;
286 fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 297 fd = sys_bpf_prog_load(&attr, sizeof(attr));
287done: 298done:
288 free(finfo); 299 free(finfo);
289 free(linfo); 300 free(linfo);
@@ -328,7 +339,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
328 attr.kern_version = kern_version; 339 attr.kern_version = kern_version;
329 attr.prog_flags = prog_flags; 340 attr.prog_flags = prog_flags;
330 341
331 return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); 342 return sys_bpf_prog_load(&attr, sizeof(attr));
332} 343}
333 344
334int bpf_map_update_elem(int fd, const void *key, const void *value, 345int bpf_map_update_elem(int fd, const void *key, const void *value,
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index 8b31c0e00ba3..d463761a58f4 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -194,13 +194,13 @@ void tep_set_page_size(struct tep_handle *pevent, int _page_size)
194} 194}
195 195
196/** 196/**
197 * tep_is_file_bigendian - get if the file is in big endian order 197 * tep_file_bigendian - get if the file is in big endian order
198 * @pevent: a handle to the tep_handle 198 * @pevent: a handle to the tep_handle
199 * 199 *
200 * This returns if the file is in big endian order 200 * This returns if the file is in big endian order
201 * If @pevent is NULL, 0 is returned. 201 * If @pevent is NULL, 0 is returned.
202 */ 202 */
203int tep_is_file_bigendian(struct tep_handle *pevent) 203int tep_file_bigendian(struct tep_handle *pevent)
204{ 204{
205 if(pevent) 205 if(pevent)
206 return pevent->file_bigendian; 206 return pevent->file_bigendian;
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index 9a092dd4a86d..35833ee32d6c 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -7,7 +7,7 @@
7#ifndef _PARSE_EVENTS_INT_H 7#ifndef _PARSE_EVENTS_INT_H
8#define _PARSE_EVENTS_INT_H 8#define _PARSE_EVENTS_INT_H
9 9
10struct cmdline; 10struct tep_cmdline;
11struct cmdline_list; 11struct cmdline_list;
12struct func_map; 12struct func_map;
13struct func_list; 13struct func_list;
@@ -36,7 +36,7 @@ struct tep_handle {
36 int long_size; 36 int long_size;
37 int page_size; 37 int page_size;
38 38
39 struct cmdline *cmdlines; 39 struct tep_cmdline *cmdlines;
40 struct cmdline_list *cmdlist; 40 struct cmdline_list *cmdlist;
41 int cmdline_count; 41 int cmdline_count;
42 42
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 69a96e39f0ab..abd4fa5d3088 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -124,15 +124,15 @@ struct tep_print_arg *alloc_arg(void)
124 return calloc(1, sizeof(struct tep_print_arg)); 124 return calloc(1, sizeof(struct tep_print_arg));
125} 125}
126 126
127struct cmdline { 127struct tep_cmdline {
128 char *comm; 128 char *comm;
129 int pid; 129 int pid;
130}; 130};
131 131
132static int cmdline_cmp(const void *a, const void *b) 132static int cmdline_cmp(const void *a, const void *b)
133{ 133{
134 const struct cmdline *ca = a; 134 const struct tep_cmdline *ca = a;
135 const struct cmdline *cb = b; 135 const struct tep_cmdline *cb = b;
136 136
137 if (ca->pid < cb->pid) 137 if (ca->pid < cb->pid)
138 return -1; 138 return -1;
@@ -152,7 +152,7 @@ static int cmdline_init(struct tep_handle *pevent)
152{ 152{
153 struct cmdline_list *cmdlist = pevent->cmdlist; 153 struct cmdline_list *cmdlist = pevent->cmdlist;
154 struct cmdline_list *item; 154 struct cmdline_list *item;
155 struct cmdline *cmdlines; 155 struct tep_cmdline *cmdlines;
156 int i; 156 int i;
157 157
158 cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); 158 cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count);
@@ -179,8 +179,8 @@ static int cmdline_init(struct tep_handle *pevent)
179 179
180static const char *find_cmdline(struct tep_handle *pevent, int pid) 180static const char *find_cmdline(struct tep_handle *pevent, int pid)
181{ 181{
182 const struct cmdline *comm; 182 const struct tep_cmdline *comm;
183 struct cmdline key; 183 struct tep_cmdline key;
184 184
185 if (!pid) 185 if (!pid)
186 return "<idle>"; 186 return "<idle>";
@@ -208,8 +208,8 @@ static const char *find_cmdline(struct tep_handle *pevent, int pid)
208 */ 208 */
209int tep_pid_is_registered(struct tep_handle *pevent, int pid) 209int tep_pid_is_registered(struct tep_handle *pevent, int pid)
210{ 210{
211 const struct cmdline *comm; 211 const struct tep_cmdline *comm;
212 struct cmdline key; 212 struct tep_cmdline key;
213 213
214 if (!pid) 214 if (!pid)
215 return 1; 215 return 1;
@@ -232,11 +232,13 @@ int tep_pid_is_registered(struct tep_handle *pevent, int pid)
232 * we must add this pid. This is much slower than when cmdlines 232 * we must add this pid. This is much slower than when cmdlines
233 * are added before the array is initialized. 233 * are added before the array is initialized.
234 */ 234 */
235static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid) 235static int add_new_comm(struct tep_handle *pevent,
236 const char *comm, int pid, bool override)
236{ 237{
237 struct cmdline *cmdlines = pevent->cmdlines; 238 struct tep_cmdline *cmdlines = pevent->cmdlines;
238 const struct cmdline *cmdline; 239 struct tep_cmdline *cmdline;
239 struct cmdline key; 240 struct tep_cmdline key;
241 char *new_comm;
240 242
241 if (!pid) 243 if (!pid)
242 return 0; 244 return 0;
@@ -247,8 +249,19 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
247 cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count, 249 cmdline = bsearch(&key, pevent->cmdlines, pevent->cmdline_count,
248 sizeof(*pevent->cmdlines), cmdline_cmp); 250 sizeof(*pevent->cmdlines), cmdline_cmp);
249 if (cmdline) { 251 if (cmdline) {
250 errno = EEXIST; 252 if (!override) {
251 return -1; 253 errno = EEXIST;
254 return -1;
255 }
256 new_comm = strdup(comm);
257 if (!new_comm) {
258 errno = ENOMEM;
259 return -1;
260 }
261 free(cmdline->comm);
262 cmdline->comm = new_comm;
263
264 return 0;
252 } 265 }
253 266
254 cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1)); 267 cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (pevent->cmdline_count + 1));
@@ -275,21 +288,13 @@ static int add_new_comm(struct tep_handle *pevent, const char *comm, int pid)
275 return 0; 288 return 0;
276} 289}
277 290
278/** 291static int _tep_register_comm(struct tep_handle *pevent,
279 * tep_register_comm - register a pid / comm mapping 292 const char *comm, int pid, bool override)
280 * @pevent: handle for the pevent
281 * @comm: the command line to register
282 * @pid: the pid to map the command line to
283 *
284 * This adds a mapping to search for command line names with
285 * a given pid. The comm is duplicated.
286 */
287int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
288{ 293{
289 struct cmdline_list *item; 294 struct cmdline_list *item;
290 295
291 if (pevent->cmdlines) 296 if (pevent->cmdlines)
292 return add_new_comm(pevent, comm, pid); 297 return add_new_comm(pevent, comm, pid, override);
293 298
294 item = malloc(sizeof(*item)); 299 item = malloc(sizeof(*item));
295 if (!item) 300 if (!item)
@@ -312,6 +317,40 @@ int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
312 return 0; 317 return 0;
313} 318}
314 319
320/**
321 * tep_register_comm - register a pid / comm mapping
322 * @pevent: handle for the pevent
323 * @comm: the command line to register
324 * @pid: the pid to map the command line to
325 *
326 * This adds a mapping to search for command line names with
327 * a given pid. The comm is duplicated. If a command with the same pid
328 * already exists, -1 is returned and errno is set to EEXIST
329 */
330int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid)
331{
332 return _tep_register_comm(pevent, comm, pid, false);
333}
334
335/**
336 * tep_override_comm - register a pid / comm mapping
337 * @pevent: handle for the pevent
338 * @comm: the command line to register
339 * @pid: the pid to map the command line to
340 *
341 * This adds a mapping to search for command line names with
342 * a given pid. The comm is duplicated. If a command with the same pid
343 * already exists, the command string is updated with the new one
344 */
345int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid)
346{
347 if (!pevent->cmdlines && cmdline_init(pevent)) {
348 errno = ENOMEM;
349 return -1;
350 }
351 return _tep_register_comm(pevent, comm, pid, true);
352}
353
315int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock) 354int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock)
316{ 355{
317 pevent->trace_clock = strdup(trace_clock); 356 pevent->trace_clock = strdup(trace_clock);
@@ -5227,18 +5266,6 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
5227} 5266}
5228 5267
5229/** 5268/**
5230 * tep_data_event_from_type - find the event by a given type
5231 * @pevent: a handle to the pevent
5232 * @type: the type of the event.
5233 *
5234 * This returns the event form a given @type;
5235 */
5236struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type)
5237{
5238 return tep_find_event(pevent, type);
5239}
5240
5241/**
5242 * tep_data_pid - parse the PID from record 5269 * tep_data_pid - parse the PID from record
5243 * @pevent: a handle to the pevent 5270 * @pevent: a handle to the pevent
5244 * @rec: the record to parse 5271 * @rec: the record to parse
@@ -5292,8 +5319,8 @@ const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid)
5292 return comm; 5319 return comm;
5293} 5320}
5294 5321
5295static struct cmdline * 5322static struct tep_cmdline *
5296pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *next) 5323pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct tep_cmdline *next)
5297{ 5324{
5298 struct cmdline_list *cmdlist = (struct cmdline_list *)next; 5325 struct cmdline_list *cmdlist = (struct cmdline_list *)next;
5299 5326
@@ -5305,7 +5332,7 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
5305 while (cmdlist && strcmp(cmdlist->comm, comm) != 0) 5332 while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
5306 cmdlist = cmdlist->next; 5333 cmdlist = cmdlist->next;
5307 5334
5308 return (struct cmdline *)cmdlist; 5335 return (struct tep_cmdline *)cmdlist;
5309} 5336}
5310 5337
5311/** 5338/**
@@ -5321,10 +5348,10 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
5321 * next pid. 5348 * next pid.
5322 * Also, it does a linear search, so it may be slow. 5349 * Also, it does a linear search, so it may be slow.
5323 */ 5350 */
5324struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 5351struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
5325 struct cmdline *next) 5352 struct tep_cmdline *next)
5326{ 5353{
5327 struct cmdline *cmdline; 5354 struct tep_cmdline *cmdline;
5328 5355
5329 /* 5356 /*
5330 * If the cmdlines have not been converted yet, then use 5357 * If the cmdlines have not been converted yet, then use
@@ -5363,7 +5390,7 @@ struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *co
5363 * Returns the pid for a give cmdline. If @cmdline is NULL, then 5390 * Returns the pid for a give cmdline. If @cmdline is NULL, then
5364 * -1 is returned. 5391 * -1 is returned.
5365 */ 5392 */
5366int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline) 5393int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline)
5367{ 5394{
5368 struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline; 5395 struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
5369 5396
@@ -6593,6 +6620,12 @@ static struct tep_event *search_event(struct tep_handle *pevent, int id,
6593 * 6620 *
6594 * If @id is >= 0, then it is used to find the event. 6621 * If @id is >= 0, then it is used to find the event.
6595 * else @sys_name and @event_name are used. 6622 * else @sys_name and @event_name are used.
6623 *
6624 * Returns:
6625 * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten
6626 * TEP_REGISTER_SUCCESS if a new handler is registered successfully
6627 * negative TEP_ERRNO_... in case of an error
6628 *
6596 */ 6629 */
6597int tep_register_event_handler(struct tep_handle *pevent, int id, 6630int tep_register_event_handler(struct tep_handle *pevent, int id,
6598 const char *sys_name, const char *event_name, 6631 const char *sys_name, const char *event_name,
@@ -6610,7 +6643,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
6610 6643
6611 event->handler = func; 6644 event->handler = func;
6612 event->context = context; 6645 event->context = context;
6613 return 0; 6646 return TEP_REGISTER_SUCCESS_OVERWRITE;
6614 6647
6615 not_found: 6648 not_found:
6616 /* Save for later use. */ 6649 /* Save for later use. */
@@ -6640,7 +6673,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
6640 pevent->handlers = handle; 6673 pevent->handlers = handle;
6641 handle->context = context; 6674 handle->context = context;
6642 6675
6643 return -1; 6676 return TEP_REGISTER_SUCCESS;
6644} 6677}
6645 6678
6646static int handle_matches(struct event_handler *handler, int id, 6679static int handle_matches(struct event_handler *handler, int id,
@@ -6723,8 +6756,10 @@ struct tep_handle *tep_alloc(void)
6723{ 6756{
6724 struct tep_handle *pevent = calloc(1, sizeof(*pevent)); 6757 struct tep_handle *pevent = calloc(1, sizeof(*pevent));
6725 6758
6726 if (pevent) 6759 if (pevent) {
6727 pevent->ref_count = 1; 6760 pevent->ref_count = 1;
6761 pevent->host_bigendian = tep_host_bigendian();
6762 }
6728 6763
6729 return pevent; 6764 return pevent;
6730} 6765}
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 35d37087d3c5..aec48f2aea8a 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -432,6 +432,7 @@ int tep_set_function_resolver(struct tep_handle *pevent,
432 tep_func_resolver_t *func, void *priv); 432 tep_func_resolver_t *func, void *priv);
433void tep_reset_function_resolver(struct tep_handle *pevent); 433void tep_reset_function_resolver(struct tep_handle *pevent);
434int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid); 434int tep_register_comm(struct tep_handle *pevent, const char *comm, int pid);
435int tep_override_comm(struct tep_handle *pevent, const char *comm, int pid);
435int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock); 436int tep_register_trace_clock(struct tep_handle *pevent, const char *trace_clock);
436int tep_register_function(struct tep_handle *pevent, char *name, 437int tep_register_function(struct tep_handle *pevent, char *name,
437 unsigned long long addr, char *mod); 438 unsigned long long addr, char *mod);
@@ -484,6 +485,11 @@ int tep_print_func_field(struct trace_seq *s, const char *fmt,
484 struct tep_event *event, const char *name, 485 struct tep_event *event, const char *name,
485 struct tep_record *record, int err); 486 struct tep_record *record, int err);
486 487
488enum tep_reg_handler {
489 TEP_REGISTER_SUCCESS = 0,
490 TEP_REGISTER_SUCCESS_OVERWRITE,
491};
492
487int tep_register_event_handler(struct tep_handle *pevent, int id, 493int tep_register_event_handler(struct tep_handle *pevent, int id,
488 const char *sys_name, const char *event_name, 494 const char *sys_name, const char *event_name,
489 tep_event_handler_func func, void *context); 495 tep_event_handler_func func, void *context);
@@ -520,15 +526,14 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
520void tep_data_lat_fmt(struct tep_handle *pevent, 526void tep_data_lat_fmt(struct tep_handle *pevent,
521 struct trace_seq *s, struct tep_record *record); 527 struct trace_seq *s, struct tep_record *record);
522int tep_data_type(struct tep_handle *pevent, struct tep_record *rec); 528int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
523struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type);
524int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec); 529int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
525int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec); 530int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
526int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec); 531int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
527const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid); 532const char *tep_data_comm_from_pid(struct tep_handle *pevent, int pid);
528struct cmdline; 533struct tep_cmdline;
529struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm, 534struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
530 struct cmdline *next); 535 struct tep_cmdline *next);
531int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline); 536int tep_cmdline_pid(struct tep_handle *pevent, struct tep_cmdline *cmdline);
532 537
533void tep_print_field(struct trace_seq *s, void *data, 538void tep_print_field(struct trace_seq *s, void *data,
534 struct tep_format_field *field); 539 struct tep_format_field *field);
@@ -553,7 +558,7 @@ int tep_get_long_size(struct tep_handle *pevent);
553void tep_set_long_size(struct tep_handle *pevent, int long_size); 558void tep_set_long_size(struct tep_handle *pevent, int long_size);
554int tep_get_page_size(struct tep_handle *pevent); 559int tep_get_page_size(struct tep_handle *pevent);
555void tep_set_page_size(struct tep_handle *pevent, int _page_size); 560void tep_set_page_size(struct tep_handle *pevent, int _page_size);
556int tep_is_file_bigendian(struct tep_handle *pevent); 561int tep_file_bigendian(struct tep_handle *pevent);
557void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian); 562void tep_set_file_bigendian(struct tep_handle *pevent, enum tep_endian endian);
558int tep_is_host_bigendian(struct tep_handle *pevent); 563int tep_is_host_bigendian(struct tep_handle *pevent);
559void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian); 564void tep_set_host_bigendian(struct tep_handle *pevent, enum tep_endian endian);
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index 754050eea467..64b9c25a1fd3 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -389,7 +389,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
389 * We can only use the structure if file is of the same 389 * We can only use the structure if file is of the same
390 * endianness. 390 * endianness.
391 */ 391 */
392 if (tep_is_file_bigendian(event->pevent) == 392 if (tep_file_bigendian(event->pevent) ==
393 tep_is_host_bigendian(event->pevent)) { 393 tep_is_host_bigendian(event->pevent)) {
394 394
395 trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s", 395 trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index 8ff1d55954d1..8d5ecd2bf877 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -100,7 +100,8 @@ static void expand_buffer(struct trace_seq *s)
100 * @fmt: printf format string 100 * @fmt: printf format string
101 * 101 *
102 * It returns 0 if the trace oversizes the buffer's free 102 * It returns 0 if the trace oversizes the buffer's free
103 * space, 1 otherwise. 103 * space, the number of characters printed, or a negative
104 * value in case of an error.
104 * 105 *
105 * The tracer may use either sequence operations or its own 106 * The tracer may use either sequence operations or its own
106 * copy to user routines. To simplify formating of a trace 107 * copy to user routines. To simplify formating of a trace
@@ -129,9 +130,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
129 goto try_again; 130 goto try_again;
130 } 131 }
131 132
132 s->len += ret; 133 if (ret > 0)
134 s->len += ret;
133 135
134 return 1; 136 return ret;
135} 137}
136 138
137/** 139/**
@@ -139,6 +141,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
139 * @s: trace sequence descriptor 141 * @s: trace sequence descriptor
140 * @fmt: printf format string 142 * @fmt: printf format string
141 * 143 *
144 * It returns 0 if the trace oversizes the buffer's free
145 * space, the number of characters printed, or a negative
146 * value in case of an error.
147 * *
142 * The tracer may use either sequence operations or its own 148 * The tracer may use either sequence operations or its own
143 * copy to user routines. To simplify formating of a trace 149 * copy to user routines. To simplify formating of a trace
144 * trace_seq_printf is used to store strings into a special 150 * trace_seq_printf is used to store strings into a special
@@ -163,9 +169,10 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
163 goto try_again; 169 goto try_again;
164 } 170 }
165 171
166 s->len += ret; 172 if (ret > 0)
173 s->len += ret;
167 174
168 return len; 175 return ret;
169} 176}
170 177
171/** 178/**
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index ff29c3372ec3..0ee6795d82cc 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -524,12 +524,14 @@ $(arch_errno_name_array): $(arch_errno_tbl)
524 524
525all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) 525all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
526 526
527# Create python binding output directory if not already present
528_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
529
527$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) 530$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
528 $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \ 531 $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
529 CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \ 532 CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
530 $(PYTHON_WORD) util/setup.py \ 533 $(PYTHON_WORD) util/setup.py \
531 --quiet build_ext; \ 534 --quiet build_ext; \
532 mkdir -p $(OUTPUT)python && \
533 cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/ 535 cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
534 536
535please_set_SHELL_PATH_to_a_more_modern_shell: 537please_set_SHELL_PATH_to_a_more_modern_shell:
@@ -660,12 +662,12 @@ $(OUTPUT)perf-%: %.o $(PERFLIBS)
660 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS) 662 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $(LDFLAGS) $(filter %.o,$^) $(LIBS)
661 663
662ifndef NO_PERF_READ_VDSO32 664ifndef NO_PERF_READ_VDSO32
663$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-vdso-map.c 665$(OUTPUT)perf-read-vdso32: perf-read-vdso.c util/find-map.c
664 $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c 666 $(QUIET_CC)$(CC) -m32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
665endif 667endif
666 668
667ifndef NO_PERF_READ_VDSOX32 669ifndef NO_PERF_READ_VDSOX32
668$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c 670$(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c
669 $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c 671 $(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
670endif 672endif
671 673
diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build
index 883c57ff0c08..d9ae2733f9cc 100644
--- a/tools/perf/arch/arm/tests/Build
+++ b/tools/perf/arch/arm/tests/Build
@@ -1,4 +1,5 @@
1libperf-y += regs_load.o 1libperf-y += regs_load.o
2libperf-y += dwarf-unwind.o 2libperf-y += dwarf-unwind.o
3libperf-y += vectors-page.o
3 4
4libperf-y += arch-tests.o 5libperf-y += arch-tests.o
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c
index 5b1543c98022..6848101a855f 100644
--- a/tools/perf/arch/arm/tests/arch-tests.c
+++ b/tools/perf/arch/arm/tests/arch-tests.c
@@ -11,6 +11,10 @@ struct test arch_tests[] = {
11 }, 11 },
12#endif 12#endif
13 { 13 {
14 .desc = "Vectors page",
15 .func = test__vectors_page,
16 },
17 {
14 .func = NULL, 18 .func = NULL,
15 }, 19 },
16}; 20};
diff --git a/tools/perf/arch/arm/tests/vectors-page.c b/tools/perf/arch/arm/tests/vectors-page.c
new file mode 100644
index 000000000000..7ffdd79971c8
--- /dev/null
+++ b/tools/perf/arch/arm/tests/vectors-page.c
@@ -0,0 +1,24 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <stdio.h>
3#include <string.h>
4#include <linux/compiler.h>
5
6#include "debug.h"
7#include "tests/tests.h"
8#include "util/find-map.c"
9
10#define VECTORS__MAP_NAME "[vectors]"
11
12int test__vectors_page(struct test *test __maybe_unused,
13 int subtest __maybe_unused)
14{
15 void *start, *end;
16
17 if (find_map(&start, &end, VECTORS__MAP_NAME)) {
18 pr_err("%s not found, is CONFIG_KUSER_HELPERS enabled?\n",
19 VECTORS__MAP_NAME);
20 return TEST_FAIL;
21 }
22
23 return TEST_OK;
24}
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index a111239df182..e58d00d62f02 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -14,18 +14,25 @@ PERF_HAVE_JITDUMP := 1
14out := $(OUTPUT)arch/powerpc/include/generated/asm 14out := $(OUTPUT)arch/powerpc/include/generated/asm
15header32 := $(out)/syscalls_32.c 15header32 := $(out)/syscalls_32.c
16header64 := $(out)/syscalls_64.c 16header64 := $(out)/syscalls_64.c
17sysdef := $(srctree)/tools/arch/powerpc/include/uapi/asm/unistd.h 17syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl
18sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls/ 18sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls
19sysdef := $(sysprf)/syscall.tbl
19systbl := $(sysprf)/mksyscalltbl 20systbl := $(sysprf)/mksyscalltbl
20 21
21# Create output directory if not already present 22# Create output directory if not already present
22_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') 23_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
23 24
24$(header64): $(sysdef) $(systbl) 25$(header64): $(sysdef) $(systbl)
25 $(Q)$(SHELL) '$(systbl)' '64' '$(CC)' $(sysdef) > $@ 26 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
27 (diff -B $(sysdef) $(syskrn) >/dev/null) \
28 || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
29 $(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@
26 30
27$(header32): $(sysdef) $(systbl) 31$(header32): $(sysdef) $(systbl)
28 $(Q)$(SHELL) '$(systbl)' '32' '$(CC)' $(sysdef) > $@ 32 @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
33 (diff -B $(sysdef) $(syskrn) >/dev/null) \
34 || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
35 $(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@
29 36
30clean:: 37clean::
31 $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64) 38 $(call QUIET_CLEAN, powerpc) $(RM) $(header32) $(header64)
diff --git a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
index ef52e1dd694b..6c58060aa03b 100755
--- a/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/mksyscalltbl
@@ -9,10 +9,9 @@
9# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com> 9# Changed by: Ravi Bangoria <ravi.bangoria@linux.vnet.ibm.com>
10 10
11wordsize=$1 11wordsize=$1
12gcc=$2 12SYSCALL_TBL=$2
13input=$3
14 13
15if ! test -r $input; then 14if ! test -r $SYSCALL_TBL; then
16 echo "Could not read input file" >&2 15 echo "Could not read input file" >&2
17 exit 1 16 exit 1
18fi 17fi
@@ -20,18 +19,21 @@ fi
20create_table() 19create_table()
21{ 20{
22 local wordsize=$1 21 local wordsize=$1
23 local max_nr 22 local max_nr nr abi sc discard
23 max_nr=-1
24 nr=0
24 25
25 echo "static const char *syscalltbl_powerpc_${wordsize}[] = {" 26 echo "static const char *syscalltbl_powerpc_${wordsize}[] = {"
26 while read sc nr; do 27 while read nr abi sc discard; do
27 printf '\t[%d] = "%s",\n' $nr $sc 28 if [ "$max_nr" -lt "$nr" ]; then
28 max_nr=$nr 29 printf '\t[%d] = "%s",\n' $nr $sc
30 max_nr=$nr
31 fi
29 done 32 done
30 echo '};' 33 echo '};'
31 echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr" 34 echo "#define SYSCALLTBL_POWERPC_${wordsize}_MAX_ID $max_nr"
32} 35}
33 36
34$gcc -m${wordsize} -E -dM -x c $input \ 37grep -E "^[[:digit:]]+[[:space:]]+(common|spu|nospu|${wordsize})" $SYSCALL_TBL \
35 |sed -ne 's/^#define __NR_//p' \ 38 |sort -k1 -n \
36 |sort -t' ' -k2 -nu \
37 |create_table ${wordsize} 39 |create_table ${wordsize}
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
new file mode 100644
index 000000000000..db3bbb8744af
--- /dev/null
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -0,0 +1,427 @@
1# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
2#
3# system call numbers and entry vectors for powerpc
4#
5# The format is:
6# <number> <abi> <name> <entry point> <compat entry point>
7#
8# The <abi> can be common, spu, nospu, 64, or 32 for this file.
9#
100 nospu restart_syscall sys_restart_syscall
111 nospu exit sys_exit
122 nospu fork ppc_fork
133 common read sys_read
144 common write sys_write
155 common open sys_open compat_sys_open
166 common close sys_close
177 common waitpid sys_waitpid
188 common creat sys_creat
199 common link sys_link
2010 common unlink sys_unlink
2111 nospu execve sys_execve compat_sys_execve
2212 common chdir sys_chdir
2313 common time sys_time compat_sys_time
2414 common mknod sys_mknod
2515 common chmod sys_chmod
2616 common lchown sys_lchown
2717 common break sys_ni_syscall
2818 32 oldstat sys_stat sys_ni_syscall
2918 64 oldstat sys_ni_syscall
3018 spu oldstat sys_ni_syscall
3119 common lseek sys_lseek compat_sys_lseek
3220 common getpid sys_getpid
3321 nospu mount sys_mount compat_sys_mount
3422 32 umount sys_oldumount
3522 64 umount sys_ni_syscall
3622 spu umount sys_ni_syscall
3723 common setuid sys_setuid
3824 common getuid sys_getuid
3925 common stime sys_stime compat_sys_stime
4026 nospu ptrace sys_ptrace compat_sys_ptrace
4127 common alarm sys_alarm
4228 32 oldfstat sys_fstat sys_ni_syscall
4328 64 oldfstat sys_ni_syscall
4428 spu oldfstat sys_ni_syscall
4529 nospu pause sys_pause
4630 nospu utime sys_utime compat_sys_utime
4731 common stty sys_ni_syscall
4832 common gtty sys_ni_syscall
4933 common access sys_access
5034 common nice sys_nice
5135 common ftime sys_ni_syscall
5236 common sync sys_sync
5337 common kill sys_kill
5438 common rename sys_rename
5539 common mkdir sys_mkdir
5640 common rmdir sys_rmdir
5741 common dup sys_dup
5842 common pipe sys_pipe
5943 common times sys_times compat_sys_times
6044 common prof sys_ni_syscall
6145 common brk sys_brk
6246 common setgid sys_setgid
6347 common getgid sys_getgid
6448 nospu signal sys_signal
6549 common geteuid sys_geteuid
6650 common getegid sys_getegid
6751 nospu acct sys_acct
6852 nospu umount2 sys_umount
6953 common lock sys_ni_syscall
7054 common ioctl sys_ioctl compat_sys_ioctl
7155 common fcntl sys_fcntl compat_sys_fcntl
7256 common mpx sys_ni_syscall
7357 common setpgid sys_setpgid
7458 common ulimit sys_ni_syscall
7559 32 oldolduname sys_olduname
7659 64 oldolduname sys_ni_syscall
7759 spu oldolduname sys_ni_syscall
7860 common umask sys_umask
7961 common chroot sys_chroot
8062 nospu ustat sys_ustat compat_sys_ustat
8163 common dup2 sys_dup2
8264 common getppid sys_getppid
8365 common getpgrp sys_getpgrp
8466 common setsid sys_setsid
8567 32 sigaction sys_sigaction compat_sys_sigaction
8667 64 sigaction sys_ni_syscall
8767 spu sigaction sys_ni_syscall
8868 common sgetmask sys_sgetmask
8969 common ssetmask sys_ssetmask
9070 common setreuid sys_setreuid
9171 common setregid sys_setregid
9272 32 sigsuspend sys_sigsuspend
9372 64 sigsuspend sys_ni_syscall
9472 spu sigsuspend sys_ni_syscall
9573 32 sigpending sys_sigpending compat_sys_sigpending
9673 64 sigpending sys_ni_syscall
9773 spu sigpending sys_ni_syscall
9874 common sethostname sys_sethostname
9975 common setrlimit sys_setrlimit compat_sys_setrlimit
10076 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
10176 64 getrlimit sys_ni_syscall
10276 spu getrlimit sys_ni_syscall
10377 common getrusage sys_getrusage compat_sys_getrusage
10478 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
10579 common settimeofday sys_settimeofday compat_sys_settimeofday
10680 common getgroups sys_getgroups
10781 common setgroups sys_setgroups
10882 32 select ppc_select sys_ni_syscall
10982 64 select sys_ni_syscall
11082 spu select sys_ni_syscall
11183 common symlink sys_symlink
11284 32 oldlstat sys_lstat sys_ni_syscall
11384 64 oldlstat sys_ni_syscall
11484 spu oldlstat sys_ni_syscall
11585 common readlink sys_readlink
11686 nospu uselib sys_uselib
11787 nospu swapon sys_swapon
11888 nospu reboot sys_reboot
11989 32 readdir sys_old_readdir compat_sys_old_readdir
12089 64 readdir sys_ni_syscall
12189 spu readdir sys_ni_syscall
12290 common mmap sys_mmap
12391 common munmap sys_munmap
12492 common truncate sys_truncate compat_sys_truncate
12593 common ftruncate sys_ftruncate compat_sys_ftruncate
12694 common fchmod sys_fchmod
12795 common fchown sys_fchown
12896 common getpriority sys_getpriority
12997 common setpriority sys_setpriority
13098 common profil sys_ni_syscall
13199 nospu statfs sys_statfs compat_sys_statfs
132100 nospu fstatfs sys_fstatfs compat_sys_fstatfs
133101 common ioperm sys_ni_syscall
134102 common socketcall sys_socketcall compat_sys_socketcall
135103 common syslog sys_syslog
136104 common setitimer sys_setitimer compat_sys_setitimer
137105 common getitimer sys_getitimer compat_sys_getitimer
138106 common stat sys_newstat compat_sys_newstat
139107 common lstat sys_newlstat compat_sys_newlstat
140108 common fstat sys_newfstat compat_sys_newfstat
141109 32 olduname sys_uname
142109 64 olduname sys_ni_syscall
143109 spu olduname sys_ni_syscall
144110 common iopl sys_ni_syscall
145111 common vhangup sys_vhangup
146112 common idle sys_ni_syscall
147113 common vm86 sys_ni_syscall
148114 common wait4 sys_wait4 compat_sys_wait4
149115 nospu swapoff sys_swapoff
150116 common sysinfo sys_sysinfo compat_sys_sysinfo
151117 nospu ipc sys_ipc compat_sys_ipc
152118 common fsync sys_fsync
153119 32 sigreturn sys_sigreturn compat_sys_sigreturn
154119 64 sigreturn sys_ni_syscall
155119 spu sigreturn sys_ni_syscall
156120 nospu clone ppc_clone
157121 common setdomainname sys_setdomainname
158122 common uname sys_newuname
159123 common modify_ldt sys_ni_syscall
160124 common adjtimex sys_adjtimex compat_sys_adjtimex
161125 common mprotect sys_mprotect
162126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
163126 64 sigprocmask sys_ni_syscall
164126 spu sigprocmask sys_ni_syscall
165127 common create_module sys_ni_syscall
166128 nospu init_module sys_init_module
167129 nospu delete_module sys_delete_module
168130 common get_kernel_syms sys_ni_syscall
169131 nospu quotactl sys_quotactl
170132 common getpgid sys_getpgid
171133 common fchdir sys_fchdir
172134 common bdflush sys_bdflush
173135 common sysfs sys_sysfs
174136 32 personality sys_personality ppc64_personality
175136 64 personality ppc64_personality
176136 spu personality ppc64_personality
177137 common afs_syscall sys_ni_syscall
178138 common setfsuid sys_setfsuid
179139 common setfsgid sys_setfsgid
180140 common _llseek sys_llseek
181141 common getdents sys_getdents compat_sys_getdents
182142 common _newselect sys_select compat_sys_select
183143 common flock sys_flock
184144 common msync sys_msync
185145 common readv sys_readv compat_sys_readv
186146 common writev sys_writev compat_sys_writev
187147 common getsid sys_getsid
188148 common fdatasync sys_fdatasync
189149 nospu _sysctl sys_sysctl compat_sys_sysctl
190150 common mlock sys_mlock
191151 common munlock sys_munlock
192152 common mlockall sys_mlockall
193153 common munlockall sys_munlockall
194154 common sched_setparam sys_sched_setparam
195155 common sched_getparam sys_sched_getparam
196156 common sched_setscheduler sys_sched_setscheduler
197157 common sched_getscheduler sys_sched_getscheduler
198158 common sched_yield sys_sched_yield
199159 common sched_get_priority_max sys_sched_get_priority_max
200160 common sched_get_priority_min sys_sched_get_priority_min
201161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
202162 common nanosleep sys_nanosleep compat_sys_nanosleep
203163 common mremap sys_mremap
204164 common setresuid sys_setresuid
205165 common getresuid sys_getresuid
206166 common query_module sys_ni_syscall
207167 common poll sys_poll
208168 common nfsservctl sys_ni_syscall
209169 common setresgid sys_setresgid
210170 common getresgid sys_getresgid
211171 common prctl sys_prctl
212172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
213173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
214174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
215175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
216176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
217177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
218178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
219179 common pread64 sys_pread64 compat_sys_pread64
220180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
221181 common chown sys_chown
222182 common getcwd sys_getcwd
223183 common capget sys_capget
224184 common capset sys_capset
225185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack
226186 32 sendfile sys_sendfile compat_sys_sendfile
227186 64 sendfile sys_sendfile64
228186 spu sendfile sys_sendfile64
229187 common getpmsg sys_ni_syscall
230188 common putpmsg sys_ni_syscall
231189 nospu vfork ppc_vfork
232190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
233191 common readahead sys_readahead compat_sys_readahead
234192 32 mmap2 sys_mmap2 compat_sys_mmap2
235193 32 truncate64 sys_truncate64 compat_sys_truncate64
236194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
237195 32 stat64 sys_stat64
238196 32 lstat64 sys_lstat64
239197 32 fstat64 sys_fstat64
240198 nospu pciconfig_read sys_pciconfig_read
241199 nospu pciconfig_write sys_pciconfig_write
242200 nospu pciconfig_iobase sys_pciconfig_iobase
243201 common multiplexer sys_ni_syscall
244202 common getdents64 sys_getdents64
245203 common pivot_root sys_pivot_root
246204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
247205 common madvise sys_madvise
248206 common mincore sys_mincore
249207 common gettid sys_gettid
250208 common tkill sys_tkill
251209 common setxattr sys_setxattr
252210 common lsetxattr sys_lsetxattr
253211 common fsetxattr sys_fsetxattr
254212 common getxattr sys_getxattr
255213 common lgetxattr sys_lgetxattr
256214 common fgetxattr sys_fgetxattr
257215 common listxattr sys_listxattr
258216 common llistxattr sys_llistxattr
259217 common flistxattr sys_flistxattr
260218 common removexattr sys_removexattr
261219 common lremovexattr sys_lremovexattr
262220 common fremovexattr sys_fremovexattr
263221 common futex sys_futex compat_sys_futex
264222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
265223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
266# 224 unused
267225 common tuxcall sys_ni_syscall
268226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64
269227 common io_setup sys_io_setup compat_sys_io_setup
270228 common io_destroy sys_io_destroy
271229 common io_getevents sys_io_getevents compat_sys_io_getevents
272230 common io_submit sys_io_submit compat_sys_io_submit
273231 common io_cancel sys_io_cancel
274232 nospu set_tid_address sys_set_tid_address
275233 common fadvise64 sys_fadvise64 ppc32_fadvise64
276234 nospu exit_group sys_exit_group
277235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
278236 common epoll_create sys_epoll_create
279237 common epoll_ctl sys_epoll_ctl
280238 common epoll_wait sys_epoll_wait
281239 common remap_file_pages sys_remap_file_pages
282240 common timer_create sys_timer_create compat_sys_timer_create
283241 common timer_settime sys_timer_settime compat_sys_timer_settime
284242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
285243 common timer_getoverrun sys_timer_getoverrun
286244 common timer_delete sys_timer_delete
287245 common clock_settime sys_clock_settime compat_sys_clock_settime
288246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
289247 common clock_getres sys_clock_getres compat_sys_clock_getres
290248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
291249 32 swapcontext ppc_swapcontext ppc32_swapcontext
292249 64 swapcontext ppc64_swapcontext
293249 spu swapcontext sys_ni_syscall
294250 common tgkill sys_tgkill
295251 common utimes sys_utimes compat_sys_utimes
296252 common statfs64 sys_statfs64 compat_sys_statfs64
297253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
298254 32 fadvise64_64 ppc_fadvise64_64
299254 spu fadvise64_64 sys_ni_syscall
300255 common rtas sys_rtas
301256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
302256 64 sys_debug_setcontext sys_ni_syscall
303256 spu sys_debug_setcontext sys_ni_syscall
304# 257 reserved for vserver
305258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
306259 nospu mbind sys_mbind compat_sys_mbind
307260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
308261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
309262 nospu mq_open sys_mq_open compat_sys_mq_open
310263 nospu mq_unlink sys_mq_unlink
311264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
312265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
313266 nospu mq_notify sys_mq_notify compat_sys_mq_notify
314267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
315268 nospu kexec_load sys_kexec_load compat_sys_kexec_load
316269 nospu add_key sys_add_key
317270 nospu request_key sys_request_key
318271 nospu keyctl sys_keyctl compat_sys_keyctl
319272 nospu waitid sys_waitid compat_sys_waitid
320273 nospu ioprio_set sys_ioprio_set
321274 nospu ioprio_get sys_ioprio_get
322275 nospu inotify_init sys_inotify_init
323276 nospu inotify_add_watch sys_inotify_add_watch
324277 nospu inotify_rm_watch sys_inotify_rm_watch
325278 nospu spu_run sys_spu_run
326279 nospu spu_create sys_spu_create
327280 nospu pselect6 sys_pselect6 compat_sys_pselect6
328281 nospu ppoll sys_ppoll compat_sys_ppoll
329282 common unshare sys_unshare
330283 common splice sys_splice
331284 common tee sys_tee
332285 common vmsplice sys_vmsplice compat_sys_vmsplice
333286 common openat sys_openat compat_sys_openat
334287 common mkdirat sys_mkdirat
335288 common mknodat sys_mknodat
336289 common fchownat sys_fchownat
337290 common futimesat sys_futimesat compat_sys_futimesat
338291 32 fstatat64 sys_fstatat64
339291 64 newfstatat sys_newfstatat
340291 spu newfstatat sys_newfstatat
341292 common unlinkat sys_unlinkat
342293 common renameat sys_renameat
343294 common linkat sys_linkat
344295 common symlinkat sys_symlinkat
345296 common readlinkat sys_readlinkat
346297 common fchmodat sys_fchmodat
347298 common faccessat sys_faccessat
348299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
349300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
350301 common move_pages sys_move_pages compat_sys_move_pages
351302 common getcpu sys_getcpu
352303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
353304 common utimensat sys_utimensat compat_sys_utimensat
354305 common signalfd sys_signalfd compat_sys_signalfd
355306 common timerfd_create sys_timerfd_create
356307 common eventfd sys_eventfd
357308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
358309 nospu fallocate sys_fallocate compat_sys_fallocate
359310 nospu subpage_prot sys_subpage_prot
360311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
361312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
362313 common signalfd4 sys_signalfd4 compat_sys_signalfd4
363314 common eventfd2 sys_eventfd2
364315 common epoll_create1 sys_epoll_create1
365316 common dup3 sys_dup3
366317 common pipe2 sys_pipe2
367318 nospu inotify_init1 sys_inotify_init1
368319 common perf_event_open sys_perf_event_open
369320 common preadv sys_preadv compat_sys_preadv
370321 common pwritev sys_pwritev compat_sys_pwritev
371322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
372323 nospu fanotify_init sys_fanotify_init
373324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
374325 common prlimit64 sys_prlimit64
375326 common socket sys_socket
376327 common bind sys_bind
377328 common connect sys_connect
378329 common listen sys_listen
379330 common accept sys_accept
380331 common getsockname sys_getsockname
381332 common getpeername sys_getpeername
382333 common socketpair sys_socketpair
383334 common send sys_send
384335 common sendto sys_sendto
385336 common recv sys_recv compat_sys_recv
386337 common recvfrom sys_recvfrom compat_sys_recvfrom
387338 common shutdown sys_shutdown
388339 common setsockopt sys_setsockopt compat_sys_setsockopt
389340 common getsockopt sys_getsockopt compat_sys_getsockopt
390341 common sendmsg sys_sendmsg compat_sys_sendmsg
391342 common recvmsg sys_recvmsg compat_sys_recvmsg
392343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
393344 common accept4 sys_accept4
394345 common name_to_handle_at sys_name_to_handle_at
395346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
396347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
397348 common syncfs sys_syncfs
398349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
399350 common setns sys_setns
400351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
401352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
402353 nospu finit_module sys_finit_module
403354 nospu kcmp sys_kcmp
404355 common sched_setattr sys_sched_setattr
405356 common sched_getattr sys_sched_getattr
406357 common renameat2 sys_renameat2
407358 common seccomp sys_seccomp
408359 common getrandom sys_getrandom
409360 common memfd_create sys_memfd_create
410361 common bpf sys_bpf
411362 nospu execveat sys_execveat compat_sys_execveat
412363 32 switch_endian sys_ni_syscall
413363 64 switch_endian ppc_switch_endian
414363 spu switch_endian sys_ni_syscall
415364 common userfaultfd sys_userfaultfd
416365 common membarrier sys_membarrier
417378 nospu mlock2 sys_mlock2
418379 nospu copy_file_range sys_copy_file_range
419380 common preadv2 sys_preadv2 compat_sys_preadv2
420381 common pwritev2 sys_pwritev2 compat_sys_pwritev2
421382 nospu kexec_file_load sys_kexec_file_load
422383 nospu statx sys_statx
423384 nospu pkey_alloc sys_pkey_alloc
424385 nospu pkey_free sys_pkey_free
425386 nospu pkey_mprotect sys_pkey_mprotect
426387 nospu rseq sys_rseq
427388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 1076393e6f43..e18a3556f5e3 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -63,7 +63,8 @@ static const char *reg_names[] = {
63 [PERF_REG_POWERPC_TRAP] = "trap", 63 [PERF_REG_POWERPC_TRAP] = "trap",
64 [PERF_REG_POWERPC_DAR] = "dar", 64 [PERF_REG_POWERPC_DAR] = "dar",
65 [PERF_REG_POWERPC_DSISR] = "dsisr", 65 [PERF_REG_POWERPC_DSISR] = "dsisr",
66 [PERF_REG_POWERPC_SIER] = "sier" 66 [PERF_REG_POWERPC_SIER] = "sier",
67 [PERF_REG_POWERPC_MMCRA] = "mmcra"
67}; 68};
68 69
69static inline const char *perf_reg_name(int id) 70static inline const char *perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 07fcd977d93e..34d5134681d9 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -53,6 +53,7 @@ const struct sample_reg sample_reg_masks[] = {
53 SMPL_REG(dar, PERF_REG_POWERPC_DAR), 53 SMPL_REG(dar, PERF_REG_POWERPC_DAR),
54 SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), 54 SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
55 SMPL_REG(sier, PERF_REG_POWERPC_SIER), 55 SMPL_REG(sier, PERF_REG_POWERPC_SIER),
56 SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
56 SMPL_REG_END 57 SMPL_REG_END
57}; 58};
58 59
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 1410d66192f7..63a3afc7f32b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -561,7 +561,8 @@ try_again:
561 break; 561 break;
562 } 562 }
563 } 563 }
564 wait4(child_pid, &status, 0, &stat_config.ru_data); 564 if (child_pid != -1)
565 wait4(child_pid, &status, 0, &stat_config.ru_data);
565 566
566 if (workload_exec_errno) { 567 if (workload_exec_errno) {
567 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); 568 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe3ecfb2e64b..f64e312db787 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1028,12 +1028,7 @@ out_err:
1028 1028
1029static int callchain_param__setup_sample_type(struct callchain_param *callchain) 1029static int callchain_param__setup_sample_type(struct callchain_param *callchain)
1030{ 1030{
1031 if (!perf_hpp_list.sym) { 1031 if (callchain->mode != CHAIN_NONE) {
1032 if (callchain->enabled) {
1033 ui__error("Selected -g but \"sym\" not present in --sort/-s.");
1034 return -EINVAL;
1035 }
1036 } else if (callchain->mode != CHAIN_NONE) {
1037 if (callchain_register_param(callchain) < 0) { 1032 if (callchain_register_param(callchain) < 0) {
1038 ui__error("Can't register callchain params.\n"); 1033 ui__error("Can't register callchain params.\n");
1039 return -EINVAL; 1034 return -EINVAL;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index adbf28183560..ed4583128b9c 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1758,6 +1758,7 @@ static int trace__printf_interrupted_entry(struct trace *trace)
1758{ 1758{
1759 struct thread_trace *ttrace; 1759 struct thread_trace *ttrace;
1760 size_t printed; 1760 size_t printed;
1761 int len;
1761 1762
1762 if (trace->failure_only || trace->current == NULL) 1763 if (trace->failure_only || trace->current == NULL)
1763 return 0; 1764 return 0;
@@ -1768,9 +1769,14 @@ static int trace__printf_interrupted_entry(struct trace *trace)
1768 return 0; 1769 return 0;
1769 1770
1770 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output); 1771 printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
1771 printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str); 1772 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
1772 ttrace->entry_pending = false; 1773
1774 if (len < trace->args_alignment - 4)
1775 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
1773 1776
1777 printed += fprintf(trace->output, " ...\n");
1778
1779 ttrace->entry_pending = false;
1774 ++trace->nr_events_printed; 1780 ++trace->nr_events_printed;
1775 1781
1776 return printed; 1782 return printed;
@@ -2026,9 +2032,10 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
2026 if (ttrace->entry_pending) { 2032 if (ttrace->entry_pending) {
2027 printed = fprintf(trace->output, "%s", ttrace->entry_str); 2033 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2028 } else { 2034 } else {
2029 fprintf(trace->output, " ... ["); 2035 printed += fprintf(trace->output, " ... [");
2030 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued"); 2036 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2031 fprintf(trace->output, "]: %s()", sc->name); 2037 printed += 9;
2038 printed += fprintf(trace->output, "]: %s()", sc->name);
2032 } 2039 }
2033 2040
2034 printed++; /* the closing ')' */ 2041 printed++; /* the closing ')' */
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 6cb98f8570a2..7b55613924de 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -10,6 +10,7 @@ include/uapi/linux/fs.h
10include/uapi/linux/kcmp.h 10include/uapi/linux/kcmp.h
11include/uapi/linux/kvm.h 11include/uapi/linux/kvm.h
12include/uapi/linux/in.h 12include/uapi/linux/in.h
13include/uapi/linux/mount.h
13include/uapi/linux/perf_event.h 14include/uapi/linux/perf_event.h
14include/uapi/linux/prctl.h 15include/uapi/linux/prctl.h
15include/uapi/linux/sched.h 16include/uapi/linux/sched.h
@@ -49,7 +50,6 @@ arch/parisc/include/uapi/asm/errno.h
49arch/powerpc/include/uapi/asm/errno.h 50arch/powerpc/include/uapi/asm/errno.h
50arch/sparc/include/uapi/asm/errno.h 51arch/sparc/include/uapi/asm/errno.h
51arch/x86/include/uapi/asm/errno.h 52arch/x86/include/uapi/asm/errno.h
52arch/powerpc/include/uapi/asm/unistd.h
53include/asm-generic/bitops/arch_hweight.h 53include/asm-generic/bitops/arch_hweight.h
54include/asm-generic/bitops/const_hweight.h 54include/asm-generic/bitops/const_hweight.h
55include/asm-generic/bitops/__fls.h 55include/asm-generic/bitops/__fls.h
diff --git a/tools/perf/perf-read-vdso.c b/tools/perf/perf-read-vdso.c
index 8c0ca0cc428f..aaa5210ea84a 100644
--- a/tools/perf/perf-read-vdso.c
+++ b/tools/perf/perf-read-vdso.c
@@ -5,17 +5,17 @@
5#define VDSO__MAP_NAME "[vdso]" 5#define VDSO__MAP_NAME "[vdso]"
6 6
7/* 7/*
8 * Include definition of find_vdso_map() also used in util/vdso.c for 8 * Include definition of find_map() also used in util/vdso.c for
9 * building perf. 9 * building perf.
10 */ 10 */
11#include "util/find-vdso-map.c" 11#include "util/find-map.c"
12 12
13int main(void) 13int main(void)
14{ 14{
15 void *start, *end; 15 void *start, *end;
16 size_t size, written; 16 size_t size, written;
17 17
18 if (find_vdso_map(&start, &end)) 18 if (find_map(&start, &end, VDSO__MAP_NAME))
19 return 1; 19 return 1;
20 20
21 size = end - start; 21 size = end - start;
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 1c16e56cd93e..7cb99b433888 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,8 @@ add_probe_vfs_getname() {
13 local verbose=$1 13 local verbose=$1
14 if [ $had_vfs_getname -eq 1 ] ; then 14 if [ $had_vfs_getname -eq 1 ] ; then
15 line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/') 15 line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
16 perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string" 16 perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
17 perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
17 fi 18 fi
18} 19}
19 20
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index b82f55fcc294..399f18ca71a3 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -119,4 +119,9 @@ int test__arch_unwind_sample(struct perf_sample *sample,
119 struct thread *thread); 119 struct thread *thread);
120#endif 120#endif
121#endif 121#endif
122
123#if defined(__arm__)
124int test__vectors_page(struct test *test, int subtest);
125#endif
126
122#endif /* TESTS_H */ 127#endif /* TESTS_H */
diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh
index 45547573a1db..847850b2ef6c 100755
--- a/tools/perf/trace/beauty/mount_flags.sh
+++ b/tools/perf/trace/beauty/mount_flags.sh
@@ -5,11 +5,11 @@
5 5
6printf "static const char *mount_flags[] = {\n" 6printf "static const char *mount_flags[] = {\n"
7regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*' 7regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
8egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \ 8egrep $regex ${header_dir}/mount.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
9 sed -r "s/$regex/\2 \2 \1/g" | sort -n | \ 9 sed -r "s/$regex/\2 \2 \1/g" | sort -n | \
10 xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n" 10 xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n"
11regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*' 11regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*'
12egrep $regex ${header_dir}/fs.h | \ 12egrep $regex ${header_dir}/mount.h | \
13 sed -r "s/$regex/\2 \1/g" | \ 13 sed -r "s/$regex/\2 \1/g" | \
14 xargs printf "\t[%s + 1] = \"%s\",\n" 14 xargs printf "\t[%s + 1] = \"%s\",\n"
15printf "};\n" 15printf "};\n"
diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh
index d32f8f1124af..3109d7b05e11 100755
--- a/tools/perf/trace/beauty/prctl_option.sh
+++ b/tools/perf/trace/beauty/prctl_option.sh
@@ -4,7 +4,7 @@
4[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ 4[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
5 5
6printf "static const char *prctl_options[] = {\n" 6printf "static const char *prctl_options[] = {\n"
7regex='^#define[[:space:]]+PR_([GS]ET\w+)[[:space:]]*([[:xdigit:]]+).*' 7regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*'
8egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ 8egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
9 sed -r "s/$regex/\2 \1/g" | \ 9 sed -r "s/$regex/\2 \1/g" | \
10 sort -n | xargs printf "\t[%s] = \"%s\",\n" 10 sort -n | xargs printf "\t[%s] = \"%s\",\n"
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ac9805e0bc76..70de8f6b3aee 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1723,15 +1723,14 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1723 err = asprintf(&command, 1723 err = asprintf(&command,
1724 "%s %s%s --start-address=0x%016" PRIx64 1724 "%s %s%s --start-address=0x%016" PRIx64
1725 " --stop-address=0x%016" PRIx64 1725 " --stop-address=0x%016" PRIx64
1726 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", 1726 " -l -d %s %s -C \"$1\" 2>/dev/null|grep -v \"$1:\"|expand",
1727 opts->objdump_path ?: "objdump", 1727 opts->objdump_path ?: "objdump",
1728 opts->disassembler_style ? "-M " : "", 1728 opts->disassembler_style ? "-M " : "",
1729 opts->disassembler_style ?: "", 1729 opts->disassembler_style ?: "",
1730 map__rip_2objdump(map, sym->start), 1730 map__rip_2objdump(map, sym->start),
1731 map__rip_2objdump(map, sym->end), 1731 map__rip_2objdump(map, sym->end),
1732 opts->show_asm_raw ? "" : "--no-show-raw", 1732 opts->show_asm_raw ? "" : "--no-show-raw",
1733 opts->annotate_src ? "-S" : "", 1733 opts->annotate_src ? "-S" : "");
1734 symfs_filename, symfs_filename);
1735 1734
1736 if (err < 0) { 1735 if (err < 0) {
1737 pr_err("Failure allocating memory for the command to run\n"); 1736 pr_err("Failure allocating memory for the command to run\n");
@@ -1756,7 +1755,8 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
1756 close(stdout_fd[0]); 1755 close(stdout_fd[0]);
1757 dup2(stdout_fd[1], 1); 1756 dup2(stdout_fd[1], 1);
1758 close(stdout_fd[1]); 1757 close(stdout_fd[1]);
1759 execl("/bin/sh", "sh", "-c", command, NULL); 1758 execl("/bin/sh", "sh", "-c", command, "--", symfs_filename,
1759 NULL);
1760 perror(command); 1760 perror(command);
1761 exit(-1); 1761 exit(-1);
1762 } 1762 }
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 32ef7bdca1cf..dc2212e12184 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
766 cnode->cycles_count += node->branch_flags.cycles; 766 cnode->cycles_count += node->branch_flags.cycles;
767 cnode->iter_count += node->nr_loop_iter; 767 cnode->iter_count += node->nr_loop_iter;
768 cnode->iter_cycles += node->iter_cycles; 768 cnode->iter_cycles += node->iter_cycles;
769 cnode->from_count++;
769 } 770 }
770 } 771 }
771 772
@@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
1345static int branch_from_str(char *bf, int bfsize, 1346static int branch_from_str(char *bf, int bfsize,
1346 u64 branch_count, 1347 u64 branch_count,
1347 u64 cycles_count, u64 iter_count, 1348 u64 cycles_count, u64 iter_count,
1348 u64 iter_cycles) 1349 u64 iter_cycles, u64 from_count)
1349{ 1350{
1350 int printed = 0, i = 0; 1351 int printed = 0, i = 0;
1351 u64 cycles; 1352 u64 cycles, v = 0;
1352 1353
1353 cycles = cycles_count / branch_count; 1354 cycles = cycles_count / branch_count;
1354 if (cycles) { 1355 if (cycles) {
@@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
1357 bf + printed, bfsize - printed); 1358 bf + printed, bfsize - printed);
1358 } 1359 }
1359 1360
1360 if (iter_count) { 1361 if (iter_count && from_count) {
1361 printed += count_pri64_printf(i++, "iter", 1362 v = iter_count / from_count;
1362 iter_count, 1363 if (v) {
1363 bf + printed, bfsize - printed); 1364 printed += count_pri64_printf(i++, "iter",
1365 v, bf + printed, bfsize - printed);
1364 1366
1365 printed += count_pri64_printf(i++, "avg_cycles", 1367 printed += count_pri64_printf(i++, "avg_cycles",
1366 iter_cycles / iter_count, 1368 iter_cycles / iter_count,
1367 bf + printed, bfsize - printed); 1369 bf + printed, bfsize - printed);
1370 }
1368 } 1371 }
1369 1372
1370 if (i) 1373 if (i)
@@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
1377 u64 branch_count, u64 predicted_count, 1380 u64 branch_count, u64 predicted_count,
1378 u64 abort_count, u64 cycles_count, 1381 u64 abort_count, u64 cycles_count,
1379 u64 iter_count, u64 iter_cycles, 1382 u64 iter_count, u64 iter_cycles,
1383 u64 from_count,
1380 struct branch_type_stat *brtype_stat) 1384 struct branch_type_stat *brtype_stat)
1381{ 1385{
1382 int printed; 1386 int printed;
@@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
1389 predicted_count, abort_count, brtype_stat); 1393 predicted_count, abort_count, brtype_stat);
1390 } else { 1394 } else {
1391 printed = branch_from_str(bf, bfsize, branch_count, 1395 printed = branch_from_str(bf, bfsize, branch_count,
1392 cycles_count, iter_count, iter_cycles); 1396 cycles_count, iter_count, iter_cycles,
1397 from_count);
1393 } 1398 }
1394 1399
1395 if (!printed) 1400 if (!printed)
@@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
1402 u64 branch_count, u64 predicted_count, 1407 u64 branch_count, u64 predicted_count,
1403 u64 abort_count, u64 cycles_count, 1408 u64 abort_count, u64 cycles_count,
1404 u64 iter_count, u64 iter_cycles, 1409 u64 iter_count, u64 iter_cycles,
1410 u64 from_count,
1405 struct branch_type_stat *brtype_stat) 1411 struct branch_type_stat *brtype_stat)
1406{ 1412{
1407 char str[256]; 1413 char str[256];
1408 1414
1409 counts_str_build(str, sizeof(str), branch_count, 1415 counts_str_build(str, sizeof(str), branch_count,
1410 predicted_count, abort_count, cycles_count, 1416 predicted_count, abort_count, cycles_count,
1411 iter_count, iter_cycles, brtype_stat); 1417 iter_count, iter_cycles, from_count, brtype_stat);
1412 1418
1413 if (fp) 1419 if (fp)
1414 return fprintf(fp, "%s", str); 1420 return fprintf(fp, "%s", str);
@@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
1422 u64 branch_count, predicted_count; 1428 u64 branch_count, predicted_count;
1423 u64 abort_count, cycles_count; 1429 u64 abort_count, cycles_count;
1424 u64 iter_count, iter_cycles; 1430 u64 iter_count, iter_cycles;
1431 u64 from_count;
1425 1432
1426 branch_count = clist->branch_count; 1433 branch_count = clist->branch_count;
1427 predicted_count = clist->predicted_count; 1434 predicted_count = clist->predicted_count;
@@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
1429 cycles_count = clist->cycles_count; 1436 cycles_count = clist->cycles_count;
1430 iter_count = clist->iter_count; 1437 iter_count = clist->iter_count;
1431 iter_cycles = clist->iter_cycles; 1438 iter_cycles = clist->iter_cycles;
1439 from_count = clist->from_count;
1432 1440
1433 return callchain_counts_printf(fp, bf, bfsize, branch_count, 1441 return callchain_counts_printf(fp, bf, bfsize, branch_count,
1434 predicted_count, abort_count, 1442 predicted_count, abort_count,
1435 cycles_count, iter_count, iter_cycles, 1443 cycles_count, iter_count, iter_cycles,
1436 &clist->brtype_stat); 1444 from_count, &clist->brtype_stat);
1437} 1445}
1438 1446
1439static void free_callchain_node(struct callchain_node *node) 1447static void free_callchain_node(struct callchain_node *node)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 154560b1eb65..99d38ac019b8 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -118,6 +118,7 @@ struct callchain_list {
118 bool has_children; 118 bool has_children;
119 }; 119 };
120 u64 branch_count; 120 u64 branch_count;
121 u64 from_count;
121 u64 predicted_count; 122 u64 predicted_count;
122 u64 abort_count; 123 u64 abort_count;
123 u64 cycles_count; 124 u64 cycles_count;
diff --git a/tools/perf/util/find-vdso-map.c b/tools/perf/util/find-map.c
index d7823e3508fc..7b2300588ece 100644
--- a/tools/perf/util/find-vdso-map.c
+++ b/tools/perf/util/find-map.c
@@ -1,5 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2static int find_vdso_map(void **start, void **end) 2static int find_map(void **start, void **end, const char *name)
3{ 3{
4 FILE *maps; 4 FILE *maps;
5 char line[128]; 5 char line[128];
@@ -7,7 +7,7 @@ static int find_vdso_map(void **start, void **end)
7 7
8 maps = fopen("/proc/self/maps", "r"); 8 maps = fopen("/proc/self/maps", "r");
9 if (!maps) { 9 if (!maps) {
10 fprintf(stderr, "vdso: cannot open maps\n"); 10 fprintf(stderr, "cannot open maps\n");
11 return -1; 11 return -1;
12 } 12 }
13 13
@@ -21,8 +21,7 @@ static int find_vdso_map(void **start, void **end)
21 if (m < 0) 21 if (m < 0)
22 continue; 22 continue;
23 23
24 if (!strncmp(&line[m], VDSO__MAP_NAME, 24 if (!strncmp(&line[m], name, strlen(name)))
25 sizeof(VDSO__MAP_NAME) - 1))
26 found = 1; 25 found = 1;
27 } 26 }
28 27
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 6fcb3bce0442..143f7057d581 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2005,7 +2005,7 @@ static void save_iterations(struct iterations *iter,
2005{ 2005{
2006 int i; 2006 int i;
2007 2007
2008 iter->nr_loop_iter = nr; 2008 iter->nr_loop_iter++;
2009 iter->cycles = 0; 2009 iter->cycles = 0;
2010 2010
2011 for (i = 0; i < nr; i++) 2011 for (i = 0; i < nr; i++)
diff --git a/tools/perf/util/strbuf.c b/tools/perf/util/strbuf.c
index 9005fbe0780e..23092fd6451d 100644
--- a/tools/perf/util/strbuf.c
+++ b/tools/perf/util/strbuf.c
@@ -109,7 +109,6 @@ static int strbuf_addv(struct strbuf *sb, const char *fmt, va_list ap)
109 return ret; 109 return ret;
110 } 110 }
111 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved); 111 len = vsnprintf(sb->buf + sb->len, sb->alloc - sb->len, fmt, ap_saved);
112 va_end(ap_saved);
113 if (len > strbuf_avail(sb)) { 112 if (len > strbuf_avail(sb)) {
114 pr_debug("this should not happen, your vsnprintf is broken"); 113 pr_debug("this should not happen, your vsnprintf is broken");
115 va_end(ap_saved); 114 va_end(ap_saved);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 01f2c7385e38..48efad6d0f90 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -614,6 +614,7 @@ out:
614static bool symbol__is_idle(const char *name) 614static bool symbol__is_idle(const char *name)
615{ 615{
616 const char * const idle_symbols[] = { 616 const char * const idle_symbols[] = {
617 "arch_cpu_idle",
617 "cpu_idle", 618 "cpu_idle",
618 "cpu_startup_entry", 619 "cpu_startup_entry",
619 "intel_idle", 620 "intel_idle",
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index 741af209b19d..3702cba11d7d 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -18,10 +18,10 @@
18#include "debug.h" 18#include "debug.h"
19 19
20/* 20/*
21 * Include definition of find_vdso_map() also used in perf-read-vdso.c for 21 * Include definition of find_map() also used in perf-read-vdso.c for
22 * building perf-read-vdso32 and perf-read-vdsox32. 22 * building perf-read-vdso32 and perf-read-vdsox32.
23 */ 23 */
24#include "find-vdso-map.c" 24#include "find-map.c"
25 25
26#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX" 26#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
27 27
@@ -76,7 +76,7 @@ static char *get_file(struct vdso_file *vdso_file)
76 if (vdso_file->found) 76 if (vdso_file->found)
77 return vdso_file->temp_file_name; 77 return vdso_file->temp_file_name;
78 78
79 if (vdso_file->error || find_vdso_map(&start, &end)) 79 if (vdso_file->error || find_map(&start, &end, VDSO__MAP_NAME))
80 return NULL; 80 return NULL;
81 81
82 size = end - start; 82 size = end - start;
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 4a9785043a39..dd093bd91aa9 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -28,3 +28,4 @@ flow_dissector_load
28test_netcnt 28test_netcnt
29test_section_names 29test_section_names
30test_tcpnotify_user 30test_tcpnotify_user
31test_libbpf
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 73aa6d8f4a2f..41ab7a3668b3 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -55,7 +55,10 @@ TEST_PROGS := test_kmod.sh \
55 test_flow_dissector.sh \ 55 test_flow_dissector.sh \
56 test_xdp_vlan.sh 56 test_xdp_vlan.sh
57 57
58TEST_PROGS_EXTENDED := with_addr.sh 58TEST_PROGS_EXTENDED := with_addr.sh \
59 with_tunnels.sh \
60 tcp_client.py \
61 tcp_server.py
59 62
60# Compile but not part of 'make run_tests' 63# Compile but not part of 'make run_tests'
61TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \ 64TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index cf16948aad4a..6692a40a6979 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -155,7 +155,7 @@ void cleanup_cgroup_environment(void)
155 * This function creates a cgroup under the top level workdir and returns the 155 * This function creates a cgroup under the top level workdir and returns the
156 * file descriptor. It is idempotent. 156 * file descriptor. It is idempotent.
157 * 157 *
158 * On success, it returns the file descriptor. On failure it returns 0. 158 * On success, it returns the file descriptor. On failure it returns -1.
159 * If there is a failure, it prints the error to stderr. 159 * If there is a failure, it prints the error to stderr.
160 */ 160 */
161int create_and_get_cgroup(const char *path) 161int create_and_get_cgroup(const char *path)
@@ -166,13 +166,13 @@ int create_and_get_cgroup(const char *path)
166 format_cgroup_path(cgroup_path, path); 166 format_cgroup_path(cgroup_path, path);
167 if (mkdir(cgroup_path, 0777) && errno != EEXIST) { 167 if (mkdir(cgroup_path, 0777) && errno != EEXIST) {
168 log_err("mkdiring cgroup %s .. %s", path, cgroup_path); 168 log_err("mkdiring cgroup %s .. %s", path, cgroup_path);
169 return 0; 169 return -1;
170 } 170 }
171 171
172 fd = open(cgroup_path, O_RDONLY); 172 fd = open(cgroup_path, O_RDONLY);
173 if (fd < 0) { 173 if (fd < 0) {
174 log_err("Opening Cgroup"); 174 log_err("Opening Cgroup");
175 return 0; 175 return -1;
176 } 176 }
177 177
178 return fd; 178 return fd;
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 8bcd38010582..a0bd04befe87 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -3526,6 +3526,8 @@ struct pprint_mapv {
3526 ENUM_TWO, 3526 ENUM_TWO,
3527 ENUM_THREE, 3527 ENUM_THREE,
3528 } aenum; 3528 } aenum;
3529 uint32_t ui32b;
3530 uint32_t bits2c:2;
3529}; 3531};
3530 3532
3531static struct btf_raw_test pprint_test_template[] = { 3533static struct btf_raw_test pprint_test_template[] = {
@@ -3568,7 +3570,7 @@ static struct btf_raw_test pprint_test_template[] = {
3568 BTF_ENUM_ENC(NAME_TBD, 2), 3570 BTF_ENUM_ENC(NAME_TBD, 2),
3569 BTF_ENUM_ENC(NAME_TBD, 3), 3571 BTF_ENUM_ENC(NAME_TBD, 3),
3570 /* struct pprint_mapv */ /* [16] */ 3572 /* struct pprint_mapv */ /* [16] */
3571 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 8), 32), 3573 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 10), 40),
3572 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */ 3574 BTF_MEMBER_ENC(NAME_TBD, 11, 0), /* uint32_t ui32 */
3573 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */ 3575 BTF_MEMBER_ENC(NAME_TBD, 10, 32), /* uint16_t ui16 */
3574 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */ 3576 BTF_MEMBER_ENC(NAME_TBD, 12, 64), /* int32_t si32 */
@@ -3577,9 +3579,11 @@ static struct btf_raw_test pprint_test_template[] = {
3577 BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */ 3579 BTF_MEMBER_ENC(NAME_TBD, 6, 126), /* unused_bits2b */
3578 BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */ 3580 BTF_MEMBER_ENC(0, 14, 128), /* union (anon) */
3579 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */ 3581 BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
3582 BTF_MEMBER_ENC(NAME_TBD, 11, 224), /* uint32_t ui32b */
3583 BTF_MEMBER_ENC(NAME_TBD, 6, 256), /* bits2c */
3580 BTF_END_RAW, 3584 BTF_END_RAW,
3581 }, 3585 },
3582 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3586 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
3583 .key_size = sizeof(unsigned int), 3587 .key_size = sizeof(unsigned int),
3584 .value_size = sizeof(struct pprint_mapv), 3588 .value_size = sizeof(struct pprint_mapv),
3585 .key_type_id = 3, /* unsigned int */ 3589 .key_type_id = 3, /* unsigned int */
@@ -3628,7 +3632,7 @@ static struct btf_raw_test pprint_test_template[] = {
3628 BTF_ENUM_ENC(NAME_TBD, 2), 3632 BTF_ENUM_ENC(NAME_TBD, 2),
3629 BTF_ENUM_ENC(NAME_TBD, 3), 3633 BTF_ENUM_ENC(NAME_TBD, 3),
3630 /* struct pprint_mapv */ /* [16] */ 3634 /* struct pprint_mapv */ /* [16] */
3631 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), 3635 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
3632 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ 3636 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
3633 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ 3637 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
3634 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ 3638 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3637,9 +3641,11 @@ static struct btf_raw_test pprint_test_template[] = {
3637 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */ 3641 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
3638 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ 3642 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
3639 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ 3643 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
3644 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
3645 BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
3640 BTF_END_RAW, 3646 BTF_END_RAW,
3641 }, 3647 },
3642 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"), 3648 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c"),
3643 .key_size = sizeof(unsigned int), 3649 .key_size = sizeof(unsigned int),
3644 .value_size = sizeof(struct pprint_mapv), 3650 .value_size = sizeof(struct pprint_mapv),
3645 .key_type_id = 3, /* unsigned int */ 3651 .key_type_id = 3, /* unsigned int */
@@ -3690,7 +3696,7 @@ static struct btf_raw_test pprint_test_template[] = {
3690 BTF_ENUM_ENC(NAME_TBD, 2), 3696 BTF_ENUM_ENC(NAME_TBD, 2),
3691 BTF_ENUM_ENC(NAME_TBD, 3), 3697 BTF_ENUM_ENC(NAME_TBD, 3),
3692 /* struct pprint_mapv */ /* [16] */ 3698 /* struct pprint_mapv */ /* [16] */
3693 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32), 3699 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 10), 40),
3694 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */ 3700 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
3695 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */ 3701 BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
3696 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */ 3702 BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
@@ -3699,13 +3705,15 @@ static struct btf_raw_test pprint_test_template[] = {
3699 BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */ 3705 BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
3700 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */ 3706 BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
3701 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */ 3707 BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
3708 BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 224)), /* uint32_t ui32b */
3709 BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 256)), /* bits2c */
3702 /* typedef unsigned int ___int */ /* [17] */ 3710 /* typedef unsigned int ___int */ /* [17] */
3703 BTF_TYPEDEF_ENC(NAME_TBD, 18), 3711 BTF_TYPEDEF_ENC(NAME_TBD, 18),
3704 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */ 3712 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
3705 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */ 3713 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
3706 BTF_END_RAW, 3714 BTF_END_RAW,
3707 }, 3715 },
3708 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"), 3716 BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0ui32b\0bits2c\0___int"),
3709 .key_size = sizeof(unsigned int), 3717 .key_size = sizeof(unsigned int),
3710 .value_size = sizeof(struct pprint_mapv), 3718 .value_size = sizeof(struct pprint_mapv),
3711 .key_type_id = 3, /* unsigned int */ 3719 .key_type_id = 3, /* unsigned int */
@@ -3793,6 +3801,8 @@ static void set_pprint_mapv(struct pprint_mapv *v, uint32_t i,
3793 v->unused_bits2b = 3; 3801 v->unused_bits2b = 3;
3794 v->ui64 = i; 3802 v->ui64 = i;
3795 v->aenum = i & 0x03; 3803 v->aenum = i & 0x03;
3804 v->ui32b = 4;
3805 v->bits2c = 1;
3796 v = (void *)v + rounded_value_size; 3806 v = (void *)v + rounded_value_size;
3797 } 3807 }
3798} 3808}
@@ -3955,7 +3965,8 @@ static int do_test_pprint(int test_num)
3955 3965
3956 nexpected_line = snprintf(expected_line, sizeof(expected_line), 3966 nexpected_line = snprintf(expected_line, sizeof(expected_line),
3957 "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," 3967 "%s%u: {%u,0,%d,0x%x,0x%x,0x%x,"
3958 "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s}\n", 3968 "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s,"
3969 "%u,0x%x}\n",
3959 percpu_map ? "\tcpu" : "", 3970 percpu_map ? "\tcpu" : "",
3960 percpu_map ? cpu : next_key, 3971 percpu_map ? cpu : next_key,
3961 cmapv->ui32, cmapv->si32, 3972 cmapv->ui32, cmapv->si32,
@@ -3967,7 +3978,9 @@ static int do_test_pprint(int test_num)
3967 cmapv->ui8a[2], cmapv->ui8a[3], 3978 cmapv->ui8a[2], cmapv->ui8a[3],
3968 cmapv->ui8a[4], cmapv->ui8a[5], 3979 cmapv->ui8a[4], cmapv->ui8a[5],
3969 cmapv->ui8a[6], cmapv->ui8a[7], 3980 cmapv->ui8a[6], cmapv->ui8a[7],
3970 pprint_enum_str[cmapv->aenum]); 3981 pprint_enum_str[cmapv->aenum],
3982 cmapv->ui32b,
3983 cmapv->bits2c);
3971 3984
3972 err = check_line(expected_line, nexpected_line, 3985 err = check_line(expected_line, nexpected_line,
3973 sizeof(expected_line), line); 3986 sizeof(expected_line), line);
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index f44834155f25..2fc4625c1a15 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
81 81
82 /* Create a cgroup, get fd, and join it */ 82 /* Create a cgroup, get fd, and join it */
83 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 83 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
84 if (!cgroup_fd) { 84 if (cgroup_fd < 0) {
85 printf("Failed to create test cgroup\n"); 85 printf("Failed to create test cgroup\n");
86 goto err; 86 goto err;
87 } 87 }
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 9c8b50bac7e0..76e4993b7c16 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -43,7 +43,7 @@ int main(int argc, char **argv)
43 43
44 /* Create a cgroup, get fd, and join it */ 44 /* Create a cgroup, get fd, and join it */
45 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 45 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
46 if (!cgroup_fd) { 46 if (cgroup_fd < 0) {
47 printf("Failed to create test cgroup\n"); 47 printf("Failed to create test cgroup\n");
48 goto err; 48 goto err;
49 } 49 }
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 44ed7f29f8ab..c1da5404454a 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -65,7 +65,7 @@ int main(int argc, char **argv)
65 65
66 /* Create a cgroup, get fd, and join it */ 66 /* Create a cgroup, get fd, and join it */
67 cgroup_fd = create_and_get_cgroup(TEST_CGROUP); 67 cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
68 if (!cgroup_fd) { 68 if (cgroup_fd < 0) {
69 printf("Failed to create test cgroup\n"); 69 printf("Failed to create test cgroup\n");
70 goto err; 70 goto err;
71 } 71 }
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 126fc624290d..25f0083a9b2e 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -1188,7 +1188,9 @@ static void test_stacktrace_build_id(void)
1188 int i, j; 1188 int i, j;
1189 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1189 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1190 int build_id_matches = 0; 1190 int build_id_matches = 0;
1191 int retry = 1;
1191 1192
1193retry:
1192 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 1194 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
1193 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1195 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1194 goto out; 1196 goto out;
@@ -1301,6 +1303,19 @@ static void test_stacktrace_build_id(void)
1301 previous_key = key; 1303 previous_key = key;
1302 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1304 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1303 1305
1306 /* stack_map_get_build_id_offset() is racy and sometimes can return
1307 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1308 * try it one more time.
1309 */
1310 if (build_id_matches < 1 && retry--) {
1311 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1312 close(pmu_fd);
1313 bpf_object__close(obj);
1314 printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1315 __func__);
1316 goto retry;
1317 }
1318
1304 if (CHECK(build_id_matches < 1, "build id match", 1319 if (CHECK(build_id_matches < 1, "build id match",
1305 "Didn't find expected build ID from the map\n")) 1320 "Didn't find expected build ID from the map\n"))
1306 goto disable_pmu; 1321 goto disable_pmu;
@@ -1341,7 +1356,9 @@ static void test_stacktrace_build_id_nmi(void)
1341 int i, j; 1356 int i, j;
1342 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; 1357 struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1343 int build_id_matches = 0; 1358 int build_id_matches = 0;
1359 int retry = 1;
1344 1360
1361retry:
1345 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd); 1362 err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
1346 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) 1363 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1347 return; 1364 return;
@@ -1436,6 +1453,19 @@ static void test_stacktrace_build_id_nmi(void)
1436 previous_key = key; 1453 previous_key = key;
1437 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1454 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1438 1455
1456 /* stack_map_get_build_id_offset() is racy and sometimes can return
1457 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
1458 * try it one more time.
1459 */
1460 if (build_id_matches < 1 && retry--) {
1461 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
1462 close(pmu_fd);
1463 bpf_object__close(obj);
1464 printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
1465 __func__);
1466 goto retry;
1467 }
1468
1439 if (CHECK(build_id_matches < 1, "build id match", 1469 if (CHECK(build_id_matches < 1, "build id match",
1440 "Didn't find expected build ID from the map\n")) 1470 "Didn't find expected build ID from the map\n"))
1441 goto disable_pmu; 1471 goto disable_pmu;
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index c121cc59f314..9220747c069d 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -164,7 +164,7 @@ int main(int argc, char **argv)
164 goto err; 164 goto err;
165 165
166 cgfd = create_and_get_cgroup(CGROUP_PATH); 166 cgfd = create_and_get_cgroup(CGROUP_PATH);
167 if (!cgfd) 167 if (cgfd < 0)
168 goto err; 168 goto err;
169 169
170 if (join_cgroup(CGROUP_PATH)) 170 if (join_cgroup(CGROUP_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index b8ebe2f58074..561ffb6d6433 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -458,7 +458,7 @@ int main(int argc, char **argv)
458 goto err; 458 goto err;
459 459
460 cgfd = create_and_get_cgroup(CG_PATH); 460 cgfd = create_and_get_cgroup(CG_PATH);
461 if (!cgfd) 461 if (cgfd < 0)
462 goto err; 462 goto err;
463 463
464 if (join_cgroup(CG_PATH)) 464 if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index 73b7493d4120..3f110eaaf29c 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -44,6 +44,7 @@
44#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4" 44#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
45#define SRC6_IP "::1" 45#define SRC6_IP "::1"
46#define SRC6_REWRITE_IP "::6" 46#define SRC6_REWRITE_IP "::6"
47#define WILDCARD6_IP "::"
47#define SERV6_PORT 6060 48#define SERV6_PORT 6060
48#define SERV6_REWRITE_PORT 6666 49#define SERV6_REWRITE_PORT 6666
49 50
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
85static int bind6_prog_load(const struct sock_addr_test *test); 86static int bind6_prog_load(const struct sock_addr_test *test);
86static int connect4_prog_load(const struct sock_addr_test *test); 87static int connect4_prog_load(const struct sock_addr_test *test);
87static int connect6_prog_load(const struct sock_addr_test *test); 88static int connect6_prog_load(const struct sock_addr_test *test);
89static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
88static int sendmsg_deny_prog_load(const struct sock_addr_test *test); 90static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
89static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test); 91static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
90static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test); 92static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
91static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test); 93static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
92static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test); 94static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
93static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test); 95static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
96static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
94 97
95static struct sock_addr_test tests[] = { 98static struct sock_addr_test tests[] = {
96 /* bind */ 99 /* bind */
@@ -463,6 +466,34 @@ static struct sock_addr_test tests[] = {
463 SYSCALL_ENOTSUPP, 466 SYSCALL_ENOTSUPP,
464 }, 467 },
465 { 468 {
469 "sendmsg6: set dst IP = [::] (BSD'ism)",
470 sendmsg6_rw_wildcard_prog_load,
471 BPF_CGROUP_UDP6_SENDMSG,
472 BPF_CGROUP_UDP6_SENDMSG,
473 AF_INET6,
474 SOCK_DGRAM,
475 SERV6_IP,
476 SERV6_PORT,
477 SERV6_REWRITE_IP,
478 SERV6_REWRITE_PORT,
479 SRC6_REWRITE_IP,
480 SUCCESS,
481 },
482 {
483 "sendmsg6: preserve dst IP = [::] (BSD'ism)",
484 sendmsg_allow_prog_load,
485 BPF_CGROUP_UDP6_SENDMSG,
486 BPF_CGROUP_UDP6_SENDMSG,
487 AF_INET6,
488 SOCK_DGRAM,
489 WILDCARD6_IP,
490 SERV6_PORT,
491 SERV6_REWRITE_IP,
492 SERV6_PORT,
493 SRC6_IP,
494 SUCCESS,
495 },
496 {
466 "sendmsg6: deny call", 497 "sendmsg6: deny call",
467 sendmsg_deny_prog_load, 498 sendmsg_deny_prog_load,
468 BPF_CGROUP_UDP6_SENDMSG, 499 BPF_CGROUP_UDP6_SENDMSG,
@@ -734,16 +765,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
734 return load_path(test, CONNECT6_PROG_PATH); 765 return load_path(test, CONNECT6_PROG_PATH);
735} 766}
736 767
737static int sendmsg_deny_prog_load(const struct sock_addr_test *test) 768static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
769 int32_t rc)
738{ 770{
739 struct bpf_insn insns[] = { 771 struct bpf_insn insns[] = {
740 /* return 0 */ 772 /* return rc */
741 BPF_MOV64_IMM(BPF_REG_0, 0), 773 BPF_MOV64_IMM(BPF_REG_0, rc),
742 BPF_EXIT_INSN(), 774 BPF_EXIT_INSN(),
743 }; 775 };
744 return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn)); 776 return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
745} 777}
746 778
779static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
780{
781 return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
782}
783
784static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
785{
786 return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
787}
788
747static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test) 789static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
748{ 790{
749 struct sockaddr_in dst4_rw_addr; 791 struct sockaddr_in dst4_rw_addr;
@@ -864,6 +906,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
864 return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP); 906 return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
865} 907}
866 908
909static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
910{
911 return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
912}
913
867static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test) 914static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
868{ 915{
869 return load_path(test, SENDMSG6_PROG_PATH); 916 return load_path(test, SENDMSG6_PROG_PATH);
@@ -1395,7 +1442,7 @@ int main(int argc, char **argv)
1395 goto err; 1442 goto err;
1396 1443
1397 cgfd = create_and_get_cgroup(CG_PATH); 1444 cgfd = create_and_get_cgroup(CG_PATH);
1398 if (!cgfd) 1445 if (cgfd < 0)
1399 goto err; 1446 goto err;
1400 1447
1401 if (join_cgroup(CG_PATH)) 1448 if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index b6c2c605d8c0..fc7832ee566b 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -202,7 +202,7 @@ int main(int argc, char **argv)
202 goto err; 202 goto err;
203 203
204 cgfd = create_and_get_cgroup(CG_PATH); 204 cgfd = create_and_get_cgroup(CG_PATH);
205 if (!cgfd) 205 if (cgfd < 0)
206 goto err; 206 goto err;
207 207
208 if (join_cgroup(CG_PATH)) 208 if (join_cgroup(CG_PATH))
diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c
index e6eebda7d112..716b4e3be581 100644
--- a/tools/testing/selftests/bpf/test_tcpbpf_user.c
+++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c
@@ -103,7 +103,7 @@ int main(int argc, char **argv)
103 goto err; 103 goto err;
104 104
105 cg_fd = create_and_get_cgroup(cg_path); 105 cg_fd = create_and_get_cgroup(cg_path);
106 if (!cg_fd) 106 if (cg_fd < 0)
107 goto err; 107 goto err;
108 108
109 if (join_cgroup(cg_path)) 109 if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index ff3c4522aed6..4e4353711a86 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -115,7 +115,7 @@ int main(int argc, char **argv)
115 goto err; 115 goto err;
116 116
117 cg_fd = create_and_get_cgroup(cg_path); 117 cg_fd = create_and_get_cgroup(cg_path);
118 if (!cg_fd) 118 if (cg_fd < 0)
119 goto err; 119 goto err;
120 120
121 if (join_cgroup(cg_path)) 121 if (join_cgroup(cg_path))
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 10d44446e801..2fd90d456892 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6934,6 +6934,126 @@ static struct bpf_test tests[] = {
6934 .retval = 1, 6934 .retval = 1,
6935 }, 6935 },
6936 { 6936 {
6937 "map access: mixing value pointer and scalar, 1",
6938 .insns = {
6939 // load map value pointer into r0 and r2
6940 BPF_MOV64_IMM(BPF_REG_0, 1),
6941 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6942 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6943 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6944 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6945 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6946 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6947 BPF_EXIT_INSN(),
6948 // load some number from the map into r1
6949 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6950 // depending on r1, branch:
6951 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3),
6952 // branch A
6953 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
6954 BPF_MOV64_IMM(BPF_REG_3, 0),
6955 BPF_JMP_A(2),
6956 // branch B
6957 BPF_MOV64_IMM(BPF_REG_2, 0),
6958 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
6959 // common instruction
6960 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
6961 // depending on r1, branch:
6962 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
6963 // branch A
6964 BPF_JMP_A(4),
6965 // branch B
6966 BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
6967 // verifier follows fall-through
6968 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
6969 BPF_MOV64_IMM(BPF_REG_0, 0),
6970 BPF_EXIT_INSN(),
6971 // fake-dead code; targeted from branch A to
6972 // prevent dead code sanitization
6973 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6974 BPF_MOV64_IMM(BPF_REG_0, 0),
6975 BPF_EXIT_INSN(),
6976 },
6977 .fixup_map_array_48b = { 1 },
6978 .result = ACCEPT,
6979 .result_unpriv = REJECT,
6980 .errstr_unpriv = "R2 tried to add from different pointers or scalars",
6981 .retval = 0,
6982 },
6983 {
6984 "map access: mixing value pointer and scalar, 2",
6985 .insns = {
6986 // load map value pointer into r0 and r2
6987 BPF_MOV64_IMM(BPF_REG_0, 1),
6988 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
6989 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
6990 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
6991 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
6992 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
6993 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6994 BPF_EXIT_INSN(),
6995 // load some number from the map into r1
6996 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6997 // depending on r1, branch:
6998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
6999 // branch A
7000 BPF_MOV64_IMM(BPF_REG_2, 0),
7001 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7002 BPF_JMP_A(2),
7003 // branch B
7004 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
7005 BPF_MOV64_IMM(BPF_REG_3, 0),
7006 // common instruction
7007 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7008 // depending on r1, branch:
7009 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
7010 // branch A
7011 BPF_JMP_A(4),
7012 // branch B
7013 BPF_MOV64_IMM(BPF_REG_0, 0x13371337),
7014 // verifier follows fall-through
7015 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2),
7016 BPF_MOV64_IMM(BPF_REG_0, 0),
7017 BPF_EXIT_INSN(),
7018 // fake-dead code; targeted from branch A to
7019 // prevent dead code sanitization
7020 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7021 BPF_MOV64_IMM(BPF_REG_0, 0),
7022 BPF_EXIT_INSN(),
7023 },
7024 .fixup_map_array_48b = { 1 },
7025 .result = ACCEPT,
7026 .result_unpriv = REJECT,
7027 .errstr_unpriv = "R2 tried to add from different maps or paths",
7028 .retval = 0,
7029 },
7030 {
7031 "sanitation: alu with different scalars",
7032 .insns = {
7033 BPF_MOV64_IMM(BPF_REG_0, 1),
7034 BPF_LD_MAP_FD(BPF_REG_ARG1, 0),
7035 BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
7036 BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16),
7037 BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0),
7038 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
7039 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7040 BPF_EXIT_INSN(),
7041 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7042 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
7043 BPF_MOV64_IMM(BPF_REG_2, 0),
7044 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
7045 BPF_JMP_A(2),
7046 BPF_MOV64_IMM(BPF_REG_2, 42),
7047 BPF_MOV64_IMM(BPF_REG_3, 0x100001),
7048 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3),
7049 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
7050 BPF_EXIT_INSN(),
7051 },
7052 .fixup_map_array_48b = { 1 },
7053 .result = ACCEPT,
7054 .retval = 0x100000,
7055 },
7056 {
6937 "map access: value_ptr += known scalar, upper oob arith, test 1", 7057 "map access: value_ptr += known scalar, upper oob arith, test 1",
6938 .insns = { 7058 .insns = {
6939 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 7059 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
index 94fdbf215c14..c4cf6e6d800e 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -25,6 +25,7 @@ ALL_TESTS="
25 lag_unlink_slaves_test 25 lag_unlink_slaves_test
26 lag_dev_deletion_test 26 lag_dev_deletion_test
27 vlan_interface_uppers_test 27 vlan_interface_uppers_test
28 bridge_extern_learn_test
28 devlink_reload_test 29 devlink_reload_test
29" 30"
30NUM_NETIFS=2 31NUM_NETIFS=2
@@ -541,6 +542,25 @@ vlan_interface_uppers_test()
541 ip link del dev br0 542 ip link del dev br0
542} 543}
543 544
545bridge_extern_learn_test()
546{
547 # Test that externally learned entries added from user space are
548 # marked as offloaded
549 RET=0
550
551 ip link add name br0 type bridge
552 ip link set dev $swp1 master br0
553
554 bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn
555
556 bridge fdb show brport $swp1 | grep de:ad:be:ef:13:37 | grep -q offload
557 check_err $? "fdb entry not marked as offloaded when should"
558
559 log_test "externally learned fdb entry"
560
561 ip link del dev br0
562}
563
544devlink_reload_test() 564devlink_reload_test()
545{ 565{
546 # Test that after executing all the above configuration tests, a 566 # Test that after executing all the above configuration tests, a
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
index dcf9f4e913e0..ae6146ec5afd 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -847,6 +847,24 @@ sanitization_vlan_aware_test()
847 847
848 log_test "vlan-aware - failed enslavement to vlan-aware bridge" 848 log_test "vlan-aware - failed enslavement to vlan-aware bridge"
849 849
850 bridge vlan del vid 10 dev vxlan20
851 bridge vlan add vid 20 dev vxlan20 pvid untagged
852
853 # Test that offloading of an unsupported tunnel fails when it is
854 # triggered by addition of VLAN to a local port
855 RET=0
856
857 # TOS must be set to inherit
858 ip link set dev vxlan10 type vxlan tos 42
859
860 ip link set dev $swp1 master br0
861 bridge vlan add vid 10 dev $swp1 &> /dev/null
862 check_fail $?
863
864 log_test "vlan-aware - failed vlan addition to a local port"
865
866 ip link set dev vxlan10 type vxlan tos inherit
867
850 ip link del dev vxlan20 868 ip link del dev vxlan20
851 ip link del dev vxlan10 869 ip link del dev vxlan10
852 ip link del dev br0 870 ip link del dev br0
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index d8313d0438b7..b90dff8d3a94 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
1#!/bin/bash 1#!/bin/bash
2# SPDX-License-Identifier: GPL-2.0 2# SPDX-License-Identifier: GPL-2.0
3 3
4ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding" 4ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
5NUM_NETIFS=4 5NUM_NETIFS=4
6CHECK_TC="yes" 6CHECK_TC="yes"
7source lib.sh 7source lib.sh
@@ -96,6 +96,51 @@ flooding()
96 flood_test $swp2 $h1 $h2 96 flood_test $swp2 $h1 $h2
97} 97}
98 98
99vlan_deletion()
100{
101 # Test that the deletion of a VLAN on a bridge port does not affect
102 # the PVID VLAN
103 log_info "Add and delete a VLAN on bridge port $swp1"
104
105 bridge vlan add vid 10 dev $swp1
106 bridge vlan del vid 10 dev $swp1
107
108 ping_ipv4
109 ping_ipv6
110}
111
112extern_learn()
113{
114 local mac=de:ad:be:ef:13:37
115 local ageing_time
116
117 # Test that externally learned FDB entries can roam, but not age out
118 RET=0
119
120 bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
121
122 bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
123 check_err $? "Did not find FDB entry when should"
124
125 # Wait for 10 seconds after the ageing time to make sure the FDB entry
126 # was not aged out
127 ageing_time=$(bridge_ageing_time_get br0)
128 sleep $((ageing_time + 10))
129
130 bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
131 check_err $? "FDB entry was aged out when should not"
132
133 $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
134
135 bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
136 check_err $? "FDB entry did not roam when should"
137
138 log_test "Externally learned FDB entry - ageing & roaming"
139
140 bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
141 bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
142}
143
99trap cleanup EXIT 144trap cleanup EXIT
100 145
101setup_prepare 146setup_prepare
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index 56cef3b1c194..bb10e33690b2 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -629,7 +629,7 @@ __test_ecn_decap()
629 RET=0 629 RET=0
630 630
631 tc filter add dev $h1 ingress pref 77 prot ip \ 631 tc filter add dev $h1 ingress pref 77 prot ip \
632 flower ip_tos $decapped_tos action pass 632 flower ip_tos $decapped_tos action drop
633 sleep 1 633 sleep 1
634 vxlan_encapped_ping_test v2 v1 192.0.2.17 \ 634 vxlan_encapped_ping_test v2 v1 192.0.2.17 \
635 $orig_inner_tos $orig_outer_tos \ 635 $orig_inner_tos $orig_outer_tos \
diff --git a/tools/testing/selftests/net/ip_defrag.c b/tools/testing/selftests/net/ip_defrag.c
index 61ae2782388e..5d56cc0838f6 100644
--- a/tools/testing/selftests/net/ip_defrag.c
+++ b/tools/testing/selftests/net/ip_defrag.c
@@ -203,6 +203,7 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
203{ 203{
204 struct ip *iphdr = (struct ip *)ip_frame; 204 struct ip *iphdr = (struct ip *)ip_frame;
205 struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame; 205 struct ip6_hdr *ip6hdr = (struct ip6_hdr *)ip_frame;
206 const bool ipv4 = !ipv6;
206 int res; 207 int res;
207 int offset; 208 int offset;
208 int frag_len; 209 int frag_len;
@@ -239,19 +240,53 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
239 iphdr->ip_sum = 0; 240 iphdr->ip_sum = 0;
240 } 241 }
241 242
243 /* Occasionally test in-order fragments. */
244 if (!cfg_overlap && (rand() % 100 < 15)) {
245 offset = 0;
246 while (offset < (UDP_HLEN + payload_len)) {
247 send_fragment(fd_raw, addr, alen, offset, ipv6);
248 offset += max_frag_len;
249 }
250 return;
251 }
252
253 /* Occasionally test IPv4 "runs" (see net/ipv4/ip_fragment.c) */
254 if (ipv4 && !cfg_overlap && (rand() % 100 < 20) &&
255 (payload_len > 9 * max_frag_len)) {
256 offset = 6 * max_frag_len;
257 while (offset < (UDP_HLEN + payload_len)) {
258 send_fragment(fd_raw, addr, alen, offset, ipv6);
259 offset += max_frag_len;
260 }
261 offset = 3 * max_frag_len;
262 while (offset < 6 * max_frag_len) {
263 send_fragment(fd_raw, addr, alen, offset, ipv6);
264 offset += max_frag_len;
265 }
266 offset = 0;
267 while (offset < 3 * max_frag_len) {
268 send_fragment(fd_raw, addr, alen, offset, ipv6);
269 offset += max_frag_len;
270 }
271 return;
272 }
273
242 /* Odd fragments. */ 274 /* Odd fragments. */
243 offset = max_frag_len; 275 offset = max_frag_len;
244 while (offset < (UDP_HLEN + payload_len)) { 276 while (offset < (UDP_HLEN + payload_len)) {
245 send_fragment(fd_raw, addr, alen, offset, ipv6); 277 send_fragment(fd_raw, addr, alen, offset, ipv6);
278 /* IPv4 ignores duplicates, so randomly send a duplicate. */
279 if (ipv4 && (1 == rand() % 100))
280 send_fragment(fd_raw, addr, alen, offset, ipv6);
246 offset += 2 * max_frag_len; 281 offset += 2 * max_frag_len;
247 } 282 }
248 283
249 if (cfg_overlap) { 284 if (cfg_overlap) {
250 /* Send an extra random fragment. */ 285 /* Send an extra random fragment. */
251 offset = rand() % (UDP_HLEN + payload_len - 1);
252 /* sendto() returns EINVAL if offset + frag_len is too small. */
253 if (ipv6) { 286 if (ipv6) {
254 struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN); 287 struct ip6_frag *fraghdr = (struct ip6_frag *)(ip_frame + IP6_HLEN);
288 /* sendto() returns EINVAL if offset + frag_len is too small. */
289 offset = rand() % (UDP_HLEN + payload_len - 1);
255 frag_len = max_frag_len + rand() % 256; 290 frag_len = max_frag_len + rand() % 256;
256 /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */ 291 /* In IPv6 if !!(frag_len % 8), the fragment is dropped. */
257 frag_len &= ~0x7; 292 frag_len &= ~0x7;
@@ -259,13 +294,29 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
259 ip6hdr->ip6_plen = htons(frag_len); 294 ip6hdr->ip6_plen = htons(frag_len);
260 frag_len += IP6_HLEN; 295 frag_len += IP6_HLEN;
261 } else { 296 } else {
262 frag_len = IP4_HLEN + UDP_HLEN + rand() % 256; 297 /* In IPv4, duplicates and some fragments completely inside
298 * previously sent fragments are dropped/ignored. So
299 * random offset and frag_len can result in a dropped
300 * fragment instead of a dropped queue/packet. So we
301 * hard-code offset and frag_len.
302 *
303 * See ade446403bfb ("net: ipv4: do not handle duplicate
304 * fragments as overlapping").
305 */
306 if (max_frag_len * 4 < payload_len || max_frag_len < 16) {
307 /* not enough payload to play with random offset and frag_len. */
308 offset = 8;
309 frag_len = IP4_HLEN + UDP_HLEN + max_frag_len;
310 } else {
311 offset = rand() % (payload_len / 2);
312 frag_len = 2 * max_frag_len + 1 + rand() % 256;
313 }
263 iphdr->ip_off = htons(offset / 8 | IP4_MF); 314 iphdr->ip_off = htons(offset / 8 | IP4_MF);
264 iphdr->ip_len = htons(frag_len); 315 iphdr->ip_len = htons(frag_len);
265 } 316 }
266 res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen); 317 res = sendto(fd_raw, ip_frame, frag_len, 0, addr, alen);
267 if (res < 0) 318 if (res < 0)
268 error(1, errno, "sendto overlap"); 319 error(1, errno, "sendto overlap: %d", frag_len);
269 if (res != frag_len) 320 if (res != frag_len)
270 error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len); 321 error(1, 0, "sendto overlap: %d vs %d", (int)res, frag_len);
271 frag_counter++; 322 frag_counter++;
@@ -275,6 +326,9 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
275 offset = 0; 326 offset = 0;
276 while (offset < (UDP_HLEN + payload_len)) { 327 while (offset < (UDP_HLEN + payload_len)) {
277 send_fragment(fd_raw, addr, alen, offset, ipv6); 328 send_fragment(fd_raw, addr, alen, offset, ipv6);
329 /* IPv4 ignores duplicates, so randomly send a duplicate. */
330 if (ipv4 && (1 == rand() % 100))
331 send_fragment(fd_raw, addr, alen, offset, ipv6);
278 offset += 2 * max_frag_len; 332 offset += 2 * max_frag_len;
279 } 333 }
280} 334}
@@ -282,7 +336,11 @@ static void send_udp_frags(int fd_raw, struct sockaddr *addr,
282static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6) 336static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
283{ 337{
284 int fd_tx_raw, fd_rx_udp; 338 int fd_tx_raw, fd_rx_udp;
285 struct timeval tv = { .tv_sec = 0, .tv_usec = 10 * 1000 }; 339 /* Frag queue timeout is set to one second in the calling script;
340 * socket timeout should be just a bit longer to avoid tests interfering
341 * with each other.
342 */
343 struct timeval tv = { .tv_sec = 1, .tv_usec = 10 };
286 int idx; 344 int idx;
287 int min_frag_len = ipv6 ? 1280 : 8; 345 int min_frag_len = ipv6 ? 1280 : 8;
288 346
@@ -308,12 +366,32 @@ static void run_test(struct sockaddr *addr, socklen_t alen, bool ipv6)
308 payload_len += (rand() % 4096)) { 366 payload_len += (rand() % 4096)) {
309 if (cfg_verbose) 367 if (cfg_verbose)
310 printf("payload_len: %d\n", payload_len); 368 printf("payload_len: %d\n", payload_len);
311 max_frag_len = min_frag_len; 369
312 do { 370 if (cfg_overlap) {
371 /* With overlaps, one send/receive pair below takes
372 * at least one second (== timeout) to run, so there
373 * is not enough test time to run a nested loop:
374 * the full overlap test takes 20-30 seconds.
375 */
376 max_frag_len = min_frag_len +
377 rand() % (1500 - FRAG_HLEN - min_frag_len);
313 send_udp_frags(fd_tx_raw, addr, alen, ipv6); 378 send_udp_frags(fd_tx_raw, addr, alen, ipv6);
314 recv_validate_udp(fd_rx_udp); 379 recv_validate_udp(fd_rx_udp);
315 max_frag_len += 8 * (rand() % 8); 380 } else {
316 } while (max_frag_len < (1500 - FRAG_HLEN) && max_frag_len <= payload_len); 381 /* Without overlaps, each packet reassembly (== one
382 * send/receive pair below) takes very little time to
383 * run, so we can easily afford more thourough testing
384 * with a nested loop: the full non-overlap test takes
385 * less than one second).
386 */
387 max_frag_len = min_frag_len;
388 do {
389 send_udp_frags(fd_tx_raw, addr, alen, ipv6);
390 recv_validate_udp(fd_rx_udp);
391 max_frag_len += 8 * (rand() % 8);
392 } while (max_frag_len < (1500 - FRAG_HLEN) &&
393 max_frag_len <= payload_len);
394 }
317 } 395 }
318 396
319 /* Cleanup. */ 397 /* Cleanup. */
diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
index f34672796044..7dd79a9efb17 100755
--- a/tools/testing/selftests/net/ip_defrag.sh
+++ b/tools/testing/selftests/net/ip_defrag.sh
@@ -11,10 +11,17 @@ readonly NETNS="ns-$(mktemp -u XXXXXX)"
11setup() { 11setup() {
12 ip netns add "${NETNS}" 12 ip netns add "${NETNS}"
13 ip -netns "${NETNS}" link set lo up 13 ip -netns "${NETNS}" link set lo up
14
14 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1 15 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_high_thresh=9000000 >/dev/null 2>&1
15 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1 16 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_low_thresh=7000000 >/dev/null 2>&1
17 ip netns exec "${NETNS}" sysctl -w net.ipv4.ipfrag_time=1 >/dev/null 2>&1
18
16 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1 19 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_high_thresh=9000000 >/dev/null 2>&1
17 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1 20 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_low_thresh=7000000 >/dev/null 2>&1
21 ip netns exec "${NETNS}" sysctl -w net.ipv6.ip6frag_time=1 >/dev/null 2>&1
22
23 # DST cache can get full with a lot of frags, with GC not keeping up with the test.
24 ip netns exec "${NETNS}" sysctl -w net.ipv6.route.max_size=65536 >/dev/null 2>&1
18} 25}
19 26
20cleanup() { 27cleanup() {
@@ -27,7 +34,6 @@ setup
27echo "ipv4 defrag" 34echo "ipv4 defrag"
28ip netns exec "${NETNS}" ./ip_defrag -4 35ip netns exec "${NETNS}" ./ip_defrag -4
29 36
30
31echo "ipv4 defrag with overlaps" 37echo "ipv4 defrag with overlaps"
32ip netns exec "${NETNS}" ./ip_defrag -4o 38ip netns exec "${NETNS}" ./ip_defrag -4o
33 39
@@ -37,3 +43,4 @@ ip netns exec "${NETNS}" ./ip_defrag -6
37echo "ipv6 defrag with overlaps" 43echo "ipv6 defrag with overlaps"
38ip netns exec "${NETNS}" ./ip_defrag -6o 44ip netns exec "${NETNS}" ./ip_defrag -6o
39 45
46echo "all tests done"
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 2e563d17cf0c..d1bbafb16f47 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
@@ -240,7 +240,7 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
240 cm->cmsg_type == IP_RECVERR) || 240 cm->cmsg_type == IP_RECVERR) ||
241 (cm->cmsg_level == SOL_IPV6 && 241 (cm->cmsg_level == SOL_IPV6 &&
242 cm->cmsg_type == IPV6_RECVERR) || 242 cm->cmsg_type == IPV6_RECVERR) ||
243 (cm->cmsg_level = SOL_PACKET && 243 (cm->cmsg_level == SOL_PACKET &&
244 cm->cmsg_type == PACKET_TX_TIMESTAMP)) { 244 cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
245 serr = (void *) CMSG_DATA(cm); 245 serr = (void *) CMSG_DATA(cm);
246 if (serr->ee_errno != ENOMSG || 246 if (serr->ee_errno != ENOMSG ||
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 637ea0219617..0da3545cabdb 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -17,7 +17,7 @@
17 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2", 17 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
18 "expExitCode": "0", 18 "expExitCode": "0",
19 "verifyCmd": "$TC actions get action ife index 2", 19 "verifyCmd": "$TC actions get action ife index 2",
20 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2", 20 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
21 "matchCount": "1", 21 "matchCount": "1",
22 "teardown": [ 22 "teardown": [
23 "$TC actions flush action ife" 23 "$TC actions flush action ife"
@@ -41,7 +41,7 @@
41 "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2", 41 "cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
42 "expExitCode": "0", 42 "expExitCode": "0",
43 "verifyCmd": "$TC actions get action ife index 2", 43 "verifyCmd": "$TC actions get action ife index 2",
44 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2", 44 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
45 "matchCount": "1", 45 "matchCount": "1",
46 "teardown": [ 46 "teardown": [
47 "$TC actions flush action ife" 47 "$TC actions flush action ife"
@@ -65,7 +65,7 @@
65 "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2", 65 "cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
66 "expExitCode": "0", 66 "expExitCode": "0",
67 "verifyCmd": "$TC actions get action ife index 2", 67 "verifyCmd": "$TC actions get action ife index 2",
68 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2", 68 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
69 "matchCount": "1", 69 "matchCount": "1",
70 "teardown": [ 70 "teardown": [
71 "$TC actions flush action ife" 71 "$TC actions flush action ife"
@@ -89,7 +89,7 @@
89 "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2", 89 "cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
90 "expExitCode": "0", 90 "expExitCode": "0",
91 "verifyCmd": "$TC actions get action ife index 2", 91 "verifyCmd": "$TC actions get action ife index 2",
92 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2", 92 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
93 "matchCount": "1", 93 "matchCount": "1",
94 "teardown": [ 94 "teardown": [
95 "$TC actions flush action ife" 95 "$TC actions flush action ife"
@@ -113,7 +113,7 @@
113 "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2", 113 "cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
114 "expExitCode": "0", 114 "expExitCode": "0",
115 "verifyCmd": "$TC actions get action ife index 2", 115 "verifyCmd": "$TC actions get action ife index 2",
116 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2", 116 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
117 "matchCount": "1", 117 "matchCount": "1",
118 "teardown": [ 118 "teardown": [
119 "$TC actions flush action ife" 119 "$TC actions flush action ife"
@@ -137,7 +137,7 @@
137 "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2", 137 "cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
138 "expExitCode": "0", 138 "expExitCode": "0",
139 "verifyCmd": "$TC actions get action ife index 2", 139 "verifyCmd": "$TC actions get action ife index 2",
140 "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2", 140 "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
141 "matchCount": "1", 141 "matchCount": "1",
142 "teardown": [ 142 "teardown": [
143 "$TC actions flush action ife" 143 "$TC actions flush action ife"
@@ -161,7 +161,7 @@
161 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90", 161 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
162 "expExitCode": "0", 162 "expExitCode": "0",
163 "verifyCmd": "$TC actions get action ife index 90", 163 "verifyCmd": "$TC actions get action ife index 90",
164 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90", 164 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
165 "matchCount": "1", 165 "matchCount": "1",
166 "teardown": [ 166 "teardown": [
167 "$TC actions flush action ife" 167 "$TC actions flush action ife"
@@ -185,7 +185,7 @@
185 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90", 185 "cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
186 "expExitCode": "255", 186 "expExitCode": "255",
187 "verifyCmd": "$TC actions get action ife index 90", 187 "verifyCmd": "$TC actions get action ife index 90",
188 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90", 188 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
189 "matchCount": "0", 189 "matchCount": "0",
190 "teardown": [] 190 "teardown": []
191 }, 191 },
@@ -207,7 +207,7 @@
207 "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9", 207 "cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
208 "expExitCode": "0", 208 "expExitCode": "0",
209 "verifyCmd": "$TC actions get action ife index 9", 209 "verifyCmd": "$TC actions get action ife index 9",
210 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9", 210 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
211 "matchCount": "1", 211 "matchCount": "1",
212 "teardown": [ 212 "teardown": [
213 "$TC actions flush action ife" 213 "$TC actions flush action ife"
@@ -231,7 +231,7 @@
231 "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9", 231 "cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
232 "expExitCode": "0", 232 "expExitCode": "0",
233 "verifyCmd": "$TC actions get action ife index 9", 233 "verifyCmd": "$TC actions get action ife index 9",
234 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9", 234 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
235 "matchCount": "1", 235 "matchCount": "1",
236 "teardown": [ 236 "teardown": [
237 "$TC actions flush action ife" 237 "$TC actions flush action ife"
@@ -255,7 +255,7 @@
255 "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9", 255 "cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
256 "expExitCode": "0", 256 "expExitCode": "0",
257 "verifyCmd": "$TC actions get action ife index 9", 257 "verifyCmd": "$TC actions get action ife index 9",
258 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9", 258 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
259 "matchCount": "1", 259 "matchCount": "1",
260 "teardown": [ 260 "teardown": [
261 "$TC actions flush action ife" 261 "$TC actions flush action ife"
@@ -279,7 +279,7 @@
279 "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9", 279 "cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
280 "expExitCode": "0", 280 "expExitCode": "0",
281 "verifyCmd": "$TC actions get action ife index 9", 281 "verifyCmd": "$TC actions get action ife index 9",
282 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9", 282 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
283 "matchCount": "1", 283 "matchCount": "1",
284 "teardown": [ 284 "teardown": [
285 "$TC actions flush action ife" 285 "$TC actions flush action ife"
@@ -303,7 +303,7 @@
303 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9", 303 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
304 "expExitCode": "0", 304 "expExitCode": "0",
305 "verifyCmd": "$TC actions get action ife index 9", 305 "verifyCmd": "$TC actions get action ife index 9",
306 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9", 306 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
307 "matchCount": "1", 307 "matchCount": "1",
308 "teardown": [ 308 "teardown": [
309 "$TC actions flush action ife" 309 "$TC actions flush action ife"
@@ -327,7 +327,7 @@
327 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9", 327 "cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
328 "expExitCode": "0", 328 "expExitCode": "0",
329 "verifyCmd": "$TC actions get action ife index 9", 329 "verifyCmd": "$TC actions get action ife index 9",
330 "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9", 330 "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
331 "matchCount": "1", 331 "matchCount": "1",
332 "teardown": [ 332 "teardown": [
333 "$TC actions flush action ife" 333 "$TC actions flush action ife"
@@ -351,7 +351,7 @@
351 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99", 351 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
352 "expExitCode": "0", 352 "expExitCode": "0",
353 "verifyCmd": "$TC actions get action ife index 99", 353 "verifyCmd": "$TC actions get action ife index 99",
354 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99", 354 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
355 "matchCount": "1", 355 "matchCount": "1",
356 "teardown": [ 356 "teardown": [
357 "$TC actions flush action ife" 357 "$TC actions flush action ife"
@@ -375,7 +375,7 @@
375 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99", 375 "cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
376 "expExitCode": "255", 376 "expExitCode": "255",
377 "verifyCmd": "$TC actions get action ife index 99", 377 "verifyCmd": "$TC actions get action ife index 99",
378 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99", 378 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
379 "matchCount": "0", 379 "matchCount": "0",
380 "teardown": [] 380 "teardown": []
381 }, 381 },
@@ -397,7 +397,7 @@
397 "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1", 397 "cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
398 "expExitCode": "0", 398 "expExitCode": "0",
399 "verifyCmd": "$TC actions get action ife index 1", 399 "verifyCmd": "$TC actions get action ife index 1",
400 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1", 400 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
401 "matchCount": "1", 401 "matchCount": "1",
402 "teardown": [ 402 "teardown": [
403 "$TC actions flush action ife" 403 "$TC actions flush action ife"
@@ -421,7 +421,7 @@
421 "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1", 421 "cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
422 "expExitCode": "0", 422 "expExitCode": "0",
423 "verifyCmd": "$TC actions get action ife index 1", 423 "verifyCmd": "$TC actions get action ife index 1",
424 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1", 424 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
425 "matchCount": "1", 425 "matchCount": "1",
426 "teardown": [ 426 "teardown": [
427 "$TC actions flush action ife" 427 "$TC actions flush action ife"
@@ -445,7 +445,7 @@
445 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", 445 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
446 "expExitCode": "0", 446 "expExitCode": "0",
447 "verifyCmd": "$TC actions get action ife index 1", 447 "verifyCmd": "$TC actions get action ife index 1",
448 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", 448 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
449 "matchCount": "1", 449 "matchCount": "1",
450 "teardown": [ 450 "teardown": [
451 "$TC actions flush action ife" 451 "$TC actions flush action ife"
@@ -469,7 +469,7 @@
469 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1", 469 "cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
470 "expExitCode": "0", 470 "expExitCode": "0",
471 "verifyCmd": "$TC actions get action ife index 1", 471 "verifyCmd": "$TC actions get action ife index 1",
472 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1", 472 "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
473 "matchCount": "1", 473 "matchCount": "1",
474 "teardown": [ 474 "teardown": [
475 "$TC actions flush action ife" 475 "$TC actions flush action ife"
@@ -493,7 +493,7 @@
493 "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77", 493 "cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
494 "expExitCode": "0", 494 "expExitCode": "0",
495 "verifyCmd": "$TC actions get action ife index 77", 495 "verifyCmd": "$TC actions get action ife index 77",
496 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77", 496 "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
497 "matchCount": "1", 497 "matchCount": "1",
498 "teardown": [ 498 "teardown": [
499 "$TC actions flush action ife" 499 "$TC actions flush action ife"
@@ -517,7 +517,7 @@
517 "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77", 517 "cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
518 "expExitCode": "0", 518 "expExitCode": "0",
519 "verifyCmd": "$TC actions get action ife index 77", 519 "verifyCmd": "$TC actions get action ife index 77",
520 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77", 520 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
521 "matchCount": "1", 521 "matchCount": "1",
522 "teardown": [ 522 "teardown": [
523 "$TC actions flush action ife" 523 "$TC actions flush action ife"
@@ -541,7 +541,7 @@
541 "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77", 541 "cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
542 "expExitCode": "0", 542 "expExitCode": "0",
543 "verifyCmd": "$TC actions get action ife index 77", 543 "verifyCmd": "$TC actions get action ife index 77",
544 "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77", 544 "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
545 "matchCount": "1", 545 "matchCount": "1",
546 "teardown": [ 546 "teardown": [
547 "$TC actions flush action ife" 547 "$TC actions flush action ife"
@@ -565,7 +565,7 @@
565 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1", 565 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
566 "expExitCode": "0", 566 "expExitCode": "0",
567 "verifyCmd": "$TC actions get action ife index 1", 567 "verifyCmd": "$TC actions get action ife index 1",
568 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1", 568 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
569 "matchCount": "1", 569 "matchCount": "1",
570 "teardown": [ 570 "teardown": [
571 "$TC actions flush action ife" 571 "$TC actions flush action ife"
@@ -589,7 +589,7 @@
589 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1", 589 "cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
590 "expExitCode": "255", 590 "expExitCode": "255",
591 "verifyCmd": "$TC actions get action ife index 1", 591 "verifyCmd": "$TC actions get action ife index 1",
592 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1", 592 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
593 "matchCount": "0", 593 "matchCount": "0",
594 "teardown": [] 594 "teardown": []
595 }, 595 },
@@ -611,7 +611,7 @@
611 "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1", 611 "cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
612 "expExitCode": "0", 612 "expExitCode": "0",
613 "verifyCmd": "$TC actions get action ife index 1", 613 "verifyCmd": "$TC actions get action ife index 1",
614 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1", 614 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
615 "matchCount": "1", 615 "matchCount": "1",
616 "teardown": [ 616 "teardown": [
617 "$TC actions flush action ife" 617 "$TC actions flush action ife"
@@ -635,7 +635,7 @@
635 "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1", 635 "cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
636 "expExitCode": "0", 636 "expExitCode": "0",
637 "verifyCmd": "$TC actions get action ife index 1", 637 "verifyCmd": "$TC actions get action ife index 1",
638 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1", 638 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
639 "matchCount": "1", 639 "matchCount": "1",
640 "teardown": [ 640 "teardown": [
641 "$TC actions flush action ife" 641 "$TC actions flush action ife"
@@ -659,7 +659,7 @@
659 "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11", 659 "cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
660 "expExitCode": "0", 660 "expExitCode": "0",
661 "verifyCmd": "$TC actions get action ife index 11", 661 "verifyCmd": "$TC actions get action ife index 11",
662 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11", 662 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
663 "matchCount": "1", 663 "matchCount": "1",
664 "teardown": [ 664 "teardown": [
665 "$TC actions flush action ife" 665 "$TC actions flush action ife"
@@ -683,7 +683,7 @@
683 "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1", 683 "cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
684 "expExitCode": "0", 684 "expExitCode": "0",
685 "verifyCmd": "$TC actions get action ife index 1", 685 "verifyCmd": "$TC actions get action ife index 1",
686 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1", 686 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
687 "matchCount": "1", 687 "matchCount": "1",
688 "teardown": [ 688 "teardown": [
689 "$TC actions flush action ife" 689 "$TC actions flush action ife"
@@ -707,7 +707,7 @@
707 "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21", 707 "cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
708 "expExitCode": "0", 708 "expExitCode": "0",
709 "verifyCmd": "$TC actions get action ife index 21", 709 "verifyCmd": "$TC actions get action ife index 21",
710 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21", 710 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
711 "matchCount": "1", 711 "matchCount": "1",
712 "teardown": [ 712 "teardown": [
713 "$TC actions flush action ife" 713 "$TC actions flush action ife"
@@ -731,7 +731,7 @@
731 "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21", 731 "cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
732 "expExitCode": "0", 732 "expExitCode": "0",
733 "verifyCmd": "$TC actions get action ife index 21", 733 "verifyCmd": "$TC actions get action ife index 21",
734 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21", 734 "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
735 "matchCount": "1", 735 "matchCount": "1",
736 "teardown": [ 736 "teardown": [
737 "$TC actions flush action ife" 737 "$TC actions flush action ife"
@@ -739,7 +739,7 @@
739 }, 739 },
740 { 740 {
741 "id": "fac3", 741 "id": "fac3",
742 "name": "Create valid ife encode action with index at 32-bit maximnum", 742 "name": "Create valid ife encode action with index at 32-bit maximum",
743 "category": [ 743 "category": [
744 "actions", 744 "actions",
745 "ife" 745 "ife"
@@ -755,7 +755,7 @@
755 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295", 755 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
756 "expExitCode": "0", 756 "expExitCode": "0",
757 "verifyCmd": "$TC actions get action ife index 4294967295", 757 "verifyCmd": "$TC actions get action ife index 4294967295",
758 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295", 758 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
759 "matchCount": "1", 759 "matchCount": "1",
760 "teardown": [ 760 "teardown": [
761 "$TC actions flush action ife" 761 "$TC actions flush action ife"
@@ -779,7 +779,7 @@
779 "cmdUnderTest": "$TC actions add action ife decode pass index 1", 779 "cmdUnderTest": "$TC actions add action ife decode pass index 1",
780 "expExitCode": "0", 780 "expExitCode": "0",
781 "verifyCmd": "$TC actions get action ife index 1", 781 "verifyCmd": "$TC actions get action ife index 1",
782 "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 782 "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
783 "matchCount": "1", 783 "matchCount": "1",
784 "teardown": [ 784 "teardown": [
785 "$TC actions flush action ife" 785 "$TC actions flush action ife"
@@ -803,7 +803,7 @@
803 "cmdUnderTest": "$TC actions add action ife decode pipe index 1", 803 "cmdUnderTest": "$TC actions add action ife decode pipe index 1",
804 "expExitCode": "0", 804 "expExitCode": "0",
805 "verifyCmd": "$TC actions get action ife index 1", 805 "verifyCmd": "$TC actions get action ife index 1",
806 "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 806 "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
807 "matchCount": "1", 807 "matchCount": "1",
808 "teardown": [ 808 "teardown": [
809 "$TC actions flush action ife" 809 "$TC actions flush action ife"
@@ -827,7 +827,7 @@
827 "cmdUnderTest": "$TC actions add action ife decode continue index 1", 827 "cmdUnderTest": "$TC actions add action ife decode continue index 1",
828 "expExitCode": "0", 828 "expExitCode": "0",
829 "verifyCmd": "$TC actions get action ife index 1", 829 "verifyCmd": "$TC actions get action ife index 1",
830 "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 830 "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
831 "matchCount": "1", 831 "matchCount": "1",
832 "teardown": [ 832 "teardown": [
833 "$TC actions flush action ife" 833 "$TC actions flush action ife"
@@ -851,7 +851,7 @@
851 "cmdUnderTest": "$TC actions add action ife decode drop index 1", 851 "cmdUnderTest": "$TC actions add action ife decode drop index 1",
852 "expExitCode": "0", 852 "expExitCode": "0",
853 "verifyCmd": "$TC actions get action ife index 1", 853 "verifyCmd": "$TC actions get action ife index 1",
854 "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 854 "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
855 "matchCount": "1", 855 "matchCount": "1",
856 "teardown": [ 856 "teardown": [
857 "$TC actions flush action ife" 857 "$TC actions flush action ife"
@@ -875,7 +875,7 @@
875 "cmdUnderTest": "$TC actions add action ife decode reclassify index 1", 875 "cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
876 "expExitCode": "0", 876 "expExitCode": "0",
877 "verifyCmd": "$TC actions get action ife index 1", 877 "verifyCmd": "$TC actions get action ife index 1",
878 "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 878 "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
879 "matchCount": "1", 879 "matchCount": "1",
880 "teardown": [ 880 "teardown": [
881 "$TC actions flush action ife" 881 "$TC actions flush action ife"
@@ -899,7 +899,7 @@
899 "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1", 899 "cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
900 "expExitCode": "0", 900 "expExitCode": "0",
901 "verifyCmd": "$TC actions get action ife index 1", 901 "verifyCmd": "$TC actions get action ife index 1",
902 "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1", 902 "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
903 "matchCount": "1", 903 "matchCount": "1",
904 "teardown": [ 904 "teardown": [
905 "$TC actions flush action ife" 905 "$TC actions flush action ife"
@@ -923,7 +923,7 @@
923 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999", 923 "cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
924 "expExitCode": "255", 924 "expExitCode": "255",
925 "verifyCmd": "$TC actions get action ife index 4294967295999", 925 "verifyCmd": "$TC actions get action ife index 4294967295999",
926 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999", 926 "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
927 "matchCount": "0", 927 "matchCount": "0",
928 "teardown": [] 928 "teardown": []
929 }, 929 },
@@ -945,7 +945,7 @@
945 "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4", 945 "cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
946 "expExitCode": "255", 946 "expExitCode": "255",
947 "verifyCmd": "$TC actions get action ife index 4", 947 "verifyCmd": "$TC actions get action ife index 4",
948 "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4", 948 "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
949 "matchCount": "0", 949 "matchCount": "0",
950 "teardown": [] 950 "teardown": []
951 }, 951 },
@@ -967,7 +967,7 @@
967 "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1", 967 "cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
968 "expExitCode": "0", 968 "expExitCode": "0",
969 "verifyCmd": "$TC actions get action ife index 4", 969 "verifyCmd": "$TC actions get action ife index 4",
970 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1", 970 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
971 "matchCount": "1", 971 "matchCount": "1",
972 "teardown": [ 972 "teardown": [
973 "$TC actions flush action ife" 973 "$TC actions flush action ife"
@@ -991,7 +991,7 @@
991 "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4", 991 "cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
992 "expExitCode": "255", 992 "expExitCode": "255",
993 "verifyCmd": "$TC actions get action ife index 4", 993 "verifyCmd": "$TC actions get action ife index 4",
994 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4", 994 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
995 "matchCount": "0", 995 "matchCount": "0",
996 "teardown": [] 996 "teardown": []
997 }, 997 },
@@ -1013,7 +1013,7 @@
1013 "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4", 1013 "cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
1014 "expExitCode": "255", 1014 "expExitCode": "255",
1015 "verifyCmd": "$TC actions get action ife index 4", 1015 "verifyCmd": "$TC actions get action ife index 4",
1016 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4", 1016 "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
1017 "matchCount": "0", 1017 "matchCount": "0",
1018 "teardown": [] 1018 "teardown": []
1019 }, 1019 },
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d894e436..e7e15a7336b6 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -82,35 +82,6 @@
82 ] 82 ]
83 }, 83 },
84 { 84 {
85 "id": "ba4e",
86 "name": "Add tunnel_key set action with missing mandatory id parameter",
87 "category": [
88 "actions",
89 "tunnel_key"
90 ],
91 "setup": [
92 [
93 "$TC actions flush action tunnel_key",
94 0,
95 1,
96 255
97 ]
98 ],
99 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
100 "expExitCode": "255",
101 "verifyCmd": "$TC actions list action tunnel_key",
102 "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
103 "matchCount": "0",
104 "teardown": [
105 [
106 "$TC actions flush action tunnel_key",
107 0,
108 1,
109 255
110 ]
111 ]
112 },
113 {
114 "id": "a5e0", 85 "id": "a5e0",
115 "name": "Add tunnel_key set action with invalid src_ip parameter", 86 "name": "Add tunnel_key set action with invalid src_ip parameter",
116 "category": [ 87 "category": [
@@ -634,7 +605,7 @@
634 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 605 "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
635 "expExitCode": "0", 606 "expExitCode": "0",
636 "verifyCmd": "$TC actions get action tunnel_key index 4", 607 "verifyCmd": "$TC actions get action tunnel_key index 4",
637 "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2", 608 "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
638 "matchCount": "1", 609 "matchCount": "1",
639 "teardown": [ 610 "teardown": [
640 "$TC actions flush action tunnel_key" 611 "$TC actions flush action tunnel_key"
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 89a2444c1df2..59e417ec3e13 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -6,7 +6,7 @@ VERSION = 1.0
6 6
7BINDIR=usr/bin 7BINDIR=usr/bin
8WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int 8WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int
9override CFLAGS+= -O1 ${WARNFLAGS} 9override CFLAGS+= $(call cc-option,-O3,-O1) ${WARNFLAGS}
10# Add "-fstack-protector" only if toolchain supports it. 10# Add "-fstack-protector" only if toolchain supports it.
11override CFLAGS+= $(call cc-option,-fstack-protector-strong) 11override CFLAGS+= $(call cc-option,-fstack-protector-strong)
12CC?= $(CROSS_COMPILE)gcc 12CC?= $(CROSS_COMPILE)gcc
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 18fc112b65cd..d3a8755c039c 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -5,7 +5,9 @@
5 * Example use: 5 * Example use:
6 * cat /sys/kernel/debug/page_owner > page_owner_full.txt 6 * cat /sys/kernel/debug/page_owner > page_owner_full.txt
7 * grep -v ^PFN page_owner_full.txt > page_owner.txt 7 * grep -v ^PFN page_owner_full.txt > page_owner.txt
8 * ./sort page_owner.txt sorted_page_owner.txt 8 * ./page_owner_sort page_owner.txt sorted_page_owner.txt
9 *
10 * See Documentation/vm/page_owner.rst
9*/ 11*/
10 12
11#include <stdio.h> 13#include <stdio.h>
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1f888a103f78..5ecea812cb6a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1227,9 +1227,9 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
1227{ 1227{
1228 struct kvm_memslots *slots; 1228 struct kvm_memslots *slots;
1229 struct kvm_memory_slot *memslot; 1229 struct kvm_memory_slot *memslot;
1230 int as_id, id, n; 1230 int as_id, id;
1231 gfn_t offset; 1231 gfn_t offset;
1232 unsigned long i; 1232 unsigned long i, n;
1233 unsigned long *dirty_bitmap; 1233 unsigned long *dirty_bitmap;
1234 unsigned long *dirty_bitmap_buffer; 1234 unsigned long *dirty_bitmap_buffer;
1235 1235
@@ -1249,6 +1249,11 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm,
1249 return -ENOENT; 1249 return -ENOENT;
1250 1250
1251 n = kvm_dirty_bitmap_bytes(memslot); 1251 n = kvm_dirty_bitmap_bytes(memslot);
1252
1253 if (log->first_page > memslot->npages ||
1254 log->num_pages > memslot->npages - log->first_page)
1255 return -EINVAL;
1256
1252 *flush = false; 1257 *flush = false;
1253 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot); 1258 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
1254 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n)) 1259 if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))