Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/arm64/iort.c5
-rw-r--r--drivers/acpi/bus.c27
-rw-r--r--drivers/acpi/cppc_acpi.c42
-rw-r--r--drivers/acpi/internal.h4
-rw-r--r--drivers/acpi/nfit/core.c86
-rw-r--r--drivers/acpi/nfit/intel.c8
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/pmic/intel_pmic_xpower.c41
-rw-r--r--drivers/acpi/power.c22
-rw-r--r--drivers/android/binder.c37
-rw-r--r--drivers/android/binder_internal.h9
-rw-r--r--drivers/android/binderfs.c296
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_mvebu.c87
-rw-r--r--drivers/ata/libahci_platform.c13
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/pata_macio.c9
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/ata/sata_inic162x.c22
-rw-r--r--drivers/atm/he.c41
-rw-r--r--drivers/atm/idt77252.c16
-rw-r--r--drivers/auxdisplay/ht16k33.c2
-rw-r--r--drivers/base/cacheinfo.c6
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/base/power/runtime.c21
-rw-r--r--drivers/base/regmap/regmap-irq.c8
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c35
-rw-r--r--drivers/block/nbd.c5
-rw-r--r--drivers/block/null_blk.h1
-rw-r--r--drivers/block/rbd.c9
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/zram/zram_drv.c90
-rw-r--r--drivers/block/zram/zram_drv.h5
-rw-r--r--drivers/bus/ti-sysc.c6
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c173
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c25
-rw-r--r--drivers/char/mwave/mwavedd.c7
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/clk-versaclock5.c4
-rw-r--r--drivers/clk/clk.c16
-rw-r--r--drivers/clk/imx/clk-frac-pll.c5
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c2
-rw-r--r--drivers/clk/mmp/clk-of-mmp2.c4
-rw-r--r--drivers/clk/qcom/Kconfig1
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c14
-rw-r--r--drivers/clk/socfpga/clk-pll-s10.c2
-rw-r--r--drivers/clk/socfpga/clk-s10.c20
-rw-r--r--drivers/clk/tegra/clk-tegra124-dfll-fcpu.c4
-rw-r--r--drivers/clk/ti/divider.c11
-rw-r--r--drivers/clk/zynqmp/clkc.c4
-rw-r--r--drivers/clocksource/timer-ti-dm.c5
-rw-r--r--drivers/cpufreq/Kconfig3
-rw-r--r--drivers/cpufreq/Kconfig.arm5
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c4
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c65
-rw-r--r--drivers/cpufreq/cpufreq-dt.c31
-rw-r--r--drivers/cpufreq/cpufreq.c146
-rw-r--r--drivers/cpufreq/cpufreq_stats.c16
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c5
-rw-r--r--drivers/cpufreq/e_powersaver.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c24
-rw-r--r--drivers/cpufreq/intel_pstate.c105
-rw-r--r--drivers/cpufreq/longhaul.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq.c14
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c10
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c53
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c2
-rw-r--r--drivers/cpufreq/qoriq-cpufreq.c15
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c22
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c18
-rw-r--r--drivers/cpufreq/speedstep-ich.c3
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c2
-rw-r--r--drivers/cpuidle/poll_state.c2
-rw-r--r--drivers/crypto/Kconfig1
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c6
-rw-r--r--drivers/crypto/bcm/cipher.c44
-rw-r--r--drivers/crypto/caam/caamalg.c2
-rw-r--r--drivers/crypto/caam/caamhash.c15
-rw-r--r--drivers/crypto/caam/desc.h1
-rw-r--r--drivers/crypto/caam/error.h9
-rw-r--r--drivers/crypto/cavium/cpt/cptpf_main.c4
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_main.c7
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c10
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c6
-rw-r--r--drivers/crypto/ccree/cc_aead.c40
-rw-r--r--drivers/crypto/ccree/cc_driver.c7
-rw-r--r--drivers/crypto/ccree/cc_pm.c13
-rw-r--r--drivers/crypto/ccree/cc_pm.h3
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c4
-rw-r--r--drivers/crypto/hisilicon/sec/sec_drv.c15
-rw-r--r--drivers/crypto/ixp4xx_crypto.c6
-rw-r--r--drivers/crypto/mediatek/mtk-platform.c16
-rw-r--r--drivers/crypto/qat/qat_common/adf_admin.c12
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/crypto/qat/qat_common/qat_asym_algs.c68
-rw-r--r--drivers/crypto/talitos.c26
-rw-r--r--drivers/dma/at_xdmac.c19
-rw-r--r--drivers/dma/bcm2835-dma.c70
-rw-r--r--drivers/dma/dmatest.c32
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/imx-sdma.c8
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mxs-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c4
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c14
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c6
-rw-r--r--drivers/edac/altera_edac.h4
-rw-r--r--drivers/firewire/sbp2.c5
-rw-r--r--drivers/firmware/arm_scmi/bus.c9
-rw-r--r--drivers/firmware/efi/arm-runtime.c5
-rw-r--r--drivers/firmware/efi/efi.c4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/runtime-wrappers.c7
-rw-r--r--drivers/fpga/stratix10-soc.c5
-rw-r--r--drivers/gpio/gpio-altera-a10sr.c4
-rw-r--r--drivers/gpio/gpio-eic-sprd.c14
-rw-r--r--drivers/gpio/gpio-pca953x.c2
-rw-r--r--drivers/gpio/gpio-pcf857x.c26
-rw-r--r--drivers/gpio/gpio-vf610.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c7
-rw-r--r--drivers/gpio/gpiolib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c44
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c19
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h13
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c34
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c48
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c3
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c133
-rw-r--r--drivers/gpu/drm/drm_lease.c3
-rw-r--r--drivers/gpu/drm/drm_mode_object.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c30
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c75
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c23
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c22
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h18
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c240
-rw-r--r--drivers/gpu/drm/i915/intel_display.c50
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h10
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c33
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c38
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h9
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c2
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c25
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c4
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c23
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c26
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/falcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c27
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c14
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c15
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.h11
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c14
-rw-r--r--drivers/gpu/drm/vkms/vkms_crc.c3
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c7
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h2
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c81
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c6
-rw-r--r--drivers/gpu/vga/Kconfig1
-rw-r--r--drivers/hid/hid-core.c23
-rw-r--r--drivers/hid/hid-debug.c120
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hv/channel.c9
-rw-r--r--drivers/hv/hv_balloon.c10
-rw-r--r--drivers/hv/ring_buffer.c31
-rw-r--r--drivers/hv/vmbus_drv.c91
-rw-r--r--drivers/hwmon/lm80.c4
-rw-r--r--drivers/hwmon/nct6775.c15
-rw-r--r--drivers/hwmon/occ/common.c24
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/i2c/busses/i2c-omap.c13
-rw-r--r--drivers/i2c/busses/i2c-tegra.c15
-rw-r--r--drivers/i2c/i2c-dev.c6
-rw-r--r--drivers/i3c/master.c2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c25
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c4
-rw-r--r--drivers/ide/ide-atapi.c9
-rw-r--r--drivers/ide/ide-io.c61
-rw-r--r--drivers/ide/ide-park.c2
-rw-r--r--drivers/ide/ide-probe.c23
-rw-r--r--drivers/ide/ide-proc.c2
-rw-r--r--drivers/iio/adc/axp288_adc.c76
-rw-r--r--drivers/iio/adc/ti-ads8688.c3
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c5
-rw-r--r--drivers/iio/chemical/atlas-ph-sensor.c7
-rw-r--r--drivers/infiniband/core/cma.c5
-rw-r--r--drivers/infiniband/core/core_priv.h1
-rw-r--r--drivers/infiniband/core/device.c13
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/infiniband/core/rdma_core.h2
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c11
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c62
-rw-r--r--drivers/infiniband/core/uverbs_main.c26
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c8
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c5
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c2
-rw-r--r--drivers/infiniband/hw/hfi1/init.c29
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c27
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c3
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c14
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c6
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h35
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c6
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/input/joystick/xpad.c3
-rw-r--r--drivers/input/keyboard/Kconfig2
-rw-r--r--drivers/input/keyboard/cap11xx.c35
-rw-r--r--drivers/input/keyboard/matrix_keypad.c2
-rw-r--r--drivers/input/keyboard/qt2160.c69
-rw-r--r--drivers/input/keyboard/st-keyscan.c4
-rw-r--r--drivers/input/misc/apanel.c24
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/misc/pwm-vibra.c19
-rw-r--r--drivers/input/misc/uinput.c5
-rw-r--r--drivers/input/mouse/elan_i2c_core.c2
-rw-r--r--drivers/input/mouse/elantech.c9
-rw-r--r--drivers/input/serio/olpc_apsp.c17
-rw-r--r--drivers/input/serio/ps2-gpio.c1
-rw-r--r--drivers/input/touchscreen/Kconfig2
-rw-r--r--drivers/input/touchscreen/raspberrypi-ts.c4
-rw-r--r--drivers/iommu/amd_iommu.c19
-rw-r--r--drivers/iommu/intel-iommu.c10
-rw-r--r--drivers/iommu/mtk_iommu_v1.c9
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c77
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c126
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c2
-rw-r--r--drivers/irqchip/irq-madera.c2
-rw-r--r--drivers/irqchip/irq-mmp.c6
-rw-r--r--drivers/irqchip/irq-stm32-exti.c1
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c40
-rw-r--r--drivers/irqchip/irq-xtensa-pic.c6
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c3
-rw-r--r--drivers/isdn/i4l/isdn_tty.c6
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/md/dm-crypt.c27
-rw-r--r--drivers/md/dm-rq.c2
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin-metadata.h2
-rw-r--r--drivers/md/dm-thin.c65
-rw-r--r--drivers/md/dm.c41
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/raid1.c28
-rw-r--r--drivers/md/raid5-cache.c33
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c2
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c24
-rw-r--r--drivers/mfd/Kconfig3
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/axp20x.c126
-rw-r--r--drivers/mfd/bd9571mwv.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/exynos-lpass.c4
-rw-r--r--drivers/mfd/madera-core.c5
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/mt6397-core.c3
-rw-r--r--drivers/mfd/qcom_rpm.c4
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/stmpe.c12
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c5
-rw-r--r--drivers/mfd/tps65218.c24
-rw-r--r--drivers/mfd/tps6586x.c24
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/misc/genwqe/card_utils.c4
-rw-r--r--drivers/misc/ibmvmc.c7
-rw-r--r--drivers/misc/mei/client.c5
-rw-r--r--drivers/misc/mei/hbm.c12
-rw-r--r--drivers/misc/mei/hw-me-regs.h4
-rw-r--r--drivers/misc/mei/pci-me.c6
-rw-r--r--drivers/misc/mic/vop/vop_main.c82
-rw-r--r--drivers/misc/pvpanic.c4
-rw-r--r--drivers/mmc/core/block.c10
-rw-r--r--drivers/mmc/core/host.c2
-rw-r--r--drivers/mmc/host/Kconfig4
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/dw_mmc-bluefield.c5
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c33
-rw-r--r--drivers/mmc/host/mtk-sd.c2
-rw-r--r--drivers/mmc/host/sdhci-iproc.c5
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mmc/host/sunxi-mmc.c26
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdcore.h2
-rw-r--r--drivers/mtd/mtdpart.c39
-rw-r--r--drivers/mtd/nand/raw/denali.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c21
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c13
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c20
-rw-r--r--drivers/mtd/nand/spi/core.c46
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/caif/caif_serial.c5
-rw-r--r--drivers/net/can/dev.c27
-rw-r--r--drivers/net/can/flexcan.c4
-rw-r--r--drivers/net/dsa/b53/b53_srab.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c2
-rw-r--r--drivers/net/dsa/mt7530.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c141
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h5
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c21
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h10
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c2
-rw-r--r--drivers/net/dsa/realtek-smi.c18
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c12
-rw-r--r--drivers/net/ethernet/alteon/acenic.c2
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c61
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c22
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c8
-rw-r--r--drivers/net/ethernet/apple/bmac.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c7
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c29
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c36
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c22
-rw-r--r--drivers/net/ethernet/cadence/macb.h3
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c36
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c68
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c14
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Kconfig5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c16
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c8
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c8
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/intel/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c14
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h2
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c18
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c35
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c11
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c12
-rw-r--r--drivers/net/ethernet/ni/nixge.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c30
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c12
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/sfc/ef10.c29
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c4
-rw-r--r--drivers/net/ethernet/sfc/nic.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c4
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c108
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/sun/cassini.c17
-rw-r--r--drivers/net/ethernet/sun/cassini.h15
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c8
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c12
-rw-r--r--drivers/net/fddi/defxx.c8
-rw-r--r--drivers/net/fddi/skfp/skfddi.c8
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/hyperv/hyperv_net.h12
-rw-r--r--drivers/net/hyperv/netvsc.c4
-rw-r--r--drivers/net/hyperv/netvsc_drv.c145
-rw-r--r--drivers/net/hyperv/rndis_filter.c36
-rw-r--r--drivers/net/ieee802154/mcr20a.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c6
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/phy/asix.c8
-rw-r--r--drivers/net/phy/bcm87xx.c2
-rw-r--r--drivers/net/phy/cortina.c1
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/marvell.c53
-rw-r--r--drivers/net/phy/mdio-hisi-femac.c16
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/meson-gxl.c1
-rw-r--r--drivers/net/phy/micrel.c2
-rw-r--r--drivers/net/phy/phy.c33
-rw-r--r--drivers/net/phy/phy_device.c17
-rw-r--r--drivers/net/phy/phylink.c19
-rw-r--r--drivers/net/phy/rockchip.c9
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/phy/sfp.c30
-rw-r--r--drivers/net/phy/sfp.h2
-rw-r--r--drivers/net/phy/teranetics.c1
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/team/team.c27
-rw-r--r--drivers/net/tun.c14
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/asix_devices.c9
-rw-r--r--drivers/net/usb/cdc_ether.c34
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c181
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c8
-rw-r--r--drivers/net/vxlan.c14
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c71
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c39
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c15
-rw-r--r--drivers/net/wireless/virt_wifi.c4
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c8
-rw-r--r--drivers/nvdimm/dimm.c6
-rw-r--r--drivers/nvdimm/dimm_devs.c22
-rw-r--r--drivers/nvdimm/nd-core.h4
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvme/host/core.c27
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c126
-rw-r--r--drivers/nvme/host/rdma.c64
-rw-r--r--drivers/nvme/host/tcp.c35
-rw-r--r--drivers/nvme/target/rdma.c15
-rw-r--r--drivers/nvme/target/tcp.c2
-rw-r--r--drivers/of/dynamic.c3
-rw-r--r--drivers/of/fdt.c4
-rw-r--r--drivers/of/overlay.c3
-rw-r--r--drivers/of/pdt.c1
-rw-r--r--drivers/of/property.c1
-rw-r--r--drivers/opp/core.c74
-rw-r--r--drivers/opp/of.c99
-rw-r--r--drivers/pci/Kconfig22
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c11
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-armada8k.c16
-rw-r--r--drivers/pci/controller/pcie-iproc-msi.c6
-rw-r--r--drivers/pci/msi.c22
-rw-r--r--drivers/pci/pci.c3
-rw-r--r--drivers/pci/quirks.c5
-rw-r--r--drivers/pci/switch/switchtec.c8
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c5
-rw-r--r--drivers/phy/qualcomm/phy-ath79-usb.c4
-rw-r--r--drivers/phy/ti/Kconfig1
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/mediatek/Kconfig3
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c7
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c44
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h2
-rw-r--r--drivers/platform/x86/Kconfig8
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/rapidio/devices/tsi721.c22
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c8
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c9
-rw-r--r--drivers/reset/Kconfig20
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/core.c42
-rw-r--r--drivers/reset/reset-hsdk.c1
-rw-r--r--drivers/reset/reset-simple.c13
-rw-r--r--drivers/reset/reset-socfpga.c88
-rw-r--r--drivers/reset/reset-uniphier-glue.c (renamed from drivers/reset/reset-uniphier-usb3.c)50
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c3
-rw-r--r--drivers/s390/net/ism_drv.c15
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c31
-rw-r--r--drivers/s390/net/qeth_l2_main.c8
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/s390/virtio/virtio_ccw.c12
-rw-r--r--drivers/scsi/3w-sas.c5
-rw-r--r--drivers/scsi/53c700.c2
-rw-r--r--drivers/scsi/a100u2w.c8
-rw-r--r--drivers/scsi/aacraid/linit.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c8
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c18
-rw-r--r--drivers/scsi/be2iscsi/be_main.c4
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c11
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c49
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c44
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/csiostor/csio_attr.c2
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c9
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c28
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/cxlflash/main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c12
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/libfc/fc_lport.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c35
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c15
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c8
-rw-r--r--drivers/scsi/mesh.c5
-rw-r--r--drivers/scsi/mvumi.c9
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c6
-rw-r--r--drivers/scsi/qedf/qedf_main.c29
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c3
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h1
-rw-r--r--drivers/scsi/qedi/qedi_main.c39
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c12
-rw-r--r--drivers/scsi/scsi_debug.c41
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/scsi_pm.c26
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sd_zbc.c12
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c34
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.c12
-rw-r--r--drivers/soc/fsl/qbman/dpaa_sys.c2
-rw-r--r--drivers/soc/fsl/qbman/qman.c9
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c55
-rw-r--r--drivers/soc/renesas/Kconfig2
-rw-r--r--drivers/soc/renesas/r8a774c0-sysc.c23
-rw-r--r--drivers/spi/spi-pic32-sqi.c6
-rw-r--r--drivers/staging/android/ion/ion.c2
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c3
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c1
-rw-r--r--drivers/staging/vt6655/device_main.c19
-rw-r--r--drivers/staging/wilc1000/host_interface.c5
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c7
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/target_core_configfs.c8
-rw-r--r--drivers/target/target_core_user.c89
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c30
-rw-r--r--drivers/thermal/of-thermal.c4
-rw-r--r--drivers/tty/n_hdlc.c1
-rw-r--r--drivers/tty/serial/8250/8250_core.c17
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c3
-rw-r--r--drivers/tty/serial/8250/8250_pci.c9
-rw-r--r--drivers/tty/serial/Kconfig12
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/earlycon-riscv-sbi.c31
-rw-r--r--drivers/tty/serial/fsl_lpuart.c2
-rw-r--r--drivers/tty/serial/lantiq.c36
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c4
-rw-r--r--drivers/tty/serial/serial_core.c18
-rw-r--r--drivers/tty/serial/sh-sci.c9
-rw-r--r--drivers/tty/tty_io.c23
-rw-r--r--drivers/tty/vt/vt.c50
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c3
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/core/generic.c9
-rw-r--r--drivers/usb/core/ledtrig-usbport.c17
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/gadget.c2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c13
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/host/ehci-mv.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c6
-rw-r--r--drivers/usb/host/xhci-mem.c8
-rw-r--r--drivers/usb/musb/musb_gadget.c13
-rw-r--r--drivers/usb/musb/musbhsdma.c21
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c15
-rw-r--r--drivers/usb/serial/keyspan_usa26msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa28msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa49msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa67msg.h1
-rw-r--r--drivers/usb/serial/keyspan_usa90msg.h1
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/serial/pl2303.h2
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/storage/scsiglue.c8
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c3
-rw-r--r--drivers/usb/usbip/README7
-rw-r--r--drivers/vfio/pci/trace.h8
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c36
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/scsi.c22
-rw-r--r--drivers/vhost/vhost.c112
-rw-r--r--drivers/vhost/vhost.h7
-rw-r--r--drivers/vhost/vsock.c4
-rw-r--r--drivers/video/backlight/88pm860x_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c28
-rw-r--r--drivers/video/console/vgacon.c7
-rw-r--r--drivers/video/fbdev/core/fbcon.c7
-rw-r--r--drivers/video/fbdev/core/fbmem.c19
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/offb.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/logo/Kconfig9
-rw-r--r--drivers/virtio/virtio_balloon.c98
-rw-r--r--drivers/virtio/virtio_mmio.c9
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/virtio/virtio_ring.c15
-rw-r--r--drivers/watchdog/mt7621_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/tqmx86_wdt.c8
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/pvcalls-back.c9
-rw-r--r--drivers/xen/pvcalls-front.c104
-rw-r--r--drivers/xen/swiotlb-xen.c4
871 files changed, 7831 insertions, 5022 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7b65a807b3dd..90ff0a47c12e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -10,6 +10,7 @@ menuconfig ACPI
 	bool "ACPI (Advanced Configuration and Power Interface) Support"
 	depends on ARCH_SUPPORTS_ACPI
 	select PNP
+	select NLS
 	default y if X86
 	help
 	  Advanced Configuration and Power Interface (ACPI) support for
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7c6afc111d76..bb857421c2e8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -41,7 +41,8 @@ acpi-y += ec.o
 acpi-$(CONFIG_ACPI_DOCK)	+= dock.o
 acpi-$(CONFIG_PCI)		+= pci_root.o pci_link.o pci_irq.o
 obj-$(CONFIG_ACPI_MCFG)	+= pci_mcfg.o
-acpi-y				+= acpi_lpss.o acpi_apd.o
+acpi-$(CONFIG_PCI)		+= acpi_lpss.o
+acpi-y				+= acpi_apd.o
 acpi-y				+= acpi_platform.o
 acpi-y				+= acpi_pnp.o
 acpi-$(CONFIG_ARM_AMBA)	+= acpi_amba.o
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index fdd90ffceb85..e48894e002ba 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -876,7 +876,7 @@ int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
 	return (resv == its->its_count) ? resv : -ENODEV;
 }
 #else
-static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev);
+static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
 { return NULL; }
 static inline int iort_add_device_replay(const struct iommu_ops *ops,
 					 struct device *dev)
@@ -952,9 +952,10 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_root_complex *rc;
+	struct pci_bus *pbus = to_pci_dev(dev)->bus;
 
 	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
-			      iort_match_node_callback, dev);
+			      iort_match_node_callback, &pbus->dev);
 	if (!node || node->revision < 1)
 		return -ENODEV;
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 99d820a693a8..147f6c7ea59c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1029,6 +1029,9 @@ void __init acpi_early_init(void)
 
 	acpi_permanent_mmap = true;
 
+	/* Initialize debug output. Linux does not use ACPICA defaults */
+	acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
+
 #ifdef CONFIG_X86
 	/*
 	 * If the machine falls into the DMI check table,
@@ -1054,18 +1057,6 @@ void __init acpi_early_init(void)
 		goto error0;
 	}
 
-	/*
-	 * ACPI 2.0 requires the EC driver to be loaded and work before
-	 * the EC device is found in the namespace (i.e. before
-	 * acpi_load_tables() is called).
-	 *
-	 * This is accomplished by looking for the ECDT table, and getting
-	 * the EC parameters out of that.
-	 *
-	 * Ignore the result. Not having an ECDT is not fatal.
-	 */
-	status = acpi_ec_ecdt_probe();
-
 #ifdef CONFIG_X86
 	if (!acpi_ioapic) {
 		/* compatible (0) means level (3) */
@@ -1142,6 +1133,18 @@ static int __init acpi_bus_init(void)
 		goto error1;
 	}
 
+	/*
+	 * ACPI 2.0 requires the EC driver to be loaded and work before the EC
+	 * device is found in the namespace.
+	 *
+	 * This is accomplished by looking for the ECDT table and getting the EC
+	 * parameters out of that.
+	 *
+	 * Do that before calling acpi_initialize_objects() which may trigger EC
+	 * address space accesses.
+	 */
+	acpi_ec_ecdt_probe();
+
 	status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
 	if (ACPI_FAILURE(status)) {
 		printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 217a782c3e55..1b207fca1420 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1051,6 +1051,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
 }
 
 /**
+ * cppc_get_desired_perf - Get the value of desired performance register.
+ * @cpunum: CPU from which to get desired performance.
+ * @desired_perf: address of a variable to store the returned desired performance
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
+{
+	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
+	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
+	struct cpc_register_resource *desired_reg;
+	struct cppc_pcc_data *pcc_ss_data = NULL;
+
+	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
+
+	if (CPC_IN_PCC(desired_reg)) {
+		int ret = 0;
+
+		if (pcc_ss_id < 0)
+			return -EIO;
+
+		pcc_ss_data = pcc_data[pcc_ss_id];
+
+		down_write(&pcc_ss_data->pcc_lock);
+
+		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
+			cpc_read(cpunum, desired_reg, desired_perf);
+		else
+			ret = -EIO;
+
+		up_write(&pcc_ss_data->pcc_lock);
+
+		return ret;
+	}
+
+	cpc_read(cpunum, desired_reg, desired_perf);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cppc_get_desired_perf);
+
+/**
  * cppc_get_perf_caps - Get a CPUs performance capabilities.
  * @cpunum: CPU from which to get capabilities info.
  * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
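
The newly exported cppc_get_desired_perf() above is meant for cpufreq-style consumers. A minimal hedged sketch of a caller follows; the wrapper function and its name are hypothetical, only the cppc_get_desired_perf() signature and return convention come from the patch:

	#include <acpi/cppc_acpi.h>

	/* Hypothetical helper: log the desired-performance register for one CPU. */
	static int log_desired_perf(int cpu)
	{
		u64 desired_perf;
		int ret;

		/* Returns 0 on success, -EIO if the PCC mailbox read fails. */
		ret = cppc_get_desired_perf(cpu, &desired_perf);
		if (ret)
			return ret;

		pr_info("CPU%d desired perf: %llu\n", cpu, desired_perf);
		return 0;
	}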
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7e6952edb5b0..6a9e1fb8913a 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -81,7 +81,11 @@ void acpi_debugfs_init(void);
 #else
 static inline void acpi_debugfs_init(void) { return; }
 #endif
+#ifdef CONFIG_PCI
 void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
 
 void acpi_apd_init(void);
 
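
The #ifdef/stub pattern above keeps callers free of conditional compilation: when CONFIG_PCI is off, the empty inline stub compiles away. A hedged illustration, with a hypothetical caller:

	/* Hypothetical caller: builds identically with or without CONFIG_PCI. */
	void __init acpi_subsys_setup(void)
	{
		acpi_lpss_init();	/* real init, or the empty inline stub */
		acpi_apd_init();
	}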
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 011d3db19c80..e18ade5d74e9 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -26,7 +26,6 @@
 #include <acpi/nfit.h>
 #include "intel.h"
 #include "nfit.h"
-#include "intel.h"
 
 /*
  * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -78,12 +77,6 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id)
 }
 EXPORT_SYMBOL(to_nfit_uuid);
 
-static struct acpi_nfit_desc *to_acpi_nfit_desc(
-		struct nvdimm_bus_descriptor *nd_desc)
-{
-	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
-}
-
 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
@@ -416,10 +409,36 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
 	return true;
 }
 
+static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
+		struct nd_cmd_pkg *call_pkg)
+{
+	if (call_pkg) {
+		int i;
+
+		if (nfit_mem->family != call_pkg->nd_family)
+			return -ENOTTY;
+
+		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+			if (call_pkg->nd_reserved2[i])
+				return -EINVAL;
+		return call_pkg->nd_command;
+	}
+
+	/* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
+	if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+		return cmd;
+
+	/*
+	 * Force function number validation to fail since 0 is never
+	 * published as a valid function in dsm_mask.
+	 */
+	return 0;
+}
+
 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
 	union acpi_object in_obj, in_buf, *out_obj;
 	const struct nd_cmd_desc *desc = NULL;
@@ -429,30 +448,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	unsigned long cmd_mask, dsm_mask;
 	u32 offset, fw_status = 0;
 	acpi_handle handle;
-	unsigned int func;
 	const guid_t *guid;
-	int rc, i;
+	int func, rc, i;
 
 	if (cmd_rc)
 		*cmd_rc = -EINVAL;
-	func = cmd;
-	if (cmd == ND_CMD_CALL) {
-		call_pkg = buf;
-		func = call_pkg->nd_command;
-
-		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
-			if (call_pkg->nd_reserved2[i])
-				return -EINVAL;
-	}
 
 	if (nvdimm) {
 		struct acpi_device *adev = nfit_mem->adev;
 
 		if (!adev)
 			return -ENOTTY;
-		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
-			return -ENOTTY;
 
+		if (cmd == ND_CMD_CALL)
+			call_pkg = buf;
+		func = cmd_to_func(nfit_mem, cmd, call_pkg);
+		if (func < 0)
+			return func;
 		dimm_name = nvdimm_name(nvdimm);
 		cmd_name = nvdimm_cmd_name(cmd);
 		cmd_mask = nvdimm_cmd_mask(nvdimm);
@@ -463,6 +475,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	} else {
 		struct acpi_device *adev = to_acpi_dev(acpi_desc);
 
+		func = cmd;
 		cmd_name = nvdimm_bus_cmd_name(cmd);
 		cmd_mask = nd_desc->cmd_mask;
 		dsm_mask = cmd_mask;
@@ -477,7 +490,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
 		return -ENOTTY;
 
-	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
+	/*
+	 * Check for a valid command. For ND_CMD_CALL, we also have to
+	 * make sure that the DSM function is supported.
+	 */
+	if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+		return -ENOTTY;
+	else if (!test_bit(cmd, &cmd_mask))
 		return -ENOTTY;
 
 	in_obj.type = ACPI_TYPE_PACKAGE;
@@ -721,6 +740,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
 	struct acpi_nfit_memory_map *memdev;
 	struct acpi_nfit_desc *acpi_desc;
 	struct nfit_mem *nfit_mem;
+	u16 physical_id;
 
 	mutex_lock(&acpi_desc_lock);
 	list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -728,10 +748,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
 		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
 			memdev = __to_nfit_memdev(nfit_mem);
 			if (memdev->device_handle == device_handle) {
+				*flags = memdev->flags;
+				physical_id = memdev->physical_id;
 				mutex_unlock(&acpi_desc->init_mutex);
 				mutex_unlock(&acpi_desc_lock);
-				*flags = memdev->flags;
-				return memdev->physical_id;
+				return physical_id;
 			}
 		}
 		mutex_unlock(&acpi_desc->init_mutex);
@@ -1872,6 +1893,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 		return 0;
 	}
 
+	/*
+	 * Function 0 is the command interrogation function, don't
+	 * export it to potential userspace use, and enable it to be
+	 * used as an error value in acpi_nfit_ctl().
+	 */
+	dsm_mask &= ~1UL;
+
 	guid = to_nfit_uuid(nfit_mem->family);
 	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
 		if (acpi_check_dsm(adev_dimm->handle, guid,
@@ -2047,11 +2075,6 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 		if (!nvdimm)
 			continue;
 
-		rc = nvdimm_security_setup_events(nvdimm);
-		if (rc < 0)
-			dev_warn(acpi_desc->dev,
-				"security event setup failed: %d\n", rc);
-
 		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
 		if (nfit_kernfs)
 			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
@@ -2231,7 +2254,6 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
 	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
 	if (!nd_set)
 		return -ENOMEM;
-	ndr_desc->nd_set = nd_set;
 	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
 
 	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
@@ -3367,7 +3389,7 @@ EXPORT_SYMBOL_GPL(acpi_nfit_init);
 
 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 	struct device *dev = acpi_desc->dev;
 
 	/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
@@ -3384,7 +3406,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd)
 {
-	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
 	if (nvdimm)
 		return 0;
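
For reference, the cmd_to_func() helper added above validates ND_CMD_CALL packages before anything reaches firmware. A hedged sketch of a package that passes the new checks; the field values are illustrative, and the struct nd_cmd_pkg layout is the one from include/uapi/linux/ndctl.h:

	struct nd_cmd_pkg pkg = {
		.nd_family  = NVDIMM_FAMILY_INTEL,	/* must match the DIMM's family, else -ENOTTY */
		.nd_command = 1,			/* DSM function number; 0 is cleared from dsm_mask */
		/* nd_reserved2[] must stay all-zero, else the ioctl returns -EINVAL */
	};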
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 850b2927b4e7..f70de71f79d6 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -146,7 +146,7 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
 
 static void nvdimm_invalidate_cache(void);
 
-static int intel_security_unlock(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *key_data)
 {
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -227,7 +227,7 @@ static int intel_security_disable(struct nvdimm *nvdimm,
 	return 0;
 }
 
-static int intel_security_erase(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *key,
 		enum nvdimm_passphrase_type ptype)
 {
@@ -276,7 +276,7 @@ static int intel_security_erase(struct nvdimm *nvdimm,
 	return 0;
 }
 
-static int intel_security_query_overwrite(struct nvdimm *nvdimm)
+static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
 {
 	int rc;
 	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
@@ -313,7 +313,7 @@ static int intel_security_query_overwrite(struct nvdimm *nvdimm)
 	return 0;
 }
 
-static int intel_security_overwrite(struct nvdimm *nvdimm,
+static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
 		const struct nvdimm_key_data *nkey)
 {
 	int rc;
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 274699463b4f..7bbbf8256a41 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -146,9 +146,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
 {
 	struct acpi_srat_mem_affinity *p =
 		(struct acpi_srat_mem_affinity *)header;
-	pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
-		 (unsigned long)p->base_address,
-		 (unsigned long)p->length,
+	pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
+		 (unsigned long long)p->base_address,
+		 (unsigned long long)p->length,
 		 p->proximity_domain,
 		 (p->flags & ACPI_SRAT_MEM_ENABLED) ?
 		 "enabled" : "disabled",
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 2579675b7082..e7c0006e6602 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -20,8 +20,11 @@
 #define GPI1_LDO_ON		(3 << 0)
 #define GPI1_LDO_OFF		(4 << 0)
 
-#define AXP288_ADC_TS_PIN_GPADC	0xf2
-#define AXP288_ADC_TS_PIN_ON	0xf3
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK	GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF		(0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING	(1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND	(2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON		(3 << 0)
 
 static struct pmic_table power_table[] = {
 	{
@@ -212,22 +215,44 @@ out:
  */
 static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
 {
+	int ret, adc_ts_pin_ctrl;
 	u8 buf[2];
-	int ret;
 
-	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
-			   AXP288_ADC_TS_PIN_GPADC);
+	/*
+	 * The current-source used for the battery temp-sensor (TS) is shared
+	 * with the GPADC. For proper fuel-gauge and charger operation the TS
+	 * current-source needs to be permanently on. But to read the GPADC we
+	 * need to temporary switch the TS current-source to ondemand, so that
+	 * the GPADC can use it, otherwise we will always read an all 0 value.
+	 *
+	 * Note that the switching from on to on-ondemand is not necessary
+	 * when the TS current-source is off (this happens on devices which
+	 * do not use the TS-pin).
+	 */
+	ret = regmap_read(regmap, AXP288_ADC_TS_PIN_CTRL, &adc_ts_pin_ctrl);
 	if (ret)
 		return ret;
 
-	/* After switching to the GPADC pin give things some time to settle */
-	usleep_range(6000, 10000);
+	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+		ret = regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+					 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+					 AXP288_ADC_TS_CURRENT_ON_ONDEMAND);
+		if (ret)
+			return ret;
+
+		/* Wait a bit after switching the current-source */
+		usleep_range(6000, 10000);
+	}
 
 	ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
 	if (ret == 0)
 		ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
 
-	regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
+	if (adc_ts_pin_ctrl & AXP288_ADC_TS_CURRENT_ON_OFF_MASK) {
+		regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
+				   AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+				   AXP288_ADC_TS_CURRENT_ON);
+	}
 
 	return ret;
 }
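
The control flow of the rewritten intel_xpower_pmic_get_raw_temp() is save, conditionally switch, read, restore. A compilable userspace model of that sequence, with a plain variable standing in for the AXP288_ADC_TS_PIN_CTRL register and stub helpers in place of the regmap calls:

#include <stdio.h>

#define TS_CURRENT_MASK		0x3	/* GENMASK(1, 0) */
#define TS_CURRENT_ON_ONDEMAND	0x2
#define TS_CURRENT_ON		0x3

static unsigned int ts_pin_ctrl = TS_CURRENT_ON;	/* fake register */

static unsigned int reg_read(void) { return ts_pin_ctrl; }

static void reg_update_bits(unsigned int mask, unsigned int val)
{
	ts_pin_ctrl = (ts_pin_ctrl & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int saved = reg_read();

	/* Only switch if the TS current-source is in use at all. */
	if (saved & TS_CURRENT_MASK) {
		reg_update_bits(TS_CURRENT_MASK, TS_CURRENT_ON_ONDEMAND);
		printf("GPADC sample taken with ctrl=0x%x\n", ts_pin_ctrl);
		reg_update_bits(TS_CURRENT_MASK, TS_CURRENT_ON);
	}
	printf("restored ctrl=0x%x\n", ts_pin_ctrl);
	return 0;
}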
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1b475bc1ae16..665e93ca0b40 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
 	}
 }
 
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+				       unsigned int start, unsigned int i)
+{
+	acpi_handle rhandle, dup;
+	unsigned int j;
+
+	/* The caller is expected to check the package element types */
+	rhandle = package->package.elements[i].reference.handle;
+	for (j = start; j < i; j++) {
+		dup = package->package.elements[j].reference.handle;
+		if (dup == rhandle)
+			return true;
+	}
+
+	return false;
+}
+
 int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
 				 struct list_head *list)
 {
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
 			err = -ENODEV;
 			break;
 		}
+
+		/* Some ACPI tables contain duplicate power resource references */
+		if (acpi_power_resource_is_dup(package, start, i))
+			continue;
+
 		err = acpi_add_power_resource(rhandle);
 		if (err)
 			break;
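
acpi_power_resource_is_dup() is a straightforward backward scan: element i is a duplicate if any earlier element in [start, i) carries the same handle. The same shape in a self-contained program, with void pointers playing the role of acpi_handle:

#include <stdbool.h>
#include <stdio.h>

static bool is_dup(void *const *handles, unsigned int start, unsigned int i)
{
	for (unsigned int j = start; j < i; j++)
		if (handles[j] == handles[i])
			return true;
	return false;
}

int main(void)
{
	int a, b;
	void *pkg[] = { &a, &b, &a };	/* third entry repeats the first */

	for (unsigned int i = 0; i < 3; i++)
		printf("entry %u duplicate: %d\n", i, is_dup(pkg, 0, i));
	return 0;
}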
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cdfc87629efb..4d2b2ad1ee0e 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name)
 static int __init binder_init(void)
 {
 	int ret;
-	char *device_name, *device_names, *device_tmp;
+	char *device_name, *device_tmp;
 	struct binder_device *device;
 	struct hlist_node *tmp;
+	char *device_names = NULL;
 
 	ret = binder_alloc_shrinker_init();
 	if (ret)
@@ -5898,23 +5899,29 @@ static int __init binder_init(void)
 			    &transaction_log_fops);
 	}
 
-	/*
-	 * Copy the module_parameter string, because we don't want to
-	 * tokenize it in-place.
-	 */
-	device_names = kstrdup(binder_devices_param, GFP_KERNEL);
-	if (!device_names) {
-		ret = -ENOMEM;
-		goto err_alloc_device_names_failed;
-	}
+	if (strcmp(binder_devices_param, "") != 0) {
+		/*
+		 * Copy the module_parameter string, because we don't want to
+		 * tokenize it in-place.
+		 */
+		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
+		if (!device_names) {
+			ret = -ENOMEM;
+			goto err_alloc_device_names_failed;
+		}
 
-	device_tmp = device_names;
-	while ((device_name = strsep(&device_tmp, ","))) {
-		ret = init_binder_device(device_name);
-		if (ret)
-			goto err_init_binder_device_failed;
+		device_tmp = device_names;
+		while ((device_name = strsep(&device_tmp, ","))) {
+			ret = init_binder_device(device_name);
+			if (ret)
+				goto err_init_binder_device_failed;
+		}
 	}
 
+	ret = init_binderfs();
+	if (ret)
+		goto err_init_binder_device_failed;
+
 	return ret;
 
 err_init_binder_device_failed:
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7fb97f503ef2..045b3e42d98b 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode)
 }
 #endif
 
+#ifdef CONFIG_ANDROID_BINDERFS
+extern int __init init_binderfs(void);
+#else
+static inline int __init init_binderfs(void)
+{
+	return 0;
+}
+#endif
+
 #endif /* _LINUX_BINDER_INTERNAL_H */
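
The header change follows the usual kernel pattern for optional subsystems: a real declaration when CONFIG_ANDROID_BINDERFS is set, and a static inline stub returning 0 otherwise, so binder_init() can call init_binderfs() unconditionally. The pattern in isolation as a userspace stand-in, with the config macro faked:

#include <stdio.h>

/* #define CONFIG_ANDROID_BINDERFS */	/* define to use the real declaration */

#ifdef CONFIG_ANDROID_BINDERFS
extern int init_binderfs(void);		/* provided elsewhere */
#else
static inline int init_binderfs(void)	/* compiled-out: cheap no-op */
{
	return 0;
}
#endif

int main(void)
{
	printf("init_binderfs() -> %d\n", init_binderfs());
	return 0;
}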
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 7496b10532aa..e773f45d19d9 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -11,6 +11,7 @@
 #include <linux/kdev_t.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/namei.h>
 #include <linux/magic.h>
 #include <linux/major.h>
 #include <linux/miscdevice.h>
@@ -20,6 +21,7 @@
 #include <linux/parser.h>
 #include <linux/radix-tree.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
 #include <linux/stddef.h>
@@ -30,7 +32,7 @@
 #include <linux/xarray.h>
 #include <uapi/asm-generic/errno-base.h>
 #include <uapi/linux/android/binder.h>
-#include <uapi/linux/android/binder_ctl.h>
+#include <uapi/linux/android/binderfs.h>
 
 #include "binder_internal.h"
 
@@ -39,14 +41,32 @@
 #define INODE_OFFSET 3
 #define INTSTRLEN 21
 #define BINDERFS_MAX_MINOR (1U << MINORBITS)
-
-static struct vfsmount *binderfs_mnt;
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
 
 static dev_t binderfs_dev;
 static DEFINE_MUTEX(binderfs_minors_mutex);
 static DEFINE_IDA(binderfs_minors);
 
 /**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ */
+struct binderfs_mount_opts {
+	int max;
+};
+
+enum {
+	Opt_max,
+	Opt_err
+};
+
+static const match_table_t tokens = {
+	{ Opt_max, "max=%d" },
+	{ Opt_err, NULL }
+};
+
+/**
  * binderfs_info - information about a binderfs mount
  * @ipc_ns: The ipc namespace the binderfs mount belongs to.
  * @control_dentry: This records the dentry of this binderfs mount
@@ -55,13 +75,16 @@ static DEFINE_IDA(binderfs_minors);
  *                 created.
  * @root_gid:      gid that needs to be used when a new binder device is
  *                 created.
+ * @mount_opts:    The mount options in use.
+ * @device_count:  The current number of allocated binder devices.
  */
 struct binderfs_info {
 	struct ipc_namespace *ipc_ns;
 	struct dentry *control_dentry;
 	kuid_t root_uid;
 	kgid_t root_gid;
-
+	struct binderfs_mount_opts mount_opts;
+	int device_count;
 };
 
 static inline struct binderfs_info *BINDERFS_I(const struct inode *inode)
@@ -84,7 +107,7 @@ bool is_binderfs_device(const struct inode *inode)
  * @userp: buffer to copy information about new device for userspace to
  * @req: struct binderfs_device as copied from userspace
  *
- * This function allocated a new binder_device and reserves a new minor
+ * This function allocates a new binder_device and reserves a new minor
  * number for it.
  * Minor numbers are limited and tracked globally in binderfs_minors. The
  * function will stash a struct binder_device for the specific binder
@@ -100,20 +123,34 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 				struct binderfs_device *req)
 {
 	int minor, ret;
-	struct dentry *dentry, *dup, *root;
+	struct dentry *dentry, *root;
 	struct binder_device *device;
-	size_t name_len = BINDERFS_MAX_NAME + 1;
 	char *name = NULL;
+	size_t name_len;
 	struct inode *inode = NULL;
 	struct super_block *sb = ref_inode->i_sb;
 	struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+	bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+	bool use_reserve = true;
+#endif
 
 	/* Reserve new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
-	minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
-	mutex_unlock(&binderfs_minors_mutex);
-	if (minor < 0)
+	if (++info->device_count <= info->mount_opts.max)
+		minor = ida_alloc_max(&binderfs_minors,
+				      use_reserve ? BINDERFS_MAX_MINOR :
+						    BINDERFS_MAX_MINOR_CAPPED,
+				      GFP_KERNEL);
+	else
+		minor = -ENOSPC;
+	if (minor < 0) {
+		--info->device_count;
+		mutex_unlock(&binderfs_minors_mutex);
 		return minor;
+	}
+	mutex_unlock(&binderfs_minors_mutex);
 
 	ret = -ENOMEM;
 	device = kzalloc(sizeof(*device), GFP_KERNEL);
@@ -132,12 +169,13 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 	inode->i_uid = info->root_uid;
 	inode->i_gid = info->root_gid;
 
-	name = kmalloc(name_len, GFP_KERNEL);
+	req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+	name_len = strlen(req->name);
+	/* Make sure to include terminating NUL byte */
+	name = kmemdup(req->name, name_len + 1, GFP_KERNEL);
 	if (!name)
 		goto err;
 
-	strscpy(name, req->name, name_len);
-
 	device->binderfs_inode = inode;
 	device->context.binder_context_mgr_uid = INVALID_UID;
 	device->context.name = name;
@@ -156,28 +194,25 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
 
 	root = sb->s_root;
 	inode_lock(d_inode(root));
-	dentry = d_alloc_name(root, name);
-	if (!dentry) {
+
+	/* look it up */
+	dentry = lookup_one_len(name, root, name_len);
+	if (IS_ERR(dentry)) {
 		inode_unlock(d_inode(root));
-		ret = -ENOMEM;
+		ret = PTR_ERR(dentry);
 		goto err;
 	}
 
-	/* Verify that the name userspace gave us is not already in use. */
-	dup = d_lookup(root, &dentry->d_name);
-	if (dup) {
-		if (d_really_is_positive(dup)) {
-			dput(dup);
-			dput(dentry);
-			inode_unlock(d_inode(root));
-			ret = -EEXIST;
-			goto err;
-		}
-		dput(dup);
+	if (d_really_is_positive(dentry)) {
+		/* already exists */
+		dput(dentry);
+		inode_unlock(d_inode(root));
+		ret = -EEXIST;
+		goto err;
 	}
 
 	inode->i_private = device;
-	d_add(dentry, inode);
+	d_instantiate(dentry, inode);
 	fsnotify_create(root->d_inode, dentry);
 	inode_unlock(d_inode(root));
 
@@ -187,6 +222,7 @@ err:
 	kfree(name);
 	kfree(device);
 	mutex_lock(&binderfs_minors_mutex);
+	--info->device_count;
 	ida_free(&binderfs_minors, minor);
 	mutex_unlock(&binderfs_minors_mutex);
 	iput(inode);
@@ -232,6 +268,7 @@ static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
 static void binderfs_evict_inode(struct inode *inode)
 {
 	struct binder_device *device = inode->i_private;
+	struct binderfs_info *info = BINDERFS_I(inode);
 
 	clear_inode(inode);
 
@@ -239,6 +276,7 @@ static void binderfs_evict_inode(struct inode *inode)
 		return;
 
 	mutex_lock(&binderfs_minors_mutex);
+	--info->device_count;
 	ida_free(&binderfs_minors, device->miscdev.minor);
 	mutex_unlock(&binderfs_minors_mutex);
 
@@ -246,43 +284,87 @@ static void binderfs_evict_inode(struct inode *inode)
 	kfree(device);
 }
 
+/**
+ * binderfs_parse_mount_opts - parse binderfs mount options
+ * @data: options to set (can be NULL in which case defaults are used)
+ */
+static int binderfs_parse_mount_opts(char *data,
+				     struct binderfs_mount_opts *opts)
+{
+	char *p;
+	opts->max = BINDERFS_MAX_MINOR;
+
+	while ((p = strsep(&data, ",")) != NULL) {
+		substring_t args[MAX_OPT_ARGS];
+		int token;
+		int max_devices;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_max:
+			if (match_int(&args[0], &max_devices) ||
+			    (max_devices < 0 ||
+			     (max_devices > BINDERFS_MAX_MINOR)))
+				return -EINVAL;
+
+			opts->max = max_devices;
+			break;
+		default:
+			pr_err("Invalid mount options\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int binderfs_remount(struct super_block *sb, int *flags, char *data)
+{
+	struct binderfs_info *info = sb->s_fs_info;
+	return binderfs_parse_mount_opts(data, &info->mount_opts);
+}
+
+static int binderfs_show_mount_opts(struct seq_file *seq, struct dentry *root)
+{
+	struct binderfs_info *info;
+
+	info = root->d_sb->s_fs_info;
+	if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+		seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+	return 0;
+}
+
 static const struct super_operations binderfs_super_ops = {
-	.statfs = simple_statfs,
-	.evict_inode = binderfs_evict_inode,
+	.evict_inode = binderfs_evict_inode,
+	.remount_fs = binderfs_remount,
+	.show_options = binderfs_show_mount_opts,
+	.statfs = simple_statfs,
 };
 
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+	struct binderfs_info *info = dentry->d_sb->s_fs_info;
+	return info->control_dentry == dentry;
+}
+
 static int binderfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			   struct inode *new_dir, struct dentry *new_dentry,
 			   unsigned int flags)
 {
-	struct inode *inode = d_inode(old_dentry);
-
-	/* binderfs doesn't support directories. */
-	if (d_is_dir(old_dentry))
+	if (is_binderfs_control_device(old_dentry) ||
+	    is_binderfs_control_device(new_dentry))
 		return -EPERM;
 
-	if (flags & ~RENAME_NOREPLACE)
-		return -EINVAL;
-
-	if (!simple_empty(new_dentry))
-		return -ENOTEMPTY;
-
-	if (d_really_is_positive(new_dentry))
-		simple_unlink(new_dir, new_dentry);
-
-	old_dir->i_ctime = old_dir->i_mtime = new_dir->i_ctime =
-		new_dir->i_mtime = inode->i_ctime = current_time(old_dir);
-
-	return 0;
+	return simple_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
 }
 
 static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
 {
-	/*
-	 * The control dentry is only ever touched during mount so checking it
-	 * here should not require us to take lock.
-	 */
-	if (BINDERFS_I(dir)->control_dentry == dentry)
+	if (is_binderfs_control_device(dentry))
 		return -EPERM;
 
 	return simple_unlink(dir, dentry);
@@ -313,13 +395,16 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
 	struct inode *inode = NULL;
 	struct dentry *root = sb->s_root;
 	struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+	bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+	bool use_reserve = true;
+#endif
 
 	device = kzalloc(sizeof(*device), GFP_KERNEL);
 	if (!device)
 		return -ENOMEM;
 
-	inode_lock(d_inode(root));
-
 	/* If we have already created a binder-control node, return. */
 	if (info->control_dentry) {
 		ret = 0;
@@ -333,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
 
 	/* Reserve a new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
-	minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL);
+	minor = ida_alloc_max(&binderfs_minors,
+			      use_reserve ? BINDERFS_MAX_MINOR :
+					    BINDERFS_MAX_MINOR_CAPPED,
+			      GFP_KERNEL);
 	mutex_unlock(&binderfs_minors_mutex);
 	if (minor < 0) {
 		ret = minor;
@@ -358,12 +446,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
 	inode->i_private = device;
 	info->control_dentry = dentry;
 	d_add(dentry, inode);
-	inode_unlock(d_inode(root));
 
 	return 0;
 
 out:
-	inode_unlock(d_inode(root));
 	kfree(device);
 	iput(inode);
 
@@ -378,12 +464,9 @@ static const struct inode_operations binderfs_dir_inode_operations = {
 
 static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 {
+	int ret;
 	struct binderfs_info *info;
-	int ret = -ENOMEM;
 	struct inode *inode = NULL;
-	struct ipc_namespace *ipc_ns = sb->s_fs_info;
-
-	get_ipc_ns(ipc_ns);
 
 	sb->s_blocksize = PAGE_SIZE;
 	sb->s_blocksize_bits = PAGE_SHIFT;
@@ -405,11 +488,17 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb->s_op = &binderfs_super_ops;
 	sb->s_time_gran = 1;
 
-	info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
-	if (!info)
-		goto err_without_dentry;
+	sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+	if (!sb->s_fs_info)
+		return -ENOMEM;
+	info = sb->s_fs_info;
+
+	info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+	ret = binderfs_parse_mount_opts(data, &info->mount_opts);
+	if (ret)
+		return ret;
 
-	info->ipc_ns = ipc_ns;
 	info->root_gid = make_kgid(sb->s_user_ns, 0);
 	if (!gid_valid(info->root_gid))
 		info->root_gid = GLOBAL_ROOT_GID;
@@ -417,11 +506,9 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!uid_valid(info->root_uid))
 		info->root_uid = GLOBAL_ROOT_UID;
 
-	sb->s_fs_info = info;
-
 	inode = new_inode(sb);
 	if (!inode)
-		goto err_without_dentry;
+		return -ENOMEM;
 
 	inode->i_ino = FIRST_INODE;
 	inode->i_fop = &simple_dir_operations;
@@ -432,79 +519,28 @@ static int binderfs_fill_super(struct super_block *sb, void *data, int silent)
 
 	sb->s_root = d_make_root(inode);
 	if (!sb->s_root)
-		goto err_without_dentry;
-
-	ret = binderfs_binder_ctl_create(sb);
-	if (ret)
-		goto err_with_dentry;
-
-	return 0;
-
-err_with_dentry:
-	dput(sb->s_root);
-	sb->s_root = NULL;
-
-err_without_dentry:
-	put_ipc_ns(ipc_ns);
-	iput(inode);
-	kfree(info);
-
-	return ret;
-}
-
-static int binderfs_test_super(struct super_block *sb, void *data)
-{
-	struct binderfs_info *info = sb->s_fs_info;
-
-	if (info)
-		return info->ipc_ns == data;
-
-	return 0;
-}
+		return -ENOMEM;
 
-static int binderfs_set_super(struct super_block *sb, void *data)
-{
-	sb->s_fs_info = data;
-	return set_anon_super(sb, NULL);
+	return binderfs_binder_ctl_create(sb);
 }
 
 static struct dentry *binderfs_mount(struct file_system_type *fs_type,
 				     int flags, const char *dev_name,
 				     void *data)
 {
-	struct super_block *sb;
-	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
-
-	if (!ns_capable(ipc_ns->user_ns, CAP_SYS_ADMIN))
-		return ERR_PTR(-EPERM);
-
-	sb = sget_userns(fs_type, binderfs_test_super, binderfs_set_super,
-			 flags, ipc_ns->user_ns, ipc_ns);
-	if (IS_ERR(sb))
-		return ERR_CAST(sb);
-
-	if (!sb->s_root) {
-		int ret = binderfs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
-		if (ret) {
-			deactivate_locked_super(sb);
-			return ERR_PTR(ret);
-		}
-
-		sb->s_flags |= SB_ACTIVE;
-	}
-
-	return dget(sb->s_root);
+	return mount_nodev(fs_type, flags, data, binderfs_fill_super);
 }
 
 static void binderfs_kill_super(struct super_block *sb)
 {
 	struct binderfs_info *info = sb->s_fs_info;
 
+	kill_litter_super(sb);
+
 	if (info && info->ipc_ns)
 		put_ipc_ns(info->ipc_ns);
 
 	kfree(info);
-	kill_litter_super(sb);
 }
 
 static struct file_system_type binder_fs_type = {
@@ -514,7 +550,7 @@ static struct file_system_type binder_fs_type = {
 	.fs_flags = FS_USERNS_MOUNT,
 };
 
-static int __init init_binderfs(void)
+int __init init_binderfs(void)
 {
 	int ret;
 
@@ -530,15 +566,5 @@ static int __init init_binderfs(void)
 		return ret;
 	}
 
-	binderfs_mnt = kern_mount(&binder_fs_type);
-	if (IS_ERR(binderfs_mnt)) {
-		ret = PTR_ERR(binderfs_mnt);
-		binderfs_mnt = NULL;
-		unregister_filesystem(&binder_fs_type);
-		unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
-	}
-
 	return ret;
 }
-
-device_initcall(init_binderfs);
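
binderfs now parses a single mount option, max=<n>, through the kernel's match_token()/match_int() helpers and rejects everything else. A userspace approximation of binderfs_parse_mount_opts(), with strtok() and sscanf() in place of the parser library; the names mirror the driver but the code is only a sketch:

#include <stdio.h>
#include <string.h>

#define BINDERFS_MAX_MINOR (1U << 20)	/* MINORBITS worth of devices */

static int parse_opts(char *data, int *max)
{
	*max = BINDERFS_MAX_MINOR;	/* default when no option is given */

	for (char *p = strtok(data, ","); p; p = strtok(NULL, ",")) {
		int v;

		if (sscanf(p, "max=%d", &v) == 1 &&
		    v >= 0 && v <= (int)BINDERFS_MAX_MINOR)
			*max = v;
		else
			return -1;	/* the driver returns -EINVAL */
	}
	return 0;
}

int main(void)
{
	char good[] = "max=16", bad[] = "size=4k";
	int max;

	printf("good: rc=%d max=%d\n", parse_opts(good, &max), max);
	printf("bad:  rc=%d\n", parse_opts(bad, &max));
	return 0;
}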
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4ca7a6b4eaae..8218db17ebdb 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1091,7 +1091,7 @@ comment "Generic fallback / legacy drivers"
 
 config PATA_ACPI
 	tristate "ACPI firmware driver for PATA"
-	depends on ATA_ACPI && ATA_BMDMA
+	depends on ATA_ACPI && ATA_BMDMA && PCI
 	help
 	  This option enables an ACPI method driver which drives
 	  motherboard PATA controller interfaces through the ACPI
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index ef356e70e6de..8810475f307a 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -254,6 +254,8 @@ enum {
 	AHCI_HFLAG_IS_MOBILE		= (1 << 25), /* mobile chipset, use
 							SATA_MOBILE_LPM_POLICY
 							as default lpm_policy */
+	AHCI_HFLAG_SUSPEND_PHYS		= (1 << 26), /* handle PHYs during
+							suspend/resume */
 
 	/* ap->flags bits */
 
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index f9cb51be38eb..d4bba3ace45d 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -28,6 +28,11 @@
 #define AHCI_WINDOW_BASE(win)	(0x64 + ((win) << 4))
 #define AHCI_WINDOW_SIZE(win)	(0x68 + ((win) << 4))
 
+struct ahci_mvebu_plat_data {
+	int (*plat_config)(struct ahci_host_priv *hpriv);
+	unsigned int flags;
+};
+
 static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
 				   const struct mbus_dram_target_info *dram)
 {
@@ -62,6 +67,35 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
 	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
 }
 
+static int ahci_mvebu_armada_380_config(struct ahci_host_priv *hpriv)
+{
+	const struct mbus_dram_target_info *dram;
+	int rc = 0;
+
+	dram = mv_mbus_dram_info();
+	if (dram)
+		ahci_mvebu_mbus_config(hpriv, dram);
+	else
+		rc = -ENODEV;
+
+	ahci_mvebu_regret_option(hpriv);
+
+	return rc;
+}
+
+static int ahci_mvebu_armada_3700_config(struct ahci_host_priv *hpriv)
+{
+	u32 reg;
+
+	writel(0, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_ADDR);
+
+	reg = readl(hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+	reg |= BIT(6);
+	writel(reg, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
+
+	return 0;
+}
+
 /**
  * ahci_mvebu_stop_engine
  *
@@ -126,13 +160,9 @@ static int ahci_mvebu_resume(struct platform_device *pdev)
 {
 	struct ata_host *host = platform_get_drvdata(pdev);
 	struct ahci_host_priv *hpriv = host->private_data;
-	const struct mbus_dram_target_info *dram;
+	const struct ahci_mvebu_plat_data *pdata = hpriv->plat_data;
 
-	dram = mv_mbus_dram_info();
-	if (dram)
-		ahci_mvebu_mbus_config(hpriv, dram);
-
-	ahci_mvebu_regret_option(hpriv);
+	pdata->plat_config(hpriv);
 
 	return ahci_platform_resume_host(&pdev->dev);
 }
@@ -154,29 +184,30 @@ static struct scsi_host_template ahci_platform_sht = {
 
 static int ahci_mvebu_probe(struct platform_device *pdev)
 {
+	const struct ahci_mvebu_plat_data *pdata;
 	struct ahci_host_priv *hpriv;
-	const struct mbus_dram_target_info *dram;
 	int rc;
 
+	pdata = of_device_get_match_data(&pdev->dev);
+	if (!pdata)
+		return -EINVAL;
+
 	hpriv = ahci_platform_get_resources(pdev, 0);
 	if (IS_ERR(hpriv))
 		return PTR_ERR(hpriv);
 
+	hpriv->flags |= pdata->flags;
+	hpriv->plat_data = (void *)pdata;
+
 	rc = ahci_platform_enable_resources(hpriv);
 	if (rc)
 		return rc;
 
 	hpriv->stop_engine = ahci_mvebu_stop_engine;
 
-	if (of_device_is_compatible(pdev->dev.of_node,
-				    "marvell,armada-380-ahci")) {
-		dram = mv_mbus_dram_info();
-		if (!dram)
-			return -ENODEV;
-
-		ahci_mvebu_mbus_config(hpriv, dram);
-		ahci_mvebu_regret_option(hpriv);
-	}
+	rc = pdata->plat_config(hpriv);
+	if (rc)
+		goto disable_resources;
 
 	rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
 				     &ahci_platform_sht);
@@ -190,18 +221,28 @@ disable_resources:
 	return rc;
 }
 
+static const struct ahci_mvebu_plat_data ahci_mvebu_armada_380_plat_data = {
+	.plat_config = ahci_mvebu_armada_380_config,
+};
+
+static const struct ahci_mvebu_plat_data ahci_mvebu_armada_3700_plat_data = {
+	.plat_config = ahci_mvebu_armada_3700_config,
+	.flags = AHCI_HFLAG_SUSPEND_PHYS,
+};
+
 static const struct of_device_id ahci_mvebu_of_match[] = {
-	{ .compatible = "marvell,armada-380-ahci", },
-	{ .compatible = "marvell,armada-3700-ahci", },
+	{
+		.compatible = "marvell,armada-380-ahci",
+		.data = &ahci_mvebu_armada_380_plat_data,
+	},
+	{
+		.compatible = "marvell,armada-3700-ahci",
+		.data = &ahci_mvebu_armada_3700_plat_data,
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
 
-/*
- * We currently don't provide power management related operations,
- * since there is no suspend/resume support at the platform level for
- * Armada 38x for the moment.
- */
 static struct platform_driver ahci_mvebu_driver = {
 	.probe = ahci_mvebu_probe,
 	.remove = ata_platform_remove_one,
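
The ahci_mvebu rework is the standard OF match-data pattern: each compatible string carries a pointer to per-variant configuration (a plat_config hook plus extra host flags), so probe() and resume() stay variant-agnostic. A compilable miniature of the lookup, with mocked structures and trivial config callbacks:

#include <stdio.h>
#include <string.h>

struct plat_data {
	int (*plat_config)(void);
	unsigned int flags;
};

static int cfg_380(void)  { puts("380: mbus windows + regret option"); return 0; }
static int cfg_3700(void) { puts("3700: vendor-specific PHY setup");   return 0; }

static const struct plat_data armada_380  = { cfg_380,  0 };
static const struct plat_data armada_3700 = { cfg_3700, 1 /* e.g. SUSPEND_PHYS */ };

static const struct {
	const char *compatible;
	const struct plat_data *data;
} of_match[] = {
	{ "marvell,armada-380-ahci",  &armada_380  },
	{ "marvell,armada-3700-ahci", &armada_3700 },
};

static const struct plat_data *match_data(const char *compat)
{
	for (unsigned int i = 0; i < sizeof(of_match) / sizeof(of_match[0]); i++)
		if (!strcmp(of_match[i].compatible, compat))
			return of_match[i].data;
	return NULL;	/* probe() bails out with -EINVAL in this case */
}

int main(void)
{
	const struct plat_data *pdata = match_data("marvell,armada-3700-ahci");

	return pdata ? pdata->plat_config() : 1;
}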
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 4b900fc659f7..81b1a3332ed6 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -56,6 +56,12 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 		if (rc)
 			goto disable_phys;
 
+		rc = phy_set_mode(hpriv->phys[i], PHY_MODE_SATA);
+		if (rc) {
+			phy_exit(hpriv->phys[i]);
+			goto disable_phys;
+		}
+
 		rc = phy_power_on(hpriv->phys[i]);
 		if (rc) {
 			phy_exit(hpriv->phys[i]);
@@ -738,6 +744,9 @@ int ahci_platform_suspend_host(struct device *dev)
 	writel(ctl, mmio + HOST_CTL);
 	readl(mmio + HOST_CTL); /* flush */
 
+	if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
+		ahci_platform_disable_phys(hpriv);
+
 	return ata_host_suspend(host, PMSG_SUSPEND);
 }
 EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
@@ -756,6 +765,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
 int ahci_platform_resume_host(struct device *dev)
 {
 	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
 	int rc;
 
 	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
@@ -766,6 +776,9 @@ int ahci_platform_resume_host(struct device *dev)
 		ahci_init_controller(host);
 	}
 
+	if (hpriv->flags & AHCI_HFLAG_SUSPEND_PHYS)
+		ahci_platform_enable_phys(hpriv);
+
 	ata_host_resume(host);
 
 	return 0;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8c3f9e6af89..adf28788cab5 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, 4554 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, 4555 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
4556 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, 4556 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
4557 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
4557 4558
4558 /* devices that don't properly handle queued TRIM commands */ 4559 /* devices that don't properly handle queued TRIM commands */
4559 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4560 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 8cc9c429ad95..9e7fc302430f 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -915,6 +915,10 @@ static struct scsi_host_template pata_macio_sht = {
 	.sg_tablesize		= MAX_DCMDS,
 	/* We may not need that strict one */
 	.dma_boundary		= ATA_DMA_BOUNDARY,
+	/* Not sure what the real max is but we know it's less than 64K, let's
+	 * use 64K minus 256
+	 */
+	.max_segment_size	= MAX_DBDMA_SEG,
 	.slave_configure	= pata_macio_slave_config,
 };
 
@@ -1044,11 +1048,6 @@ static int pata_macio_common_init(struct pata_macio_priv *priv,
 	/* Make sure we have sane initial timings in the cache */
 	pata_macio_default_timings(priv);
 
-	/* Not sure what the real max is but we know it's less than 64K, let's
-	 * use 64K minus 256
-	 */
-	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);
-
 	/* Allocate libata host for 1 port */
 	memset(&pinfo, 0, sizeof(struct ata_port_info));
 	pmac_macio_calc_timing_masks(priv, &pinfo);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 4dc528bf8e85..9c1247d42897 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -729,8 +729,8 @@ static int sata_fsl_port_start(struct ata_port *ap)
 	if (!pp)
 		return -ENOMEM;
 
-	mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
-				  GFP_KERNEL);
+	mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+				 GFP_KERNEL);
 	if (!mem) {
 		kfree(pp);
 		return -ENOMEM;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e0bcf9b2dab0..174e84ce4379 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -245,8 +245,15 @@ struct inic_port_priv {
 
 static struct scsi_host_template inic_sht = {
 	ATA_BASE_SHT(DRV_NAME),
 	.sg_tablesize	= LIBATA_MAX_PRD, /* maybe it can be larger? */
-	.dma_boundary	= INIC_DMA_BOUNDARY,
+
+	/*
+	 * This controller is braindamaged. dma_boundary is 0xffff like others
+	 * but it will lock up the whole machine HARD if 65536 byte PRD entry
+	 * is fed. Reduce maximum segment size.
+	 */
+	.dma_boundary	= INIC_DMA_BOUNDARY,
+	.max_segment_size = 65536 - 512,
 };
 
 static const int scr_map[] = {
@@ -868,17 +875,6 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return rc;
 	}
 
-	/*
-	 * This controller is braindamaged. dma_boundary is 0xffff
-	 * like others but it will lock up the whole machine HARD if
-	 * 65536 byte PRD entry is fed. Reduce maximum segment size.
-	 */
-	rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to set the maximum segment size\n");
-		return rc;
-	}
-
 	rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
 	if (rc) {
 		dev_err(&pdev->dev, "failed to initialize controller\n");
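
Both pata_macio and sata_inic162x move a runtime dma_set_max_seg_size() call into the scsi_host_template, where the limit is declared once as data. The shape of that change, reduced to the two relevant template fields (a sketch, not the real structure):

#include <stdio.h>

struct sht_sketch {
	unsigned long dma_boundary;
	unsigned int max_segment_size;
};

static const struct sht_sketch inic_sht = {
	.dma_boundary	  = 0xffff,	 /* INIC_DMA_BOUNDARY */
	.max_segment_size = 65536 - 512, /* never emit a full 64K PRD entry */
};

int main(void)
{
	printf("max segment: %u bytes\n", inic_sht.max_segment_size);
	return 0;
}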
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102dcfec4..211607986134 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -533,9 +533,10 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
 
 static int he_init_tpdrq(struct he_dev *he_dev)
 {
-	he_dev->tpdrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
-						 &he_dev->tpdrq_phys, GFP_KERNEL);
+	he_dev->tpdrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+						CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+						&he_dev->tpdrq_phys,
+						GFP_KERNEL);
 	if (he_dev->tpdrq_base == NULL) {
 		hprintk("failed to alloc tpdrq\n");
 		return -ENOMEM;
@@ -717,7 +718,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
 		   instead of '/ 512', use '>> 9' to prevent a call
 		   to divdu3 on x86 platforms
 		*/
-		rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+		rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
 
 		if (rate_cps < 10)
 			rate_cps = 10;	/* 2.2.1 minimum payload rate is 10 cps */
@@ -805,9 +806,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 		goto out_free_rbpl_virt;
 	}
 
-	he_dev->rbpl_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
-						&he_dev->rbpl_phys, GFP_KERNEL);
+	he_dev->rbpl_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+					       &he_dev->rbpl_phys, GFP_KERNEL);
 	if (he_dev->rbpl_base == NULL) {
 		hprintk("failed to alloc rbpl_base\n");
 		goto out_destroy_rbpl_pool;
@@ -844,9 +845,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* rx buffer ready queue */
 
-	he_dev->rbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
-						&he_dev->rbrq_phys, GFP_KERNEL);
+	he_dev->rbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+					       &he_dev->rbrq_phys, GFP_KERNEL);
 	if (he_dev->rbrq_base == NULL) {
 		hprintk("failed to allocate rbrq\n");
 		goto out_free_rbpl;
@@ -868,9 +869,9 @@ static int he_init_group(struct he_dev *he_dev, int group)
 
 	/* tx buffer ready queue */
 
-	he_dev->tbrq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-						CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
-						&he_dev->tbrq_phys, GFP_KERNEL);
+	he_dev->tbrq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					       CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+					       &he_dev->tbrq_phys, GFP_KERNEL);
 	if (he_dev->tbrq_base == NULL) {
 		hprintk("failed to allocate tbrq\n");
 		goto out_free_rbpq_base;
@@ -913,11 +914,9 @@ static int he_init_irq(struct he_dev *he_dev)
 	/* 2.9.3.5 tail offset for each interrupt queue is located after the
 	   end of the interrupt queue */
 
-	he_dev->irq_base = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					       (CONFIG_IRQ_SIZE + 1)
-					       * sizeof(struct he_irq),
-					       &he_dev->irq_phys,
-					       GFP_KERNEL);
+	he_dev->irq_base = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					      (CONFIG_IRQ_SIZE + 1) * sizeof(struct he_irq),
+					      &he_dev->irq_phys, GFP_KERNEL);
 	if (he_dev->irq_base == NULL) {
 		hprintk("failed to allocate irq\n");
 		return -ENOMEM;
@@ -1464,9 +1463,9 @@ static int he_start(struct atm_dev *dev)
 
 	/* host status page */
 
-	he_dev->hsp = dma_zalloc_coherent(&he_dev->pci_dev->dev,
-					  sizeof(struct he_hsp),
-					  &he_dev->hsp_phys, GFP_KERNEL);
+	he_dev->hsp = dma_alloc_coherent(&he_dev->pci_dev->dev,
+					 sizeof(struct he_hsp),
+					 &he_dev->hsp_phys, GFP_KERNEL);
 	if (he_dev->hsp == NULL) {
 		hprintk("failed to allocate host status page\n");
 		return -ENOMEM;
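
The he.c allocation hunks belong to the tree-wide phase-out of dma_zalloc_coherent(); dma_alloc_coherent() itself returns zeroed memory, so the rename is behavior-neutral. The one functional change here is the 1UL in the rate computation: with a plain int constant, 1 << 31 overflows before the cast widens the result. Demonstrated in isolation:

#include <stdio.h>

int main(void)
{
	unsigned int exp = 31, man = 0;

	/* patched form: the shift happens in unsigned long, never in int */
	unsigned long long rate_cps =
		(unsigned long long)(1UL << exp) * (man + 512) >> 9;

	printf("rate_cps = %llu\n", rate_cps);
	return 0;
}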
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 6e737142ceaa..43a14579e80e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,8 +641,8 @@ alloc_scq(struct idt77252_dev *card, int class)
 	scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
 	if (!scq)
 		return NULL;
-	scq->base = dma_zalloc_coherent(&card->pcidev->dev, SCQ_SIZE,
-					&scq->paddr, GFP_KERNEL);
+	scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,
+				       &scq->paddr, GFP_KERNEL);
 	if (scq->base == NULL) {
 		kfree(scq);
 		return NULL;
@@ -971,8 +971,8 @@ init_rsq(struct idt77252_dev *card)
 {
 	struct rsq_entry *rsqe;
 
-	card->rsq.base = dma_zalloc_coherent(&card->pcidev->dev, RSQSIZE,
-					     &card->rsq.paddr, GFP_KERNEL);
+	card->rsq.base = dma_alloc_coherent(&card->pcidev->dev, RSQSIZE,
+					    &card->rsq.paddr, GFP_KERNEL);
 	if (card->rsq.base == NULL) {
 		printk("%s: can't allocate RSQ.\n", card->name);
 		return -1;
@@ -3390,10 +3390,10 @@ static int init_card(struct atm_dev *dev)
 	writel(0, SAR_REG_GP);
 
 	/* Initialize RAW Cell Handle Register */
-	card->raw_cell_hnd = dma_zalloc_coherent(&card->pcidev->dev,
-						 2 * sizeof(u32),
-						 &card->raw_cell_paddr,
-						 GFP_KERNEL);
+	card->raw_cell_hnd = dma_alloc_coherent(&card->pcidev->dev,
+						2 * sizeof(u32),
+						&card->raw_cell_paddr,
+						GFP_KERNEL);
 	if (!card->raw_cell_hnd) {
 		printk("%s: memory allocation failure.\n", card->name);
 		deinit_card(card);
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
index a43276c76fc6..21393ec3b9a4 100644
--- a/drivers/auxdisplay/ht16k33.c
+++ b/drivers/auxdisplay/ht16k33.c
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client)
 	struct ht16k33_priv *priv = i2c_get_clientdata(client);
 	struct ht16k33_fbdev *fbdev = &priv->fbdev;
 
-	cancel_delayed_work(&fbdev->work);
+	cancel_delayed_work_sync(&fbdev->work);
 	unregister_framebuffer(fbdev->info);
 	framebuffer_release(fbdev->info);
 	free_page((unsigned long) fbdev->buffer);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cf78fa6d470d..a7359535caf5 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
 	ct_idx = get_cacheinfo_idx(this_leaf->type);
 	propname = cache_type_info[ct_idx].size_prop;
 
-	if (of_property_read_u32(np, propname, &this_leaf->size))
-		this_leaf->size = 0;
+	of_property_read_u32(np, propname, &this_leaf->size);
 }
 
 /* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
 	ct_idx = get_cacheinfo_idx(this_leaf->type);
 	propname = cache_type_info[ct_idx].nr_sets_prop;
 
-	if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
-		this_leaf->number_of_sets = 0;
+	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
 }
 
 static void cache_associativity(struct cacheinfo *this_leaf)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a690fd400260..0992e67e862b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,7 @@
 #include <trace/events/power.h>
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
+#include <linux/devfreq.h>
 #include <linux/timer.h>
 
 #include "../base.h"
@@ -1078,6 +1079,7 @@ void dpm_resume(pm_message_t state)
 		dpm_show_time(starttime, state, 0, NULL);
 
 	cpufreq_resume();
+	devfreq_resume();
 	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 }
 
@@ -1852,6 +1854,7 @@ int dpm_suspend(pm_message_t state)
 	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
 	might_sleep();
 
+	devfreq_suspend();
 	cpufreq_suspend();
 
 	mutex_lock(&dpm_list_mtx);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 70624695b6d5..0ea2139c50d8 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -121,7 +121,7 @@ static void pm_runtime_cancel_pending(struct device *dev)
  * Compute the autosuspend-delay expiration time based on the device's
  * power.last_busy time. If the delay has already expired or is disabled
  * (negative) or the power.use_autosuspend flag isn't set, return 0.
- * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
+ * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero).
  *
  * This function may be called either with or without dev->power.lock held.
  * Either way it can be racy, since power.last_busy may be updated at any time.
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
 	u64 last_busy, expires = 0;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = ktime_get_mono_fast_ns();
 
 	if (!dev->power.use_autosuspend)
 		goto out;
@@ -141,7 +141,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev)
 
 	last_busy = READ_ONCE(dev->power.last_busy);
 
-	expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
 	if (expires <= now)
 		expires = 0;	/* Already expired. */
 
@@ -525,7 +525,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 			 * We add a slack of 25% to gather wakeups
 			 * without sacrificing the granularity.
 			 */
-			u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+			u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
 				(NSEC_PER_MSEC >> 2);
 
 			dev->power.timer_expires = expires;
@@ -905,8 +905,11 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
 	spin_lock_irqsave(&dev->power.lock, flags);
 
 	expires = dev->power.timer_expires;
-	/* If 'expire' is after 'jiffies' we've been called too early. */
-	if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
+	/*
+	 * If 'expires' is after the current time, we've been called
+	 * too early.
+	 */
+	if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
 		dev->power.timer_expires = 0;
 		rpm_suspend(dev, dev->power.timer_autosuspends ?
 			(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
@@ -925,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
925int pm_schedule_suspend(struct device *dev, unsigned int delay) 928int pm_schedule_suspend(struct device *dev, unsigned int delay)
926{ 929{
927 unsigned long flags; 930 unsigned long flags;
928 ktime_t expires; 931 u64 expires;
929 int retval; 932 int retval;
930 933
931 spin_lock_irqsave(&dev->power.lock, flags); 934 spin_lock_irqsave(&dev->power.lock, flags);
@@ -942,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
942 /* Other scheduled or pending requests need to be canceled. */ 945 /* Other scheduled or pending requests need to be canceled. */
943 pm_runtime_cancel_pending(dev); 946 pm_runtime_cancel_pending(dev);
944 947
945 expires = ktime_add(ktime_get(), ms_to_ktime(delay)); 948 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
946 dev->power.timer_expires = ktime_to_ns(expires); 949 dev->power.timer_expires = expires;
947 dev->power.timer_autosuspends = 0; 950 dev->power.timer_autosuspends = 0;
948 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); 951 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
949 952
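Editor's sketch (plain userspace C, not kernel code): the hunks above switch runtime PM timing to raw u64 nanoseconds, and the added (u64) casts matter because power.autosuspend_delay is an int. Without widening first, "delay * NSEC_PER_MSEC" is evaluated in 32-bit arithmetic on 32-bit targets and wraps for delays beyond roughly 2.1 seconds:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000UL

int main(void)
{
	int delay_ms = 5000;	/* a plausible 5 s autosuspend delay */

	/* 32-bit product wraps: 5e9 mod 2^32 = 705032704 */
	uint32_t wrapped = (uint32_t)delay_ms * (uint32_t)NSEC_PER_MSEC;
	/* widen before multiplying, as the fixed lines do */
	uint64_t correct = (uint64_t)delay_ms * NSEC_PER_MSEC;

	printf("32-bit math: %u ns\n", wrapped);
	printf("64-bit math: %llu ns\n", (unsigned long long)correct);
	return 0;
}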
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1bd1145ad8b5..330c1f7e9665 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -108,6 +108,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 	 * suppress pointless writes.
 	 */
 	for (i = 0; i < d->chip->num_regs; i++) {
+		if (!d->chip->mask_base)
+			continue;
+
 		reg = d->chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (d->chip->mask_invert) {
@@ -258,7 +261,7 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
 	const struct regmap_irq_type *t = &irq_data->type;
 
 	if ((t->types_supported & type) != type)
-		return -ENOTSUPP;
+		return 0;
 
 	reg = t->type_reg_offset / map->reg_stride;
 
@@ -588,6 +591,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	/* Mask all the interrupts by default */
 	for (i = 0; i < chip->num_regs; i++) {
 		d->mask_buf[i] = d->mask_buf_def[i];
+		if (!chip->mask_base)
+			continue;
+
 		reg = chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (chip->mask_invert)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 6f2856c6d0f2..55481b40df9a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 
 	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
 		if (lock_fdc(drive))
-			return -EINTR;
+			return 0;
 		poll_drive(false, 0);
 		process_fd_request();
 	}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8a0720d3653..cf5538942834 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1190,6 +1190,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 		goto out_unlock;
 	}
 
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
 	/* I/O need to be drained during transfer transition */
 	blk_mq_freeze_queue(lo->lo_queue);
 
@@ -1218,6 +1224,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
+		/* kill_bdev should have truncated all the pages */
+		if (lo->lo_device->bd_inode->i_mapping->nrpages) {
+			err = -EAGAIN;
+			pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+				__func__, lo->lo_number, lo->lo_file_name,
+				lo->lo_device->bd_inode->i_mapping->nrpages);
+			goto out_unfreeze;
+		}
 		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
 			goto out_unfreeze;
@@ -1443,22 +1457,39 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
 
 static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 {
+	int err = 0;
+
 	if (lo->lo_state != Lo_bound)
 		return -ENXIO;
 
 	if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
 		return -EINVAL;
 
+	if (lo->lo_queue->limits.logical_block_size != arg) {
+		sync_blockdev(lo->lo_device);
+		kill_bdev(lo->lo_device);
+	}
+
 	blk_mq_freeze_queue(lo->lo_queue);
 
+	/* kill_bdev should have truncated all the pages */
+	if (lo->lo_queue->limits.logical_block_size != arg &&
+	    lo->lo_device->bd_inode->i_mapping->nrpages) {
+		err = -EAGAIN;
+		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
+			__func__, lo->lo_number, lo->lo_file_name,
+			lo->lo_device->bd_inode->i_mapping->nrpages);
+		goto out_unfreeze;
+	}
+
 	blk_queue_logical_block_size(lo->lo_queue, arg);
 	blk_queue_physical_block_size(lo->lo_queue, arg);
 	blk_queue_io_min(lo->lo_queue, arg);
 	loop_update_dio(lo);
-
+out_unfreeze:
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
-	return 0;
+	return err;
 }
 
 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
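Editor's sketch: both loop hunks use the same flush-then-verify pattern, which a standalone skeleton makes easier to see. The function names below (flush_cache, freeze_io, and so on) are hypothetical stand-ins for the real block-layer calls, not actual APIs:

#include <errno.h>
#include <stdio.h>

struct fakedev { unsigned long cached_pages; };

static void flush_cache(struct fakedev *d)   { d->cached_pages = 0; }
static void freeze_io(struct fakedev *d)     { (void)d; }
static void unfreeze_io(struct fakedev *d)   { (void)d; }

/* Flush first, freeze I/O, then verify nothing crept back in. */
static int change_geometry(struct fakedev *d)
{
	int err = 0;

	flush_cache(d);			/* like sync_blockdev() + kill_bdev() */
	freeze_io(d);			/* like blk_mq_freeze_queue() */
	if (d->cached_pages) {		/* pages dirtied in the window */
		err = -EAGAIN;		/* caller can simply retry */
		goto out_unfreeze;
	}
	/* ... apply the new offset/size/block size here ... */
out_unfreeze:
	unfreeze_io(d);
	return err;
}

int main(void)
{
	struct fakedev d = { .cached_pages = 12 };

	printf("result: %d\n", change_geometry(&d));	/* flushed, so 0 */
	return 0;
}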
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08696f5f00bb..7c9a949e876b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
 	set_capacity(nbd->disk, config->bytesize >> 9);
 	if (bdev) {
-		if (bdev->bd_disk)
+		if (bdev->bd_disk) {
 			bd_set_size(bdev, config->bytesize);
-		else
+			set_blocksize(bdev, config->blksize);
+		} else
 			bdev->bd_invalidated = 1;
 		bdput(bdev);
 	}
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index b3df2793e7cd..34b22d6523ba 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -97,6 +97,7 @@ void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
+	pr_err("null_blk: CONFIG_BLK_DEV_ZONED not enabled\n");
 	return -EINVAL;
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e5140bbf241..1e92b61d0bd5 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5986,7 +5986,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
 	struct list_head *tmp;
 	int dev_id;
 	char opt_buf[6];
-	bool already = false;
 	bool force = false;
 	int ret;
 
@@ -6019,13 +6018,13 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
 		spin_lock_irq(&rbd_dev->lock);
 		if (rbd_dev->open_count && !force)
 			ret = -EBUSY;
-		else
-			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
-						   &rbd_dev->flags);
+		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
+					  &rbd_dev->flags))
+			ret = -EINPROGRESS;
 		spin_unlock_irq(&rbd_dev->lock);
 	}
 	spin_unlock(&rbd_dev_list_lock);
-	if (ret < 0 || already)
+	if (ret)
 		return ret;
 
 	if (force) {
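Editor's sketch: the rbd hunk turns a silently ignored duplicate unmap into an explicit -EINPROGRESS by acting on the return of test_and_set_bit(), which atomically reports whether the flag was already set. A minimal userspace analogue with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_REMOVING (1u << 0)

/* Minimal analogue of the kernel's test_and_set_bit(). */
static int test_and_set_flag(atomic_uint *flags, unsigned int bit)
{
	return (atomic_fetch_or(flags, bit) & bit) != 0;
}

int main(void)
{
	atomic_uint flags = 0;

	/* First remover wins and proceeds with the teardown. */
	printf("first caller already removing? %d\n",
	       test_and_set_flag(&flags, FLAG_REMOVING));
	/* A concurrent second unmap now gets -EINPROGRESS instead of 0. */
	printf("second caller already removing? %d\n",
	       test_and_set_flag(&flags, FLAG_REMOVING));
	return 0;
}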
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a10d5736d8f7..ab893a7571a2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2641,8 +2641,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
 		"comp pci_alloc, total bytes %zd entries %d\n",
 		SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
 
-	skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
-				     &skdev->cq_dma_address, GFP_KERNEL);
+	skcomp = dma_alloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+				    &skdev->cq_dma_address, GFP_KERNEL);
 
 	if (skcomp == NULL) {
 		rc = -ENOMEM;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 33c5cc879f24..04ca65912638 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -316,11 +316,9 @@ static ssize_t idle_store(struct device *dev,
 		 * See the comment in writeback_store.
 		 */
 		zram_slot_lock(zram, index);
-		if (!zram_allocated(zram, index) ||
-				zram_test_flag(zram, index, ZRAM_UNDER_WB))
-			goto next;
-		zram_set_flag(zram, index, ZRAM_IDLE);
-next:
+		if (zram_allocated(zram, index) &&
+				!zram_test_flag(zram, index, ZRAM_UNDER_WB))
+			zram_set_flag(zram, index, ZRAM_IDLE);
 		zram_slot_unlock(zram, index);
 	}
 
@@ -330,6 +328,41 @@ next:
 }
 
 #ifdef CONFIG_ZRAM_WRITEBACK
+static ssize_t writeback_limit_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	u64 val;
+	ssize_t ret = -EINVAL;
+
+	if (kstrtoull(buf, 10, &val))
+		return ret;
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	zram->wb_limit_enable = val;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+	ret = len;
+
+	return ret;
+}
+
+static ssize_t writeback_limit_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	bool val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	spin_lock(&zram->wb_limit_lock);
+	val = zram->wb_limit_enable;
+	spin_unlock(&zram->wb_limit_lock);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
 static ssize_t writeback_limit_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -341,9 +374,9 @@ static ssize_t writeback_limit_store(struct device *dev,
 		return ret;
 
 	down_read(&zram->init_lock);
-	atomic64_set(&zram->stats.bd_wb_limit, val);
-	if (val == 0)
-		zram->stop_writeback = false;
+	spin_lock(&zram->wb_limit_lock);
+	zram->bd_wb_limit = val;
+	spin_unlock(&zram->wb_limit_lock);
 	up_read(&zram->init_lock);
 	ret = len;
 
@@ -357,7 +390,9 @@ static ssize_t writeback_limit_show(struct device *dev,
 	struct zram *zram = dev_to_zram(dev);
 
 	down_read(&zram->init_lock);
-	val = atomic64_read(&zram->stats.bd_wb_limit);
+	spin_lock(&zram->wb_limit_lock);
+	val = zram->bd_wb_limit;
+	spin_unlock(&zram->wb_limit_lock);
 	up_read(&zram->init_lock);
 
 	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
@@ -588,8 +623,8 @@ static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
 	return 1;
 }
 
-#define HUGE_WRITEBACK 0x1
-#define IDLE_WRITEBACK 0x2
+#define HUGE_WRITEBACK 1
+#define IDLE_WRITEBACK 2
 
 static ssize_t writeback_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
@@ -602,7 +637,7 @@ static ssize_t writeback_store(struct device *dev,
 	struct page *page;
 	ssize_t ret, sz;
 	char mode_buf[8];
-	unsigned long mode = -1UL;
+	int mode = -1;
 	unsigned long blk_idx = 0;
 
 	sz = strscpy(mode_buf, buf, sizeof(mode_buf));
@@ -618,7 +653,7 @@ static ssize_t writeback_store(struct device *dev,
 	else if (!strcmp(mode_buf, "huge"))
 		mode = HUGE_WRITEBACK;
 
-	if (mode == -1UL)
+	if (mode == -1)
 		return -EINVAL;
 
 	down_read(&zram->init_lock);
@@ -645,10 +680,13 @@ static ssize_t writeback_store(struct device *dev,
 		bvec.bv_len = PAGE_SIZE;
 		bvec.bv_offset = 0;
 
-		if (zram->stop_writeback) {
+		spin_lock(&zram->wb_limit_lock);
+		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
+			spin_unlock(&zram->wb_limit_lock);
 			ret = -EIO;
 			break;
 		}
+		spin_unlock(&zram->wb_limit_lock);
 
 		if (!blk_idx) {
 			blk_idx = alloc_block_bdev(zram);
@@ -667,10 +705,11 @@ static ssize_t writeback_store(struct device *dev,
 		    zram_test_flag(zram, index, ZRAM_UNDER_WB))
 			goto next;
 
-		if ((mode & IDLE_WRITEBACK &&
-		     !zram_test_flag(zram, index, ZRAM_IDLE)) &&
-		    (mode & HUGE_WRITEBACK &&
-		     !zram_test_flag(zram, index, ZRAM_HUGE)))
+		if (mode == IDLE_WRITEBACK &&
+		    !zram_test_flag(zram, index, ZRAM_IDLE))
+			goto next;
+		if (mode == HUGE_WRITEBACK &&
+		    !zram_test_flag(zram, index, ZRAM_HUGE))
 			goto next;
 		/*
 		 * Clearing ZRAM_UNDER_WB is duty of caller.
@@ -732,11 +771,10 @@ static ssize_t writeback_store(struct device *dev,
 		zram_set_element(zram, index, blk_idx);
 		blk_idx = 0;
 		atomic64_inc(&zram->stats.pages_stored);
-		if (atomic64_add_unless(&zram->stats.bd_wb_limit,
-					-1 << (PAGE_SHIFT - 12), 0)) {
-			if (atomic64_read(&zram->stats.bd_wb_limit) == 0)
-				zram->stop_writeback = true;
-		}
+		spin_lock(&zram->wb_limit_lock);
+		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+		spin_unlock(&zram->wb_limit_lock);
 next:
 		zram_slot_unlock(zram, index);
 	}
@@ -1812,6 +1850,7 @@ static DEVICE_ATTR_RW(comp_algorithm);
 static DEVICE_ATTR_RW(backing_dev);
 static DEVICE_ATTR_WO(writeback);
 static DEVICE_ATTR_RW(writeback_limit);
+static DEVICE_ATTR_RW(writeback_limit_enable);
 #endif
 
 static struct attribute *zram_disk_attrs[] = {
@@ -1828,6 +1867,7 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_backing_dev.attr,
 	&dev_attr_writeback.attr,
 	&dev_attr_writeback_limit.attr,
+	&dev_attr_writeback_limit_enable.attr,
 #endif
 	&dev_attr_io_stat.attr,
 	&dev_attr_mm_stat.attr,
@@ -1867,7 +1907,9 @@ static int zram_add(void)
 	device_id = ret;
 
 	init_rwsem(&zram->init_lock);
-
+#ifdef CONFIG_ZRAM_WRITEBACK
+	spin_lock_init(&zram->wb_limit_lock);
+#endif
 	queue = blk_alloc_queue(GFP_KERNEL);
 	if (!queue) {
 		pr_err("Error allocating disk queue for device %d\n",
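Editor's sketch: the reworked limit accounting keeps bd_wb_limit in 4K units under a spinlock, and "1UL << (PAGE_SHIFT - 12)" is the number of 4K units one native page consumes. A small standalone demo of that arithmetic (page shifts other than 12 are shown purely for illustration):

#include <stdio.h>

/* How many 4K accounting units one native page consumes. */
static unsigned long units_per_page(unsigned int page_shift)
{
	return 1UL << (page_shift - 12);
}

int main(void)
{
	unsigned long long budget = 4;		/* writeback_limit, in 4K units */
	unsigned int shifts[] = { 12, 13, 14 };	/* 4K, 8K, 16K pages */

	for (int i = 0; i < 3; i++) {
		unsigned long long left = budget;

		if (left > 0)
			left -= units_per_page(shifts[i]);
		printf("PAGE_SHIFT=%u: one page written back, budget %llu -> %llu\n",
		       shifts[i], budget, left);
	}
	return 0;
}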
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 4bd3afd15e83..f2fd46daa760 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -86,7 +86,6 @@ struct zram_stats {
 	atomic64_t bd_count;		/* no. of pages in backing device */
 	atomic64_t bd_reads;		/* no. of reads from backing device */
 	atomic64_t bd_writes;		/* no. of writes from backing device */
-	atomic64_t bd_wb_limit;		/* writeback limit of backing device */
 #endif
 };
 
@@ -114,8 +113,10 @@ struct zram {
 	 */
 	bool claim; /* Protected by bdev->bd_mutex */
 	struct file *backing_dev;
-	bool stop_writeback;
 #ifdef CONFIG_ZRAM_WRITEBACK
+	spinlock_t wb_limit_lock;
+	bool wb_limit_enable;
+	u64 bd_wb_limit;
 	struct block_device *bdev;
 	unsigned int old_block_size;
 	unsigned long *bitmap;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f94d33525771..d299ec79e4c3 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 	SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
 		   SYSC_QUIRK_LEGACY_IDLE),
 	SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
-		   SYSC_QUIRK_LEGACY_IDLE),
+		   0),
 	/* Some timers on omap4 and later */
 	SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
-		   SYSC_QUIRK_LEGACY_IDLE),
+		   0),
 	SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
-		   SYSC_QUIRK_LEGACY_IDLE),
+		   0),
 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
 		   SYSC_QUIRK_LEGACY_IDLE),
 	/* Uarts on omap4 and later */
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a74ce885b541..c518659b4d9f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -32,6 +32,7 @@
 #include <linux/moduleparam.h>
 #include <linux/workqueue.h>
 #include <linux/uuid.h>
+#include <linux/nospec.h>
 
 #define IPMI_DRIVER_VERSION "39.2"
 
@@ -62,7 +63,8 @@ static void ipmi_debug_msg(const char *title, unsigned char *data,
 { }
 #endif
 
-static int initialized;
+static bool initialized;
+static bool drvregistered;
 
 enum ipmi_panic_event_op {
 	IPMI_SEND_PANIC_EVENT_NONE,
@@ -612,7 +614,7 @@ static DEFINE_MUTEX(ipmidriver_mutex);
 
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
-DEFINE_STATIC_SRCU(ipmi_interfaces_srcu);
+struct srcu_struct ipmi_interfaces_srcu;
 
 /*
  * List of watchers that want to know when smi's are added and deleted.
@@ -720,7 +722,15 @@ struct watcher_entry {
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
 	struct ipmi_smi *intf;
-	int index;
+	int index, rv;
+
+	/*
+	 * Make sure the driver is actually initialized, this handles
+	 * problems with initialization order.
+	 */
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	mutex_lock(&smi_watchers_mutex);
 
@@ -884,7 +894,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
 
 	if (user) {
 		user->handler->ipmi_recv_hndl(msg, user->handler_data);
-		release_ipmi_user(msg->user, index);
+		release_ipmi_user(user, index);
 	} else {
 		/* User went away, give up. */
 		ipmi_free_recv_msg(msg);
@@ -1076,7 +1086,7 @@ int ipmi_create_user(unsigned int if_num,
 {
 	unsigned long flags;
 	struct ipmi_user *new_user;
-	int rv = 0, index;
+	int rv, index;
 	struct ipmi_smi *intf;
 
 	/*
@@ -1094,18 +1104,9 @@ int ipmi_create_user(unsigned int if_num,
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
 	if (!new_user)
@@ -1183,6 +1184,7 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
 static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
+	cleanup_srcu_struct(&user->release_barrier);
 	kfree(user);
 }
 
@@ -1259,7 +1261,6 @@ int ipmi_destroy_user(struct ipmi_user *user)
 {
 	_ipmi_destroy_user(user);
 
-	cleanup_srcu_struct(&user->release_barrier);
 	kref_put(&user->refcount, free_user);
 
 	return 0;
@@ -1298,10 +1299,12 @@ int ipmi_set_my_address(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].address = address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1318,10 +1321,12 @@ int ipmi_get_my_address(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].address;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1338,10 +1343,12 @@ int ipmi_set_my_LUN(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		user->intf->addrinfo[channel].lun = LUN & 0x3;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -1358,10 +1365,12 @@ int ipmi_get_my_LUN(struct ipmi_user *user,
 	if (!user)
 		return -ENODEV;
 
-	if (channel >= IPMI_MAX_CHANNELS)
+	if (channel >= IPMI_MAX_CHANNELS) {
 		rv = -EINVAL;
-	else
+	} else {
+		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
 		*address = user->intf->addrinfo[channel].lun;
+	}
 	release_ipmi_user(user, index);
 
 	return rv;
@@ -2184,6 +2193,7 @@ static int check_addr(struct ipmi_smi *intf,
 {
 	if (addr->channel >= IPMI_MAX_CHANNELS)
 		return -EINVAL;
+	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
 	*lun = intf->addrinfo[addr->channel].lun;
 	*saddr = intf->addrinfo[addr->channel].address;
 	return 0;
@@ -3291,17 +3301,9 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 	 * Make sure the driver is actually initialized, this handles
 	 * problems with initialization order.
 	 */
-	if (!initialized) {
-		rv = ipmi_init_msghandler();
-		if (rv)
-			return rv;
-		/*
-		 * The init code doesn't return an error if it was turned
-		 * off, but it won't initialize.  Check that.
-		 */
-		if (!initialized)
-			return -ENODEV;
-	}
+	rv = ipmi_init_msghandler();
+	if (rv)
+		return rv;
 
 	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
 	if (!intf)
@@ -5017,6 +5019,22 @@ static int panic_event(struct notifier_block *this,
 	return NOTIFY_DONE;
 }
 
+/* Must be called with ipmi_interfaces_mutex held. */
+static int ipmi_register_driver(void)
+{
+	int rv;
+
+	if (drvregistered)
+		return 0;
+
+	rv = driver_register(&ipmidriver.driver);
+	if (rv)
+		pr_err("Could not register IPMI driver\n");
+	else
+		drvregistered = true;
+	return rv;
+}
+
 static struct notifier_block panic_block = {
 	.notifier_call	= panic_event,
 	.next		= NULL,
@@ -5027,66 +5045,75 @@ static int ipmi_init_msghandler(void)
 {
 	int rv;
 
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	if (rv)
+		goto out;
 	if (initialized)
-		return 0;
-
-	rv = driver_register(&ipmidriver.driver);
-	if (rv) {
-		pr_err("Could not register IPMI driver\n");
-		return rv;
-	}
+		goto out;
 
-	pr_info("version " IPMI_DRIVER_VERSION "\n");
+	init_srcu_struct(&ipmi_interfaces_srcu);
 
 	timer_setup(&ipmi_timer, ipmi_timeout, 0);
 	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 
 	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 
-	initialized = 1;
+	initialized = true;
 
-	return 0;
+out:
	mutex_unlock(&ipmi_interfaces_mutex);
+	return rv;
 }
 
 static int __init ipmi_init_msghandler_mod(void)
 {
-	ipmi_init_msghandler();
-	return 0;
+	int rv;
+
+	pr_info("version " IPMI_DRIVER_VERSION "\n");
+
+	mutex_lock(&ipmi_interfaces_mutex);
+	rv = ipmi_register_driver();
+	mutex_unlock(&ipmi_interfaces_mutex);
+
+	return rv;
 }
 
 static void __exit cleanup_ipmi(void)
 {
 	int count;
 
-	if (!initialized)
-		return;
-
-	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
+	if (initialized) {
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &panic_block);
 
-	/*
-	 * This can't be called if any interfaces exist, so no worry
-	 * about shutting down the interfaces.
-	 */
+		/*
+		 * This can't be called if any interfaces exist, so no worry
+		 * about shutting down the interfaces.
		 */
 
-	/*
-	 * Tell the timer to stop, then wait for it to stop.  This
-	 * avoids problems with race conditions removing the timer
-	 * here.
-	 */
-	atomic_inc(&stop_operation);
-	del_timer_sync(&ipmi_timer);
+		/*
+		 * Tell the timer to stop, then wait for it to stop.  This
+		 * avoids problems with race conditions removing the timer
+		 * here.
+		 */
+		atomic_inc(&stop_operation);
+		del_timer_sync(&ipmi_timer);
 
-	driver_unregister(&ipmidriver.driver);
+		initialized = false;
 
-	initialized = 0;
+		/* Check for buffer leaks. */
+		count = atomic_read(&smi_msg_inuse_count);
+		if (count != 0)
+			pr_warn("SMI message count %d at exit\n", count);
+		count = atomic_read(&recv_msg_inuse_count);
+		if (count != 0)
+			pr_warn("recv message count %d at exit\n", count);
 
-	/* Check for buffer leaks. */
-	count = atomic_read(&smi_msg_inuse_count);
-	if (count != 0)
-		pr_warn("SMI message count %d at exit\n", count);
-	count = atomic_read(&recv_msg_inuse_count);
-	if (count != 0)
-		pr_warn("recv message count %d at exit\n", count);
+		cleanup_srcu_struct(&ipmi_interfaces_srcu);
+	}
+	if (drvregistered)
+		driver_unregister(&ipmidriver.driver);
 }
 module_exit(cleanup_ipmi);
 
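Editor's sketch: the msghandler rework makes ipmi_init_msghandler() safe to call from any entry point by funnelling everything through an idempotent, mutex-protected registration step. A compact userspace analogue of that init-once shape (pthread mutex standing in for ipmi_interfaces_mutex; the printed strings are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool registered;
static bool initialized;

/* Analogue of ipmi_register_driver(): idempotent, caller holds the lock. */
static int register_driver(void)
{
	if (registered)
		return 0;
	puts("driver registered");
	registered = true;
	return 0;
}

/* Analogue of the reworked ipmi_init_msghandler(): callable from anywhere. */
static int init_msghandler(void)
{
	int rv;

	pthread_mutex_lock(&lock);
	rv = register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;
	puts("core initialized (timer, SRCU, panic notifier)");
	initialized = true;
out:
	pthread_mutex_unlock(&lock);
	return rv;
}

int main(void)
{
	init_msghandler();	/* e.g. from ipmi_create_user() */
	init_msghandler();	/* e.g. from ipmi_register_smi(): no-op */
	return 0;
}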
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index ca9528c4f183..b7a1ae2afaea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -632,8 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 
 		/* Remove the multi-part read marker. */
 		len -= 2;
+		data += 2;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i] = data[i+2];
+			ssif_info->data[i] = data[i];
 		ssif_info->multi_len = len;
 		ssif_info->multi_pos = 1;
 
@@ -661,8 +662,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		}
 
 		blocknum = data[0];
+		len--;
+		data++;
+
+		if (blocknum != 0xff && len != 31) {
+			/* All blocks but the last must have 31 data bytes. */
+			result = -EIO;
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				pr_info("Received middle message <31\n");
 
-		if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+			goto continue_op;
+		}
+
+		if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
 			/* Received message too big, abort the operation. */
 			result = -E2BIG;
 			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -671,16 +683,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			goto continue_op;
 		}
 
-		/* Remove the blocknum from the data. */
-		len--;
 		for (i = 0; i < len; i++)
-			ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+			ssif_info->data[i + ssif_info->multi_len] = data[i];
 		ssif_info->multi_len += len;
 		if (blocknum == 0xff) {
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum + 1 != ssif_info->multi_pos) {
+		} else if (blocknum != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
@@ -707,6 +717,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		}
 	}
 
+ continue_op:
 	if (result < 0) {
 		ssif_inc_stat(ssif_info, receive_errors);
 	} else {
@@ -714,8 +725,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 		ssif_inc_stat(ssif_info, received_message_parts);
 	}
 
-
- continue_op:
 	if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
 		pr_info("DONE 1: state = %d, result=%d\n",
 			ssif_info->ssif_state, result);
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index b5e3103c1175..e43c876a9223 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
 #include <linux/mutex.h>
 #include <linux/delay.h>
 #include <linux/serial_8250.h>
+#include <linux/nospec.h>
 #include "smapi.h"
 #include "mwavedd.h"
 #include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
 				" ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 					" Invalid ipcnum %x\n", ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			PRINTK_3(TRACE_MWAVE,
 				"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
 				" ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
 						ipcnum);
 				return -EINVAL;
 			}
+			ipcnum = array_index_nospec(ipcnum,
+						    ARRAY_SIZE(pDrvData->IPCs));
 			mutex_lock(&mwave_mutex);
 			if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
 				pDrvData->IPCs[ipcnum].bIsEnabled = false;
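Editor's sketch: the mwave hunks, like the ipmi_msghandler ones above, clamp a user-controlled index with array_index_nospec() after the bounds check so the CPU cannot speculate past it. The demo below is a simplified userspace rendering of the branchless mask idea behind the kernel's generic fallback helper, not the helper itself:

#include <stdint.h>
#include <stdio.h>

/* All-ones when index < size, zero otherwise, computed without a branch. */
static uint64_t index_mask(uint64_t index, uint64_t size)
{
	return ~(int64_t)(index | (size - 1 - index)) >> 63;
}

int main(void)
{
	uint64_t size = 16;
	uint64_t idx[] = { 0, 15, 16, 1000 };

	for (int i = 0; i < 4; i++) {
		uint64_t clamped = idx[i] & index_mask(idx[i], size);

		printf("index %4llu -> %llu\n",
		       (unsigned long long)idx[i],
		       (unsigned long long)clamped);
	}
	return 0;
}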
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index e5b2fe80eab4..d2f0bb5ba47e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -293,7 +293,6 @@ config COMMON_CLK_BD718XX
 source "drivers/clk/actions/Kconfig"
 source "drivers/clk/bcm/Kconfig"
 source "drivers/clk/hisilicon/Kconfig"
-source "drivers/clk/imx/Kconfig"
 source "drivers/clk/imgtec/Kconfig"
 source "drivers/clk/imx/Kconfig"
 source "drivers/clk/ingenic/Kconfig"
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 5b393e711e94..7d16ab0784ec 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
 
 		if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
 			src = VC5_PRIM_SRC_SHDN_EN_XTAL;
-		if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+		else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
 			src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+		else /* Invalid; should have been caught by vc5_probe() */
+			return -EINVAL;
 	}
 
 	return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 75d13c0eff12..d2477a5058ac 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core,
 	if (!parent)
 		return -EINVAL;
 
-	for (i = 0; i < core->num_parents; i++)
-		if (clk_core_get_parent_by_index(core, i) == parent)
+	for (i = 0; i < core->num_parents; i++) {
+		if (core->parents[i] == parent)
+			return i;
+
+		if (core->parents[i])
+			continue;
+
+		/* Fallback to comparing globally unique names */
+		if (!strcmp(parent->name, core->parent_names[i])) {
+			core->parents[i] = parent;
 			return i;
+		}
+	}
 
 	return -EINVAL;
 }
@@ -2779,7 +2789,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
-	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+	seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
 	seq_printf(s, "\"duty_cycle\": %u",
 		   clk_core_get_scaled_duty_cycle(c, 100000));
 }
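Editor's sketch: the clk_fetch_parent_index() hunk checks the cached parent pointer first and falls back to a name comparison only for unresolved slots, caching the hit. A toy model of that lookup (the names and array sizes are invented for illustration):

#include <stdio.h>
#include <string.h>

struct clk { const char *name; };

#define NUM_PARENTS 3
static const char *parent_names[NUM_PARENTS] = { "xtal", "pll0", "pll1" };
static struct clk *parents[NUM_PARENTS];	/* lazily filled cache */

static int fetch_parent_index(struct clk *parent)
{
	for (int i = 0; i < NUM_PARENTS; i++) {
		if (parents[i] == parent)
			return i;
		if (parents[i])
			continue;	/* cached, but a different clock */
		if (!strcmp(parent->name, parent_names[i])) {
			parents[i] = parent;	/* cache for next time */
			return i;
		}
	}
	return -1;
}

int main(void)
{
	struct clk pll0 = { "pll0" };

	printf("first lookup: %d\n", fetch_parent_index(&pll0));  /* by name */
	printf("second lookup: %d\n", fetch_parent_index(&pll0)); /* cached */
	return 0;
}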
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
index 0026c3969b1e..76b9eb15604e 100644
--- a/drivers/clk/imx/clk-frac-pll.c
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
 {
 	struct clk_frac_pll *pll = to_clk_frac_pll(hw);
 	u32 val, divfi, divff;
-	u64 temp64 = parent_rate;
+	u64 temp64;
 	int ret;
 
 	parent_rate *= 8;
 	rate *= 2;
 	divfi = rate / parent_rate;
-	temp64 *= rate - divfi;
+	temp64 = parent_rate * divfi;
+	temp64 = rate - temp64;
 	temp64 *= PLL_FRAC_DENOM;
 	do_div(temp64, parent_rate);
 	divff = temp64;
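Editor's sketch: the fix computes the fractional divider from the true remainder, rate - parent_rate * divfi, where the old code multiplied parent_rate by the dimensionally meaningless rate - divfi. Worked numbers, assuming the driver's 2^24 fraction denominator (the input frequencies are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define PLL_FRAC_DENOM 0x1000000	/* assumed 2^24 denominator */

int main(void)
{
	/* After the driver's pre-scaling: parent_rate *= 8, rate *= 2. */
	uint64_t parent_rate = 25000000ULL * 8;	/* 200 MHz */
	uint64_t rate = 648000000ULL * 2;	/* 1296 MHz */

	uint32_t divfi = rate / parent_rate;	/* integer part: 6 */

	/* Fixed computation: scale the true remainder. */
	uint64_t rem = rate - parent_rate * divfi;	/* 96 MHz */
	uint64_t divff = rem * PLL_FRAC_DENOM / parent_rate;

	printf("divfi = %u, divff = %llu (expect 0.48 * 2^24 = %llu)\n",
	       divfi, (unsigned long long)divff,
	       (unsigned long long)(PLL_FRAC_DENOM * 48ULL / 100));
	return 0;
}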
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index 99c2508de8e5..fb6edf1b8aa2 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -169,6 +169,8 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
 	base = devm_ioremap(dev, res->start, resource_size(res));
 	if (!base)
 		return -ENOMEM;
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 61fefc046ec5..d083b860f083 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -53,7 +53,6 @@
 #define APMU_DISP1 0x110
 #define APMU_CCIC0 0x50
 #define APMU_CCIC1 0xf4
-#define APMU_SP 0x68
 #define MPMU_UART_PLL 0x14
 
 struct mmp2_clk_unit {
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = {
 	.reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
 };
 
-static DEFINE_SPINLOCK(sp_lock);
-
 static struct mmp_param_mux_clk apmu_mux_clks[] = {
 	{MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
 	{MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
@@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = {
 	{MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
 	{MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
 	{MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
-	{MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock},
 };
 
 static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 1b1ba54e33dd..1c04575c118f 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -215,6 +215,7 @@ config MSM_MMCC_8996
 
 config MSM_GCC_8998
 	tristate "MSM8998 Global Clock Controller"
+	select QCOM_GDSC
 	help
 	  Support for the global clock controller on msm8998 devices.
 	  Say Y if you want to use peripheral devices such as UART, SPI,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index c782e62dd98b..58fa5c247af1 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = {
 	"core_bi_pll_test_se",
 };
 
-static const char * const gcc_parent_names_7[] = {
-	"bi_tcxo",
+static const char * const gcc_parent_names_7_ao[] = {
+	"bi_tcxo_ao",
 	"gpll0",
 	"gpll0_out_even",
 	"core_bi_pll_test_se",
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = {
 	"core_bi_pll_test_se",
 };
 
+static const char * const gcc_parent_names_8_ao[] = {
+	"bi_tcxo_ao",
+	"gpll0",
+	"core_bi_pll_test_se",
+};
+
 static const struct parent_map gcc_parent_map_10[] = {
 	{ P_BI_TCXO, 0 },
 	{ P_GPLL0_OUT_MAIN, 1 },
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
 	.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_ahb_clk_src",
-		.parent_names = gcc_parent_names_7,
+		.parent_names = gcc_parent_names_7_ao,
 		.num_parents = 4,
 		.ops = &clk_rcg2_ops,
 	},
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
 	.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gcc_cpuss_rbcpr_clk_src",
-		.parent_names = gcc_parent_names_8,
+		.parent_names = gcc_parent_names_8_ao,
 		.num_parents = 3,
 		.ops = &clk_rcg2_ops,
 	},
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 2d5d8b43727e..c4d0b6f6abf2 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
 	/* Read mdiv and fdiv from the fdbck register */
 	reg = readl(socfpgaclk->hw.reg + 0x4);
 	mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT;
-	vco_freq = (unsigned long long)parent_rate * (mdiv + 6);
+	vco_freq = (unsigned long long)vco_freq * (mdiv + 6);
 
 	return (unsigned long)vco_freq;
 }
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index 5b238fc314ac..8281dfbf38c2 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -12,17 +12,17 @@
 
 #include "stratix10-clk.h"
 
-static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk",
-					"f2s_free_clk",};
+static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk",
+					"f2s-free-clk",};
 static const char * const cntr_mux[] = { "main_pll", "periph_pll",
-					 "osc1", "cb_intosc_hs_div2_clk",
-					 "f2s_free_clk"};
-static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",};
+					 "osc1", "cb-intosc-hs-div2-clk",
+					 "f2s-free-clk"};
+static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",};
 
 static const char * const noc_free_mux[] = {"main_noc_base_clk",
 					    "peri_noc_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"};
 static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"};
@@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk"
 static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"};
 static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",};
 
-static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"};
+static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"};
 static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"};
 static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"};
 
 static const char * const mpu_free_mux[] = {"main_mpu_base_clk",
 					    "peri_mpu_base_clk",
-					    "osc1", "cb_intosc_hs_div2_clk",
-					    "f2s_free_clk"};
+					    "osc1", "cb-intosc-hs-div2-clk",
+					    "f2s-free-clk"};
 
 /* clocks in AO (always on) controller */
 static const struct stratix10_pll_clock s10_pll_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d3595758b..edc31bb56674 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
 	struct tegra_dfll_soc_data *soc;
 
 	soc = tegra_dfll_unregister(pdev);
-	if (IS_ERR(soc))
+	if (IS_ERR(soc)) {
 		dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
 			PTR_ERR(soc));
+		return PTR_ERR(soc);
+	}
 
 	tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
 
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 8d77090ad94a..0241450f3eb3 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
 	num_dividers = i;
 
 	tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
-	if (!tmp)
+	if (!tmp) {
+		*table = ERR_PTR(-ENOMEM);
 		return -ENOMEM;
+	}
 
 	valid_div = 0;
 	*width = 0;
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
 {
 	struct clk_omap_divider *div;
 	struct clk_omap_reg *reg;
+	int ret;
 
 	if (!setup)
 		return NULL;
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
 		div->flags |= CLK_DIVIDER_POWER_OF_TWO;
 
 	div->table = _get_div_table_from_setup(setup, &div->width);
+	if (IS_ERR(div->table)) {
+		ret = PTR_ERR(div->table);
+		kfree(div);
+		return ERR_PTR(ret);
+	}
+
 
 	div->shift = setup->bit_shift;
 	div->latch = -EINVAL;
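Editor's sketch: the divider fix stores ERR_PTR(-ENOMEM) in the returned table so callers can tell an allocation failure apart from a legitimately absent table, then propagates it with IS_ERR()/PTR_ERR(). A userspace re-creation of those helpers shows the convention (get_table below is a stand-in for _get_div_table_from_setup()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace re-creations of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
#define MAX_ERRNO 4095

static void *ERR_PTR(long err)     { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* NULL is no longer ambiguous: failure is an encoded errno pointer. */
static int *get_table(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return calloc(4, sizeof(int));
}

int main(void)
{
	int *table = get_table(1);

	if (IS_ERR(table))
		printf("propagated error: %ld\n", PTR_ERR(table));
	else
		free(table);
	return 0;
}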
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index f65cc0ff76ab..b0908ec62f73 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -669,8 +669,8 @@ static int zynqmp_clk_setup(struct device_node *np)
 	if (ret)
 		return ret;
 
-	zynqmp_data = kzalloc(sizeof(*zynqmp_data) + sizeof(*zynqmp_data) *
-			      clock_max_idx, GFP_KERNEL);
+	zynqmp_data = kzalloc(struct_size(zynqmp_data, hws, clock_max_idx),
+			      GFP_KERNEL);
 	if (!zynqmp_data)
 		return -ENOMEM;
 
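Editor's sketch: the old formula sized each trailing element as the whole struct instead of the element type; struct_size() sizes the flexible array correctly, and the real kernel helper additionally guards the multiplication against overflow. A simplified demo (the struct layout below is illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Simplified userspace stand-in for the kernel's struct_size(). */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

struct clk_hw { void *ops; };

struct cell_data {		/* illustrative layout, not the real one */
	unsigned int num;
	const char *name;
	struct clk_hw *hws[];
};

int main(void)
{
	struct cell_data *d = NULL;	/* sizeof() does not dereference */
	size_t n = 100;

	/* The old formula multiplied the whole struct size per element. */
	size_t old = sizeof(*d) + sizeof(*d) * n;
	size_t fixed = struct_size(d, hws, n);

	printf("old formula: %zu bytes\n", old);
	printf("struct_size: %zu bytes\n", fixed);
	return 0;
}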
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 595124074821..c364027638e1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
 	if (IS_ERR(parent))
 		return -ENODEV;
 
+	/* Bail out if both clocks point to fck */
+	if (clk_is_match(parent, timer->fclk))
+		return 0;
+
 	ret = clk_set_parent(timer->fclk, parent);
 	if (ret < 0)
 		pr_err("%s: failed to set parent\n", __func__);
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	timer->pdev = pdev;
 
 	pm_runtime_enable(dev);
-	pm_runtime_irq_safe(dev);
 
 	if (!timer->reserved) {
 		ret = pm_runtime_get_sync(dev);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 608af20a3494..b22e6bba71f1 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers"
 config CPUFREQ_DT
 	tristate "Generic DT based cpufreq driver"
 	depends on HAVE_CLK && OF
-	# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
-	depends on !CPU_THERMAL || THERMAL
 	select CPUFREQ_DT_PLATDEV
 	select PM_OPP
 	help
@@ -327,7 +325,6 @@ endif
 config QORIQ_CPUFREQ
 	tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
 	depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64)
-	depends on !CPU_THERMAL || THERMAL
 	select CLK_QORIQ
 	help
 	  This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 10bc5c798d17..f08bb0075316 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -40,8 +40,6 @@ config ARM_ARMADA_8K_CPUFREQ
40config ARM_BIG_LITTLE_CPUFREQ 40config ARM_BIG_LITTLE_CPUFREQ
41 tristate "Generic ARM big LITTLE CPUfreq driver" 41 tristate "Generic ARM big LITTLE CPUfreq driver"
42 depends on ARM_CPU_TOPOLOGY && HAVE_CLK 42 depends on ARM_CPU_TOPOLOGY && HAVE_CLK
43 # if CPU_THERMAL is on and THERMAL=m, ARM_BIG_LITTLE_CPUFREQ cannot be =y
44 depends on !CPU_THERMAL || THERMAL
45 select PM_OPP 43 select PM_OPP
46 help 44 help
47 This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. 45 This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
@@ -49,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ
49config ARM_SCPI_CPUFREQ 47config ARM_SCPI_CPUFREQ
50 tristate "SCPI based CPUfreq driver" 48 tristate "SCPI based CPUfreq driver"
51 depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI 49 depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI
52 depends on !CPU_THERMAL || THERMAL
53 help 50 help
54 This adds the CPUfreq driver support for ARM platforms using SCPI 51 This adds the CPUfreq driver support for ARM platforms using SCPI
55 protocol for CPU power management. 52 protocol for CPU power management.
@@ -104,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ
104config ARM_MEDIATEK_CPUFREQ 101config ARM_MEDIATEK_CPUFREQ
105 tristate "CPU Frequency scaling support for MediaTek SoCs" 102 tristate "CPU Frequency scaling support for MediaTek SoCs"
106 depends on ARCH_MEDIATEK && REGULATOR 103 depends on ARCH_MEDIATEK && REGULATOR
107 depends on !CPU_THERMAL || THERMAL
108 select PM_OPP 104 select PM_OPP
109 help 105 help
110 This adds the CPUFreq driver support for MediaTek SoCs. 106 This adds the CPUFreq driver support for MediaTek SoCs.
@@ -244,7 +240,6 @@ config ARM_SA1110_CPUFREQ
244config ARM_SCMI_CPUFREQ 240config ARM_SCMI_CPUFREQ
245 tristate "SCMI based CPUfreq driver" 241 tristate "SCMI based CPUfreq driver"
246 depends on ARM_SCMI_PROTOCOL || COMPILE_TEST 242 depends on ARM_SCMI_PROTOCOL || COMPILE_TEST
247 depends on !CPU_THERMAL || THERMAL
248 select PM_OPP 243 select PM_OPP
249 help 244 help
250 This adds the CPUfreq driver support for ARM platforms using SCMI 245 This adds the CPUfreq driver support for ARM platforms using SCMI
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d62fd374d5c7..c72258a44ba4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
916{ 916{
917 int ret; 917 int ret;
918 918
919 if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) 919 if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
920 pr_debug("Boost capabilities not present in the processor\n");
920 return; 921 return;
922 }
921 923
922 acpi_cpufreq_driver.set_boost = set_boost; 924 acpi_cpufreq_driver.set_boost = set_boost;
923 acpi_cpufreq_driver.boost_enabled = boost_state(0); 925 acpi_cpufreq_driver.boost_enabled = boost_state(0);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index fd25c21cee72..2ae978d27e61 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -42,6 +42,66 @@
42 */ 42 */
43static struct cppc_cpudata **all_cpu_data; 43static struct cppc_cpudata **all_cpu_data;
44 44
45struct cppc_workaround_oem_info {
46 char oem_id[ACPI_OEM_ID_SIZE + 1];
47 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
48 u32 oem_revision;
49};
50
51static bool apply_hisi_workaround;
52
53static struct cppc_workaround_oem_info wa_info[] = {
54 {
55 .oem_id = "HISI ",
56 .oem_table_id = "HIP07 ",
57 .oem_revision = 0,
58 }, {
59 .oem_id = "HISI ",
60 .oem_table_id = "HIP08 ",
61 .oem_revision = 0,
62 }
63};
64
65static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
66 unsigned int perf);
67
68/*
69 * The HISI platform does not support the delivered and reference
70 * performance counters. It calculates performance through a
71 * platform-specific mechanism; we reuse the desired performance register
72 * to store the real performance calculated by the platform.
73 */
74static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum)
75{
76 struct cppc_cpudata *cpudata = all_cpu_data[cpunum];
77 u64 desired_perf;
78 int ret;
79
80 ret = cppc_get_desired_perf(cpunum, &desired_perf);
81 if (ret < 0)
82 return -EIO;
83
84 return cppc_cpufreq_perf_to_khz(cpudata, desired_perf);
85}
86
87static void cppc_check_hisi_workaround(void)
88{
89 struct acpi_table_header *tbl;
90 acpi_status status = AE_OK;
91 int i;
92
93 status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
94 if (ACPI_FAILURE(status) || !tbl)
95 return;
96
97 for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
98 if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
99 !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
100 wa_info[i].oem_revision == tbl->oem_revision)
101 apply_hisi_workaround = true;
102 }
103}
104
45/* Callback function used to retrieve the max frequency from DMI */ 105/* Callback function used to retrieve the max frequency from DMI */
46static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) 106static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
47{ 107{
@@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
334 struct cppc_cpudata *cpu = all_cpu_data[cpunum]; 394 struct cppc_cpudata *cpu = all_cpu_data[cpunum];
335 int ret; 395 int ret;
336 396
397 if (apply_hisi_workaround)
398 return hisi_cppc_cpufreq_get_rate(cpunum);
399
337 ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); 400 ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
338 if (ret) 401 if (ret)
339 return ret; 402 return ret;
@@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void)
386 goto out; 449 goto out;
387 } 450 }
388 451
452 cppc_check_hisi_workaround();
453
389 ret = cpufreq_register_driver(&cppc_cpufreq_driver); 454 ret = cpufreq_register_driver(&cppc_cpufreq_driver);
390 if (ret) 455 if (ret)
391 goto out; 456 goto out;
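The workaround above keys off the ACPI PCCT header's OEM fields. Those fields are fixed-width and space-padded rather than NUL-terminated, which is why the match uses memcmp() with the ACPI size constants instead of strcmp(). The matching step in isolation (the helper name is made up):

    #include <linux/acpi.h>
    #include <linux/string.h>

    static bool oem_matches(const struct acpi_table_header *tbl,
                            const struct cppc_workaround_oem_info *wa)
    {
            /* Fixed-width, space-padded fields: compare bytes, not strings. */
            return !memcmp(wa->oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
                   !memcmp(wa->oem_table_id, tbl->oem_table_id,
                           ACPI_OEM_TABLE_ID_SIZE) &&
                   wa->oem_revision == tbl->oem_revision;
    }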
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index e58bfcb1169e..1aefaa1b0ca2 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/cpu_cooling.h>
17#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
18#include <linux/cpumask.h> 17#include <linux/cpumask.h>
19#include <linux/err.h> 18#include <linux/err.h>
@@ -30,7 +29,6 @@
30struct private_data { 29struct private_data {
31 struct opp_table *opp_table; 30 struct opp_table *opp_table;
32 struct device *cpu_dev; 31 struct device *cpu_dev;
33 struct thermal_cooling_device *cdev;
34 const char *reg_name; 32 const char *reg_name;
35 bool have_static_opps; 33 bool have_static_opps;
36}; 34};
@@ -297,11 +295,25 @@ out_put_clk:
297 return ret; 295 return ret;
298} 296}
299 297
298static int cpufreq_online(struct cpufreq_policy *policy)
299{
300 /* We did light-weight tear down earlier, nothing to do here */
301 return 0;
302}
303
304static int cpufreq_offline(struct cpufreq_policy *policy)
305{
306 /*
307 * Preserve policy->driver_data and don't free resources on light-weight
308 * tear down.
309 */
310 return 0;
311}
312
300static int cpufreq_exit(struct cpufreq_policy *policy) 313static int cpufreq_exit(struct cpufreq_policy *policy)
301{ 314{
302 struct private_data *priv = policy->driver_data; 315 struct private_data *priv = policy->driver_data;
303 316
304 cpufreq_cooling_unregister(priv->cdev);
305 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 317 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
306 if (priv->have_static_opps) 318 if (priv->have_static_opps)
307 dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); 319 dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
@@ -314,21 +326,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy)
314 return 0; 326 return 0;
315} 327}
316 328
317static void cpufreq_ready(struct cpufreq_policy *policy)
318{
319 struct private_data *priv = policy->driver_data;
320
321 priv->cdev = of_cpufreq_cooling_register(policy);
322}
323
324static struct cpufreq_driver dt_cpufreq_driver = { 329static struct cpufreq_driver dt_cpufreq_driver = {
325 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, 330 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
331 CPUFREQ_IS_COOLING_DEV,
326 .verify = cpufreq_generic_frequency_table_verify, 332 .verify = cpufreq_generic_frequency_table_verify,
327 .target_index = set_target, 333 .target_index = set_target,
328 .get = cpufreq_generic_get, 334 .get = cpufreq_generic_get,
329 .init = cpufreq_init, 335 .init = cpufreq_init,
330 .exit = cpufreq_exit, 336 .exit = cpufreq_exit,
331 .ready = cpufreq_ready, 337 .online = cpufreq_online,
338 .offline = cpufreq_offline,
332 .name = "cpufreq-dt", 339 .name = "cpufreq-dt",
333 .attr = cpufreq_dt_attr, 340 .attr = cpufreq_dt_attr,
334 .suspend = cpufreq_generic_suspend, 341 .suspend = cpufreq_generic_suspend,
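Two things happen in this conversion: the CPUFREQ_IS_COOLING_DEV flag hands cooling-device registration to the cpufreq core, and the new online/offline callbacks give the core a light-weight hotplug path that keeps policy->driver_data alive while a CPU is down. A driver opting in only needs the pair of stubs; sketch with a placeholder driver name (the core rejects a driver that supplies one callback without the other):

    #include <linux/cpufreq.h>

    /* Light-weight CPU hotplug hooks: nothing to rebuild, nothing to free. */
    static int foo_cpufreq_online(struct cpufreq_policy *policy)
    {
            return 0;       /* resources survived the light-weight offline */
    }

    static int foo_cpufreq_offline(struct cpufreq_policy *policy)
    {
            return 0;       /* keep policy->driver_data for the next online */
    }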
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f23ebb395f1..0e626b00053b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/cpufreq.h> 21#include <linux/cpufreq.h>
22#include <linux/cpu_cooling.h>
22#include <linux/delay.h> 23#include <linux/delay.h>
23#include <linux/device.h> 24#include <linux/device.h>
24#include <linux/init.h> 25#include <linux/init.h>
@@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
545 * SYSFS INTERFACE * 546 * SYSFS INTERFACE *
546 *********************************************************************/ 547 *********************************************************************/
547static ssize_t show_boost(struct kobject *kobj, 548static ssize_t show_boost(struct kobject *kobj,
548 struct attribute *attr, char *buf) 549 struct kobj_attribute *attr, char *buf)
549{ 550{
550 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); 551 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
551} 552}
552 553
553static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, 554static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
554 const char *buf, size_t count) 555 const char *buf, size_t count)
555{ 556{
556 int ret, enable; 557 int ret, enable;
557 558
@@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu)
1200 return -ENOMEM; 1201 return -ENOMEM;
1201 } 1202 }
1202 1203
1203 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1204 if (!new_policy && cpufreq_driver->online) {
1205 ret = cpufreq_driver->online(policy);
1206 if (ret) {
1207 pr_debug("%s: %d: initialization failed\n", __func__,
1208 __LINE__);
1209 goto out_exit_policy;
1210 }
1204 1211
1205 /* call driver. From then on the cpufreq must be able 1212 /* Recover policy->cpus using related_cpus */
1206 * to accept all calls to ->verify and ->setpolicy for this CPU 1213 cpumask_copy(policy->cpus, policy->related_cpus);
1207 */ 1214 } else {
1208 ret = cpufreq_driver->init(policy); 1215 cpumask_copy(policy->cpus, cpumask_of(cpu));
1209 if (ret) {
1210 pr_debug("initialization failed\n");
1211 goto out_free_policy;
1212 }
1213 1216
1214 ret = cpufreq_table_validate_and_sort(policy); 1217 /*
1215 if (ret) 1218 * Call driver. From then on the cpufreq must be able
1216 goto out_exit_policy; 1219 * to accept all calls to ->verify and ->setpolicy for this CPU.
1220 */
1221 ret = cpufreq_driver->init(policy);
1222 if (ret) {
1223 pr_debug("%s: %d: initialization failed\n", __func__,
1224 __LINE__);
1225 goto out_free_policy;
1226 }
1217 1227
1218 down_write(&policy->rwsem); 1228 ret = cpufreq_table_validate_and_sort(policy);
1229 if (ret)
1230 goto out_exit_policy;
1219 1231
1220 if (new_policy) {
1221 /* related_cpus should at least include policy->cpus. */ 1232 /* related_cpus should at least include policy->cpus. */
1222 cpumask_copy(policy->related_cpus, policy->cpus); 1233 cpumask_copy(policy->related_cpus, policy->cpus);
1223 } 1234 }
1224 1235
1236 down_write(&policy->rwsem);
1225 /* 1237 /*
1226 * affected cpus must always be the ones that are online. We aren't 1238 * affected cpus must always be the ones that are online. We aren't
1227 * managing offline cpus here. 1239 * managing offline cpus here.
@@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu)
1305 if (ret) { 1317 if (ret) {
1306 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", 1318 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1307 __func__, cpu, ret); 1319 __func__, cpu, ret);
1308 /* cpufreq_policy_free() will notify based on this */
1309 new_policy = false;
1310 goto out_destroy_policy; 1320 goto out_destroy_policy;
1311 } 1321 }
1312 1322
@@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu)
1318 if (cpufreq_driver->ready) 1328 if (cpufreq_driver->ready)
1319 cpufreq_driver->ready(policy); 1329 cpufreq_driver->ready(policy);
1320 1330
1331 if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
1332 cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV)
1333 policy->cdev = of_cpufreq_cooling_register(policy);
1334
1321 pr_debug("initialization complete\n"); 1335 pr_debug("initialization complete\n");
1322 1336
1323 return 0; 1337 return 0;
@@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu)
1405 goto unlock; 1419 goto unlock;
1406 } 1420 }
1407 1421
1422 if (IS_ENABLED(CONFIG_CPU_THERMAL) &&
1423 cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) {
1424 cpufreq_cooling_unregister(policy->cdev);
1425 policy->cdev = NULL;
1426 }
1427
1408 if (cpufreq_driver->stop_cpu) 1428 if (cpufreq_driver->stop_cpu)
1409 cpufreq_driver->stop_cpu(policy); 1429 cpufreq_driver->stop_cpu(policy);
1410 1430
@@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu)
1412 cpufreq_exit_governor(policy); 1432 cpufreq_exit_governor(policy);
1413 1433
1414 /* 1434 /*
1415 * Perform the ->exit() even during light-weight tear-down, 1435 * Perform the ->offline() during light-weight tear-down, as
1416 * since this is a core component, and is essential for the 1436 * that allows fast recovery when the CPU comes back.
1417 * subsequent light-weight ->init() to succeed.
1418 */ 1437 */
1419 if (cpufreq_driver->exit) { 1438 if (cpufreq_driver->offline) {
1439 cpufreq_driver->offline(policy);
1440 } else if (cpufreq_driver->exit) {
1420 cpufreq_driver->exit(policy); 1441 cpufreq_driver->exit(policy);
1421 policy->freq_table = NULL; 1442 policy->freq_table = NULL;
1422 } 1443 }
@@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1445 cpumask_clear_cpu(cpu, policy->real_cpus); 1466 cpumask_clear_cpu(cpu, policy->real_cpus);
1446 remove_cpu_dev_symlink(policy, dev); 1467 remove_cpu_dev_symlink(policy, dev);
1447 1468
1448 if (cpumask_empty(policy->real_cpus)) 1469 if (cpumask_empty(policy->real_cpus)) {
1470 /* We did light-weight exit earlier, do full tear down now */
1471 if (cpufreq_driver->offline)
1472 cpufreq_driver->exit(policy);
1473
1449 cpufreq_policy_free(policy); 1474 cpufreq_policy_free(policy);
1475 }
1450} 1476}
1451 1477
1452/** 1478/**
@@ -1530,17 +1556,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1530{ 1556{
1531 unsigned int ret_freq = 0; 1557 unsigned int ret_freq = 0;
1532 1558
1533 if (!cpufreq_driver->get) 1559 if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
1534 return ret_freq; 1560 return ret_freq;
1535 1561
1536 ret_freq = cpufreq_driver->get(policy->cpu); 1562 ret_freq = cpufreq_driver->get(policy->cpu);
1537 1563
1538 /* 1564 /*
1539 * Updating inactive policies is invalid, so avoid doing that. Also 1565 * If fast frequency switching is used with the given policy, the check
1540 * if fast frequency switching is used with the given policy, the check
1541 * against policy->cur is pointless, so skip it in that case too. 1566 * against policy->cur is pointless, so skip it in that case too.
1542 */ 1567 */
1543 if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled) 1568 if (policy->fast_switch_enabled)
1544 return ret_freq; 1569 return ret_freq;
1545 1570
1546 if (ret_freq && policy->cur && 1571 if (ret_freq && policy->cur &&
@@ -1569,10 +1594,7 @@ unsigned int cpufreq_get(unsigned int cpu)
1569 1594
1570 if (policy) { 1595 if (policy) {
1571 down_read(&policy->rwsem); 1596 down_read(&policy->rwsem);
1572 1597 ret_freq = __cpufreq_get(policy);
1573 if (!policy_is_inactive(policy))
1574 ret_freq = __cpufreq_get(policy);
1575
1576 up_read(&policy->rwsem); 1598 up_read(&policy->rwsem);
1577 1599
1578 cpufreq_cpu_put(policy); 1600 cpufreq_cpu_put(policy);
@@ -2196,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2196} 2218}
2197EXPORT_SYMBOL(cpufreq_get_policy); 2219EXPORT_SYMBOL(cpufreq_get_policy);
2198 2220
2199/* 2221/**
2200 * policy : current policy. 2222 * cpufreq_set_policy - Modify cpufreq policy parameters.
2201 * new_policy: policy to be set. 2223 * @policy: Policy object to modify.
2224 * @new_policy: New policy data.
2225 *
2226 * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
2227 * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
2228 * the driver's ->verify() callback again and run the notifiers for it again
2229 * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters
2230 * of @new_policy to @policy and either invoke the driver's ->setpolicy()
2231 * callback (if present) or carry out a governor update for @policy. That is,
2232 * run the current governor's ->limits() callback (if the governor field in
2233 * @new_policy points to the same object as the one in @policy) or replace the
2234 * governor for @policy with the new one stored in @new_policy.
2235 *
2236 * The cpuinfo part of @policy is not updated by this function.
2202 */ 2237 */
2203static int cpufreq_set_policy(struct cpufreq_policy *policy, 2238static int cpufreq_set_policy(struct cpufreq_policy *policy,
2204 struct cpufreq_policy *new_policy) 2239 struct cpufreq_policy *new_policy)
2205{ 2240{
2206 struct cpufreq_governor *old_gov; 2241 struct cpufreq_governor *old_gov;
2207 int ret; 2242 int ret;
@@ -2251,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2251 if (cpufreq_driver->setpolicy) { 2286 if (cpufreq_driver->setpolicy) {
2252 policy->policy = new_policy->policy; 2287 policy->policy = new_policy->policy;
2253 pr_debug("setting range\n"); 2288 pr_debug("setting range\n");
2254 return cpufreq_driver->setpolicy(new_policy); 2289 return cpufreq_driver->setpolicy(policy);
2255 } 2290 }
2256 2291
2257 if (new_policy->governor == policy->governor) { 2292 if (new_policy->governor == policy->governor) {
2258 pr_debug("cpufreq: governor limits update\n"); 2293 pr_debug("governor limits update\n");
2259 cpufreq_governor_limits(policy); 2294 cpufreq_governor_limits(policy);
2260 return 0; 2295 return 0;
2261 } 2296 }
@@ -2276,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2276 if (!ret) { 2311 if (!ret) {
2277 ret = cpufreq_start_governor(policy); 2312 ret = cpufreq_start_governor(policy);
2278 if (!ret) { 2313 if (!ret) {
2279 pr_debug("cpufreq: governor change\n"); 2314 pr_debug("governor change\n");
2280 sched_cpufreq_governor_change(policy, old_gov); 2315 sched_cpufreq_governor_change(policy, old_gov);
2281 return 0; 2316 return 0;
2282 } 2317 }
@@ -2297,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
2297} 2332}
2298 2333
2299/** 2334/**
2300 * cpufreq_update_policy - re-evaluate an existing cpufreq policy 2335 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2301 * @cpu: CPU which shall be re-evaluated 2336 * @cpu: CPU to re-evaluate the policy for.
2302 * 2337 *
2303 * Useful for policy notifiers which have different necessities 2338 * Update the current frequency for the cpufreq policy of @cpu and use
2304 * at different times. 2339 * cpufreq_set_policy() to re-apply the min and max limits saved in the
2340 * user_policy sub-structure of that policy, which triggers the evaluation
2341 * of policy notifiers and the cpufreq driver's ->verify() callback for the
2342 * policy in question, among other things.
2305 */ 2343 */
2306void cpufreq_update_policy(unsigned int cpu) 2344void cpufreq_update_policy(unsigned int cpu)
2307{ 2345{
@@ -2316,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu)
2316 if (policy_is_inactive(policy)) 2354 if (policy_is_inactive(policy))
2317 goto unlock; 2355 goto unlock;
2318 2356
2319 pr_debug("updating policy for CPU %u\n", cpu);
2320 memcpy(&new_policy, policy, sizeof(*policy));
2321 new_policy.min = policy->user_policy.min;
2322 new_policy.max = policy->user_policy.max;
2323
2324 /* 2357 /*
2325 * BIOS might change freq behind our back 2358 * BIOS might change freq behind our back
2326 * -> ask driver for current freq and notify governors about a change 2359 * -> ask driver for current freq and notify governors about a change
2327 */ 2360 */
2328 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { 2361 if (cpufreq_driver->get && !cpufreq_driver->setpolicy &&
2329 if (cpufreq_suspended) 2362 (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy))))
2330 goto unlock; 2363 goto unlock;
2331 2364
2332 new_policy.cur = cpufreq_update_current_freq(policy); 2365 pr_debug("updating policy for CPU %u\n", cpu);
2333 if (WARN_ON(!new_policy.cur)) 2366 memcpy(&new_policy, policy, sizeof(*policy));
2334 goto unlock; 2367 new_policy.min = policy->user_policy.min;
2335 } 2368 new_policy.max = policy->user_policy.max;
2336 2369
2337 cpufreq_set_policy(policy, &new_policy); 2370 cpufreq_set_policy(policy, &new_policy);
2338 2371
@@ -2483,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2483 driver_data->target) || 2516 driver_data->target) ||
2484 (driver_data->setpolicy && (driver_data->target_index || 2517 (driver_data->setpolicy && (driver_data->target_index ||
2485 driver_data->target)) || 2518 driver_data->target)) ||
2486 (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) 2519 (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2520 (!driver_data->online != !driver_data->offline))
2487 return -EINVAL; 2521 return -EINVAL;
2488 2522
2489 pr_debug("trying to register driver %s\n", driver_data->name); 2523 pr_debug("trying to register driver %s\n", driver_data->name);
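The extended sanity check relies on the `!a != !b` idiom: negation collapses each callback pointer to 0 or 1, so the expression is true exactly when one of ->online/->offline is set and the other is not. A worked one-liner (illustrative):

    #include <linux/types.h>

    /* true iff exactly one pointer is non-NULL: boolean XOR over presence */
    static bool exactly_one(const void *a, const void *b)
    {
            return !a != !b;
    }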
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129844a5..e2db5581489a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats)
31{ 31{
32 unsigned long long cur_time = get_jiffies_64(); 32 unsigned long long cur_time = get_jiffies_64();
33 33
34 spin_lock(&cpufreq_stats_lock);
35 stats->time_in_state[stats->last_index] += cur_time - stats->last_time; 34 stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
36 stats->last_time = cur_time; 35 stats->last_time = cur_time;
37 spin_unlock(&cpufreq_stats_lock);
38} 36}
39 37
40static void cpufreq_stats_clear_table(struct cpufreq_stats *stats) 38static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
41{ 39{
42 unsigned int count = stats->max_state; 40 unsigned int count = stats->max_state;
43 41
42 spin_lock(&cpufreq_stats_lock);
44 memset(stats->time_in_state, 0, count * sizeof(u64)); 43 memset(stats->time_in_state, 0, count * sizeof(u64));
45 memset(stats->trans_table, 0, count * count * sizeof(int)); 44 memset(stats->trans_table, 0, count * count * sizeof(int));
46 stats->last_time = get_jiffies_64(); 45 stats->last_time = get_jiffies_64();
47 stats->total_trans = 0; 46 stats->total_trans = 0;
47 spin_unlock(&cpufreq_stats_lock);
48} 48}
49 49
50static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) 50static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
51{ 51{
52 return sprintf(buf, "%d\n", policy->stats->total_trans); 52 return sprintf(buf, "%d\n", policy->stats->total_trans);
53} 53}
54cpufreq_freq_attr_ro(total_trans);
54 55
55static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) 56static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
56{ 57{
@@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
61 if (policy->fast_switch_enabled) 62 if (policy->fast_switch_enabled)
62 return 0; 63 return 0;
63 64
65 spin_lock(&cpufreq_stats_lock);
64 cpufreq_stats_update(stats); 66 cpufreq_stats_update(stats);
67 spin_unlock(&cpufreq_stats_lock);
68
65 for (i = 0; i < stats->state_num; i++) { 69 for (i = 0; i < stats->state_num; i++) {
66 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], 70 len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
67 (unsigned long long) 71 (unsigned long long)
@@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
69 } 73 }
70 return len; 74 return len;
71} 75}
76cpufreq_freq_attr_ro(time_in_state);
72 77
73static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, 78static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
74 size_t count) 79 size_t count)
@@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
77 cpufreq_stats_clear_table(policy->stats); 82 cpufreq_stats_clear_table(policy->stats);
78 return count; 83 return count;
79} 84}
85cpufreq_freq_attr_wo(reset);
80 86
81static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) 87static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
82{ 88{
@@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
126} 132}
127cpufreq_freq_attr_ro(trans_table); 133cpufreq_freq_attr_ro(trans_table);
128 134
129cpufreq_freq_attr_ro(total_trans);
130cpufreq_freq_attr_ro(time_in_state);
131cpufreq_freq_attr_wo(reset);
132
133static struct attribute *default_attrs[] = { 135static struct attribute *default_attrs[] = {
134 &total_trans.attr, 136 &total_trans.attr,
135 &time_in_state.attr, 137 &time_in_state.attr,
@@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
240 if (old_index == -1 || new_index == -1 || old_index == new_index) 242 if (old_index == -1 || new_index == -1 || old_index == new_index)
241 return; 243 return;
242 244
245 spin_lock(&cpufreq_stats_lock);
243 cpufreq_stats_update(stats); 246 cpufreq_stats_update(stats);
244 247
245 stats->last_index = new_index; 248 stats->last_index = new_index;
246 stats->trans_table[old_index * stats->max_state + new_index]++; 249 stats->trans_table[old_index * stats->max_state + new_index]++;
247 stats->total_trans++; 250 stats->total_trans++;
251 spin_unlock(&cpufreq_stats_lock);
248} 252}
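The locking change here is subtle: cpufreq_stats_update() used to take cpufreq_stats_lock itself, but the transition path also writes last_index, trans_table and total_trans, and those writes were outside the lock. Moving the lock to the callers makes the time accounting and the transition bookkeeping one critical section. A sketch of the resulting shape, mirroring the patched record-transition path:

    #include <linux/spinlock.h>

    /* One critical section: elapsed-time fold plus transition bookkeeping. */
    static void record_transition(struct cpufreq_stats *stats,
                                  int old_index, int new_index)
    {
            spin_lock(&cpufreq_stats_lock);
            cpufreq_stats_update(stats);
            stats->last_index = new_index;
            stats->trans_table[old_index * stats->max_state + new_index]++;
            stats->total_trans++;
            spin_unlock(&cpufreq_stats_lock);
    }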
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index d54a27c99121..940fe85db97a 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -23,13 +23,10 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/platform_data/davinci-cpufreq.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/export.h> 28#include <linux/export.h>
28 29
29#include <mach/hardware.h>
30#include <mach/cpufreq.h>
31#include <mach/common.h>
32
33struct davinci_cpufreq { 30struct davinci_cpufreq {
34 struct device *dev; 31 struct device *dev;
35 struct clk *armclk; 32 struct clk *armclk;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 60bea302abbe..2d3ef208dd70 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
323 states = 2; 323 states = 2;
324 324
325 /* Allocate private data and frequency table for current cpu */ 325 /* Allocate private data and frequency table for current cpu */
326 centaur = kzalloc(sizeof(*centaur) 326 centaur = kzalloc(struct_size(centaur, freq_table, states + 1),
327 + (states + 1) * sizeof(struct cpufreq_frequency_table), 327 GFP_KERNEL);
328 GFP_KERNEL);
329 if (!centaur) 328 if (!centaur)
330 return -ENOMEM; 329 return -ENOMEM;
331 eps_cpu[0] = centaur; 330 eps_cpu[0] = centaur;
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 9fedf627e000..ca955713e070 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -9,7 +9,6 @@
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/cpufreq.h> 11#include <linux/cpufreq.h>
12#include <linux/cpu_cooling.h>
13#include <linux/err.h> 12#include <linux/err.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/nvmem-consumer.h> 14#include <linux/nvmem-consumer.h>
@@ -52,7 +51,6 @@ static struct clk_bulk_data clks[] = {
52}; 51};
53 52
54static struct device *cpu_dev; 53static struct device *cpu_dev;
55static struct thermal_cooling_device *cdev;
56static bool free_opp; 54static bool free_opp;
57static struct cpufreq_frequency_table *freq_table; 55static struct cpufreq_frequency_table *freq_table;
58static unsigned int max_freq; 56static unsigned int max_freq;
@@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
193 return 0; 191 return 0;
194} 192}
195 193
196static void imx6q_cpufreq_ready(struct cpufreq_policy *policy)
197{
198 cdev = of_cpufreq_cooling_register(policy);
199
200 if (!cdev)
201 dev_err(cpu_dev,
202 "running cpufreq without cooling device: %ld\n",
203 PTR_ERR(cdev));
204}
205
206static int imx6q_cpufreq_init(struct cpufreq_policy *policy) 194static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
207{ 195{
208 int ret; 196 int ret;
@@ -214,22 +202,14 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
214 return ret; 202 return ret;
215} 203}
216 204
217static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
218{
219 cpufreq_cooling_unregister(cdev);
220
221 return 0;
222}
223
224static struct cpufreq_driver imx6q_cpufreq_driver = { 205static struct cpufreq_driver imx6q_cpufreq_driver = {
225 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, 206 .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
207 CPUFREQ_IS_COOLING_DEV,
226 .verify = cpufreq_generic_frequency_table_verify, 208 .verify = cpufreq_generic_frequency_table_verify,
227 .target_index = imx6q_set_target, 209 .target_index = imx6q_set_target,
228 .get = cpufreq_generic_get, 210 .get = cpufreq_generic_get,
229 .init = imx6q_cpufreq_init, 211 .init = imx6q_cpufreq_init,
230 .exit = imx6q_cpufreq_exit,
231 .name = "imx6q-cpufreq", 212 .name = "imx6q-cpufreq",
232 .ready = imx6q_cpufreq_ready,
233 .attr = cpufreq_generic_attr, 213 .attr = cpufreq_generic_attr,
234 .suspend = cpufreq_generic_suspend, 214 .suspend = cpufreq_generic_suspend,
235}; 215};
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index dd66decf2087..002f5169d4eb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -50,6 +50,8 @@
50#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 50#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
51#define fp_toint(X) ((X) >> FRAC_BITS) 51#define fp_toint(X) ((X) >> FRAC_BITS)
52 52
53#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
54
53#define EXT_BITS 6 55#define EXT_BITS 6
54#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) 56#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
55#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) 57#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
@@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void)
895/************************** sysfs begin ************************/ 897/************************** sysfs begin ************************/
896#define show_one(file_name, object) \ 898#define show_one(file_name, object) \
897 static ssize_t show_##file_name \ 899 static ssize_t show_##file_name \
898 (struct kobject *kobj, struct attribute *attr, char *buf) \ 900 (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
899 { \ 901 { \
900 return sprintf(buf, "%u\n", global.object); \ 902 return sprintf(buf, "%u\n", global.object); \
901 } 903 }
@@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf);
904static int intel_pstate_update_status(const char *buf, size_t size); 906static int intel_pstate_update_status(const char *buf, size_t size);
905 907
906static ssize_t show_status(struct kobject *kobj, 908static ssize_t show_status(struct kobject *kobj,
907 struct attribute *attr, char *buf) 909 struct kobj_attribute *attr, char *buf)
908{ 910{
909 ssize_t ret; 911 ssize_t ret;
910 912
@@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj,
915 return ret; 917 return ret;
916} 918}
917 919
918static ssize_t store_status(struct kobject *a, struct attribute *b, 920static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
919 const char *buf, size_t count) 921 const char *buf, size_t count)
920{ 922{
921 char *p = memchr(buf, '\n', count); 923 char *p = memchr(buf, '\n', count);
@@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b,
929} 931}
930 932
931static ssize_t show_turbo_pct(struct kobject *kobj, 933static ssize_t show_turbo_pct(struct kobject *kobj,
932 struct attribute *attr, char *buf) 934 struct kobj_attribute *attr, char *buf)
933{ 935{
934 struct cpudata *cpu; 936 struct cpudata *cpu;
935 int total, no_turbo, turbo_pct; 937 int total, no_turbo, turbo_pct;
@@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
955} 957}
956 958
957static ssize_t show_num_pstates(struct kobject *kobj, 959static ssize_t show_num_pstates(struct kobject *kobj,
958 struct attribute *attr, char *buf) 960 struct kobj_attribute *attr, char *buf)
959{ 961{
960 struct cpudata *cpu; 962 struct cpudata *cpu;
961 int total; 963 int total;
@@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
976} 978}
977 979
978static ssize_t show_no_turbo(struct kobject *kobj, 980static ssize_t show_no_turbo(struct kobject *kobj,
979 struct attribute *attr, char *buf) 981 struct kobj_attribute *attr, char *buf)
980{ 982{
981 ssize_t ret; 983 ssize_t ret;
982 984
@@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
998 return ret; 1000 return ret;
999} 1001}
1000 1002
1001static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 1003static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1002 const char *buf, size_t count) 1004 const char *buf, size_t count)
1003{ 1005{
1004 unsigned int input; 1006 unsigned int input;
@@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
1045 return count; 1047 return count;
1046} 1048}
1047 1049
1048static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 1050static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1049 const char *buf, size_t count) 1051 const char *buf, size_t count)
1050{ 1052{
1051 unsigned int input; 1053 unsigned int input;
@@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
1075 return count; 1077 return count;
1076} 1078}
1077 1079
1078static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 1080static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1079 const char *buf, size_t count) 1081 const char *buf, size_t count)
1080{ 1082{
1081 unsigned int input; 1083 unsigned int input;
@@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
1107} 1109}
1108 1110
1109static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, 1111static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
1110 struct attribute *attr, char *buf) 1112 struct kobj_attribute *attr, char *buf)
1111{ 1113{
1112 return sprintf(buf, "%u\n", hwp_boost); 1114 return sprintf(buf, "%u\n", hwp_boost);
1113} 1115}
1114 1116
1115static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b, 1117static ssize_t store_hwp_dynamic_boost(struct kobject *a,
1118 struct kobj_attribute *b,
1116 const char *buf, size_t count) 1119 const char *buf, size_t count)
1117{ 1120{
1118 unsigned int input; 1121 unsigned int input;
@@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void)
1444 return ret; 1447 return ret;
1445} 1448}
1446 1449
1447static int intel_pstate_get_base_pstate(struct cpudata *cpu)
1448{
1449 return global.no_turbo || global.turbo_disabled ?
1450 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1451}
1452
1453static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 1450static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1454{ 1451{
1455 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); 1452 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
@@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1470 1467
1471static void intel_pstate_max_within_limits(struct cpudata *cpu) 1468static void intel_pstate_max_within_limits(struct cpudata *cpu)
1472{ 1469{
1473 int pstate; 1470 int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1474 1471
1475 update_turbo_state(); 1472 update_turbo_state();
1476 pstate = intel_pstate_get_base_pstate(cpu);
1477 pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1478 intel_pstate_set_pstate(cpu, pstate); 1473 intel_pstate_set_pstate(cpu, pstate);
1479} 1474}
1480 1475
@@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu)
1678static inline int32_t get_target_pstate(struct cpudata *cpu) 1673static inline int32_t get_target_pstate(struct cpudata *cpu)
1679{ 1674{
1680 struct sample *sample = &cpu->sample; 1675 struct sample *sample = &cpu->sample;
1681 int32_t busy_frac, boost; 1676 int32_t busy_frac;
1682 int target, avg_pstate; 1677 int target, avg_pstate;
1683 1678
1684 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, 1679 busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
1685 sample->tsc); 1680 sample->tsc);
1686 1681
1687 boost = cpu->iowait_boost; 1682 if (busy_frac < cpu->iowait_boost)
1688 cpu->iowait_boost >>= 1; 1683 busy_frac = cpu->iowait_boost;
1689
1690 if (busy_frac < boost)
1691 busy_frac = boost;
1692 1684
1693 sample->busy_scaled = busy_frac * 100; 1685 sample->busy_scaled = busy_frac * 100;
1694 1686
@@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)
1715 1707
1716static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) 1708static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
1717{ 1709{
1718 int max_pstate = intel_pstate_get_base_pstate(cpu); 1710 int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
1719 int min_pstate; 1711 int max_pstate = max(min_pstate, cpu->max_perf_ratio);
1720 1712
1721 min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
1722 max_pstate = max(min_pstate, cpu->max_perf_ratio);
1723 return clamp_t(int, pstate, min_pstate, max_pstate); 1713 return clamp_t(int, pstate, min_pstate, max_pstate);
1724} 1714}
1725 1715
@@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
1767 if (smp_processor_id() != cpu->cpu) 1757 if (smp_processor_id() != cpu->cpu)
1768 return; 1758 return;
1769 1759
1760 delta_ns = time - cpu->last_update;
1770 if (flags & SCHED_CPUFREQ_IOWAIT) { 1761 if (flags & SCHED_CPUFREQ_IOWAIT) {
1771 cpu->iowait_boost = int_tofp(1); 1762 /* Start over if the CPU may have been idle. */
1772 cpu->last_update = time; 1763 if (delta_ns > TICK_NSEC) {
1773 /* 1764 cpu->iowait_boost = ONE_EIGHTH_FP;
1774 * The last time the busy was 100% so P-state was max anyway 1765 } else if (cpu->iowait_boost) {
1775 * so avoid overhead of computation. 1766 cpu->iowait_boost <<= 1;
1776 */ 1767 if (cpu->iowait_boost > int_tofp(1))
1777 if (fp_toint(cpu->sample.busy_scaled) == 100) 1768 cpu->iowait_boost = int_tofp(1);
1778 return; 1769 } else {
1779 1770 cpu->iowait_boost = ONE_EIGHTH_FP;
1780 goto set_pstate; 1771 }
1781 } else if (cpu->iowait_boost) { 1772 } else if (cpu->iowait_boost) {
1782 /* Clear iowait_boost if the CPU may have been idle. */ 1773 /* Clear iowait_boost if the CPU may have been idle. */
1783 delta_ns = time - cpu->last_update;
1784 if (delta_ns > TICK_NSEC) 1774 if (delta_ns > TICK_NSEC)
1785 cpu->iowait_boost = 0; 1775 cpu->iowait_boost = 0;
1776 else
1777 cpu->iowait_boost >>= 1;
1786 } 1778 }
1787 cpu->last_update = time; 1779 cpu->last_update = time;
1788 delta_ns = time - cpu->sample.time; 1780 delta_ns = time - cpu->sample.time;
1789 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) 1781 if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
1790 return; 1782 return;
1791 1783
1792set_pstate:
1793 if (intel_pstate_sample(cpu, time)) 1784 if (intel_pstate_sample(cpu, time))
1794 intel_pstate_adjust_pstate(cpu); 1785 intel_pstate_adjust_pstate(cpu);
1795} 1786}
@@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
1976 if (hwp_active) { 1967 if (hwp_active) {
1977 intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); 1968 intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
1978 } else { 1969 } else {
1979 max_state = intel_pstate_get_base_pstate(cpu); 1970 max_state = global.no_turbo || global.turbo_disabled ?
1971 cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1980 turbo_max = cpu->pstate.turbo_pstate; 1972 turbo_max = cpu->pstate.turbo_pstate;
1981 } 1973 }
1982 1974
@@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void)
2475 kfree(pss); 2467 kfree(pss);
2476 } 2468 }
2477 2469
2470 pr_debug("ACPI _PSS not found\n");
2478 return true; 2471 return true;
2479} 2472}
2480 2473
@@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void)
2485 2478
2486 status = acpi_get_handle(NULL, "\\_SB", &handle); 2479 status = acpi_get_handle(NULL, "\\_SB", &handle);
2487 if (ACPI_FAILURE(status)) 2480 if (ACPI_FAILURE(status))
2488 return true; 2481 goto not_found;
2482
2483 if (acpi_has_method(handle, "PCCH"))
2484 return false;
2489 2485
2490 return !acpi_has_method(handle, "PCCH"); 2486not_found:
2487 pr_debug("ACPI PCCH not found\n");
2488 return true;
2491} 2489}
2492 2490
2493static bool __init intel_pstate_has_acpi_ppc(void) 2491static bool __init intel_pstate_has_acpi_ppc(void)
@@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void)
2502 if (acpi_has_method(pr->handle, "_PPC")) 2500 if (acpi_has_method(pr->handle, "_PPC"))
2503 return true; 2501 return true;
2504 } 2502 }
2503 pr_debug("ACPI _PPC not found\n");
2505 return false; 2504 return false;
2506} 2505}
2507 2506
@@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
2539 id = x86_match_cpu(intel_pstate_cpu_oob_ids); 2538 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
2540 if (id) { 2539 if (id) {
2541 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); 2540 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
2542 if ( misc_pwr & (1 << 8)) 2541 if (misc_pwr & (1 << 8)) {
2542 pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
2543 return true; 2543 return true;
2544 }
2544 } 2545 }
2545 2546
2546 idx = acpi_match_platform_list(plat_info); 2547 idx = acpi_match_platform_list(plat_info);
@@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void)
2606 } 2607 }
2607 } else { 2608 } else {
2608 id = x86_match_cpu(intel_pstate_cpu_ids); 2609 id = x86_match_cpu(intel_pstate_cpu_ids);
2609 if (!id) 2610 if (!id) {
2611 pr_info("CPU ID not supported\n");
2610 return -ENODEV; 2612 return -ENODEV;
2613 }
2611 2614
2612 copy_cpu_funcs((struct pstate_funcs *)id->driver_data); 2615 copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
2613 } 2616 }
2614 2617
2615 if (intel_pstate_msrs_not_valid()) 2618 if (intel_pstate_msrs_not_valid()) {
2619 pr_info("Invalid MSRs\n");
2616 return -ENODEV; 2620 return -ENODEV;
2621 }
2617 2622
2618hwp_cpu_matched: 2623hwp_cpu_matched:
2619 /* 2624 /*
2620 * The Intel pstate driver will be ignored if the platform 2625 * The Intel pstate driver will be ignored if the platform
2621 * firmware has its own power management modes. 2626 * firmware has its own power management modes.
2622 */ 2627 */
2623 if (intel_pstate_platform_pwr_mgmt_exists()) 2628 if (intel_pstate_platform_pwr_mgmt_exists()) {
2629 pr_info("P-states controlled by the platform\n");
2624 return -ENODEV; 2630 return -ENODEV;
2631 }
2625 2632
2626 if (!hwp_active && hwp_only) 2633 if (!hwp_active && hwp_only)
2627 return -ENOTSUPP; 2634 return -ENOTSUPP;
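Numerically, the reworked iowait boost lives in the driver's 8-bit fixed point: int_tofp(1) is 256 and ONE_EIGHTH_FP is 32, so back-to-back iowait wakeups ramp the boost 32 -> 64 -> 128 -> 256 instead of jumping straight to full scale, and non-iowait samples halve it back down. A condensed model of the update rule (sketch only; the driver folds this logic into intel_pstate_update_util()):

    #include <linux/types.h>

    #define FRAC_BITS      8
    #define ONE_EIGHTH_FP  ((s64)1 << (FRAC_BITS - 3))   /* 32, i.e. 1/8 */

    static s64 next_iowait_boost(s64 boost, bool iowait_wakeup, bool long_gap)
    {
            if (iowait_wakeup) {
                    if (long_gap || !boost)         /* CPU may have been idle */
                            return ONE_EIGHTH_FP;
                    boost <<= 1;                    /* double on each wakeup */
                    return boost > (1 << FRAC_BITS) ? (1 << FRAC_BITS) : boost;
            }
            return long_gap ? 0 : boost >> 1;       /* decay by halving */
    }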
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 279bd9e9fa95..fb546e0d0356 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
851 case TYPE_POWERSAVER: 851 case TYPE_POWERSAVER:
852 pr_cont("Powersaver supported\n"); 852 pr_cont("Powersaver supported\n");
853 break; 853 break;
854 }; 854 }
855 855
856 /* Doesn't hurt */ 856 /* Doesn't hurt */
857 longhaul_setup_southbridge(); 857 longhaul_setup_southbridge();
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index eb8920d39818..4229fcc31310 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -14,7 +14,6 @@
14 14
15#include <linux/clk.h> 15#include <linux/clk.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/cpu_cooling.h>
18#include <linux/cpufreq.h> 17#include <linux/cpufreq.h>
19#include <linux/cpumask.h> 18#include <linux/cpumask.h>
20#include <linux/module.h> 19#include <linux/module.h>
@@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info {
48 struct regulator *sram_reg; 47 struct regulator *sram_reg;
49 struct clk *cpu_clk; 48 struct clk *cpu_clk;
50 struct clk *inter_clk; 49 struct clk *inter_clk;
51 struct thermal_cooling_device *cdev;
52 struct list_head list_head; 50 struct list_head list_head;
53 int intermediate_voltage; 51 int intermediate_voltage;
54 bool need_voltage_tracking; 52 bool need_voltage_tracking;
@@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
307 305
308#define DYNAMIC_POWER "dynamic-power-coefficient" 306#define DYNAMIC_POWER "dynamic-power-coefficient"
309 307
310static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
311{
312 struct mtk_cpu_dvfs_info *info = policy->driver_data;
313
314 info->cdev = of_cpufreq_cooling_register(policy);
315}
316
317static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) 308static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
318{ 309{
319 struct device *cpu_dev; 310 struct device *cpu_dev;
@@ -472,7 +463,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
472{ 463{
473 struct mtk_cpu_dvfs_info *info = policy->driver_data; 464 struct mtk_cpu_dvfs_info *info = policy->driver_data;
474 465
475 cpufreq_cooling_unregister(info->cdev);
476 dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); 466 dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
477 467
478 return 0; 468 return 0;
@@ -480,13 +470,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
480 470
481static struct cpufreq_driver mtk_cpufreq_driver = { 471static struct cpufreq_driver mtk_cpufreq_driver = {
482 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | 472 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
483 CPUFREQ_HAVE_GOVERNOR_PER_POLICY, 473 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
474 CPUFREQ_IS_COOLING_DEV,
484 .verify = cpufreq_generic_frequency_table_verify, 475 .verify = cpufreq_generic_frequency_table_verify,
485 .target_index = mtk_cpufreq_set_target, 476 .target_index = mtk_cpufreq_set_target,
486 .get = cpufreq_generic_get, 477 .get = cpufreq_generic_get,
487 .init = mtk_cpufreq_init, 478 .init = mtk_cpufreq_init,
488 .exit = mtk_cpufreq_exit, 479 .exit = mtk_cpufreq_exit,
489 .ready = mtk_cpufreq_ready,
490 .name = "mtk-cpufreq", 480 .name = "mtk-cpufreq",
491 .attr = cpufreq_generic_attr, 481 .attr = cpufreq_generic_attr,
492}; 482};
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 099a849396f6..1e5e64643c3a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu)
268 if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { 268 if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
269 ret = -ENODEV; 269 ret = -ENODEV;
270 goto out_free; 270 goto out_free;
271 }; 271 }
272 272
273 offset = &(pccp->package.elements[0]); 273 offset = &(pccp->package.elements[0]);
274 if (!offset || offset->type != ACPI_TYPE_INTEGER) { 274 if (!offset || offset->type != ACPI_TYPE_INTEGER) {
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 7e7ad3879c4e..d2230812fa4b 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -244,6 +244,7 @@ static int init_powernv_pstates(void)
244 u32 len_ids, len_freqs; 244 u32 len_ids, len_freqs;
245 u32 pstate_min, pstate_max, pstate_nominal; 245 u32 pstate_min, pstate_max, pstate_nominal;
246 u32 pstate_turbo, pstate_ultra_turbo; 246 u32 pstate_turbo, pstate_ultra_turbo;
247 int rc = -ENODEV;
247 248
248 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); 249 power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
249 if (!power_mgt) { 250 if (!power_mgt) {
@@ -327,8 +328,11 @@ next:
327 powernv_freqs[i].frequency = freq * 1000; /* kHz */ 328 powernv_freqs[i].frequency = freq * 1000; /* kHz */
328 powernv_freqs[i].driver_data = id & 0xFF; 329 powernv_freqs[i].driver_data = id & 0xFF;
329 330
330 revmap_data = (struct pstate_idx_revmap_data *) 331 revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL);
331 kmalloc(sizeof(*revmap_data), GFP_KERNEL); 332 if (!revmap_data) {
333 rc = -ENOMEM;
334 goto out;
335 }
332 336
333 revmap_data->pstate_id = id & 0xFF; 337 revmap_data->pstate_id = id & 0xFF;
334 revmap_data->cpufreq_table_idx = i; 338 revmap_data->cpufreq_table_idx = i;
@@ -357,7 +361,7 @@ next:
357 return 0; 361 return 0;
358out: 362out:
359 of_node_put(power_mgt); 363 of_node_put(power_mgt);
360 return -ENODEV; 364 return rc;
361} 365}
362 366
363/* Returns the CPU frequency corresponding to the pstate_id. */ 367/* Returns the CPU frequency corresponding to the pstate_id. */
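Two fixes ride in this hunk: kmalloc() can fail and its result was previously used unchecked, and the common error path always returned -ENODEV even for allocation failures. The cast also goes away because kmalloc() returns void *. The fixed pattern in isolation (hypothetical wrapper name):

    #include <linux/errno.h>
    #include <linux/slab.h>

    static int add_revmap_entry(struct pstate_idx_revmap_data **out)
    {
            struct pstate_idx_revmap_data *revmap_data;

            revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL); /* no cast */
            if (!revmap_data)
                    return -ENOMEM;         /* distinct from the -ENODEV paths */

            *out = revmap_data;
            return 0;
    }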
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index d83939a1b3d4..4b0b50403901 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -10,18 +10,21 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/of_address.h> 11#include <linux/of_address.h>
12#include <linux/of_platform.h> 12#include <linux/of_platform.h>
13#include <linux/pm_opp.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14 15
15#define LUT_MAX_ENTRIES 40U 16#define LUT_MAX_ENTRIES 40U
16#define LUT_SRC GENMASK(31, 30) 17#define LUT_SRC GENMASK(31, 30)
17#define LUT_L_VAL GENMASK(7, 0) 18#define LUT_L_VAL GENMASK(7, 0)
18#define LUT_CORE_COUNT GENMASK(18, 16) 19#define LUT_CORE_COUNT GENMASK(18, 16)
20#define LUT_VOLT GENMASK(11, 0)
19#define LUT_ROW_SIZE 32 21#define LUT_ROW_SIZE 32
20#define CLK_HW_DIV 2 22#define CLK_HW_DIV 2
21 23
22/* Register offsets */ 24/* Register offsets */
23#define REG_ENABLE 0x0 25#define REG_ENABLE 0x0
24#define REG_LUT_TABLE 0x110 26#define REG_FREQ_LUT 0x110
27#define REG_VOLT_LUT 0x114
25#define REG_PERF_STATE 0x920 28#define REG_PERF_STATE 0x920
26 29
27static unsigned long cpu_hw_rate, xo_rate; 30static unsigned long cpu_hw_rate, xo_rate;
@@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
70 return policy->freq_table[index].frequency; 73 return policy->freq_table[index].frequency;
71} 74}
72 75
73static int qcom_cpufreq_hw_read_lut(struct device *dev, 76static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
74 struct cpufreq_policy *policy, 77 struct cpufreq_policy *policy,
75 void __iomem *base) 78 void __iomem *base)
76{ 79{
77 u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; 80 u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
81 u32 volt;
78 unsigned int max_cores = cpumask_weight(policy->cpus); 82 unsigned int max_cores = cpumask_weight(policy->cpus);
79 struct cpufreq_frequency_table *table; 83 struct cpufreq_frequency_table *table;
80 84
@@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
83 return -ENOMEM; 87 return -ENOMEM;
84 88
85 for (i = 0; i < LUT_MAX_ENTRIES; i++) { 89 for (i = 0; i < LUT_MAX_ENTRIES; i++) {
86 data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE); 90 data = readl_relaxed(base + REG_FREQ_LUT +
91 i * LUT_ROW_SIZE);
87 src = FIELD_GET(LUT_SRC, data); 92 src = FIELD_GET(LUT_SRC, data);
88 lval = FIELD_GET(LUT_L_VAL, data); 93 lval = FIELD_GET(LUT_L_VAL, data);
89 core_count = FIELD_GET(LUT_CORE_COUNT, data); 94 core_count = FIELD_GET(LUT_CORE_COUNT, data);
90 95
96 data = readl_relaxed(base + REG_VOLT_LUT +
97 i * LUT_ROW_SIZE);
98 volt = FIELD_GET(LUT_VOLT, data) * 1000;
99
91 if (src) 100 if (src)
92 freq = xo_rate * lval / 1000; 101 freq = xo_rate * lval / 1000;
93 else 102 else
94 freq = cpu_hw_rate / 1000; 103 freq = cpu_hw_rate / 1000;
95 104
96 /* Ignore boosts in the middle of the table */ 105 if (freq != prev_freq && core_count == max_cores) {
97 if (core_count != max_cores) {
98 table[i].frequency = CPUFREQ_ENTRY_INVALID;
99 } else {
100 table[i].frequency = freq; 106 table[i].frequency = freq;
101 dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i, 107 dev_pm_opp_add(cpu_dev, freq * 1000, volt);
108 dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
102 freq, core_count); 109 freq, core_count);
110 } else {
111 table[i].frequency = CPUFREQ_ENTRY_INVALID;
103 } 112 }
104 113
105 /* 114 /*
@@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
116 if (prev_cc != max_cores) { 125 if (prev_cc != max_cores) {
117 prev->frequency = prev_freq; 126 prev->frequency = prev_freq;
118 prev->flags = CPUFREQ_BOOST_FREQ; 127 prev->flags = CPUFREQ_BOOST_FREQ;
128 dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt);
119 } 129 }
120 130
121 break; 131 break;
@@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev,
127 137
128 table[i].frequency = CPUFREQ_TABLE_END; 138 table[i].frequency = CPUFREQ_TABLE_END;
129 policy->freq_table = table; 139 policy->freq_table = table;
140 dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
130 141
131 return 0; 142 return 0;
132} 143}
@@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
159 struct device *dev = &global_pdev->dev; 170 struct device *dev = &global_pdev->dev;
160 struct of_phandle_args args; 171 struct of_phandle_args args;
161 struct device_node *cpu_np; 172 struct device_node *cpu_np;
173 struct device *cpu_dev;
162 struct resource *res; 174 struct resource *res;
163 void __iomem *base; 175 void __iomem *base;
164 int ret, index; 176 int ret, index;
165 177
178 cpu_dev = get_cpu_device(policy->cpu);
179 if (!cpu_dev) {
180 pr_err("%s: failed to get cpu%d device\n", __func__,
181 policy->cpu);
182 return -ENODEV;
183 }
184
166 cpu_np = of_cpu_device_node_get(policy->cpu); 185 cpu_np = of_cpu_device_node_get(policy->cpu);
167 if (!cpu_np) 186 if (!cpu_np)
168 return -EINVAL; 187 return -EINVAL;
@@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
199 218
200 policy->driver_data = base + REG_PERF_STATE; 219 policy->driver_data = base + REG_PERF_STATE;
201 220
202 ret = qcom_cpufreq_hw_read_lut(dev, policy, base); 221 ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base);
203 if (ret) { 222 if (ret) {
204 dev_err(dev, "Domain-%d failed to read LUT\n", index); 223 dev_err(dev, "Domain-%d failed to read LUT\n", index);
205 goto error; 224 goto error;
206 } 225 }
207 226
227 ret = dev_pm_opp_get_opp_count(cpu_dev);
228 if (ret <= 0) {
229 dev_err(cpu_dev, "Failed to add OPPs\n");
230 ret = -ENODEV;
231 goto error;
232 }
233
234 dev_pm_opp_of_register_em(policy->cpus);
235
208 policy->fast_switch_possible = true; 236 policy->fast_switch_possible = true;
209 237
210 return 0; 238 return 0;
@@ -215,8 +243,10 @@ error:
215 243
216static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) 244static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
217{ 245{
246 struct device *cpu_dev = get_cpu_device(policy->cpu);
218 void __iomem *base = policy->driver_data - REG_PERF_STATE; 247 void __iomem *base = policy->driver_data - REG_PERF_STATE;
219 248
249 dev_pm_opp_remove_all_dynamic(cpu_dev);
220 kfree(policy->freq_table); 250 kfree(policy->freq_table);
221 devm_iounmap(&global_pdev->dev, base); 251 devm_iounmap(&global_pdev->dev, base);
222 252
@@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = {
231 261
232static struct cpufreq_driver cpufreq_qcom_hw_driver = { 262static struct cpufreq_driver cpufreq_qcom_hw_driver = {
233 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | 263 .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
234 CPUFREQ_HAVE_GOVERNOR_PER_POLICY, 264 CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
265 CPUFREQ_IS_COOLING_DEV,
235 .verify = cpufreq_generic_frequency_table_verify, 266 .verify = cpufreq_generic_frequency_table_verify,
236 .target_index = qcom_cpufreq_hw_target_index, 267 .target_index = qcom_cpufreq_hw_target_index,
237 .get = qcom_cpufreq_hw_get, 268 .get = qcom_cpufreq_hw_get,
@@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void)
296{ 327{
297 return platform_driver_register(&qcom_cpufreq_hw_driver); 328 return platform_driver_register(&qcom_cpufreq_hw_driver);
298} 329}
299subsys_initcall(qcom_cpufreq_hw_init); 330device_initcall(qcom_cpufreq_hw_init);
300 331
301static void __exit qcom_cpufreq_hw_exit(void) 332static void __exit qcom_cpufreq_hw_exit(void)
302{ 333{
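
The qcom-cpufreq-hw changes above wire the firmware LUT into the OPP core: each usable LUT row becomes a dynamic OPP on the CPU device, the policy's CPUs are marked as sharing that table, and an energy model is built from it (with dev_pm_opp_remove_all_dynamic() undoing the registration on exit). A minimal sketch of that registration pattern — the frequency and voltage values here are hypothetical, not from any real LUT:

#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_register_lut_entry(struct device *cpu_dev,
				      struct cpufreq_policy *policy)
{
	unsigned long freq_khz = 1804800;	/* hypothetical LUT row */
	unsigned long volt_uv = 900000;
	int ret;

	/* dev_pm_opp_add() wants Hz and microvolts. */
	ret = dev_pm_opp_add(cpu_dev, freq_khz * 1000, volt_uv);
	if (ret)
		return ret;

	/* Every CPU in the policy shares this frequency domain ... */
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	/* ... so the energy model can be built once for the lot. */
	dev_pm_opp_of_register_em(policy->cpus);

	return 0;
}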
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a472b814058f..dd64dcf89c74 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,7 +42,7 @@ enum _msm8996_version {
42 NUM_OF_MSM8996_VERSIONS, 42 NUM_OF_MSM8996_VERSIONS,
43}; 43};
44 44
45struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; 45static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
46 46
47static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) 47static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
48{ 48{
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index 3d773f64b4df..4295e5476264 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -13,7 +13,6 @@
13#include <linux/clk.h> 13#include <linux/clk.h>
14#include <linux/clk-provider.h> 14#include <linux/clk-provider.h>
15#include <linux/cpufreq.h> 15#include <linux/cpufreq.h>
16#include <linux/cpu_cooling.h>
17#include <linux/errno.h> 16#include <linux/errno.h>
18#include <linux/init.h> 17#include <linux/init.h>
19#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -31,7 +30,6 @@
31struct cpu_data { 30struct cpu_data {
32 struct clk **pclk; 31 struct clk **pclk;
33 struct cpufreq_frequency_table *table; 32 struct cpufreq_frequency_table *table;
34 struct thermal_cooling_device *cdev;
35}; 33};
36 34
37/* 35/*
@@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
239{ 237{
240 struct cpu_data *data = policy->driver_data; 238 struct cpu_data *data = policy->driver_data;
241 239
242 cpufreq_cooling_unregister(data->cdev);
243 kfree(data->pclk); 240 kfree(data->pclk);
244 kfree(data->table); 241 kfree(data->table);
245 kfree(data); 242 kfree(data);
@@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy,
258 return clk_set_parent(policy->clk, parent); 255 return clk_set_parent(policy->clk, parent);
259} 256}
260 257
261
262static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
263{
264 struct cpu_data *cpud = policy->driver_data;
265
266 cpud->cdev = of_cpufreq_cooling_register(policy);
267}
268
269static struct cpufreq_driver qoriq_cpufreq_driver = { 258static struct cpufreq_driver qoriq_cpufreq_driver = {
270 .name = "qoriq_cpufreq", 259 .name = "qoriq_cpufreq",
271 .flags = CPUFREQ_CONST_LOOPS, 260 .flags = CPUFREQ_CONST_LOOPS |
261 CPUFREQ_IS_COOLING_DEV,
272 .init = qoriq_cpufreq_cpu_init, 262 .init = qoriq_cpufreq_cpu_init,
273 .exit = qoriq_cpufreq_cpu_exit, 263 .exit = qoriq_cpufreq_cpu_exit,
274 .verify = cpufreq_generic_frequency_table_verify, 264 .verify = cpufreq_generic_frequency_table_verify,
275 .target_index = qoriq_cpufreq_target, 265 .target_index = qoriq_cpufreq_target,
276 .get = cpufreq_generic_get, 266 .get = cpufreq_generic_get,
277 .ready = qoriq_cpufreq_ready,
278 .attr = cpufreq_generic_attr, 267 .attr = cpufreq_generic_attr,
279}; 268};
280 269
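
The qoriq change (and the scmi/scpi hunks that follow) all make the same trade: instead of each driver registering a thermal cooling device from a .ready callback and unregistering it on exit, the driver sets CPUFREQ_IS_COOLING_DEV and lets the cpufreq core manage the cooling device's lifetime. A minimal sketch of the resulting driver shape, with hypothetical callbacks:

#include <linux/cpufreq.h>

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/* set up policy->freq_table, clocks, etc. */
	return 0;
}

static int example_cpu_exit(struct cpufreq_policy *policy)
{
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name	= "example",
	/* With this flag the core registers/unregisters the CPU
	 * cooling device around online/offline; no .ready callback
	 * and no per-driver cdev pointer are needed. */
	.flags	= CPUFREQ_CONST_LOOPS | CPUFREQ_IS_COOLING_DEV,
	.init	= example_cpu_init,
	.exit	= example_cpu_exit,
	.verify	= cpufreq_generic_frequency_table_verify,
};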
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 50b1551ba894..c47182fc64ea 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -11,7 +11,6 @@
11#include <linux/cpu.h> 11#include <linux/cpu.h>
12#include <linux/cpufreq.h> 12#include <linux/cpufreq.h>
13#include <linux/cpumask.h> 13#include <linux/cpumask.h>
14#include <linux/cpu_cooling.h>
15#include <linux/export.h> 14#include <linux/export.h>
16#include <linux/module.h> 15#include <linux/module.h>
17#include <linux/pm_opp.h> 16#include <linux/pm_opp.h>
@@ -22,7 +21,6 @@
22struct scmi_data { 21struct scmi_data {
23 int domain_id; 22 int domain_id;
24 struct device *cpu_dev; 23 struct device *cpu_dev;
25 struct thermal_cooling_device *cdev;
26}; 24};
27 25
28static const struct scmi_handle *handle; 26static const struct scmi_handle *handle;
@@ -52,9 +50,9 @@ scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
52 int ret; 50 int ret;
53 struct scmi_data *priv = policy->driver_data; 51 struct scmi_data *priv = policy->driver_data;
54 struct scmi_perf_ops *perf_ops = handle->perf_ops; 52 struct scmi_perf_ops *perf_ops = handle->perf_ops;
55 u64 freq = policy->freq_table[index].frequency * 1000; 53 u64 freq = policy->freq_table[index].frequency;
56 54
57 ret = perf_ops->freq_set(handle, priv->domain_id, freq, false); 55 ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
58 if (!ret) 56 if (!ret)
59 arch_set_freq_scale(policy->related_cpus, freq, 57 arch_set_freq_scale(policy->related_cpus, freq,
60 policy->cpuinfo.max_freq); 58 policy->cpuinfo.max_freq);
@@ -176,7 +174,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
176out_free_priv: 174out_free_priv:
177 kfree(priv); 175 kfree(priv);
178out_free_opp: 176out_free_opp:
179 dev_pm_opp_cpumask_remove_table(policy->cpus); 177 dev_pm_opp_remove_all_dynamic(cpu_dev);
180 178
181 return ret; 179 return ret;
182} 180}
@@ -185,25 +183,18 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
185{ 183{
186 struct scmi_data *priv = policy->driver_data; 184 struct scmi_data *priv = policy->driver_data;
187 185
188 cpufreq_cooling_unregister(priv->cdev);
189 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 186 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
187 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
190 kfree(priv); 188 kfree(priv);
191 dev_pm_opp_cpumask_remove_table(policy->related_cpus);
192 189
193 return 0; 190 return 0;
194} 191}
195 192
196static void scmi_cpufreq_ready(struct cpufreq_policy *policy)
197{
198 struct scmi_data *priv = policy->driver_data;
199
200 priv->cdev = of_cpufreq_cooling_register(policy);
201}
202
203static struct cpufreq_driver scmi_cpufreq_driver = { 193static struct cpufreq_driver scmi_cpufreq_driver = {
204 .name = "scmi", 194 .name = "scmi",
205 .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | 195 .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
206 CPUFREQ_NEED_INITIAL_FREQ_CHECK, 196 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
197 CPUFREQ_IS_COOLING_DEV,
207 .verify = cpufreq_generic_frequency_table_verify, 198 .verify = cpufreq_generic_frequency_table_verify,
208 .attr = cpufreq_generic_attr, 199 .attr = cpufreq_generic_attr,
209 .target_index = scmi_cpufreq_set_target, 200 .target_index = scmi_cpufreq_set_target,
@@ -211,7 +202,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
211 .get = scmi_cpufreq_get_rate, 202 .get = scmi_cpufreq_get_rate,
212 .init = scmi_cpufreq_init, 203 .init = scmi_cpufreq_init,
213 .exit = scmi_cpufreq_exit, 204 .exit = scmi_cpufreq_exit,
214 .ready = scmi_cpufreq_ready,
215}; 205};
216 206
217static int scmi_cpufreq_probe(struct scmi_device *sdev) 207static int scmi_cpufreq_probe(struct scmi_device *sdev)
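
The freq_set hunk above is a unit fix as much as a cleanup: cpufreq tables and policy->cpuinfo.max_freq are in kHz, while the SCMI perf protocol wants Hz. Keeping the local variable in kHz and converting only at the protocol call leaves arch_set_freq_scale() comparing like units. Condensed:

	u64 freq = policy->freq_table[index].frequency;	/* kHz */

	ret = perf_ops->freq_set(handle, priv->domain_id,
				 freq * 1000, false);	/* SCMI takes Hz */
	if (!ret)
		arch_set_freq_scale(policy->related_cpus, freq,
				    policy->cpuinfo.max_freq); /* kHz vs kHz */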
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 87a98ec77773..1db2f6927e13 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -22,7 +22,6 @@
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/cpufreq.h> 23#include <linux/cpufreq.h>
24#include <linux/cpumask.h> 24#include <linux/cpumask.h>
25#include <linux/cpu_cooling.h>
26#include <linux/export.h> 25#include <linux/export.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <linux/of_platform.h> 27#include <linux/of_platform.h>
@@ -34,7 +33,6 @@
34struct scpi_data { 33struct scpi_data {
35 struct clk *clk; 34 struct clk *clk;
36 struct device *cpu_dev; 35 struct device *cpu_dev;
37 struct thermal_cooling_device *cdev;
38}; 36};
39 37
40static struct scpi_ops *scpi_ops; 38static struct scpi_ops *scpi_ops;
@@ -177,7 +175,7 @@ out_free_cpufreq_table:
177out_free_priv: 175out_free_priv:
178 kfree(priv); 176 kfree(priv);
179out_free_opp: 177out_free_opp:
180 dev_pm_opp_cpumask_remove_table(policy->cpus); 178 dev_pm_opp_remove_all_dynamic(cpu_dev);
181 179
182 return ret; 180 return ret;
183} 181}
@@ -186,32 +184,24 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
186{ 184{
187 struct scpi_data *priv = policy->driver_data; 185 struct scpi_data *priv = policy->driver_data;
188 186
189 cpufreq_cooling_unregister(priv->cdev);
190 clk_put(priv->clk); 187 clk_put(priv->clk);
191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 188 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
192 kfree(priv); 189 kfree(priv);
193 dev_pm_opp_cpumask_remove_table(policy->related_cpus); 190 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
194 191
195 return 0; 192 return 0;
196} 193}
197 194
198static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
199{
200 struct scpi_data *priv = policy->driver_data;
201
202 priv->cdev = of_cpufreq_cooling_register(policy);
203}
204
205static struct cpufreq_driver scpi_cpufreq_driver = { 195static struct cpufreq_driver scpi_cpufreq_driver = {
206 .name = "scpi-cpufreq", 196 .name = "scpi-cpufreq",
207 .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | 197 .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
208 CPUFREQ_NEED_INITIAL_FREQ_CHECK, 198 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
199 CPUFREQ_IS_COOLING_DEV,
209 .verify = cpufreq_generic_frequency_table_verify, 200 .verify = cpufreq_generic_frequency_table_verify,
210 .attr = cpufreq_generic_attr, 201 .attr = cpufreq_generic_attr,
211 .get = scpi_cpufreq_get_rate, 202 .get = scpi_cpufreq_get_rate,
212 .init = scpi_cpufreq_init, 203 .init = scpi_cpufreq_init,
213 .exit = scpi_cpufreq_exit, 204 .exit = scpi_cpufreq_exit,
214 .ready = scpi_cpufreq_ready,
215 .target_index = scpi_cpufreq_set_target, 205 .target_index = scpi_cpufreq_set_target,
216}; 206};
217 207
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index fbbcb88db061..5d8a09b82efb 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu)
243 unsigned int speed; 243 unsigned int speed;
244 244
245 /* You're supposed to ensure CPU is online. */ 245 /* You're supposed to ensure CPU is online. */
246 if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) 246 BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1));
247 BUG();
248 247
249 pr_debug("detected %u kHz as current frequency\n", speed); 248 pr_debug("detected %u kHz as current frequency\n", speed);
250 return speed; 249 return speed;
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 43530254201a..4bb154f6c54c 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
134 134
135 platform_set_drvdata(pdev, priv); 135 platform_set_drvdata(pdev, priv);
136 136
137 of_node_put(np);
138
137 return 0; 139 return 0;
138 140
139out_switch_to_pllx: 141out_switch_to_pllx:
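
The tegra124 hunk plugs a device_node refcount leak: of_cpu_device_node_get() returns a node with an elevated refcount, and the probe path previously dropped it only on the error branches. The balanced pattern, sketched with a hypothetical probe:

#include <linux/of.h>
#include <linux/of_device.h>

static int example_probe(void)
{
	struct device_node *np = of_cpu_device_node_get(0); /* takes a ref */

	if (!np)
		return -ENODEV;

	/* ... parse properties, grab clocks ... */

	of_node_put(np);	/* drop the ref on success, not just on error */
	return 0;
}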
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index b17d153e724f..23a1b27579a5 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
21 local_irq_enable(); 21 local_irq_enable();
22 if (!current_set_polling_and_test()) { 22 if (!current_set_polling_and_test()) {
23 unsigned int loop_count = 0; 23 unsigned int loop_count = 0;
24 u64 limit = TICK_USEC; 24 u64 limit = TICK_NSEC;
25 int i; 25 int i;
26 26
27 for (i = 1; i < drv->state_count; i++) { 27 for (i = 1; i < drv->state_count; i++) {
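
TICK_USEC is a tick expressed in microseconds, but poll_idle() bounds its spin with local_clock(), which counts nanoseconds — so the old limit was a thousand times too short. A condensed sketch of the corrected bound:

	u64 limit = TICK_NSEC;		/* one tick, in ns (was TICK_USEC) */
	u64 start = local_clock();	/* local_clock() also returns ns */

	while (!need_resched()) {
		cpu_relax();
		if (local_clock() - start > limit)
			break;		/* ns compared against ns */
	}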
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 5a90075f719d..0be55fcc19ba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -692,6 +692,7 @@ config CRYPTO_DEV_BCM_SPU
692 depends on ARCH_BCM_IPROC 692 depends on ARCH_BCM_IPROC
693 depends on MAILBOX 693 depends on MAILBOX
694 default m 694 default m
695 select CRYPTO_AUTHENC
695 select CRYPTO_DES 696 select CRYPTO_DES
696 select CRYPTO_MD5 697 select CRYPTO_MD5
697 select CRYPTO_SHA1 698 select CRYPTO_SHA1
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 63cb6956c948..acf79889d903 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -283,9 +283,9 @@ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
283 */ 283 */
284static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev) 284static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
285{ 285{
286 dev->gdr = dma_zalloc_coherent(dev->core_dev->device, 286 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
287 sizeof(struct ce_gd) * PPC4XX_NUM_GD, 287 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
288 &dev->gdr_pa, GFP_ATOMIC); 288 &dev->gdr_pa, GFP_ATOMIC);
289 if (!dev->gdr) 289 if (!dev->gdr)
290 return -ENOMEM; 290 return -ENOMEM;
291 291
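
This is the first of many dma_zalloc_coherent() conversions in this series: dma_alloc_coherent() now zeroes the buffer itself, so the _zalloc wrapper became redundant and was removed tree-wide. The replacement is mechanical, sketched here with a hypothetical helper:

#include <linux/dma-mapping.h>

static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *handle)
{
	/* Buffer is already zero-filled on return; no memset() and
	 * no _zalloc variant needed. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}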
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c9393ffb70ed..5567cbda2798 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2845 struct spu_hw *spu = &iproc_priv.spu; 2845 struct spu_hw *spu = &iproc_priv.spu;
2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2846 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2847 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2848 struct rtattr *rta = (void *)key; 2848 struct crypto_authenc_keys keys;
2849 struct crypto_authenc_key_param *param; 2849 int ret;
2850 const u8 *origkey = key;
2851 const unsigned int origkeylen = keylen;
2852
2853 int ret = 0;
2854 2850
2855 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2851 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2856 keylen); 2852 keylen);
2857 flow_dump(" key: ", key, keylen); 2853 flow_dump(" key: ", key, keylen);
2858 2854
2859 if (!RTA_OK(rta, keylen)) 2855 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2860 goto badkey; 2856 if (ret)
2861 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
2862 goto badkey;
2863 if (RTA_PAYLOAD(rta) < sizeof(*param))
2864 goto badkey; 2857 goto badkey;
2865 2858
2866 param = RTA_DATA(rta); 2859 if (keys.enckeylen > MAX_KEY_SIZE ||
2867 ctx->enckeylen = be32_to_cpu(param->enckeylen); 2860 keys.authkeylen > MAX_KEY_SIZE)
2868
2869 key += RTA_ALIGN(rta->rta_len);
2870 keylen -= RTA_ALIGN(rta->rta_len);
2871
2872 if (keylen < ctx->enckeylen)
2873 goto badkey;
2874 if (ctx->enckeylen > MAX_KEY_SIZE)
2875 goto badkey; 2861 goto badkey;
2876 2862
2877 ctx->authkeylen = keylen - ctx->enckeylen; 2863 ctx->enckeylen = keys.enckeylen;
2878 2864 ctx->authkeylen = keys.authkeylen;
2879 if (ctx->authkeylen > MAX_KEY_SIZE)
2880 goto badkey;
2881 2865
2882 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2866 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2883 /* May end up padding auth key. So make sure it's zeroed. */ 2867 /* May end up padding auth key. So make sure it's zeroed. */
2884 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2868 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2885 memcpy(ctx->authkey, key, ctx->authkeylen); 2869 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2886 2870
2887 switch (ctx->alg->cipher_info.alg) { 2871 switch (ctx->alg->cipher_info.alg) {
2888 case CIPHER_ALG_DES: 2872 case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2890 u32 tmp[DES_EXPKEY_WORDS]; 2874 u32 tmp[DES_EXPKEY_WORDS];
2891 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2875 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2892 2876
2893 if (des_ekey(tmp, key) == 0) { 2877 if (des_ekey(tmp, keys.enckey) == 0) {
2894 if (crypto_aead_get_flags(cipher) & 2878 if (crypto_aead_get_flags(cipher) &
2895 CRYPTO_TFM_REQ_WEAK_KEY) { 2879 CRYPTO_TFM_REQ_WEAK_KEY) {
2896 crypto_aead_set_flags(cipher, flags); 2880 crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2905 break; 2889 break;
2906 case CIPHER_ALG_3DES: 2890 case CIPHER_ALG_3DES:
2907 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2891 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2908 const u32 *K = (const u32 *)key; 2892 const u32 *K = (const u32 *)keys.enckey;
2909 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2893 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
2910 2894
2911 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 2895 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
2956 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2940 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2957 ctx->fallback_cipher->base.crt_flags |= 2941 ctx->fallback_cipher->base.crt_flags |=
2958 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2942 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2959 ret = 2943 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2960 crypto_aead_setkey(ctx->fallback_cipher, origkey,
2961 origkeylen);
2962 if (ret) { 2944 if (ret) {
2963 flow_log(" fallback setkey() returned:%d\n", ret); 2945 flow_log(" fallback setkey() returned:%d\n", ret);
2964 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 2946 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
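
The cipher.c rewrite above (together with the matching ccree change further down and the new `select CRYPTO_AUTHENC` in Kconfig) replaces hand-rolled rtattr parsing of authenc() key blobs with the crypto API's own helper, which validates the layout and splits the blob safely. A minimal sketch — EXAMPLE_MAX_KEY is a hypothetical driver limit:

#include <crypto/authenc.h>

#define EXAMPLE_MAX_KEY 64	/* hypothetical per-driver bound */

static int example_setkey(const u8 *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/* keys.authkey/authkeylen and keys.enckey/enckeylen now point
	 * into the validated blob; bound them before copying. */
	if (keys.enckeylen > EXAMPLE_MAX_KEY ||
	    keys.authkeylen > EXAMPLE_MAX_KEY)
		return -EINVAL;

	return 0;
}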
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 92e593e2069a..80ae69f906fb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3476,7 +3476,7 @@ static int __init caam_algapi_init(void)
3476 * Skip algorithms requiring message digests 3476 * Skip algorithms requiring message digests
3477 * if MD or MD size is not supported by device. 3477 * if MD or MD size is not supported by device.
3478 */ 3478 */
3479 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && 3479 if (is_mdha(c2_alg_sel) &&
3480 (!md_inst || t_alg->aead.maxauthsize > md_limit)) 3480 (!md_inst || t_alg->aead.maxauthsize > md_limit))
3481 continue; 3481 continue;
3482 3482
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 81712aa5d0f2..bb1a2cdf1951 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1072,13 +1072,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)
1072 1072
1073 desc = edesc->hw_desc; 1073 desc = edesc->hw_desc;
1074 1074
1075 state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); 1075 if (buflen) {
1076 if (dma_mapping_error(jrdev, state->buf_dma)) { 1076 state->buf_dma = dma_map_single(jrdev, buf, buflen,
1077 dev_err(jrdev, "unable to map src\n"); 1077 DMA_TO_DEVICE);
1078 goto unmap; 1078 if (dma_mapping_error(jrdev, state->buf_dma)) {
1079 } 1079 dev_err(jrdev, "unable to map src\n");
1080 goto unmap;
1081 }
1080 1082
1081 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); 1083 append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
1084 }
1082 1085
1083 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, 1086 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
1084 digestsize); 1087 digestsize);
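
The caamhash hunk guards against mapping an empty buffer: dma_map_single() with a zero length is invalid (and warns with DMA debugging enabled), so both the mapping and the matching append_seq_in_ptr() are skipped when buflen is 0. The general shape:

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma))
			goto unmap;

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}
	/* buflen == 0: nothing to map, nothing to point the job at */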
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index ec10230178c5..4b6854bf896a 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -1155,6 +1155,7 @@
1155#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT) 1155#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
1156#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT) 1156#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
1157#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT) 1157#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
1158#define OP_ALG_CHA_MDHA (0x40 << OP_ALG_ALGSEL_SHIFT)
1158#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT) 1159#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
1159#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT) 1160#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
1160#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT) 1161#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 67ea94079837..8c6b83e02a70 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -7,6 +7,9 @@
7 7
8#ifndef CAAM_ERROR_H 8#ifndef CAAM_ERROR_H
9#define CAAM_ERROR_H 9#define CAAM_ERROR_H
10
11#include "desc.h"
12
10#define CAAM_ERROR_STR_MAX 302 13#define CAAM_ERROR_STR_MAX 302
11 14
12void caam_strstatus(struct device *dev, u32 status, bool qi_v2); 15void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
@@ -17,4 +20,10 @@ void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
17void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, 20void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
18 int rowsize, int groupsize, struct scatterlist *sg, 21 int rowsize, int groupsize, struct scatterlist *sg,
19 size_t tlen, bool ascii); 22 size_t tlen, bool ascii);
23
24static inline bool is_mdha(u32 algtype)
25{
26 return (algtype & OP_ALG_ALGSEL_MASK & ~OP_ALG_ALGSEL_SUBMASK) ==
27 OP_ALG_CHA_MDHA;
28}
20#endif /* CAAM_ERROR_H */ 29#endif /* CAAM_ERROR_H */
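
The is_mdha() helper works because CAAM groups all MDHA (message-digest hardware accelerator) algorithms — MD5 through the SHA family — into algsel values whose high nibble is 0x4, with the low nibble (OP_ALG_ALGSEL_SUBMASK) selecting the variant. Masking the submask off leaves the class, which the new OP_ALG_CHA_MDHA names. A worked example, assuming the usual 0x0f submask:

	/* SHA-256: algsel field = 0x43 << OP_ALG_ALGSEL_SHIFT
	 *   0x43 & ~0x0f = 0x40  ->  equals OP_ALG_CHA_MDHA, so
	 *   is_mdha() returns true; AES (0x10) or DES (0x20) masks
	 *   to 0x10/0x20 and returns false. */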
diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c
index 06ad85ab5e86..a876535529d1 100644
--- a/drivers/crypto/cavium/cpt/cptpf_main.c
+++ b/drivers/crypto/cavium/cpt/cptpf_main.c
@@ -278,8 +278,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
278 mcode->num_cores = is_ae ? 6 : 10; 278 mcode->num_cores = is_ae ? 6 : 10;
279 279
280 /* Allocate DMAable space */ 280 /* Allocate DMAable space */
281 mcode->code = dma_zalloc_coherent(&cpt->pdev->dev, mcode->code_size, 281 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
282 &mcode->phys_base, GFP_KERNEL); 282 &mcode->phys_base, GFP_KERNEL);
283 if (!mcode->code) { 283 if (!mcode->code) {
284 dev_err(dev, "Unable to allocate space for microcode"); 284 dev_err(dev, "Unable to allocate space for microcode");
285 ret = -ENOMEM; 285 ret = -ENOMEM;
diff --git a/drivers/crypto/cavium/cpt/cptvf_main.c b/drivers/crypto/cavium/cpt/cptvf_main.c
index 5c796ed55eba..2ca431ed1db8 100644
--- a/drivers/crypto/cavium/cpt/cptvf_main.c
+++ b/drivers/crypto/cavium/cpt/cptvf_main.c
@@ -236,9 +236,10 @@ static int alloc_command_queues(struct cpt_vf *cptvf,
236 236
237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : 237 c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
238 rem_q_size; 238 rem_q_size;
239 curr->head = (u8 *)dma_zalloc_coherent(&pdev->dev, 239 curr->head = (u8 *)dma_alloc_coherent(&pdev->dev,
240 c_size + CPT_NEXT_CHUNK_PTR_SIZE, 240 c_size + CPT_NEXT_CHUNK_PTR_SIZE,
241 &curr->dma_addr, GFP_KERNEL); 241 &curr->dma_addr,
242 GFP_KERNEL);
242 if (!curr->head) { 243 if (!curr->head) {
243 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", 244 dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
244 i, queue->nchunks); 245 i, queue->nchunks);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 9138bae12521..4ace9bcd603a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -25,9 +25,9 @@ static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
25 struct nitrox_device *ndev = cmdq->ndev; 25 struct nitrox_device *ndev = cmdq->ndev;
26 26
27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; 27 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
28 cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize, 28 cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
29 &cmdq->unalign_dma, 29 &cmdq->unalign_dma,
30 GFP_KERNEL); 30 GFP_KERNEL);
31 if (!cmdq->unalign_base) 31 if (!cmdq->unalign_base)
32 return -ENOMEM; 32 return -ENOMEM;
33 33
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index e34e4df8fd24..4c97478d44bd 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
537 struct nitrox_device *ndev = cmdq->ndev; 537 struct nitrox_device *ndev = cmdq->ndev;
538 struct nitrox_softreq *sr; 538 struct nitrox_softreq *sr;
539 int req_completed = 0, err = 0, budget; 539 int req_completed = 0, err = 0, budget;
540 completion_t callback;
541 void *cb_arg;
540 542
541 /* check all pending requests */ 543 /* check all pending requests */
542 budget = atomic_read(&cmdq->pending_count); 544 budget = atomic_read(&cmdq->pending_count);
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
564 smp_mb__after_atomic(); 566 smp_mb__after_atomic();
565 /* remove from response list */ 567 /* remove from response list */
566 response_list_del(sr, cmdq); 568 response_list_del(sr, cmdq);
567
568 /* ORH error code */ 569 /* ORH error code */
569 err = READ_ONCE(*sr->resp.orh) & 0xff; 570 err = READ_ONCE(*sr->resp.orh) & 0xff;
571 callback = sr->callback;
572 cb_arg = sr->cb_arg;
570 softreq_destroy(sr); 573 softreq_destroy(sr);
571 574 if (callback)
572 if (sr->callback) 575 callback(cb_arg, err);
573 sr->callback(sr->cb_arg, err);
574 576
575 req_completed++; 577 req_completed++;
576 } 578 }
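
The reqmgr hunk is a use-after-free fix: softreq_destroy() frees sr, and the old code then read sr->callback and sr->cb_arg from the freed request. Snapshotting both into locals before the free is the standard pattern, sketched here with explicit function-pointer types in place of the driver's completion_t:

	void (*callback)(void *arg, int err) = sr->callback;
	void *cb_arg = sr->cb_arg;

	softreq_destroy(sr);	/* sr must not be touched past this line */

	if (callback)
		callback(cb_arg, err);	/* reads only the saved copies */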
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 44a4d2779b15..c9bfd4f439ce 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -822,9 +822,9 @@ static int ccp5_init(struct ccp_device *ccp)
822 /* Page alignment satisfies our needs for N <= 128 */ 822 /* Page alignment satisfies our needs for N <= 128 */
823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); 823 BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); 824 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
825 cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize, 825 cmd_q->qbase = dma_alloc_coherent(dev, cmd_q->qsize,
826 &cmd_q->qbase_dma, 826 &cmd_q->qbase_dma,
827 GFP_KERNEL); 827 GFP_KERNEL);
828 if (!cmd_q->qbase) { 828 if (!cmd_q->qbase) {
829 dev_err(dev, "unable to allocate command queue\n"); 829 dev_err(dev, "unable to allocate command queue\n");
830 ret = -ENOMEM; 830 ret = -ENOMEM;
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index f2643cda45db..a3527c00b29a 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -549,13 +549,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
549 unsigned int keylen) 549 unsigned int keylen)
550{ 550{
551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); 551 struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
552 struct rtattr *rta = (struct rtattr *)key;
553 struct cc_crypto_req cc_req = {}; 552 struct cc_crypto_req cc_req = {};
554 struct crypto_authenc_key_param *param;
555 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; 553 struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
556 int rc = -EINVAL;
557 unsigned int seq_len = 0; 554 unsigned int seq_len = 0;
558 struct device *dev = drvdata_to_dev(ctx->drvdata); 555 struct device *dev = drvdata_to_dev(ctx->drvdata);
556 const u8 *enckey, *authkey;
557 int rc;
559 558
560 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", 559 dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
561 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); 560 ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -563,35 +562,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
563 /* STAT_PHASE_0: Init and sanity checks */ 562 /* STAT_PHASE_0: Init and sanity checks */
564 563
565 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */ 564 if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
566 if (!RTA_OK(rta, keylen)) 565 struct crypto_authenc_keys keys;
567 goto badkey; 566
568 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 567 rc = crypto_authenc_extractkeys(&keys, key, keylen);
569 goto badkey; 568 if (rc)
570 if (RTA_PAYLOAD(rta) < sizeof(*param))
571 goto badkey;
572 param = RTA_DATA(rta);
573 ctx->enc_keylen = be32_to_cpu(param->enckeylen);
574 key += RTA_ALIGN(rta->rta_len);
575 keylen -= RTA_ALIGN(rta->rta_len);
576 if (keylen < ctx->enc_keylen)
577 goto badkey; 569 goto badkey;
578 ctx->auth_keylen = keylen - ctx->enc_keylen; 570 enckey = keys.enckey;
571 authkey = keys.authkey;
572 ctx->enc_keylen = keys.enckeylen;
573 ctx->auth_keylen = keys.authkeylen;
579 574
580 if (ctx->cipher_mode == DRV_CIPHER_CTR) { 575 if (ctx->cipher_mode == DRV_CIPHER_CTR) {
581 /* the nonce is stored in bytes at end of key */ 576 /* the nonce is stored in bytes at end of key */
577 rc = -EINVAL;
582 if (ctx->enc_keylen < 578 if (ctx->enc_keylen <
583 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) 579 (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
584 goto badkey; 580 goto badkey;
585 /* Copy nonce from last 4 bytes in CTR key to 581 /* Copy nonce from last 4 bytes in CTR key to
586 * first 4 bytes in CTR IV 582 * first 4 bytes in CTR IV
587 */ 583 */
588 memcpy(ctx->ctr_nonce, key + ctx->auth_keylen + 584 memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
589 ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, 585 CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
590 CTR_RFC3686_NONCE_SIZE);
591 /* Set CTR key size */ 586 /* Set CTR key size */
592 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; 587 ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
593 } 588 }
594 } else { /* non-authenc - has just one key */ 589 } else { /* non-authenc - has just one key */
590 enckey = key;
591 authkey = NULL;
595 ctx->enc_keylen = keylen; 592 ctx->enc_keylen = keylen;
596 ctx->auth_keylen = 0; 593 ctx->auth_keylen = 0;
597 } 594 }
@@ -603,13 +600,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
603 /* STAT_PHASE_1: Copy key to ctx */ 600 /* STAT_PHASE_1: Copy key to ctx */
604 601
605 /* Get key material */ 602 /* Get key material */
606 memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen); 603 memcpy(ctx->enckey, enckey, ctx->enc_keylen);
607 if (ctx->enc_keylen == 24) 604 if (ctx->enc_keylen == 24)
608 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); 605 memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
609 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { 606 if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
610 memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen); 607 memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
608 ctx->auth_keylen);
611 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ 609 } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
612 rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen); 610 rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
613 if (rc) 611 if (rc)
614 goto badkey; 612 goto badkey;
615 } 613 }
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 8ada308d72ee..b0125ad65825 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
380 rc = cc_ivgen_init(new_drvdata); 380 rc = cc_ivgen_init(new_drvdata);
381 if (rc) { 381 if (rc) {
382 dev_err(dev, "cc_ivgen_init failed\n"); 382 dev_err(dev, "cc_ivgen_init failed\n");
383 goto post_power_mgr_err; 383 goto post_buf_mgr_err;
384 } 384 }
385 385
386 /* Allocate crypto algs */ 386 /* Allocate crypto algs */
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
403 goto post_hash_err; 403 goto post_hash_err;
404 } 404 }
405 405
406 /* All set, we can allow autosuspend */
407 cc_pm_go(new_drvdata);
408
406 /* If we got here and FIPS mode is enabled 409 /* If we got here and FIPS mode is enabled
407 * it means all FIPS test passed, so let TEE 410 * it means all FIPS test passed, so let TEE
408 * know we're good. 411 * know we're good.
@@ -417,8 +420,6 @@ post_cipher_err:
417 cc_cipher_free(new_drvdata); 420 cc_cipher_free(new_drvdata);
418post_ivgen_err: 421post_ivgen_err:
419 cc_ivgen_fini(new_drvdata); 422 cc_ivgen_fini(new_drvdata);
420post_power_mgr_err:
421 cc_pm_fini(new_drvdata);
422post_buf_mgr_err: 423post_buf_mgr_err:
423 cc_buffer_mgr_fini(new_drvdata); 424 cc_buffer_mgr_fini(new_drvdata);
424post_req_mgr_err: 425post_req_mgr_err:
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
index d990f472e89f..6ff7e75ad90e 100644
--- a/drivers/crypto/ccree/cc_pm.c
+++ b/drivers/crypto/ccree/cc_pm.c
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev)
100 100
101int cc_pm_init(struct cc_drvdata *drvdata) 101int cc_pm_init(struct cc_drvdata *drvdata)
102{ 102{
103 int rc = 0;
104 struct device *dev = drvdata_to_dev(drvdata); 103 struct device *dev = drvdata_to_dev(drvdata);
105 104
106 /* must be before the enabling to avoid redundant suspending */ 105 /* must be before the enabling to avoid redundant suspending */
107 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); 106 pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
108 pm_runtime_use_autosuspend(dev); 107 pm_runtime_use_autosuspend(dev);
109 /* activate the PM module */ 108 /* activate the PM module */
110 rc = pm_runtime_set_active(dev); 109 return pm_runtime_set_active(dev);
111 if (rc) 110}
112 return rc;
113 /* enable the PM module*/
114 pm_runtime_enable(dev);
115 111
116 return rc; 112/* enable the PM module*/
113void cc_pm_go(struct cc_drvdata *drvdata)
114{
115 pm_runtime_enable(drvdata_to_dev(drvdata));
117} 116}
118 117
119void cc_pm_fini(struct cc_drvdata *drvdata) 118void cc_pm_fini(struct cc_drvdata *drvdata)
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
index 020a5403c58b..f62624357020 100644
--- a/drivers/crypto/ccree/cc_pm.h
+++ b/drivers/crypto/ccree/cc_pm.h
@@ -16,6 +16,7 @@
16extern const struct dev_pm_ops ccree_pm; 16extern const struct dev_pm_ops ccree_pm;
17 17
18int cc_pm_init(struct cc_drvdata *drvdata); 18int cc_pm_init(struct cc_drvdata *drvdata);
19void cc_pm_go(struct cc_drvdata *drvdata);
19void cc_pm_fini(struct cc_drvdata *drvdata); 20void cc_pm_fini(struct cc_drvdata *drvdata);
20int cc_pm_suspend(struct device *dev); 21int cc_pm_suspend(struct device *dev);
21int cc_pm_resume(struct device *dev); 22int cc_pm_resume(struct device *dev);
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
29 return 0; 30 return 0;
30} 31}
31 32
33 static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
34
32static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} 35static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
33 36
34static inline int cc_pm_suspend(struct device *dev) 37static inline int cc_pm_suspend(struct device *dev)
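
The ccree changes split runtime-PM bring-up in two: cc_pm_init() still configures autosuspend and marks the device active, but pm_runtime_enable() moves into a new cc_pm_go() that init_cc_resources() calls only after every subsystem is up — otherwise the device could autosuspend mid-probe. It also lets the error unwind drop its cc_pm_fini() step. A minimal sketch of the two-phase pattern:

#include <linux/pm_runtime.h>

static int example_pm_init(struct device *dev)
{
	/* Configure first so enabling can't race an autosuspend. */
	pm_runtime_set_autosuspend_delay(dev, 1000);	/* ms, hypothetical */
	pm_runtime_use_autosuspend(dev);
	return pm_runtime_set_active(dev);	/* not yet enabled */
}

static void example_pm_go(struct device *dev)
{
	pm_runtime_enable(dev);	/* only once probe can tolerate suspend */
}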
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index cdc4f9a171d9..adc0cd8ae97b 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -241,8 +241,8 @@ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); 241 memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
242 } else { 242 } else {
243 /* new key */ 243 /* new key */
244 ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY, 244 ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY,
245 &ctx->pkey, GFP_KERNEL); 245 &ctx->pkey, GFP_KERNEL);
246 if (!ctx->key) { 246 if (!ctx->key) {
247 mutex_unlock(&ctx->lock); 247 mutex_unlock(&ctx->lock);
248 return -ENOMEM; 248 return -ENOMEM;
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index c1ee4e7bf996..91ee2bb575df 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1082,9 +1082,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
1082 struct sec_queue_ring_db *ring_db = &queue->ring_db; 1082 struct sec_queue_ring_db *ring_db = &queue->ring_db;
1083 int ret; 1083 int ret;
1084 1084
1085 ring_cmd->vaddr = dma_zalloc_coherent(dev, SEC_Q_CMD_SIZE, 1085 ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
1086 &ring_cmd->paddr, 1086 &ring_cmd->paddr, GFP_KERNEL);
1087 GFP_KERNEL);
1088 if (!ring_cmd->vaddr) 1087 if (!ring_cmd->vaddr)
1089 return -ENOMEM; 1088 return -ENOMEM;
1090 1089
@@ -1092,17 +1091,15 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
1092 mutex_init(&ring_cmd->lock); 1091 mutex_init(&ring_cmd->lock);
1093 ring_cmd->callback = sec_alg_callback; 1092 ring_cmd->callback = sec_alg_callback;
1094 1093
1095 ring_cq->vaddr = dma_zalloc_coherent(dev, SEC_Q_CQ_SIZE, 1094 ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
1096 &ring_cq->paddr, 1095 &ring_cq->paddr, GFP_KERNEL);
1097 GFP_KERNEL);
1098 if (!ring_cq->vaddr) { 1096 if (!ring_cq->vaddr) {
1099 ret = -ENOMEM; 1097 ret = -ENOMEM;
1100 goto err_free_ring_cmd; 1098 goto err_free_ring_cmd;
1101 } 1099 }
1102 1100
1103 ring_db->vaddr = dma_zalloc_coherent(dev, SEC_Q_DB_SIZE, 1101 ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
1104 &ring_db->paddr, 1102 &ring_db->paddr, GFP_KERNEL);
1105 GFP_KERNEL);
1106 if (!ring_db->vaddr) { 1103 if (!ring_db->vaddr) {
1107 ret = -ENOMEM; 1104 ret = -ENOMEM;
1108 goto err_free_ring_cq; 1105 goto err_free_ring_cq;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 19fba998b86b..1b0d156bb9be 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -260,9 +260,9 @@ static int setup_crypt_desc(void)
260{ 260{
261 struct device *dev = &pdev->dev; 261 struct device *dev = &pdev->dev;
262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64); 262 BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
263 crypt_virt = dma_zalloc_coherent(dev, 263 crypt_virt = dma_alloc_coherent(dev,
264 NPE_QLEN * sizeof(struct crypt_ctl), 264 NPE_QLEN * sizeof(struct crypt_ctl),
265 &crypt_phys, GFP_ATOMIC); 265 &crypt_phys, GFP_ATOMIC);
266 if (!crypt_virt) 266 if (!crypt_virt)
267 return -ENOMEM; 267 return -ENOMEM;
268 return 0; 268 return 0;
diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c
index ee0404e27a0f..5660e5e5e022 100644
--- a/drivers/crypto/mediatek/mtk-platform.c
+++ b/drivers/crypto/mediatek/mtk-platform.c
@@ -453,17 +453,17 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
453 if (!ring[i]) 453 if (!ring[i])
454 goto err_cleanup; 454 goto err_cleanup;
455 455
456 ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, 456 ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
457 MTK_DESC_RING_SZ, 457 MTK_DESC_RING_SZ,
458 &ring[i]->cmd_dma, 458 &ring[i]->cmd_dma,
459 GFP_KERNEL); 459 GFP_KERNEL);
460 if (!ring[i]->cmd_base) 460 if (!ring[i]->cmd_base)
461 goto err_cleanup; 461 goto err_cleanup;
462 462
463 ring[i]->res_base = dma_zalloc_coherent(cryp->dev, 463 ring[i]->res_base = dma_alloc_coherent(cryp->dev,
464 MTK_DESC_RING_SZ, 464 MTK_DESC_RING_SZ,
465 &ring[i]->res_dma, 465 &ring[i]->res_dma,
466 GFP_KERNEL); 466 GFP_KERNEL);
467 if (!ring[i]->res_base) 467 if (!ring[i]->res_base)
468 goto err_cleanup; 468 goto err_cleanup;
469 469
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index 3744b22f0c46..d28cba34773e 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -244,18 +244,18 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
244 dev_to_node(&GET_DEV(accel_dev))); 244 dev_to_node(&GET_DEV(accel_dev)));
245 if (!admin) 245 if (!admin)
246 return -ENOMEM; 246 return -ENOMEM;
247 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 247 admin->virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
248 &admin->phy_addr, GFP_KERNEL); 248 &admin->phy_addr, GFP_KERNEL);
249 if (!admin->virt_addr) { 249 if (!admin->virt_addr) {
250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n"); 250 dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
251 kfree(admin); 251 kfree(admin);
252 return -ENOMEM; 252 return -ENOMEM;
253 } 253 }
254 254
255 admin->virt_tbl_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), 255 admin->virt_tbl_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
256 PAGE_SIZE, 256 PAGE_SIZE,
257 &admin->const_tbl_addr, 257 &admin->const_tbl_addr,
258 GFP_KERNEL); 258 GFP_KERNEL);
259 if (!admin->virt_tbl_addr) { 259 if (!admin->virt_tbl_addr) {
260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n"); 260 dev_err(&GET_DEV(accel_dev), "Failed to allocate const_tbl\n");
261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 261 dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index d2698299896f..975c75198f56 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -601,15 +601,15 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
601 601
602 dev = &GET_DEV(inst->accel_dev); 602 dev = &GET_DEV(inst->accel_dev);
603 ctx->inst = inst; 603 ctx->inst = inst;
604 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 604 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
605 &ctx->enc_cd_paddr, 605 &ctx->enc_cd_paddr,
606 GFP_ATOMIC); 606 GFP_ATOMIC);
607 if (!ctx->enc_cd) { 607 if (!ctx->enc_cd) {
608 return -ENOMEM; 608 return -ENOMEM;
609 } 609 }
610 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 610 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
611 &ctx->dec_cd_paddr, 611 &ctx->dec_cd_paddr,
612 GFP_ATOMIC); 612 GFP_ATOMIC);
613 if (!ctx->dec_cd) { 613 if (!ctx->dec_cd) {
614 goto out_free_enc; 614 goto out_free_enc;
615 } 615 }
@@ -933,16 +933,16 @@ static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
933 933
934 dev = &GET_DEV(inst->accel_dev); 934 dev = &GET_DEV(inst->accel_dev);
935 ctx->inst = inst; 935 ctx->inst = inst;
936 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), 936 ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
937 &ctx->enc_cd_paddr, 937 &ctx->enc_cd_paddr,
938 GFP_ATOMIC); 938 GFP_ATOMIC);
939 if (!ctx->enc_cd) { 939 if (!ctx->enc_cd) {
940 spin_unlock(&ctx->lock); 940 spin_unlock(&ctx->lock);
941 return -ENOMEM; 941 return -ENOMEM;
942 } 942 }
943 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), 943 ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
944 &ctx->dec_cd_paddr, 944 &ctx->dec_cd_paddr,
945 GFP_ATOMIC); 945 GFP_ATOMIC);
946 if (!ctx->dec_cd) { 946 if (!ctx->dec_cd) {
947 spin_unlock(&ctx->lock); 947 spin_unlock(&ctx->lock);
948 goto out_free_enc; 948 goto out_free_enc;
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 320e7854b4ee..c9f324730d71 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -332,10 +332,10 @@ static int qat_dh_compute_value(struct kpp_request *req)
332 } else { 332 } else {
333 int shift = ctx->p_size - req->src_len; 333 int shift = ctx->p_size - req->src_len;
334 334
335 qat_req->src_align = dma_zalloc_coherent(dev, 335 qat_req->src_align = dma_alloc_coherent(dev,
336 ctx->p_size, 336 ctx->p_size,
337 &qat_req->in.dh.in.b, 337 &qat_req->in.dh.in.b,
338 GFP_KERNEL); 338 GFP_KERNEL);
339 if (unlikely(!qat_req->src_align)) 339 if (unlikely(!qat_req->src_align))
340 return ret; 340 return ret;
341 341
@@ -360,9 +360,9 @@ static int qat_dh_compute_value(struct kpp_request *req)
360 goto unmap_src; 360 goto unmap_src;
361 361
362 } else { 362 } else {
363 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size, 363 qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size,
364 &qat_req->out.dh.r, 364 &qat_req->out.dh.r,
365 GFP_KERNEL); 365 GFP_KERNEL);
366 if (unlikely(!qat_req->dst_align)) 366 if (unlikely(!qat_req->dst_align))
367 goto unmap_src; 367 goto unmap_src;
368 } 368 }
@@ -447,7 +447,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
447 return -EINVAL; 447 return -EINVAL;
448 448
449 ctx->p_size = params->p_size; 449 ctx->p_size = params->p_size;
450 ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); 450 ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
451 if (!ctx->p) 451 if (!ctx->p)
452 return -ENOMEM; 452 return -ENOMEM;
453 memcpy(ctx->p, params->p, ctx->p_size); 453 memcpy(ctx->p, params->p, ctx->p_size);
@@ -458,7 +458,7 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
458 return 0; 458 return 0;
459 } 459 }
460 460
461 ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL); 461 ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
462 if (!ctx->g) 462 if (!ctx->g)
463 return -ENOMEM; 463 return -ENOMEM;
464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g, 464 memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
@@ -503,8 +503,8 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
503 if (ret < 0) 503 if (ret < 0)
504 goto err_clear_ctx; 504 goto err_clear_ctx;
505 505
506 ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa, 506 ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
507 GFP_KERNEL); 507 GFP_KERNEL);
508 if (!ctx->xa) { 508 if (!ctx->xa) {
509 ret = -ENOMEM; 509 ret = -ENOMEM;
510 goto err_clear_ctx; 510 goto err_clear_ctx;
@@ -737,9 +737,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
737 } else { 737 } else {
738 int shift = ctx->key_sz - req->src_len; 738 int shift = ctx->key_sz - req->src_len;
739 739
740 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 740 qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
741 &qat_req->in.rsa.enc.m, 741 &qat_req->in.rsa.enc.m,
742 GFP_KERNEL); 742 GFP_KERNEL);
743 if (unlikely(!qat_req->src_align)) 743 if (unlikely(!qat_req->src_align))
744 return ret; 744 return ret;
745 745
@@ -756,9 +756,9 @@ static int qat_rsa_enc(struct akcipher_request *req)
756 goto unmap_src; 756 goto unmap_src;
757 757
758 } else { 758 } else {
759 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 759 qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
760 &qat_req->out.rsa.enc.c, 760 &qat_req->out.rsa.enc.c,
761 GFP_KERNEL); 761 GFP_KERNEL);
762 if (unlikely(!qat_req->dst_align)) 762 if (unlikely(!qat_req->dst_align))
763 goto unmap_src; 763 goto unmap_src;
764 764
@@ -881,9 +881,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
881 } else { 881 } else {
882 int shift = ctx->key_sz - req->src_len; 882 int shift = ctx->key_sz - req->src_len;
883 883
884 qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz, 884 qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
885 &qat_req->in.rsa.dec.c, 885 &qat_req->in.rsa.dec.c,
886 GFP_KERNEL); 886 GFP_KERNEL);
887 if (unlikely(!qat_req->src_align)) 887 if (unlikely(!qat_req->src_align))
888 return ret; 888 return ret;
889 889
@@ -900,9 +900,9 @@ static int qat_rsa_dec(struct akcipher_request *req)
900 goto unmap_src; 900 goto unmap_src;
901 901
902 } else { 902 } else {
903 qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz, 903 qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
904 &qat_req->out.rsa.dec.m, 904 &qat_req->out.rsa.dec.m,
905 GFP_KERNEL); 905 GFP_KERNEL);
906 if (unlikely(!qat_req->dst_align)) 906 if (unlikely(!qat_req->dst_align))
907 goto unmap_src; 907 goto unmap_src;
908 908
@@ -989,7 +989,7 @@ static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
989 goto err; 989 goto err;
990 990
991 ret = -ENOMEM; 991 ret = -ENOMEM;
992 ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL); 992 ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
993 if (!ctx->n) 993 if (!ctx->n)
994 goto err; 994 goto err;
995 995
@@ -1018,7 +1018,7 @@ static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
1018 return -EINVAL; 1018 return -EINVAL;
1019 } 1019 }
1020 1020
1021 ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL); 1021 ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
1022 if (!ctx->e) 1022 if (!ctx->e)
1023 return -ENOMEM; 1023 return -ENOMEM;
1024 1024
@@ -1044,7 +1044,7 @@ static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
1044 goto err; 1044 goto err;
1045 1045
1046 ret = -ENOMEM; 1046 ret = -ENOMEM;
1047 ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL); 1047 ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
1048 if (!ctx->d) 1048 if (!ctx->d)
1049 goto err; 1049 goto err;
1050 1050
@@ -1077,7 +1077,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1077 qat_rsa_drop_leading_zeros(&ptr, &len); 1077 qat_rsa_drop_leading_zeros(&ptr, &len);
1078 if (!len) 1078 if (!len)
1079 goto err; 1079 goto err;
1080 ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL); 1080 ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
1081 if (!ctx->p) 1081 if (!ctx->p)
1082 goto err; 1082 goto err;
1083 memcpy(ctx->p + (half_key_sz - len), ptr, len); 1083 memcpy(ctx->p + (half_key_sz - len), ptr, len);
@@ -1088,7 +1088,7 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1088 qat_rsa_drop_leading_zeros(&ptr, &len); 1088 qat_rsa_drop_leading_zeros(&ptr, &len);
1089 if (!len) 1089 if (!len)
1090 goto free_p; 1090 goto free_p;
1091 ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL); 1091 ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
1092 if (!ctx->q) 1092 if (!ctx->q)
1093 goto free_p; 1093 goto free_p;
1094 memcpy(ctx->q + (half_key_sz - len), ptr, len); 1094 memcpy(ctx->q + (half_key_sz - len), ptr, len);
@@ -1099,8 +1099,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1099 qat_rsa_drop_leading_zeros(&ptr, &len); 1099 qat_rsa_drop_leading_zeros(&ptr, &len);
1100 if (!len) 1100 if (!len)
1101 goto free_q; 1101 goto free_q;
1102 ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp, 1102 ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
1103 GFP_KERNEL); 1103 GFP_KERNEL);
1104 if (!ctx->dp) 1104 if (!ctx->dp)
1105 goto free_q; 1105 goto free_q;
1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len); 1106 memcpy(ctx->dp + (half_key_sz - len), ptr, len);
@@ -1111,8 +1111,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1111 qat_rsa_drop_leading_zeros(&ptr, &len); 1111 qat_rsa_drop_leading_zeros(&ptr, &len);
1112 if (!len) 1112 if (!len)
1113 goto free_dp; 1113 goto free_dp;
1114 ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq, 1114 ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
1115 GFP_KERNEL); 1115 GFP_KERNEL);
1116 if (!ctx->dq) 1116 if (!ctx->dq)
1117 goto free_dp; 1117 goto free_dp;
1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len); 1118 memcpy(ctx->dq + (half_key_sz - len), ptr, len);
@@ -1123,8 +1123,8 @@ static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1123 qat_rsa_drop_leading_zeros(&ptr, &len); 1123 qat_rsa_drop_leading_zeros(&ptr, &len);
1124 if (!len) 1124 if (!len)
1125 goto free_dq; 1125 goto free_dq;
1126 ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv, 1126 ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
1127 GFP_KERNEL); 1127 GFP_KERNEL);
1128 if (!ctx->qinv) 1128 if (!ctx->qinv)
1129 goto free_dq; 1129 goto free_dq;
1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len); 1130 memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 45e20707cef8..f8e2c5c3f4eb 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1361 struct talitos_private *priv = dev_get_drvdata(dev); 1361 struct talitos_private *priv = dev_get_drvdata(dev);
1362 bool is_sec1 = has_ftr_sec1(priv); 1362 bool is_sec1 = has_ftr_sec1(priv);
1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; 1363 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1364 void *err;
1365 1364
1366 if (cryptlen + authsize > max_len) { 1365 if (cryptlen + authsize > max_len) {
1367 dev_err(dev, "length exceeds h/w max limit\n"); 1366 dev_err(dev, "length exceeds h/w max limit\n");
1368 return ERR_PTR(-EINVAL); 1367 return ERR_PTR(-EINVAL);
1369 } 1368 }
1370 1369
1371 if (ivsize)
1372 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1373
1374 if (!dst || dst == src) { 1370 if (!dst || dst == src) {
1375 src_len = assoclen + cryptlen + authsize; 1371 src_len = assoclen + cryptlen + authsize;
1376 src_nents = sg_nents_for_len(src, src_len); 1372 src_nents = sg_nents_for_len(src, src_len);
1377 if (src_nents < 0) { 1373 if (src_nents < 0) {
1378 dev_err(dev, "Invalid number of src SG.\n"); 1374 dev_err(dev, "Invalid number of src SG.\n");
1379 err = ERR_PTR(-EINVAL); 1375 return ERR_PTR(-EINVAL);
1380 goto error_sg;
1381 } 1376 }
1382 src_nents = (src_nents == 1) ? 0 : src_nents; 1377 src_nents = (src_nents == 1) ? 0 : src_nents;
1383 dst_nents = dst ? src_nents : 0; 1378 dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1387 src_nents = sg_nents_for_len(src, src_len); 1382 src_nents = sg_nents_for_len(src, src_len);
1388 if (src_nents < 0) { 1383 if (src_nents < 0) {
1389 dev_err(dev, "Invalid number of src SG.\n"); 1384 dev_err(dev, "Invalid number of src SG.\n");
1390 err = ERR_PTR(-EINVAL); 1385 return ERR_PTR(-EINVAL);
1391 goto error_sg;
1392 } 1386 }
1393 src_nents = (src_nents == 1) ? 0 : src_nents; 1387 src_nents = (src_nents == 1) ? 0 : src_nents;
1394 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); 1388 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1395 dst_nents = sg_nents_for_len(dst, dst_len); 1389 dst_nents = sg_nents_for_len(dst, dst_len);
1396 if (dst_nents < 0) { 1390 if (dst_nents < 0) {
1397 dev_err(dev, "Invalid number of dst SG.\n"); 1391 dev_err(dev, "Invalid number of dst SG.\n");
1398 err = ERR_PTR(-EINVAL); 1392 return ERR_PTR(-EINVAL);
1399 goto error_sg;
1400 } 1393 }
1401 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1394 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1402 } 1395 }
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1423 /* if it's an ahash, add space for a second desc next to the first one */ 1416 /* if it's an ahash, add space for a second desc next to the first one */
1424 if (is_sec1 && !dst) 1417 if (is_sec1 && !dst)
1425 alloc_len += sizeof(struct talitos_desc); 1418 alloc_len += sizeof(struct talitos_desc);
1419 alloc_len += ivsize;
1426 1420
1427 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1421 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1428 if (!edesc) { 1422 if (!edesc)
1429 err = ERR_PTR(-ENOMEM); 1423 return ERR_PTR(-ENOMEM);
1430 goto error_sg; 1424 if (ivsize) {
1425 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1426 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1431 } 1427 }
1432 memset(&edesc->desc, 0, sizeof(edesc->desc)); 1428 memset(&edesc->desc, 0, sizeof(edesc->desc));
1433 1429
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1445 DMA_BIDIRECTIONAL); 1441 DMA_BIDIRECTIONAL);
1446 } 1442 }
1447 return edesc; 1443 return edesc;
1448error_sg:
1449 if (iv_dma)
1450 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1451 return err;
1452} 1444}
1453 1445
1454static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, 1446static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
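The talitos change reworks IV handling in talitos_edesc_alloc(): previously the IV was DMA-mapped before any allocation, so every later failure needed the error_sg label to unmap it. Now the descriptor allocation reserves ivsize extra bytes at its tail, the IV is copied there, and the mapping happens only after kmalloc() succeeds; earlier failures can simply return. Condensed from the hunks above:

	alloc_len += ivsize;
	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);	/* nothing mapped yet */
	if (ivsize) {
		/* the IV now lives inside the edesc allocation */
		iv = memcpy((u8 *)edesc + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}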
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4e557684f792..fe69dccfa0c0 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
203 u32 save_cim; 203 u32 save_cim;
204 u32 save_cnda; 204 u32 save_cnda;
205 u32 save_cndc; 205 u32 save_cndc;
206 u32 irq_status;
206 unsigned long status; 207 unsigned long status;
207 struct tasklet_struct tasklet; 208 struct tasklet_struct tasklet;
208 struct dma_slave_config sconfig; 209 struct dma_slave_config sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
1580 struct at_xdmac_desc *desc; 1581 struct at_xdmac_desc *desc;
1581 u32 error_mask; 1582 u32 error_mask;
1582 1583
1583 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", 1584 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1584 __func__, atchan->status); 1585 __func__, atchan->irq_status);
1585 1586
1586 error_mask = AT_XDMAC_CIS_RBEIS 1587 error_mask = AT_XDMAC_CIS_RBEIS
1587 | AT_XDMAC_CIS_WBEIS 1588 | AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
1589 1590
1590 if (at_xdmac_chan_is_cyclic(atchan)) { 1591 if (at_xdmac_chan_is_cyclic(atchan)) {
1591 at_xdmac_handle_cyclic(atchan); 1592 at_xdmac_handle_cyclic(atchan);
1592 } else if ((atchan->status & AT_XDMAC_CIS_LIS) 1593 } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1593 || (atchan->status & error_mask)) { 1594 || (atchan->irq_status & error_mask)) {
1594 struct dma_async_tx_descriptor *txd; 1595 struct dma_async_tx_descriptor *txd;
1595 1596
1596 if (atchan->status & AT_XDMAC_CIS_RBEIS) 1597 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1597 dev_err(chan2dev(&atchan->chan), "read bus error!!!"); 1598 dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1598 if (atchan->status & AT_XDMAC_CIS_WBEIS) 1599 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1599 dev_err(chan2dev(&atchan->chan), "write bus error!!!"); 1600 dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1600 if (atchan->status & AT_XDMAC_CIS_ROIS) 1601 if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1601 dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); 1602 dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1602 1603
1603 spin_lock(&atchan->lock); 1604 spin_lock(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1652 atchan = &atxdmac->chan[i]; 1653 atchan = &atxdmac->chan[i];
1653 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); 1654 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1654 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); 1655 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1655 atchan->status = chan_status & chan_imr; 1656 atchan->irq_status = chan_status & chan_imr;
1656 dev_vdbg(atxdmac->dma.dev, 1657 dev_vdbg(atxdmac->dma.dev,
1657 "%s: chan%d: imr=0x%x, status=0x%x\n", 1658 "%s: chan%d: imr=0x%x, status=0x%x\n",
1658 __func__, i, chan_imr, chan_status); 1659 __func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1666 at_xdmac_chan_read(atchan, AT_XDMAC_CDA), 1667 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1667 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); 1668 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1668 1669
1669 if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) 1670 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1670 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); 1671 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1671 1672
1672 tasklet_schedule(&atchan->tasklet); 1673 tasklet_schedule(&atchan->tasklet);
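The at_xdmac fix separates two things that previously shared one word: the interrupt handler stored the masked hardware cause in atchan->status, which the rest of the driver also uses as a channel-state bitmap via bit operations, so an interrupt could clobber channel state and the tasklet could act on stale cause bits. A dedicated irq_status field now carries the snapshot from hard IRQ to tasklet. Reduced sketch (symbols as in the driver):

	/* hard IRQ: snapshot the masked cause for the tasklet */
	atchan->irq_status = chan_status & chan_imr;
	tasklet_schedule(&atchan->tasklet);

	/* tasklet: consume the snapshot; atchan->status keeps holding
	 * driver state bits only */
	if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
		dev_err(chan2dev(&atchan->chan), "bus error!!!");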
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 1a44c8086d77..ae10f5614f95 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
406 } 406 }
407} 407}
408 408
409static int bcm2835_dma_abort(void __iomem *chan_base) 409static int bcm2835_dma_abort(struct bcm2835_chan *c)
410{ 410{
411 unsigned long cs; 411 void __iomem *chan_base = c->chan_base;
412 long int timeout = 10000; 412 long int timeout = 10000;
413 413
414 cs = readl(chan_base + BCM2835_DMA_CS); 414 /*
415 if (!(cs & BCM2835_DMA_ACTIVE)) 415 * A zero control block address means the channel is idle.
416 * (The ACTIVE flag in the CS register is not a reliable indicator.)
417 */
418 if (!readl(chan_base + BCM2835_DMA_ADDR))
416 return 0; 419 return 0;
417 420
418 /* Write 0 to the active bit - Pause the DMA */ 421 /* Write 0 to the active bit - Pause the DMA */
419 writel(0, chan_base + BCM2835_DMA_CS); 422 writel(0, chan_base + BCM2835_DMA_CS);
420 423
421 /* Wait for any current AXI transfer to complete */ 424 /* Wait for any current AXI transfer to complete */
422 while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { 425 while ((readl(chan_base + BCM2835_DMA_CS) &
426 BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
423 cpu_relax(); 427 cpu_relax();
424 cs = readl(chan_base + BCM2835_DMA_CS);
425 }
426 428
427 /* We'll un-pause when we set of our next DMA */ 429 /* Peripheral might be stuck and fail to signal AXI write responses */
428 if (!timeout) 430 if (!timeout)
429 return -ETIMEDOUT; 431 dev_err(c->vc.chan.device->dev,
430 432 "failed to complete outstanding writes\n");
431 if (!(cs & BCM2835_DMA_ACTIVE))
432 return 0;
433
434 /* Terminate the control block chain */
435 writel(0, chan_base + BCM2835_DMA_NEXTCB);
436
437 /* Abort the whole DMA */
438 writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
439 chan_base + BCM2835_DMA_CS);
440 433
434 writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
441 return 0; 435 return 0;
442} 436}
443 437
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
476 470
477 spin_lock_irqsave(&c->vc.lock, flags); 471 spin_lock_irqsave(&c->vc.lock, flags);
478 472
479 /* Acknowledge interrupt */ 473 /*
480 writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); 474 * Clear the INT flag to receive further interrupts. Keep the channel
475 * active in case the descriptor is cyclic or in case the client has
476 * already terminated the descriptor and issued a new one. (May happen
477 * if this IRQ handler is threaded.) If the channel is finished, it
478 * will remain idle despite the ACTIVE flag being set.
479 */
480 writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
481 c->chan_base + BCM2835_DMA_CS);
481 482
482 d = c->desc; 483 d = c->desc;
483 484
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
485 if (d->cyclic) { 486 if (d->cyclic) {
486 /* call the cyclic callback */ 487 /* call the cyclic callback */
487 vchan_cyclic_callback(&d->vd); 488 vchan_cyclic_callback(&d->vd);
488 489 } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
489 /* Keep the DMA engine running */
490 writel(BCM2835_DMA_ACTIVE,
491 c->chan_base + BCM2835_DMA_CS);
492 } else {
493 vchan_cookie_complete(&c->desc->vd); 490 vchan_cookie_complete(&c->desc->vd);
494 bcm2835_dma_start_desc(c); 491 bcm2835_dma_start_desc(c);
495 } 492 }
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
779 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 776 struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
780 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); 777 struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
781 unsigned long flags; 778 unsigned long flags;
782 int timeout = 10000;
783 LIST_HEAD(head); 779 LIST_HEAD(head);
784 780
785 spin_lock_irqsave(&c->vc.lock, flags); 781 spin_lock_irqsave(&c->vc.lock, flags);
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
789 list_del_init(&c->node); 785 list_del_init(&c->node);
790 spin_unlock(&d->lock); 786 spin_unlock(&d->lock);
791 787
792 /* 788 /* stop DMA activity */
793 * Stop DMA activity: we assume the callback will not be called
794 * after bcm_dma_abort() returns (even if it does, it will see
795 * c->desc is NULL and exit.)
796 */
797 if (c->desc) { 789 if (c->desc) {
798 vchan_terminate_vdesc(&c->desc->vd); 790 vchan_terminate_vdesc(&c->desc->vd);
799 c->desc = NULL; 791 c->desc = NULL;
800 bcm2835_dma_abort(c->chan_base); 792 bcm2835_dma_abort(c);
801
802 /* Wait for stopping */
803 while (--timeout) {
804 if (!(readl(c->chan_base + BCM2835_DMA_CS) &
805 BCM2835_DMA_ACTIVE))
806 break;
807
808 cpu_relax();
809 }
810
811 if (!timeout)
812 dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
813 } 793 }
814 794
815 vchan_get_all_descriptors(&c->vc, &head); 795 vchan_get_all_descriptors(&c->vc, &head);
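The bcm2835-dma rework changes how a channel is judged idle and torn down. The CS ACTIVE flag turned out to be unreliable, so bcm2835_dma_abort() now treats a zero control-block address as "idle", pauses the channel, drains outstanding AXI writes, and ends with a channel reset; terminate_all drops its own busy-wait accordingly, and the IRQ handler keeps ACTIVE set when acknowledging so cyclic or re-issued descriptors keep running. The new abort sequence, condensed from the hunk (timeout error reporting elided):

	if (!readl(chan_base + BCM2835_DMA_ADDR))	/* no CB: idle */
		return 0;
	writel(0, chan_base + BCM2835_DMA_CS);		/* pause */
	while ((readl(chan_base + BCM2835_DMA_CS) &
		BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
		cpu_relax();				/* drain AXI writes */
	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);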
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 2eea4ef72915..6511928b4cdf 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -711,11 +711,9 @@ static int dmatest_func(void *data)
711 srcs[i] = um->addr[i] + src_off; 711 srcs[i] = um->addr[i] + src_off;
712 ret = dma_mapping_error(dev->dev, um->addr[i]); 712 ret = dma_mapping_error(dev->dev, um->addr[i]);
713 if (ret) { 713 if (ret) {
714 dmaengine_unmap_put(um);
715 result("src mapping error", total_tests, 714 result("src mapping error", total_tests,
716 src_off, dst_off, len, ret); 715 src_off, dst_off, len, ret);
717 failed_tests++; 716 goto error_unmap_continue;
718 continue;
719 } 717 }
720 um->to_cnt++; 718 um->to_cnt++;
721 } 719 }
@@ -730,11 +728,9 @@ static int dmatest_func(void *data)
730 DMA_BIDIRECTIONAL); 728 DMA_BIDIRECTIONAL);
731 ret = dma_mapping_error(dev->dev, dsts[i]); 729 ret = dma_mapping_error(dev->dev, dsts[i]);
732 if (ret) { 730 if (ret) {
733 dmaengine_unmap_put(um);
734 result("dst mapping error", total_tests, 731 result("dst mapping error", total_tests,
735 src_off, dst_off, len, ret); 732 src_off, dst_off, len, ret);
736 failed_tests++; 733 goto error_unmap_continue;
737 continue;
738 } 734 }
739 um->bidi_cnt++; 735 um->bidi_cnt++;
740 } 736 }
@@ -762,12 +758,10 @@ static int dmatest_func(void *data)
762 } 758 }
763 759
764 if (!tx) { 760 if (!tx) {
765 dmaengine_unmap_put(um);
766 result("prep error", total_tests, src_off, 761 result("prep error", total_tests, src_off,
767 dst_off, len, ret); 762 dst_off, len, ret);
768 msleep(100); 763 msleep(100);
769 failed_tests++; 764 goto error_unmap_continue;
770 continue;
771 } 765 }
772 766
773 done->done = false; 767 done->done = false;
@@ -776,12 +770,10 @@ static int dmatest_func(void *data)
776 cookie = tx->tx_submit(tx); 770 cookie = tx->tx_submit(tx);
777 771
778 if (dma_submit_error(cookie)) { 772 if (dma_submit_error(cookie)) {
779 dmaengine_unmap_put(um);
780 result("submit error", total_tests, src_off, 773 result("submit error", total_tests, src_off,
781 dst_off, len, ret); 774 dst_off, len, ret);
782 msleep(100); 775 msleep(100);
783 failed_tests++; 776 goto error_unmap_continue;
784 continue;
785 } 777 }
786 dma_async_issue_pending(chan); 778 dma_async_issue_pending(chan);
787 779
@@ -790,22 +782,20 @@ static int dmatest_func(void *data)
790 782
791 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 783 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
792 784
793 dmaengine_unmap_put(um);
794
795 if (!done->done) { 785 if (!done->done) {
796 result("test timed out", total_tests, src_off, dst_off, 786 result("test timed out", total_tests, src_off, dst_off,
797 len, 0); 787 len, 0);
798 failed_tests++; 788 goto error_unmap_continue;
799 continue;
800 } else if (status != DMA_COMPLETE) { 789 } else if (status != DMA_COMPLETE) {
801 result(status == DMA_ERROR ? 790 result(status == DMA_ERROR ?
802 "completion error status" : 791 "completion error status" :
803 "completion busy status", total_tests, src_off, 792 "completion busy status", total_tests, src_off,
804 dst_off, len, ret); 793 dst_off, len, ret);
805 failed_tests++; 794 goto error_unmap_continue;
806 continue;
807 } 795 }
808 796
797 dmaengine_unmap_put(um);
798
809 if (params->noverify) { 799 if (params->noverify) {
810 verbose_result("test passed", total_tests, src_off, 800 verbose_result("test passed", total_tests, src_off,
811 dst_off, len, 0); 801 dst_off, len, 0);
@@ -846,6 +836,12 @@ static int dmatest_func(void *data)
846 verbose_result("test passed", total_tests, src_off, 836 verbose_result("test passed", total_tests, src_off,
847 dst_off, len, 0); 837 dst_off, len, 0);
848 } 838 }
839
840 continue;
841
842error_unmap_continue:
843 dmaengine_unmap_put(um);
844 failed_tests++;
849 } 845 }
850 ktime = ktime_sub(ktime_get(), ktime); 846 ktime = ktime_sub(ktime_get(), ktime);
851 ktime = ktime_sub(ktime, comparetime); 847 ktime = ktime_sub(ktime, comparetime);
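The dmatest change is a pure error-path consolidation: five copies of "dmaengine_unmap_put(um); failed_tests++; continue;" collapse into a single label at the bottom of the loop body, with an explicit continue guarding the success path. Schematically (test_should_run(), map_buffers() and run_and_verify() are hypothetical stand-ins for the real steps):

	while (test_should_run()) {
		if (map_buffers(um))
			goto error_unmap_continue;
		if (run_and_verify(um))
			goto error_unmap_continue;
		continue;			/* success: skip cleanup */

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
	}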
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index c2fff3f6c9ca..4a09af3cd546 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data)
618{ 618{
619 struct imxdma_channel *imxdmac = (void *)data; 619 struct imxdma_channel *imxdmac = (void *)data;
620 struct imxdma_engine *imxdma = imxdmac->imxdma; 620 struct imxdma_engine *imxdma = imxdmac->imxdma;
621 struct imxdma_desc *desc; 621 struct imxdma_desc *desc, *next_desc;
622 unsigned long flags; 622 unsigned long flags;
623 623
624 spin_lock_irqsave(&imxdma->lock, flags); 624 spin_lock_irqsave(&imxdma->lock, flags);
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data)
648 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); 648 list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
649 649
650 if (!list_empty(&imxdmac->ld_queue)) { 650 if (!list_empty(&imxdmac->ld_queue)) {
651 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, 651 next_desc = list_first_entry(&imxdmac->ld_queue,
652 node); 652 struct imxdma_desc, node);
653 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); 653 list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
654 if (imxdma_xfer_desc(desc) < 0) 654 if (imxdma_xfer_desc(next_desc) < 0)
655 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", 655 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
656 __func__, imxdmac->channel); 656 __func__, imxdmac->channel);
657 } 657 }
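The imx-dma fix addresses a use-after-reassignment in the tasklet: the completed descriptor's callback is invoked at the end of the function, but the old code reused the same desc variable to pick up the next queued descriptor, so the wrong descriptor's callback could fire. A second variable keeps the two roles apart. Condensed from the tasklet:

	/* 'desc' stays pointed at the just-completed descriptor */
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "couldn't xfer desc\n");
	}

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);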
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index a2b0a0e71168..86708fb9bda1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1182,8 +1182,8 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
1182{ 1182{
1183 int ret = -EBUSY; 1183 int ret = -EBUSY;
1184 1184
1185 sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1185 sdma->bd0 = dma_alloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
1186 GFP_NOWAIT); 1186 GFP_NOWAIT);
1187 if (!sdma->bd0) { 1187 if (!sdma->bd0) {
1188 ret = -ENOMEM; 1188 ret = -ENOMEM;
1189 goto out; 1189 goto out;
@@ -1205,8 +1205,8 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); 1205 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1206 int ret = 0; 1206 int ret = 0;
1207 1207
1208 desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys, 1208 desc->bd = dma_alloc_coherent(NULL, bd_size, &desc->bd_phys,
1209 GFP_NOWAIT); 1209 GFP_NOWAIT);
1210 if (!desc->bd) { 1210 if (!desc->bd) {
1211 ret = -ENOMEM; 1211 ret = -ENOMEM;
1212 goto out; 1212 goto out;
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index b7ec56ae02a6..1a2028e1c29e 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -325,8 +325,8 @@ static int mtk_hsdma_alloc_pchan(struct mtk_hsdma_device *hsdma,
325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring. 325 * and [MTK_DMA_SIZE ... 2 * MTK_DMA_SIZE - 1] is for RX ring.
326 */ 326 */
327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd); 327 pc->sz_ring = 2 * MTK_DMA_SIZE * sizeof(*ring->txd);
328 ring->txd = dma_zalloc_coherent(hsdma2dev(hsdma), pc->sz_ring, 328 ring->txd = dma_alloc_coherent(hsdma2dev(hsdma), pc->sz_ring,
329 &ring->tphys, GFP_NOWAIT); 329 &ring->tphys, GFP_NOWAIT);
330 if (!ring->txd) 330 if (!ring->txd)
331 return -ENOMEM; 331 return -ENOMEM;
332 332
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 35193b31a9e0..22cc7f68ef6e 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -416,9 +416,9 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 416 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
417 int ret; 417 int ret;
418 418
419 mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, 419 mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
420 CCW_BLOCK_SIZE, 420 CCW_BLOCK_SIZE,
421 &mxs_chan->ccw_phys, GFP_KERNEL); 421 &mxs_chan->ccw_phys, GFP_KERNEL);
422 if (!mxs_chan->ccw) { 422 if (!mxs_chan->ccw) {
423 ret = -ENOMEM; 423 ret = -ENOMEM;
424 goto err_alloc; 424 goto err_alloc;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 1d5988849aa6..eafd6c4b90fe 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -1208,8 +1208,8 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1208 ring->size = ret; 1208 ring->size = ret;
1209 1209
1210 /* Allocate memory for DMA ring descriptor */ 1210 /* Allocate memory for DMA ring descriptor */
1211 ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, 1211 ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
1212 &ring->desc_paddr, GFP_KERNEL); 1212 &ring->desc_paddr, GFP_KERNEL);
1213 if (!ring->desc_vaddr) { 1213 if (!ring->desc_vaddr) {
1214 chan_err(chan, "Failed to allocate ring desc\n"); 1214 chan_err(chan, "Failed to allocate ring desc\n");
1215 return -ENOMEM; 1215 return -ENOMEM;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 02880963092f..cb20b411493e 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -879,10 +879,9 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
879 */ 879 */
880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
881 /* Allocate the buffer descriptors. */ 881 /* Allocate the buffer descriptors. */
882 chan->seg_v = dma_zalloc_coherent(chan->dev, 882 chan->seg_v = dma_alloc_coherent(chan->dev,
883 sizeof(*chan->seg_v) * 883 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
884 XILINX_DMA_NUM_DESCS, 884 &chan->seg_p, GFP_KERNEL);
885 &chan->seg_p, GFP_KERNEL);
886 if (!chan->seg_v) { 885 if (!chan->seg_v) {
887 dev_err(chan->dev, 886 dev_err(chan->dev,
888 "unable to allocate channel %d descriptors\n", 887 "unable to allocate channel %d descriptors\n",
@@ -895,9 +894,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
895 * so allocating a desc segment during channel allocation for 894 * so allocating a desc segment during channel allocation for
896 * programming tail descriptor. 895 * programming tail descriptor.
897 */ 896 */
898 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, 897 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
899 sizeof(*chan->cyclic_seg_v), 898 sizeof(*chan->cyclic_seg_v),
900 &chan->cyclic_seg_p, GFP_KERNEL); 899 &chan->cyclic_seg_p,
900 GFP_KERNEL);
901 if (!chan->cyclic_seg_v) { 901 if (!chan->cyclic_seg_v) {
902 dev_err(chan->dev, 902 dev_err(chan->dev,
903 "unable to allocate desc segment for cyclic DMA\n"); 903 "unable to allocate desc segment for cyclic DMA\n");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 8db51750ce93..4478787a247f 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -490,9 +490,9 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
490 list_add_tail(&desc->node, &chan->free_list); 490 list_add_tail(&desc->node, &chan->free_list);
491 } 491 }
492 492
493 chan->desc_pool_v = dma_zalloc_coherent(chan->dev, 493 chan->desc_pool_v = dma_alloc_coherent(chan->dev,
494 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), 494 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
495 &chan->desc_pool_p, GFP_KERNEL); 495 &chan->desc_pool_p, GFP_KERNEL);
496 if (!chan->desc_pool_v) 496 if (!chan->desc_pool_v)
497 return -ENOMEM; 497 return -ENOMEM;
498 498
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 4213cb0bb2a7..f8664bac9fa8 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -295,8 +295,8 @@ struct altr_sdram_mc_data {
295#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 295#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
296 296
297/* Sticky registers for Uncorrected Errors */ 297/* Sticky registers for Uncorrected Errors */
298#define S10_SYSMGR_UE_VAL_OFST 0x120 298#define S10_SYSMGR_UE_VAL_OFST 0x220
299#define S10_SYSMGR_UE_ADDR_OFST 0x124 299#define S10_SYSMGR_UE_ADDR_OFST 0x224
300 300
301#define S10_DDR0_IRQ_MASK BIT(16) 301#define S10_DDR0_IRQ_MASK BIT(16)
302 302
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 09b845e90114..a785ffd5af89 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1144,10 +1144,6 @@ static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
1144 if (device->is_local) 1144 if (device->is_local)
1145 return -ENODEV; 1145 return -ENODEV;
1146 1146
1147 if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE)
1148 WARN_ON(dma_set_max_seg_size(device->card->device,
1149 SBP2_MAX_SEG_SIZE));
1150
1151 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); 1147 shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
1152 if (shost == NULL) 1148 if (shost == NULL)
1153 return -ENOMEM; 1149 return -ENOMEM;
@@ -1610,6 +1606,7 @@ static struct scsi_host_template scsi_driver_template = {
1610 .eh_abort_handler = sbp2_scsi_abort, 1606 .eh_abort_handler = sbp2_scsi_abort,
1611 .this_id = -1, 1607 .this_id = -1,
1612 .sg_tablesize = SG_ALL, 1608 .sg_tablesize = SG_ALL,
1609 .max_segment_size = SBP2_MAX_SEG_SIZE,
1613 .can_queue = 1, 1610 .can_queue = 1,
1614 .sdev_attrs = sbp2_scsi_sysfs_attrs, 1611 .sdev_attrs = sbp2_scsi_sysfs_attrs,
1615}; 1612};
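The sbp2 change drops a runtime dma_set_max_seg_size() call (awkwardly wrapped in WARN_ON) from the probe path in favour of the declarative max_segment_size field of scsi_host_template, which the SCSI core applies to the host's DMA device. Only the relevant template fields shown:

static struct scsi_host_template scsi_driver_template = {
	/* other fields as in the driver */
	.sg_tablesize		= SG_ALL,
	.max_segment_size	= SBP2_MAX_SEG_SIZE,
};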
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 472c88ae1c0f..92f843eaf1e0 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
119} 119}
120EXPORT_SYMBOL_GPL(scmi_driver_unregister); 120EXPORT_SYMBOL_GPL(scmi_driver_unregister);
121 121
122static void scmi_device_release(struct device *dev)
123{
124 kfree(to_scmi_dev(dev));
125}
126
122struct scmi_device * 127struct scmi_device *
123scmi_device_create(struct device_node *np, struct device *parent, int protocol) 128scmi_device_create(struct device_node *np, struct device *parent, int protocol)
124{ 129{
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
138 scmi_dev->dev.parent = parent; 143 scmi_dev->dev.parent = parent;
139 scmi_dev->dev.of_node = np; 144 scmi_dev->dev.of_node = np;
140 scmi_dev->dev.bus = &scmi_bus_type; 145 scmi_dev->dev.bus = &scmi_bus_type;
146 scmi_dev->dev.release = scmi_device_release;
141 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); 147 dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
142 148
143 retval = device_register(&scmi_dev->dev); 149 retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@ free_mem:
156void scmi_device_destroy(struct scmi_device *scmi_dev) 162void scmi_device_destroy(struct scmi_device *scmi_dev)
157{ 163{
158 scmi_handle_put(scmi_dev->handle); 164 scmi_handle_put(scmi_dev->handle);
159 device_unregister(&scmi_dev->dev);
160 ida_simple_remove(&scmi_bus_id, scmi_dev->id); 165 ida_simple_remove(&scmi_bus_id, scmi_dev->id);
161 kfree(scmi_dev); 166 device_unregister(&scmi_dev->dev);
162} 167}
163 168
164void scmi_set_handle(struct scmi_device *scmi_dev) 169void scmi_set_handle(struct scmi_device *scmi_dev)
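The SCMI bus fix is a standard device-model correction: memory backing a struct device must be freed from its .release callback rather than directly after device_unregister(), since other code may still hold a reference and drop the last one later. The patch installs a release handler at creation time and removes the explicit kfree() from the destroy path:

static void scmi_device_release(struct device *dev)
{
	kfree(to_scmi_dev(dev));	/* runs when the last ref drops */
}

	/* creation */
	scmi_dev->dev.release = scmi_device_release;

	/* destruction: unregister last, no kfree() here */
	device_unregister(&scmi_dev->dev);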
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 23ea1ed409d1..352bd2473162 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -37,8 +37,9 @@ extern u64 efi_system_table;
37static struct ptdump_info efi_ptdump_info = { 37static struct ptdump_info efi_ptdump_info = {
38 .mm = &efi_mm, 38 .mm = &efi_mm,
39 .markers = (struct addr_marker[]){ 39 .markers = (struct addr_marker[]){
40 { 0, "UEFI runtime start" }, 40 { 0, "UEFI runtime start" },
41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } 41 { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" },
42 { -1, NULL }
42 }, 43 },
43 .base_addr = 0, 44 .base_addr = 0,
44}; 45};
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 4c46ff6f2242..55b77c576c42 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
592 592
593 early_memunmap(tbl, sizeof(*tbl)); 593 early_memunmap(tbl, sizeof(*tbl));
594 } 594 }
595 return 0;
596}
597 595
598int __init efi_apply_persistent_mem_reservations(void)
599{
600 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { 596 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
601 unsigned long prsv = efi.mem_reserve; 597 unsigned long prsv = efi.mem_reserve;
602 598
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index eee42d5e25ee..c037c6c5d0b7 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; 75 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
76 efi_status_t status; 76 efi_status_t status;
77 77
78 if (IS_ENABLED(CONFIG_ARM))
79 return;
80
81 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), 78 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
82 (void **)&rsv); 79 (void **)&rsv);
83 if (status != EFI_SUCCESS) { 80 if (status != EFI_SUCCESS) {
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 8903b9ccfc2b..e2abfdb5cee6 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
147static DEFINE_SEMAPHORE(efi_runtime_lock); 147static DEFINE_SEMAPHORE(efi_runtime_lock);
148 148
149/* 149/*
150 * Expose the EFI runtime lock to the UV platform
151 */
152#ifdef CONFIG_X86_UV
153extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
154#endif
155
156/*
150 * Calls the appropriate efi_runtime_service() with the appropriate 157 * Calls the appropriate efi_runtime_service() with the appropriate
151 * arguments. 158 * arguments.
152 * 159 *
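The runtime-wrappers hunk uses the __alias attribute to give the file-local efi_runtime_lock a second linker-visible symbol for the x86 UV platform, rather than making the semaphore globally non-static. The general pattern, with illustrative names not taken from the patch:

#include <linux/compiler.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(internal_lock);

/*
 * Same object, second symbol: consumers declare
 * 'extern struct semaphore exported_lock;' and link against this.
 */
extern struct semaphore exported_lock __alias(internal_lock);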
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index a1a09e04fab8..13851b3d1c56 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -508,14 +508,11 @@ static int __init s10_init(void)
508 return -ENODEV; 508 return -ENODEV;
509 509
510 np = of_find_matching_node(fw_np, s10_of_match); 510 np = of_find_matching_node(fw_np, s10_of_match);
511 if (!np) { 511 if (!np)
512 of_node_put(fw_np);
513 return -ENODEV; 512 return -ENODEV;
514 }
515 513
516 of_node_put(np); 514 of_node_put(np);
517 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); 515 ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
518 of_node_put(fw_np);
519 if (ret) 516 if (ret)
520 return ret; 517 return ret;
521 518
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c
index 6b11f1314248..7f9e0304b510 100644
--- a/drivers/gpio/gpio-altera-a10sr.c
+++ b/drivers/gpio/gpio-altera-a10sr.c
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc,
66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, 66static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc,
67 unsigned int nr, int value) 67 unsigned int nr, int value)
68{ 68{
69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) 69 if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) {
70 altr_a10sr_gpio_set(gc, nr, value);
70 return 0; 71 return 0;
72 }
71 return -EINVAL; 73 return -EINVAL;
72} 74}
73 75
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
index e0d6a0a7bc69..e41223c05f6e 100644
--- a/drivers/gpio/gpio-eic-sprd.c
+++ b/drivers/gpio/gpio-eic-sprd.c
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset)
180 180
181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) 181static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset)
182{ 182{
183 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); 183 struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
184
185 switch (sprd_eic->type) {
186 case SPRD_EIC_DEBOUNCE:
187 return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA);
188 case SPRD_EIC_ASYNC:
189 return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA);
190 case SPRD_EIC_SYNC:
191 return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA);
192 default:
193 return -ENOTSUPP;
194 }
184} 195}
185 196
186static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) 197static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
368 irq_set_handler_locked(data, handle_edge_irq); 379 irq_set_handler_locked(data, handle_edge_irq);
369 break; 380 break;
370 case IRQ_TYPE_EDGE_BOTH: 381 case IRQ_TYPE_EDGE_BOTH:
382 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
371 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); 383 sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
372 irq_set_handler_locked(data, handle_edge_irq); 384 irq_set_handler_locked(data, handle_edge_irq);
373 break; 385 break;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 83617fdc661d..0dc96419efe3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -289,7 +289,7 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
289 return pca953x_check_register(chip, reg, bank); 289 return pca953x_check_register(chip, reg, bank);
290} 290}
291 291
292const struct regmap_config pca953x_i2c_regmap = { 292static const struct regmap_config pca953x_i2c_regmap = {
293 .reg_bits = 8, 293 .reg_bits = 8,
294 .val_bits = 8, 294 .val_bits = 8,
295 295
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index adf72dda25a2..68a35b65925a 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table);
84 */ 84 */
85struct pcf857x { 85struct pcf857x {
86 struct gpio_chip chip; 86 struct gpio_chip chip;
87 struct irq_chip irqchip;
87 struct i2c_client *client; 88 struct i2c_client *client;
88 struct mutex lock; /* protect 'out' */ 89 struct mutex lock; /* protect 'out' */
89 unsigned out; /* software latch */ 90 unsigned out; /* software latch */
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data)
252 mutex_unlock(&gpio->lock); 253 mutex_unlock(&gpio->lock);
253} 254}
254 255
255static struct irq_chip pcf857x_irq_chip = {
256 .name = "pcf857x",
257 .irq_enable = pcf857x_irq_enable,
258 .irq_disable = pcf857x_irq_disable,
259 .irq_ack = noop,
260 .irq_mask = noop,
261 .irq_unmask = noop,
262 .irq_set_wake = pcf857x_irq_set_wake,
263 .irq_bus_lock = pcf857x_irq_bus_lock,
264 .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
265};
266
267/*-------------------------------------------------------------------------*/ 256/*-------------------------------------------------------------------------*/
268 257
269static int pcf857x_probe(struct i2c_client *client, 258static int pcf857x_probe(struct i2c_client *client,
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client,
376 365
377 /* Enable irqchip if we have an interrupt */ 366 /* Enable irqchip if we have an interrupt */
378 if (client->irq) { 367 if (client->irq) {
368 gpio->irqchip.name = "pcf857x",
369 gpio->irqchip.irq_enable = pcf857x_irq_enable,
370 gpio->irqchip.irq_disable = pcf857x_irq_disable,
371 gpio->irqchip.irq_ack = noop,
372 gpio->irqchip.irq_mask = noop,
373 gpio->irqchip.irq_unmask = noop,
374 gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake,
375 gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock,
376 gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock,
379 status = gpiochip_irqchip_add_nested(&gpio->chip, 377 status = gpiochip_irqchip_add_nested(&gpio->chip,
380 &pcf857x_irq_chip, 378 &gpio->irqchip,
381 0, handle_level_irq, 379 0, handle_level_irq,
382 IRQ_TYPE_NONE); 380 IRQ_TYPE_NONE);
383 if (status) { 381 if (status) {
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client,
392 if (status) 390 if (status)
393 goto fail; 391 goto fail;
394 392
395 gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, 393 gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip,
396 client->irq); 394 client->irq);
397 gpio->irq_parent = client->irq; 395 gpio->irq_parent = client->irq;
398 } 396 }
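The pcf857x change replaces a single shared static irq_chip with one embedded per device instance; a shared irq_chip breaks down as soon as anything writes to it on behalf of one device, and per-instance irq_chips are the pattern GPIO drivers have been converging on. Minimal shape of the change, abridged to a few callbacks:

struct pcf857x {
	struct gpio_chip	chip;
	struct irq_chip		irqchip;	/* was a shared static */
	/* remaining fields unchanged */
};

	/* probe, when client->irq is available */
	gpio->irqchip.name = "pcf857x";
	gpio->irqchip.irq_enable = pcf857x_irq_enable;
	gpio->irqchip.irq_disable = pcf857x_irq_disable;
	status = gpiochip_irqchip_add_nested(&gpio->chip, &gpio->irqchip,
					     0, handle_level_irq,
					     IRQ_TYPE_NONE);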
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 1b79ebcfce3e..541fa6ac399d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
253 struct vf610_gpio_port *port; 253 struct vf610_gpio_port *port;
254 struct resource *iores; 254 struct resource *iores;
255 struct gpio_chip *gc; 255 struct gpio_chip *gc;
256 int i;
256 int ret; 257 int ret;
257 258
258 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); 259 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
319 if (ret < 0) 320 if (ret < 0)
320 return ret; 321 return ret;
321 322
323 /* Mask all GPIO interrupts */
324 for (i = 0; i < gc->ngpio; i++)
325 vf610_gpio_writel(0, port->base + PORT_PCR(i));
326
322 /* Clear the interrupt status register for all GPIO's */ 327 /* Clear the interrupt status register for all GPIO's */
323 vf610_gpio_writel(~0, port->base + PORT_ISFR); 328 vf610_gpio_writel(~0, port->base + PORT_ISFR);
324 329
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 48534bda73d3..259cf6ab969b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -357,8 +357,6 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock); 357 mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
358 358
359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) { 359 list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
360 struct gpio_desc *desc;
361
362 if (event->irq_requested) { 360 if (event->irq_requested) {
363 if (event->irq_is_wake) 361 if (event->irq_is_wake)
364 disable_irq_wake(event->irq); 362 disable_irq_wake(event->irq);
@@ -366,11 +364,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
366 free_irq(event->irq, event); 364 free_irq(event->irq, event);
367 } 365 }
368 366
369 desc = event->desc;
370 if (WARN_ON(IS_ERR(desc)))
371 continue;
372 gpiochip_unlock_as_irq(chip, event->pin); 367 gpiochip_unlock_as_irq(chip, event->pin);
373 gpiochip_free_own_desc(desc); 368 gpiochip_free_own_desc(event->desc);
374 list_del(&event->node); 369 list_del(&event->node);
375 kfree(event); 370 kfree(event);
376 } 371 }
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1651d7f0a303..d1adfdf50fb3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
828 /* Do not leak kernel stack to userspace */ 828 /* Do not leak kernel stack to userspace */
829 memset(&ge, 0, sizeof(ge)); 829 memset(&ge, 0, sizeof(ge));
830 830
831 ge.timestamp = le->timestamp; 831 /*
832 * We may be running from a nested threaded interrupt in which case
833 * we didn't get the timestamp from lineevent_irq_handler().
834 */
835 if (!le->timestamp)
836 ge.timestamp = ktime_get_real_ns();
837 else
838 ge.timestamp = le->timestamp;
832 839
833 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 840 if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
834 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 841 && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
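The gpiolib fix covers line events delivered via a nested threaded interrupt: in that case lineevent_irq_handler(), which normally records the timestamp in hard-IRQ context, never ran, so le->timestamp is still zero and the thread samples the clock itself. From the hunk:

	if (!le->timestamp)
		ge.timestamp = ktime_get_real_ns();	/* nested threaded IRQ */
	else
		ge.timestamp = le->timestamp;		/* set in hard IRQ */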
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index a028661d9e20..92b11de19581 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, 578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
579 { 0, 0, 0, 0, 0 }, 580 { 0, 0, 0, 0, 0 },
580}; 581};
581 582
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8a078f4ae73d..7ff3a28fc903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1701,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1701 amdgpu_xgmi_add_device(adev); 1701 amdgpu_xgmi_add_device(adev);
1702 amdgpu_amdkfd_device_init(adev); 1702 amdgpu_amdkfd_device_init(adev);
1703 1703
1704 if (amdgpu_sriov_vf(adev)) 1704 if (amdgpu_sriov_vf(adev)) {
1705 amdgpu_virt_init_data_exchange(adev);
1705 amdgpu_virt_release_full_gpu(adev, true); 1706 amdgpu_virt_release_full_gpu(adev, true);
1707 }
1706 1708
1707 return 0; 1709 return 0;
1708} 1710}
@@ -2632,9 +2634,6 @@ fence_driver_init:
2632 goto failed; 2634 goto failed;
2633 } 2635 }
2634 2636
2635 if (amdgpu_sriov_vf(adev))
2636 amdgpu_virt_init_data_exchange(adev);
2637
2638 amdgpu_fbdev_init(adev); 2637 amdgpu_fbdev_init(adev);
2639 2638
2640 r = amdgpu_pm_sysfs_init(adev); 2639 r = amdgpu_pm_sysfs_init(adev);
@@ -2798,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2798 struct drm_framebuffer *fb = crtc->primary->fb; 2797 struct drm_framebuffer *fb = crtc->primary->fb;
2799 struct amdgpu_bo *robj; 2798 struct amdgpu_bo *robj;
2800 2799
2801 if (amdgpu_crtc->cursor_bo) { 2800 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
2802 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2801 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2803 r = amdgpu_bo_reserve(aobj, true); 2802 r = amdgpu_bo_reserve(aobj, true);
2804 if (r == 0) { 2803 if (r == 0) {
@@ -2906,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2906 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 2905 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2907 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2906 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2908 2907
2909 if (amdgpu_crtc->cursor_bo) { 2908 if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
2910 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2909 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2911 r = amdgpu_bo_reserve(aobj, true); 2910 r = amdgpu_bo_reserve(aobj, true);
2912 if (r == 0) { 2911 if (r == 0) {
@@ -3226,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3226 r = amdgpu_ib_ring_tests(adev); 3225 r = amdgpu_ib_ring_tests(adev);
3227 3226
3228error: 3227error:
3228 amdgpu_virt_init_data_exchange(adev);
3229 amdgpu_virt_release_full_gpu(adev, true); 3229 amdgpu_virt_release_full_gpu(adev, true);
3230 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { 3230 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
3231 atomic_inc(&adev->vram_lost_counter); 3231 atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..b083b219b1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
188 goto cleanup; 188 goto cleanup;
189 } 189 }
190 190
191 r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); 191 if (!adev->enable_virtual_display) {
192 if (unlikely(r != 0)) { 192 r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
193 DRM_ERROR("failed to pin new abo buffer before flip\n"); 193 if (unlikely(r != 0)) {
194 goto unreserve; 194 DRM_ERROR("failed to pin new abo buffer before flip\n");
195 goto unreserve;
196 }
195 } 197 }
196 198
197 r = amdgpu_ttm_alloc_gart(&new_abo->tbo); 199 r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
211 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags); 213 amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
212 amdgpu_bo_unreserve(new_abo); 214 amdgpu_bo_unreserve(new_abo);
213 215
214 work->base = amdgpu_bo_gpu_offset(new_abo); 216 if (!adev->enable_virtual_display)
217 work->base = amdgpu_bo_gpu_offset(new_abo);
215 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 218 work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
216 amdgpu_get_vblank_counter_kms(dev, work->crtc_id); 219 amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
217 220
@@ -242,9 +245,10 @@ pflip_cleanup:
242 goto cleanup; 245 goto cleanup;
243 } 246 }
244unpin: 247unpin:
245 if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) { 248 if (!adev->enable_virtual_display)
246 DRM_ERROR("failed to unpin new abo in error path\n"); 249 if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
247 } 250 DRM_ERROR("failed to unpin new abo in error path\n");
251
248unreserve: 252unreserve:
249 amdgpu_bo_unreserve(new_abo); 253 amdgpu_bo_unreserve(new_abo);
250 254
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..0ed41a9d2d77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1686 effective_mode &= ~S_IWUSR; 1686 effective_mode &= ~S_IWUSR;
1687 1687
1688 if ((adev->flags & AMD_IS_APU) && 1688 if ((adev->flags & AMD_IS_APU) &&
1689 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 1689 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1690 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 1691 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1691 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 1692 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1692 return 0; 1693 return 0;
@@ -2008,6 +2009,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2008 2009
2009int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 2010int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2010{ 2011{
2012 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2011 int ret; 2013 int ret;
2012 2014
2013 if (adev->pm.sysfs_initialized) 2015 if (adev->pm.sysfs_initialized)
@@ -2091,12 +2093,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2091 "pp_power_profile_mode\n"); 2093 "pp_power_profile_mode\n");
2092 return ret; 2094 return ret;
2093 } 2095 }
2094 ret = device_create_file(adev->dev, 2096 if (hwmgr->od_enabled) {
2095 &dev_attr_pp_od_clk_voltage); 2097 ret = device_create_file(adev->dev,
2096 if (ret) { 2098 &dev_attr_pp_od_clk_voltage);
2097 DRM_ERROR("failed to create device file " 2099 if (ret) {
2098 "pp_od_clk_voltage\n"); 2100 DRM_ERROR("failed to create device file "
2099 return ret; 2101 "pp_od_clk_voltage\n");
2102 return ret;
2103 }
2100 } 2104 }
2101 ret = device_create_file(adev->dev, 2105 ret = device_create_file(adev->dev,
2102 &dev_attr_gpu_busy_percent); 2106 &dev_attr_gpu_busy_percent);
@@ -2118,6 +2122,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2118 2122
2119void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 2123void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2120{ 2124{
2125 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2126
2121 if (adev->pm.dpm_enabled == 0) 2127 if (adev->pm.dpm_enabled == 0)
2122 return; 2128 return;
2123 2129
@@ -2138,8 +2144,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
2138 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 2144 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
2139 device_remove_file(adev->dev, 2145 device_remove_file(adev->dev,
2140 &dev_attr_pp_power_profile_mode); 2146 &dev_attr_pp_power_profile_mode);
2141 device_remove_file(adev->dev, 2147 if (hwmgr->od_enabled)
2142 &dev_attr_pp_od_clk_voltage); 2148 device_remove_file(adev->dev,
2149 &dev_attr_pp_od_clk_voltage);
2143 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 2150 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
2144} 2151}
2145 2152
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 71913a18d142..a38e0fb4a6fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -38,6 +38,7 @@
38#include "amdgpu_gem.h" 38#include "amdgpu_gem.h"
39#include <drm/amdgpu_drm.h> 39#include <drm/amdgpu_drm.h>
40#include <linux/dma-buf.h> 40#include <linux/dma-buf.h>
41#include <linux/dma-fence-array.h>
41 42
42/** 43/**
43 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table 44 * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
@@ -187,6 +188,48 @@ error:
187 return ERR_PTR(ret); 188 return ERR_PTR(ret);
188} 189}
189 190
191static int
192__reservation_object_make_exclusive(struct reservation_object *obj)
193{
194 struct dma_fence **fences;
195 unsigned int count;
196 int r;
197
198 if (!reservation_object_get_list(obj)) /* no shared fences to convert */
199 return 0;
200
201 r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
202 if (r)
203 return r;
204
205 if (count == 0) {
206 /* Now that was unexpected. */
207 } else if (count == 1) {
208 reservation_object_add_excl_fence(obj, fences[0]);
209 dma_fence_put(fences[0]);
210 kfree(fences);
211 } else {
212 struct dma_fence_array *array;
213
214 array = dma_fence_array_create(count, fences,
215 dma_fence_context_alloc(1), 0,
216 false);
217 if (!array)
218 goto err_fences_put;
219
220 reservation_object_add_excl_fence(obj, &array->base);
221 dma_fence_put(&array->base);
222 }
223
224 return 0;
225
226err_fences_put:
227 while (count--)
228 dma_fence_put(fences[count]);
229 kfree(fences);
230 return -ENOMEM;
231}
232
190/** 233/**
191 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation 234 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
192 * @dma_buf: Shared DMA buffer 235 * @dma_buf: Shared DMA buffer
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
218 261
219 if (attach->dev->driver != adev->dev->driver) { 262 if (attach->dev->driver != adev->dev->driver) {
220 /* 263 /*
221 * Wait for all shared fences to complete before we switch to future 264 * We only create shared fences for internal use, but importers
222 * use of exclusive fence on this prime shared bo. 265 * of the dmabuf rely on exclusive fences for implicitly
266 * tracking write hazards. As any of the current fences may
267 * correspond to a write, we need to convert all existing
268 * fences on the reservation object into a single exclusive
269 * fence.
223 */ 270 */
224 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, 271 r = __reservation_object_make_exclusive(bo->tbo.resv);
225 true, false, 272 if (r)
226 MAX_SCHEDULE_TIMEOUT);
227 if (unlikely(r < 0)) {
228 DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
229 goto error_unreserve; 273 goto error_unreserve;
230 }
231 } 274 }
232 275
233 /* pin buffer into GTT */ 276 /* pin buffer into GTT */
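The amdgpu_prime change replaces "wait for all shared fences before exporting" with an explicit conversion of the reservation object's shared-fence list into one exclusive fence, so importers that track write hazards via the exclusive fence observe every potentially-writing fence instead of racing with them. The core of the helper above, with the count 0/1 special cases and error unwinding elided:

	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;
	/* dma_fence_array_create() takes ownership of 'fences' */
	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 0, false);
	reservation_object_add_excl_fence(obj, &array->base);
	dma_fence_put(&array->base);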
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fab0d637ee5..3a9b48b227ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle)
90 adev->psp.sos_fw = NULL; 90 adev->psp.sos_fw = NULL;
91 release_firmware(adev->psp.asd_fw); 91 release_firmware(adev->psp.asd_fw);
92 adev->psp.asd_fw = NULL; 92 adev->psp.asd_fw = NULL;
93 release_firmware(adev->psp.ta_fw); 93 if (adev->psp.ta_fw) {
94 adev->psp.ta_fw = NULL; 94 release_firmware(adev->psp.ta_fw);
95 adev->psp.ta_fw = NULL;
96 }
95 return 0; 97 return 0;
96} 98}
97 99
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
435 struct ta_xgmi_shared_memory *xgmi_cmd; 437 struct ta_xgmi_shared_memory *xgmi_cmd;
436 int ret; 438 int ret;
437 439
440 if (!psp->adev->psp.ta_fw)
441 return -ENOENT;
442
438 if (!psp->xgmi_context.initialized) { 443 if (!psp->xgmi_context.initialized) {
439 ret = psp_xgmi_init_shared_buf(psp); 444 ret = psp_xgmi_init_shared_buf(psp);
440 if (ret) 445 if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e73d152659a2..7c108e687683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -847,9 +847,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
847 bp->size = amdgpu_vm_bo_size(adev, level); 847 bp->size = amdgpu_vm_bo_size(adev, level);
848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE; 848 bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM; 849 bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
850 if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
851 adev->flags & AMD_IS_APU)
852 bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
853 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); 850 bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
854 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | 851 bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
855 AMDGPU_GEM_CREATE_CPU_GTT_USWC; 852 AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -3366,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
3366 struct amdgpu_task_info *task_info) 3363 struct amdgpu_task_info *task_info)
3367{ 3364{
3368 struct amdgpu_vm *vm; 3365 struct amdgpu_vm *vm;
3366 unsigned long flags;
3369 3367
3370 spin_lock(&adev->vm_manager.pasid_lock); 3368 spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
3371 3369
3372 vm = idr_find(&adev->vm_manager.pasid_idr, pasid); 3370 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
3373 if (vm) 3371 if (vm)
3374 *task_info = vm->task_info; 3372 *task_info = vm->task_info;
3375 3373
3376 spin_unlock(&adev->vm_manager.pasid_lock); 3374 spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
3377} 3375}
3378 3376
3379/** 3377/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 167 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
168 168
169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 169 dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
170 if (crtc->primary->fb) {
171 int r;
172 struct amdgpu_bo *abo;
173
174 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
175 r = amdgpu_bo_reserve(abo, true);
176 if (unlikely(r))
177 DRM_ERROR("failed to reserve abo before unpin\n");
178 else {
179 amdgpu_bo_unpin(abo);
180 amdgpu_bo_unreserve(abo);
181 }
182 }
183 170
184 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 171 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
185 amdgpu_crtc->encoder = NULL; 172 amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
692 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 679 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
693 680
694 drm_crtc_vblank_put(&amdgpu_crtc->base); 681 drm_crtc_vblank_put(&amdgpu_crtc->base);
695 schedule_work(&works->unpin_work); 682 amdgpu_bo_unref(&works->old_abo);
683 kfree(works->shared);
684 kfree(works);
696 685
697 return 0; 686 return 0;
698} 687}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 381f593b0cda..57cb3a51bda7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4233,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4233 u32 tmp; 4233 u32 tmp;
4234 u32 rb_bufsz; 4234 u32 rb_bufsz;
4235 u64 rb_addr, rptr_addr, wptr_gpu_addr; 4235 u64 rb_addr, rptr_addr, wptr_gpu_addr;
4236 int r;
4237 4236
4238 /* Set the write pointer delay */ 4237 /* Set the write pointer delay */
4239 WREG32(mmCP_RB_WPTR_DELAY, 0); 4238 WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4278,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
4278 amdgpu_ring_clear_ring(ring); 4277 amdgpu_ring_clear_ring(ring);
4279 gfx_v8_0_cp_gfx_start(adev); 4278 gfx_v8_0_cp_gfx_start(adev);
4280 ring->sched.ready = true; 4279 ring->sched.ready = true;
4281 r = amdgpu_ring_test_helper(ring);
4282 4280
4283 return r; 4281 return 0;
4284} 4282}
4285 4283
4286static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) 4284static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4369,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
4369 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 4367 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
4370 } 4368 }
4371 4369
4372 r = amdgpu_ring_test_helper(kiq_ring); 4370 amdgpu_ring_commit(kiq_ring);
4373 if (r) 4371
4374 DRM_ERROR("KCQ enable failed\n"); 4372 return 0;
4375 return r;
4376} 4373}
4377 4374
4378static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req) 4375static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4709,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4709 if (r) 4706 if (r)
4710 goto done; 4707 goto done;
4711 4708
4712 /* Test KCQs - reversing the order of rings seems to fix ring test failure 4709done:
4713 * after GPU reset 4710 return r;
4714 */ 4711}
4715 for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { 4712
4713static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
4714{
4715 int r, i;
4716 struct amdgpu_ring *ring;
4717
4718 /* collect all the ring_tests here, gfx, kiq, compute */
4719 ring = &adev->gfx.gfx_ring[0];
4720 r = amdgpu_ring_test_helper(ring);
4721 if (r)
4722 return r;
4723
4724 ring = &adev->gfx.kiq.ring;
4725 r = amdgpu_ring_test_helper(ring);
4726 if (r)
4727 return r;
4728
4729 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4716 ring = &adev->gfx.compute_ring[i]; 4730 ring = &adev->gfx.compute_ring[i];
4717 r = amdgpu_ring_test_helper(ring); 4731 amdgpu_ring_test_helper(ring);
4718 } 4732 }
4719 4733
4720done: 4734 return 0;
4721 return r;
4722} 4735}
4723 4736
4724static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) 4737static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4739,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4739 r = gfx_v8_0_kcq_resume(adev); 4752 r = gfx_v8_0_kcq_resume(adev);
4740 if (r) 4753 if (r)
4741 return r; 4754 return r;
4755
4756 r = gfx_v8_0_cp_test_all_rings(adev);
4757 if (r)
4758 return r;
4759
4742 gfx_v8_0_enable_gui_idle_interrupt(adev, true); 4760 gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4743 4761
4744 return 0; 4762 return 0;
@@ -5086,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
5086 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) 5104 REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
5087 gfx_v8_0_cp_gfx_resume(adev); 5105 gfx_v8_0_cp_gfx_resume(adev);
5088 5106
5107 gfx_v8_0_cp_test_all_rings(adev);
5108
5089 adev->gfx.rlc.funcs->start(adev); 5109 adev->gfx.rlc.funcs->start(adev);
5090 5110
5091 return 0; 5111 return 0;
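
Note on the gfx_v8_0 change above: instead of each resume path testing its own ring, a single gfx_v8_0_cp_test_all_rings() walks the gfx, KIQ and compute rings once everything is out of reset. A minimal userspace sketch of that "bring everything up, then test" pattern follows; ring_t and the helpers are illustrative stand-ins, not the amdgpu API.

#include <stdio.h>

typedef struct { const char *name; int ready; } ring_t;

/* stand-in for amdgpu_ring_test_helper(): nonzero on failure */
static int ring_test(const ring_t *r)
{
	return r->ready ? 0 : -1;
}

/* collect all ring tests in one place, after every ring is resumed */
static int test_all_rings(const ring_t *rings, int n)
{
	for (int i = 0; i < n; i++) {
		if (ring_test(&rings[i])) {
			fprintf(stderr, "%s ring test failed\n", rings[i].name);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	const ring_t rings[] = { {"gfx", 1}, {"kiq", 1}, {"compute0", 1} };
	return test_all_rings(rings, 3);
}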
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7556716038d3..fbca0494f871 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -113,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 113 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), 114 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), 115 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) 116 SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
117 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
118 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
119 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
117}; 120};
118 121
119static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = 122static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -135,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
135 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), 138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
136 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), 139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
137 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), 140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
138 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), 141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
139 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
140 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
141 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
142}; 142};
143 143
144static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = 144static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -3587,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3587{ 3587{
3588 uint32_t data, def; 3588 uint32_t data, def;
3589 3589
3590 amdgpu_gfx_rlc_enter_safe_mode(adev);
3591
3590 /* It is disabled by HW by default */ 3592 /* It is disabled by HW by default */
3591 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { 3593 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3592 /* 1 - RLC_CGTT_MGCG_OVERRIDE */ 3594 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3651,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
3651 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); 3653 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3652 } 3654 }
3653 } 3655 }
3656
3657 amdgpu_gfx_rlc_exit_safe_mode(adev);
3654} 3658}
3655 3659
3656static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, 3660static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
174 return r; 174 return r;
175 } 175 }
176 /* Retrieve checksum from mailbox2 */ 176 /* Retrieve checksum from mailbox2 */
177 if (req == IDH_REQ_GPU_INIT_ACCESS) { 177 if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
178 adev->virt.fw_reserve.checksum_key = 178 adev->virt.fw_reserve.checksum_key =
179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, 179 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2)); 180 mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4cd31a276dcd..186db182f924 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
93static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, 93static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
94 bool enable) 94 bool enable)
95{ 95{
96 u32 tmp = 0;
96 97
98 if (enable) {
99 tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
100 REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
101 REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);
102
103 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
104 lower_32_bits(adev->doorbell.base));
105 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
106 upper_32_bits(adev->doorbell.base));
107 }
108
109 WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
97} 110}
98 111
99static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, 112static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
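
The doorbell hunk above composes the control word with REG_SET_FIELD(), amdgpu's read-modify-write helper built on generated MASK/__SHIFT register headers. A self-contained sketch of the same mask-and-shift idiom, with a hypothetical two-field layout standing in for the real register:

#include <stdint.h>
#include <stdio.h>

/* hypothetical field layout; the real masks come from generated headers */
#define APER_EN_MASK    0x00000001u
#define APER_EN_SHIFT   0
#define APER_MODE_MASK  0x00000002u
#define APER_MODE_SHIFT 1

/* read-modify-write one named field inside a register word */
#define SET_FIELD(val, field, fval) \
	(((val) & ~field##_MASK) | (((uint32_t)(fval) << field##_SHIFT) & field##_MASK))

int main(void)
{
	uint32_t tmp = 0;

	tmp = SET_FIELD(tmp, APER_EN, 1);
	tmp = SET_FIELD(tmp, APER_MODE, 1);
	printf("cntl = 0x%08x\n", (unsigned)tmp); /* prints 0x00000003 */
	return 0;
}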
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0c6e7f9b143f..189fcb004579 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
152 152
153 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); 153 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
154 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); 154 err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
155 if (err) 155 if (err) {
156 goto out2; 156 release_firmware(adev->psp.ta_fw);
157 157 adev->psp.ta_fw = NULL;
158 err = amdgpu_ucode_validate(adev->psp.ta_fw); 158 dev_info(adev->dev,
159 if (err) 159 "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
160 goto out2; 160 } else {
161 161 err = amdgpu_ucode_validate(adev->psp.ta_fw);
162 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; 162 if (err)
163 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); 163 goto out2;
164 adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); 164
165 adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + 165 ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
166 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); 166 adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
167 adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
168 adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
169 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
170 }
167 171
168 return 0; 172 return 0;
169 173
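
The psp_v11_0 hunk above downgrades a missing TA firmware from a fatal error to a notice: the handle is cleared and init continues without the XGMI TA. A userspace analogue of that optional-load policy, with fopen() standing in for request_firmware() and a made-up path:

#include <stdio.h>

static FILE *load_optional_fw(const char *name)
{
	FILE *fw = fopen(name, "rb");
	if (!fw) {
		fprintf(stderr, "notice: failed to load firmware \"%s\", continuing\n",
			name);
		return NULL;   /* caller treats NULL as "feature absent" */
	}
	return fw;
}

int main(void)
{
	FILE *ta_fw = load_optional_fw("/nonexistent/amdgpu_ta.bin");

	/* init succeeds either way; the optional feature is simply skipped */
	if (ta_fw)
		fclose(ta_fw);
	return 0;
}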
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd0bfe140ee0..6811a5d05b27 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 78 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), 79 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000), 80 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), 81 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100), 82 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
84 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), 83 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
96static const struct soc15_reg_golden golden_settings_sdma_vg10[] = { 95static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 96 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
98 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002), 97 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
98 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
99 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002), 99 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002) 100 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
101}; 101};
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
103static const struct soc15_reg_golden golden_settings_sdma_vg12[] = { 103static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 104 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001), 105 SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
106 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
106 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001), 107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
107 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001) 108 SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
108}; 109};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 8849b74078d6..9b639974c70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle)
729 case CHIP_RAVEN: 729 case CHIP_RAVEN:
730 adev->asic_funcs = &soc15_asic_funcs; 730 adev->asic_funcs = &soc15_asic_funcs;
731 if (adev->rev_id >= 0x8) 731 if (adev->rev_id >= 0x8)
732 adev->external_rev_id = adev->rev_id + 0x81; 732 adev->external_rev_id = adev->rev_id + 0x79;
733 else if (adev->pdev->device == 0x15d8) 733 else if (adev->pdev->device == 0x15d8)
734 adev->external_rev_id = adev->rev_id + 0x41; 734 adev->external_rev_id = adev->rev_id + 0x41;
735 else if (adev->rev_id == 1)
736 adev->external_rev_id = adev->rev_id + 0x20;
735 else 737 else
736 adev->external_rev_id = 0x1; 738 adev->external_rev_id = adev->rev_id + 0x01;
737 739
738 if (adev->rev_id >= 0x8) { 740 if (adev->rev_id >= 0x8) {
739 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | 741 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index fbf0ee5201c3..c3613604a4f8 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -4,8 +4,8 @@
4 4
5config HSA_AMD 5config HSA_AMD
6 bool "HSA kernel driver for AMD GPU devices" 6 bool "HSA kernel driver for AMD GPU devices"
7 depends on DRM_AMDGPU && X86_64 7 depends on DRM_AMDGPU && (X86_64 || ARM64)
8 imply AMD_IOMMU_V2 8 imply AMD_IOMMU_V2 if X86_64
9 select MMU_NOTIFIER 9 select MMU_NOTIFIER
10 help 10 help
11 Enable this if you want to use HSA features on AMD GPU devices. 11 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index b7bc7d7d048f..2e7c44955f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
863 return 0; 863 return 0;
864} 864}
865 865
866#ifdef CONFIG_X86_64
866static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, 867static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
867 uint32_t *num_entries, 868 uint32_t *num_entries,
868 struct crat_subtype_iolink *sub_type_hdr) 869 struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
905 906
906 return 0; 907 return 0;
907} 908}
909#endif
908 910
909/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU 911/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
910 * 912 *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
920 struct crat_subtype_generic *sub_type_hdr; 922 struct crat_subtype_generic *sub_type_hdr;
921 int avail_size = *size; 923 int avail_size = *size;
922 int numa_node_id; 924 int numa_node_id;
925#ifdef CONFIG_X86_64
923 uint32_t entries = 0; 926 uint32_t entries = 0;
927#endif
924 int ret = 0; 928 int ret = 0;
925 929
926 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU) 930 if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
982 sub_type_hdr->length); 986 sub_type_hdr->length);
983 987
984 /* Fill in Subtype: IO Link */ 988 /* Fill in Subtype: IO Link */
989#ifdef CONFIG_X86_64
985 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size, 990 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
986 &entries, 991 &entries,
987 (struct crat_subtype_iolink *)sub_type_hdr); 992 (struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
992 997
993 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 998 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
994 sub_type_hdr->length * entries); 999 sub_type_hdr->length * entries);
1000#else
1001 pr_info("IO link not available for non x86 platforms\n");
1002#endif
995 1003
996 crat_table->num_domains++; 1004 crat_table->num_domains++;
997 } 1005 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5f5b2acedbac..09da91644f9f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
1093 * the GPU device is not already present in the topology device 1093 * the GPU device is not already present in the topology device
1094 * list then return NULL. This means a new topology device has to 1094 * list then return NULL. This means a new topology device has to
1095 * be created for this GPU. 1095 * be created for this GPU.
1096 * TODO: Rather than assiging @gpu to first topology device withtout
1097 * gpu attached, it will better to have more stringent check.
1098 */ 1096 */
1099static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) 1097static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1100{ 1098{
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1102 struct kfd_topology_device *out_dev = NULL; 1100 struct kfd_topology_device *out_dev = NULL;
1103 1101
1104 down_write(&topology_lock); 1102 down_write(&topology_lock);
1105 list_for_each_entry(dev, &topology_device_list, list) 1103 list_for_each_entry(dev, &topology_device_list, list) {
1104 /* Discrete GPUs need their own topology device list
1105 * entries. Don't assign them to CPU/APU nodes.
1106 */
1107 if (!gpu->device_info->needs_iommu_device &&
1108 dev->node_props.cpu_cores_count)
1109 continue;
1110
1106 if (!dev->gpu && (dev->node_props.simd_count > 0)) { 1111 if (!dev->gpu && (dev->node_props.simd_count > 0)) {
1107 dev->gpu = gpu; 1112 dev->gpu = gpu;
1108 out_dev = dev; 1113 out_dev = dev;
1109 break; 1114 break;
1110 } 1115 }
1116 }
1111 up_write(&topology_lock); 1117 up_write(&topology_lock);
1112 return out_dev; 1118 return out_dev;
1113} 1119}
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
1392 1398
1393static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask) 1399static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1394{ 1400{
1395 const struct cpuinfo_x86 *cpuinfo;
1396 int first_cpu_of_numa_node; 1401 int first_cpu_of_numa_node;
1397 1402
1398 if (!cpumask || cpumask == cpu_none_mask) 1403 if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
1400 first_cpu_of_numa_node = cpumask_first(cpumask); 1405 first_cpu_of_numa_node = cpumask_first(cpumask);
1401 if (first_cpu_of_numa_node >= nr_cpu_ids) 1406 if (first_cpu_of_numa_node >= nr_cpu_ids)
1402 return -1; 1407 return -1;
1403 cpuinfo = &cpu_data(first_cpu_of_numa_node); 1408#ifdef CONFIG_X86_64
1404 1409 return cpu_data(first_cpu_of_numa_node).apicid;
1405 return cpuinfo->apicid; 1410#else
1411 return first_cpu_of_numa_node;
1412#endif
1406} 1413}
1407 1414
1408/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor 1415/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a9a28dbc3e24..0b392bfca284 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -699,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
699{ 699{
700 struct amdgpu_dm_connector *aconnector; 700 struct amdgpu_dm_connector *aconnector;
701 struct drm_connector *connector; 701 struct drm_connector *connector;
702 struct drm_dp_mst_topology_mgr *mgr;
703 int ret;
704 bool need_hotplug = false;
702 705
703 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 706 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
704 707
705 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 708 list_for_each_entry(connector, &dev->mode_config.connector_list,
706 aconnector = to_amdgpu_dm_connector(connector); 709 head) {
707 if (aconnector->dc_link->type == dc_connection_mst_branch && 710 aconnector = to_amdgpu_dm_connector(connector);
708 !aconnector->mst_port) { 711 if (aconnector->dc_link->type != dc_connection_mst_branch ||
712 aconnector->mst_port)
713 continue;
709 714
710 if (suspend) 715 mgr = &aconnector->mst_mgr;
711 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); 716
712 else 717 if (suspend) {
713 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); 718 drm_dp_mst_topology_mgr_suspend(mgr);
714 } 719 } else {
720 ret = drm_dp_mst_topology_mgr_resume(mgr);
721 if (ret < 0) {
722 drm_dp_mst_topology_mgr_set_mst(mgr, false);
723 need_hotplug = true;
724 }
725 }
715 } 726 }
716 727
717 drm_modeset_unlock(&dev->mode_config.connection_mutex); 728 drm_modeset_unlock(&dev->mode_config.connection_mutex);
729
730 if (need_hotplug)
731 drm_kms_helper_hotplug_event(dev);
718} 732}
719 733
720/** 734/**
@@ -898,7 +912,6 @@ static int dm_resume(void *handle)
898 struct drm_plane_state *new_plane_state; 912 struct drm_plane_state *new_plane_state;
899 struct dm_plane_state *dm_new_plane_state; 913 struct dm_plane_state *dm_new_plane_state;
900 enum dc_connection_type new_connection_type = dc_connection_none; 914 enum dc_connection_type new_connection_type = dc_connection_none;
901 int ret;
902 int i; 915 int i;
903 916
904 /* power on hardware */ 917 /* power on hardware */
@@ -971,13 +984,13 @@ static int dm_resume(void *handle)
971 } 984 }
972 } 985 }
973 986
974 ret = drm_atomic_helper_resume(ddev, dm->cached_state); 987 drm_atomic_helper_resume(ddev, dm->cached_state);
975 988
976 dm->cached_state = NULL; 989 dm->cached_state = NULL;
977 990
978 amdgpu_dm_irq_resume_late(adev); 991 amdgpu_dm_irq_resume_late(adev);
979 992
980 return ret; 993 return 0;
981} 994}
982 995
983/** 996/**
@@ -1759,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1759 + caps.min_input_signal * 0x101; 1772 + caps.min_input_signal * 0x101;
1760 1773
1761 if (dc_link_set_backlight_level(dm->backlight_link, 1774 if (dc_link_set_backlight_level(dm->backlight_link,
1762 brightness, 0, 0)) 1775 brightness, 0))
1763 return 0; 1776 return 0;
1764 else 1777 else
1765 return 1; 1778 return 1;
@@ -4069,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
4069 } 4082 }
4070 4083
4071 if (connector_type == DRM_MODE_CONNECTOR_HDMIA || 4084 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4072 connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 4085 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4086 connector_type == DRM_MODE_CONNECTOR_eDP) {
4073 drm_connector_attach_vrr_capable_property( 4087 drm_connector_attach_vrr_capable_property(
4074 &aconnector->base); 4088 &aconnector->base);
4075 } 4089 }
@@ -5920,7 +5934,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
5920 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 5934 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5921 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 5935 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
5922 !new_crtc_state->color_mgmt_changed && 5936 !new_crtc_state->color_mgmt_changed &&
5923 !new_crtc_state->vrr_enabled) 5937 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
5924 continue; 5938 continue;
5925 5939
5926 if (!new_crtc_state->enable) 5940 if (!new_crtc_state->enable)
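
In the s3_handle_mst() hunk above, a failed drm_dp_mst_topology_mgr_resume() only sets need_hotplug while connection_mutex is held; drm_kms_helper_hotplug_event() fires after the unlock, since the hotplug path may re-enter connector handling. A pthread sketch of that defer-the-notification pattern, with the resume failure simulated:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t connection_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for drm_dp_mst_topology_mgr_resume(); connector 1 "fails" */
static int topology_mgr_resume(int idx)
{
	return idx == 1 ? -1 : 0;
}

/* stand-in for drm_kms_helper_hotplug_event(); may relock internally */
static void hotplug_event(void)
{
	printf("hotplug event fired after unlock\n");
}

int main(void)
{
	bool need_hotplug = false;

	pthread_mutex_lock(&connection_mutex);
	for (int i = 0; i < 3; i++) {
		if (topology_mgr_resume(i) < 0)
			need_hotplug = true;  /* record it, act later */
	}
	pthread_mutex_unlock(&connection_mutex);

	if (need_hotplug)
		hotplug_event();  /* safe: the lock is no longer held */
	return 0;
}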
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9a7ac58eb18e..ddd75a4d8ba5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
671 return bytes_from_user; 671 return bytes_from_user;
672} 672}
673 673
674/*
675 * Returns the min and max vrr vfreq through the connector's debugfs file.
676 * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range
677 */
678static int vrr_range_show(struct seq_file *m, void *data)
679{
680 struct drm_connector *connector = m->private;
681 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
682
683 if (connector->status != connector_status_connected)
684 return -ENODEV;
685
686 seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq);
687 seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq);
688
689 return 0;
690}
691DEFINE_SHOW_ATTRIBUTE(vrr_range);
692
674static const struct file_operations dp_link_settings_debugfs_fops = { 693static const struct file_operations dp_link_settings_debugfs_fops = {
675 .owner = THIS_MODULE, 694 .owner = THIS_MODULE,
676 .read = dp_link_settings_read, 695 .read = dp_link_settings_read,
@@ -697,7 +716,8 @@ static const struct {
697} dp_debugfs_entries[] = { 716} dp_debugfs_entries[] = {
698 {"link_settings", &dp_link_settings_debugfs_fops}, 717 {"link_settings", &dp_link_settings_debugfs_fops},
699 {"phy_settings", &dp_phy_settings_debugfs_fop}, 718 {"phy_settings", &dp_phy_settings_debugfs_fop},
700 {"test_pattern", &dp_phy_test_pattern_fops} 719 {"test_pattern", &dp_phy_test_pattern_fops},
720 {"vrr_range", &vrr_range_fops}
701}; 721};
702 722
703int connector_debugfs_init(struct amdgpu_dm_connector *connector) 723int connector_debugfs_init(struct amdgpu_dm_connector *connector)
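
The vrr_range entry above leans on DEFINE_SHOW_ATTRIBUTE(), the seq_file helper that turns a single show() callback into a complete file_operations. For the vrr_range case the macro expands to approximately the following boilerplate (a close paraphrase of include/linux/seq_file.h of this era, shown for illustration):

static int vrr_range_open(struct inode *inode, struct file *file)
{
	/* single_open() stores vrr_range_show and its private data so
	 * every read regenerates the two-line Min/Max report */
	return single_open(file, vrr_range_show, inode->i_private);
}

static const struct file_operations vrr_range_fops = {
	.owner   = THIS_MODULE,
	.open    = vrr_range_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};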
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 52deacf39841..b0265dbebd4c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)
2190 2190
2191bool dc_link_set_backlight_level(const struct dc_link *link, 2191bool dc_link_set_backlight_level(const struct dc_link *link,
2192 uint32_t backlight_pwm_u16_16, 2192 uint32_t backlight_pwm_u16_16,
2193 uint32_t frame_ramp, 2193 uint32_t frame_ramp)
2194 const struct dc_stream_state *stream)
2195{ 2194{
2196 struct dc *core_dc = link->ctx->dc; 2195 struct dc *core_dc = link->ctx->dc;
2197 struct abm *abm = core_dc->res_pool->abm; 2196 struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
2206 (abm->funcs->set_backlight_level_pwm == NULL)) 2205 (abm->funcs->set_backlight_level_pwm == NULL))
2207 return false; 2206 return false;
2208 2207
2209 if (stream)
2210 ((struct dc_stream_state *)stream)->bl_pwm_level =
2211 backlight_pwm_u16_16;
2212
2213 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); 2208 use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
2214 2209
2215 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", 2210 DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(
2637 2632
2638 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2633 if (dc_is_dp_signal(pipe_ctx->stream->signal))
2639 enable_stream_features(pipe_ctx); 2634 enable_stream_features(pipe_ctx);
2640
2641 dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
2642 pipe_ctx->stream->bl_pwm_level,
2643 0,
2644 pipe_ctx->stream);
2645 } 2635 }
2646 2636
2647} 2637}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 29f19d57ff7a..b2243e0dad1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
146 */ 146 */
147bool dc_link_set_backlight_level(const struct dc_link *dc_link, 147bool dc_link_set_backlight_level(const struct dc_link *dc_link,
148 uint32_t backlight_pwm_u16_16, 148 uint32_t backlight_pwm_u16_16,
149 uint32_t frame_ramp, 149 uint32_t frame_ramp);
150 const struct dc_stream_state *stream);
151 150
152int dc_link_get_backlight_level(const struct dc_link *dc_link); 151int dc_link_get_backlight_level(const struct dc_link *dc_link);
153 152
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index be34d638e15d..d70c9e1cda3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -91,7 +91,6 @@ struct dc_stream_state {
91 91
92 /* DMCU info */ 92 /* DMCU info */
93 unsigned int abm_level; 93 unsigned int abm_level;
94 unsigned int bl_pwm_level;
95 94
96 /* from core_stream struct */ 95 /* from core_stream struct */
97 struct dc_context *ctx; 96 struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index afd287f08bc9..19801bdba0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements(
591 dc, 591 dc,
592 context->bw.dce.sclk_khz); 592 context->bw.dce.sclk_khz);
593 593
594 pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; 594 /*
 595 * As a workaround for >4x4K lightup, set dcfclock to the min_engine_clock value.
 596 * This is not required for fewer than 5 displays,
 597 * thus don't request dcfclk in dc to avoid impact
598 * on power saving.
599 *
600 */
 601 pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
602 pp_display_cfg->min_engine_clock_khz : 0;
595 603
596 pp_display_cfg->min_engine_clock_deep_sleep_khz 604 pp_display_cfg->min_engine_clock_deep_sleep_khz
597 = context->bw.dce.sclk_deep_sleep_khz; 605 = context->bw.dce.sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 4bf24758217f..8f09b8625c5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
1000 1000
1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio); 1001 pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
1002 1002
1003 if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL) 1003 if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/ 1004 /*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 1005 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1006 /* un-mute audio */ 1006 /* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1017 pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
1018 pipe_ctx->stream_res.stream_enc, true); 1018 pipe_ctx->stream_res.stream_enc, true);
1019 if (pipe_ctx->stream_res.audio) { 1019 if (pipe_ctx->stream_res.audio) {
1020 struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
1021
1020 if (option != KEEP_ACQUIRED_RESOURCE || 1022 if (option != KEEP_ACQUIRED_RESOURCE ||
1021 !dc->debug.az_endpoint_mute_only) { 1023 !dc->debug.az_endpoint_mute_only) {
 1022 /* only disable az_endpoint if power down or free */ 1024 /* only disable az_endpoint if power down or free */
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
1036 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); 1038 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
1037 pipe_ctx->stream_res.audio = NULL; 1039 pipe_ctx->stream_res.audio = NULL;
1038 } 1040 }
1041 if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
 1042 /* apply the PME w/a in order to wake AZ from D3 after the audio stream is disabled */
1043 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
1039 1044
 1040 /* TODO: notify audio driver if the audio modes list changed 1045 /* TODO: notify audio driver if the audio modes list changed
 1041 * add an audio mode list change flag */ 1046 * add an audio mode list change flag */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index dcb3c5530236..cd1ebe57ed59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
463 if (src_y_offset >= (int)param->viewport.height) 463 if (src_y_offset >= (int)param->viewport.height)
464 cur_en = 0; /* not visible beyond bottom edge*/ 464 cur_en = 0; /* not visible beyond bottom edge*/
465 465
466 if (src_y_offset < 0) 466 if (src_y_offset + (int)height <= 0)
467 cur_en = 0; /* not visible beyond top edge*/ 467 cur_en = 0; /* not visible beyond top edge*/
468 468
469 REG_UPDATE(CURSOR0_CONTROL, 469 REG_UPDATE(CURSOR0_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 345af015d061..d1acd7165bc8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
1140 if (src_y_offset >= (int)param->viewport.height) 1140 if (src_y_offset >= (int)param->viewport.height)
1141 cur_en = 0; /* not visible beyond bottom edge*/ 1141 cur_en = 0; /* not visible beyond bottom edge*/
1142 1142
1143 if (src_y_offset < 0) //+ (int)hubp->curs_attr.height 1143 if (src_y_offset + (int)hubp->curs_attr.height <= 0)
1144 cur_en = 0; /* not visible beyond top edge*/ 1144 cur_en = 0; /* not visible beyond top edge*/
1145 1145
1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) 1146 if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
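
Both cursor hunks above fix the same off-by-a-cursor-height bug: the cursor is invisible only when it lies entirely above the viewport, i.e. when offset + height <= 0, not merely when its origin is negative. A small check capturing the corrected logic, with illustrative values:

#include <stdio.h>

static int cursor_visible(int y_offset, int height, int viewport_h)
{
	if (y_offset >= viewport_h)      /* fully below the bottom edge */
		return 0;
	if (y_offset + height <= 0)      /* fully above the top edge */
		return 0;
	return 1;
}

int main(void)
{
	/* 64-line cursor straddling the top edge of a 1080-line viewport */
	printf("%d\n", cursor_visible(-32, 64, 1080)); /* 1: still visible */
	printf("%d\n", cursor_visible(-64, 64, 1080)); /* 0: fully above */
	return 0;
}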
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 91e015e14355..58a12ddf12f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
2355 top_pipe_to_program->plane_state->update_flags.bits.full_update) 2355 top_pipe_to_program->plane_state->update_flags.bits.full_update)
2356 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2356 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 2357 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2358 2358 tg = pipe_ctx->stream_res.tg;
2359 /* Skip inactive pipes and ones already updated */ 2359 /* Skip inactive pipes and ones already updated */
2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2360 if (!pipe_ctx->stream || pipe_ctx->stream == stream
2361 || !pipe_ctx->plane_state) 2361 || !pipe_ctx->plane_state
2362 || !tg->funcs->is_tg_enabled(tg))
2362 continue; 2363 continue;
2363 2364
2364 pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); 2365 tg->funcs->lock(tg);
2365 2366
2366 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( 2367 pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2367 pipe_ctx->plane_res.hubp, 2368 pipe_ctx->plane_res.hubp,
2368 &pipe_ctx->dlg_regs, 2369 &pipe_ctx->dlg_regs,
2369 &pipe_ctx->ttu_regs); 2370 &pipe_ctx->ttu_regs);
2370 }
2371
2372 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2373 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2374 2371
2375 if (!pipe_ctx->stream || pipe_ctx->stream == stream 2372 tg->funcs->unlock(tg);
2376 || !pipe_ctx->plane_state) 2373 }
2377 continue;
2378
2379 dcn10_pipe_control_lock(dc, pipe_ctx, false);
2380 }
2381 2374
2382 if (num_planes == 0) 2375 if (num_planes == 0)
2383 false_optc_underflow_wa(dc, stream, tg); 2376 false_optc_underflow_wa(dc, stream, tg);
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 00f63b7dd32f..c11a443dcbc8 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
57#define NUM_POWER_FN_SEGS 8 57#define NUM_POWER_FN_SEGS 8
58#define NUM_BL_CURVE_SEGS 16 58#define NUM_BL_CURVE_SEGS 16
59 59
60#pragma pack(push, 1)
60/* NOTE: iRAM is 256B in size */ 61/* NOTE: iRAM is 256B in size */
61struct iram_table_v_2 { 62struct iram_table_v_2 {
62 /* flags */ 63 /* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
100 uint8_t dummy8; /* 0xfe */ 101 uint8_t dummy8; /* 0xfe */
101 uint8_t dummy9; /* 0xff */ 102 uint8_t dummy9; /* 0xff */
102}; 103};
104#pragma pack(pop)
103 105
104static uint16_t backlight_8_to_16(unsigned int backlight_8bit) 106static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
105{ 107{
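
The #pragma pack(push, 1) added above matters because iram_table_v_2 must map byte-for-byte onto the 256-byte DMCU iRAM; without packing, the compiler may insert padding and silently change field offsets. A freestanding demonstration with hypothetical fields:

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
struct packed_table {       /* hypothetical fields, not the real iram layout */
	uint8_t  flags;
	uint16_t crc;       /* packing keeps this at offset 1, no pad byte */
	uint8_t  level;
};
#pragma pack(pop)

struct unpacked_table {
	uint8_t  flags;
	uint16_t crc;       /* compiler is free to pad before this field */
	uint8_t  level;
};

int main(void)
{
	/* typically prints "packed=4 unpacked=6" on common ABIs */
	printf("packed=%zu unpacked=%zu\n",
	       sizeof(struct packed_table), sizeof(struct unpacked_table));
	return 0;
}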
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 1479ea1dc3e7..789c4f288485 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -127,12 +127,13 @@ enum amd_pp_task {
127}; 127};
128 128
129enum PP_SMC_POWER_PROFILE { 129enum PP_SMC_POWER_PROFILE {
130 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0, 130 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
131 PP_SMC_POWER_PROFILE_POWERSAVING = 0x1, 131 PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
132 PP_SMC_POWER_PROFILE_VIDEO = 0x2, 132 PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
133 PP_SMC_POWER_PROFILE_VR = 0x3, 133 PP_SMC_POWER_PROFILE_VIDEO = 0x3,
134 PP_SMC_POWER_PROFILE_COMPUTE = 0x4, 134 PP_SMC_POWER_PROFILE_VR = 0x4,
135 PP_SMC_POWER_PROFILE_CUSTOM = 0x5, 135 PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
136 PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
136}; 137};
137 138
138enum { 139enum {
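
Inserting PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT at 0x0 renumbers every profile, so the hunks below must grow each enum-indexed table in lockstep (the workload arrays go from 5 to 6 entries, profile_name from 6 to 7, Workload_Policy_Max from 5 to 6). A sketch of guarding that invariant with a COUNT sentinel and a static assert, which the kernel tables here keep in sync by hand:

#include <stdio.h>

enum profile {
	PROFILE_BOOTUP_DEFAULT,   /* new first entry shifts all the others */
	PROFILE_FULLSCREEN3D,
	PROFILE_POWERSAVING,
	PROFILE_VIDEO,
	PROFILE_VR,
	PROFILE_COMPUTE,
	PROFILE_COUNT             /* sentinel tracks the enum automatically */
};

static const char * const profile_name[] = {
	"BOOTUP_DEFAULT", "3D_FULL_SCREEN", "POWER_SAVING",
	"VIDEO", "VR", "COMPUTE",
};

/* compile-time guard: table and enum can never drift apart silently */
_Static_assert(sizeof(profile_name) / sizeof(profile_name[0]) == PROFILE_COUNT,
	       "profile_name out of sync with enum profile");

int main(void)
{
	for (int i = 0; i < PROFILE_COUNT; i++)
		printf("0x%x: %s\n", i, profile_name[i]);
	return 0;
}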
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 0173d0480024..310b102a9292 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
64 64
65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) 65static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
66{ 66{
67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2; 67 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0; 68 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1; 69 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3; 70 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4; 71 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
72 72 hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
73 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING; 73
74 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO; 74 hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
75 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; 75 hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
76 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR; 76 hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
77 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE; 77 hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
78 hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
79 hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
78} 80}
79 81
80int hwmgr_early_init(struct pp_hwmgr *hwmgr) 82int hwmgr_early_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index f95c5f50eb0f..5273de3c5b98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1033 break; 1033 break;
1034 case amd_pp_dpp_clock: 1034 case amd_pp_dpp_clock:
1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk; 1035 pclk_vol_table = pinfo->vdd_dep_on_dppclk;
1036 break;
1036 default: 1037 default:
1037 return -EINVAL; 1038 return -EINVAL;
1038 } 1039 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index d91390459326..c8f5c00dd1e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,8 +77,9 @@
77#define PCIE_BUS_CLK 10000 77#define PCIE_BUS_CLK 10000
78#define TCLK (PCIE_BUS_CLK / 10) 78#define TCLK (PCIE_BUS_CLK / 10)
79 79
80static const struct profile_mode_setting smu7_profiling[6] = 80static const struct profile_mode_setting smu7_profiling[7] =
81 {{1, 0, 100, 30, 1, 0, 100, 10}, 81 {{0, 0, 0, 0, 0, 0, 0, 0},
82 {1, 0, 100, 30, 1, 0, 100, 10},
82 {1, 10, 0, 30, 0, 0, 0, 0}, 83 {1, 10, 0, 30, 0, 0, 0, 0},
83 {0, 0, 0, 0, 1, 10, 16, 31}, 84 {0, 0, 0, 0, 1, 10, 16, 31},
84 {1, 0, 11, 50, 1, 0, 100, 10}, 85 {1, 0, 11, 50, 1, 0, 100, 10},
@@ -4889,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4889 uint32_t i, size = 0; 4890 uint32_t i, size = 0;
4890 uint32_t len; 4891 uint32_t len;
4891 4892
4892 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4893 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4894 "3D_FULL_SCREEN",
4893 "POWER_SAVING", 4895 "POWER_SAVING",
4894 "VIDEO", 4896 "VIDEO",
4895 "VR", 4897 "VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 79c86247d0ac..91e3bbe6d61d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
804 804
805 hwmgr->backend = data; 805 hwmgr->backend = data;
806 806
807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
810 810
811 vega10_set_default_registry_data(hwmgr); 811 vega10_set_default_registry_data(hwmgr);
812 data->disable_dpm_mask = 0xff; 812 data->disable_dpm_mask = 0xff;
@@ -4668,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4668{ 4668{
4669 struct vega10_hwmgr *data = hwmgr->backend; 4669 struct vega10_hwmgr *data = hwmgr->backend;
4670 uint32_t i, size = 0; 4670 uint32_t i, size = 0;
4671 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,}, 4671 static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
4672 {70, 60, 1, 3,},
4672 {90, 60, 0, 0,}, 4673 {90, 60, 0, 0,},
4673 {70, 60, 0, 0,}, 4674 {70, 60, 0, 0,},
4674 {70, 90, 0, 0,}, 4675 {70, 90, 0, 0,},
4675 {30, 60, 0, 6,}, 4676 {30, 60, 0, 6,},
4676 }; 4677 };
4677 static const char *profile_name[6] = {"3D_FULL_SCREEN", 4678 static const char *profile_name[7] = {"BOOTUP_DEFAULT",
4679 "3D_FULL_SCREEN",
4678 "POWER_SAVING", 4680 "POWER_SAVING",
4679 "VIDEO", 4681 "VIDEO",
4680 "VR", 4682 "VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index b8747a5c9204..99d596dc0e89 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
32#include "vega10_pptable.h" 32#include "vega10_pptable.h"
33 33
34#define NUM_DSPCLK_LEVELS 8 34#define NUM_DSPCLK_LEVELS 8
35#define VEGA10_ENGINECLOCK_HARDMAX 198000
35 36
36static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, 37static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
37 enum phm_platform_caps cap) 38 enum phm_platform_caps cap)
@@ -258,7 +259,26 @@ static int init_over_drive_limits(
258 struct pp_hwmgr *hwmgr, 259 struct pp_hwmgr *hwmgr,
259 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) 260 const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
260{ 261{
261 hwmgr->platform_descriptor.overdriveLimit.engineClock = 262 const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
263 (const ATOM_Vega10_GFXCLK_Dependency_Table *)
264 (((unsigned long) powerplay_table) +
265 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
266 bool is_acg_enabled = false;
267 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
268
269 if (gfxclk_dep_table->ucRevId == 1) {
270 patom_record_v2 =
271 (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
272 is_acg_enabled =
273 (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
274 }
275
276 if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
277 !is_acg_enabled)
278 hwmgr->platform_descriptor.overdriveLimit.engineClock =
279 VEGA10_ENGINECLOCK_HARDMAX;
280 else
281 hwmgr->platform_descriptor.overdriveLimit.engineClock =
262 le32_to_cpu(powerplay_table->ulMaxODEngineClock); 282 le32_to_cpu(powerplay_table->ulMaxODEngineClock);
263 hwmgr->platform_descriptor.overdriveLimit.memoryClock = 283 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
264 le32_to_cpu(powerplay_table->ulMaxODMemoryClock); 284 le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 54364444ecd1..0c8212902275 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
753 return 0; 753 return 0;
754} 754}
755 755
756static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
757{
758 uint32_t result;
759
760 PP_ASSERT_WITH_CODE(
761 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
762 "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
763 return -EINVAL);
764
765 result = smum_get_argument(hwmgr);
766 PP_ASSERT_WITH_CODE(result == 1,
767 "Failed to run ACG BTC!", return -EINVAL);
768
769 return 0;
770}
771
756static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) 772static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
757{ 773{
758 struct vega12_hwmgr *data = 774 struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
931 "Failed to initialize SMC table!", 947 "Failed to initialize SMC table!",
932 result = tmp_result); 948 result = tmp_result);
933 949
950 tmp_result = vega12_run_acg_btc(hwmgr);
951 PP_ASSERT_WITH_CODE(!tmp_result,
952 "Failed to run ACG BTC!",
953 result = tmp_result);
954
934 result = vega12_enable_all_smu_features(hwmgr); 955 result = vega12_enable_all_smu_features(hwmgr);
935 PP_ASSERT_WITH_CODE(!result, 956 PP_ASSERT_WITH_CODE(!result,
936 "Failed to enable all smu features!", 957 "Failed to enable all smu features!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 26154f9b2178..82935a3bd950 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -390,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
390 390
391 hwmgr->backend = data; 391 hwmgr->backend = data;
392 392
393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO]; 393 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 394 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO; 395 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
396 396
397 vega20_set_default_registry_data(hwmgr); 397 vega20_set_default_registry_data(hwmgr);
398 398
@@ -980,6 +980,9 @@ static int vega20_od8_set_feature_capabilities(
980 pp_table->FanZeroRpmEnable) 980 pp_table->FanZeroRpmEnable)
981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL; 981 od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
982 982
983 if (!od_settings->overdrive8_capabilities)
984 hwmgr->od_enabled = false;
985
983 return 0; 986 return 0;
984} 987}
985 988
@@ -1689,13 +1692,6 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
1689 (PPCLK_UCLK << 16) | (min_freq & 0xffff))), 1692 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1690 "Failed to set soft min memclk !", 1693 "Failed to set soft min memclk !",
1691 return ret); 1694 return ret);
1692
1693 min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
1694 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
1695 hwmgr, PPSMC_MSG_SetHardMinByFreq,
1696 (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
1697 "Failed to set hard min memclk !",
1698 return ret);
1699 } 1695 }
1700 1696
1701 if (data->smu_features[GNLD_DPM_UVD].enabled && 1697 if (data->smu_features[GNLD_DPM_UVD].enabled &&
@@ -2248,6 +2244,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2248 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2244 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2249 soft_max_level = mask ? (fls(mask) - 1) : 0; 2245 soft_max_level = mask ? (fls(mask) - 1) : 0;
2250 2246
2247 if (soft_max_level >= data->dpm_table.gfx_table.count) {
2248 pr_err("Clock level specified %d is over max allowed %d\n",
2249 soft_max_level,
2250 data->dpm_table.gfx_table.count - 1);
2251 return -EINVAL;
2252 }
2253
2251 data->dpm_table.gfx_table.dpm_state.soft_min_level = 2254 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2252 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; 2255 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2253 data->dpm_table.gfx_table.dpm_state.soft_max_level = 2256 data->dpm_table.gfx_table.dpm_state.soft_max_level =
@@ -2268,6 +2271,13 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
2268 soft_min_level = mask ? (ffs(mask) - 1) : 0; 2271 soft_min_level = mask ? (ffs(mask) - 1) : 0;
2269 soft_max_level = mask ? (fls(mask) - 1) : 0; 2272 soft_max_level = mask ? (fls(mask) - 1) : 0;
2270 2273
2274 if (soft_max_level >= data->dpm_table.mem_table.count) {
2275 pr_err("Clock level specified %d is over max allowed %d\n",
2276 soft_max_level,
2277 data->dpm_table.mem_table.count - 1);
2278 return -EINVAL;
2279 }
2280
2271 data->dpm_table.mem_table.dpm_state.soft_min_level = 2281 data->dpm_table.mem_table.dpm_state.soft_min_level =
2272 data->dpm_table.mem_table.dpm_levels[soft_min_level].value; 2282 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2273 data->dpm_table.mem_table.dpm_state.soft_max_level = 2283 data->dpm_table.mem_table.dpm_state.soft_max_level =
@@ -3261,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
3261 int pplib_workload = 0; 3271 int pplib_workload = 0;
3262 3272
3263 switch (power_profile) { 3273 switch (power_profile) {
3274 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
3275 pplib_workload = WORKLOAD_DEFAULT_BIT;
3276 break;
3264 case PP_SMC_POWER_PROFILE_FULLSCREEN3D: 3277 case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
3265 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; 3278 pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
3266 break; 3279 break;
@@ -3290,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
3290 uint32_t i, size = 0; 3303 uint32_t i, size = 0;
3291 uint16_t workload_type = 0; 3304 uint16_t workload_type = 0;
3292 static const char *profile_name[] = { 3305 static const char *profile_name[] = {
3306 "BOOTUP_DEFAULT",
3293 "3D_FULL_SCREEN", 3307 "3D_FULL_SCREEN",
3294 "POWER_SAVING", 3308 "POWER_SAVING",
3295 "VIDEO", 3309 "VIDEO",
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 0d298a0409f5..8cb831b6a016 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -705,7 +705,7 @@ enum PP_TABLE_VERSION {
705/** 705/**
706 * The main hardware manager structure. 706 * The main hardware manager structure.
707 */ 707 */
708#define Workload_Policy_Max 5 708#define Workload_Policy_Max 6
709 709
710struct pp_hwmgr { 710struct pp_hwmgr {
711 void *adev; 711 void *adev;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e738cb52..e6403b9549f1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
98#define DP0_STARTVAL 0x064c 98#define DP0_STARTVAL 0x064c
99#define DP0_ACTIVEVAL 0x0650 99#define DP0_ACTIVEVAL 0x0650
100#define DP0_SYNCVAL 0x0654 100#define DP0_SYNCVAL 0x0654
101#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
102#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
101#define DP0_MISC 0x0658 103#define DP0_MISC 0x0658
102#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ 104#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
103#define BPC_6 (0 << 5) 105#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
142#define DP0_LTLOOPCTRL 0x06d8 144#define DP0_LTLOOPCTRL 0x06d8
143#define DP0_SNKLTCTRL 0x06e4 145#define DP0_SNKLTCTRL 0x06e4
144 146
147#define DP1_SRCCTRL 0x07a0
148
145/* PHY */ 149/* PHY */
146#define DP_PHY_CTRL 0x0800 150#define DP_PHY_CTRL 0x0800
147#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */ 151#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
150#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */ 154#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
151#define PHY_RDY BIT(16) /* PHY Main Channels Ready */ 155#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
152#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */ 156#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
157#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
153#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */ 158#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
154#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */ 159#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
155 160
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
540 unsigned long rate; 545 unsigned long rate;
541 u32 value; 546 u32 value;
542 int ret; 547 int ret;
548 u32 dp_phy_ctrl;
543 549
544 rate = clk_get_rate(tc->refclk); 550 rate = clk_get_rate(tc->refclk);
545 switch (rate) { 551 switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
564 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2; 570 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
565 tc_write(SYS_PLLPARAM, value); 571 tc_write(SYS_PLLPARAM, value);
566 572
567 tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN); 573 dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
574 if (tc->link.base.num_lanes == 2)
575 dp_phy_ctrl |= PHY_2LANE;
576 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
568 577
569 /* 578 /*
570 * Initially PLLs are in bypass. Force PLL parameter update, 579 * Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
719 728
720 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay)); 729 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
721 730
722 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0)); 731 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
732 ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
733 ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
723 734
724 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | 735 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
725 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); 736 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
 	if (!tc->mode)
 		return -EINVAL;
 
-	/* from excel file - DP0_SrcCtrl */
-	tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
-		 DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
-		 DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
-	/* from excel file - DP1_SrcCtrl */
-	tc_write(0x07a0, 0x00003083);
+	tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+	/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
+	tc_write(DP1_SRCCTRL,
+		 (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
+		 ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
 
 	rate = clk_get_rate(tc->refclk);
 	switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
 	}
 	value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
 	tc_write(SYS_PLLPARAM, value);
+
 	/* Setup Main Link */
-	dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+	dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
+	if (tc->link.base.num_lanes == 2)
+		dp_phy_ctrl |= PHY_2LANE;
 	tc_write(DP_PHY_CTRL, dp_phy_ctrl);
 	msleep(100);
 
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
 static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
 						    struct drm_display_mode *mode)
 {
+	struct tc_data *tc = connector_to_tc(connector);
+	u32 req, avail;
+	u32 bits_per_pixel = 24;
+
 	/* DPI interface clock limitation: upto 154 MHz */
 	if (mode->clock > 154000)
 		return MODE_CLOCK_HIGH;
 
+	req = mode->clock * bits_per_pixel / 8;
+	avail = tc->link.base.num_lanes * tc->link.base.rate;
+
+	if (req > avail)
+		return MODE_BAD;
+
 	return MODE_OK;
 }
 
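A note on the units in the bandwidth test above, as we read them: mode->clock is the pixel clock in kHz, and link.rate is the per-lane DP symbol rate in kHz; with 8b/10b coding each symbol carries one payload byte per lane, so both sides of the comparison come out in kbytes/s. A self-contained sketch with worked numbers (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: the mode_valid bandwidth test with units spelled out. */
static bool mode_fits_link(uint32_t pixclk_khz, uint32_t bpp,
			   uint32_t lanes, uint32_t rate_khz)
{
	uint32_t req = pixclk_khz * bpp / 8;	/* payload, kbytes/s */
	uint32_t avail = lanes * rate_khz;	/* capacity, kbytes/s */

	return req <= avail;
}

/* 1920x1080@60 (148500 kHz) at 24 bpp needs 445500 kbytes/s; two HBR
 * lanes (2 * 270000 = 540000) suffice, a single lane (270000) does not. */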
@@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
 	/* Create eDP connector */
 	drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
 	ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
-				 DRM_MODE_CONNECTOR_eDP);
+				 tc->panel ? DRM_MODE_CONNECTOR_eDP :
+				 DRM_MODE_CONNECTOR_DisplayPort);
 	if (ret)
 		return ret;
 
@@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
 
 	drm_display_info_set_bus_formats(&tc->connector.display_info,
 					 &bus_format, 1);
+	tc->connector.display_info.bus_flags =
+		DRM_BUS_FLAG_DE_HIGH |
+		DRM_BUS_FLAG_PIXDATA_NEGEDGE |
+		DRM_BUS_FLAG_SYNC_NEGEDGE;
 	drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
 
 	return 0;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c40889888a16..9a1f41adfc67 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1296,12 +1296,11 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	    (arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
 		return -EINVAL;
 
-	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
 		return -ENOMEM;
 
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
 	state->acquire_ctx = &ctx;
 	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
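The point of the reordering above (and the matching drm_mode_object.c hunk later) is that drm_modeset_acquire_init() must be paired with drm_modeset_acquire_fini() on every exit, so no acquire context may be initialized while a plain early return (here -ENOMEM) is still possible. A hedged sketch of the corrected shape, under the assumption that the example_atomic_begin() helper is purely illustrative:

#include <drm/drm_atomic.h>
#include <drm/drm_modeset_lock.h>

/* Sketch: allocate first; init the ww-acquire context only once every
 * early return is behind us, so each init has a fini on all paths. */
static int example_atomic_begin(struct drm_device *dev,
				struct drm_atomic_state **out,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;	/* ctx untouched, nothing to fini */

	drm_modeset_acquire_init(ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
	state->acquire_ctx = ctx;
	*out = state;
	return 0;	/* caller later drops locks and calls acquire_fini */
}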
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 2d6c491a0542..516e82d0ed50 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1273,6 +1273,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
 	{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
 	/* LG LP140WF6-SPM1 eDP panel */
 	{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+	/* Apple panels need some additional handling to support PSR */
+	{ OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
 };
 
 #undef OUI
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d3af098b0922..d73703a695e8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1621,6 +1621,64 @@ static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
 	       var_1->transp.msb_right == var_2->transp.msb_right;
 }
 
+static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
+					 u8 depth)
+{
+	switch (depth) {
+	case 8:
+		var->red.offset = 0;
+		var->green.offset = 0;
+		var->blue.offset = 0;
+		var->red.length = 8; /* 8bit DAC */
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 15:
+		var->red.offset = 10;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 5;
+		var->blue.length = 5;
+		var->transp.offset = 15;
+		var->transp.length = 1;
+		break;
+	case 16:
+		var->red.offset = 11;
+		var->green.offset = 5;
+		var->blue.offset = 0;
+		var->red.length = 5;
+		var->green.length = 6;
+		var->blue.length = 5;
+		var->transp.offset = 0;
+		break;
+	case 24:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+	case 32:
+		var->red.offset = 16;
+		var->green.offset = 8;
+		var->blue.offset = 0;
+		var->red.length = 8;
+		var->green.length = 8;
+		var->blue.length = 8;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		break;
+	default:
+		break;
+	}
+}
+
 /**
  * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
  * @var: screeninfo to check
@@ -1632,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_framebuffer *fb = fb_helper->fb;
 
-	if (var->pixclock != 0 || in_dbg_master())
+	if (in_dbg_master())
 		return -EINVAL;
 
+	if (var->pixclock != 0) {
+		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+		var->pixclock = 0;
+	}
+
 	if ((drm_format_info_block_width(fb->format, 0) > 1) ||
 	    (drm_format_info_block_height(fb->format, 0) > 1))
 		return -EINVAL;
@@ -1655,6 +1718,20 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	}
 
 	/*
+	 * Workaround for SDL 1.2, which is known to be setting all pixel format
+	 * fields values to zero in some cases. We treat this situation as a
+	 * kind of "use some reasonable autodetected values".
+	 */
+	if (!var->red.offset && !var->green.offset &&
+	    !var->blue.offset && !var->transp.offset &&
+	    !var->red.length && !var->green.length &&
+	    !var->blue.length && !var->transp.length &&
+	    !var->red.msb_right && !var->green.msb_right &&
+	    !var->blue.msb_right && !var->transp.msb_right) {
+		drm_fb_helper_fill_pixel_fmt(var, fb->format->depth);
+	}
+
+	/*
 	 * drm fbdev emulation doesn't support changing the pixel format at all,
 	 * so reject all pixel format changing requests.
 	 */
@@ -1967,59 +2044,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
 
-	switch (fb->format->depth) {
-	case 8:
-		info->var.red.offset = 0;
-		info->var.green.offset = 0;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8; /* 8bit DAC */
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 15:
-		info->var.red.offset = 10;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 5;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 15;
-		info->var.transp.length = 1;
-		break;
-	case 16:
-		info->var.red.offset = 11;
-		info->var.green.offset = 5;
-		info->var.blue.offset = 0;
-		info->var.red.length = 5;
-		info->var.green.length = 6;
-		info->var.blue.length = 5;
-		info->var.transp.offset = 0;
-		break;
-	case 24:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 0;
-		info->var.transp.length = 0;
-		break;
-	case 32:
-		info->var.red.offset = 16;
-		info->var.green.offset = 8;
-		info->var.blue.offset = 0;
-		info->var.red.length = 8;
-		info->var.green.length = 8;
-		info->var.blue.length = 8;
-		info->var.transp.offset = 24;
-		info->var.transp.length = 8;
-		break;
-	default:
-		break;
-	}
+	drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth);
 
 	info->var.xres = fb_width;
 	info->var.yres = fb_height;
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 99cba8ea5d82..5df1256618cc 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
 	object_count = cl->object_count;
 
-	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
+	object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+				 array_size(object_count, sizeof(__u32)));
 	if (IS_ERR(object_ids))
 		return PTR_ERR(object_ids);
 
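Why this matters: with a user-controlled object_count, the open-coded multiplication can wrap (especially on 32-bit), allocating a buffer smaller than the copy that follows. array_size() from <linux/overflow.h> saturates to SIZE_MAX on overflow, so memdup_user() fails cleanly instead. A minimal sketch of the pattern (copy_ids() is a hypothetical helper):

#include <linux/overflow.h>
#include <linux/string.h>

/* Sketch: overflow-safe duplication of a user-supplied array. */
static void *copy_ids(const void __user *src, size_t count)
{
	/* array_size() returns SIZE_MAX on overflow, which memdup_user()
	 * rejects, rather than silently allocating a short buffer. */
	return memdup_user(src, array_size(count, sizeof(__u32)));
}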
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index cd9bc0ce9be0..004191d01772 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -459,11 +459,11 @@ static int set_property_atomic(struct drm_mode_object *obj,
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 
-	drm_modeset_acquire_init(&ctx, 0);
-
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
 		return -ENOMEM;
+
+	drm_modeset_acquire_init(&ctx, 0);
 	state->acquire_ctx = &ctx;
 retry:
 	if (prop == state->dev->mode_config.dpms_property) {
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 24a750436559..f91e02c87fd8 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
 	if (mode->hsync)
 		return mode->hsync;
 
-	if (mode->htotal < 0)
+	if (mode->htotal <= 0)
 		return 0;
 
 	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
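The one-character fix above guards the division that follows: htotal is never negative in practice, so the old "< 0" test missed exactly the dangerous case, htotal == 0, which divides by zero. A small sketch of the guarded derivation with worked numbers (the helper returns Hz for clarity; the name is illustrative):

/* Sketch: hsync from pixel clock and total horizontal pixels.
 * For CEA 1080p60: 148500 kHz * 1000 / 2200 = 67500 Hz. */
static int mode_hsync_hz(int clock_khz, int htotal)
{
	if (htotal <= 0)	/* htotal == 0 is the divide-by-zero case */
		return 0;
	return (clock_khz * 1000) / htotal;
}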
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a9d9df6c85ad..693748ad8b88 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,8 +61,9 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
 		return NULL;
 
 	dmah->size = size;
-	dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
-					  GFP_KERNEL | __GFP_COMP);
+	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
+					 &dmah->busaddr,
+					 GFP_KERNEL | __GFP_COMP);
 
 	if (dmah->vaddr == NULL) {
 		kfree(dmah);
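As we read it, this hunk belongs to the tree-wide cleanup in which dma_alloc_coherent() itself started returning zeroed memory, making the dma_zalloc_coherent() wrapper redundant; the behavior is unchanged, only the spelling. A minimal sketch of the post-cleanup call (alloc_dma_buf() is a hypothetical helper):

#include <linux/dma-mapping.h>

/* Sketch: dma_alloc_coherent() already zeroes the buffer, so no
 * zalloc variant is needed. */
static void *alloc_dma_buf(struct device *dev, size_t size, dma_addr_t *bus)
{
	return dma_alloc_coherent(dev, size, bus, GFP_KERNEL);
}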
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b5475c91e2ef..e9f343b124b0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 5af11cf1b482..e1675a00df12 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -41,7 +41,7 @@ struct intel_gvt_mpt {
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
 	void (*host_exit)(struct device *dev, void *gvt);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
-	void (*detach_vgpu)(unsigned long handle);
+	void (*detach_vgpu)(void *vgpu);
 	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
 	unsigned long (*from_virt_to_mfn)(void *p);
 	int (*enable_page_track)(unsigned long handle, u64 gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c1072143da1d..dd3dfd00f4e6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
 
 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }
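The hunk above is a bounds-check fix: previously the user-controlled vm_pgoff was ignored and the aperture base was always mapped; now the requested offset is extracted, validated against the vGPU's slice of the aperture, and only then translated to a host pfn for remap_pfn_range(). A generic sketch of that validate-then-translate pattern (all names here are illustrative, not the driver's API):

#include <linux/errno.h>

/* Sketch: clamp a user-supplied page offset to the region a caller may
 * map before translating it into a backing pfn. */
static int check_and_translate(unsigned long pgoff_req,
			       unsigned long npages_req,
			       unsigned long region_pages,
			       unsigned long base_pfn,
			       unsigned long *pfn_out)
{
	if (pgoff_req >= region_pages ||
	    npages_req > region_pages - pgoff_req)
		return -EINVAL;	/* request escapes the region */

	*pfn_out = base_pfn + pgoff_req;
	return 0;
}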
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
 	return 0;
 }
 
-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
 {
-	/* nothing to do here */
+	int i;
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+	if (!vgpu->vdev.region)
+		return;
+
+	for (i = 0; i < vgpu->vdev.num_regions; i++)
+		if (vgpu->vdev.region[i].ops->release)
+			vgpu->vdev.region[i].ops->release(vgpu,
+					&vgpu->vdev.region[i]);
+	vgpu->vdev.num_regions = 0;
+	kfree(vgpu->vdev.region);
+	vgpu->vdev.region = NULL;
 }
 
 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67f19992b226..3ed34123d8d1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
 	if (!intel_gvt_host.mpt->detach_vgpu)
 		return;
 
-	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+	intel_gvt_host.mpt->detach_vgpu(vgpu);
 }
 
 #define MSI_CAP_CONTROL(offset) (offset + 2)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+	wa_ctx->indirect_ctx.obj = NULL;
+	wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 	return 0;
 }
 
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct i915_request *rq;
+	int ret = 0;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (workload->req)
+		goto out;
+
+	rq = i915_request_alloc(engine, shadow_ctx);
+	if (IS_ERR(rq)) {
+		gvt_vgpu_err("fail to allocate gem request\n");
+		ret = PTR_ERR(rq);
+		goto out;
+	}
+	workload->req = i915_request_get(rq);
+out:
+	return ret;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
 	struct intel_context *ce;
-	struct i915_request *rq;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->req)
+	if (workload->shadow)
 		return 0;
 
 	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 		goto err_shadow;
 	}
 
-	rq = i915_request_alloc(engine, shadow_ctx);
-	if (IS_ERR(rq)) {
-		gvt_vgpu_err("fail to allocate gem request\n");
-		ret = PTR_ERR(rq);
-		goto err_shadow;
-	}
-	workload->req = i915_request_get(rq);
-
-	ret = populate_shadow_context(workload);
-	if (ret)
-		goto err_req;
-
+	workload->shadow = true;
 	return 0;
-err_req:
-	rq = fetch_and_zero(&workload->req);
-	i915_request_put(rq);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
 err_unpin:
@@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	mutex_lock(&vgpu->vgpu_lock);
 	mutex_lock(&dev_priv->drm.struct_mutex);
 
+	ret = intel_gvt_workload_req_alloc(workload);
+	if (ret)
+		goto err_req;
+
 	ret = intel_gvt_scan_and_shadow_workload(workload);
 	if (ret)
 		goto out;
 
-	ret = prepare_workload(workload);
+	ret = populate_shadow_context(workload);
+	if (ret) {
+		release_shadow_wa_ctx(&workload->wa_ctx);
+		goto out;
+	}
 
+	ret = prepare_workload(workload);
 out:
-	if (ret)
-		workload->status = ret;
-
 	if (!IS_ERR_OR_NULL(workload->req)) {
 		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
 			      ring_id, workload->req);
 		i915_request_add(workload->req);
 		workload->dispatched = true;
 	}
-
+err_req:
+	if (ret)
+		workload->status = ret;
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
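Taken together, these scheduler hunks split request allocation out of the scan/shadow step and make shadowing idempotent via the new workload->shadow flag, so a failure in a later step no longer leaves a dangling request or redoes (and double-frees) earlier work on redispatch. A hedged sketch of the idempotent-step pattern, with purely illustrative names:

/* Sketch: an expensive step records a flag so that redispatch after a
 * later failure does not repeat the work already done. */
struct work_item {
	int shadow;	/* nonzero once shadowing has completed */
	int status;
};

static int shadow_once(struct work_item *w)
{
	if (w->shadow)
		return 0;	/* already done on a previous attempt */
	/* ... scan and shadow the guest ringbuffer here ... */
	w->shadow = 1;
	return 0;
}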
@@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	list_del_init(&workload->list);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
 	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 		/* if workload->status is not successful means HW GPU
 		 * has occurred GPU hang or something wrong with i915/GVT,
@@ -1263,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+	release_shadow_batch_buffer(workload);
+	release_shadow_wa_ctx(&workload->wa_ctx);
+
 	if (workload->shadow_mm)
 		intel_vgpu_mm_put(workload->shadow_mm);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ca5529d0e48e..2065cba59aab 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadow; /* if workload has done shadow of guest request */
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 38dcee1ca062..40a61ef9aac1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
 	intel_runtime_pm_get(i915);
 	gpu = i915_capture_gpu_state(i915);
 	intel_runtime_pm_put(i915);
-	if (!gpu)
-		return -ENOMEM;
+	if (IS_ERR(gpu))
+		return PTR_ERR(gpu);
 
 	file->private_data = gpu;
 	return 0;
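The debugfs, sysfs, and i915_gpu_error.c hunks in this patch all converge on the kernel's ERR_PTR convention: a pointer return can carry an errno (here, ERR_PTR(-ENOMEM) on allocation failure and ERR_PTR(-ENODEV) once capture is disabled), and callers must test IS_ERR() before dereferencing. A minimal sketch of the caller side (open_state() is a hypothetical helper):

#include <linux/err.h>

/* Sketch: unpacking an ERR_PTR-style return before use. */
static int open_state(void *state, void **out)
{
	if (IS_ERR(state))
		return PTR_ERR(state);	/* e.g. -ENOMEM or -ENODEV */
	*out = state;
	return 0;
}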
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-	file->private_data = i915_first_error_state(inode->i_private);
+	struct i915_gpu_state *error;
+
+	error = i915_first_error_state(inode->i_private);
+	if (IS_ERR(error))
+		return PTR_ERR(error);
+
+	file->private_data = error;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 216f52b744a6..c882ea94172c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+	      unsigned long addr, unsigned long size)
+{
+	if (vma->vm_file != filp)
+		return false;
+
+	return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
 /**
  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  * it is mapped to.
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		return -EINTR;
 	}
 	vma = find_vma(mm, addr);
-	if (vma)
+	if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
 		vma->vm_page_prot =
 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 	else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..bd17dd1f5da5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+	int err;
 
 	/*
 	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	return i915_vma_pin(ppgtt->vma,
+	err = i915_vma_pin(ppgtt->vma,
 			    0, GEN6_PD_ALIGN,
 			    PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		goto unpin;
+
+	return 0;
+
+unpin:
+	ppgtt->pin_count = 0;
+	return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..3f9ce403c755 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 {
 	struct i915_gpu_state *error;
 
+	/* Check if GPU capture has been disabled */
+	error = READ_ONCE(i915->gpu_error.first_error);
+	if (IS_ERR(error))
+		return error;
+
 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
-	if (!error)
-		return NULL;
+	if (!error) {
+		i915_disable_error_state(i915, -ENOMEM);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	kref_init(&error->ref);
 	error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
 		return;
 
 	error = i915_capture_gpu_state(i915);
-	if (!error) {
-		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-		i915_disable_error_state(i915, -ENOMEM);
+	if (IS_ERR(error))
 		return;
-	}
 
 	i915_error_capture_msg(i915, error, engine_mask, error_msg);
 	DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
 
 	spin_lock_irq(&i915->gpu_error.lock);
 	error = i915->gpu_error.first_error;
-	if (error)
+	if (!IS_ERR_OR_NULL(error))
 		i915_gpu_state_get(error);
 	spin_unlock_irq(&i915->gpu_error.lock);
 
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
 
 	spin_lock_irq(&i915->gpu_error.lock);
 	error = i915->gpu_error.first_error;
-	i915->gpu_error.first_error = NULL;
+	if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+		i915->gpu_error.first_error = NULL;
 	spin_unlock_irq(&i915->gpu_error.lock);
 
-	if (!IS_ERR(error))
+	if (!IS_ERR_OR_NULL(error))
 		i915_gpu_state_put(error);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6c8f8fdfda5..017fc602a10e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event)
 	 * Update the bitmask of enabled events and increment
 	 * the event reference counter.
 	 */
-	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+	BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
+	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
 	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
 	i915->pmu.enable |= BIT_ULL(bit);
 	i915->pmu.enable_count[bit]++;
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event)
 		engine = intel_engine_lookup_user(i915,
 						  engine_event_class(event),
 						  engine_event_instance(event));
-		GEM_BUG_ON(!engine);
-		engine->pmu.enable |= BIT(sample);
 
-		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+			     I915_ENGINE_SAMPLE_COUNT);
+		BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+			     I915_ENGINE_SAMPLE_COUNT);
+		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+		engine->pmu.enable |= BIT(sample);
 		engine->pmu.enable_count[sample]++;
 	}
 
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event)
 		engine = intel_engine_lookup_user(i915,
 						  engine_event_class(event),
 						  engine_event_instance(event));
-		GEM_BUG_ON(!engine);
-		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+
+		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+		GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
 		/*
 		 * Decrement the reference count and clear the enabled
 		 * bitmask when the last listener on an event goes away.
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event)
 		engine->pmu.enable &= ~BIT(sample);
 	}
 
-	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+	GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
 	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
 	/*
 	 * Decrement the reference count and clear the enabled
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 7f164ca3db12..b3728c5f13e7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -31,6 +31,8 @@ enum {
 	((1 << I915_PMU_SAMPLE_BITS) + \
 	 (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
 
+#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
+
 struct i915_pmu_sample {
 	u64 cur;
 };
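The PMU hunks above replace enum-derived bounds with ARRAY_SIZE() so the runtime checks track the arrays themselves, and add BUILD_BUG_ON() so a mismatch between array and count fails at compile time rather than overflowing at runtime. A small self-contained sketch of the combination (names and count are illustrative):

#include <linux/build_bug.h>
#include <linux/kernel.h>

#define SAMPLE_COUNT 3
static unsigned int enable_count[SAMPLE_COUNT];

/* Sketch: compile-time size check plus array-derived runtime bound. */
static void bump(unsigned int sample)
{
	BUILD_BUG_ON(ARRAY_SIZE(enable_count) != SAMPLE_COUNT);
	if (WARN_ON(sample >= ARRAY_SIZE(enable_count)))
		return;		/* index can never scribble past the array */
	enable_count[sample]++;
}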
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..067054cf4a86 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1790,7 +1790,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_C_LN0_OFFSET		0x162C40
 #define _CNL_PORT_TX_D_LN0_OFFSET		0x162E40
 #define _CNL_PORT_TX_F_LN0_OFFSET		0x162840
-#define _CNL_PORT_TX_DW_GRP(port, dw)	(_PICK((port), \
+#define _CNL_PORT_TX_DW_GRP(dw, port)	(_PICK((port), \
 					       _CNL_PORT_TX_AE_GRP_OFFSET, \
 					       _CNL_PORT_TX_B_GRP_OFFSET, \
 					       _CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1798,7 @@ enum i915_power_well_id {
 					       _CNL_PORT_TX_AE_GRP_OFFSET, \
 					       _CNL_PORT_TX_F_GRP_OFFSET) + \
 					       4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(port, dw)	(_PICK((port), \
+#define _CNL_PORT_TX_DW_LN0(dw, port)	(_PICK((port), \
 					       _CNL_PORT_TX_AE_LN0_OFFSET, \
 					       _CNL_PORT_TX_B_LN0_OFFSET, \
 					       _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1834,9 @@ enum i915_power_well_id {
 
 #define _CNL_PORT_TX_DW4_LN0_AE			0x162450
 #define _CNL_PORT_TX_DW4_LN1_AE			0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
-#define CNL_PORT_TX_DW4_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
-#define CNL_PORT_TX_DW4_LN(port, ln)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+#define CNL_PORT_TX_DW4_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
+#define CNL_PORT_TX_DW4_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
+#define CNL_PORT_TX_DW4_LN(port, ln)	_MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
 					      ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
 						       _CNL_PORT_TX_DW4_LN0_AE)))
 #define ICL_PORT_TX_DW4_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1864,12 @@ enum i915_power_well_id {
 #define   RTERM_SELECT(x)		((x) << 3)
 #define   RTERM_SELECT_MASK		(0x7 << 3)
 
-#define CNL_PORT_TX_DW7_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
-#define CNL_PORT_TX_DW7_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define CNL_PORT_TX_DW7_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
+#define CNL_PORT_TX_DW7_LN0(port)	_MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
+#define ICL_PORT_TX_DW7_AUX(port)	_MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)	_MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)	_MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define   N_SCALAR(x)			((x) << 24)
 #define   N_SCALAR_MASK			(0x7F << 24)
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 	ssize_t ret;
 
 	gpu = i915_first_error_state(i915);
-	if (gpu) {
+	if (IS_ERR(gpu)) {
+		ret = PTR_ERR(gpu);
+	} else if (gpu) {
 		ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
 		i915_gpu_state_put(gpu);
 	} else {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f3e1d6a0b7dd..7edce1b7b348 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
 	{ 0x2, 0x7F, 0x3F, 0x00, 0x00 },	/* 400   400      0.0   */
 };
 
-struct icl_combo_phy_ddi_buf_trans {
-	u32 dw2_swing_select;
-	u32 dw2_swing_scalar;
-	u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
-				/* Voltage mV  db    */
-	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
-	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
-	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
-	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
-	{ 0xB, 0x70, 0x0018 },	/* 600         0.0   */
-	{ 0xB, 0x70, 0x3015 },	/* 600         3.5   */
-	{ 0xB, 0x70, 0x6012 },	/* 600         6.0   */
-	{ 0x5, 0x00, 0x0018 },	/* 800         0.0   */
-	{ 0x5, 0x00, 0x3015 },	/* 800         3.5   */
-	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
-				/* Voltage mV  db    */
-	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
-				/* Voltage mV  db    */
-	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
-	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
-	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
-	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
-	{ 0x4, 0x98, 0x0018 },	/* 600         0.0   */
-	{ 0x4, 0x98, 0x3015 },	/* 600         3.5   */
-	{ 0x4, 0x98, 0x6012 },	/* 600         6.0   */
-	{ 0x5, 0x76, 0x0018 },	/* 800         0.0   */
-	{ 0x5, 0x76, 0x3015 },	/* 800         3.5   */
-	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
-				/* Voltage mV  db    */
-	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
-				/* Voltage mV  db    */
-	{ 0x2, 0x98, 0x0018 },	/* 400         0.0   */
-	{ 0x2, 0x98, 0x3015 },	/* 400         3.5   */
-	{ 0x2, 0x98, 0x6012 },	/* 400         6.0   */
-	{ 0x2, 0x98, 0x900F },	/* 400         9.5   */
-	{ 0x4, 0x98, 0x0018 },	/* 600         0.0   */
-	{ 0x4, 0x98, 0x3015 },	/* 600         3.5   */
-	{ 0x4, 0x98, 0x6012 },	/* 600         6.0   */
-	{ 0x5, 0x71, 0x0018 },	/* 800         0.0   */
-	{ 0x5, 0x71, 0x3015 },	/* 800         3.5   */
-	{ 0x6, 0x98, 0x0018 },	/* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
-				/* Voltage mV  db    */
-	{ 0x0, 0x00, 0x00 },	/* 200         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 200         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 200         6.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 250         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 250         4.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         0.0   */
-	{ 0x0, 0x00, 0x00 },	/* 300         1.5   */
-	{ 0x0, 0x00, 0x00 },	/* 350         0.0   */
-};
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x71, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2B, 0x00, 0x14 },	/* 350   900      8.2   */
+	{ 0xA, 0x4C, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x73, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x6C, 0x3C, 0x00, 0x03 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+						/* NT mV Trans mV db    */
+	{ 0x0, 0x7F, 0x3F, 0x00, 0x00 },	/* 200   200      0.0   */
+	{ 0x8, 0x7F, 0x38, 0x00, 0x07 },	/* 200   250      1.9   */
+	{ 0x1, 0x7F, 0x33, 0x00, 0x0C },	/* 200   300      3.5   */
+	{ 0x9, 0x7F, 0x31, 0x00, 0x0E },	/* 200   350      4.9   */
+	{ 0x8, 0x7F, 0x3F, 0x00, 0x00 },	/* 250   250      0.0   */
+	{ 0x1, 0x7F, 0x38, 0x00, 0x07 },	/* 250   300      1.6   */
+	{ 0x9, 0x7F, 0x35, 0x00, 0x0A },	/* 250   350      2.9   */
+	{ 0x1, 0x7F, 0x3F, 0x00, 0x00 },	/* 300   300      0.0   */
+	{ 0x9, 0x7F, 0x38, 0x00, 0x07 },	/* 300   350      1.3   */
+	{ 0x9, 0x7F, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x35, 0x3F, 0x00, 0x00 },	/* 350   350      0.0   */
+	{ 0xA, 0x4F, 0x37, 0x00, 0x08 },	/* 350   500      3.1   */
+	{ 0xC, 0x71, 0x2F, 0x00, 0x10 },	/* 350   700      6.0   */
+	{ 0x6, 0x7F, 0x2B, 0x00, 0x14 },	/* 350   900      8.2   */
+	{ 0xA, 0x4C, 0x3F, 0x00, 0x00 },	/* 500   500      0.0   */
+	{ 0xC, 0x73, 0x34, 0x00, 0x0B },	/* 500   700      2.9   */
+	{ 0x6, 0x7F, 0x2F, 0x00, 0x10 },	/* 500   900      5.1   */
+	{ 0xC, 0x6C, 0x3C, 0x00, 0x03 },	/* 650   700      0.6   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   900      3.5   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 900   900      0.0   */
+};
+
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+						/* NT mV Trans mV db    */
+	{ 0xA, 0x60, 0x3F, 0x00, 0x00 },	/* 450   450      0.0   */
+	{ 0xB, 0x73, 0x36, 0x00, 0x09 },	/* 450   650      3.2   */
+	{ 0x6, 0x7F, 0x31, 0x00, 0x0E },	/* 450   850      5.5   */
+	{ 0xB, 0x73, 0x3F, 0x00, 0x00 },	/* 650   650      0.0 ALS */
+	{ 0x6, 0x7F, 0x37, 0x00, 0x08 },	/* 650   850      2.3   */
+	{ 0x6, 0x7F, 0x3F, 0x00, 0x00 },	/* 850   850      0.0   */
+	{ 0x6, 0x7F, 0x35, 0x00, 0x0A },	/* 600   850      3.0   */
+};
 
 struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 	}
 }
 
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
 icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
-			int type, int *n_entries)
+			int type, int rate, int *n_entries)
 {
-	u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
-	if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
-		switch (voltage) {
-		case VOLTAGE_INFO_0_85V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
-			return icl_combo_phy_ddi_translations_edp_0_85V;
-		case VOLTAGE_INFO_0_95V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
-			return icl_combo_phy_ddi_translations_edp_0_95V;
-		case VOLTAGE_INFO_1_05V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
-			return icl_combo_phy_ddi_translations_edp_1_05V;
-		default:
-			MISSING_CASE(voltage);
-			return NULL;
-		}
-	} else {
-		switch (voltage) {
-		case VOLTAGE_INFO_0_85V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
-			return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
-		case VOLTAGE_INFO_0_95V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
-			return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
-		case VOLTAGE_INFO_1_05V:
-			*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
-			return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
-		default:
-			MISSING_CASE(voltage);
-			return NULL;
-		}
+	if (type == INTEL_OUTPUT_HDMI) {
+		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+		return icl_combo_phy_ddi_translations_hdmi;
+	} else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+		return icl_combo_phy_ddi_translations_edp_hbr3;
+	} else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+		*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+		return icl_combo_phy_ddi_translations_edp_hbr2;
 	}
+
+	*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+	return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
 
 	if (IS_ICELAKE(dev_priv)) {
 		if (intel_port_is_combophy(dev_priv, port))
-			icl_get_combo_buf_trans(dev_priv, port,
-						INTEL_OUTPUT_HDMI, &n_entries);
+			icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+						0, &n_entries);
 		else
 			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
 		default_entry = n_entries - 1;
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
 			return DDI_CLK_SEL_TBT_810;
 		default:
 			MISSING_CASE(clock);
-			break;
+			return DDI_CLK_SEL_NONE;
 		}
 	case DPLL_ID_ICL_MGPLL1:
 	case DPLL_ID_ICL_MGPLL2:
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = encoder->port;
 	int n_entries;
 
 	if (IS_ICELAKE(dev_priv)) {
 		if (intel_port_is_combophy(dev_priv, port))
 			icl_get_combo_buf_trans(dev_priv, port, encoder->type,
-						&n_entries);
+						intel_dp->link_rate, &n_entries);
 		else
 			n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
 	} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 }
 
 static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-					 u32 level, enum port port, int type)
+					 u32 level, enum port port, int type,
+					 int rate)
 {
-	const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+	const struct cnl_ddi_buf_trans *ddi_translations = NULL;
 	u32 n_entries, val;
 	int ln;
 
 	ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
-						   &n_entries);
+						   rate, &n_entries);
 	if (!ddi_translations)
 		return;
 
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 		level = n_entries - 1;
 	}
 
-	/* Set PORT_TX_DW5 Rterm Sel to 110b. */
+	/* Set PORT_TX_DW5 */
 	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-	val &= ~RTERM_SELECT_MASK;
+	val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+		 TAP2_DISABLE | TAP3_DISABLE);
+	val |= SCALING_MODE_SEL(0x2);
 	val |= RTERM_SELECT(0x6);
-	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
-	/* Program PORT_TX_DW5 */
-	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-	/* Set DisableTap2 and DisableTap3 if MIPI DSI
-	 * Clear DisableTap2 and DisableTap3 for all other Ports
-	 */
-	if (type == INTEL_OUTPUT_DSI) {
-		val |= TAP2_DISABLE;
-		val |= TAP3_DISABLE;
-	} else {
-		val &= ~TAP2_DISABLE;
-		val &= ~TAP3_DISABLE;
-	}
+	val |= TAP3_DISABLE;
 	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
 	/* Program PORT_TX_DW2 */
 	val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
 	val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
 		 RCOMP_SCALAR_MASK);
-	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
-	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+	val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+	val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
 	/* Program Rcomp scalar for every table entry */
-	val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+	val |= RCOMP_SCALAR(0x98);
 	I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
 
 	/* Program PORT_TX_DW4 */
2514 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); 2440 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
2515 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | 2441 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
2516 CURSOR_COEFF_MASK); 2442 CURSOR_COEFF_MASK);
2517 val |= ddi_translations[level].dw4_scaling; 2443 val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
2444 val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
2445 val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
2518 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); 2446 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
2519 } 2447 }
2448
2449 /* Program PORT_TX_DW7 */
2450 val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
2451 val &= ~N_SCALAR_MASK;
2452 val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
2453 I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
2520} 2454}
2521 2455
2522static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, 2456static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
 	I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
 	/* 5. Program swing and de-emphasis */
-	icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+	icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
 
 	/* 6. Set training enable to trigger update */
 	val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3da9c0f9e948..248128126422 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
 	}
 }
 
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	/*
+	 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
+	 * the hardware when a high res displays plugged in. DPLL P
+	 * divider is zero, and the pipe timings are bonkers. We'll
+	 * try to disable everything in that case.
+	 *
+	 * FIXME would be nice to be able to sanitize this state
+	 * without several WARNs, but for now let's take the easy
+	 * road.
+	 */
+	return IS_GEN6(dev_priv) &&
+	       crtc_state->base.active &&
+	       crtc_state->shared_dpll &&
+	       crtc_state->port_clock == 0;
+}
+
15418static void intel_sanitize_encoder(struct intel_encoder *encoder) 15438static void intel_sanitize_encoder(struct intel_encoder *encoder)
15419{ 15439{
15420 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 15440 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
15421 struct intel_connector *connector; 15441 struct intel_connector *connector;
15442 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
15443 struct intel_crtc_state *crtc_state = crtc ?
15444 to_intel_crtc_state(crtc->base.state) : NULL;
15422 15445
15423 /* We need to check both for a crtc link (meaning that the 15446 /* We need to check both for a crtc link (meaning that the
15424 * encoder is active and trying to read from a pipe) and the 15447 * encoder is active and trying to read from a pipe) and the
15425 * pipe itself being active. */ 15448 * pipe itself being active. */
15426 bool has_active_crtc = encoder->base.crtc && 15449 bool has_active_crtc = crtc_state &&
15427 to_intel_crtc(encoder->base.crtc)->active; 15450 crtc_state->base.active;
15451
15452 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
15453 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
15454 pipe_name(crtc->pipe));
15455 has_active_crtc = false;
15456 }
15428 15457
15429 connector = intel_encoder_find_connector(encoder); 15458 connector = intel_encoder_find_connector(encoder);
15430 if (connector && !has_active_crtc) { 15459 if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15435 /* Connector is active, but has no active pipe. This is 15464 /* Connector is active, but has no active pipe. This is
15436 * fallout from our resume register restoring. Disable 15465 * fallout from our resume register restoring. Disable
15437 * the encoder manually again. */ 15466 * the encoder manually again. */
15438 if (encoder->base.crtc) { 15467 if (crtc_state) {
15439 struct drm_crtc_state *crtc_state = encoder->base.crtc->state; 15468 struct drm_encoder *best_encoder;
15440 15469
15441 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15470 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15442 encoder->base.base.id, 15471 encoder->base.base.id,
15443 encoder->base.name); 15472 encoder->base.name);
15473
15474 /* avoid oopsing in case the hooks consult best_encoder */
15475 best_encoder = connector->base.state->best_encoder;
15476 connector->base.state->best_encoder = &encoder->base;
15477
15444 if (encoder->disable) 15478 if (encoder->disable)
15445 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15479 encoder->disable(encoder, crtc_state,
15480 connector->base.state);
15446 if (encoder->post_disable) 15481 if (encoder->post_disable)
15447 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15482 encoder->post_disable(encoder, crtc_state,
15483 connector->base.state);
15484
15485 connector->base.state->best_encoder = best_encoder;
15448 } 15486 }
15449 encoder->base.crtc = NULL; 15487 encoder->base.crtc = NULL;
15450 15488
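The hunk above briefly points connector->base.state->best_encoder at the encoder being torn down, so disable hooks that consult best_encoder never chase a stale pointer. A minimal sketch of that save/override/restore pattern; the types and the disable callback here are hypothetical stand-ins, not i915 code:

struct encoder;

struct connector_state {
	struct encoder *best_encoder;
};

static void call_disable_hook(struct connector_state *state,
			      struct encoder *enc,
			      void (*disable)(struct encoder *))
{
	/* save the current value so the override stays invisible */
	struct encoder *saved = state->best_encoder;

	/* the hook may consult state->best_encoder; keep it consistent */
	state->best_encoder = enc;

	if (disable)
		disable(enc);

	/* put back whatever was there before */
	state->best_encoder = saved;
}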
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdd2cbc56fa3..22a74608c6e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
 	enum port port = dig_port->base.port;
 
-	if (port == PORT_B)
+	if (intel_port_is_combophy(dev_priv, port) &&
+	    !intel_dp_is_edp(intel_dp))
 		return 540000;
 
 	return 810000;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f94a04b4ad87..e9ddeaf05a14 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@ struct intel_fbdev {
 	unsigned long vma_flags;
 	async_cookie_t cookie;
 	int preferred_bpp;
+
+	/* Whether or not fbdev hpd processing is temporarily suspended */
+	bool hpd_suspended : 1;
+	/* Set when a hotplug was received while HPD processing was
+	 * suspended
+	 */
+	bool hpd_waiting : 1;
+
+	/* Protects hpd_suspended */
+	struct mutex hpd_lock;
 };
 
 struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb5bb5b32a60..7f365ac0b549 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -679,6 +679,7 @@ int intel_fbdev_init(struct drm_device *dev)
 	if (ifbdev == NULL)
 		return -ENOMEM;
 
+	mutex_init(&ifbdev->hpd_lock);
 	drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
 
 	if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -752,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
 	intel_fbdev_destroy(ifbdev);
 }
 
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+	bool send_hpd = false;
+
+	mutex_lock(&ifbdev->hpd_lock);
+	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+	ifbdev->hpd_waiting = false;
+	mutex_unlock(&ifbdev->hpd_lock);
+
+	if (send_hpd) {
+		DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+		drm_fb_helper_hotplug_event(&ifbdev->helper);
+	}
+}
+
 void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -773,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 		 */
 		if (state != FBINFO_STATE_RUNNING)
 			flush_work(&dev_priv->fbdev_suspend_work);
+
 		console_lock();
 	} else {
 		/*
@@ -800,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
 
 	drm_fb_helper_set_suspend(&ifbdev->helper, state);
 	console_unlock();
+
+	intel_fbdev_hpd_set_suspend(ifbdev, state);
 }
 
 void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+	bool send_hpd;
 
 	if (!ifbdev)
 		return;
 
 	intel_fbdev_sync(ifbdev);
-	if (ifbdev->vma || ifbdev->helper.deferred_setup)
+
+	mutex_lock(&ifbdev->hpd_lock);
+	send_hpd = !ifbdev->hpd_suspended;
+	ifbdev->hpd_waiting = true;
+	mutex_unlock(&ifbdev->hpd_lock);
+
+	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
 		drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
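The three intel_fbdev hunks above implement one small protocol: hpd_suspended gates delivery, hpd_waiting latches any hotplug that arrives while gated, and resume replays at most one coalesced event. A userspace re-creation of the same protocol under a pthread mutex; field names mirror the i915 ones, and deliver_event() is a stand-in:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct hpd_state {
	pthread_mutex_t lock;	/* protects the two flags below */
	bool suspended;
	bool waiting;
};

static struct hpd_state hpd = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void deliver_event(void) { puts("hotplug: reprobe connectors"); }

/* mirror of intel_fbdev_output_poll_changed() */
static void hpd_event(struct hpd_state *s)
{
	bool send;

	pthread_mutex_lock(&s->lock);
	send = !s->suspended;
	s->waiting = true;	/* latch it even when delivering now */
	pthread_mutex_unlock(&s->lock);

	if (send)
		deliver_event();
}

/* mirror of intel_fbdev_hpd_set_suspend() */
static void hpd_set_suspend(struct hpd_state *s, bool suspend)
{
	bool replay;

	pthread_mutex_lock(&s->lock);
	s->suspended = suspend;
	replay = !suspend && s->waiting;	/* N missed events -> 1 replay */
	s->waiting = false;
	pthread_mutex_unlock(&s->lock);

	if (replay)
		deliver_event();
}

int main(void)
{
	hpd_set_suspend(&hpd, true);
	hpd_event(&hpd);		/* gated: only latched */
	hpd_set_suspend(&hpd, false);	/* replays one event */
	return 0;
}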
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4be167dcd209..eab9341a5152 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	 */
 	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
 		prio |= I915_PRIORITY_NEWCLIENT;
+		active->sched.attr.priority = prio;
 		list_move_tail(&active->sched.link,
 			       i915_sched_lookup_priolist(engine, prio));
 	}
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	int i;
 
 	priolist_for_each_request_consume(rq, rn, p, i) {
+		GEM_BUG_ON(last &&
+			   need_preempt(engine, last, rq_prio(rq)));
+
 		/*
 		 * Can we combine this request with the current port?
 		 * It has to be the same context/ringbuffer and not
@@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	intel_engine_init_workarounds(engine);
+
 	if (HAS_LOGICAL_RING_ELSQ(i915)) {
 		execlists->submit_reg = i915->regs +
 			i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
 	}
 
 	intel_engine_init_whitelist(engine);
-	intel_engine_init_workarounds(engine);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8f106d9ecf8..3ac20153705a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -55,7 +55,12 @@
 struct opregion_header {
 	u8 signature[16];
 	u32 size;
-	u32 opregion_ver;
+	struct {
+		u8 rsvd;
+		u8 revision;
+		u8 minor;
+		u8 major;
+	} __packed over;
 	u8 bios_ver[32];
 	u8 vbios_ver[16];
 	u8 driver_ver[16];
@@ -119,7 +124,8 @@ struct opregion_asle {
 	u64 fdss;
 	u32 fdsp;
 	u32 stat;
-	u64 rvda;	/* Physical address of raw vbt data */
+	u64 rvda;	/* Physical (2.0) or relative from opregion (2.1+)
+			 * address of raw VBT data. */
 	u32 rvds;	/* Size of raw vbt data */
 	u8 rsvd[58];
 } __packed;
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	opregion->header = base;
 	opregion->lid_state = base + ACPI_CLID;
 
+	DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
+			 opregion->header->over.major,
+			 opregion->header->over.minor,
+			 opregion->header->over.revision);
+
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 	if (dmi_check_system(intel_no_opregion_vbt))
 		goto out;
 
-	if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+	if (opregion->header->over.major >= 2 && opregion->asle &&
 	    opregion->asle->rvda && opregion->asle->rvds) {
-		opregion->rvda = memremap(opregion->asle->rvda,
-					  opregion->asle->rvds,
+		resource_size_t rvda = opregion->asle->rvda;
+
+		/*
+		 * opregion 2.0: rvda is the physical VBT address.
+		 *
+		 * opregion 2.1+: rvda is unsigned, relative offset from
+		 * opregion base, and should never point within opregion.
+		 */
+		if (opregion->header->over.major > 2 ||
+		    opregion->header->over.minor >= 1) {
+			WARN_ON(rvda < OPREGION_SIZE);
+
+			rvda += asls;
+		}
+
+		opregion->rvda = memremap(rvda, opregion->asle->rvds,
 					  MEMREMAP_WB);
+
 		vbt = opregion->rvda;
 		vbt_size = opregion->asle->rvds;
 		if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 			goto out;
 		} else {
 			DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+			memunmap(opregion->rvda);
+			opregion->rvda = NULL;
 		}
 	}
 
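The OpRegion change above hinges on one version-dependent interpretation: in 2.0 RVDA is an absolute physical address, while from 2.1 on it is an offset from the OpRegion base (the ACPI ASLS value) and must point past the 8 KiB OpRegion itself. A small sketch of that address computation; the helper and variable names are illustrative, and the version test restates the hunk's over.major/over.minor check for a header already known to be 2.x:

#include <stdint.h>

#define OPREGION_SIZE	(8 * 1024)	/* the OpRegion is a fixed 8 KiB */

static uint64_t vbt_phys_addr(uint8_t major, uint8_t minor,
			      uint64_t rvda, uint64_t opregion_base)
{
	if (major > 2 || (major == 2 && minor >= 1)) {
		/* 2.1+: relative offset; sane firmware keeps it outside
		 * the OpRegion, i.e. rvda >= OPREGION_SIZE */
		return opregion_base + rvda;
	}

	/* 2.0: already an absolute physical address */
	return rvda;
}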
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..f71970df9936 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
 		      intel_dp->psr_dpcd[0]);
 
+	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+		DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+		return;
+	}
+
 	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
 		DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
 		return;
 	}
+
 	dev_priv->psr.sink_support = true;
 	dev_priv->psr.sink_sync_latency =
 		intel_dp_get_sink_sync_latency(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72edaa7ff411..a1a7cc29fdd1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -415,16 +415,17 @@ struct intel_engine_cs {
 		/**
 		 * @enable_count: Reference count for the enabled samplers.
 		 *
-		 * Index number corresponds to the bit number from @enable.
+		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
 		 */
-		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
 		/**
 		 * @sample: Counter values for sampling events.
 		 *
 		 * Our internal timer stores the current counters in this field.
+		 *
+		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
 		 */
-#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
-		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
 	} pmu;
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..5170a0f5fe7b 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane,
 
 	keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
 
-	keymsk = key->channel_mask & 0x3ffffff;
+	keymsk = key->channel_mask & 0x7ffffff;
 	if (alpha < 0xff)
 		keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
 
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2c5bbe317353..e31e263cf86b 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		int bus_format;
 
 		ret = of_property_read_u32(child, "reg", &i);
-		if (ret || i < 0 || i > 1)
-			return -EINVAL;
+		if (ret || i < 0 || i > 1) {
+			ret = -EINVAL;
+			goto free_child;
+		}
 
 		if (!of_device_is_available(child))
 			continue;
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		channel = &imx_ldb->channel[i];
 		channel->ldb = imx_ldb;
 		channel->chno = i;
-		channel->child = child;
 
 		/*
 		 * The output port is port@4 with an external 4-port mux or
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 					  imx_ldb->lvds_mux ? 4 : 2, 0,
 					  &channel->panel, &channel->bridge);
 		if (ret && ret != -ENODEV)
-			return ret;
+			goto free_child;
 
 		/* panel ddc only if there is no bridge */
 		if (!channel->bridge) {
 			ret = imx_ldb_panel_ddc(dev, channel, child);
 			if (ret)
-				return ret;
+				goto free_child;
 		}
 
 		bus_format = of_get_bus_format(dev, child);
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 		if (bus_format < 0) {
 			dev_err(dev, "could not determine data mapping: %d\n",
 				bus_format);
-			return bus_format;
+			ret = bus_format;
+			goto free_child;
 		}
 		channel->bus_format = bus_format;
+		channel->child = child;
 
 		ret = imx_ldb_register(drm, channel);
-		if (ret)
-			return ret;
+		if (ret) {
+			channel->child = NULL;
+			goto free_child;
+		}
 	}
 
 	dev_set_drvdata(dev, imx_ldb);
 
 	return 0;
+
+free_child:
+	of_node_put(child);
+	return ret;
 }
 
 static void imx_ldb_unbind(struct device *dev, struct device *master,
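The imx-ldb hunks above funnel every error path into one new free_child label because for_each_child_of_node() holds a reference on the child node it hands to the loop body; a bare return leaks that reference. The cleanup shape in isolation, with probe_one() as a hypothetical stand-in for the per-channel setup:

#include <linux/of.h>

static int probe_one(struct device_node *child);	/* hypothetical */

static int bind_children(struct device_node *np)
{
	struct device_node *child;
	int ret;

	for_each_child_of_node(np, child) {
		ret = probe_one(child);
		if (ret)
			goto free_child;	/* not "return ret;" */
	}

	return 0;

free_child:
	of_node_put(child);	/* drop the ref the iterator is holding */
	return ret;
}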
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index c390924de93d..21e964f6ab5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
-	/* CRTC should be enabled */
+	/* nothing to check when disabling or disabled */
 	if (!crtc_state->enable)
-		return -EINVAL;
+		return 0;
 
 	switch (plane->type) {
 	case DRM_PLANE_TYPE_PRIMARY:
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 75d97f1b2e8f..4f5c67f70c4d 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,7 +46,6 @@ struct meson_crtc {
 	struct drm_crtc base;
 	struct drm_pending_vblank_event *event;
 	struct meson_drm *priv;
-	bool enabled;
 };
 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
 
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
 
 };
 
-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_state)
 {
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct drm_crtc_state *crtc_state = crtc->state;
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)
 
 	drm_crtc_vblank_on(crtc);
 
-	meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
-				     struct drm_crtc_state *old_state)
-{
-	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
-	struct meson_drm *priv = meson_crtc->priv;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (!meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	priv->viu.osd1_enabled = true;
 }
 
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
 
 		crtc->state->event = NULL;
 	}
-
-	meson_crtc->enabled = false;
 }
 
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	unsigned long flags;
 
-	if (crtc->state->enable && !meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3ee4d4a4ecba..12ff47b13668 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
 	.fb_create = drm_gem_fb_create,
 };
 
+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
 static irqreturn_t meson_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	drm->mode_config.max_width = 3840;
 	drm->mode_config.max_height = 2160;
 	drm->mode_config.funcs = &meson_mode_config_funcs;
+	drm->mode_config.helper_private = &meson_mode_config_helpers;
 
 	/* Hardware Initialization */
 
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
 	remote_node = of_graph_get_remote_port_parent(ep);
 	if (!remote_node ||
 	    remote_node == parent || /* Ignore parent endpoint */
-	    !of_device_is_available(remote_node))
+	    !of_device_is_available(remote_node)) {
+		of_node_put(remote_node);
 		continue;
+	}
 
 	count += meson_probe_remote(pdev, match, remote, remote_node);
 
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
 
 	for_each_endpoint_of_node(np, ep) {
 		remote = of_graph_get_remote_port_parent(ep);
-		if (!remote || !of_device_is_available(remote))
+		if (!remote || !of_device_is_available(remote)) {
+			of_node_put(remote);
 			continue;
+		}
 
 		count += meson_probe_remote(pdev, &match, np, remote);
+		of_node_put(remote);
 	}
 
 	if (count && !match)
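Both meson hunks above enforce the same rule: of_graph_get_remote_port_parent() returns its node with the refcount raised, so every path out of the loop body, the skip path included, must drop it. A sketch of the balanced form; handle_remote() is a placeholder:

#include <linux/of.h>
#include <linux/of_graph.h>

static void handle_remote(struct device_node *remote);	/* hypothetical */

static void scan_endpoints(struct device_node *np)
{
	struct device_node *ep, *remote;

	for_each_endpoint_of_node(np, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote || !of_device_is_available(remote)) {
			of_node_put(remote);	/* of_node_put(NULL) is a no-op */
			continue;
		}

		handle_remote(remote);
		of_node_put(remote);	/* balance the get on every iteration */
	}
}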
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 5beb83d1cf87..ce1b3cc4bf6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -944,7 +944,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
 	np = dev_pm_opp_get_of_node(opp);
 
 	if (np) {
-		of_property_read_u32(np, "qcom,level", &val);
+		of_property_read_u32(np, "opp-level", &val);
 		of_node_put(np);
 	}
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2e4372ef17a3..2cfee1a4fe0b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -765,7 +765,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
765 adreno_gpu->rev = config->rev; 765 adreno_gpu->rev = config->rev;
766 766
767 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory"; 767 adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
768 adreno_gpu_config.irqname = "kgsl_3d0_irq";
769 768
770 adreno_gpu_config.va_start = SZ_16M; 769 adreno_gpu_config.va_start = SZ_16M;
771 adreno_gpu_config.va_end = 0xffffffff; 770 adreno_gpu_config.va_end = 0xffffffff;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index fd75870eb17f..6aefcd6db46b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -365,19 +365,6 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
 				      &pdpu->pipe_qos_cfg);
 }
 
-static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
-{
-	struct dpu_plane *pdpu = to_dpu_plane(plane);
-	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
-
-	if (!pdpu->is_rt_pipe)
-		return;
-
-	pm_runtime_get_sync(&dpu_kms->pdev->dev);
-	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
-	pm_runtime_put_sync(&dpu_kms->pdev->dev);
-}
-
 /**
  * _dpu_plane_set_ot_limit - set OT limit for the given plane
  * @plane: Pointer to drm plane
@@ -1248,6 +1235,19 @@ static void dpu_plane_reset(struct drm_plane *plane)
 }
 
 #ifdef CONFIG_DEBUG_FS
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+	struct dpu_plane *pdpu = to_dpu_plane(plane);
+	struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+
+	if (!pdpu->is_rt_pipe)
+		return;
+
+	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+	_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+	pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
+
 static ssize_t _dpu_plane_danger_read(struct file *file,
 			char __user *buff, size_t count, loff_t *ppos)
 {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9cd6a96c6bf2..927e5d86f7c1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -250,7 +250,8 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages);
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 
@@ -333,6 +334,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
 
+__printf(2, 3)
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
 
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
@@ -396,12 +398,14 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
 int msm_debugfs_late_init(struct drm_device *dev);
 int msm_rd_debugfs_init(struct drm_minor *minor);
 void msm_rd_debugfs_cleanup(struct msm_drm_private *priv);
+__printf(3, 4)
 void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...);
 int msm_perf_debugfs_init(struct drm_minor *minor);
 void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
+__printf(3, 4)
 static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
 		const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
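The __printf(m, n) annotations added above expand to __attribute__((format(printf, m, n))), telling the compiler that argument m is a printf-style format consumed by the varargs starting at argument n, so -Wformat checks every caller; annotating the !DEBUG_FS stub keeps that checking alive in all configurations. A plain C equivalent with a toy logger:

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

__printf(2, 3)
static void obj_log(const char *tag, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "[%s] ", tag);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	obj_log("gem", "size=%zu\n", (size_t)4096);	/* ok */
	/* obj_log("gem", "size=%zu\n", 42.0); -- now a -Wformat warning */
	return 0;
}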
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 51a95da694d8..c8886d3071fa 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -391,6 +391,10 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	struct page **pages;
+	int prot = IOMMU_READ;
+
+	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
+		prot |= IOMMU_WRITE;
 
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
@@ -405,8 +409,8 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-			obj->size >> PAGE_SHIFT);
+	return msm_gem_map_vma(aspace, vma, prot,
+			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 }
 
 /* get iova and pin it. Should have a matching put */
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 557360788084..49c04829cf34 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -68,7 +68,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 
 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+		struct msm_gem_vma *vma, int prot,
+		struct sg_table *sgt, int npages)
 {
 	unsigned size = npages << PAGE_SHIFT;
 	int ret = 0;
@@ -86,7 +87,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 
 	if (aspace->mmu)
 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
+				size, prot);
 
 	if (ret)
 		vma->mapped = false;
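The msm hunks above thread a per-buffer protection value down to the IOMMU map so buffers created with MSM_BO_GPU_READONLY are no longer mapped writable. The derivation in isolation, against the iommu_map() signature of this kernel generation; the helper and the BO_GPU_READONLY stand-in flag are illustrative:

#include <linux/iommu.h>

#define BO_GPU_READONLY	0x1	/* stand-in for MSM_BO_GPU_READONLY */

static int map_bo(struct iommu_domain *domain, unsigned long iova,
		  phys_addr_t paddr, size_t size, unsigned int bo_flags)
{
	int prot = IOMMU_READ;

	/* writable only when the BO was not created read-only */
	if (!(bo_flags & BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	return iommu_map(domain, iova, paddr, size, prot);
}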
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5f3eff304355..10babd18e286 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -900,7 +900,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	}
 
 	/* Get Interrupt: */
-	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
+	gpu->irq = platform_get_irq(pdev, 0);
 	if (gpu->irq < 0) {
 		ret = gpu->irq;
 		DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index efb49bb64191..ca17086f72c9 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -31,7 +31,6 @@ struct msm_gpu_state;
 
 struct msm_gpu_config {
 	const char *ioname;
-	const char *irqname;
 	uint64_t va_start;
 	uint64_t va_end;
 	unsigned int nr_rings;
@@ -63,7 +62,7 @@ struct msm_gpu_funcs {
 	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
 	void (*recover)(struct msm_gpu *gpu);
 	void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 	/* show GPU status in debugfs: */
 	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
 			struct drm_printer *p);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 90e9d0a48dc0..d21172933d92 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
 	char *fptr = &fifo->buf[fifo->head];
 	int n;
 
-	wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+	wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+	if (!rd->open)
+		return;
 
 	/* Note that smp_load_acquire() is not strictly required
 	 * as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@ out:
 static int rd_release(struct inode *inode, struct file *file)
 {
 	struct msm_rd_state *rd = inode->i_private;
+
 	rd->open = false;
+	wake_up_all(&rd->fifo_event);
+
 	return 0;
 }
 
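The msm_rd fix above pairs two things that must always change together: the sleeper's wait condition gains a "the other side is gone" clause, and the teardown path issues the wake-up, so a writer blocked on fifo space cannot sleep forever after the reader closes the file. Restated as an illustrative kernel-style fragment (init_waitqueue_head() on ev is assumed done at setup):

#include <linux/wait.h>

struct pipe_state {
	wait_queue_head_t ev;
	bool open;
	int space;
};

static int pipe_write_wait(struct pipe_state *p)
{
	/* wakeable either by new space or by the reader closing */
	wait_event(p->ev, p->space > 0 || !p->open);
	if (!p->open)
		return -EPIPE;	/* reader is gone, give up */
	return 0;
}

static void pipe_close(struct pipe_state *p)
{
	p->open = false;
	wake_up_all(&p->ev);	/* kick every sleeper past the wait */
}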
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 5f5be6368aed..c7a94c94dbf3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -253,6 +253,9 @@ nouveau_backlight_init(struct drm_connector *connector)
 	case NV_DEVICE_INFO_V0_FERMI:
 	case NV_DEVICE_INFO_V0_KEPLER:
 	case NV_DEVICE_INFO_V0_MAXWELL:
+	case NV_DEVICE_INFO_V0_PASCAL:
+	case NV_DEVICE_INFO_V0_VOLTA:
+	case NV_DEVICE_INFO_V0_TURING:
 		ret = nv50_backlight_init(nv_encoder, &props, &ops);
 		break;
 	default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index bfbc9341e0c2..d9edb5785813 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2435,6 +2435,38 @@ nv140_chipset = {
 };
 
 static const struct nvkm_device_chip
+nv162_chipset = {
+	.name = "TU102",
+	.bar = tu104_bar_new,
+	.bios = nvkm_bios_new,
+	.bus = gf100_bus_new,
+	.devinit = tu104_devinit_new,
+	.fault = tu104_fault_new,
+	.fb = gv100_fb_new,
+	.fuse = gm107_fuse_new,
+	.gpio = gk104_gpio_new,
+	.i2c = gm200_i2c_new,
+	.ibus = gm200_ibus_new,
+	.imem = nv50_instmem_new,
+	.ltc = gp102_ltc_new,
+	.mc = tu104_mc_new,
+	.mmu = tu104_mmu_new,
+	.pci = gp100_pci_new,
+	.pmu = gp102_pmu_new,
+	.therm = gp100_therm_new,
+	.timer = gk20a_timer_new,
+	.top = gk104_top_new,
+	.ce[0] = tu104_ce_new,
+	.ce[1] = tu104_ce_new,
+	.ce[2] = tu104_ce_new,
+	.ce[3] = tu104_ce_new,
+	.ce[4] = tu104_ce_new,
+	.disp = tu104_disp_new,
+	.dma = gv100_dma_new,
+	.fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
 nv164_chipset = {
 	.name = "TU104",
 	.bar = tu104_bar_new,
@@ -2950,6 +2982,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	case 0x138: device->chip = &nv138_chipset; break;
 	case 0x13b: device->chip = &nv13b_chipset; break;
 	case 0x140: device->chip = &nv140_chipset; break;
+	case 0x162: device->chip = &nv162_chipset; break;
 	case 0x164: device->chip = &nv164_chipset; break;
 	case 0x166: device->chip = &nv166_chipset; break;
 	default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 816ccaedfc73..8675613e142b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -22,6 +22,7 @@
 #include <engine/falcon.h>
 
 #include <core/gpuobj.h>
+#include <subdev/mc.h>
 #include <subdev/timer.h>
 #include <engine/fifo.h>
 
@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
 		}
 	}
 
-	nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
-	nvkm_wr32(device, base + 0x014, 0xffffffff);
+	if (nvkm_mc_enabled(device, engine->subdev.index)) {
+		nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+		nvkm_wr32(device, base + 0x014, 0xffffffff);
+	}
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 3695cde669f8..07914e36939e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
 		duty = nvkm_therm_update_linear(therm);
 		break;
 	case NVBIOS_THERM_FAN_OTHER:
-		if (therm->cstate)
+		if (therm->cstate) {
 			duty = therm->cstate;
-		else
+			poll = false;
+		} else {
 			duty = nvkm_therm_update_linear_fallback(therm);
-		poll = false;
+		}
 		break;
 	}
 	immd = false;
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 00a9c2ab9e6c..64fb788b6647 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll)
 
 static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 	struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
 	enum dss_clk_source dispc_clk_src, dsi_clk_src;
 	int dsi_module = dsi->module_id;
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 	unsigned long flags;
 	struct dsi_irq_stats stats;
 
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
 
 static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
 {
-	struct dsi_data *dsi = p;
+	struct dsi_data *dsi = s->private;
 
 	if (dsi_runtime_get(dsi))
 		return 0;
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
 	dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
 	dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
 	dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+	/*
+	 * HACK: These flags should be handled through the omap_dss_device bus
+	 * flags, but this will only be possible when the DSI encoder will be
+	 * converted to the omapdrm-managed encoder model.
+	 */
+	dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+	dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+	dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
+	dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+	dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
+	dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
 
 	dss_mgr_set_timings(&dsi->output, &dsi->vm);
 
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
 
 	snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
 	dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_regs, &dsi);
+						    dsi_dump_dsi_regs, dsi);
 #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
 	snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
 	dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_irqs, &dsi);
+						    dsi_dump_dsi_irqs, dsi);
 #endif
 	snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
 	dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
-						    dsi_dump_dsi_clocks, &dsi);
+						    dsi_dump_dsi_clocks, dsi);
 
 	return 0;
 }
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data)
 	dss_debugfs_remove_file(dsi->debugfs.irqs);
 	dss_debugfs_remove_file(dsi->debugfs.regs);
 
-	of_platform_depopulate(dev);
-
 	WARN_ON(dsi->scp_clk_refcount > 0);
 
 	dss_pll_unregister(&dsi->pll);
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev)
 
 	dsi_uninit_output(dsi);
 
+	of_platform_depopulate(&pdev->dev);
+
 	pm_runtime_disable(&pdev->dev);
 
 	if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
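The debugfs part of the DSI fix above corrects a classic pair of bugs: the bind code registered &dsi, the address of a local pointer that dies when the function returns, and the dump callbacks ignored their private data anyway. The safe shape, sketched with the stock seq_file helpers; struct dsi_ctx and the file name are illustrative:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct dsi_ctx { int module_id; };

static int ctx_show(struct seq_file *s, void *unused)
{
	struct dsi_ctx *ctx = s->private;	/* set from the create call */

	seq_printf(s, "module %d\n", ctx->module_id);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ctx);

static void ctx_debugfs_register(struct dsi_ctx *ctx, struct dentry *root)
{
	/* pass ctx, never &ctx: the address of a local dies on return */
	debugfs_create_file("dsi_regs", 0444, root, ctx, &ctx_fops);
}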
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 13c8a662f9b4..ccb090f3ab30 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = qxl_debugfs_init,
 #endif
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = qxl_gem_prime_pin,
 	.gem_prime_unpin = qxl_gem_prime_unpin,
-	.gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
-	.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
 	.gem_prime_vmap = qxl_gem_prime_vmap,
 	.gem_prime_vunmap = qxl_gem_prime_vunmap,
 	.gem_prime_mmap = qxl_gem_prime_mmap,
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index a55dece118b2..df65d3c1a7b8 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
 	WARN_ONCE(1, "not implemented");
 }
 
-struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENOSYS);
-}
-
-struct drm_gem_object *qxl_gem_prime_import_sg_table(
-	struct drm_device *dev, struct dma_buf_attachment *attach,
-	struct sg_table *table)
-{
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENOSYS);
-}
-
 void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
 {
 	WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779a80b4..a97294ac96d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
 	u16 data_offset, size;
 	u8 frev, crev;
 	struct ci_power_info *pi;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	rdev->pm.dpm.priv = pi;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		pi->sys_pcie_mask = 0;
 	} else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3af015..0a785ef0ab66 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
 	struct atom_clock_dividers dividers;
-	enum pci_bus_speed speed_cap;
+	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
 	struct pci_dev *root = rdev->pdev->bus->self;
 	int ret;
 
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
 	eg_pi = &ni_pi->eg;
 	pi = &eg_pi->rv7xx;
 
-	speed_cap = pcie_get_speed_cap(root);
+	if (!pci_is_root_bus(rdev->pdev->bus))
+		speed_cap = pcie_get_speed_cap(root);
 	if (speed_cap == PCI_SPEED_UNKNOWN) {
 		si_pi->sys_pcie_mask = 0;
 	} else {
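The two identical radeon hunks above guard against rdev->pdev->bus->self being NULL, which is exactly what happens when the GPU hangs directly off a root bus (common under virtualization or passthrough); pcie_get_speed_cap(NULL) would dereference it. The guard as a standalone sketch:

#include <linux/pci.h>

static enum pci_bus_speed upstream_speed_cap(struct pci_dev *pdev)
{
	enum pci_bus_speed cap = PCI_SPEED_UNKNOWN;	/* safe default */

	/* on a root bus there is no upstream bridge: bus->self is NULL */
	if (!pci_is_root_bus(pdev->bus))
		cap = pcie_get_speed_cap(pdev->bus->self);

	return cap;
}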
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 96ac1458a59c..c0351abf83a3 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
 */
 
 #include <drm/drmP.h>
@@ -113,8 +104,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
 		child_count++;
 		ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
 						  &panel, &bridge);
-		if (!ret)
+		if (!ret) {
+			of_node_put(endpoint);
 			break;
+		}
 	}
 
 	of_node_put(port);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 38b52e63b2b0..27b9635124bc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -1,17 +1,8 @@
-//SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
  * Author:
  *      Sandy Huang <hjc@rock-chips.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
 */
 
 #ifdef CONFIG_ROCKCHIP_RGB
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4463d3826ecb..e2942c9a11a7 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 
 	while ((entity->dependency =
 			sched->ops->dependency(sched_job, entity))) {
+		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
 
-		if (drm_sched_entity_add_dependency_cb(entity)) {
-
-			trace_drm_sched_job_wait_dep(sched_job,
-						     entity->dependency);
+		if (drm_sched_entity_add_dependency_cb(entity))
 			return NULL;
-		}
 	}
 
 	/* skip jobs from entity that marked guilty */
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 9e9255ee59cd..a021bab11a4f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
 		remote = of_graph_get_remote_port_parent(ep);
 		if (!remote)
 			continue;
+		of_node_put(remote);
 
 		/* does this node match any registered engines? */
 		list_for_each_entry(frontend, &drv->frontend_list, list) {
 			if (remote == frontend->node) {
-				of_node_put(remote);
 				of_node_put(port);
+				of_node_put(ep);
 				return frontend;
 			}
 		}
 	}
-
+	of_node_put(port);
 	return ERR_PTR(-EINVAL);
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0d9011..416da5376701 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
92 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 92 val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
93 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE; 93 val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
94 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG); 94 writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
95
96 clk_disable_unprepare(hdmi->tmds_clk);
95} 97}
96 98
97static void sun4i_hdmi_enable(struct drm_encoder *encoder) 99static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
102 104
103 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n"); 105 DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
104 106
107 clk_prepare_enable(hdmi->tmds_clk);
108
105 sun4i_hdmi_setup_avi_infoframes(hdmi, mode); 109 sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
106 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI); 110 val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
107 val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END); 111 val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
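
The HDMI encoder hunks pair clk_prepare_enable() on enable with clk_disable_unprepare() on disable, so the TMDS clock only runs while the output does. The general idiom, as a hedged sketch (struct foo and the callbacks are placeholders; a real driver should also check the enable return code, which the hunk above does not):

	#include <linux/clk.h>

	static int foo_output_enable(struct foo *priv)
	{
		int ret;

		ret = clk_prepare_enable(priv->clk);	/* may sleep */
		if (ret)
			return ret;

		/* ... program the hardware while the clock is running ... */
		return 0;
	}

	static void foo_output_disable(struct foo *priv)
	{
		/* ... quiesce the hardware first ... */
		clk_disable_unprepare(priv->clk);	/* balances the enable */
	}
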
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0420f5c978b9..cf45d0f940f9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
761 return PTR_ERR(tcon->sclk0); 761 return PTR_ERR(tcon->sclk0);
762 } 762 }
763 } 763 }
764 clk_prepare_enable(tcon->sclk0);
764 765
765 if (tcon->quirks->has_channel_1) { 766 if (tcon->quirks->has_channel_1) {
766 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); 767 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
775 776
776static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) 777static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
777{ 778{
779 clk_disable_unprepare(tcon->sclk0);
778 clk_disable_unprepare(tcon->clk); 780 clk_disable_unprepare(tcon->clk);
779} 781}
780 782
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index f7f32a885af7..2d1aaca49105 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -127,14 +127,10 @@ static struct drm_driver driver = {
127#if defined(CONFIG_DEBUG_FS) 127#if defined(CONFIG_DEBUG_FS)
128 .debugfs_init = virtio_gpu_debugfs_init, 128 .debugfs_init = virtio_gpu_debugfs_init,
129#endif 129#endif
130 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
131 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
132 .gem_prime_export = drm_gem_prime_export, 130 .gem_prime_export = drm_gem_prime_export,
133 .gem_prime_import = drm_gem_prime_import, 131 .gem_prime_import = drm_gem_prime_import,
134 .gem_prime_pin = virtgpu_gem_prime_pin, 132 .gem_prime_pin = virtgpu_gem_prime_pin,
135 .gem_prime_unpin = virtgpu_gem_prime_unpin, 133 .gem_prime_unpin = virtgpu_gem_prime_unpin,
136 .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
137 .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
138 .gem_prime_vmap = virtgpu_gem_prime_vmap, 134 .gem_prime_vmap = virtgpu_gem_prime_vmap,
139 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 135 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
140 .gem_prime_mmap = virtgpu_gem_prime_mmap, 136 .gem_prime_mmap = virtgpu_gem_prime_mmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 1deb41d42ea4..0c15000f926e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
372/* virtgpu_prime.c */ 372/* virtgpu_prime.c */
373int virtgpu_gem_prime_pin(struct drm_gem_object *obj); 373int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
374void virtgpu_gem_prime_unpin(struct drm_gem_object *obj); 374void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
375struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
376struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
377 struct drm_device *dev, struct dma_buf_attachment *attach,
378 struct sg_table *sgt);
379void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); 375void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
380void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 376void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
381int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, 377int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 86ce0ae93f59..c59ec34c80a5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
39 WARN_ONCE(1, "not implemented"); 39 WARN_ONCE(1, "not implemented");
40} 40}
41 41
42struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
43{
44 WARN_ONCE(1, "not implemented");
45 return ERR_PTR(-ENODEV);
46}
47
48struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
49 struct drm_device *dev, struct dma_buf_attachment *attach,
50 struct sg_table *table)
51{
52 WARN_ONCE(1, "not implemented");
53 return ERR_PTR(-ENODEV);
54}
55
56void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) 42void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
57{ 43{
58 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); 44 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
index 9d9e8146db90..d7b409a3c0f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -1,4 +1,5 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2
2#include "vkms_drv.h" 3#include "vkms_drv.h"
3#include <linux/crc32.h> 4#include <linux/crc32.h>
4#include <drm/drm_atomic.h> 5#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 177bbcb38306..eb56ee893761 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_atomic_helper.h> 4#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 83087877565c..7dcbecb5fac2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -1,9 +1,4 @@
1/* 1// SPDX-License-Identifier: GPL-2.0+
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 */
7 2
8/** 3/**
9 * DOC: vkms (Virtual Kernel Modesetting) 4 * DOC: vkms (Virtual Kernel Modesetting)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index e4469cd3d254..81f1cfbeb936 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -1,3 +1,5 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2
1#ifndef _VKMS_DRV_H_ 3#ifndef _VKMS_DRV_H_
2#define _VKMS_DRV_H_ 4#define _VKMS_DRV_H_
3 5
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 80311daed47a..138b0bb325cf 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include <linux/shmem_fs.h> 3#include <linux/shmem_fs.h>
10 4
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 271a0eb9042c..4173e4f48334 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_crtc_helper.h> 4#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 418817600ad1..0e67d2d42f0c 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 */
8 2
9#include "vkms_drv.h" 3#include "vkms_drv.h"
10#include <drm/drm_plane_helper.h> 4#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@
26 **************************************************************************/ 26 **************************************************************************/
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/dma-mapping.h>
29 30
30#include <drm/drmP.h> 31#include <drm/drmP.h>
31#include "vmwgfx_drv.h" 32#include "vmwgfx_drv.h"
@@ -34,7 +35,6 @@
34#include <drm/ttm/ttm_placement.h> 35#include <drm/ttm/ttm_placement.h>
35#include <drm/ttm/ttm_bo_driver.h> 36#include <drm/ttm/ttm_bo_driver.h>
36#include <drm/ttm/ttm_module.h> 37#include <drm/ttm/ttm_module.h>
37#include <linux/intel-iommu.h>
38 38
39#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" 39#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
40#define VMWGFX_CHIP_SVGAII 0 40#define VMWGFX_CHIP_SVGAII 0
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
546} 546}
547 547
548/** 548/**
549 * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
550 * taking place.
551 * @dev: Pointer to the struct drm_device.
552 *
 553 * Return: true if an IOMMU is present, false otherwise.
554 */
555static bool vmw_assume_iommu(struct drm_device *dev)
556{
557 const struct dma_map_ops *ops = get_dma_ops(dev->dev);
558
559 return !dma_is_direct(ops) && ops &&
560 ops->map_page != dma_direct_map_page;
561}
562
563/**
549 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this 564 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
550 * system. 565 * system.
551 * 566 *
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
565 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", 580 [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
566 [vmw_dma_map_populate] = "Keeping DMA mappings.", 581 [vmw_dma_map_populate] = "Keeping DMA mappings.",
567 [vmw_dma_map_bind] = "Giving up DMA mappings early."}; 582 [vmw_dma_map_bind] = "Giving up DMA mappings early."};
568#ifdef CONFIG_X86
569 const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
570 583
571#ifdef CONFIG_INTEL_IOMMU 584 if (vmw_force_coherent)
572 if (intel_iommu_enabled) { 585 dev_priv->map_mode = vmw_dma_alloc_coherent;
586 else if (vmw_assume_iommu(dev_priv->dev))
573 dev_priv->map_mode = vmw_dma_map_populate; 587 dev_priv->map_mode = vmw_dma_map_populate;
574 goto out_fixup; 588 else if (!vmw_force_iommu)
575 }
576#endif
577
578 if (!(vmw_force_iommu || vmw_force_coherent)) {
579 dev_priv->map_mode = vmw_dma_phys; 589 dev_priv->map_mode = vmw_dma_phys;
580 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 590 else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
581 return 0;
582 }
583
584 dev_priv->map_mode = vmw_dma_map_populate;
585
586 if (dma_ops && dma_ops->sync_single_for_cpu)
587 dev_priv->map_mode = vmw_dma_alloc_coherent; 591 dev_priv->map_mode = vmw_dma_alloc_coherent;
588#ifdef CONFIG_SWIOTLB 592 else
589 if (swiotlb_nr_tbl() == 0)
590 dev_priv->map_mode = vmw_dma_map_populate; 593 dev_priv->map_mode = vmw_dma_map_populate;
591#endif
592 594
593#ifdef CONFIG_INTEL_IOMMU 595 if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
594out_fixup:
595#endif
596 if (dev_priv->map_mode == vmw_dma_map_populate &&
597 vmw_restrict_iommu)
598 dev_priv->map_mode = vmw_dma_map_bind; 596 dev_priv->map_mode = vmw_dma_map_bind;
599 597
600 if (vmw_force_coherent) 598 /* No TTM coherent page pool? FIXME: Ask TTM instead! */
601 dev_priv->map_mode = vmw_dma_alloc_coherent; 599 if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
602 600 (dev_priv->map_mode == vmw_dma_alloc_coherent))
603#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
604 /*
605 * No coherent page pool
606 */
607 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
608 return -EINVAL; 601 return -EINVAL;
609#endif
610
611#else /* CONFIG_X86 */
612 dev_priv->map_mode = vmw_dma_map_populate;
613#endif /* CONFIG_X86 */
614 602
615 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); 603 DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
616
617 return 0; 604 return 0;
618} 605}
619 606
@@ -625,24 +612,20 @@ out_fixup:
625 * With 32-bit we can only handle 32 bit PFNs. Optionally set that 612 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
626 * restriction also for 64-bit systems. 613 * restriction also for 64-bit systems.
627 */ 614 */
628#ifdef CONFIG_INTEL_IOMMU
629static int vmw_dma_masks(struct vmw_private *dev_priv) 615static int vmw_dma_masks(struct vmw_private *dev_priv)
630{ 616{
631 struct drm_device *dev = dev_priv->dev; 617 struct drm_device *dev = dev_priv->dev;
618 int ret = 0;
632 619
633 if (intel_iommu_enabled && 620 ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
621 if (dev_priv->map_mode != vmw_dma_phys &&
634 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { 622 (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
635 DRM_INFO("Restricting DMA addresses to 44 bits.\n"); 623 DRM_INFO("Restricting DMA addresses to 44 bits.\n");
636 return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); 624 return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
637 } 625 }
638 return 0; 626
639} 627 return ret;
640#else
641static int vmw_dma_masks(struct vmw_private *dev_priv)
642{
643 return 0;
644} 628}
645#endif
646 629
647static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) 630static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
648{ 631{
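
The vmwgfx rework replaces the compile-time CONFIG_INTEL_IOMMU/CONFIG_X86 guards with a runtime test of the device's dma_map_ops and lets dma_set_mask_and_coherent() negotiate the widest usable mask. The mask-selection idiom on its own, as a sketch (restrict_dma stands in for the vmw_restrict_dma_mask module option):

	#include <linux/dma-mapping.h>

	static int foo_set_dma_masks(struct device *dev, bool restrict_dma)
	{
		int ret;

		/* ask for the widest mask first */
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret)
			return ret;

		/* 32-bit kernels, or an explicit option, narrow it */
		if (sizeof(unsigned long) == 4 || restrict_dma)
			return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));

		return 0;
	}
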
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3570 *p_fence = NULL; 3570 *p_fence = NULL;
3571 } 3571 }
3572 3572
3573 return 0; 3573 return ret;
3574} 3574}
3575 3575
3576/** 3576/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
1646 struct drm_connector_state *conn_state; 1646 struct drm_connector_state *conn_state;
1647 struct vmw_connector_state *vmw_conn_state; 1647 struct vmw_connector_state *vmw_conn_state;
1648 1648
1649 if (!du->pref_active) { 1649 if (!du->pref_active && new_crtc_state->enable) {
1650 ret = -EINVAL; 1650 ret = -EINVAL;
1651 goto clean; 1651 goto clean;
1652 } 1652 }
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2554 user_fence_rep) 2554 user_fence_rep)
2555{ 2555{
2556 struct vmw_fence_obj *fence = NULL; 2556 struct vmw_fence_obj *fence = NULL;
2557 uint32_t handle; 2557 uint32_t handle = 0;
2558 int ret; 2558 int ret = 0;
2559 2559
2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || 2560 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2561 out_fence) 2561 out_fence)
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 474b00e19697..0a7d4395d427 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = {
898 .cpmem_ofs = 0x1f000000, 898 .cpmem_ofs = 0x1f000000,
899 .srm_ofs = 0x1f040000, 899 .srm_ofs = 0x1f040000,
900 .tpm_ofs = 0x1f060000, 900 .tpm_ofs = 0x1f060000,
901 .csi0_ofs = 0x1f030000, 901 .csi0_ofs = 0x1e030000,
902 .csi1_ofs = 0x1f038000, 902 .csi1_ofs = 0x1e038000,
903 .ic_ofs = 0x1e020000, 903 .ic_ofs = 0x1e020000,
904 .disp0_ofs = 0x1e040000, 904 .disp0_ofs = 0x1e040000,
905 .disp1_ofs = 0x1e048000, 905 .disp1_ofs = 0x1e048000,
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = {
914 .cpmem_ofs = 0x07000000, 914 .cpmem_ofs = 0x07000000,
915 .srm_ofs = 0x07040000, 915 .srm_ofs = 0x07040000,
916 .tpm_ofs = 0x07060000, 916 .tpm_ofs = 0x07060000,
917 .csi0_ofs = 0x07030000, 917 .csi0_ofs = 0x06030000,
918 .csi1_ofs = 0x07038000, 918 .csi1_ofs = 0x06038000,
919 .ic_ofs = 0x06020000, 919 .ic_ofs = 0x06020000,
920 .disp0_ofs = 0x06040000, 920 .disp0_ofs = 0x06040000,
921 .disp1_ofs = 0x06048000, 921 .disp1_ofs = 0x06048000,
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index 2f8db9d62551..4a28f3fbb0a2 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -106,6 +106,7 @@ struct ipu_pre {
106 void *buffer_virt; 106 void *buffer_virt;
107 bool in_use; 107 bool in_use;
108 unsigned int safe_window_end; 108 unsigned int safe_window_end;
109 unsigned int last_bufaddr;
109}; 110};
110 111
111static DEFINE_MUTEX(ipu_pre_list_mutex); 112static DEFINE_MUTEX(ipu_pre_list_mutex);
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
185 186
186 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); 187 writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
187 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 188 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
189 pre->last_bufaddr = bufaddr;
188 190
189 val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | 191 val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
190 IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | 192 IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
242 unsigned short current_yblock; 244 unsigned short current_yblock;
243 u32 val; 245 u32 val;
244 246
247 if (bufaddr == pre->last_bufaddr)
248 return;
249
245 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); 250 writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
251 pre->last_bufaddr = bufaddr;
246 252
247 do { 253 do {
248 if (time_after(jiffies, timeout)) { 254 if (time_after(jiffies, timeout)) {
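
Caching the last programmed buffer address lets ipu_pre_update() return early when nothing changed, skipping both the register write and the busy-wait that follows it. The shadow-register idiom in general form (struct foo, the 0x24 offset and foo_update() are illustrative):

	#include <linux/io.h>
	#include <linux/types.h>

	struct foo {
		void __iomem *regs;
		u32 last_addr;		/* shadow of the NEXT_BUF register */
	};

	static void foo_update(struct foo *f, u32 addr)
	{
		if (addr == f->last_addr)
			return;		/* hardware already points there */

		writel(addr, f->regs + 0x24);	/* hypothetical offset */
		f->last_addr = addr;
		/* ... wait for the engine to latch the new buffer ... */
	}
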
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index b677e5d524e6..d5f1d8e1c6f8 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -21,6 +21,7 @@ config VGA_SWITCHEROO
21 bool "Laptop Hybrid Graphics - GPU switching support" 21 bool "Laptop Hybrid Graphics - GPU switching support"
22 depends on X86 22 depends on X86
23 depends on ACPI 23 depends on ACPI
24 depends on PCI
24 select VGA_ARB 25 select VGA_ARB
25 help 26 help
26 Many laptops released in 2008/9/10 have two GPUs with a multiplexer 27 Many laptops released in 2008/9/10 have two GPUs with a multiplexer
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f41d5fe51abe..9993b692598f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -125,6 +125,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
125{ 125{
126 struct hid_collection *collection; 126 struct hid_collection *collection;
127 unsigned usage; 127 unsigned usage;
128 int collection_index;
128 129
129 usage = parser->local.usage[0]; 130 usage = parser->local.usage[0];
130 131
@@ -167,13 +168,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
167 parser->collection_stack[parser->collection_stack_ptr++] = 168 parser->collection_stack[parser->collection_stack_ptr++] =
168 parser->device->maxcollection; 169 parser->device->maxcollection;
169 170
170 collection = parser->device->collection + 171 collection_index = parser->device->maxcollection++;
171 parser->device->maxcollection++; 172 collection = parser->device->collection + collection_index;
172 collection->type = type; 173 collection->type = type;
173 collection->usage = usage; 174 collection->usage = usage;
174 collection->level = parser->collection_stack_ptr - 1; 175 collection->level = parser->collection_stack_ptr - 1;
175 collection->parent = parser->active_collection; 176 collection->parent_idx = (collection->level == 0) ? -1 :
176 parser->active_collection = collection; 177 parser->collection_stack[collection->level - 1];
177 178
178 if (type == HID_COLLECTION_APPLICATION) 179 if (type == HID_COLLECTION_APPLICATION)
179 parser->device->maxapplication++; 180 parser->device->maxapplication++;
@@ -192,8 +193,6 @@ static int close_collection(struct hid_parser *parser)
192 return -EINVAL; 193 return -EINVAL;
193 } 194 }
194 parser->collection_stack_ptr--; 195 parser->collection_stack_ptr--;
195 if (parser->active_collection)
196 parser->active_collection = parser->active_collection->parent;
197 return 0; 196 return 0;
198} 197}
199 198
@@ -1006,10 +1005,12 @@ static void hid_apply_multiplier_to_field(struct hid_device *hid,
1006 usage = &field->usage[i]; 1005 usage = &field->usage[i];
1007 1006
1008 collection = &hid->collection[usage->collection_index]; 1007 collection = &hid->collection[usage->collection_index];
1009 while (collection && collection != multiplier_collection) 1008 while (collection->parent_idx != -1 &&
1010 collection = collection->parent; 1009 collection != multiplier_collection)
1010 collection = &hid->collection[collection->parent_idx];
1011 1011
1012 if (collection || multiplier_collection == NULL) 1012 if (collection->parent_idx != -1 ||
1013 multiplier_collection == NULL)
1013 usage->resolution_multiplier = effective_multiplier; 1014 usage->resolution_multiplier = effective_multiplier;
1014 1015
1015 } 1016 }
@@ -1044,9 +1045,9 @@ static void hid_apply_multiplier(struct hid_device *hid,
1044 * applicable fields later. 1045 * applicable fields later.
1045 */ 1046 */
1046 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1047 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1047 while (multiplier_collection && 1048 while (multiplier_collection->parent_idx != -1 &&
1048 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1049 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1049 multiplier_collection = multiplier_collection->parent; 1050 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1050 1051
1051 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1052 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1052 1053
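
hid-core stores a parent index instead of a parent pointer because hid->collection is grown with krealloc(), which may move the whole array and leave cached ->parent pointers dangling; an index stays valid across reallocation. A stripped-down sketch of walking such a chain (struct col and find_ancestor() are illustrative):

	struct col {
		int parent_idx;		/* -1 marks the root */
		unsigned int type;
	};

	static struct col *find_ancestor(struct col *table, int idx,
					 unsigned int type)
	{
		while (idx != -1 && table[idx].type != type)
			idx = table[idx].parent_idx;	/* hop to the parent */
		return idx == -1 ? NULL : &table[idx];
	}
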
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c530476edba6..ac9fda1b5a72 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
30 30
31#include <linux/debugfs.h> 31#include <linux/debugfs.h>
32#include <linux/seq_file.h> 32#include <linux/seq_file.h>
33#include <linux/kfifo.h>
33#include <linux/sched/signal.h> 34#include <linux/sched/signal.h>
34#include <linux/export.h> 35#include <linux/export.h>
35#include <linux/slab.h> 36#include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
661/* enqueue string to 'events' ring buffer */ 662/* enqueue string to 'events' ring buffer */
662void hid_debug_event(struct hid_device *hdev, char *buf) 663void hid_debug_event(struct hid_device *hdev, char *buf)
663{ 664{
664 unsigned i;
665 struct hid_debug_list *list; 665 struct hid_debug_list *list;
666 unsigned long flags; 666 unsigned long flags;
667 667
668 spin_lock_irqsave(&hdev->debug_list_lock, flags); 668 spin_lock_irqsave(&hdev->debug_list_lock, flags);
669 list_for_each_entry(list, &hdev->debug_list, node) { 669 list_for_each_entry(list, &hdev->debug_list, node)
670 for (i = 0; buf[i]; i++) 670 kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
671 list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
672 buf[i];
673 list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
674 }
675 spin_unlock_irqrestore(&hdev->debug_list_lock, flags); 671 spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
676 672
677 wake_up_interruptible(&hdev->debug_wait); 673 wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
722 hid_debug_event(hdev, buf); 718 hid_debug_event(hdev, buf);
723 719
724 kfree(buf); 720 kfree(buf);
725 wake_up_interruptible(&hdev->debug_wait); 721 wake_up_interruptible(&hdev->debug_wait);
726
727} 722}
728EXPORT_SYMBOL_GPL(hid_dump_input); 723EXPORT_SYMBOL_GPL(hid_dump_input);
729 724
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
1083 goto out; 1078 goto out;
1084 } 1079 }
1085 1080
1086 if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { 1081 err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
1087 err = -ENOMEM; 1082 if (err) {
1088 kfree(list); 1083 kfree(list);
1089 goto out; 1084 goto out;
1090 } 1085 }
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
1104 size_t count, loff_t *ppos) 1099 size_t count, loff_t *ppos)
1105{ 1100{
1106 struct hid_debug_list *list = file->private_data; 1101 struct hid_debug_list *list = file->private_data;
1107 int ret = 0, len; 1102 int ret = 0, copied;
1108 DECLARE_WAITQUEUE(wait, current); 1103 DECLARE_WAITQUEUE(wait, current);
1109 1104
1110 mutex_lock(&list->read_mutex); 1105 mutex_lock(&list->read_mutex);
1111 while (ret == 0) { 1106 if (kfifo_is_empty(&list->hid_debug_fifo)) {
1112 if (list->head == list->tail) { 1107 add_wait_queue(&list->hdev->debug_wait, &wait);
1113 add_wait_queue(&list->hdev->debug_wait, &wait); 1108 set_current_state(TASK_INTERRUPTIBLE);
1114 set_current_state(TASK_INTERRUPTIBLE); 1109
1115 1110 while (kfifo_is_empty(&list->hid_debug_fifo)) {
1116 while (list->head == list->tail) { 1111 if (file->f_flags & O_NONBLOCK) {
1117 if (file->f_flags & O_NONBLOCK) { 1112 ret = -EAGAIN;
1118 ret = -EAGAIN; 1113 break;
1119 break; 1114 }
1120 }
1121 if (signal_pending(current)) {
1122 ret = -ERESTARTSYS;
1123 break;
1124 }
1125 1115
1126 if (!list->hdev || !list->hdev->debug) { 1116 if (signal_pending(current)) {
1127 ret = -EIO; 1117 ret = -ERESTARTSYS;
1128 set_current_state(TASK_RUNNING); 1118 break;
1129 goto out; 1119 }
1130 }
1131 1120
1132 /* allow O_NONBLOCK from other threads */ 1121 /* if list->hdev is NULL we cannot remove_wait_queue().
1133 mutex_unlock(&list->read_mutex); 1122 * if list->hdev->debug is 0 then hid_debug_unregister()
1134 schedule(); 1123 * was already called and list->hdev is being destroyed.
1135 mutex_lock(&list->read_mutex); 1124 * if we add remove_wait_queue() here we can hit a race.
1136 set_current_state(TASK_INTERRUPTIBLE); 1125 */
1126 if (!list->hdev || !list->hdev->debug) {
1127 ret = -EIO;
1128 set_current_state(TASK_RUNNING);
1129 goto out;
1137 } 1130 }
1138 1131
1139 set_current_state(TASK_RUNNING); 1132 /* allow O_NONBLOCK from other threads */
1140 remove_wait_queue(&list->hdev->debug_wait, &wait); 1133 mutex_unlock(&list->read_mutex);
1134 schedule();
1135 mutex_lock(&list->read_mutex);
1136 set_current_state(TASK_INTERRUPTIBLE);
1141 } 1137 }
1142 1138
1143 if (ret) 1139 __set_current_state(TASK_RUNNING);
1144 goto out; 1140 remove_wait_queue(&list->hdev->debug_wait, &wait);
1145 1141
1146 /* pass the ringbuffer contents to userspace */ 1142 if (ret)
1147copy_rest:
1148 if (list->tail == list->head)
1149 goto out; 1143 goto out;
1150 if (list->tail > list->head) {
1151 len = list->tail - list->head;
1152 if (len > count)
1153 len = count;
1154
1155 if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
1156 ret = -EFAULT;
1157 goto out;
1158 }
1159 ret += len;
1160 list->head += len;
1161 } else {
1162 len = HID_DEBUG_BUFSIZE - list->head;
1163 if (len > count)
1164 len = count;
1165
1166 if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
1167 ret = -EFAULT;
1168 goto out;
1169 }
1170 list->head = 0;
1171 ret += len;
1172 count -= len;
1173 if (count > 0)
1174 goto copy_rest;
1175 }
1176
1177 } 1144 }
1145
 1146 /* pass the fifo content to userspace; locking is not needed with only
1147 * one concurrent reader and one concurrent writer
1148 */
1149 ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
1150 if (ret)
1151 goto out;
1152 ret = copied;
1178out: 1153out:
1179 mutex_unlock(&list->read_mutex); 1154 mutex_unlock(&list->read_mutex);
1180 return ret; 1155 return ret;
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
1185 struct hid_debug_list *list = file->private_data; 1160 struct hid_debug_list *list = file->private_data;
1186 1161
1187 poll_wait(file, &list->hdev->debug_wait, wait); 1162 poll_wait(file, &list->hdev->debug_wait, wait);
1188 if (list->head != list->tail) 1163 if (!kfifo_is_empty(&list->hid_debug_fifo))
1189 return EPOLLIN | EPOLLRDNORM; 1164 return EPOLLIN | EPOLLRDNORM;
1190 if (!list->hdev->debug) 1165 if (!list->hdev->debug)
1191 return EPOLLERR | EPOLLHUP; 1166 return EPOLLERR | EPOLLHUP;
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
1200 spin_lock_irqsave(&list->hdev->debug_list_lock, flags); 1175 spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
1201 list_del(&list->node); 1176 list_del(&list->node);
1202 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); 1177 spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
1203 kfree(list->hid_debug_buf); 1178 kfifo_free(&list->hid_debug_fifo);
1204 kfree(list); 1179 kfree(list);
1205 1180
1206 return 0; 1181 return 0;
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void)
1246{ 1221{
1247 debugfs_remove_recursive(hid_debug_root); 1222 debugfs_remove_recursive(hid_debug_root);
1248} 1223}
1249
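
The hid-debug rewrite swaps the hand-rolled head/tail buffer for a kfifo, which is safe with a single producer and a single consumer without extra locking and already knows how to copy straight to userspace. The three calls that replace the old index arithmetic, in a self-contained sketch (FIFO_SIZE and demo_read() are illustrative):

	#include <linux/kfifo.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	#define FIFO_SIZE 8192		/* rounded up to a power of two */

	static int demo_read(char __user *ubuf, size_t count, const char *msg)
	{
		struct kfifo fifo;
		unsigned int copied;
		int ret;

		ret = kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL);
		if (ret)
			return ret;

		kfifo_in(&fifo, msg, strlen(msg));	/* producer side */
		ret = kfifo_to_user(&fifo, ubuf, count, &copied);
		kfifo_free(&fifo);
		return ret ? ret : copied;
	}
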
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 518fa76414f5..24f846d67478 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -461,6 +461,9 @@
461#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a 461#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
462#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 462#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
463 463
464#define I2C_VENDOR_ID_GOODIX 0x27c6
465#define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
466
464#define USB_VENDOR_ID_GOODTOUCH 0x1aad 467#define USB_VENDOR_ID_GOODTOUCH 0x1aad
465#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f 468#define USB_DEVICE_ID_GOODTOUCH_000f 0x000f
466 469
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 8555ce7e737b..c5edfa966343 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -179,6 +179,8 @@ static const struct i2c_hid_quirks {
179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP }, 179 I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001, 180 { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
181 I2C_HID_QUIRK_NO_RUNTIME_PM }, 181 I2C_HID_QUIRK_NO_RUNTIME_PM },
182 { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
183 I2C_HID_QUIRK_NO_RUNTIME_PM },
182 { 0, 0 } 184 { 0, 0 }
183}; 185};
184 186
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index ce0ba2062723..bea4c9850247 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -701,19 +701,12 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
701int vmbus_disconnect_ring(struct vmbus_channel *channel) 701int vmbus_disconnect_ring(struct vmbus_channel *channel)
702{ 702{
703 struct vmbus_channel *cur_channel, *tmp; 703 struct vmbus_channel *cur_channel, *tmp;
704 unsigned long flags;
705 LIST_HEAD(list);
706 int ret; 704 int ret;
707 705
708 if (channel->primary_channel != NULL) 706 if (channel->primary_channel != NULL)
709 return -EINVAL; 707 return -EINVAL;
710 708
711 /* Snapshot the list of subchannels */ 709 list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
712 spin_lock_irqsave(&channel->lock, flags);
713 list_splice_init(&channel->sc_list, &list);
714 spin_unlock_irqrestore(&channel->lock, flags);
715
716 list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
717 if (cur_channel->rescind) 710 if (cur_channel->rescind)
718 wait_for_completion(&cur_channel->rescind_event); 711 wait_for_completion(&cur_channel->rescind_event);
719 712
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5301fef16c31..7c6349a50ef1 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start,
888 pfn_cnt -= pgs_ol; 888 pfn_cnt -= pgs_ol;
889 /* 889 /*
890 * Check if the corresponding memory block is already 890 * Check if the corresponding memory block is already
891 * online by checking its last previously backed page. 891 * online. It is possible to observe struct pages still
 892 * In case it is we need to bring rest (which was not 892 * being uninitialized here, so check the section instead.
 893 * backed previously) online too. 893 * In case the section is online, we need to bring the
894 * rest of pfns (which were not backed previously)
895 * online too.
894 */ 896 */
895 if (start_pfn > has->start_pfn && 897 if (start_pfn > has->start_pfn &&
896 !PageReserved(pfn_to_page(start_pfn - 1))) 898 online_section_nr(pfn_to_section_nr(start_pfn)))
897 hv_bring_pgs_online(has, start_pfn, pgs_ol); 899 hv_bring_pgs_online(has, start_pfn, pgs_ol);
898 900
899 } 901 }
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 64d0c85d5161..1f1a55e07733 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
164} 164}
165 165
166/* Get various debug metrics for the specified ring buffer. */ 166/* Get various debug metrics for the specified ring buffer. */
167void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, 167int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
168 struct hv_ring_buffer_debug_info *debug_info) 168 struct hv_ring_buffer_debug_info *debug_info)
169{ 169{
170 u32 bytes_avail_towrite; 170 u32 bytes_avail_towrite;
171 u32 bytes_avail_toread; 171 u32 bytes_avail_toread;
172 172
173 if (ring_info->ring_buffer) { 173 if (!ring_info->ring_buffer)
174 hv_get_ringbuffer_availbytes(ring_info, 174 return -EINVAL;
175 &bytes_avail_toread, 175
176 &bytes_avail_towrite); 176 hv_get_ringbuffer_availbytes(ring_info,
177 177 &bytes_avail_toread,
178 debug_info->bytes_avail_toread = bytes_avail_toread; 178 &bytes_avail_towrite);
179 debug_info->bytes_avail_towrite = bytes_avail_towrite; 179 debug_info->bytes_avail_toread = bytes_avail_toread;
180 debug_info->current_read_index = 180 debug_info->bytes_avail_towrite = bytes_avail_towrite;
181 ring_info->ring_buffer->read_index; 181 debug_info->current_read_index = ring_info->ring_buffer->read_index;
182 debug_info->current_write_index = 182 debug_info->current_write_index = ring_info->ring_buffer->write_index;
183 ring_info->ring_buffer->write_index; 183 debug_info->current_interrupt_mask
184 debug_info->current_interrupt_mask = 184 = ring_info->ring_buffer->interrupt_mask;
185 ring_info->ring_buffer->interrupt_mask; 185 return 0;
186 }
187} 186}
188EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); 187EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
189 188
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index d0ff65675292..403fee01572c 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev,
313{ 313{
314 struct hv_device *hv_dev = device_to_hv_device(dev); 314 struct hv_device *hv_dev = device_to_hv_device(dev);
315 struct hv_ring_buffer_debug_info outbound; 315 struct hv_ring_buffer_debug_info outbound;
316 int ret;
316 317
317 if (!hv_dev->channel) 318 if (!hv_dev->channel)
318 return -ENODEV; 319 return -ENODEV;
319 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 320
320 return -EINVAL; 321 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
321 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 322 &outbound);
323 if (ret < 0)
324 return ret;
325
322 return sprintf(buf, "%d\n", outbound.current_interrupt_mask); 326 return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
323} 327}
324static DEVICE_ATTR_RO(out_intr_mask); 328static DEVICE_ATTR_RO(out_intr_mask);
@@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev,
328{ 332{
329 struct hv_device *hv_dev = device_to_hv_device(dev); 333 struct hv_device *hv_dev = device_to_hv_device(dev);
330 struct hv_ring_buffer_debug_info outbound; 334 struct hv_ring_buffer_debug_info outbound;
335 int ret;
331 336
332 if (!hv_dev->channel) 337 if (!hv_dev->channel)
333 return -ENODEV; 338 return -ENODEV;
334 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 339
335 return -EINVAL; 340 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
336 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 341 &outbound);
342 if (ret < 0)
343 return ret;
337 return sprintf(buf, "%d\n", outbound.current_read_index); 344 return sprintf(buf, "%d\n", outbound.current_read_index);
338} 345}
339static DEVICE_ATTR_RO(out_read_index); 346static DEVICE_ATTR_RO(out_read_index);
@@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev,
344{ 351{
345 struct hv_device *hv_dev = device_to_hv_device(dev); 352 struct hv_device *hv_dev = device_to_hv_device(dev);
346 struct hv_ring_buffer_debug_info outbound; 353 struct hv_ring_buffer_debug_info outbound;
354 int ret;
347 355
348 if (!hv_dev->channel) 356 if (!hv_dev->channel)
349 return -ENODEV; 357 return -ENODEV;
350 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 358
351 return -EINVAL; 359 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
352 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 360 &outbound);
361 if (ret < 0)
362 return ret;
353 return sprintf(buf, "%d\n", outbound.current_write_index); 363 return sprintf(buf, "%d\n", outbound.current_write_index);
354} 364}
355static DEVICE_ATTR_RO(out_write_index); 365static DEVICE_ATTR_RO(out_write_index);
@@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
360{ 370{
361 struct hv_device *hv_dev = device_to_hv_device(dev); 371 struct hv_device *hv_dev = device_to_hv_device(dev);
362 struct hv_ring_buffer_debug_info outbound; 372 struct hv_ring_buffer_debug_info outbound;
373 int ret;
363 374
364 if (!hv_dev->channel) 375 if (!hv_dev->channel)
365 return -ENODEV; 376 return -ENODEV;
366 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 377
367 return -EINVAL; 378 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
368 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 379 &outbound);
380 if (ret < 0)
381 return ret;
369 return sprintf(buf, "%d\n", outbound.bytes_avail_toread); 382 return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
370} 383}
371static DEVICE_ATTR_RO(out_read_bytes_avail); 384static DEVICE_ATTR_RO(out_read_bytes_avail);
@@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
376{ 389{
377 struct hv_device *hv_dev = device_to_hv_device(dev); 390 struct hv_device *hv_dev = device_to_hv_device(dev);
378 struct hv_ring_buffer_debug_info outbound; 391 struct hv_ring_buffer_debug_info outbound;
392 int ret;
379 393
380 if (!hv_dev->channel) 394 if (!hv_dev->channel)
381 return -ENODEV; 395 return -ENODEV;
382 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 396
383 return -EINVAL; 397 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
384 hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); 398 &outbound);
399 if (ret < 0)
400 return ret;
385 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); 401 return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
386} 402}
387static DEVICE_ATTR_RO(out_write_bytes_avail); 403static DEVICE_ATTR_RO(out_write_bytes_avail);
@@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev,
391{ 407{
392 struct hv_device *hv_dev = device_to_hv_device(dev); 408 struct hv_device *hv_dev = device_to_hv_device(dev);
393 struct hv_ring_buffer_debug_info inbound; 409 struct hv_ring_buffer_debug_info inbound;
410 int ret;
394 411
395 if (!hv_dev->channel) 412 if (!hv_dev->channel)
396 return -ENODEV; 413 return -ENODEV;
397 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 414
398 return -EINVAL; 415 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
399 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 416 if (ret < 0)
417 return ret;
418
400 return sprintf(buf, "%d\n", inbound.current_interrupt_mask); 419 return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
401} 420}
402static DEVICE_ATTR_RO(in_intr_mask); 421static DEVICE_ATTR_RO(in_intr_mask);
@@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev,
406{ 425{
407 struct hv_device *hv_dev = device_to_hv_device(dev); 426 struct hv_device *hv_dev = device_to_hv_device(dev);
408 struct hv_ring_buffer_debug_info inbound; 427 struct hv_ring_buffer_debug_info inbound;
428 int ret;
409 429
410 if (!hv_dev->channel) 430 if (!hv_dev->channel)
411 return -ENODEV; 431 return -ENODEV;
412 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 432
413 return -EINVAL; 433 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
414 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 434 if (ret < 0)
435 return ret;
436
415 return sprintf(buf, "%d\n", inbound.current_read_index); 437 return sprintf(buf, "%d\n", inbound.current_read_index);
416} 438}
417static DEVICE_ATTR_RO(in_read_index); 439static DEVICE_ATTR_RO(in_read_index);
@@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev,
421{ 443{
422 struct hv_device *hv_dev = device_to_hv_device(dev); 444 struct hv_device *hv_dev = device_to_hv_device(dev);
423 struct hv_ring_buffer_debug_info inbound; 445 struct hv_ring_buffer_debug_info inbound;
446 int ret;
424 447
425 if (!hv_dev->channel) 448 if (!hv_dev->channel)
426 return -ENODEV; 449 return -ENODEV;
427 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 450
428 return -EINVAL; 451 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
429 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 452 if (ret < 0)
453 return ret;
454
430 return sprintf(buf, "%d\n", inbound.current_write_index); 455 return sprintf(buf, "%d\n", inbound.current_write_index);
431} 456}
432static DEVICE_ATTR_RO(in_write_index); 457static DEVICE_ATTR_RO(in_write_index);
@@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
437{ 462{
438 struct hv_device *hv_dev = device_to_hv_device(dev); 463 struct hv_device *hv_dev = device_to_hv_device(dev);
439 struct hv_ring_buffer_debug_info inbound; 464 struct hv_ring_buffer_debug_info inbound;
465 int ret;
440 466
441 if (!hv_dev->channel) 467 if (!hv_dev->channel)
442 return -ENODEV; 468 return -ENODEV;
443 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 469
444 return -EINVAL; 470 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
445 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 471 if (ret < 0)
472 return ret;
473
446 return sprintf(buf, "%d\n", inbound.bytes_avail_toread); 474 return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
447} 475}
448static DEVICE_ATTR_RO(in_read_bytes_avail); 476static DEVICE_ATTR_RO(in_read_bytes_avail);
@@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
453{ 481{
454 struct hv_device *hv_dev = device_to_hv_device(dev); 482 struct hv_device *hv_dev = device_to_hv_device(dev);
455 struct hv_ring_buffer_debug_info inbound; 483 struct hv_ring_buffer_debug_info inbound;
484 int ret;
456 485
457 if (!hv_dev->channel) 486 if (!hv_dev->channel)
458 return -ENODEV; 487 return -ENODEV;
459 if (hv_dev->channel->state != CHANNEL_OPENED_STATE) 488
460 return -EINVAL; 489 ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
461 hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); 490 if (ret < 0)
491 return ret;
492
462 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); 493 return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
463} 494}
464static DEVICE_ATTR_RO(in_write_bytes_avail); 495static DEVICE_ATTR_RO(in_write_bytes_avail);
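
hv_ringbuffer_get_debuginfo() now returns -EINVAL when the ring buffer was never allocated, instead of silently leaving the caller's struct uninitialized, and every sysfs show routine above propagates that. The resulting caller contract, condensed from the hunks:

	struct hv_ring_buffer_debug_info info;
	int ret;

	ret = hv_ringbuffer_get_debuginfo(&channel->outbound, &info);
	if (ret < 0)
		return ret;	/* ring not allocated: pass -EINVAL along */

	return sprintf(buf, "%d\n", info.current_interrupt_mask);
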
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 0e30fa00204c..f9b8e3e23a8e 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
393 } 393 }
394 394
395 rv = lm80_read_value(client, LM80_REG_FANDIV); 395 rv = lm80_read_value(client, LM80_REG_FANDIV);
396 if (rv < 0) 396 if (rv < 0) {
397 mutex_unlock(&data->update_lock);
397 return rv; 398 return rv;
399 }
398 reg = (rv & ~(3 << (2 * (nr + 1)))) 400 reg = (rv & ~(3 << (2 * (nr + 1))))
399 | (data->fan_div[nr] << (2 * (nr + 1))); 401 | (data->fan_div[nr] << (2 * (nr + 1)));
400 lm80_write_value(client, LM80_REG_FANDIV, reg); 402 lm80_write_value(client, LM80_REG_FANDIV, reg);
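
The lm80 fix adds the missing mutex_unlock() on the early-error path; without it, a failed register read returned with update_lock still held and every later access deadlocked. A common way to keep such paths balanced is a single exit label (foo_store() and read_reg() are illustrative):

	#include <linux/mutex.h>

	static int foo_store(struct foo *data)
	{
		int rv;

		mutex_lock(&data->lock);
		rv = read_reg(data);
		if (rv < 0)
			goto out;	/* error path unlocks too */

		/* ... use rv, write the result back ... */
		rv = 0;
	out:
		mutex_unlock(&data->lock);
		return rv;
	}
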
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index c3040079b1cb..59ee01f3d022 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -44,8 +44,8 @@
44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3 44 * nct6796d 14 7 7 2+6 0xd420 0xc1 0x5ca3
45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3 45 * nct6797d 14 7 7 2+6 0xd450 0xc1 0x5ca3
46 * (0xd451) 46 * (0xd451)
47 * nct6798d 14 7 7 2+6 0xd458 0xc1 0x5ca3 47 * nct6798d 14 7 7 2+6 0xd428 0xc1 0x5ca3
48 * (0xd459) 48 * (0xd429)
49 * 49 *
50 * #temp lists the number of monitored temperature sources (first value) plus 50 * #temp lists the number of monitored temperature sources (first value) plus
51 * the number of directly connectable temperature sensors (second value). 51 * the number of directly connectable temperature sensors (second value).
@@ -138,7 +138,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
138#define SIO_NCT6795_ID 0xd350 138#define SIO_NCT6795_ID 0xd350
139#define SIO_NCT6796_ID 0xd420 139#define SIO_NCT6796_ID 0xd420
140#define SIO_NCT6797_ID 0xd450 140#define SIO_NCT6797_ID 0xd450
141#define SIO_NCT6798_ID 0xd458 141#define SIO_NCT6798_ID 0xd428
142#define SIO_ID_MASK 0xFFF8 142#define SIO_ID_MASK 0xFFF8
143 143
144enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 }; 144enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
3594 fan5pin |= cr1b & BIT(5); 3594 fan5pin |= cr1b & BIT(5);
3595 fan5pin |= creb & BIT(5); 3595 fan5pin |= creb & BIT(5);
3596 3596
3597 fan6pin = creb & BIT(3); 3597 fan6pin = !dsw_en && (cr2d & BIT(1));
3598 fan6pin |= creb & BIT(3);
3598 3599
3599 pwm5pin |= cr2d & BIT(7); 3600 pwm5pin |= cr2d & BIT(7);
3600 pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); 3601 pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0));
@@ -4508,7 +4509,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
4508 4509
4509 if (data->kind == nct6791 || data->kind == nct6792 || 4510 if (data->kind == nct6791 || data->kind == nct6792 ||
4510 data->kind == nct6793 || data->kind == nct6795 || 4511 data->kind == nct6793 || data->kind == nct6795 ||
4511 data->kind == nct6796) 4512 data->kind == nct6796 || data->kind == nct6797 ||
4513 data->kind == nct6798)
4512 nct6791_enable_io_mapping(sioreg); 4514 nct6791_enable_io_mapping(sioreg);
4513 4515
4514 superio_exit(sioreg); 4516 superio_exit(sioreg);
@@ -4644,7 +4646,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
4644 4646
4645 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 || 4647 if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
4646 sio_data->kind == nct6793 || sio_data->kind == nct6795 || 4648 sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
4647 sio_data->kind == nct6796) 4649 sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
4650 sio_data->kind == nct6798)
4648 nct6791_enable_io_mapping(sioaddr); 4651 nct6791_enable_io_mapping(sioaddr);
4649 4652
4650 superio_exit(sioaddr); 4653 superio_exit(sioaddr);
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 423903f87955..391118c8aae8 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -380,8 +380,8 @@ static ssize_t occ_show_power_1(struct device *dev,
380 val *= 1000000ULL; 380 val *= 1000000ULL;
381 break; 381 break;
382 case 2: 382 case 2:
383 val = get_unaligned_be32(&power->update_tag) * 383 val = (u64)get_unaligned_be32(&power->update_tag) *
384 occ->powr_sample_time_us; 384 occ->powr_sample_time_us;
385 break; 385 break;
386 case 3: 386 case 3:
387 val = get_unaligned_be16(&power->value) * 1000000ULL; 387 val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -425,8 +425,8 @@ static ssize_t occ_show_power_2(struct device *dev,
425 &power->update_tag); 425 &power->update_tag);
426 break; 426 break;
427 case 2: 427 case 2:
428 val = get_unaligned_be32(&power->update_tag) * 428 val = (u64)get_unaligned_be32(&power->update_tag) *
429 occ->powr_sample_time_us; 429 occ->powr_sample_time_us;
430 break; 430 break;
431 case 3: 431 case 3:
432 val = get_unaligned_be16(&power->value) * 1000000ULL; 432 val = get_unaligned_be16(&power->value) * 1000000ULL;
@@ -463,8 +463,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
463 &power->system.update_tag); 463 &power->system.update_tag);
464 break; 464 break;
465 case 2: 465 case 2:
466 val = get_unaligned_be32(&power->system.update_tag) * 466 val = (u64)get_unaligned_be32(&power->system.update_tag) *
467 occ->powr_sample_time_us; 467 occ->powr_sample_time_us;
468 break; 468 break;
469 case 3: 469 case 3:
470 val = get_unaligned_be16(&power->system.value) * 1000000ULL; 470 val = get_unaligned_be16(&power->system.value) * 1000000ULL;
@@ -477,8 +477,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
477 &power->proc.update_tag); 477 &power->proc.update_tag);
478 break; 478 break;
479 case 6: 479 case 6:
480 val = get_unaligned_be32(&power->proc.update_tag) * 480 val = (u64)get_unaligned_be32(&power->proc.update_tag) *
481 occ->powr_sample_time_us; 481 occ->powr_sample_time_us;
482 break; 482 break;
483 case 7: 483 case 7:
484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL; 484 val = get_unaligned_be16(&power->proc.value) * 1000000ULL;
@@ -491,8 +491,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
491 &power->vdd.update_tag); 491 &power->vdd.update_tag);
492 break; 492 break;
493 case 10: 493 case 10:
494 val = get_unaligned_be32(&power->vdd.update_tag) * 494 val = (u64)get_unaligned_be32(&power->vdd.update_tag) *
495 occ->powr_sample_time_us; 495 occ->powr_sample_time_us;
496 break; 496 break;
497 case 11: 497 case 11:
498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL; 498 val = get_unaligned_be16(&power->vdd.value) * 1000000ULL;
@@ -505,8 +505,8 @@ static ssize_t occ_show_power_a0(struct device *dev,
505 &power->vdn.update_tag); 505 &power->vdn.update_tag);
506 break; 506 break;
507 case 14: 507 case 14:
508 val = get_unaligned_be32(&power->vdn.update_tag) * 508 val = (u64)get_unaligned_be32(&power->vdn.update_tag) *
509 occ->powr_sample_time_us; 509 occ->powr_sample_time_us;
510 break; 510 break;
511 case 15: 511 case 15:
512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL; 512 val = get_unaligned_be16(&power->vdn.value) * 1000000ULL;
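
Each occ hunk adds a (u64) cast because C evaluates a u32-by-u32 multiply in 32 bits; once the product exceeds about 4.29e9 it wraps before being widened for the 64-bit assignment. Casting one operand first forces the whole multiply into 64 bits:

	#include <linux/types.h>

	u32 tag = 5000000;			/* e.g. an update counter */
	u32 us_per_sample = 1000;

	u64 wrong = tag * us_per_sample;	/* 32-bit multiply: wraps */
	u64 right = (u64)tag * us_per_sample;	/* widened, then multiplied */
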
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8844c9565d2a..7053be59ad2e 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
88 .data = (void *)2 88 .data = (void *)2
89 }, 89 },
90 { 90 {
91 .compatible = "ti,tmp422", 91 .compatible = "ti,tmp442",
92 .data = (void *)3 92 .data = (void *)3
93 }, 93 },
94 { }, 94 { },
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index ec6e69aa3a8e..d2fbb4bb4a43 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev)
183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); 183 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c);
184} 184}
185 185
186static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev)
187{
188 i2c_dev->curr_msg = NULL;
189 i2c_dev->num_msgs = 0;
190
191 i2c_dev->msg_buf = NULL;
192 i2c_dev->msg_buf_remaining = 0;
193}
194
186/* 195/*
187 * Note about I2C_C_CLEAR on error: 196 * Note about I2C_C_CLEAR on error:
188 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in 197 * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
283 292
284 time_left = wait_for_completion_timeout(&i2c_dev->completion, 293 time_left = wait_for_completion_timeout(&i2c_dev->completion,
285 adap->timeout); 294 adap->timeout);
295
296 bcm2835_i2c_finish_transfer(i2c_dev);
297
286 if (!time_left) { 298 if (!time_left) {
287 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 299 bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C,
288 BCM2835_I2C_C_CLEAR); 300 BCM2835_I2C_C_CLEAR);
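
bcm2835_i2c_finish_transfer() clears the per-transfer bookkeeping right after the wait, before the timeout handling touches the controller, so a straggling interrupt can no longer chase stale msg_buf pointers. The ordering, as a sketch (foo_dev mirrors the fields in the hunk; foo_reset_controller() is hypothetical):

	static int foo_xfer_end(struct foo_dev *dev, unsigned long time_left)
	{
		/* first make the ISR see "no transfer in flight" */
		dev->curr_msg = NULL;
		dev->num_msgs = 0;
		dev->msg_buf = NULL;
		dev->msg_buf_remaining = 0;

		if (!time_left) {
			foo_reset_controller(dev);
			return -ETIMEDOUT;
		}
		return 0;
	}
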
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index b13605718291..d917cefc5a19 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
382 * Check for the message size against FIFO depth and set the 382 * Check for the message size against FIFO depth and set the
383 * 'hold bus' bit if it is greater than FIFO depth. 383 * 'hold bus' bit if it is greater than FIFO depth.
384 */ 384 */
385 if (id->recv_count > CDNS_I2C_FIFO_DEPTH) 385 if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
386 ctrl_reg |= CDNS_I2C_CR_HOLD; 386 ctrl_reg |= CDNS_I2C_CR_HOLD;
387 else
388 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
387 389
388 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 390 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
389 391
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
440 * Check for the message size against FIFO depth and set the 442 * Check for the message size against FIFO depth and set the
441 * 'hold bus' bit if it is greater than FIFO depth. 443 * 'hold bus' bit if it is greater than FIFO depth.
442 */ 444 */
443 if (id->send_count > CDNS_I2C_FIFO_DEPTH) 445 if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
444 ctrl_reg |= CDNS_I2C_CR_HOLD; 446 ctrl_reg |= CDNS_I2C_CR_HOLD;
447 else
448 ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
449
445 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); 450 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
446 451
447 /* Clear the interrupts in interrupt status register. */ 452 /* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b1086bfb0465..cd9c65f3d404 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1500 return 0; 1500 return 0;
1501} 1501}
1502 1502
1503#ifdef CONFIG_PM 1503static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
1504static int omap_i2c_runtime_suspend(struct device *dev)
1505{ 1504{
1506 struct omap_i2c_dev *omap = dev_get_drvdata(dev); 1505 struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1507 1506
@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
1527 return 0; 1526 return 0;
1528} 1527}
1529 1528
1530static int omap_i2c_runtime_resume(struct device *dev) 1529static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
1531{ 1530{
1532 struct omap_i2c_dev *omap = dev_get_drvdata(dev); 1531 struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1533 1532
@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
1542} 1541}
1543 1542
1544static const struct dev_pm_ops omap_i2c_pm_ops = { 1543static const struct dev_pm_ops omap_i2c_pm_ops = {
1544 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1545 pm_runtime_force_resume)
1545 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, 1546 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
1546 omap_i2c_runtime_resume, NULL) 1547 omap_i2c_runtime_resume, NULL)
1547}; 1548};
1548#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1549#else
1550#define OMAP_I2C_PM_OPS NULL
1551#endif /* CONFIG_PM */
1552 1549
1553static struct platform_driver omap_i2c_driver = { 1550static struct platform_driver omap_i2c_driver = {
1554 .probe = omap_i2c_probe, 1551 .probe = omap_i2c_probe,
1555 .remove = omap_i2c_remove, 1552 .remove = omap_i2c_remove,
1556 .driver = { 1553 .driver = {
1557 .name = "omap_i2c", 1554 .name = "omap_i2c",
1558 .pm = OMAP_I2C_PM_OPS, 1555 .pm = &omap_i2c_pm_ops,
1559 .of_match_table = of_match_ptr(omap_i2c_of_match), 1556 .of_match_table = of_match_ptr(omap_i2c_of_match),
1560 }, 1557 },
1561}; 1558};
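
Dropping the #ifdef CONFIG_PM block in favour of __maybe_unused means the callbacks are always compiled (so they cannot bit-rot in !PM configs), while the SET_*_PM_OPS macros expand to nothing when the corresponding Kconfig option is off and the compiler discards the unreferenced functions. The shape of the pattern, sketched for a hypothetical driver:

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* gate clocks, save context */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore context */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

/* .pm = &foo_pm_ops can now be referenced unconditionally. */
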
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e417ebf7628c..c77adbbea0c7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -155,6 +155,8 @@ enum msg_end_type {
155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that 155 * @has_mst_fifo: The I2C controller contains the new MST FIFO interface that
156 * provides additional features and allows for longer messages to 156 * provides additional features and allows for longer messages to
157 * be transferred in one go. 157 * be transferred in one go.
158 * @quirks: i2c adapter quirks for limiting write/read transfer size and not
159 * allowing 0 length transfers.
158 */ 160 */
159struct tegra_i2c_hw_feature { 161struct tegra_i2c_hw_feature {
160 bool has_continue_xfer_support; 162 bool has_continue_xfer_support;
@@ -167,6 +169,7 @@ struct tegra_i2c_hw_feature {
167 bool has_multi_master_mode; 169 bool has_multi_master_mode;
168 bool has_slcg_override_reg; 170 bool has_slcg_override_reg;
169 bool has_mst_fifo; 171 bool has_mst_fifo;
172 const struct i2c_adapter_quirks *quirks;
170}; 173};
171 174
172/** 175/**
@@ -837,6 +840,10 @@ static const struct i2c_adapter_quirks tegra_i2c_quirks = {
837 .max_write_len = 4096, 840 .max_write_len = 4096,
838}; 841};
839 842
843static const struct i2c_adapter_quirks tegra194_i2c_quirks = {
844 .flags = I2C_AQ_NO_ZERO_LEN,
845};
846
840static const struct tegra_i2c_hw_feature tegra20_i2c_hw = { 847static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
841 .has_continue_xfer_support = false, 848 .has_continue_xfer_support = false,
842 .has_per_pkt_xfer_complete_irq = false, 849 .has_per_pkt_xfer_complete_irq = false,
@@ -848,6 +855,7 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
848 .has_multi_master_mode = false, 855 .has_multi_master_mode = false,
849 .has_slcg_override_reg = false, 856 .has_slcg_override_reg = false,
850 .has_mst_fifo = false, 857 .has_mst_fifo = false,
858 .quirks = &tegra_i2c_quirks,
851}; 859};
852 860
853static const struct tegra_i2c_hw_feature tegra30_i2c_hw = { 861static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -861,6 +869,7 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
861 .has_multi_master_mode = false, 869 .has_multi_master_mode = false,
862 .has_slcg_override_reg = false, 870 .has_slcg_override_reg = false,
863 .has_mst_fifo = false, 871 .has_mst_fifo = false,
872 .quirks = &tegra_i2c_quirks,
864}; 873};
865 874
866static const struct tegra_i2c_hw_feature tegra114_i2c_hw = { 875static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -874,6 +883,7 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
874 .has_multi_master_mode = false, 883 .has_multi_master_mode = false,
875 .has_slcg_override_reg = false, 884 .has_slcg_override_reg = false,
876 .has_mst_fifo = false, 885 .has_mst_fifo = false,
886 .quirks = &tegra_i2c_quirks,
877}; 887};
878 888
879static const struct tegra_i2c_hw_feature tegra124_i2c_hw = { 889static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -887,6 +897,7 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
887 .has_multi_master_mode = false, 897 .has_multi_master_mode = false,
888 .has_slcg_override_reg = true, 898 .has_slcg_override_reg = true,
889 .has_mst_fifo = false, 899 .has_mst_fifo = false,
900 .quirks = &tegra_i2c_quirks,
890}; 901};
891 902
892static const struct tegra_i2c_hw_feature tegra210_i2c_hw = { 903static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
@@ -900,6 +911,7 @@ static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
900 .has_multi_master_mode = true, 911 .has_multi_master_mode = true,
901 .has_slcg_override_reg = true, 912 .has_slcg_override_reg = true,
902 .has_mst_fifo = false, 913 .has_mst_fifo = false,
914 .quirks = &tegra_i2c_quirks,
903}; 915};
904 916
905static const struct tegra_i2c_hw_feature tegra194_i2c_hw = { 917static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
@@ -913,6 +925,7 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
913 .has_multi_master_mode = true, 925 .has_multi_master_mode = true,
914 .has_slcg_override_reg = true, 926 .has_slcg_override_reg = true,
915 .has_mst_fifo = true, 927 .has_mst_fifo = true,
928 .quirks = &tegra194_i2c_quirks,
916}; 929};
917 930
918/* Match table for of_platform binding */ 931/* Match table for of_platform binding */
@@ -964,7 +977,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
964 i2c_dev->base = base; 977 i2c_dev->base = base;
965 i2c_dev->div_clk = div_clk; 978 i2c_dev->div_clk = div_clk;
966 i2c_dev->adapter.algo = &tegra_i2c_algo; 979 i2c_dev->adapter.algo = &tegra_i2c_algo;
967 i2c_dev->adapter.quirks = &tegra_i2c_quirks;
968 i2c_dev->irq = irq; 980 i2c_dev->irq = irq;
969 i2c_dev->cont_id = pdev->id; 981 i2c_dev->cont_id = pdev->id;
970 i2c_dev->dev = &pdev->dev; 982 i2c_dev->dev = &pdev->dev;
@@ -980,6 +992,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
980 i2c_dev->hw = of_device_get_match_data(&pdev->dev); 992 i2c_dev->hw = of_device_get_match_data(&pdev->dev);
981 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node, 993 i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
982 "nvidia,tegra20-i2c-dvc"); 994 "nvidia,tegra20-i2c-dvc");
995 i2c_dev->adapter.quirks = i2c_dev->hw->quirks;
983 init_completion(&i2c_dev->msg_complete); 996 init_completion(&i2c_dev->msg_complete);
984 spin_lock_init(&i2c_dev->xfer_lock); 997 spin_lock_init(&i2c_dev->xfer_lock);
985 998
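
Each SoC's feature struct now carries its own quirks pointer, so probe picks the right adapter quirks from the OF match data instead of hard-coding one table for every generation. Reduced to its essentials (hypothetical names):

static const struct i2c_adapter_quirks legacy_quirks = {
	.max_read_len	= 4096,
	.max_write_len	= 4096,
};

static const struct i2c_adapter_quirks no_zero_len_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,	/* controller rejects 0-byte transfers */
};

struct my_hw_feature {
	const struct i2c_adapter_quirks *quirks;
	/* ...other per-SoC capability flags... */
};

/* in probe(), after matching against the OF table: */
	const struct my_hw_feature *hw = of_device_get_match_data(&pdev->dev);

	i2c_dev->adapter.quirks = hw->quirks;
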
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 1aca742fde4a..ccd76c71af09 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -470,9 +470,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
470 data_arg.data); 470 data_arg.data);
471 } 471 }
472 case I2C_RETRIES: 472 case I2C_RETRIES:
473 if (arg > INT_MAX)
474 return -EINVAL;
475
473 client->adapter->retries = arg; 476 client->adapter->retries = arg;
474 break; 477 break;
475 case I2C_TIMEOUT: 478 case I2C_TIMEOUT:
479 if (arg > INT_MAX)
480 return -EINVAL;
481
476 /* For historical reasons, user-space sets the timeout 482 /* For historical reasons, user-space sets the timeout
477 * value in units of 10 ms. 483 * value in units of 10 ms.
478 */ 484 */
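
adapter->retries and adapter->timeout are plain ints while the ioctl argument arrives as an unsigned long, so anything above INT_MAX would truncate or go negative on assignment. The added checks follow the usual defensive shape for such ioctls, sketched generically (hypothetical helper):

static long store_int_arg(unsigned long arg, int *field)
{
	if (arg > INT_MAX)
		return -EINVAL;
	*field = arg;	/* provably within [0, INT_MAX] now */
	return 0;
}
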
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c39f89d2deba..2dc628d4f1ae 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
1828 1828
1829 ret = i3c_master_retrieve_dev_info(newdev); 1829 ret = i3c_master_retrieve_dev_info(newdev);
1830 if (ret) 1830 if (ret)
1831 goto err_free_dev; 1831 goto err_detach_dev;
1832 1832
1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev); 1833 olddev = i3c_master_search_i3c_dev_duplicate(newdev);
1834 if (olddev) { 1834 if (olddev) {
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index b532e2c9cf5c..bb03079fbade 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
419 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 419 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
420} 420}
421 421
422static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, 422static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
423 struct dw_i3c_xfer *xfer) 423 struct dw_i3c_xfer *xfer)
424{ 424{
425 unsigned long flags;
426
427 spin_lock_irqsave(&master->xferqueue.lock, flags);
428 if (master->xferqueue.cur == xfer) { 425 if (master->xferqueue.cur == xfer) {
429 u32 status; 426 u32 status;
430 427
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
439 } else { 436 } else {
440 list_del_init(&xfer->node); 437 list_del_init(&xfer->node);
441 } 438 }
439}
440
441static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
442 struct dw_i3c_xfer *xfer)
443{
444 unsigned long flags;
445
446 spin_lock_irqsave(&master->xferqueue.lock, flags);
447 dw_i3c_master_dequeue_xfer_locked(master, xfer);
442 spin_unlock_irqrestore(&master->xferqueue.lock, flags); 448 spin_unlock_irqrestore(&master->xferqueue.lock, flags);
443} 449}
444 450
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
494 complete(&xfer->comp); 500 complete(&xfer->comp);
495 501
496 if (ret < 0) { 502 if (ret < 0) {
497 dw_i3c_master_dequeue_xfer(master, xfer); 503 dw_i3c_master_dequeue_xfer_locked(master, xfer);
498 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, 504 writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
499 master->regs + DEVICE_CTRL); 505 master->regs + DEVICE_CTRL);
500 } 506 }
@@ -901,9 +907,6 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
901 master->regs + 907 master->regs +
902 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 908 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
903 909
904 if (!old_dyn_addr)
905 return 0;
906
907 master->addrs[data->index] = dev->info.dyn_addr; 910 master->addrs[data->index] = dev->info.dyn_addr;
908 911
909 return 0; 912 return 0;
@@ -925,11 +928,11 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
925 return -ENOMEM; 928 return -ENOMEM;
926 929
927 data->index = pos; 930 data->index = pos;
928 master->addrs[pos] = dev->info.dyn_addr; 931 master->addrs[pos] = dev->info.dyn_addr ? : dev->info.static_addr;
929 master->free_pos &= ~BIT(pos); 932 master->free_pos &= ~BIT(pos);
930 i3c_dev_set_master_data(dev, data); 933 i3c_dev_set_master_data(dev, data);
931 934
932 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), 935 writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->addrs[pos]),
933 master->regs + 936 master->regs +
934 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); 937 DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
935 938
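
The first half of this diff exists because dw_i3c_master_end_xfer_locked() runs from the interrupt path with xferqueue.lock already held; calling the locking dequeue helper from there would self-deadlock, since kernel spinlocks are not recursive. The usual cure is the foo()/foo_locked() split, sketched here with hypothetical types:

struct xfer { struct list_head node; };
struct master { spinlock_t lock; };

static void dequeue_locked(struct master *m, struct xfer *x)
{
	/* Caller must hold m->lock. */
	list_del_init(&x->node);
}

static void dequeue(struct master *m, struct xfer *x)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	dequeue_locked(m, x);
	spin_unlock_irqrestore(&m->lock, flags);
}

The second half is independent: on attach, the device address table is programmed with dev->info.dyn_addr ?: dev->info.static_addr, so a device that has not yet been assigned a dynamic address is still reachable at its static one.
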
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index bbd79b8b1a80..8889a4fdb454 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -1556,8 +1556,8 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
1556 return PTR_ERR(master->pclk); 1556 return PTR_ERR(master->pclk);
1557 1557
1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk"); 1558 master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
1559 if (IS_ERR(master->pclk)) 1559 if (IS_ERR(master->sysclk))
1560 return PTR_ERR(master->pclk); 1560 return PTR_ERR(master->sysclk);
1561 1561
1562 irq = platform_get_irq(pdev, 0); 1562 irq = platform_get_irq(pdev, 0);
1563 if (irq < 0) 1563 if (irq < 0)
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index da58020a144e..33a28cde126c 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
235 235
236int ide_queue_sense_rq(ide_drive_t *drive, void *special) 236int ide_queue_sense_rq(ide_drive_t *drive, void *special)
237{ 237{
238 struct request *sense_rq = drive->sense_rq; 238 ide_hwif_t *hwif = drive->hwif;
239 struct request *sense_rq;
240 unsigned long flags;
241
242 spin_lock_irqsave(&hwif->lock, flags);
239 243
240 /* deferred failure from ide_prep_sense() */ 244 /* deferred failure from ide_prep_sense() */
241 if (!drive->sense_rq_armed) { 245 if (!drive->sense_rq_armed) {
242 printk(KERN_WARNING PFX "%s: error queuing a sense request\n", 246 printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
243 drive->name); 247 drive->name);
248 spin_unlock_irqrestore(&hwif->lock, flags);
244 return -ENOMEM; 249 return -ENOMEM;
245 } 250 }
246 251
252 sense_rq = drive->sense_rq;
247 ide_req(sense_rq)->special = special; 253 ide_req(sense_rq)->special = special;
248 drive->sense_rq_armed = false; 254 drive->sense_rq_armed = false;
249 255
250 drive->hwif->rq = NULL; 256 drive->hwif->rq = NULL;
251 257
252 ide_insert_request_head(drive, sense_rq); 258 ide_insert_request_head(drive, sense_rq);
259 spin_unlock_irqrestore(&hwif->lock, flags);
253 return 0; 260 return 0;
254} 261}
255EXPORT_SYMBOL_GPL(ide_queue_sense_rq); 262EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 8445b484ae69..b137f27a34d5 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
68 } 68 }
69 69
70 if (!blk_update_request(rq, error, nr_bytes)) { 70 if (!blk_update_request(rq, error, nr_bytes)) {
71 if (rq == drive->sense_rq) 71 if (rq == drive->sense_rq) {
72 drive->sense_rq = NULL; 72 drive->sense_rq = NULL;
73 drive->sense_rq_active = false;
74 }
73 75
74 __blk_mq_end_request(rq, error); 76 __blk_mq_end_request(rq, error);
75 return 0; 77 return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
451 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); 453 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
452} 454}
453 455
454/* 456blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
455 * Issue a new request to a device. 457 bool local_requeue)
456 */
457blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
458 const struct blk_mq_queue_data *bd)
459{ 458{
460 ide_drive_t *drive = hctx->queue->queuedata; 459 ide_hwif_t *hwif = drive->hwif;
461 ide_hwif_t *hwif = drive->hwif;
462 struct ide_host *host = hwif->host; 460 struct ide_host *host = hwif->host;
463 struct request *rq = bd->rq;
464 ide_startstop_t startstop; 461 ide_startstop_t startstop;
465 462
466 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) { 463 if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
474 if (ide_lock_host(host, hwif)) 471 if (ide_lock_host(host, hwif))
475 return BLK_STS_DEV_RESOURCE; 472 return BLK_STS_DEV_RESOURCE;
476 473
477 blk_mq_start_request(rq);
478
479 spin_lock_irq(&hwif->lock); 474 spin_lock_irq(&hwif->lock);
480 475
481 if (!ide_lock_port(hwif)) { 476 if (!ide_lock_port(hwif)) {
@@ -511,18 +506,6 @@ repeat:
511 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); 506 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
512 507
513 /* 508 /*
514 * we know that the queue isn't empty, but this can happen
515 * if ->prep_rq() decides to kill a request
516 */
517 if (!rq) {
518 rq = bd->rq;
519 if (!rq) {
520 ide_unlock_port(hwif);
521 goto out;
522 }
523 }
524
525 /*
526 * Sanity: don't accept a request that isn't a PM request 509 * Sanity: don't accept a request that isn't a PM request
527 * if we are currently power managed. This is very important as 510 * if we are currently power managed. This is very important as
528 * blk_stop_queue() doesn't prevent the blk_fetch_request() 511 * blk_stop_queue() doesn't prevent the blk_fetch_request()
@@ -560,9 +543,12 @@ repeat:
560 } 543 }
561 } else { 544 } else {
562plug_device: 545plug_device:
546 if (local_requeue)
547 list_add(&rq->queuelist, &drive->rq_list);
563 spin_unlock_irq(&hwif->lock); 548 spin_unlock_irq(&hwif->lock);
564 ide_unlock_host(host); 549 ide_unlock_host(host);
565 ide_requeue_and_plug(drive, rq); 550 if (!local_requeue)
551 ide_requeue_and_plug(drive, rq);
566 return BLK_STS_OK; 552 return BLK_STS_OK;
567 } 553 }
568 554
@@ -573,6 +559,26 @@ out:
573 return BLK_STS_OK; 559 return BLK_STS_OK;
574} 560}
575 561
562/*
563 * Issue a new request to a device.
564 */
565blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
566 const struct blk_mq_queue_data *bd)
567{
568 ide_drive_t *drive = hctx->queue->queuedata;
569 ide_hwif_t *hwif = drive->hwif;
570
571 spin_lock_irq(&hwif->lock);
572 if (drive->sense_rq_active) {
573 spin_unlock_irq(&hwif->lock);
574 return BLK_STS_DEV_RESOURCE;
575 }
576 spin_unlock_irq(&hwif->lock);
577
578 blk_mq_start_request(bd->rq);
579 return ide_issue_rq(drive, bd->rq, false);
580}
581
576static int drive_is_ready(ide_drive_t *drive) 582static int drive_is_ready(ide_drive_t *drive)
577{ 583{
578 ide_hwif_t *hwif = drive->hwif; 584 ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
893 899
894void ide_insert_request_head(ide_drive_t *drive, struct request *rq) 900void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
895{ 901{
896 ide_hwif_t *hwif = drive->hwif; 902 drive->sense_rq_active = true;
897 unsigned long flags;
898
899 spin_lock_irqsave(&hwif->lock, flags);
900 list_add_tail(&rq->queuelist, &drive->rq_list); 903 list_add_tail(&rq->queuelist, &drive->rq_list);
901 spin_unlock_irqrestore(&hwif->lock, flags);
902
903 kblockd_schedule_work(&drive->rq_work); 904 kblockd_schedule_work(&drive->rq_work);
904} 905}
905EXPORT_SYMBOL_GPL(ide_insert_request_head); 906EXPORT_SYMBOL_GPL(ide_insert_request_head);
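
Taken together with the ide-atapi hunk above, this refactor does two things: request issue moves out of the blk-mq ->queue_rq() hook into ide_issue_rq(), so the driver's own requeue path can call it directly, and new requests are gated on sense_rq_active so a queued sense request always runs before anything else. Schematically (reduced, hypothetical types):

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_drive *drive = hctx->queue->queuedata;

	spin_lock_irq(&drive->lock);
	if (drive->sense_rq_active) {
		/* A sense request is pending; tell blk-mq to back off
		 * and retry this request later. */
		spin_unlock_irq(&drive->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&drive->lock);

	blk_mq_start_request(bd->rq);
	return my_issue_rq(drive, bd->rq, false);
}
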
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 102aa3bc3e7f..8af7af6001eb 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS; 54 scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
55 scsi_req(rq)->cmd_len = 1; 55 scsi_req(rq)->cmd_len = 1;
56 ide_req(rq)->type = ATA_PRIV_MISC; 56 ide_req(rq)->type = ATA_PRIV_MISC;
57 spin_lock_irq(&hwif->lock);
57 ide_insert_request_head(drive, rq); 58 ide_insert_request_head(drive, rq);
59 spin_unlock_irq(&hwif->lock);
58 60
59out: 61out:
60 return; 62 return;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 63627be0811a..5aeaca24a28f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work); 1159 ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
1160 ide_hwif_t *hwif = drive->hwif; 1160 ide_hwif_t *hwif = drive->hwif;
1161 struct request *rq; 1161 struct request *rq;
1162 blk_status_t ret;
1162 LIST_HEAD(list); 1163 LIST_HEAD(list);
1163 1164
1164 spin_lock_irq(&hwif->lock); 1165 blk_mq_quiesce_queue(drive->queue);
1165 if (!list_empty(&drive->rq_list))
1166 list_splice_init(&drive->rq_list, &list);
1167 spin_unlock_irq(&hwif->lock);
1168 1166
1169 while (!list_empty(&list)) { 1167 ret = BLK_STS_OK;
1170 rq = list_first_entry(&list, struct request, queuelist); 1168 spin_lock_irq(&hwif->lock);
1169 while (!list_empty(&drive->rq_list)) {
1170 rq = list_first_entry(&drive->rq_list, struct request, queuelist);
1171 list_del_init(&rq->queuelist); 1171 list_del_init(&rq->queuelist);
1172 blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL); 1172
1173 spin_unlock_irq(&hwif->lock);
1174 ret = ide_issue_rq(drive, rq, true);
1175 spin_lock_irq(&hwif->lock);
1173 } 1176 }
1177 spin_unlock_irq(&hwif->lock);
1178
1179 blk_mq_unquiesce_queue(drive->queue);
1180
1181 if (ret != BLK_STS_OK)
1182 kblockd_schedule_work(&drive->rq_work);
1174} 1183}
1175 1184
1176static const u8 ide_hwif_to_major[] = 1185static const u8 ide_hwif_to_major[] =
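
The work function now drains drive->rq_list itself, issuing each request via ide_issue_rq() with the queue quiesced so it cannot race a concurrent ->queue_rq(), and re-schedules itself if an issue attempt fails. The quiesce/drain/unquiesce shape in isolation (hypothetical names; the lock is dropped around the issue call, which may take other locks):

static void drain_deferred(struct my_drive *drive)
{
	struct request *rq;

	blk_mq_quiesce_queue(drive->queue);	/* no new ->queue_rq() calls */

	spin_lock_irq(&drive->lock);
	while (!list_empty(&drive->rq_list)) {
		rq = list_first_entry(&drive->rq_list,
				      struct request, queuelist);
		list_del_init(&rq->queuelist);

		spin_unlock_irq(&drive->lock);
		my_issue_rq(drive, rq, true);
		spin_lock_irq(&drive->lock);
	}
	spin_unlock_irq(&drive->lock);

	blk_mq_unquiesce_queue(drive->queue);
}
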
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 4c8c7a620d08..a5dc13576394 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif)
544 drive->proc = proc_mkdir(drive->name, parent); 544 drive->proc = proc_mkdir(drive->name, parent);
545 if (drive->proc) { 545 if (drive->proc) {
546 ide_add_proc_entries(drive->proc, generic_drive_entries, drive); 546 ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
547 proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, 547 proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR,
548 drive->proc, &ide_settings_proc_fops, 548 drive->proc, &ide_settings_proc_fops,
549 drive); 549 drive);
550 } 550 }
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 031d568b4972..4e339cfd0c54 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -27,9 +27,18 @@
27#include <linux/iio/machine.h> 27#include <linux/iio/machine.h>
28#include <linux/iio/driver.h> 28#include <linux/iio/driver.h>
29 29
30#define AXP288_ADC_EN_MASK 0xF1 30/*
31#define AXP288_ADC_TS_PIN_GPADC 0xF2 31 * This mask enables all ADCs except for the battery temp-sensor (TS), that is
32#define AXP288_ADC_TS_PIN_ON 0xF3 32 * left as-is to avoid breaking charging on devices without a temp-sensor.
33 */
34#define AXP288_ADC_EN_MASK 0xF0
35#define AXP288_ADC_TS_ENABLE 0x01
36
37#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
38#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
39#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
40#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
41#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
33 42
34enum axp288_adc_id { 43enum axp288_adc_id {
35 AXP288_ADC_TS, 44 AXP288_ADC_TS,
@@ -44,6 +53,7 @@ enum axp288_adc_id {
44struct axp288_adc_info { 53struct axp288_adc_info {
45 int irq; 54 int irq;
46 struct regmap *regmap; 55 struct regmap *regmap;
56 bool ts_enabled;
47}; 57};
48 58
49static const struct iio_chan_spec axp288_adc_channels[] = { 59static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
115 return IIO_VAL_INT; 125 return IIO_VAL_INT;
116} 126}
117 127
118static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, 128/*
119 unsigned long address) 129 * The current-source used for the battery temp-sensor (TS) is shared
130 * with the GPADC. For proper fuel-gauge and charger operation the TS
131 * current-source needs to be permanently on. But to read the GPADC we
132 * need to temporarily switch the TS current-source to ondemand, so that
133 * the GPADC can use it; otherwise we will always read an all-0 value.
134 */
135static int axp288_adc_set_ts(struct axp288_adc_info *info,
136 unsigned int mode, unsigned long address)
120{ 137{
121 int ret; 138 int ret;
122 139
123 /* channels other than GPADC do not need to switch TS pin */ 140 /* No need to switch the current-source if the TS pin is disabled */
141 if (!info->ts_enabled)
142 return 0;
143
144 /* Channels other than GPADC do not need the current source */
124 if (address != AXP288_GP_ADC_H) 145 if (address != AXP288_GP_ADC_H)
125 return 0; 146 return 0;
126 147
127 ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); 148 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
149 AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
128 if (ret) 150 if (ret)
129 return ret; 151 return ret;
130 152
131 /* When switching to the GPADC pin give things some time to settle */ 153 /* When switching to the GPADC pin give things some time to settle */
132 if (mode == AXP288_ADC_TS_PIN_GPADC) 154 if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
133 usleep_range(6000, 10000); 155 usleep_range(6000, 10000);
134 156
135 return 0; 157 return 0;
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
145 mutex_lock(&indio_dev->mlock); 167 mutex_lock(&indio_dev->mlock);
146 switch (mask) { 168 switch (mask) {
147 case IIO_CHAN_INFO_RAW: 169 case IIO_CHAN_INFO_RAW:
148 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, 170 if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
149 chan->address)) { 171 chan->address)) {
150 dev_err(&indio_dev->dev, "GPADC mode\n"); 172 dev_err(&indio_dev->dev, "GPADC mode\n");
151 ret = -EINVAL; 173 ret = -EINVAL;
152 break; 174 break;
153 } 175 }
154 ret = axp288_adc_read_channel(val, chan->address, info->regmap); 176 ret = axp288_adc_read_channel(val, chan->address, info->regmap);
155 if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, 177 if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
156 chan->address)) 178 chan->address))
157 dev_err(&indio_dev->dev, "TS pin restore\n"); 179 dev_err(&indio_dev->dev, "TS pin restore\n");
158 break; 180 break;
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
164 return ret; 186 return ret;
165} 187}
166 188
167static int axp288_adc_set_state(struct regmap *regmap) 189static int axp288_adc_initialize(struct axp288_adc_info *info)
168{ 190{
169 /* ADC should be always enabled for internal FG to function */ 191 int ret, adc_enable_val;
170 if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) 192
171 return -EIO; 193 /*
194 * Determine if the TS pin is enabled and set the TS current-source
195 * accordingly.
196 */
197 ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
198 if (ret)
199 return ret;
200
201 if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
202 info->ts_enabled = true;
203 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
204 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
205 AXP288_ADC_TS_CURRENT_ON);
206 } else {
207 info->ts_enabled = false;
208 ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
209 AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
210 AXP288_ADC_TS_CURRENT_OFF);
211 }
212 if (ret)
213 return ret;
172 214
173 return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); 215 /* Turn on the ADC for all channels except TS, leave TS as is */
216 return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
217 AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
174} 218}
175 219
176static const struct iio_info axp288_adc_iio_info = { 220static const struct iio_info axp288_adc_iio_info = {
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
200 * Set ADC to enabled state at all time, including system suspend. 244 * Set ADC to enabled state at all time, including system suspend.
201 * otherwise internal fuel gauge functionality may be affected. 245 * otherwise internal fuel gauge functionality may be affected.
202 */ 246 */
203 ret = axp288_adc_set_state(axp20x->regmap); 247 ret = axp288_adc_initialize(info);
204 if (ret) { 248 if (ret) {
205 dev_err(&pdev->dev, "unable to enable ADC device\n"); 249 dev_err(&pdev->dev, "unable to enable ADC device\n");
206 return ret; 250 return ret;
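
The load-bearing change here is from regmap_write(), which overwrites the whole register, to regmap_update_bits(), a read-modify-write that touches only the masked bits. That is what lets the driver flip the TS current-source field without disturbing the neighbouring bits of the same register. The call shape:

	/* Equivalent to: v = read(reg); v = (v & ~mask) | (val & mask);
	 * write(reg, v); -- serialized against other users of the regmap. */
	ret = regmap_update_bits(regmap, reg, mask, val);
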
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 184d686ebd99..8b4568edd5cb 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -41,6 +41,7 @@
41 41
42#define ADS8688_VREF_MV 4096 42#define ADS8688_VREF_MV 4096
43#define ADS8688_REALBITS 16 43#define ADS8688_REALBITS 16
44#define ADS8688_MAX_CHANNELS 8
44 45
45/* 46/*
46 * enum ads8688_range - ADS8688 reference voltage range 47 * enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
385{ 386{
386 struct iio_poll_func *pf = p; 387 struct iio_poll_func *pf = p;
387 struct iio_dev *indio_dev = pf->indio_dev; 388 struct iio_dev *indio_dev = pf->indio_dev;
388 u16 buffer[8]; 389 u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
389 int i, j = 0; 390 int i, j = 0;
390 391
391 for (i = 0; i < indio_dev->masklength; i++) { 392 for (i = 0; i < indio_dev->masklength; i++) {
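
The old u16 buffer[8] left no room for the s64 timestamp that the IIO core appends (8-byte aligned) after the channel data, so a full 8-channel scan with timestamping overran the stack buffer. The enlarged array fixes that; an arguably clearer equivalent, sketched here as a hypothetical rewrite with the alignment made explicit, is a struct-shaped scan buffer:

struct ads8688_scan {
	u16 channels[ADS8688_MAX_CHANNELS];
	s64 timestamp __aligned(8);	/* where IIO places the timestamp */
};

	/* in the trigger handler: */
	struct ads8688_scan scan = { };

	/* ...fill scan.channels[] from the hardware... */
	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
					   iio_get_time_ns(indio_dev));
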
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index cafb1dcadc48..9d984f2a8ba7 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -142,7 +142,10 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
142 stepconfig |= STEPCONFIG_MODE_SWCNT; 142 stepconfig |= STEPCONFIG_MODE_SWCNT;
143 143
144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps), 144 tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
145 stepconfig | STEPCONFIG_INP(chan)); 145 stepconfig | STEPCONFIG_INP(chan) |
146 STEPCONFIG_INM_ADCREFM |
147 STEPCONFIG_RFP_VREFP |
148 STEPCONFIG_RFM_VREFN);
146 149
147 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) { 150 if (adc_dev->open_delay[i] > STEPDELAY_OPEN_MASK) {
148 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n", 151 dev_warn(dev, "chan %d open delay truncating to 0x3FFFF\n",
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index a406ad31b096..3a20cb5d9bff 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
444 case IIO_CHAN_INFO_SCALE: 444 case IIO_CHAN_INFO_SCALE:
445 switch (chan->type) { 445 switch (chan->type) {
446 case IIO_TEMP: 446 case IIO_TEMP:
447 *val = 1; /* 0.01 */ 447 *val = 10;
448 *val2 = 100; 448 return IIO_VAL_INT;
449 break;
450 case IIO_PH: 449 case IIO_PH:
451 *val = 1; /* 0.001 */ 450 *val = 1; /* 0.001 */
452 *val2 = 1000; 451 *val2 = 1000;
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
477 int val, int val2, long mask) 476 int val, int val2, long mask)
478{ 477{
479 struct atlas_data *data = iio_priv(indio_dev); 478 struct atlas_data *data = iio_priv(indio_dev);
480 __be32 reg = cpu_to_be32(val); 479 __be32 reg = cpu_to_be32(val / 10);
481 480
482 if (val2 != 0 || val < 0 || val > 20000) 481 if (val2 != 0 || val < 0 || val > 20000)
483 return -EINVAL; 482 return -EINVAL;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 63a7cc00bae0..84f077b2b90a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -494,7 +494,10 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
494 id_priv->id.route.addr.dev_addr.transport = 494 id_priv->id.route.addr.dev_addr.transport =
495 rdma_node_get_transport(cma_dev->device->node_type); 495 rdma_node_get_transport(cma_dev->device->node_type);
496 list_add_tail(&id_priv->list, &cma_dev->id_list); 496 list_add_tail(&id_priv->list, &cma_dev->id_list);
497 rdma_restrack_kadd(&id_priv->res); 497 if (id_priv->res.kern_name)
498 rdma_restrack_kadd(&id_priv->res);
499 else
500 rdma_restrack_uadd(&id_priv->res);
498} 501}
499 502
500static void cma_attach_to_dev(struct rdma_id_private *id_priv, 503static void cma_attach_to_dev(struct rdma_id_private *id_priv,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 3cd830d52967..616734313f0c 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
267#endif 267#endif
268 268
269struct ib_device *ib_device_get_by_index(u32 ifindex); 269struct ib_device *ib_device_get_by_index(u32 ifindex);
270void ib_device_put(struct ib_device *device);
271/* RDMA device netlink */ 270/* RDMA device netlink */
272void nldev_init(void); 271void nldev_init(void);
273void nldev_exit(void); 272void nldev_exit(void);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 8872453e26c0..238ec42778ef 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index)
156 down_read(&lists_rwsem); 156 down_read(&lists_rwsem);
157 device = __ib_device_get_by_index(index); 157 device = __ib_device_get_by_index(index);
158 if (device) { 158 if (device) {
159 /* Do not return a device if unregistration has started. */ 159 if (!ib_device_try_get(device))
160 if (!refcount_inc_not_zero(&device->refcount))
161 device = NULL; 160 device = NULL;
162 } 161 }
163 up_read(&lists_rwsem); 162 up_read(&lists_rwsem);
164 return device; 163 return device;
165} 164}
166 165
166/**
167 * ib_device_put - Release IB device reference
168 * @device: device whose reference to be released
169 *
170 * ib_device_put() releases reference to the IB device to allow it to be
171 * unregistered and eventually free.
172 */
167void ib_device_put(struct ib_device *device) 173void ib_device_put(struct ib_device *device)
168{ 174{
169 if (refcount_dec_and_test(&device->refcount)) 175 if (refcount_dec_and_test(&device->refcount))
170 complete(&device->unreg_completion); 176 complete(&device->unreg_completion);
171} 177}
178EXPORT_SYMBOL(ib_device_put);
172 179
173static struct ib_device *__ib_device_get_by_name(const char *name) 180static struct ib_device *__ib_device_get_by_name(const char *name)
174{ 181{
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size)
303 rwlock_init(&device->client_data_lock); 310 rwlock_init(&device->client_data_lock);
304 INIT_LIST_HEAD(&device->client_data_list); 311 INIT_LIST_HEAD(&device->client_data_list);
305 INIT_LIST_HEAD(&device->port_list); 312 INIT_LIST_HEAD(&device->port_list);
306 refcount_set(&device->refcount, 1);
307 init_completion(&device->unreg_completion); 313 init_completion(&device->unreg_completion);
308 314
309 return device; 315 return device;
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name,
620 goto cg_cleanup; 626 goto cg_cleanup;
621 } 627 }
622 628
629 refcount_set(&device->refcount, 1);
623 device->reg_state = IB_DEV_REGISTERED; 630 device->reg_state = IB_DEV_REGISTERED;
624 631
625 list_for_each_entry(client, &client_list, list) 632 list_for_each_entry(client, &client_list, list)
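
The ordering change matters: the refcount is now initialised to 1 only after registration succeeds, so a concurrent ib_device_get_by_index() (which uses ib_device_try_get(), i.e. refcount_inc_not_zero()) can no longer hand out references to a half-registered device. The try-get/put/unregister idiom in isolation, as a generic sketch:

struct obj {
	refcount_t refcount;
	struct completion unreg_completion;
};

static bool obj_try_get(struct obj *o)
{
	/* Fails once the count has dropped to zero (teardown started)
	 * or was never set (registration not finished). */
	return refcount_inc_not_zero(&o->refcount);
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		complete(&o->unreg_completion);	/* last user is gone */
}

static void obj_unregister(struct obj *o)
{
	obj_put(o);	/* drop the initial reference... */
	wait_for_completion(&o->unreg_completion);	/* ...and wait */
}
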
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e600fc23ae62..3c97a8b6bf1e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -584,10 +584,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, 584 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD)) 585 atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
586 goto err; 586 goto err;
587 if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
588 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
589 pd->unsafe_global_rkey))
590 goto err;
591 587
592 if (fill_res_name_pid(msg, res)) 588 if (fill_res_name_pid(msg, res))
593 goto err; 589 goto err;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index be6b8e1257d0..69f8db66925e 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -106,6 +106,8 @@ int uverbs_finalize_object(struct ib_uobject *uobj,
106 enum uverbs_obj_access access, 106 enum uverbs_obj_access access,
107 bool commit); 107 bool commit);
108 108
109int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
110
109void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); 111void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
110void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); 112void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
111 113
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index a4ec43093cb3..acb882f279cb 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
352 umem->writable = 1; 352 umem->writable = 1;
353 umem->is_odp = 1; 353 umem->is_odp = 1;
354 odp_data->per_mm = per_mm; 354 odp_data->per_mm = per_mm;
355 umem->owning_mm = per_mm->mm;
356 mmgrab(umem->owning_mm);
355 357
356 mutex_init(&odp_data->umem_mutex); 358 mutex_init(&odp_data->umem_mutex);
357 init_completion(&odp_data->notifier_completion); 359 init_completion(&odp_data->notifier_completion);
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
384out_page_list: 386out_page_list:
385 vfree(odp_data->page_list); 387 vfree(odp_data->page_list);
386out_odp_data: 388out_odp_data:
389 mmdrop(umem->owning_mm);
387 kfree(odp_data); 390 kfree(odp_data);
388 return ERR_PTR(ret); 391 return ERR_PTR(ret);
389} 392}
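
The bug here was structural: the child ODP umem pointed at the parent's mm but never took its own reference, so teardown could drop a reference it did not own. mmgrab()/mmdrop() is the right weight for this: it pins the mm_struct allocation itself, keeping the pointer valid, without keeping the whole address space alive. In brief:

	/* Keep the struct mm_struct allocated; its fields stay valid. */
	mmgrab(mm);
	...
	mmdrop(mm);	/* one drop per grab, including on error paths */

By contrast, mmget()/mmput() additionally pin the address space (page tables and mappings), a heavier guarantee than this code needs.
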
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6b12cc5f97b2..3317300ab036 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -60,6 +60,10 @@ static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp,
60{ 60{
61 int ret; 61 int ret;
62 62
63 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
64 return uverbs_copy_to_struct_or_zero(
65 attrs, UVERBS_ATTR_CORE_OUT, resp, resp_len);
66
63 if (copy_to_user(attrs->ucore.outbuf, resp, 67 if (copy_to_user(attrs->ucore.outbuf, resp,
64 min(attrs->ucore.outlen, resp_len))) 68 min(attrs->ucore.outlen, resp_len)))
65 return -EFAULT; 69 return -EFAULT;
@@ -1181,6 +1185,9 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs)
1181 goto out_put; 1185 goto out_put;
1182 } 1186 }
1183 1187
1188 if (uverbs_attr_is_valid(attrs, UVERBS_ATTR_CORE_OUT))
1189 ret = uverbs_output_written(attrs, UVERBS_ATTR_CORE_OUT);
1190
1184 ret = 0; 1191 ret = 0;
1185 1192
1186out_put: 1193out_put:
@@ -2012,8 +2019,10 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
2012 return -ENOMEM; 2019 return -ENOMEM;
2013 2020
2014 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); 2021 qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs);
2015 if (!qp) 2022 if (!qp) {
2023 ret = -EINVAL;
2016 goto out; 2024 goto out;
2025 }
2017 2026
2018 is_ud = qp->qp_type == IB_QPT_UD; 2027 is_ud = qp->qp_type == IB_QPT_UD;
2019 sg_ind = 0; 2028 sg_ind = 0;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 8c81ff698052..0ca04d224015 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -144,6 +144,21 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
144 0, uattr->len - len); 144 0, uattr->len - len);
145} 145}
146 146
147static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
148 const struct uverbs_attr *attr)
149{
150 struct bundle_priv *pbundle =
151 container_of(bundle, struct bundle_priv, bundle);
152 u16 flags;
153
154 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
155 UVERBS_ATTR_F_VALID_OUTPUT;
156 if (put_user(flags,
157 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
158 return -EFAULT;
159 return 0;
160}
161
147static int uverbs_process_idrs_array(struct bundle_priv *pbundle, 162static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
148 const struct uverbs_api_attr *attr_uapi, 163 const struct uverbs_api_attr *attr_uapi,
149 struct uverbs_objs_arr_attr *attr, 164 struct uverbs_objs_arr_attr *attr,
@@ -456,6 +471,19 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
456 } 471 }
457 472
458 /* 473 /*
474 * Until the drivers are revised to use the bundle directly we have to
475 * assume that the driver wrote to its UHW_OUT and flag userspace
476 * appropriately.
477 */
478 if (!ret && pbundle->method_elm->has_udata) {
479 const struct uverbs_attr *attr =
480 uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
481
482 if (!IS_ERR(attr))
483 ret = uverbs_set_output(&pbundle->bundle, attr);
484 }
485
486 /*
459 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can 487 * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
460 * not invoke the method because the request is not supported. No 488 * not invoke the method because the request is not supported. No
461 * other cases should return this code. 489 * other cases should return this code.
@@ -706,10 +734,7 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
706int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx, 734int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
707 const void *from, size_t size) 735 const void *from, size_t size)
708{ 736{
709 struct bundle_priv *pbundle =
710 container_of(bundle, struct bundle_priv, bundle);
711 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 737 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
712 u16 flags;
713 size_t min_size; 738 size_t min_size;
714 739
715 if (IS_ERR(attr)) 740 if (IS_ERR(attr))
@@ -719,16 +744,25 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
719 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size)) 744 if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
720 return -EFAULT; 745 return -EFAULT;
721 746
722 flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags | 747 return uverbs_set_output(bundle, attr);
723 UVERBS_ATTR_F_VALID_OUTPUT;
724 if (put_user(flags,
725 &pbundle->user_attrs[attr->ptr_attr.uattr_idx].flags))
726 return -EFAULT;
727
728 return 0;
729} 748}
730EXPORT_SYMBOL(uverbs_copy_to); 749EXPORT_SYMBOL(uverbs_copy_to);
731 750
751
752/*
753 * This is only used if the caller has directly used copy_to_user to write the
754 * data. It signals to user space that the buffer is filled in.
755 */
756int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx)
757{
758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
759
760 if (IS_ERR(attr))
761 return PTR_ERR(attr);
762
763 return uverbs_set_output(bundle, attr);
764}
765
732int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, 766int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
733 size_t idx, s64 lower_bound, u64 upper_bound, 767 size_t idx, s64 lower_bound, u64 upper_bound,
734 s64 *def_val) 768 s64 *def_val)
@@ -757,8 +791,10 @@ int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle,
757{ 791{
758 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); 792 const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx);
759 793
760 if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), 794 if (size < attr->ptr_attr.len) {
761 attr->ptr_attr.len)) 795 if (clear_user(u64_to_user_ptr(attr->ptr_attr.data) + size,
762 return -EFAULT; 796 attr->ptr_attr.len - size))
797 return -EFAULT;
798 }
763 return uverbs_copy_to(bundle, idx, from, size); 799 return uverbs_copy_to(bundle, idx, from, size);
764} 800}
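
Two related cleanups in this file. First, the flag write that tells userspace "this output attribute was filled in" is factored into uverbs_set_output(), shared by uverbs_copy_to() and the new uverbs_output_written(). Second, uverbs_copy_to_struct_or_zero() no longer zeroes the entire user buffer before copying; it clears only the tail beyond the bytes actually written. The clear-then-copy tail pattern, sketched standalone:

static int copy_or_zero_tail(void __user *uptr, const void *from,
			     size_t size, size_t ulen)
{
	/* Zero only the bytes we will not overwrite below. */
	if (size < ulen && clear_user(uptr + size, ulen - size))
		return -EFAULT;

	if (copy_to_user(uptr, from, min(size, ulen)))
		return -EFAULT;

	return 0;
}
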
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index fb0007aa0c27..5f366838b7ff 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref)
204 if (atomic_dec_and_test(&file->device->refcount)) 204 if (atomic_dec_and_test(&file->device->refcount))
205 ib_uverbs_comp_dev(file->device); 205 ib_uverbs_comp_dev(file->device);
206 206
207 if (file->async_file)
208 kref_put(&file->async_file->ref,
209 ib_uverbs_release_async_event_file);
207 put_device(&file->device->dev); 210 put_device(&file->device->dev);
208 kfree(file); 211 kfree(file);
209} 212}
@@ -690,6 +693,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
690 693
691 buf += sizeof(hdr); 694 buf += sizeof(hdr);
692 695
696 memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
693 bundle.ufile = file; 697 bundle.ufile = file;
694 if (!method_elm->is_ex) { 698 if (!method_elm->is_ex) {
695 size_t in_len = hdr.in_words * 4 - sizeof(hdr); 699 size_t in_len = hdr.in_words * 4 - sizeof(hdr);
@@ -963,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
963 967
964 /* Get an arbitrary mm pointer that hasn't been cleaned yet */ 968 /* Get an arbitrary mm pointer that hasn't been cleaned yet */
965 mutex_lock(&ufile->umap_lock); 969 mutex_lock(&ufile->umap_lock);
966 if (!list_empty(&ufile->umaps)) { 970 while (!list_empty(&ufile->umaps)) {
967 mm = list_first_entry(&ufile->umaps, 971 int ret;
968 struct rdma_umap_priv, list) 972
969 ->vma->vm_mm; 973 priv = list_first_entry(&ufile->umaps,
970 mmget(mm); 974 struct rdma_umap_priv, list);
975 mm = priv->vma->vm_mm;
976 ret = mmget_not_zero(mm);
977 if (!ret) {
978 list_del_init(&priv->list);
979 mm = NULL;
980 continue;
981 }
982 break;
971 } 983 }
972 mutex_unlock(&ufile->umap_lock); 984 mutex_unlock(&ufile->umap_lock);
973 if (!mm) 985 if (!mm)
@@ -1095,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
1095 list_del_init(&file->list); 1107 list_del_init(&file->list);
1096 mutex_unlock(&file->device->lists_mutex); 1108 mutex_unlock(&file->device->lists_mutex);
1097 1109
1098 if (file->async_file)
1099 kref_put(&file->async_file->ref,
1100 ib_uverbs_release_async_event_file);
1101
1102 kref_put(&file->ref, ib_uverbs_release_file); 1110 kref_put(&file->ref, ib_uverbs_release_file);
1103 1111
1104 return 0; 1112 return 0;
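
The first hunk of this file moves the async_file reference drop from ->release() of the fd to the final kref put on the uverbs file, so the event file outlives every remaining reference holder. The disassociate loop is the more subtle fix: mmget() on an mm whose user count has already hit zero is unsafe, and giving up on the first entry could strand later ones, so the loop now treats mmget_not_zero() as a try-get, pruning dead entries and stopping at the first live mm:

	struct mm_struct *mm = NULL;

	mutex_lock(&ufile->umap_lock);
	while (!list_empty(&ufile->umaps)) {
		priv = list_first_entry(&ufile->umaps,
					struct rdma_umap_priv, list);
		mm = priv->vma->vm_mm;
		if (!mmget_not_zero(mm)) {
			/* mm is exiting; forget this entry, try the next. */
			list_del_init(&priv->list);
			mm = NULL;
			continue;
		}
		break;	/* found a live mm to disassociate */
	}
	mutex_unlock(&ufile->umap_lock);
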
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 5030ec480370..2a3f2f01028d 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
168static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( 168static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)(
169 struct uverbs_attr_bundle *attrs) 169 struct uverbs_attr_bundle *attrs)
170{ 170{
171 struct ib_device *ib_dev = attrs->ufile->device->ib_dev; 171 struct ib_device *ib_dev;
172 struct ib_port_attr attr = {}; 172 struct ib_port_attr attr = {};
173 struct ib_uverbs_query_port_resp_ex resp = {}; 173 struct ib_uverbs_query_port_resp_ex resp = {};
174 struct ib_ucontext *ucontext;
174 int ret; 175 int ret;
175 u8 port_num; 176 u8 port_num;
176 177
178 ucontext = ib_uverbs_get_ucontext(attrs);
179 if (IS_ERR(ucontext))
180 return PTR_ERR(ucontext);
181 ib_dev = ucontext->device;
182
177 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ 183 /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
178 if (!ib_dev->ops.query_port) 184 if (!ib_dev->ops.query_port)
179 return -EOPNOTSUPP; 185 return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 326805461265..19551aa43850 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -766,8 +766,8 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
766 return NULL; 766 return NULL;
767 767
768 sbuf->size = size; 768 sbuf->size = size;
769 sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 769 sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
770 &sbuf->dma_addr, GFP_ATOMIC); 770 &sbuf->dma_addr, GFP_ATOMIC);
771 if (!sbuf->sb) 771 if (!sbuf->sb)
772 goto bail; 772 goto bail;
773 773
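
This and the remaining hunks below are one treewide cleanup: dma_alloc_coherent() now returns zeroed memory itself, which makes the dma_zalloc_coherent() wrapper redundant, so call sites convert one-for-one with no behaviour change:

	/* before */
	sb = dma_zalloc_coherent(dev, size, &dma_addr, GFP_ATOMIC);

	/* after: same semantics, the memory is still zeroed */
	sb = dma_alloc_coherent(dev, size, &dma_addr, GFP_ATOMIC);
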
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 59eeac55626f..57d4951679cb 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -105,10 +105,10 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
105 105
106 if (!sghead) { 106 if (!sghead) {
107 for (i = 0; i < pages; i++) { 107 for (i = 0; i < pages; i++) {
108 pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev, 108 pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
109 pbl->pg_size, 109 pbl->pg_size,
110 &pbl->pg_map_arr[i], 110 &pbl->pg_map_arr[i],
111 GFP_KERNEL); 111 GFP_KERNEL);
112 if (!pbl->pg_arr[i]) 112 if (!pbl->pg_arr[i])
113 goto fail; 113 goto fail;
114 pbl->pg_count++; 114 pbl->pg_count++;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index df4f7a3f043d..8ac72ac7cbac 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -291,9 +291,9 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
291 if (!wq->sq) 291 if (!wq->sq)
292 goto err3; 292 goto err3;
293 293
294 wq->queue = dma_zalloc_coherent(&(rdev_p->rnic_info.pdev->dev), 294 wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
295 depth * sizeof(union t3_wr), 295 depth * sizeof(union t3_wr),
296 &(wq->dma_addr), GFP_KERNEL); 296 &(wq->dma_addr), GFP_KERNEL);
297 if (!wq->queue) 297 if (!wq->queue)
298 goto err4; 298 goto err4;
299 299
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 981ff5cfb5d1..504cf525508f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2564,9 +2564,8 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >> 2564 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2565 T4_RQT_ENTRY_SHIFT; 2565 T4_RQT_ENTRY_SHIFT;
2566 2566
2567 wq->queue = dma_zalloc_coherent(&rdev->lldi.pdev->dev, 2567 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
2568 wq->memsize, &wq->dma_addr, 2568 &wq->dma_addr, GFP_KERNEL);
2569 GFP_KERNEL);
2570 if (!wq->queue) 2569 if (!wq->queue)
2571 goto err_free_rqtpool; 2570 goto err_free_rqtpool;
2572 2571
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c22ebc774a6a..f9a7e9d29c8b 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
488 vmf = 1; 488 vmf = 1;
489 break; 489 break;
490 case STATUS: 490 case STATUS:
491 if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { 491 if (flags & VM_WRITE) {
492 ret = -EPERM; 492 ret = -EPERM;
493 goto done; 493 goto done;
494 } 494 }
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 09044905284f..7835eb52e7c5 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -899,10 +899,10 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
899 goto done; 899 goto done;
900 900
901 /* allocate dummy tail memory for all receive contexts */ 901 /* allocate dummy tail memory for all receive contexts */
902 dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent( 902 dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
903 &dd->pcidev->dev, sizeof(u64), 903 sizeof(u64),
904 &dd->rcvhdrtail_dummy_dma, 904 &dd->rcvhdrtail_dummy_dma,
905 GFP_KERNEL); 905 GFP_KERNEL);
906 906
907 if (!dd->rcvhdrtail_dummy_kvaddr) { 907 if (!dd->rcvhdrtail_dummy_kvaddr) {
908 dd_dev_err(dd, "cannot allocate dummy tail memory\n"); 908 dd_dev_err(dd, "cannot allocate dummy tail memory\n");
@@ -1863,9 +1863,9 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1863 gfp_flags = GFP_KERNEL; 1863 gfp_flags = GFP_KERNEL;
1864 else 1864 else
1865 gfp_flags = GFP_USER; 1865 gfp_flags = GFP_USER;
1866 rcd->rcvhdrq = dma_zalloc_coherent( 1866 rcd->rcvhdrq = dma_alloc_coherent(&dd->pcidev->dev, amt,
1867 &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma, 1867 &rcd->rcvhdrq_dma,
1868 gfp_flags | __GFP_COMP); 1868 gfp_flags | __GFP_COMP);
1869 1869
1870 if (!rcd->rcvhdrq) { 1870 if (!rcd->rcvhdrq) {
1871 dd_dev_err(dd, 1871 dd_dev_err(dd,
@@ -1876,9 +1876,10 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
1876 1876
1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || 1877 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { 1878 HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
1879 rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( 1879 rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
1880 &dd->pcidev->dev, PAGE_SIZE, 1880 PAGE_SIZE,
1881 &rcd->rcvhdrqtailaddr_dma, gfp_flags); 1881 &rcd->rcvhdrqtailaddr_dma,
1882 gfp_flags);
1882 if (!rcd->rcvhdrtail_kvaddr) 1883 if (!rcd->rcvhdrtail_kvaddr)
1883 goto bail_free; 1884 goto bail_free;
1884 } 1885 }
@@ -1974,10 +1975,10 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
1974 while (alloced_bytes < rcd->egrbufs.size && 1975 while (alloced_bytes < rcd->egrbufs.size &&
1975 rcd->egrbufs.alloced < rcd->egrbufs.count) { 1976 rcd->egrbufs.alloced < rcd->egrbufs.count) {
1976 rcd->egrbufs.buffers[idx].addr = 1977 rcd->egrbufs.buffers[idx].addr =
1977 dma_zalloc_coherent(&dd->pcidev->dev, 1978 dma_alloc_coherent(&dd->pcidev->dev,
1978 rcd->egrbufs.rcvtid_size, 1979 rcd->egrbufs.rcvtid_size,
1979 &rcd->egrbufs.buffers[idx].dma, 1980 &rcd->egrbufs.buffers[idx].dma,
1980 gfp_flags); 1981 gfp_flags);
1981 if (rcd->egrbufs.buffers[idx].addr) { 1982 if (rcd->egrbufs.buffers[idx].addr) {
1982 rcd->egrbufs.buffers[idx].len = 1983 rcd->egrbufs.buffers[idx].len =
1983 rcd->egrbufs.rcvtid_size; 1984 rcd->egrbufs.rcvtid_size;
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index dd5a5c030066..04126d7e318d 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -2098,11 +2098,10 @@ int init_credit_return(struct hfi1_devdata *dd)
2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return); 2098 int bytes = TXE_NUM_CONTEXTS * sizeof(struct credit_return);
2099 2099
2100 set_dev_node(&dd->pcidev->dev, i); 2100 set_dev_node(&dd->pcidev->dev, i);
2101 dd->cr_base[i].va = dma_zalloc_coherent( 2101 dd->cr_base[i].va = dma_alloc_coherent(&dd->pcidev->dev,
2102 &dd->pcidev->dev, 2102 bytes,
2103 bytes, 2103 &dd->cr_base[i].dma,
2104 &dd->cr_base[i].dma, 2104 GFP_KERNEL);
2105 GFP_KERNEL);
2106 if (!dd->cr_base[i].va) { 2105 if (!dd->cr_base[i].va) {
2107 set_dev_node(&dd->pcidev->dev, dd->node); 2106 set_dev_node(&dd->pcidev->dev, dd->node);
2108 dd_dev_err(dd, 2107 dd_dev_err(dd,
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index b84356e1a4c1..96897a91fb0a 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -1453,12 +1453,9 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
1453 timer_setup(&sde->err_progress_check_timer, 1453 timer_setup(&sde->err_progress_check_timer,
1454 sdma_err_progress_check, 0); 1454 sdma_err_progress_check, 0);
1455 1455
1456 sde->descq = dma_zalloc_coherent( 1456 sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
1457 &dd->pcidev->dev, 1457 descq_cnt * sizeof(u64[2]),
1458 descq_cnt * sizeof(u64[2]), 1458 &sde->descq_phys, GFP_KERNEL);
1459 &sde->descq_phys,
1460 GFP_KERNEL
1461 );
1462 if (!sde->descq) 1459 if (!sde->descq)
1463 goto bail; 1460 goto bail;
1464 sde->tx_ring = 1461 sde->tx_ring =
@@ -1471,24 +1468,18 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
1471 1468
1472 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; 1469 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1473 /* Allocate memory for DMA of head registers to memory */ 1470 /* Allocate memory for DMA of head registers to memory */
1474 dd->sdma_heads_dma = dma_zalloc_coherent( 1471 dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
1475 &dd->pcidev->dev, 1472 dd->sdma_heads_size,
1476 dd->sdma_heads_size, 1473 &dd->sdma_heads_phys,
1477 &dd->sdma_heads_phys, 1474 GFP_KERNEL);
1478 GFP_KERNEL
1479 );
1480 if (!dd->sdma_heads_dma) { 1475 if (!dd->sdma_heads_dma) {
1481 dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); 1476 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1482 goto bail; 1477 goto bail;
1483 } 1478 }
1484 1479
1485 /* Allocate memory for pad */ 1480 /* Allocate memory for pad */
1486 dd->sdma_pad_dma = dma_zalloc_coherent( 1481 dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
1487 &dd->pcidev->dev, 1482 &dd->sdma_pad_phys, GFP_KERNEL);
1488 sizeof(u32),
1489 &dd->sdma_pad_phys,
1490 GFP_KERNEL
1491 );
1492 if (!dd->sdma_pad_dma) { 1483 if (!dd->sdma_pad_dma) {
1493 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); 1484 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1494 goto bail; 1485 goto bail;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 88242fe95eaa..bf96067876c9 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 6300033a448f..dac058d3df53 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,8 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 		buf->npages = 1 << order;
 		buf->page_shift = page_shift;
 		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
-		buf->direct.buf = dma_zalloc_coherent(dev,
-						      size, &t, GFP_KERNEL);
+		buf->direct.buf = dma_alloc_coherent(dev, size, &t,
+						     GFP_KERNEL);
 		if (!buf->direct.buf)
 			return -ENOMEM;
 
@@ -219,9 +219,10 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->page_list[i].buf = dma_zalloc_coherent(dev,
-								    page_size, &t,
-								    GFP_KERNEL);
+			buf->page_list[i].buf = dma_alloc_coherent(dev,
+								   page_size,
+								   &t,
+								   GFP_KERNEL);
 
 			if (!buf->page_list[i].buf)
 				goto err_free;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 3a669451cf86..543fa1504cd3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -5091,7 +5091,7 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
 			}
-			eq->buf[i] = dma_zalloc_coherent(dev, size,
-							 &(eq->buf_dma[i]),
-							 GFP_KERNEL);
+			eq->buf[i] = dma_alloc_coherent(dev, size,
+							&(eq->buf_dma[i]),
+							GFP_KERNEL);
 			if (!eq->buf[i])
@@ -5126,9 +5126,9 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
 					size = (eq->entries - eqe_alloc)
 					       * eq->eqe_size;
 				}
-				eq->buf[idx] = dma_zalloc_coherent(dev, size,
-								   &(eq->buf_dma[idx]),
-								   GFP_KERNEL);
+				eq->buf[idx] = dma_alloc_coherent(dev, size,
+								  &(eq->buf_dma[idx]),
+								  GFP_KERNEL);
 				if (!eq->buf[idx])
 					goto err_dma_alloc_buf;
 
@@ -5241,7 +5241,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
 		goto free_cmd_mbox;
 	}
 
-	eq->buf_list->buf = dma_zalloc_coherent(dev, buf_chk_sz,
-						&(eq->buf_list->map),
-						GFP_KERNEL);
+	eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
+					       &(eq->buf_list->map),
+					       GFP_KERNEL);
 	if (!eq->buf_list->buf) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 960b1946c365..12deacf442cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
 				   struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+	struct hns_roce_ib_create_srq_resp resp = {};
 	struct hns_roce_srq *srq;
 	int srq_desc_size;
 	int srq_buf_size;
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd,
 
 	srq->event = hns_roce_ib_srq_event;
 	srq->ibsrq.ext.xrc.srq_num = srq->srqn;
+	resp.srqn = srq->srqn;
 
 	if (udata) {
-		if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) {
+		if (ib_copy_to_udata(udata, &resp,
+				     min(udata->outlen, sizeof(resp)))) {
 			ret = -EFAULT;
-			goto err_wrid;
+			goto err_srqc_alloc;
 		}
 	}
 
 	return &srq->ibsrq;
 
+err_srqc_alloc:
+	hns_roce_srq_free(hr_dev, srq);
+
 err_wrid:
 	kvfree(srq->wrid);
 
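
Note: returning the SRQ number through a zero-initialized response
struct copied with min(udata->outlen, sizeof(resp)) is the usual uverbs
idiom: an older userspace that allocated a smaller response gets only
the bytes it asked for, and a newer one sees zeroes in fields the
kernel does not fill. A sketch of the pattern, condensed from the hunk
above:

	struct hns_roce_ib_create_srq_resp resp = {};

	resp.srqn = srq->srqn;
	if (udata &&
	    ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)))) {
		ret = -EFAULT;
		goto err_srqc_alloc;	/* unwind the SRQ just created */
	}
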
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index a9ea966877f2..59e978141ad4 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -745,8 +745,8 @@ enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
 	if (!mem)
 		return I40IW_ERR_PARAM;
 	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
-				      (dma_addr_t *)&mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
+				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
 	if (!mem->va)
 		return I40IW_ERR_NO_MEMORY;
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 25439da8976c..936ee1314bcd 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
 	if (sqp->tx_ring[wire_tx_ix].ah)
-		rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
+		mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0);
 	sqp->tx_ring[wire_tx_ix].ah = ah;
 	ib_dma_sync_single_for_cpu(&dev->ib_dev,
 				   sqp->tx_ring[wire_tx_ix].buf.map,
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
 		if (wc.status == IB_WC_SUCCESS) {
 			switch (wc.opcode) {
 			case IB_WC_SEND:
-				rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+				mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
 					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
 				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
 					= NULL;
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
 				 " status = %d, wrid = 0x%llx\n",
 				 ctx->slave, wc.status, wc.wr_id);
 			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
-				rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
+				mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id &
 					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
 				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
 					= NULL;
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index e8a1e4498e3f..798591a18484 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
 		UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
 	UAPI_DEF_CHAIN_OBJ_TREE(
 		UVERBS_OBJECT_FLOW,
-		&mlx5_ib_fs,
-		UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)),
+		&mlx5_ib_fs),
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
 				&mlx5_ib_flow_actions),
 	{},
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 01e0f6200631..4ee32964e1dd 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 	struct prefetch_mr_work *w =
 		container_of(work, struct prefetch_mr_work, work);
 
-	if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED)
+	if (ib_device_try_get(&w->dev->ib_dev)) {
 		mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list,
 					 w->num_sge);
-
+		ib_device_put(&w->dev->ib_dev);
+	}
+	put_device(&w->dev->ib_dev.dev);
 	kfree(w);
 }
 
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 		return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list,
 						num_sge);
 
-	if (dev->ib_dev.reg_state != IB_DEV_REGISTERED)
-		return -ENODEV;
-
 	work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
 	if (!work)
 		return -ENOMEM;
 
 	memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
 
+	get_device(&dev->ib_dev.dev);
 	work->dev = dev;
 	work->pf_flags = pf_flags;
 	work->num_sge = num_sge;
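
Note: the prefetch work now takes two references. get_device() on the
submit side keeps the memory behind the ib_device alive until the
worker runs; ib_device_try_get() in the worker additionally checks that
the device is still registered before touching it, replacing the racy
reg_state test. Condensed shape of the two halves (do_prefetch() is a
stand-in for the sg-list prefetch call):

	/* submitter: pin the memory backing the ib_device */
	get_device(&dev->ib_dev.dev);
	/* ... queue the work ... */

	/* worker */
	if (ib_device_try_get(&w->dev->ib_dev)) {
		do_prefetch(w);			/* device still registered */
		ib_device_put(&w->dev->ib_dev);
	}
	put_device(&w->dev->ib_dev.dev);	/* drop the submit-side pin */
	kfree(w);
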
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dd2ae640bc84..7db778d96ef5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	}
 
 	if (!check_flags_mask(ucmd.flags,
+			      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
+			      MLX5_QP_FLAG_BFREG_INDEX |
+			      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE |
+			      MLX5_QP_FLAG_SCATTER_CQE |
 			      MLX5_QP_FLAG_SIGNATURE |
-			      MLX5_QP_FLAG_SCATTER_CQE |
-			      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
-			      MLX5_QP_FLAG_BFREG_INDEX |
-			      MLX5_QP_FLAG_TYPE_DCT |
-			      MLX5_QP_FLAG_TYPE_DCI |
-			      MLX5_QP_FLAG_ALLOW_SCATTER_CQE |
-			      MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE))
+			      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC |
+			      MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+			      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+			      MLX5_QP_FLAG_TYPE_DCI |
+			      MLX5_QP_FLAG_TYPE_DCT))
 		return -EINVAL;
 
 	err = get_qp_user_index(to_mucontext(pd->uobject->context),
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index cc9c0c8ccba3..112d2f38e0de 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -623,8 +623,9 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
 	page = dev->db_tab->page + end;
 
 alloc:
-	page->db_rec = dma_zalloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
-					   &page->mapping, GFP_KERNEL);
+	page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
+					  MTHCA_ICM_PAGE_SIZE, &page->mapping,
+					  GFP_KERNEL);
 	if (!page->db_rec) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 82cb6b71ac7c..e3e9dd54caa2 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -534,7 +534,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 	{
 		struct mthca_ucontext *context;
 
-		qp = kmalloc(sizeof *qp, GFP_KERNEL);
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
@@ -600,7 +600,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
 		if (udata)
 			return ERR_PTR(-EINVAL);
 
-		qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+		qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 241a57a07485..097e5ab2a19f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -380,8 +380,8 @@ static int ocrdma_alloc_q(struct ocrdma_dev *dev,
 	q->len = len;
 	q->entry_size = entry_size;
 	q->size = len * entry_size;
-	q->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, q->size,
-				    &q->dma, GFP_KERNEL);
+	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+				   GFP_KERNEL);
 	if (!q->va)
 		return -ENOMEM;
 	return 0;
@@ -1819,7 +1819,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 		return -ENOMEM;
 	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
 			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
-	cq->va = dma_zalloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
 	if (!cq->va) {
 		status = -ENOMEM;
 		goto mem_err;
@@ -2209,7 +2209,7 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->sq.max_cnt = max_wqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->sq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->sq.va)
 		return -EINVAL;
 	qp->sq.len = len;
@@ -2259,7 +2259,7 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	qp->rq.max_cnt = max_rqe_allocated;
 	len = (hw_pages * hw_page_size);
 
-	qp->rq.va = dma_zalloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
 	if (!qp->rq.va)
 		return -ENOMEM;
 	qp->rq.pa = pa;
@@ -2315,8 +2315,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
 	if (dev->attr.ird == 0)
 		return 0;
 
-	qp->ird_q_va = dma_zalloc_coherent(&pdev->dev, ird_q_len, &pa,
-					   GFP_KERNEL);
+	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+					  GFP_KERNEL);
 	if (!qp->ird_q_va)
 		return -ENOMEM;
 	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index dd15474b19b7..6be0ea109138 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -73,8 +73,8 @@ bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
 	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
 			  sizeof(struct ocrdma_rdma_stats_resp));
 
-	mem->va = dma_zalloc_coherent(&dev->nic_info.pdev->dev, mem->size,
-				      &mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+				     &mem->pa, GFP_KERNEL);
 	if (!mem->va) {
 		pr_err("%s: stats mbox allocation failed\n", __func__);
 		return false;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c46bed0c5513..287c332ff0e6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -504,8 +504,8 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&ctx->mm_head);
 	mutex_init(&ctx->mm_list_lock);
 
-	ctx->ah_tbl.va = dma_zalloc_coherent(&pdev->dev, map_len,
-					     &ctx->ah_tbl.pa, GFP_KERNEL);
+	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+					    &ctx->ah_tbl.pa, GFP_KERNEL);
 	if (!ctx->ah_tbl.va) {
 		kfree(ctx);
 		return ERR_PTR(-ENOMEM);
@@ -838,7 +838,7 @@ static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
 		return -ENOMEM;
 
 	for (i = 0; i < mr->num_pbls; i++) {
-		va = dma_zalloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
 		if (!va) {
 			ocrdma_free_mr_pbl_tbl(dev, mr);
 			status = -ENOMEM;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index b342a70e2814..e1ccf32b1c3d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -556,8 +556,8 @@ static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
 		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < pbl_info->num_pbls; i++) {
-		va = dma_zalloc_coherent(&pdev->dev, pbl_info->pbl_size,
-					 &pa, flags);
+		va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size, &pa,
+					flags);
 		if (!va)
 			goto err;
 
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 868da0ece7ba..445ea19a2ec8 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 42b8685c997e..3c633ab58052 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
 
 static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
 {
-	return (enum pvrdma_wr_opcode)op;
+	switch (op) {
+	case IB_WR_RDMA_WRITE:
+		return PVRDMA_WR_RDMA_WRITE;
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+	case IB_WR_SEND:
+		return PVRDMA_WR_SEND;
+	case IB_WR_SEND_WITH_IMM:
+		return PVRDMA_WR_SEND_WITH_IMM;
+	case IB_WR_RDMA_READ:
+		return PVRDMA_WR_RDMA_READ;
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_LSO:
+		return PVRDMA_WR_LSO;
+	case IB_WR_SEND_WITH_INV:
+		return PVRDMA_WR_SEND_WITH_INV;
+	case IB_WR_RDMA_READ_WITH_INV:
+		return PVRDMA_WR_RDMA_READ_WITH_INV;
+	case IB_WR_LOCAL_INV:
+		return PVRDMA_WR_LOCAL_INV;
+	case IB_WR_REG_MR:
+		return PVRDMA_WR_FAST_REG_MR;
+	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_REG_SIG_MR:
+		return PVRDMA_WR_REG_SIG_MR;
+	default:
+		return PVRDMA_WR_ERROR;
+	}
 }
 
 static inline enum ib_wc_status pvrdma_wc_status_to_ib(
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index eaa109dbc96a..39c37b6fd715 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -890,8 +890,8 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev_info(&pdev->dev, "device version %d, driver version %d\n",
 		 dev->dsr_version, PVRDMA_VERSION);
 
-	dev->dsr = dma_zalloc_coherent(&pdev->dev, sizeof(*dev->dsr),
-				       &dev->dsrbase, GFP_KERNEL);
+	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
+				      &dev->dsrbase, GFP_KERNEL);
 	if (!dev->dsr) {
 		dev_err(&pdev->dev, "failed to allocate shared region\n");
 		ret = -ENOMEM;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 3acf74cbe266..1ec3646087ba 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
 			wqe_hdr->ex.imm_data = wr->ex.imm_data;
 
+		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		}
+
 		switch (qp->ibqp.qp_type) {
 		case IB_QPT_GSI:
 		case IB_QPT_UD:
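
Note: the old helper cast the IB opcode straight to the device opcode
and so silently depended on the two enums staying numerically
identical, which newer IB_WR_* values broke. The explicit switch makes
the mapping total, and PVRDMA_WR_ERROR gives pvrdma_post_send() a
sentinel to reject opcodes the device cannot encode instead of posting
a garbage WQE. Roughly, at the call site (the assignment line is a
sketch of where the helper is used, not quoted from the driver):

	wqe_hdr->opcode = ib_wr_opcode_to_pvrdma(wr->opcode);
	if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
		*bad_wr = wr;	/* report the offending work request */
		ret = -EINVAL;
		goto out;
	}
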
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a1bd8cfc2c25..c6cc3e4ab71d 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -2910,6 +2910,8 @@ send:
 			goto op_err;
 		if (!ret)
 			goto rnr_nak;
+		if (wqe->length > qp->r_len)
+			goto inv_err;
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -3078,7 +3080,10 @@ op_err:
 	goto err;
 
 inv_err:
-	send_status = IB_WC_REM_INV_REQ_ERR;
+	send_status =
+		sqp->ibqp.qp_type == IB_QPT_RC ?
+			IB_WC_REM_INV_REQ_ERR :
+			IB_WC_SUCCESS;
 	wc.status = IB_WC_LOC_QP_OP_ERR;
 	goto err;
 
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1da119d901a9..73e808c1e6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
 	struct list_head     list;
 	struct net_device   *dev;
 	struct ipoib_neigh  *neigh;
-	struct ipoib_path   *path;
 	struct ipoib_tx_buf *tx_ring;
 	unsigned int	     tx_head;
 	unsigned int	     tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e8f69..aa9dcfc36cd3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 
 	neigh->cm = tx;
 	tx->neigh = neigh;
-	tx->path = path;
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
 			   neigh->daddr + QPN_AND_OPTIONS_OFFSET);
 		goto free_neigh;
 	}
-	memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+	memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 	netif_tx_unlock_bh(dev);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cfc8b94527b9..aa4e431cbcd3 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -252,6 +252,8 @@ static const struct xpad_device {
 	{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
 	{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
 	{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+	{ 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+	{ 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
 	{ 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = {
 	XPAD_XBOXONE_VENDOR(0x0e6f),		/* 0x0e6f X-Box One controllers */
 	XPAD_XBOX360_VENDOR(0x0f0d),		/* Hori Controllers */
 	XPAD_XBOXONE_VENDOR(0x0f0d),		/* Hori Controllers */
+	XPAD_XBOX360_VENDOR(0x1038),		/* SteelSeries Controllers */
 	XPAD_XBOX360_VENDOR(0x11c9),		/* Nacon GC100XF */
 	XPAD_XBOX360_VENDOR(0x12ab),		/* X-Box 360 dance pads */
 	XPAD_XBOX360_VENDOR(0x1430),		/* RedOctane X-Box 360 controllers */
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4713957b0cbb..a878351f1643 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121
 
 config KEYBOARD_SNVS_PWRKEY
 	tristate "IMX SNVS Power Key Driver"
-	depends on SOC_IMX6SX
+	depends on SOC_IMX6SX || SOC_IMX7D
 	depends on OF
 	help
 	  This is the snvs powerkey driver for the Freescale i.MX application
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 312916f99597..73686c2460ce 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -75,9 +75,7 @@
 struct cap11xx_led {
 	struct cap11xx_priv *priv;
 	struct led_classdev cdev;
-	struct work_struct work;
 	u32 reg;
-	enum led_brightness new_brightness;
 };
 #endif
 
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev)
 }
 
 #ifdef CONFIG_LEDS_CLASS
-static void cap11xx_led_work(struct work_struct *work)
+static int cap11xx_led_set(struct led_classdev *cdev,
+			   enum led_brightness value)
 {
-	struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+	struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
 	struct cap11xx_priv *priv = led->priv;
-	int value = led->new_brightness;
 
 	/*
-	 * All LEDs share the same duty cycle as this is a HW limitation.
-	 * Brightness levels per LED are either 0 (OFF) and 1 (ON).
+	 * All LEDs share the same duty cycle as this is a HW
+	 * limitation. Brightness levels per LED are either
+	 * 0 (OFF) and 1 (ON).
 	 */
-	regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
-		BIT(led->reg), value ? BIT(led->reg) : 0);
-}
-
-static void cap11xx_led_set(struct led_classdev *cdev,
-	enum led_brightness value)
-{
-	struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
-
-	if (led->new_brightness == value)
-		return;
-
-	led->new_brightness = value;
-	schedule_work(&led->work);
+	return regmap_update_bits(priv->regmap,
+				  CAP11XX_REG_LED_OUTPUT_CONTROL,
+				  BIT(led->reg),
+				  value ? BIT(led->reg) : 0);
 }
 
 static int cap11xx_init_leds(struct device *dev,
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev,
 		led->cdev.default_trigger =
 			of_get_property(child, "linux,default-trigger", NULL);
 		led->cdev.flags = 0;
-		led->cdev.brightness_set = cap11xx_led_set;
+		led->cdev.brightness_set_blocking = cap11xx_led_set;
 		led->cdev.max_brightness = 1;
 		led->cdev.brightness = LED_OFF;
 
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev,
 		led->reg = reg;
 		led->priv = priv;
 
-		INIT_WORK(&led->work, cap11xx_led_work);
-
 		error = devm_led_classdev_register(dev, &led->cdev);
 		if (error) {
 			of_node_put(child);
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 403452ef00e6..3d1cb7bf5e35 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
 	keypad->stopped = true;
 	spin_unlock_irq(&keypad->lock);
 
-	flush_work(&keypad->work.work);
+	flush_delayed_work(&keypad->work);
 	/*
 	 * matrix_keypad_scan() will leave IRQs enabled;
 	 * we should disable them now.
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 43b86482dda0..d466bc07aebb 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = {
 struct qt2160_led {
 	struct qt2160_data *qt2160;
 	struct led_classdev cdev;
-	struct work_struct work;
 	char name[32];
 	int id;
-	enum led_brightness new_brightness;
+	enum led_brightness brightness;
 };
 #endif
 
@@ -74,7 +73,6 @@ struct qt2160_data {
 	u16 key_matrix;
 #ifdef CONFIG_LEDS_CLASS
 	struct qt2160_led leds[QT2160_NUM_LEDS_X];
-	struct mutex led_lock;
 #endif
 };
 
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data);
 
 #ifdef CONFIG_LEDS_CLASS
 
-static void qt2160_led_work(struct work_struct *work)
+static int qt2160_led_set(struct led_classdev *cdev,
+			  enum led_brightness value)
 {
-	struct qt2160_led *led = container_of(work, struct qt2160_led, work);
+	struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
 	struct qt2160_data *qt2160 = led->qt2160;
 	struct i2c_client *client = qt2160->client;
-	int value = led->new_brightness;
 	u32 drive, pwmen;
 
-	mutex_lock(&qt2160->led_lock);
-
-	drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
-	pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
-	if (value != LED_OFF) {
-		drive |= (1 << led->id);
-		pwmen |= (1 << led->id);
+	if (value != led->brightness) {
+		drive = qt2160_read(client, QT2160_CMD_DRIVE_X);
+		pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X);
+		if (value != LED_OFF) {
+			drive |= BIT(led->id);
+			pwmen |= BIT(led->id);
 
-	} else {
-		drive &= ~(1 << led->id);
-		pwmen &= ~(1 << led->id);
-	}
-	qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
-	qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
+		} else {
+			drive &= ~BIT(led->id);
+			pwmen &= ~BIT(led->id);
+		}
+		qt2160_write(client, QT2160_CMD_DRIVE_X, drive);
+		qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen);
 
-	/*
-	 * Changing this register will change the brightness
-	 * of every LED in the qt2160. It's a HW limitation.
-	 */
-	if (value != LED_OFF)
-		qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
+		/*
+		 * Changing this register will change the brightness
+		 * of every LED in the qt2160. It's a HW limitation.
+		 */
+		if (value != LED_OFF)
+			qt2160_write(client, QT2160_CMD_PWM_DUTY, value);
 
-	mutex_unlock(&qt2160->led_lock);
-}
+		led->brightness = value;
+	}
 
-static void qt2160_led_set(struct led_classdev *cdev,
-		enum led_brightness value)
-{
-	struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev);
-
-	led->new_brightness = value;
-	schedule_work(&led->work);
+	return 0;
 }
 
 #endif /* CONFIG_LEDS_CLASS */
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
 	int ret;
 	int i;
 
-	mutex_init(&qt2160->led_lock);
-
 	for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
 		struct qt2160_led *led = &qt2160->leds[i];
 
 		snprintf(led->name, sizeof(led->name), "qt2160:x%d", i);
 		led->cdev.name = led->name;
-		led->cdev.brightness_set = qt2160_led_set;
+		led->cdev.brightness_set_blocking = qt2160_led_set;
 		led->cdev.brightness = LED_OFF;
 		led->id = i;
 		led->qt2160 = qt2160;
 
-		INIT_WORK(&led->work, qt2160_led_work);
-
 		ret = led_classdev_register(&client->dev, &led->cdev);
 		if (ret < 0)
 			return ret;
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160)
 {
 	int i;
 
-	for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
+	for (i = 0; i < QT2160_NUM_LEDS_X; i++)
 		led_classdev_unregister(&qt2160->leds[i].cdev);
-		cancel_work_sync(&qt2160->leds[i].work);
-	}
 }
 
 #else
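
Note: this and the cap11xx change above (and the apanel change below)
are the same conversion: drivers that bounced sleeping bus writes
through a private work item switch to the LED core's
brightness_set_blocking hook, which may sleep because the core defers
it for them. The private work_struct, lock and cached brightness
bookkeeping all go away. Generic shape of the callback, with my_led and
my_bus_write as hypothetical stand-ins:

	static int my_led_set(struct led_classdev *cdev,
			      enum led_brightness value)
	{
		struct my_led *led = container_of(cdev, struct my_led, cdev);

		/* may sleep: called from the LED core's workqueue */
		return my_bus_write(led->client, value != LED_OFF);
	}

	/* at registration time */
	led->cdev.brightness_set_blocking = my_led_set;
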
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index babcfb165e4f..3b85631fde91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
 
 	input_dev->id.bustype = BUS_HOST;
 
+	keypad_data->input_dev = input_dev;
+
 	error = keypad_matrix_key_parse_dt(keypad_data);
 	if (error)
 		return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
 
 	input_set_drvdata(input_dev, keypad_data);
 
-	keypad_data->input_dev = input_dev;
-
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 094bddf56755..c1e66f45d552 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -22,7 +22,6 @@
 #include <linux/io.h>
 #include <linux/input-polldev.h>
 #include <linux/i2c.h>
-#include <linux/workqueue.h>
 #include <linux/leds.h>
 
 #define APANEL_NAME	"Fujitsu Application Panel"
@@ -59,8 +58,6 @@ struct apanel {
 	struct i2c_client *client;
 	unsigned short keymap[MAX_PANEL_KEYS];
 	u16    nkeys;
-	u16    led_bits;
-	struct work_struct led_work;
 	struct led_classdev mail_led;
 };
 
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev)
 			report_key(idev, ap->keymap[i]);
 }
 
-/* Track state changes of LED */
-static void led_update(struct work_struct *work)
-{
-	struct apanel *ap = container_of(work, struct apanel, led_work);
-
-	i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits);
-}
-
-static void mail_led_set(struct led_classdev *led,
+static int mail_led_set(struct led_classdev *led,
 			 enum led_brightness value)
 {
 	struct apanel *ap = container_of(led, struct apanel, mail_led);
+	u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000;
 
-	if (value != LED_OFF)
-		ap->led_bits |= 0x8000;
-	else
-		ap->led_bits &= ~0x8000;
-
-	schedule_work(&ap->led_work);
+	return i2c_smbus_write_word_data(ap->client, 0x10, led_bits);
 }
 
 static int apanel_remove(struct i2c_client *client)
@@ -179,7 +164,7 @@ static struct apanel apanel = {
 	},
 	.mail_led = {
 		.name = "mail:blue",
-		.brightness_set = mail_led_set,
+		.brightness_set_blocking = mail_led_set,
 	},
 };
 
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client,
 	if (err)
 		goto out3;
 
-	INIT_WORK(&ap->led_work, led_update);
 	if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
 		err = led_classdev_register(&client->dev, &ap->mail_led);
 		if (err)
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf9f8a8..dd9dd4e40827 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
 	idev->close = bma150_irq_close;
 	input_set_drvdata(idev, bma150);
 
+	bma150->input = idev;
+
 	error = input_register_device(idev);
 	if (error) {
 		input_free_device(idev);
 		return error;
 	}
 
-	bma150->input = idev;
 	return 0;
 }
 
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
 
 	bma150_init_input_device(bma150, ipoll_dev->input);
 
+	bma150->input_polled = ipoll_dev;
+	bma150->input = ipoll_dev->input;
+
 	error = input_register_polled_device(ipoll_dev);
 	if (error) {
 		input_free_polled_device(ipoll_dev);
 		return error;
 	}
 
-	bma150->input_polled = ipoll_dev;
-	bma150->input = ipoll_dev->input;
-
 	return 0;
 }
 
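
Note: same race as the st-keyscan fix above: once
input_register_device() or input_register_polled_device() returns, the
device can be opened and deliver events immediately, so every field
those paths dereference (here bma150->input and bma150->input_polled)
must be assigned first. The safe ordering, condensed from the hunks:

	bma150->input = idev;			/* publish before ... */

	error = input_register_device(idev);	/* ... events can fire here */
	if (error) {
		input_free_device(idev);
		return error;
	}
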
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 55da191ae550..dbb6d9e1b947 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -34,6 +34,7 @@ struct pwm_vibrator {
 	struct work_struct play_work;
 	u16 level;
 	u32 direction_duty_cycle;
+	bool vcc_on;
 };
 
 static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
 	struct pwm_state state;
 	int err;
 
-	err = regulator_enable(vibrator->vcc);
-	if (err) {
-		dev_err(pdev, "failed to enable regulator: %d", err);
-		return err;
+	if (!vibrator->vcc_on) {
+		err = regulator_enable(vibrator->vcc);
+		if (err) {
+			dev_err(pdev, "failed to enable regulator: %d", err);
+			return err;
+		}
+		vibrator->vcc_on = true;
 	}
 
 	pwm_get_state(vibrator->pwm, &state);
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator)
 
 static void pwm_vibrator_stop(struct pwm_vibrator *vibrator)
 {
-	regulator_disable(vibrator->vcc);
-
 	if (vibrator->pwm_dir)
 		pwm_disable(vibrator->pwm_dir);
 	pwm_disable(vibrator->pwm);
+
+	if (vibrator->vcc_on) {
+		regulator_disable(vibrator->vcc);
+		vibrator->vcc_on = false;
+	}
 }
 
 static void pwm_vibrator_play_work(struct work_struct *work)
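
Note: regulator_enable() and regulator_disable() must stay balanced,
and the play paths can invoke start/stop repeatedly. The vcc_on flag
makes both sides idempotent so the supply's use count can neither
underflow nor be left held. The guarded shape from the hunks above:

	if (!vibrator->vcc_on) {
		err = regulator_enable(vibrator->vcc);
		if (err)
			return err;
		vibrator->vcc_on = true;
	}

	/* ... and on the stop side ... */
	if (vibrator->vcc_on) {
		regulator_disable(vibrator->vcc);
		vibrator->vcc_on = false;
	}
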
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 8ec483e8688b..26ec603fe220 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
+#include <linux/overflow.h>
 #include <linux/input/mt.h>
 #include "../input-compat.h"
 
@@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file)
 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 				   const struct input_absinfo *abs)
 {
-	int min, max;
+	int min, max, range;
 
 	min = abs->minimum;
 	max = abs->maximum;
@@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 		return -EINVAL;
 	}
 
-	if (abs->flat > max - min) {
+	if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
 		printk(KERN_DEBUG
 		       "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
 		       UINPUT_NAME, code, abs->flat, min, max);
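
Note: with userspace-chosen limits, max - min can overflow signed int
(e.g. max = INT_MAX, min = -1), which is undefined behavior, so the old
comparison could be evaluated on a wrapped value. check_sub_overflow()
from <linux/overflow.h> performs the subtraction and reports whether it
wrapped; the flat comparison then only runs on a representable range.
Usage sketch:

	int min = abs->minimum, max = abs->maximum, range;

	/* a true return means max - min wrapped; skip the comparison
	 * rather than evaluate it on a wrapped value */
	if (!check_sub_overflow(max, min, &range) && abs->flat > range)
		return -EINVAL;
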
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f322a1768fbb..225ae6980182 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
 static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
-	{ "ELAN0501", 0 },
 	{ "ELAN0600", 0 },
 	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN060C", 0 },
 	{ "ELAN0611", 0 },
 	{ "ELAN0612", 0 },
+	{ "ELAN0617", 0 },
 	{ "ELAN0618", 0 },
 	{ "ELAN061C", 0 },
 	{ "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9fe075c137dc..a7f8b1614559 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu CELSIUS H760    0x570f02        40, 14, 0c      3 hw buttons (**)
+ * Fujitsu CELSIUS H780    0x5d0f02        41, 16, 0d      3 hw buttons (**)
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
 		},
 	},
+	{
+		/* Fujitsu H780 also has a middle button */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+		},
+	},
 #endif
 	{ }
 };
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c
index b36084710f69..a7cfab3db9ee 100644
--- a/drivers/input/serio/olpc_apsp.c
+++ b/drivers/input/serio/olpc_apsp.c
@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/clk.h>
 
 /*
  * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller.
@@ -75,7 +74,6 @@ struct olpc_apsp {
 	struct serio *kbio;
 	struct serio *padio;
 	void __iomem *base;
-	struct clk *clk;
 	int open_count;
 	int irq;
 };
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port)
 	struct olpc_apsp *priv = port->port_data;
 	unsigned int tmp;
 	unsigned long l;
-	int error;
 
 	if (priv->open_count++ == 0) {
-		error = clk_prepare_enable(priv->clk);
-		if (error)
-			return error;
-
 		l = readl(priv->base + COMMAND_FIFO_STATUS);
 		if (!(l & CMD_STS_MASK)) {
 			dev_err(priv->dev, "SP cannot accept commands.\n");
-			clk_disable_unprepare(priv->clk);
 			return -EIO;
 		}
 
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port)
 		/* Disable interrupt 0 */
 		tmp = readl(priv->base + PJ_INTERRUPT_MASK);
 		writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK);
-
-		clk_disable_unprepare(priv->clk);
 	}
 }
 
@@ -195,6 +185,8 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	priv->dev = &pdev->dev;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(priv->base)) {
@@ -206,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 	if (priv->irq < 0)
 		return priv->irq;
 
-	priv->clk = devm_clk_get(&pdev->dev, "sp");
-	if (IS_ERR(priv->clk))
-		return PTR_ERR(priv->clk);
-
 	/* KEYBOARD */
 	kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
 	if (!kb_serio)
@@ -248,7 +236,6 @@ static int olpc_apsp_probe(struct platform_device *pdev)
 		goto err_irq;
 	}
 
-	priv->dev = &pdev->dev;
 	device_init_wakeup(priv->dev, 1);
 	platform_set_drvdata(pdev, priv);
 
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index c62cceb97bb1..5e8d8384aa2a 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio)
 {
 	struct ps2_gpio_data *drvdata = serio->port_data;
 
+	flush_delayed_work(&drvdata->tx_work);
 	disable_irq(drvdata->irq);
 }
 
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index af6027cc7bbf..068dbbc610fc 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -698,7 +698,7 @@ config TOUCHSCREEN_EDT_FT5X06
 
 config TOUCHSCREEN_RASPBERRYPI_FW
 	tristate "Raspberry Pi's firmware base touch screen support"
-	depends on RASPBERRYPI_FIRMWARE || COMPILE_TEST
+	depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST)
 	help
 	  Say Y here if you have the official Raspberry Pi 7 inch screen on
 	  your system.
diff --git a/drivers/input/touchscreen/raspberrypi-ts.c b/drivers/input/touchscreen/raspberrypi-ts.c
index f456c1125bd6..69881265d121 100644
--- a/drivers/input/touchscreen/raspberrypi-ts.c
+++ b/drivers/input/touchscreen/raspberrypi-ts.c
@@ -147,8 +147,8 @@ static int rpi_ts_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	ts->pdev = pdev;
 
-	ts->fw_regs_va = dma_zalloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
-					     GFP_KERNEL);
+	ts->fw_regs_va = dma_alloc_coherent(dev, PAGE_SIZE, &ts->fw_regs_phys,
+					    GFP_KERNEL);
 	if (!ts->fw_regs_va) {
 		dev_err(dev, "failed to dma_alloc_coherent\n");
 		return -ENOMEM;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 87ba23a75b38..2a7b78bb98b4 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
 
 static void do_detach(struct iommu_dev_data *dev_data)
 {
+	struct protection_domain *domain = dev_data->domain;
 	struct amd_iommu *iommu;
 	u16 alias;
 
 	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	alias = dev_data->alias;
 
-	/* decrease reference counters */
-	dev_data->domain->dev_iommu[iommu->index] -= 1;
-	dev_data->domain->dev_cnt -= 1;
-
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
 
 	/* Flush the DTE entry */
 	device_flush_dte(dev_data);
+
+	/* Flush IOTLB */
+	domain_flush_tlb_pde(domain);
+
+	/* Wait for the flushes to finish */
+	domain_flush_complete(domain);
+
+	/* decrease reference counters - needs to happen after the flushes */
+	domain->dev_iommu[iommu->index] -= 1;
+	domain->dev_cnt -= 1;
 }
 
 /*
@@ -2617,13 +2624,13 @@ out_unmap:
 			bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
 			iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
 
-			if (--mapped_pages)
+			if (--mapped_pages == 0)
 				goto out_free_iova;
 		}
 	}
 
 out_free_iova:
-	free_iova_fast(&dma_dom->iovad, address, npages);
+	free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
 
 out_err:
 	return 0;
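
Note: ordering matters in do_detach(): the DTE and IOTLB flushes now
run while the domain still counts this device, and only afterwards are
the per-IOMMU references dropped. Decrementing first could let the
counts reach zero and have the flush logic skip an IOMMU that still
caches stale translations. The resulting sequence, taken from the two
do_detach() hunks above:

	device_flush_dte(dev_data);		/* flush the DTE entry */
	domain_flush_tlb_pde(domain);		/* flush the IOTLB */
	domain_flush_complete(domain);		/* wait for completion */

	domain->dev_iommu[iommu->index] -= 1;	/* only now drop the counts */
	domain->dev_cnt -= 1;
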
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2bd9ac285c0d..78188bf7e90d 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -363,7 +363,7 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
-static int intel_iommu_sm = 1;
+static int intel_iommu_sm;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL 1
@@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str)
 	} else if (!strncmp(str, "sp_off", 6)) {
 		pr_info("Disable supported super page\n");
 		intel_iommu_superpage = 0;
-	} else if (!strncmp(str, "sm_off", 6)) {
-		pr_info("Intel-IOMMU: disable scalable mode support\n");
-		intel_iommu_sm = 0;
+	} else if (!strncmp(str, "sm_on", 5)) {
+		pr_info("Intel-IOMMU: scalable mode supported\n");
+		intel_iommu_sm = 1;
 	} else if (!strncmp(str, "tboot_noforce", 13)) {
 		printk(KERN_INFO
 			"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
 	struct iommu_resv_region *entry, *next;
 
 	list_for_each_entry_safe(entry, next, head, list) {
-		if (entry->type == IOMMU_RESV_RESERVED)
+		if (entry->type == IOMMU_RESV_MSI)
 			kfree(entry);
 	}
 }
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 6ede4286b835..7e0df67bd3e9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -232,9 +232,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
 
 	spin_lock_init(&dom->pgtlock);
 
-	dom->pgt_va = dma_zalloc_coherent(data->dev,
-					  M2701_IOMMU_PGT_SIZE,
-					  &dom->pgt_pa, GFP_KERNEL);
+	dom->pgt_va = dma_alloc_coherent(data->dev, M2701_IOMMU_PGT_SIZE,
+					 &dom->pgt_pa, GFP_KERNEL);
 	if (!dom->pgt_va)
 		return -ENOMEM;
 
@@ -442,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev)
 		iommu_spec.args_count = count;
 
 		mtk_iommu_create_mapping(dev, &iommu_spec);
+
+		/* dev->iommu_fwspec might have changed */
+		fwspec = dev_iommu_fwspec_get(dev);
+
 		of_node_put(iommu_spec.np);
 	}
 
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index d8947b28db2d..f04a6df65eb8 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -224,7 +224,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 	 * If we have reason to believe the IOMMU driver missed the initial
 	 * probe for dev, replay it to get things in order.
 	 */
-	if (dev->bus && !device_iommu_mapped(dev))
+	if (!err && dev->bus && !device_iommu_mapped(dev))
 		err = iommu_probe_device(dev);
 
 	/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index 2543baba8b1f..5a2ec43b7ddd 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -95,7 +95,7 @@ static inline void setup_irq_channel(u32 magic, void __iomem *reg_addr)
 
 	/* Setup 64 channel slots */
 	for (i = 0; i < INTC_IRQS; i += 4)
-		writel_relaxed(build_channel_val(i, magic), reg_addr + i);
+		writel(build_channel_val(i, magic), reg_addr + i);
 }
 
 static int __init
@@ -135,16 +135,10 @@ ck_intc_init_comm(struct device_node *node, struct device_node *parent)
 static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
 				     u32 irq_base)
 {
-	u32 irq;
-
 	if (hwirq == 0)
 		return 0;
 
-	while (hwirq) {
-		irq = __ffs(hwirq);
-		hwirq &= ~BIT(irq);
-		handle_domain_irq(root_domain, irq_base + irq, regs);
-	}
+	handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
 
 	return 1;
 }
@@ -154,12 +148,16 @@ static void gx_irq_handler(struct pt_regs *regs)
 {
 	bool ret;
 
-	do {
-		ret = handle_irq_perbit(regs,
-			readl_relaxed(reg_base + GX_INTC_PEN31_00), 0);
-		ret |= handle_irq_perbit(regs,
-			readl_relaxed(reg_base + GX_INTC_PEN63_32), 32);
-	} while (ret);
+retry:
+	ret = handle_irq_perbit(regs,
+			readl(reg_base + GX_INTC_PEN63_32), 32);
+	if (ret)
+		goto retry;
+
+	ret = handle_irq_perbit(regs,
+			readl(reg_base + GX_INTC_PEN31_00), 0);
+	if (ret)
+		goto retry;
 }
 
 static int __init
@@ -174,14 +172,14 @@ gx_intc_init(struct device_node *node, struct device_node *parent)
 	/*
 	 * Initial enable reg to disable all interrupts
 	 */
-	writel_relaxed(0x0, reg_base + GX_INTC_NEN31_00);
-	writel_relaxed(0x0, reg_base + GX_INTC_NEN63_32);
+	writel(0x0, reg_base + GX_INTC_NEN31_00);
+	writel(0x0, reg_base + GX_INTC_NEN63_32);
 
 	/*
 	 * Initial mask reg with all unmasked, because we only use enalbe reg
 	 */
-	writel_relaxed(0x0, reg_base + GX_INTC_NMASK31_00);
-	writel_relaxed(0x0, reg_base + GX_INTC_NMASK63_32);
+	writel(0x0, reg_base + GX_INTC_NMASK31_00);
+	writel(0x0, reg_base + GX_INTC_NMASK63_32);
 
 	setup_irq_channel(0x03020100, reg_base + GX_INTC_SOURCE);
 
@@ -204,20 +202,29 @@ static void ck_irq_handler(struct pt_regs *regs)
 	void __iomem *reg_pen_lo = reg_base + CK_INTC_PEN31_00;
 	void __iomem *reg_pen_hi = reg_base + CK_INTC_PEN63_32;
 
-	do {
-		/* handle 0 - 31 irqs */
-		ret = handle_irq_perbit(regs, readl_relaxed(reg_pen_lo), 0);
-		ret |= handle_irq_perbit(regs, readl_relaxed(reg_pen_hi), 32);
+retry:
+	/* handle 0 - 63 irqs */
+	ret = handle_irq_perbit(regs, readl(reg_pen_hi), 32);
+	if (ret)
+		goto retry;
 
-		if (nr_irq == INTC_IRQS)
-			continue;
+	ret = handle_irq_perbit(regs, readl(reg_pen_lo), 0);
+	if (ret)
+		goto retry;
+
+	if (nr_irq == INTC_IRQS)
+		return;
 
 	/* handle 64 - 127 irqs */
-	ret |= handle_irq_perbit(regs,
-		readl_relaxed(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
-	ret |= handle_irq_perbit(regs,
-		readl_relaxed(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
-	} while (ret);
+	ret = handle_irq_perbit(regs,
+			readl(reg_pen_hi + CK_INTC_DUAL_BASE), 96);
+	if (ret)
+		goto retry;
+
+	ret = handle_irq_perbit(regs,
+			readl(reg_pen_lo + CK_INTC_DUAL_BASE), 64);
+	if (ret)
+		goto retry;
 }
 
 static int __init
@@ -230,11 +237,11 @@ ck_intc_init(struct device_node *node, struct device_node *parent)
 		return ret;
 
 	/* Initial enable reg to disable all interrupts */
-	writel_relaxed(0, reg_base + CK_INTC_NEN31_00);
-	writel_relaxed(0, reg_base + CK_INTC_NEN63_32);
+	writel(0, reg_base + CK_INTC_NEN31_00);
+	writel(0, reg_base + CK_INTC_NEN63_32);
 
 	/* Enable irq intc */
-	writel_relaxed(BIT(31), reg_base + CK_INTC_ICR);
+	writel(BIT(31), reg_base + CK_INTC_ICR);
 
 	ck_set_gc(node, reg_base, CK_INTC_NEN31_00, 0);
 	ck_set_gc(node, reg_base, CK_INTC_NEN63_32, 32);
@@ -260,8 +267,8 @@ ck_dual_intc_init(struct device_node *node, struct device_node *parent)
 		return ret;
 
 	/* Initial enable reg to disable all interrupts */
-	writel_relaxed(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
-	writel_relaxed(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
+	writel(0, reg_base + CK_INTC_NEN31_00 + CK_INTC_DUAL_BASE);
+	writel(0, reg_base + CK_INTC_NEN63_32 + CK_INTC_DUAL_BASE);
 
 	ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN31_00, 64);
 	ck_set_gc(node, reg_base + CK_INTC_DUAL_BASE, CK_INTC_NEN63_32, 96);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index db20e992a40f..c3aba3fc818d 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -97,9 +97,14 @@ struct its_device;
  * The ITS structure - contains most of the infrastructure, with the
  * top-level MSI domain, the command queue, the collections, and the
  * list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
  */
 struct its_node {
 	raw_spinlock_t		lock;
+	struct mutex		dev_alloc_lock;
 	struct list_head	entry;
 	void __iomem		*base;
 	phys_addr_t		phys_base;
@@ -156,6 +161,7 @@ struct its_device {
 	void			*itt;
 	u32			nr_ites;
 	u32			device_id;
+	bool			shared;
 };
 
 static struct {
@@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
 		nr_irqs /= 2;
 	} while (nr_irqs > 0);
 
+	if (!nr_irqs)
+		err = -ENOSPC;
+
 	if (err)
 		goto out;
 
@@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void)
 	return 0;
 }
 
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+{
+	u32 count = 1000000;	/* 1s! */
+	bool clean;
+	u64 val;
+
+	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+	val &= ~GICR_VPENDBASER_Valid;
+	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+	do {
+		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+		clean = !(val & GICR_VPENDBASER_Dirty);
+		if (!clean) {
+			count--;
+			cpu_relax();
+			udelay(1);
+		}
+	} while (!clean && count);
+
+	return val;
+}
+
 static void its_cpu_init_lpis(void)
 {
 	void __iomem *rbase = gic_data_rdist_rd_base();
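The helper added above follows a common bounded-poll shape: clear the Valid bit, then re-read until Dirty drops or a fixed budget of 1 µs waits is spent, and return the last value read so the caller can warn if the bit never cleared. A userspace sketch of that shape; the register read is stubbed out and the bit position is an assumption for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the GICR_VPENDBASER read; in the driver this is
 * gits_read_vpendbaser() and DIRTY is GICR_VPENDBASER_Dirty. */
#define DIRTY (1ULL << 60)

static unsigned long long read_reg(void)
{
	static int settle = 3;	/* pretend hardware settles after 3 polls */

	return settle-- > 0 ? DIRTY : 0;
}

/* Poll until Dirty clears or the budget runs out; return the last
 * value so the caller can check the bit itself. */
static unsigned long long wait_until_clean(void)
{
	unsigned int count = 1000000;	/* ~1 s at one 1 us wait per loop */
	unsigned long long val;
	bool clean;

	do {
		val = read_reg();
		clean = !(val & DIRTY);
		if (!clean)
			count--;	/* the driver also cpu_relax() + udelay(1) */
	} while (!clean && count);

	return val;
}

int main(void)
{
	printf("final value: 0x%llx\n", wait_until_clean());
	return 0;
}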
@@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void)
 	val |= GICR_CTLR_ENABLE_LPIS;
 	writel_relaxed(val, rbase + GICR_CTLR);
 
+	if (gic_rdists->has_vlpis) {
+		void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+		/*
+		 * It's possible for CPU to receive VLPIs before it is
+		 * sheduled as a vPE, especially for the first CPU, and the
+		 * VLPI with INTID larger than 2^(IDbits+1) will be considered
+		 * as out of range and dropped by GIC.
+		 * So we initialize IDbits to known value to avoid VLPI drop.
+		 */
+		val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+		pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+			smp_processor_id(), val);
+		gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+		/*
+		 * Also clear Valid bit of GICR_VPENDBASER, in case some
+		 * ancient programming gets left in and has possibility of
+		 * corrupting memory.
+		 */
+		val = its_clear_vpend_valid(vlpi_base);
+		WARN_ON(val & GICR_VPENDBASER_Dirty);
+	}
+
 	/* Make sure the GIC has seen the above */
 	dsb(sy);
 out:
@@ -2399,13 +2455,14 @@ static void its_free_device(struct its_device *its_dev)
 	kfree(its_dev);
 }
 
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
 {
 	int idx;
 
-	idx = find_first_zero_bit(dev->event_map.lpi_map,
-				  dev->event_map.nr_lpis);
-	if (idx == dev->event_map.nr_lpis)
+	idx = bitmap_find_free_region(dev->event_map.lpi_map,
+				      dev->event_map.nr_lpis,
+				      get_count_order(nvecs));
+	if (idx < 0)
 		return -ENOSPC;
 
 	*hwirq = dev->event_map.lpi_base + idx;
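bitmap_find_free_region() hands out naturally aligned power-of-two blocks, so the change above rounds nvecs up via get_count_order() and the per-vector loop can then use hwirq + i inside one contiguous region. A userspace sketch of that rounding, approximating the kernel helper:

#include <assert.h>
#include <stdio.h>

/* Approximation of the kernel's get_count_order(): the smallest
 * order such that (1 << order) >= n, for n >= 1. */
static int count_order(unsigned int n)
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* 5 vectors land in an 8-slot (order-3) region, so every
	 * hwirq + i stays inside one aligned LPI range. */
	assert(count_order(1) == 0);
	assert(count_order(5) == 3);
	assert(count_order(32) == 5);
	printf("5 vectors -> order %d -> %u slots\n",
	       count_order(5), 1U << count_order(5));
	return 0;
}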
@@ -2421,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
 	struct its_device *its_dev;
 	struct msi_domain_info *msi_info;
 	u32 dev_id;
+	int err = 0;
 
 	/*
 	 * We ignore "dev" entierely, and rely on the dev_id that has
@@ -2443,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
 		return -EINVAL;
 	}
 
+	mutex_lock(&its->dev_alloc_lock);
 	its_dev = its_find_device(its, dev_id);
 	if (its_dev) {
 		/*
@@ -2450,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
 		 * another alias (PCI bridge of some sort). No need to
 		 * create the device.
 		 */
+		its_dev->shared = true;
 		pr_debug("Reusing ITT for devID %x\n", dev_id);
 		goto out;
 	}
 
 	its_dev = its_create_device(its, dev_id, nvec, true);
-	if (!its_dev)
-		return -ENOMEM;
+	if (!its_dev) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
 out:
+	mutex_unlock(&its->dev_alloc_lock);
 	info->scratchpad[0].ptr = its_dev;
-	return 0;
+	return err;
 }
2466 2529
2467static struct msi_domain_ops its_msi_domain_ops = { 2530static struct msi_domain_ops its_msi_domain_ops = {
@@ -2501,21 +2564,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	int err;
 	int i;
 
-	for (i = 0; i < nr_irqs; i++) {
-		err = its_alloc_device_irq(its_dev, &hwirq);
-		if (err)
-			return err;
+	err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+	if (err)
+		return err;
 
-		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+	for (i = 0; i < nr_irqs; i++) {
+		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
 		if (err)
 			return err;
 
 		irq_domain_set_hwirq_and_chip(domain, virq + i,
-					      hwirq, &its_irq_chip, its_dev);
+					      hwirq + i, &its_irq_chip, its_dev);
 		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
 		pr_debug("ID:%d pID:%d vID:%d\n",
-			 (int)(hwirq - its_dev->event_map.lpi_base),
-			 (int) hwirq, virq + i);
+			 (int)(hwirq + i - its_dev->event_map.lpi_base),
+			 (int)(hwirq + i), virq + i);
 	}
 
 	return 0;
@@ -2565,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 {
 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+	struct its_node *its = its_dev->its;
 	int i;
 
 	for (i = 0; i < nr_irqs; i++) {
@@ -2579,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 		irq_domain_reset_irq_data(data);
 	}
 
-	/* If all interrupts have been freed, start mopping the floor */
-	if (bitmap_empty(its_dev->event_map.lpi_map,
+	mutex_lock(&its->dev_alloc_lock);
+
+	/*
+	 * If all interrupts have been freed, start mopping the
+	 * floor. This is conditionned on the device not being shared.
+	 */
+	if (!its_dev->shared &&
+	    bitmap_empty(its_dev->event_map.lpi_map,
 			 its_dev->event_map.nr_lpis)) {
 		its_lpi_free(its_dev->event_map.lpi_map,
 			     its_dev->event_map.lpi_base,
@@ -2592,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
 		its_free_device(its_dev);
 	}
 
+	mutex_unlock(&its->dev_alloc_lock);
+
 	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
 }
 
@@ -2754,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 static void its_vpe_deschedule(struct its_vpe *vpe)
 {
 	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
-	u32 count = 1000000;	/* 1s! */
-	bool clean;
 	u64 val;
 
-	/* We're being scheduled out */
-	val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-	val &= ~GICR_VPENDBASER_Valid;
-	gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
-	do {
-		val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-		clean = !(val & GICR_VPENDBASER_Dirty);
-		if (!clean) {
-			count--;
-			cpu_relax();
-			udelay(1);
-		}
-	} while (!clean && count);
+	val = its_clear_vpend_valid(vlpi_base);
 
-	if (unlikely(!clean && !count)) {
+	if (unlikely(val & GICR_VPENDBASER_Dirty)) {
 		pr_err_ratelimited("ITS virtual pending table not cleaning\n");
 		vpe->idai = false;
 		vpe->pending_last = true;
@@ -3516,6 +3573,7 @@ static int __init its_probe_one(struct resource *res,
 	}
 
 	raw_spin_lock_init(&its->lock);
+	mutex_init(&its->dev_alloc_lock);
 	INIT_LIST_HEAD(&its->entry);
 	INIT_LIST_HEAD(&its->its_device_list);
 	typer = gic_read_typer(its_base + GITS_TYPER);
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c416e3..fbfa7ff6deb1 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@ struct mbi_range {
 	unsigned long *bm;
 };
 
-static struct mutex mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
 static phys_addr_t mbi_phys_base;
 static struct mbi_range *mbi_ranges;
 static unsigned int mbi_range_nr;
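The mbi_lock change fixes a mutex that was declared but never passed through mutex_init(); DEFINE_MUTEX() declares and statically initializes it in one step. The closest userspace analogue, as a sketch with pthreads:

#include <pthread.h>
#include <stdio.h>

/* Broken pattern: a lock object that is never initialized, e.g.
 *	static pthread_mutex_t lock;	(zeroed, not portably valid)
 * Fixed pattern: a static initializer, the analogue of DEFINE_MUTEX(). */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&lock);
	printf("critical section\n");
	pthread_mutex_unlock(&lock);
	return 0;
}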
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
index e9256dee1a45..8b81271c823c 100644
--- a/drivers/irqchip/irq-madera.c
+++ b/drivers/irqchip/irq-madera.c
@@ -7,7 +7,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
@@ -16,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/irqchip/irq-madera.h>
 #include <linux/mfd/madera/core.h>
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1d7764..3496b61a312a 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
 #define SEL_INT_PENDING		(1 << 6)
 #define SEL_INT_NUM_MASK	0x3f
 
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)
+
 struct icu_chip_data {
 	int			nr_irqs;
 	unsigned int		virq_base;
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
 static const struct mmp_intc_conf mmp2_conf = {
 	.conf_enable	= 0x20,
 	.conf_disable	= 0x0,
-	.conf_mask	= 0x7f,
+	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
 };
 
 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 6edfd4bfa169..a93296b9b45d 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -822,6 +822,7 @@ out_unmap:
 static const struct irq_domain_ops stm32_exti_h_domain_ops = {
 	.alloc	= stm32_exti_h_domain_alloc,
 	.free	= irq_domain_free_irqs_common,
+	.xlate = irq_domain_xlate_twocell,
 };
 
 static int
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 5385f5768345..27933338f7b3 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d)
 	unsigned int mask = 1u << d->hwirq;
 
 	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
-		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
-			      HW_IRQ_MX_BASE), MIENG);
-	} else {
-		mask = __this_cpu_read(cached_irq_mask) & ~mask;
-		__this_cpu_write(cached_irq_mask, mask);
-		xtensa_set_sr(mask, intenable);
+		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+		if (ext_irq >= HW_IRQ_MX_BASE) {
+			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
+			return;
+		}
 	}
+	mask = __this_cpu_read(cached_irq_mask) & ~mask;
+	__this_cpu_write(cached_irq_mask, mask);
+	xtensa_set_sr(mask, intenable);
 }
 
 static void xtensa_mx_irq_unmask(struct irq_data *d)
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d)
 	unsigned int mask = 1u << d->hwirq;
 
 	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
 		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
-		set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) -
-			      HW_IRQ_MX_BASE), MIENGSET);
-	} else {
-		mask |= __this_cpu_read(cached_irq_mask);
-		__this_cpu_write(cached_irq_mask, mask);
-		xtensa_set_sr(mask, intenable);
+		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);
+
+		if (ext_irq >= HW_IRQ_MX_BASE) {
+			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
+			return;
+		}
 	}
+	mask |= __this_cpu_read(cached_irq_mask);
+	__this_cpu_write(cached_irq_mask, mask);
+	xtensa_set_sr(mask, intenable);
 }
 
 static void xtensa_mx_irq_enable(struct irq_data *d)
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d)
 
 static int xtensa_mx_irq_retrigger(struct irq_data *d)
 {
-	xtensa_set_sr(1 << d->hwirq, intset);
+	unsigned int mask = 1u << d->hwirq;
+
+	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+		return 0;
+	xtensa_set_sr(mask, intset);
 	return 1;
 }
 
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index c200234dd2c9..ab12328be5ee 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d)
 
 static int xtensa_irq_retrigger(struct irq_data *d)
 {
-	xtensa_set_sr(1 << d->hwirq, intset);
+	unsigned int mask = 1u << d->hwirq;
+
+	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
+		return 0;
+	xtensa_set_sr(mask, intset);
 	return 1;
 }
 
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e48902..40ca1e8fa09f 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
 	int i, j;
 
 	for (j = 0; j < AVM_MAXVERSION; j++)
-		cinfo->version[j] = "\0\0" + 1;
+		cinfo->version[j] = "";
 	for (i = 0, j = 0;
 	     j < AVM_MAXVERSION && i < cinfo->versionlen;
 	     j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6d05946b445e..124ff530da82 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -262,8 +262,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
 	struct dchannel *dch = &hw->dch;
 	int i;
 
-	phi = kzalloc(sizeof(struct ph_info) +
-		      dch->dev.nrbchan * sizeof(struct ph_info_ch), GFP_ATOMIC);
+	phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
 	phi->dch.ch.protocol = hw->protocol;
 	phi->dch.ch.Flags = dch->Flags;
 	phi->dch.state = dch->state;
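struct_size(phi, bch, n) computes the size of the header plus n trailing flexible-array elements; the kernel macro additionally saturates to SIZE_MAX on overflow, which is what makes it safer than the open-coded multiply it replaces. A simplified userspace sketch without the saturation; the struct layout is an abbreviated stand-in, not the real mISDN definitions:

#include <stdio.h>
#include <stdlib.h>

struct ph_info_ch { int protocol; unsigned int flags; };

struct ph_info {
	int proto;
	int nrbchan;
	struct ph_info_ch bch[];	/* flexible array member */
};

/* Simplified analogue of the kernel's struct_size(); the real macro
 * also checks the multiply and add for overflow. */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct ph_info *phi;
	int n = 4;

	phi = calloc(1, STRUCT_SIZE(phi, bch, n));	/* sizeof does not evaluate phi */
	if (!phi)
		return 1;
	phi->nrbchan = n;
	printf("allocated %zu bytes for %d channels\n",
	       STRUCT_SIZE(phi, bch, n), n);
	free(phi);
	return 0;
}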
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 1b2239c1d569..dc1cded716c1 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1437,15 +1437,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
 	modem_info *info = (modem_info *) tty->driver_data;
 
+	mutex_lock(&modem_info_mutex);
 	if (!old_termios)
 		isdn_tty_change_speed(info);
 	else {
 		if (tty->termios.c_cflag == old_termios->c_cflag &&
 		    tty->termios.c_ispeed == old_termios->c_ispeed &&
-		    tty->termios.c_ospeed == old_termios->c_ospeed)
+		    tty->termios.c_ospeed == old_termios->c_ospeed) {
+			mutex_unlock(&modem_info_mutex);
 			return;
+		}
 		isdn_tty_change_speed(info);
 	}
+	mutex_unlock(&modem_info_mutex);
 }
 
 /*
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 211ed6cffd10..578978711887 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
 	spin_lock_irqsave(&timer->dev->lock, flags);
 	if (timer->id >= 0)
 		list_move_tail(&timer->list, &timer->dev->expired);
-	spin_unlock_irqrestore(&timer->dev->lock, flags);
 	wake_up_interruptible(&timer->dev->wait);
+	spin_unlock_irqrestore(&timer->dev->lock, flags);
 }
 
 static int
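Moving wake_up_interruptible() inside the locked region closes a window in which the woken waiter could tear the device down while dev_expire_timer() was still about to touch timer->dev. The same ordering expressed as a pthreads sketch, signaling while the lock is still held:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int expired;

/* Waker: signal while still holding the lock, mirroring the fix that
 * calls wake_up_interruptible() before spin_unlock_irqrestore(). */
static void *expire(void *arg)
{
	pthread_mutex_lock(&lock);
	expired = 1;
	pthread_cond_signal(&cond);	/* still under the lock */
	pthread_mutex_unlock(&lock);
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, expire, NULL);
	pthread_mutex_lock(&lock);
	while (!expired)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("timer expired\n");
	return 0;
}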
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74feee2b2..fd64df5a57a5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
 
 	/* Let the programs run for couple of ms and check the engine status */
 	usleep_range(3000, 6000);
-	lp55xx_read(chip, LP5523_REG_STATUS, &status);
+	ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+	if (ret)
+		return ret;
 	status &= LP5523_ENG_STATUS_MASK;
 
 	if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0ff22159a0ca..dd538e6b2748 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
 	if (IS_ERR(bip))
 		return PTR_ERR(bip);
 
-	tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
+	tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
 
 	bip->bip_iter.bi_size = tag_len;
 	bip->bip_iter.bi_sector = io->cc->start + io->sector;
@@ -2414,9 +2414,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key
 	 * capi:cipher_api_spec-iv:ivopts
 	 */
 	tmp = &cipher_in[strlen("capi:")];
-	cipher_api = strsep(&tmp, "-");
-	*ivmode = strsep(&tmp, ":");
-	*ivopts = tmp;
+
+	/* Separate IV options if present, it can contain another '-' in hash name */
+	*ivopts = strrchr(tmp, ':');
+	if (*ivopts) {
+		**ivopts = '\0';
+		(*ivopts)++;
+	}
+	/* Parse IV mode */
+	*ivmode = strrchr(tmp, '-');
+	if (*ivmode) {
+		**ivmode = '\0';
+		(*ivmode)++;
+	}
+	/* The rest is crypto API spec */
+	cipher_api = tmp;
 
 	if (*ivmode && !strcmp(*ivmode, "lmk"))
 		cc->tfms_count = 64;
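The rewritten parser works right to left precisely because a hash name can itself contain '-' (as the in-code comment above notes), which broke the old left-to-right strsep() split. A standalone sketch of the same strrchr() logic; the mapping-table strings below are hypothetical, for illustration only:

#include <stdio.h>
#include <string.h>

/* Right-to-left parse of "cipher_api-ivmode:ivopts": take IV options
 * after the last ':', then the IV mode after the last '-', so a '-'
 * inside the crypto API spec (e.g. in "sha3-256") is left alone. */
static void parse(char *tmp)
{
	char *ivopts = strrchr(tmp, ':');
	char *ivmode;

	if (ivopts)
		*ivopts++ = '\0';
	ivmode = strrchr(tmp, '-');
	if (ivmode)
		*ivmode++ = '\0';
	printf("api=%s ivmode=%s ivopts=%s\n", tmp,
	       ivmode ? ivmode : "(none)", ivopts ? ivopts : "(none)");
}

int main(void)
{
	/* the '-' inside "sha3-256" is what broke strsep(&tmp, "-") */
	char a[] = "authenc(hmac(sha3-256),xts(aes))-essiv:sha256";
	char b[] = "xts(aes)-plain64";

	parse(a);	/* api=authenc(hmac(sha3-256),xts(aes)) ivmode=essiv ivopts=sha256 */
	parse(b);	/* api=xts(aes) ivmode=plain64 ivopts=(none) */
	return 0;
}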
@@ -2486,11 +2498,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
 		goto bad_mem;
 
 	chainmode = strsep(&tmp, "-");
-	*ivopts = strsep(&tmp, "-");
-	*ivmode = strsep(&*ivopts, ":");
-
-	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+	*ivmode = strsep(&tmp, ":");
+	*ivopts = tmp;
 
 	/*
 	 * For compatibility with the original dm-crypt mapping format, if
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 4eb5f8c56535..a20531e5f3b4 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
 static void rq_completed(struct mapped_device *md)
 {
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 
 	/*
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 20b0776e39ef..ed3caceaed07 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
 	return r;
 }
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
 	int r;
 	uint32_t ref_count;
@@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
 	down_read(&pmd->root_lock);
 	r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
 	if (!r)
-		*result = (ref_count != 0);
+		*result = (ref_count > 1);
 	up_read(&pmd->root_lock);
 
 	return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 35e954ea20a9..f6be0d733c20 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
 
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 
-int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index dadd9696340c..e83b63608262 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
 
 	spinlock_t lock;
 	struct bio_list deferred_flush_bios;
+	struct bio_list deferred_flush_completions;
 	struct list_head prepared_mappings;
 	struct list_head prepared_discards;
 	struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 	mempool_free(m, &m->tc->pool->mapping_pool);
 }
 
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+	struct pool *pool = tc->pool;
+	unsigned long flags;
+
+	/*
+	 * If the bio has the REQ_FUA flag set we must commit the metadata
+	 * before signaling its completion.
+	 */
+	if (!bio_triggers_commit(tc, bio)) {
+		bio_endio(bio);
+		return;
+	}
+
+	/*
+	 * Complete bio with an error if earlier I/O caused changes to the
+	 * metadata that can't be committed, e.g, due to I/O errors on the
+	 * metadata device.
+	 */
+	if (dm_thin_aborted_changes(tc->td)) {
+		bio_io_error(bio);
+		return;
+	}
+
+	/*
+	 * Batch together any bios that trigger commits and then issue a
+	 * single commit for them in process_deferred_bios().
+	 */
+	spin_lock_irqsave(&pool->lock, flags);
+	bio_list_add(&pool->deferred_flush_completions, bio);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
 static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 {
 	struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio);
+		complete_overwrite_bio(tc, bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1048,7 +1082,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	 * passdown we have to check that these blocks are now unused.
 	 */
 	int r = 0;
-	bool used = true;
+	bool shared = true;
 	struct thin_c *tc = m->tc;
 	struct pool *pool = tc->pool;
 	dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
@@ -1058,11 +1092,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 	while (b != end) {
 		/* find start of unmapped run */
 		for (; b < end; b++) {
-			r = dm_pool_block_is_used(pool->pmd, b, &used);
+			r = dm_pool_block_is_shared(pool->pmd, b, &shared);
 			if (r)
 				goto out;
 
-			if (!used)
+			if (!shared)
 				break;
 		}
 
@@ -1071,11 +1105,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
 
 		/* find end of run */
 		for (e = b + 1; e != end; e++) {
-			r = dm_pool_block_is_used(pool->pmd, e, &used);
+			r = dm_pool_block_is_shared(pool->pmd, e, &shared);
 			if (r)
 				goto out;
 
-			if (used)
+			if (shared)
 				break;
 		}
 
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
 {
 	unsigned long flags;
 	struct bio *bio;
-	struct bio_list bios;
+	struct bio_list bios, bio_completions;
 	struct thin_c *tc;
 
 	tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
 	}
 
 	/*
-	 * If there are any deferred flush bios, we must commit
-	 * the metadata before issuing them.
+	 * If there are any deferred flush bios, we must commit the metadata
+	 * before issuing them or signaling their completion.
 	 */
 	bio_list_init(&bios);
+	bio_list_init(&bio_completions);
+
 	spin_lock_irqsave(&pool->lock, flags);
 	bio_list_merge(&bios, &pool->deferred_flush_bios);
 	bio_list_init(&pool->deferred_flush_bios);
+
+	bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+	bio_list_init(&pool->deferred_flush_completions);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
-	if (bio_list_empty(&bios) &&
+	if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
 	    !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
 		return;
 
 	if (commit(pool)) {
+		bio_list_merge(&bios, &bio_completions);
+
 		while ((bio = bio_list_pop(&bios)))
 			bio_io_error(bio);
 		return;
 	}
 	pool->last_commit_jiffies = jiffies;
 
+	while ((bio = bio_list_pop(&bio_completions)))
+		bio_endio(bio);
+
 	while ((bio = bio_list_pop(&bios)))
 		generic_make_request(bio);
 }
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 	INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
 	spin_lock_init(&pool->lock);
 	bio_list_init(&pool->deferred_flush_bios);
+	bio_list_init(&pool->deferred_flush_completions);
 	INIT_LIST_HEAD(&pool->prepared_mappings);
 	INIT_LIST_HEAD(&pool->prepared_discards);
 	INIT_LIST_HEAD(&pool->prepared_discards_pt2);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d67c95ef8d7e..515e6af9bed2 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io)
 				    true, duration, &io->stats_aux);
 
 	/* nudge anyone waiting on suspend queue */
-	if (unlikely(waitqueue_active(&md->wait)))
+	if (unlikely(wq_has_sleeper(&md->wait)))
 		wake_up(&md->wait);
 }
 
@@ -1320,7 +1320,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 
 	__bio_clone_fast(clone, bio);
 
-	if (unlikely(bio_integrity(bio) != NULL)) {
+	if (bio_integrity(bio)) {
 		int r;
 
 		if (unlikely(!dm_target_has_integrity(tio->ti->type) &&
@@ -1339,7 +1339,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
 	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
 	clone->bi_iter.bi_size = to_bytes(len);
 
-	if (unlikely(bio_integrity(bio) != NULL))
+	if (bio_integrity(bio))
 		bio_integrity_trim(clone);
 
 	return 0;
@@ -1588,6 +1588,9 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 	ci->sector = bio->bi_iter.bi_sector;
 }
 
+#define __dm_part_stat_sub(part, field, subnd)	\
+	(part_stat_get(part, field) -= (subnd))
+
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
@@ -1642,7 +1645,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
 						  GFP_NOIO, &md->queue->bio_split);
 			ci.io->orig_bio = b;
+
+			/*
+			 * Adjust IO stats for each split, otherwise upon queue
+			 * reentry there will be redundant IO accounting.
+			 * NOTE: this is a stop-gap fix, a proper fix involves
+			 * significant refactoring of DM core's bio splitting
+			 * (by eliminating DM's splitting and just using bio_split)
+			 */
+			part_stat_lock();
+			__dm_part_stat_sub(&dm_disk(md)->part0,
+					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
+			part_stat_unlock();
+
 			bio_chain(b, bio);
+			trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
 			ret = generic_make_request(bio);
 			break;
 		}
@@ -1713,6 +1730,15 @@ out:
 	return ret;
 }
 
+static blk_qc_t dm_process_bio(struct mapped_device *md,
+			       struct dm_table *map, struct bio *bio)
+{
+	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+		return __process_bio(md, map, bio);
+	else
+		return __split_and_process_bio(md, map, bio);
+}
+
 static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct mapped_device *md = q->queuedata;
@@ -1733,10 +1759,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 		return ret;
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		ret = __process_bio(md, map, bio);
-	else
-		ret = __split_and_process_bio(md, map, bio);
+	ret = dm_process_bio(md, map, bio);
 
 	dm_put_live_table(md, srcu_idx);
 	return ret;
@@ -2415,9 +2438,9 @@ static void dm_wq_work(struct work_struct *work)
 			break;
 
 		if (dm_request_based(md))
-			generic_make_request(c);
+			(void) generic_make_request(c);
 		else
-			__split_and_process_bio(md, map, c);
+			(void) dm_process_bio(md, map, c);
 	}
 
 	dm_put_live_table(md, srcu_idx);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fd4af4de03b4..05ffffb8b769 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -207,15 +207,10 @@ static bool create_on_open = true;
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 			    struct mddev *mddev)
 {
-	struct bio *b;
-
 	if (!mddev || !bioset_initialized(&mddev->bio_set))
 		return bio_alloc(gfp_mask, nr_iovecs);
 
-	b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
-	if (!b)
-		return NULL;
-	return b;
+	return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
 }
 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109071cc..fa47249fa3e4 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
 		reschedule_retry(r1_bio);
 }
 
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+	sector_t sync_blocks = 0;
+	sector_t s = r1_bio->sector;
+	long sectors_to_go = r1_bio->sectors;
+
+	/* make sure these bits don't get cleared. */
+	do {
+		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+		s += sync_blocks;
+		sectors_to_go -= sync_blocks;
+	} while (sectors_to_go > 0);
+}
+
 static void end_sync_write(struct bio *bio)
 {
 	int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
 	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
 
 	if (!uptodate) {
-		sector_t sync_blocks = 0;
-		sector_t s = r1_bio->sector;
-		long sectors_to_go = r1_bio->sectors;
-		/* make sure these bits doesn't get cleared. */
-		do {
-			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
-			s += sync_blocks;
-			sectors_to_go -= sync_blocks;
-		} while (sectors_to_go > 0);
+		abort_sync_write(mddev, r1_bio);
 		set_bit(WriteErrorSeen, &rdev->flags);
 		if (!test_and_set_bit(WantReplacement, &rdev->flags))
 			set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		     (i == r1_bio->read_disk ||
 		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
 			continue;
-		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+			abort_sync_write(mddev, r1_bio);
 			continue;
+		}
 
 		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ec3a5ef7fee0..cbbe6b6535be 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1935,12 +1935,14 @@ out:
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-			  sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+		struct r5conf *conf,
+		sector_t stripe_sect,
+		int noblock)
 {
 	struct stripe_head *sh;
 
-	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+	sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
 	if (!sh)
 		return NULL;	/* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 				stripe_sect);
 
 		if (!sh) {
-			sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+			sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
 			/*
 			 * cannot get stripe from raid5_get_active_stripe
 			 * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
 				r5c_recovery_replay_stripes(
 					cached_stripe_list, ctx);
 				sh = r5c_recovery_alloc_stripe(
-					conf, stripe_sect);
+					conf, stripe_sect, 1);
 			}
 			if (!sh) {
+				int new_size = conf->min_nr_stripes * 2;
 				pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
 					 mdname(mddev),
-					 conf->min_nr_stripes * 2);
-				raid5_set_cache_size(mddev,
-						     conf->min_nr_stripes * 2);
-				sh = r5c_recovery_alloc_stripe(conf,
-							       stripe_sect);
+					 new_size);
+				ret = raid5_set_cache_size(mddev, new_size);
+				if (conf->min_nr_stripes <= new_size / 2) {
+					pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+					       mdname(mddev),
+					       ret,
+					       new_size,
+					       conf->min_nr_stripes,
+					       conf->max_nr_stripes);
+					return -ENOMEM;
+				}
+				sh = r5c_recovery_alloc_stripe(
+					conf, stripe_sect, 0);
 			}
 			if (!sh) {
 				pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
 				       mdname(mddev));
 				return -ENOMEM;
 			}
 			list_add_tail(&sh->lru, cached_stripe_list);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4990f0319f6c..cecea901ab8c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+	int result = 0;
 	struct r5conf *conf = mddev->private;
 
 	if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
 	mutex_lock(&conf->cache_size_mutex);
 	while (size > conf->max_nr_stripes)
-		if (!grow_one_stripe(conf, GFP_KERNEL))
+		if (!grow_one_stripe(conf, GFP_KERNEL)) {
+			conf->min_nr_stripes = conf->max_nr_stripes;
+			result = -ENOMEM;
 			break;
+		}
 	mutex_unlock(&conf->cache_size_mutex);
 
-	return 0;
+	return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
 
6395 6399
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 447baaebca44..cdb79ae2d8dc 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -218,8 +218,8 @@ static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
 {
 	struct device *dev = &cio2->pci_dev->dev;
 
-	q->fbpt = dma_zalloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
-				      GFP_KERNEL);
+	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
+				     GFP_KERNEL);
 	if (!q->fbpt)
 		return -ENOMEM;
 
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index e80123cba406..060c0ad6243a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -49,7 +49,7 @@ int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
 	struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
 	struct device *dev = &ctx->dev->plat_dev->dev;
 
-	mem->va = dma_zalloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
 	if (!mem->va) {
 		mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
 			     size);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d01821a6906a..89d9c4c21037 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -807,7 +807,9 @@ static void vim2m_stop_streaming(struct vb2_queue *q)
 	struct vb2_v4l2_buffer *vbuf;
 	unsigned long flags;
 
-	cancel_delayed_work_sync(&dev->work_run);
+	if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
+		cancel_delayed_work_sync(&dev->work_run);
+
 	for (;;) {
 		if (V4L2_TYPE_IS_OUTPUT(q->type))
 			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 1441a73ce64c..90aad465f9ed 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -287,6 +287,7 @@ static void v4l_print_format(const void *arg, bool write_only)
287 const struct v4l2_window *win; 287 const struct v4l2_window *win;
288 const struct v4l2_sdr_format *sdr; 288 const struct v4l2_sdr_format *sdr;
289 const struct v4l2_meta_format *meta; 289 const struct v4l2_meta_format *meta;
290 u32 planes;
290 unsigned i; 291 unsigned i;
291 292
292 pr_cont("type=%s", prt_names(p->type, v4l2_type_names)); 293 pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -317,7 +318,8 @@ static void v4l_print_format(const void *arg, bool write_only)
317 prt_names(mp->field, v4l2_field_names), 318 prt_names(mp->field, v4l2_field_names),
318 mp->colorspace, mp->num_planes, mp->flags, 319 mp->colorspace, mp->num_planes, mp->flags,
319 mp->ycbcr_enc, mp->quantization, mp->xfer_func); 320 mp->ycbcr_enc, mp->quantization, mp->xfer_func);
320 for (i = 0; i < mp->num_planes; i++) 321 planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
322 for (i = 0; i < planes; i++)
321 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i, 323 printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
322 mp->plane_fmt[i].bytesperline, 324 mp->plane_fmt[i].bytesperline,
323 mp->plane_fmt[i].sizeimage); 325 mp->plane_fmt[i].sizeimage);
@@ -1551,8 +1553,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1551 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) 1553 if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
1552 break; 1554 break;
1553 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1555 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1556 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1557 break;
1554 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1558 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1555 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1559 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1560 bytesperline);
1556 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg); 1561 return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
1557 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1562 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1558 if (unlikely(!ops->vidioc_s_fmt_vid_overlay)) 1563 if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
@@ -1581,8 +1586,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
1581 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) 1586 if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
1582 break; 1587 break;
1583 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1588 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1589 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1590 break;
1584 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1591 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1585 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1592 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1593 bytesperline);
1586 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg); 1594 return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
1587 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1595 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
1588 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay)) 1596 if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
@@ -1648,8 +1656,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1648 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) 1656 if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
1649 break; 1657 break;
1650 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1658 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1659 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1660 break;
1651 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1661 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1652 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1662 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1663 bytesperline);
1653 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg); 1664 return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
1654 case V4L2_BUF_TYPE_VIDEO_OVERLAY: 1665 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
1655 if (unlikely(!ops->vidioc_try_fmt_vid_overlay)) 1666 if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
@@ -1678,8 +1689,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
1678 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) 1689 if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
1679 break; 1690 break;
1680 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); 1691 CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
1692 if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES)
1693 break;
1681 for (i = 0; i < p->fmt.pix_mp.num_planes; i++) 1694 for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
1682 CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline); 1695 CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i],
1696 bytesperline);
1683 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg); 1697 return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
1684 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: 1698 case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
1685 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay)) 1699 if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
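
All four hunks in v4l2-ioctl.c treat pix_mp.num_planes as untrusted input from the ioctl caller: plane_fmt[] only has VIDEO_MAX_PLANES entries, so the debug printer clamps its loop bound and v4l_s_fmt()/v4l_try_fmt() reject oversized counts before their per-plane loops. Note the loops also switch CLEAR_AFTER_FIELD() to operate on each plane_fmt element rather than on the whole ioctl argument. The clamp idiom, assuming mp points at a struct v4l2_pix_format_mplane:

	u32 planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
	u32 i;

	for (i = 0; i < planes; i++)	/* cannot index past plane_fmt[] */
		pr_debug("plane %u: bytesperline=%u\n",
			 i, mp->plane_fmt[i].bytesperline);
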
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 8c5dfdce4326..76f9909cf396 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -102,6 +102,7 @@ config MFD_AAT2870_CORE
102config MFD_AT91_USART 102config MFD_AT91_USART
103 tristate "AT91 USART Driver" 103 tristate "AT91 USART Driver"
104 select MFD_CORE 104 select MFD_CORE
105 depends on ARCH_AT91 || COMPILE_TEST
105 help 106 help
106 Select this to get support for AT91 USART IP. This is a wrapper 107 Select this to get support for AT91 USART IP. This is a wrapper
107 over at91-usart-serial driver and usart-spi-driver. Only one function 108 over at91-usart-serial driver and usart-spi-driver. Only one function
@@ -1418,7 +1419,7 @@ config MFD_TPS65217
1418 1419
1419config MFD_TPS68470 1420config MFD_TPS68470
1420 bool "TI TPS68470 Power Management / LED chips" 1421 bool "TI TPS68470 Power Management / LED chips"
1421 depends on ACPI && I2C=y 1422 depends on ACPI && PCI && I2C=y
1422 select MFD_CORE 1423 select MFD_CORE
1423 select REGMAP_I2C 1424 select REGMAP_I2C
1424 select I2C_DESIGNWARE_PLATFORM 1425 select I2C_DESIGNWARE_PLATFORM
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30d09d177171..11ab17f64c64 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
261 mutex_unlock(&ab8500->lock); 261 mutex_unlock(&ab8500->lock);
262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret); 262 dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
263 263
264 return ret; 264 return (ret < 0) ? ret : 0;
265} 265}
266 266
267static int ab8500_get_register(struct device *dev, u8 bank, 267static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index e1450a56fc07..3c97f2c0fdfe 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -641,9 +641,9 @@ static const struct mfd_cell axp221_cells[] = {
641 641
642static const struct mfd_cell axp223_cells[] = { 642static const struct mfd_cell axp223_cells[] = {
643 { 643 {
644 .name = "axp221-pek", 644 .name = "axp221-pek",
645 .num_resources = ARRAY_SIZE(axp22x_pek_resources), 645 .num_resources = ARRAY_SIZE(axp22x_pek_resources),
646 .resources = axp22x_pek_resources, 646 .resources = axp22x_pek_resources,
647 }, { 647 }, {
648 .name = "axp22x-adc", 648 .name = "axp22x-adc",
649 .of_compatible = "x-powers,axp221-adc", 649 .of_compatible = "x-powers,axp221-adc",
@@ -651,7 +651,7 @@ static const struct mfd_cell axp223_cells[] = {
651 .name = "axp20x-battery-power-supply", 651 .name = "axp20x-battery-power-supply",
652 .of_compatible = "x-powers,axp221-battery-power-supply", 652 .of_compatible = "x-powers,axp221-battery-power-supply",
653 }, { 653 }, {
654 .name = "axp20x-regulator", 654 .name = "axp20x-regulator",
655 }, { 655 }, {
656 .name = "axp20x-ac-power-supply", 656 .name = "axp20x-ac-power-supply",
657 .of_compatible = "x-powers,axp221-ac-power-supply", 657 .of_compatible = "x-powers,axp221-ac-power-supply",
@@ -667,9 +667,9 @@ static const struct mfd_cell axp223_cells[] = {
667 667
668static const struct mfd_cell axp152_cells[] = { 668static const struct mfd_cell axp152_cells[] = {
669 { 669 {
670 .name = "axp20x-pek", 670 .name = "axp20x-pek",
671 .num_resources = ARRAY_SIZE(axp152_pek_resources), 671 .num_resources = ARRAY_SIZE(axp152_pek_resources),
672 .resources = axp152_pek_resources, 672 .resources = axp152_pek_resources,
673 }, 673 },
674}; 674};
675 675
@@ -698,87 +698,101 @@ static const struct resource axp288_charger_resources[] = {
698 698
699static const struct mfd_cell axp288_cells[] = { 699static const struct mfd_cell axp288_cells[] = {
700 { 700 {
701 .name = "axp288_adc", 701 .name = "axp288_adc",
702 .num_resources = ARRAY_SIZE(axp288_adc_resources), 702 .num_resources = ARRAY_SIZE(axp288_adc_resources),
703 .resources = axp288_adc_resources, 703 .resources = axp288_adc_resources,
704 }, 704 }, {
705 { 705 .name = "axp288_extcon",
706 .name = "axp288_extcon", 706 .num_resources = ARRAY_SIZE(axp288_extcon_resources),
707 .num_resources = ARRAY_SIZE(axp288_extcon_resources), 707 .resources = axp288_extcon_resources,
708 .resources = axp288_extcon_resources, 708 }, {
709 }, 709 .name = "axp288_charger",
710 { 710 .num_resources = ARRAY_SIZE(axp288_charger_resources),
711 .name = "axp288_charger", 711 .resources = axp288_charger_resources,
712 .num_resources = ARRAY_SIZE(axp288_charger_resources), 712 }, {
713 .resources = axp288_charger_resources, 713 .name = "axp288_fuel_gauge",
714 }, 714 .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
715 { 715 .resources = axp288_fuel_gauge_resources,
716 .name = "axp288_fuel_gauge", 716 }, {
717 .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources), 717 .name = "axp221-pek",
718 .resources = axp288_fuel_gauge_resources, 718 .num_resources = ARRAY_SIZE(axp288_power_button_resources),
719 }, 719 .resources = axp288_power_button_resources,
720 { 720 }, {
721 .name = "axp221-pek", 721 .name = "axp288_pmic_acpi",
722 .num_resources = ARRAY_SIZE(axp288_power_button_resources),
723 .resources = axp288_power_button_resources,
724 },
725 {
726 .name = "axp288_pmic_acpi",
727 }, 722 },
728}; 723};
729 724
730static const struct mfd_cell axp803_cells[] = { 725static const struct mfd_cell axp803_cells[] = {
731 { 726 {
732 .name = "axp221-pek", 727 .name = "axp221-pek",
733 .num_resources = ARRAY_SIZE(axp803_pek_resources), 728 .num_resources = ARRAY_SIZE(axp803_pek_resources),
734 .resources = axp803_pek_resources, 729 .resources = axp803_pek_resources,
730 }, {
731 .name = "axp20x-gpio",
732 .of_compatible = "x-powers,axp813-gpio",
733 }, {
734 .name = "axp813-adc",
735 .of_compatible = "x-powers,axp813-adc",
736 }, {
737 .name = "axp20x-battery-power-supply",
738 .of_compatible = "x-powers,axp813-battery-power-supply",
739 }, {
740 .name = "axp20x-ac-power-supply",
741 .of_compatible = "x-powers,axp813-ac-power-supply",
742 .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
743 .resources = axp20x_ac_power_supply_resources,
735 }, 744 },
736 { .name = "axp20x-regulator" }, 745 { .name = "axp20x-regulator" },
737}; 746};
738 747
739static const struct mfd_cell axp806_self_working_cells[] = { 748static const struct mfd_cell axp806_self_working_cells[] = {
740 { 749 {
741 .name = "axp221-pek", 750 .name = "axp221-pek",
742 .num_resources = ARRAY_SIZE(axp806_pek_resources), 751 .num_resources = ARRAY_SIZE(axp806_pek_resources),
743 .resources = axp806_pek_resources, 752 .resources = axp806_pek_resources,
744 }, 753 },
745 { .name = "axp20x-regulator" }, 754 { .name = "axp20x-regulator" },
746}; 755};
747 756
748static const struct mfd_cell axp806_cells[] = { 757static const struct mfd_cell axp806_cells[] = {
749 { 758 {
750 .id = 2, 759 .id = 2,
751 .name = "axp20x-regulator", 760 .name = "axp20x-regulator",
752 }, 761 },
753}; 762};
754 763
755static const struct mfd_cell axp809_cells[] = { 764static const struct mfd_cell axp809_cells[] = {
756 { 765 {
757 .name = "axp221-pek", 766 .name = "axp221-pek",
758 .num_resources = ARRAY_SIZE(axp809_pek_resources), 767 .num_resources = ARRAY_SIZE(axp809_pek_resources),
759 .resources = axp809_pek_resources, 768 .resources = axp809_pek_resources,
760 }, { 769 }, {
761 .id = 1, 770 .id = 1,
762 .name = "axp20x-regulator", 771 .name = "axp20x-regulator",
763 }, 772 },
764}; 773};
765 774
766static const struct mfd_cell axp813_cells[] = { 775static const struct mfd_cell axp813_cells[] = {
767 { 776 {
768 .name = "axp221-pek", 777 .name = "axp221-pek",
769 .num_resources = ARRAY_SIZE(axp803_pek_resources), 778 .num_resources = ARRAY_SIZE(axp803_pek_resources),
770 .resources = axp803_pek_resources, 779 .resources = axp803_pek_resources,
771 }, { 780 }, {
772 .name = "axp20x-regulator", 781 .name = "axp20x-regulator",
773 }, { 782 }, {
774 .name = "axp20x-gpio", 783 .name = "axp20x-gpio",
775 .of_compatible = "x-powers,axp813-gpio", 784 .of_compatible = "x-powers,axp813-gpio",
776 }, { 785 }, {
777 .name = "axp813-adc", 786 .name = "axp813-adc",
778 .of_compatible = "x-powers,axp813-adc", 787 .of_compatible = "x-powers,axp813-adc",
779 }, { 788 }, {
780 .name = "axp20x-battery-power-supply", 789 .name = "axp20x-battery-power-supply",
781 .of_compatible = "x-powers,axp813-battery-power-supply", 790 .of_compatible = "x-powers,axp813-battery-power-supply",
791 }, {
792 .name = "axp20x-ac-power-supply",
793 .of_compatible = "x-powers,axp813-ac-power-supply",
794 .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
795 .resources = axp20x_ac_power_supply_resources,
782 }, 796 },
783}; 797};
784 798
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 503979c81dae..fab3cdc27ed6 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
59}; 59};
60 60
61static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = { 61static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
62 regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
62 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN), 63 regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
63 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT), 64 regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
64 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ), 65 regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index b99a194ce5a4..2d0fee488c5a 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
499 499
500 cros_ec_debugfs_remove(ec); 500 cros_ec_debugfs_remove(ec);
501 501
502 mfd_remove_devices(ec->dev);
502 cdev_del(&ec->cdev); 503 cdev_del(&ec->cdev);
503 device_unregister(&ec->class_dev); 504 device_unregister(&ec->class_dev);
504 return 0; 505 return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5970b8def548..aec20e1c7d3d 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
2584 .irq_unmask = prcmu_irq_unmask, 2584 .irq_unmask = prcmu_irq_unmask,
2585}; 2585};
2586 2586
2587static __init char *fw_project_name(u32 project) 2587static char *fw_project_name(u32 project)
2588{ 2588{
2589 switch (project) { 2589 switch (project) {
2590 case PRCMU_FW_PROJECT_U8500: 2590 case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); 2732 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
2733} 2733}
2734 2734
2735static void __init init_prcm_registers(void) 2735static void init_prcm_registers(void)
2736{ 2736{
2737 u32 val; 2737 u32 val;
2738 2738
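
Dropping __init from fw_project_name() and init_prcm_registers() (and from the twl-core helpers further down) fixes a section mismatch: __init code is discarded once boot finishes, so any later call would jump into freed memory. A sketch of the bug shape, with hypothetical names:

	static __init const char *boot_only_name(u32 id)	/* .init.text, freed after boot */
	{
		return "u8500";
	}

	static int runtime_path(u32 id)
	{
		/* still reachable at runtime -> call into freed .init.text */
		pr_info("%s\n", boot_only_name(id));
		return 0;
	}
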
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index ca829f85672f..2713de989f05 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -82,11 +82,13 @@ static void exynos_lpass_enable(struct exynos_lpass *lpass)
82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 82 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S);
83 83
84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK, 84 regmap_write(lpass->top, SFR_LPASS_INTR_CPU_MASK,
85 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S); 85 LPASS_INTR_SFR | LPASS_INTR_DMA | LPASS_INTR_I2S |
86 LPASS_INTR_UART);
86 87
87 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET); 88 exynos_lpass_core_sw_reset(lpass, LPASS_I2S_SW_RESET);
88 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET); 89 exynos_lpass_core_sw_reset(lpass, LPASS_DMA_SW_RESET);
89 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET); 90 exynos_lpass_core_sw_reset(lpass, LPASS_MEM_SW_RESET);
91 exynos_lpass_core_sw_reset(lpass, LPASS_UART_SW_RESET);
90} 92}
91 93
92static void exynos_lpass_disable(struct exynos_lpass *lpass) 94static void exynos_lpass_disable(struct exynos_lpass *lpass)
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 440030cecbbd..2a77988d0462 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -15,6 +15,7 @@
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/mfd/core.h> 16#include <linux/mfd/core.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/mutex.h>
18#include <linux/notifier.h> 19#include <linux/notifier.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
@@ -155,7 +156,7 @@ static int madera_wait_for_boot(struct madera *madera)
155 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2, 156 usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
156 MADERA_BOOT_POLL_INTERVAL_USEC); 157 MADERA_BOOT_POLL_INTERVAL_USEC);
157 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val); 158 regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
158 }; 159 }
159 160
160 if (!(val & MADERA_BOOT_DONE_STS1)) { 161 if (!(val & MADERA_BOOT_DONE_STS1)) {
161 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n"); 162 dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
@@ -357,6 +358,8 @@ int madera_dev_init(struct madera *madera)
357 358
358 dev_set_drvdata(madera->dev, madera); 359 dev_set_drvdata(madera->dev, madera);
359 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier); 360 BLOCKING_INIT_NOTIFIER_HEAD(&madera->notifier);
361 mutex_init(&madera->dapm_ptr_lock);
362
360 madera_set_micbias_info(madera); 363 madera_set_micbias_info(madera);
361 364
362 /* 365 /*
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index d8217366ed36..d8ddd1a6f304 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -280,7 +280,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
280 280
281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) { 281 for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
282 sprintf(fps_name, "fps%d", fps_id); 282 sprintf(fps_name, "fps%d", fps_id);
283 if (!strcmp(fps_np->name, fps_name)) 283 if (of_node_name_eq(fps_np, fps_name))
284 break; 284 break;
285 } 285 }
286 286
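
of_node_name_eq() replaces open-coded strcmp() on np->name; it is part of removing the cached name pointer from struct device_node and compares against the node-name portion of the full path, stopping at any "@unit-address" suffix. stmpe.c below gets the same conversion. Drop-in usage:

	/* was: if (!strcmp(fps_np->name, fps_name)) */
	if (of_node_name_eq(fps_np, fps_name))
		break;
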
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index f475e848252f..d0bf50e3568d 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
274 274
275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING; 275 mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
276 276
277 mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0); 277 ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
278 if (ret)
279 goto out;
278 280
279 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 | 281 adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
280 MC13XXX_ADC0_CHRGRAWDIV; 282 MC13XXX_ADC0_CHRGRAWDIV;
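
mc13xxx_reg_read() goes over SPI or I2C and can fail, in which case old_adc0 stayed uninitialized yet was later folded into the value written back to ADC0. Checking the return code and unwinding through the function's existing out label avoids that:

	ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
	if (ret)
		goto out;	/* old_adc0 holds garbage on error */
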
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 77b64bd64df3..ab24e176ef44 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
329 329
330 default: 330 default:
331 dev_err(&pdev->dev, "unsupported chip: %d\n", id); 331 dev_err(&pdev->dev, "unsupported chip: %d\n", id);
332 ret = -ENODEV; 332 return -ENODEV;
333 break;
334 } 333 }
335 334
336 if (ret) { 335 if (ret) {
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 52fafea06067..8d420c37b2a6 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
638 return -EFAULT; 638 return -EFAULT;
639 } 639 }
640 640
641 writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
642 writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
643 writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
644
641 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], 645 dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
642 fw_version[1], 646 fw_version[1],
643 fw_version[2]); 647 fw_version[2]);
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 2a8369657e38..26c7b63e008a 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -109,7 +109,7 @@ struct rave_sp_reply {
109/** 109/**
110 * struct rave_sp_checksum - Variant specific checksum implementation details 110 * struct rave_sp_checksum - Variant specific checksum implementation details
111 * 111 *
112 * @length: Caculated checksum length 112 * @length: Calculated checksum length
113 * @subroutine: Utilized checksum algorithm implementation 113 * @subroutine: Utilized checksum algorithm implementation
114 */ 114 */
115struct rave_sp_checksum { 115struct rave_sp_checksum {
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 566caca4efd8..7569a4be0608 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1302,17 +1302,17 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
1302 pdata->autosleep = (pdata->autosleep_timeout) ? true : false; 1302 pdata->autosleep = (pdata->autosleep_timeout) ? true : false;
1303 1303
1304 for_each_child_of_node(np, child) { 1304 for_each_child_of_node(np, child) {
1305 if (!strcmp(child->name, "stmpe_gpio")) { 1305 if (of_node_name_eq(child, "stmpe_gpio")) {
1306 pdata->blocks |= STMPE_BLOCK_GPIO; 1306 pdata->blocks |= STMPE_BLOCK_GPIO;
1307 } else if (!strcmp(child->name, "stmpe_keypad")) { 1307 } else if (of_node_name_eq(child, "stmpe_keypad")) {
1308 pdata->blocks |= STMPE_BLOCK_KEYPAD; 1308 pdata->blocks |= STMPE_BLOCK_KEYPAD;
1309 } else if (!strcmp(child->name, "stmpe_touchscreen")) { 1309 } else if (of_node_name_eq(child, "stmpe_touchscreen")) {
1310 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN; 1310 pdata->blocks |= STMPE_BLOCK_TOUCHSCREEN;
1311 } else if (!strcmp(child->name, "stmpe_adc")) { 1311 } else if (of_node_name_eq(child, "stmpe_adc")) {
1312 pdata->blocks |= STMPE_BLOCK_ADC; 1312 pdata->blocks |= STMPE_BLOCK_ADC;
1313 } else if (!strcmp(child->name, "stmpe_pwm")) { 1313 } else if (of_node_name_eq(child, "stmpe_pwm")) {
1314 pdata->blocks |= STMPE_BLOCK_PWM; 1314 pdata->blocks |= STMPE_BLOCK_PWM;
1315 } else if (!strcmp(child->name, "stmpe_rotator")) { 1315 } else if (of_node_name_eq(child, "stmpe_rotator")) {
1316 pdata->blocks |= STMPE_BLOCK_ROTATOR; 1316 pdata->blocks |= STMPE_BLOCK_ROTATOR;
1317 } 1317 }
1318 } 1318 }
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index c2d47d78705b..fd111296b959 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
264 cell->pdata_size = sizeof(tscadc); 264 cell->pdata_size = sizeof(tscadc);
265 } 265 }
266 266
267 err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells, 267 err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
268 tscadc->used_cells, NULL, 0, NULL); 268 tscadc->cells, tscadc->used_cells, NULL,
269 0, NULL);
269 if (err < 0) 270 if (err < 0)
270 goto err_disable_clk; 271 goto err_disable_clk;
271 272
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 910f569ff77c..8bcdecf494d0 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
235 235
236 mutex_init(&tps->tps_lock); 236 mutex_init(&tps->tps_lock);
237 237
238 ret = regmap_add_irq_chip(tps->regmap, tps->irq, 238 ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
239 IRQF_ONESHOT, 0, &tps65218_irq_chip, 239 IRQF_ONESHOT, 0, &tps65218_irq_chip,
240 &tps->irq_data); 240 &tps->irq_data);
241 if (ret < 0) 241 if (ret < 0)
242 return ret; 242 return ret;
243 243
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
253 ARRAY_SIZE(tps65218_cells), NULL, 0, 253 ARRAY_SIZE(tps65218_cells), NULL, 0,
254 regmap_irq_get_domain(tps->irq_data)); 254 regmap_irq_get_domain(tps->irq_data));
255 255
256 if (ret < 0)
257 goto err_irq;
258
259 return 0;
260
261err_irq:
262 regmap_del_irq_chip(tps->irq, tps->irq_data);
263
264 return ret; 256 return ret;
265} 257}
266 258
267static int tps65218_remove(struct i2c_client *client)
268{
269 struct tps65218 *tps = i2c_get_clientdata(client);
270
271 regmap_del_irq_chip(tps->irq, tps->irq_data);
272
273 return 0;
274}
275
276static const struct i2c_device_id tps65218_id_table[] = { 259static const struct i2c_device_id tps65218_id_table[] = {
277 { "tps65218", TPS65218 }, 260 { "tps65218", TPS65218 },
278 { }, 261 { },
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
285 .of_match_table = of_tps65218_match_table, 268 .of_match_table = of_tps65218_match_table,
286 }, 269 },
287 .probe = tps65218_probe, 270 .probe = tps65218_probe,
288 .remove = tps65218_remove,
289 .id_table = tps65218_id_table, 271 .id_table = tps65218_id_table,
290}; 272};
291 273
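
Converting to devm_regmap_add_irq_chip() ties the IRQ chip to the lifetime of the I2C client's device, so the manual error unwind in probe and the remove() callback, whose only job was regmap_del_irq_chip(), both disappear. The managed variant just takes the owning device up front:

	ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
				       IRQF_ONESHOT, 0, &tps65218_irq_chip,
				       &tps->irq_data);
	if (ret < 0)
		return ret;	/* cleanup now happens automatically on detach */
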
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index b89379782741..9c7925ca13cf 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
592 return 0; 592 return 0;
593} 593}
594 594
595static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
596{
597 struct tps6586x *tps6586x = dev_get_drvdata(dev);
598
599 if (tps6586x->client->irq)
600 disable_irq(tps6586x->client->irq);
601
602 return 0;
603}
604
605static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
606{
607 struct tps6586x *tps6586x = dev_get_drvdata(dev);
608
609 if (tps6586x->client->irq)
610 enable_irq(tps6586x->client->irq);
611
612 return 0;
613}
614
615static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
616 tps6586x_i2c_resume);
617
595static const struct i2c_device_id tps6586x_id_table[] = { 618static const struct i2c_device_id tps6586x_id_table[] = {
596 { "tps6586x", 0 }, 619 { "tps6586x", 0 },
597 { }, 620 { },
@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
602 .driver = { 625 .driver = {
603 .name = "tps6586x", 626 .name = "tps6586x",
604 .of_match_table = of_match_ptr(tps6586x_of_match), 627 .of_match_table = of_match_ptr(tps6586x_of_match),
628 .pm = &tps6586x_pm_ops,
605 }, 629 },
606 .probe = tps6586x_i2c_probe, 630 .probe = tps6586x_i2c_probe,
607 .remove = tps6586x_i2c_remove, 631 .remove = tps6586x_i2c_remove,
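
The new hooks keep the PMIC's interrupt handler from running while the I2C bus may already be suspended. SIMPLE_DEV_PM_OPS() wires one suspend/resume pair into all of the system-sleep callbacks; approximately:

	/* approximate expansion of SIMPLE_DEV_PM_OPS(name, s, r) */
	static const struct dev_pm_ops tps6586x_pm_ops = {
		.suspend  = tps6586x_i2c_suspend,
		.resume   = tps6586x_i2c_resume,
		.freeze   = tps6586x_i2c_suspend,
		.thaw     = tps6586x_i2c_resume,
		.poweroff = tps6586x_i2c_suspend,
		.restore  = tps6586x_i2c_resume,
	};

__maybe_unused on the callbacks keeps them warning-free when CONFIG_PM_SLEEP is disabled and the macro compiles out.
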
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4be3d239da9e..299016bc46d9 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
979 * letting it generate the right frequencies for USB, MADC, and 979 * letting it generate the right frequencies for USB, MADC, and
980 * other purposes. 980 * other purposes.
981 */ 981 */
982static inline int __init protect_pm_master(void) 982static inline int protect_pm_master(void)
983{ 983{
984 int e = 0; 984 int e = 0;
985 985
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
988 return e; 988 return e;
989} 989}
990 990
991static inline int __init unprotect_pm_master(void) 991static inline int unprotect_pm_master(void)
992{ 992{
993 int e = 0; 993 int e = 0;
994 994
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 1ee68bd440fb..16c6e2accfaa 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */ 1618 { 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */ 1619 { 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */ 1620 { 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
1621 { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
1621 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */ 1622 { 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
1622 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */ 1623 { 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
1623 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */ 1624 { 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
2869 case ARIZONA_ASRC_ENABLE: 2870 case ARIZONA_ASRC_ENABLE:
2870 case ARIZONA_ASRC_STATUS: 2871 case ARIZONA_ASRC_STATUS:
2871 case ARIZONA_ASRC_RATE1: 2872 case ARIZONA_ASRC_RATE1:
2873 case ARIZONA_ASRC_RATE2:
2872 case ARIZONA_ISRC_1_CTRL_1: 2874 case ARIZONA_ISRC_1_CTRL_1:
2873 case ARIZONA_ISRC_1_CTRL_2: 2875 case ARIZONA_ISRC_1_CTRL_2:
2874 case ARIZONA_ISRC_1_CTRL_3: 2876 case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index efe2fb72d54b..25265fd0fd6e 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -218,8 +218,8 @@ void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
218 if (get_order(size) >= MAX_ORDER) 218 if (get_order(size) >= MAX_ORDER)
219 return NULL; 219 return NULL;
220 220
221 return dma_zalloc_coherent(&cd->pci_dev->dev, size, dma_handle, 221 return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
222 GFP_KERNEL); 222 GFP_KERNEL);
223} 223}
224 224
225void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size, 225void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index b8aaa684c397..2ed23c99f59f 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
820 * 820 *
821 * Return: 821 * Return:
822 * 0 - Success 822 * 0 - Success
823 * Non-zero - Failure
823 */ 824 */
824static int ibmvmc_open(struct inode *inode, struct file *file) 825static int ibmvmc_open(struct inode *inode, struct file *file)
825{ 826{
826 struct ibmvmc_file_session *session; 827 struct ibmvmc_file_session *session;
827 int rc = 0;
828 828
829 pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, 829 pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
830 (unsigned long)inode, (unsigned long)file, 830 (unsigned long)inode, (unsigned long)file,
831 ibmvmc.state); 831 ibmvmc.state);
832 832
833 session = kzalloc(sizeof(*session), GFP_KERNEL); 833 session = kzalloc(sizeof(*session), GFP_KERNEL);
834 if (!session)
835 return -ENOMEM;
836
834 session->file = file; 837 session->file = file;
835 file->private_data = session; 838 file->private_data = session;
836 839
837 return rc; 840 return 0;
838} 841}
839 842
840/** 843/**
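
ibmvmc_open() dereferenced the kzalloc() result unconditionally, so an allocation failure became a NULL-pointer oops in the open() path; the new check turns it into a clean -ENOMEM, the rc local becomes dead, and the kernel-doc now admits the non-zero return:

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;
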
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1fc8ea0f519b..ca4c9cc218a2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head,
401 struct mei_cl_cb *cb, *next; 401 struct mei_cl_cb *cb, *next;
402 402
403 list_for_each_entry_safe(cb, next, head, list) { 403 list_for_each_entry_safe(cb, next, head, list) {
404 if (cl == cb->cl) 404 if (cl == cb->cl) {
405 list_del_init(&cb->list); 405 list_del_init(&cb->list);
406 if (cb->fop_type == MEI_FOP_READ)
407 mei_io_cb_free(cb);
408 }
406 } 409 }
407} 410}
408 411
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 78c26cebf5d4..8f7616557c97 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1187,9 +1187,15 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
1187 dma_setup_res = (struct hbm_dma_setup_response *)mei_msg; 1187 dma_setup_res = (struct hbm_dma_setup_response *)mei_msg;
1188 1188
1189 if (dma_setup_res->status) { 1189 if (dma_setup_res->status) {
1190 dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n", 1190 u8 status = dma_setup_res->status;
1191 dma_setup_res->status, 1191
1192 mei_hbm_status_str(dma_setup_res->status)); 1192 if (status == MEI_HBMS_NOT_ALLOWED) {
1193 dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
1194 } else {
1195 dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
1196 status,
1197 mei_hbm_status_str(status));
1198 }
1193 dev->hbm_f_dr_supported = 0; 1199 dev->hbm_f_dr_supported = 0;
1194 mei_dmam_ring_free(dev); 1200 mei_dmam_ring_free(dev);
1195 } 1201 }
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index e4b10b2d1a08..bb1ee9834a02 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -127,6 +127,8 @@
127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ 127#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ 128#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
129 129
130#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
131
130#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ 132#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
131 133
132#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ 134#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
@@ -137,6 +139,8 @@
137#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ 139#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
138#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ 140#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
139 141
142#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
143
140/* 144/*
141 * MEI HW Section 145 * MEI HW Section
142 */ 146 */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 73ace2d59dea..3ab946ad3257 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, 88 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, 89 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
90 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, 90 {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
91 {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, 91 {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)},
92 92
93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, 93 {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, 94 {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
95 95
96 {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)},
97
96 {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, 98 {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)},
97 99
98 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, 100 {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
@@ -103,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
103 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, 105 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
104 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, 106 {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
105 107
108 {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
109
106 /* required last entry */ 110 /* required last entry */
107 {0, } 111 {0, }
108}; 112};
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 6b212c8b78e7..744757f541be 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -47,7 +47,8 @@
47 * @dc: Virtio device control 47 * @dc: Virtio device control
48 * @vpdev: VOP device which is the parent for this virtio device 48 * @vpdev: VOP device which is the parent for this virtio device
49 * @vr: Buffer for accessing the VRING 49 * @vr: Buffer for accessing the VRING
50 * @used: Buffer for used 50 * @used_virt: Virtual address of used ring
51 * @used: DMA address of used ring
51 * @used_size: Size of the used buffer 52 * @used_size: Size of the used buffer
52 * @reset_done: Track whether VOP reset is complete 53 * @reset_done: Track whether VOP reset is complete
53 * @virtio_cookie: Cookie returned upon requesting a interrupt 54 * @virtio_cookie: Cookie returned upon requesting a interrupt
@@ -61,6 +62,7 @@ struct _vop_vdev {
61 struct mic_device_ctrl __iomem *dc; 62 struct mic_device_ctrl __iomem *dc;
62 struct vop_device *vpdev; 63 struct vop_device *vpdev;
63 void __iomem *vr[VOP_MAX_VRINGS]; 64 void __iomem *vr[VOP_MAX_VRINGS];
65 void *used_virt[VOP_MAX_VRINGS];
64 dma_addr_t used[VOP_MAX_VRINGS]; 66 dma_addr_t used[VOP_MAX_VRINGS];
65 int used_size[VOP_MAX_VRINGS]; 67 int used_size[VOP_MAX_VRINGS];
66 struct completion reset_done; 68 struct completion reset_done;
@@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq)
260static void vop_del_vq(struct virtqueue *vq, int n) 262static void vop_del_vq(struct virtqueue *vq, int n)
261{ 263{
262 struct _vop_vdev *vdev = to_vopvdev(vq->vdev); 264 struct _vop_vdev *vdev = to_vopvdev(vq->vdev);
263 struct vring *vr = (struct vring *)(vq + 1);
264 struct vop_device *vpdev = vdev->vpdev; 265 struct vop_device *vpdev = vdev->vpdev;
265 266
266 dma_unmap_single(&vpdev->dev, vdev->used[n], 267 dma_unmap_single(&vpdev->dev, vdev->used[n],
267 vdev->used_size[n], DMA_BIDIRECTIONAL); 268 vdev->used_size[n], DMA_BIDIRECTIONAL);
268 free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); 269 free_pages((unsigned long)vdev->used_virt[n],
270 get_order(vdev->used_size[n]));
269 vring_del_virtqueue(vq); 271 vring_del_virtqueue(vq);
270 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); 272 vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]);
271 vdev->vr[n] = NULL; 273 vdev->vr[n] = NULL;
@@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev)
283 vop_del_vq(vq, idx++); 285 vop_del_vq(vq, idx++);
284} 286}
285 287
288static struct virtqueue *vop_new_virtqueue(unsigned int index,
289 unsigned int num,
290 struct virtio_device *vdev,
291 bool context,
292 void *pages,
293 bool (*notify)(struct virtqueue *vq),
294 void (*callback)(struct virtqueue *vq),
295 const char *name,
296 void *used)
297{
298 bool weak_barriers = false;
299 struct vring vring;
300
301 vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN);
302 vring.used = used;
303
304 return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
305 notify, callback, name);
306}
307
286/* 308/*
287 * This routine will assign vring's allocated in host/io memory. Code in 309 * This routine will assign vring's allocated in host/io memory. Code in
288 * virtio_ring.c however continues to access this io memory as if it were local 310 * virtio_ring.c however continues to access this io memory as if it were local
@@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
302 struct _mic_vring_info __iomem *info; 324 struct _mic_vring_info __iomem *info;
303 void *used; 325 void *used;
304 int vr_size, _vr_size, err, magic; 326 int vr_size, _vr_size, err, magic;
305 struct vring *vr;
306 u8 type = ioread8(&vdev->desc->type); 327 u8 type = ioread8(&vdev->desc->type);
307 328
308 if (index >= ioread8(&vdev->desc->num_vq)) 329 if (index >= ioread8(&vdev->desc->num_vq))
@@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
322 return ERR_PTR(-ENOMEM); 343 return ERR_PTR(-ENOMEM);
323 vdev->vr[index] = va; 344 vdev->vr[index] = va;
324 memset_io(va, 0x0, _vr_size); 345 memset_io(va, 0x0, _vr_size);
325 vq = vring_new_virtqueue( 346
326 index,
327 le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN,
328 dev,
329 false,
330 ctx,
331 (void __force *)va, vop_notify, callback, name);
332 if (!vq) {
333 err = -ENOMEM;
334 goto unmap;
335 }
336 info = va + _vr_size; 347 info = va + _vr_size;
337 magic = ioread32(&info->magic); 348 magic = ioread32(&info->magic);
338 349
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
341 goto unmap; 352 goto unmap;
342 } 353 }
343 354
344 /* Allocate and reassign used ring now */
345 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + 355 vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 +
346 sizeof(struct vring_used_elem) * 356 sizeof(struct vring_used_elem) *
347 le16_to_cpu(config.num)); 357 le16_to_cpu(config.num));
348 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 358 used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
349 get_order(vdev->used_size[index])); 359 get_order(vdev->used_size[index]));
360 vdev->used_virt[index] = used;
350 if (!used) { 361 if (!used) {
351 err = -ENOMEM; 362 err = -ENOMEM;
352 dev_err(_vop_dev(vdev), "%s %d err %d\n", 363 dev_err(_vop_dev(vdev), "%s %d err %d\n",
353 __func__, __LINE__, err); 364 __func__, __LINE__, err);
354 goto del_vq; 365 goto unmap;
355 } 366 }
367
368 vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx,
369 (void __force *)va, vop_notify, callback,
370 name, used);
371 if (!vq) {
372 err = -ENOMEM;
373 goto free_used;
374 }
375
356 vdev->used[index] = dma_map_single(&vpdev->dev, used, 376 vdev->used[index] = dma_map_single(&vpdev->dev, used,
357 vdev->used_size[index], 377 vdev->used_size[index],
358 DMA_BIDIRECTIONAL); 378 DMA_BIDIRECTIONAL);
@@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev,
360 err = -ENOMEM; 380 err = -ENOMEM;
361 dev_err(_vop_dev(vdev), "%s %d err %d\n", 381 dev_err(_vop_dev(vdev), "%s %d err %d\n",
362 __func__, __LINE__, err); 382 __func__, __LINE__, err);
363 goto free_used; 383 goto del_vq;
364 } 384 }
365 writeq(vdev->used[index], &vqconfig->used_address); 385 writeq(vdev->used[index], &vqconfig->used_address);
366 /*
367 * To reassign the used ring here we are directly accessing
368 * struct vring_virtqueue which is a private data structure
369 * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in
370 * vring_new_virtqueue() would ensure that
371 * (&vq->vring == (struct vring *) (&vq->vq + 1));
372 */
373 vr = (struct vring *)(vq + 1);
374 vr->used = used;
375 386
376 vq->priv = vdev; 387 vq->priv = vdev;
377 return vq; 388 return vq;
389del_vq:
390 vring_del_virtqueue(vq);
378free_used: 391free_used:
379 free_pages((unsigned long)used, 392 free_pages((unsigned long)used,
380 get_order(vdev->used_size[index])); 393 get_order(vdev->used_size[index]));
381del_vq:
382 vring_del_virtqueue(vq);
383unmap: 394unmap:
384 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); 395 vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]);
385 return ERR_PTR(err); 396 return ERR_PTR(err);
@@ -394,16 +405,21 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs,
394 struct _vop_vdev *vdev = to_vopvdev(dev); 405 struct _vop_vdev *vdev = to_vopvdev(dev);
395 struct vop_device *vpdev = vdev->vpdev; 406 struct vop_device *vpdev = vdev->vpdev;
396 struct mic_device_ctrl __iomem *dc = vdev->dc; 407 struct mic_device_ctrl __iomem *dc = vdev->dc;
397 int i, err, retry; 408 int i, err, retry, queue_idx = 0;
398 409
399 /* We must have this many virtqueues. */ 410 /* We must have this many virtqueues. */
400 if (nvqs > ioread8(&vdev->desc->num_vq)) 411 if (nvqs > ioread8(&vdev->desc->num_vq))
401 return -ENOENT; 412 return -ENOENT;
402 413
403 for (i = 0; i < nvqs; ++i) { 414 for (i = 0; i < nvqs; ++i) {
415 if (!names[i]) {
416 vqs[i] = NULL;
417 continue;
418 }
419
404 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n", 420 dev_dbg(_vop_dev(vdev), "%s: %d: %s\n",
405 __func__, i, names[i]); 421 __func__, i, names[i]);
406 vqs[i] = vop_find_vq(dev, i, callbacks[i], names[i], 422 vqs[i] = vop_find_vq(dev, queue_idx++, callbacks[i], names[i],
407 ctx ? ctx[i] : false); 423 ctx ? ctx[i] : false);
408 if (IS_ERR(vqs[i])) { 424 if (IS_ERR(vqs[i])) {
409 err = PTR_ERR(vqs[i]); 425 err = PTR_ERR(vqs[i]);
@@ -576,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
576 int ret = -1; 592 int ret = -1;
577 593
578 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { 594 if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
595 struct device *dev = get_device(&vdev->vdev.dev);
596
579 dev_dbg(&vpdev->dev, 597 dev_dbg(&vpdev->dev,
580 "%s %d config_change %d type %d vdev %p\n", 598 "%s %d config_change %d type %d vdev %p\n",
581 __func__, __LINE__, 599 __func__, __LINE__,
@@ -587,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
587 iowrite8(-1, &dc->h2c_vdev_db); 605 iowrite8(-1, &dc->h2c_vdev_db);
588 if (status & VIRTIO_CONFIG_S_DRIVER_OK) 606 if (status & VIRTIO_CONFIG_S_DRIVER_OK)
589 wait_for_completion(&vdev->reset_done); 607 wait_for_completion(&vdev->reset_done);
590 put_device(&vdev->vdev.dev); 608 put_device(dev);
591 iowrite8(1, &dc->guest_ack); 609 iowrite8(1, &dc->guest_ack);
592 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", 610 dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
593 __func__, __LINE__, ioread8(&dc->guest_ack)); 611 __func__, __LINE__, ioread8(&dc->guest_ack));
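
The vop rework stops reaching into vring_virtqueue's private layout to swap in the DMA-able used ring: vop_new_virtqueue() builds a struct vring with vring_init(), overrides vring.used before the queue exists, and hands it to __vring_new_virtqueue(). vop_find_vqs() also learns to skip NULL names so virtio drivers can leave holes in the vq array, and _vop_remove_device() takes its own reference before waiting for reset. Because the used pages are now allocated before the virtqueue, the error labels swap order too; unwinding must stay the reverse of setup. Schematic, not the driver code:

	a = alloc_a();
	if (!a)
		return -ENOMEM;
	b = alloc_b(a);		/* b is built on top of a */
	if (!b) {
		err = -ENOMEM;
		goto free_a;
	}
	return 0;
free_b:
	free_b(b);		/* tear down in reverse order of setup */
free_a:
	free_a(a);
	return err;
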
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 595ac065b401..95ff7c5a1dfb 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -70,8 +70,12 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context)
70 struct resource r; 70 struct resource r;
71 71
72 if (acpi_dev_resource_io(res, &r)) { 72 if (acpi_dev_resource_io(res, &r)) {
73#ifdef CONFIG_HAS_IOPORT_MAP
73 base = ioport_map(r.start, resource_size(&r)); 74 base = ioport_map(r.start, resource_size(&r));
74 return AE_OK; 75 return AE_OK;
76#else
77 return AE_ERROR;
78#endif
75 } else if (acpi_dev_resource_memory(res, &r)) { 79 } else if (acpi_dev_resource_memory(res, &r)) {
76 base = ioremap(r.start, resource_size(&r)); 80 base = ioremap(r.start, resource_size(&r));
77 return AE_OK; 81 return AE_OK;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index aef1185f383d..14f3fdb8c6bb 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
2112 if (waiting) 2112 if (waiting)
2113 wake_up(&mq->wait); 2113 wake_up(&mq->wait);
2114 else 2114 else
2115 kblockd_schedule_work(&mq->complete_work); 2115 queue_work(mq->card->complete_wq, &mq->complete_work);
2116 2116
2117 return; 2117 return;
2118 } 2118 }
@@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card)
2924 2924
2925 mmc_fixup_device(card, mmc_blk_fixups); 2925 mmc_fixup_device(card, mmc_blk_fixups);
2926 2926
2927 card->complete_wq = alloc_workqueue("mmc_complete",
2928 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2929 if (unlikely(!card->complete_wq)) {
2930 pr_err("Failed to create mmc completion workqueue");
2931 return -ENOMEM;
2932 }
2933
2927 md = mmc_blk_alloc(card); 2934 md = mmc_blk_alloc(card);
2928 if (IS_ERR(md)) 2935 if (IS_ERR(md))
2929 return PTR_ERR(md); 2936 return PTR_ERR(md);
@@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card)
2987 pm_runtime_put_noidle(&card->dev); 2994 pm_runtime_put_noidle(&card->dev);
2988 mmc_blk_remove_req(md); 2995 mmc_blk_remove_req(md);
2989 dev_set_drvdata(&card->dev, NULL); 2996 dev_set_drvdata(&card->dev, NULL);
2997 destroy_workqueue(card->complete_wq);
2990} 2998}
2991 2999
2992static int _mmc_blk_suspend(struct mmc_card *card) 3000static int _mmc_blk_suspend(struct mmc_card *card)
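
Completions move off kblockd onto a driver-owned workqueue. WQ_MEM_RECLAIM gives the queue a rescuer thread so completion work keeps making progress under memory pressure (block I/O completion may itself be what reclaim is waiting on), WQ_HIGHPRI keeps completion latency down, and the remove path gains the matching destroy_workqueue(). The allocation/teardown pair:

	card->complete_wq = alloc_workqueue("mmc_complete",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!card->complete_wq)
		return -ENOMEM;
	/* ... */
	destroy_workqueue(card->complete_wq);	/* in mmc_blk_remove() */
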
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f57f5de54206..cf58ccaf22d5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -234,7 +234,7 @@ int mmc_of_parse(struct mmc_host *host)
234 if (device_property_read_bool(dev, "broken-cd")) 234 if (device_property_read_bool(dev, "broken-cd"))
235 host->caps |= MMC_CAP_NEEDS_POLL; 235 host->caps |= MMC_CAP_NEEDS_POLL;
236 236
237 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 ret = mmc_gpiod_request_cd(host, "cd", 0, false,
238 cd_debounce_delay_ms * 1000, 238 cd_debounce_delay_ms * 1000,
239 &cd_gpio_invert); 239 &cd_gpio_invert);
240 if (!ret) 240 if (!ret)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e26b8145efb3..a44ec8bb5418 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -116,7 +116,7 @@ config MMC_RICOH_MMC
116 116
117config MMC_SDHCI_ACPI 117config MMC_SDHCI_ACPI
118 tristate "SDHCI support for ACPI enumerated SDHCI controllers" 118 tristate "SDHCI support for ACPI enumerated SDHCI controllers"
119 depends on MMC_SDHCI && ACPI 119 depends on MMC_SDHCI && ACPI && PCI
120 select IOSF_MBI if X86 120 select IOSF_MBI if X86
121 help 121 help
122 This selects support for ACPI enumerated SDHCI controllers, 122 This selects support for ACPI enumerated SDHCI controllers,
@@ -978,7 +978,7 @@ config MMC_SDHCI_OMAP
978 tristate "TI SDHCI Controller Support" 978 tristate "TI SDHCI Controller Support"
979 depends on MMC_SDHCI_PLTFM && OF 979 depends on MMC_SDHCI_PLTFM && OF
980 select THERMAL 980 select THERMAL
981 select TI_SOC_THERMAL 981 imply TI_SOC_THERMAL
982 help 982 help
983 This selects the Secure Digital Host Controller Interface (SDHCI) 983 This selects the Secure Digital Host Controller Interface (SDHCI)
984 support present in TI's DRA7 SOCs. The controller supports 984 support present in TI's DRA7 SOCs. The controller supports
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 50293529d6de..c9e7aa50bb0a 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev)
1431 1431
1432err: 1432err:
1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret); 1433 dev_dbg(dev, "%s -> err %d\n", __func__, ret);
1434 if (host->dma_chan_rxtx)
1435 dma_release_channel(host->dma_chan_rxtx);
1434 mmc_free_host(mmc); 1436 mmc_free_host(mmc);
1435 1437
1436 return ret; 1438 return ret;
diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c
index ed8f2254b66a..aa38b1a8017e 100644
--- a/drivers/mmc/host/dw_mmc-bluefield.c
+++ b/drivers/mmc/host/dw_mmc-bluefield.c
@@ -1,11 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * Copyright (C) 2018 Mellanox Technologies. 3 * Copyright (C) 2018 Mellanox Technologies.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */ 4 */
10 5
11#include <linux/bitfield.h> 6#include <linux/bitfield.h>
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c2690c1a50ff..2eba507790e4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -179,6 +179,8 @@ struct meson_host {
179 struct sd_emmc_desc *descs; 179 struct sd_emmc_desc *descs;
180 dma_addr_t descs_dma_addr; 180 dma_addr_t descs_dma_addr;
181 181
182 int irq;
183
182 bool vqmmc_enabled; 184 bool vqmmc_enabled;
183}; 185};
184 186
@@ -738,6 +740,11 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
738static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 740static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
739{ 741{
740 struct meson_host *host = mmc_priv(mmc); 742 struct meson_host *host = mmc_priv(mmc);
743 int adj = 0;
744
745 /* enable signal resampling w/o delay */
746 adj = ADJUST_ADJ_EN;
747 writel(adj, host->regs + host->data->adjust);
741 748
742 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); 749 return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
743} 750}
@@ -768,6 +775,9 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
768 if (!IS_ERR(mmc->supply.vmmc)) 775 if (!IS_ERR(mmc->supply.vmmc))
769 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 776 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
770 777
778 /* disable signal resampling */
779 writel(0, host->regs + host->data->adjust);
780
771 /* Reset rx phase */ 781 /* Reset rx phase */
772 clk_set_phase(host->rx_clk, 0); 782 clk_set_phase(host->rx_clk, 0);
773 783
@@ -1166,7 +1176,7 @@ static int meson_mmc_get_cd(struct mmc_host *mmc)
1166 1176
1167static void meson_mmc_cfg_init(struct meson_host *host) 1177static void meson_mmc_cfg_init(struct meson_host *host)
1168{ 1178{
1169 u32 cfg = 0, adj = 0; 1179 u32 cfg = 0;
1170 1180
1171 cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, 1181 cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
1172 ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); 1182 ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
@@ -1177,10 +1187,6 @@ static void meson_mmc_cfg_init(struct meson_host *host)
1177 cfg |= CFG_ERR_ABORT; 1187 cfg |= CFG_ERR_ABORT;
1178 1188
1179 writel(cfg, host->regs + SD_EMMC_CFG); 1189 writel(cfg, host->regs + SD_EMMC_CFG);
1180
1181 /* enable signal resampling w/o delay */
1182 adj = ADJUST_ADJ_EN;
1183 writel(adj, host->regs + host->data->adjust);
1184} 1190}
1185 1191
1186static int meson_mmc_card_busy(struct mmc_host *mmc) 1192static int meson_mmc_card_busy(struct mmc_host *mmc)
@@ -1231,7 +1237,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1231 struct resource *res; 1237 struct resource *res;
1232 struct meson_host *host; 1238 struct meson_host *host;
1233 struct mmc_host *mmc; 1239 struct mmc_host *mmc;
1234 int ret, irq; 1240 int ret;
1235 1241
1236 mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); 1242 mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
1237 if (!mmc) 1243 if (!mmc)
@@ -1276,8 +1282,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1276 goto free_host; 1282 goto free_host;
1277 } 1283 }
1278 1284
1279 irq = platform_get_irq(pdev, 0); 1285 host->irq = platform_get_irq(pdev, 0);
1280 if (irq <= 0) { 1286 if (host->irq <= 0) {
1281 dev_err(&pdev->dev, "failed to get interrupt resource.\n"); 1287 dev_err(&pdev->dev, "failed to get interrupt resource.\n");
1282 ret = -EINVAL; 1288 ret = -EINVAL;
1283 goto free_host; 1289 goto free_host;
@@ -1331,9 +1337,9 @@ static int meson_mmc_probe(struct platform_device *pdev)
1331 writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, 1337 writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN,
1332 host->regs + SD_EMMC_IRQ_EN); 1338 host->regs + SD_EMMC_IRQ_EN);
1333 1339
1334 ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, 1340 ret = request_threaded_irq(host->irq, meson_mmc_irq,
1335 meson_mmc_irq_thread, IRQF_SHARED, 1341 meson_mmc_irq_thread, IRQF_SHARED,
1336 NULL, host); 1342 dev_name(&pdev->dev), host);
1337 if (ret) 1343 if (ret)
1338 goto err_init_clk; 1344 goto err_init_clk;
1339 1345
@@ -1351,7 +1357,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
1351 if (host->bounce_buf == NULL) { 1357 if (host->bounce_buf == NULL) {
1352 dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); 1358 dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n");
1353 ret = -ENOMEM; 1359 ret = -ENOMEM;
1354 goto err_init_clk; 1360 goto err_free_irq;
1355 } 1361 }
1356 1362
1357 host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 1363 host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
@@ -1370,6 +1376,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
1370err_bounce_buf: 1376err_bounce_buf:
1371 dma_free_coherent(host->dev, host->bounce_buf_size, 1377 dma_free_coherent(host->dev, host->bounce_buf_size,
1372 host->bounce_buf, host->bounce_dma_addr); 1378 host->bounce_buf, host->bounce_dma_addr);
1379err_free_irq:
1380 free_irq(host->irq, host);
1373err_init_clk: 1381err_init_clk:
1374 clk_disable_unprepare(host->mmc_clk); 1382 clk_disable_unprepare(host->mmc_clk);
1375err_core_clk: 1383err_core_clk:
@@ -1387,6 +1395,7 @@ static int meson_mmc_remove(struct platform_device *pdev)
1387 1395
1388 /* disable interrupts */ 1396 /* disable interrupts */
1389 writel(0, host->regs + SD_EMMC_IRQ_EN); 1397 writel(0, host->regs + SD_EMMC_IRQ_EN);
1398 free_irq(host->irq, host);
1390 1399
1391 dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 1400 dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
1392 host->descs, host->descs_dma_addr); 1401 host->descs, host->descs_dma_addr);
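The meson-gx hunks above trade devm_request_threaded_irq() for a manually managed interrupt, presumably because devm-managed IRQs are only released after ->remove() returns: on a shared line the handler could still run after the descriptor buffers are freed. A minimal sketch of the resulting shape, where the foo_* names and the FOO_IRQ_EN offset are illustrative, not from the driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#define FOO_IRQ_EN	0x4c			/* assumed register offset */

struct foo_host {
	void __iomem *regs;
	int irq;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;			/* defer work to the thread */
}

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_host *host;
	struct resource *res;
	int ret;

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0)
		return -EINVAL;

	/* deliberately not devm: the driver controls the teardown order */
	ret = request_threaded_irq(host->irq, foo_irq, foo_irq_thread,
				   IRQF_SHARED, dev_name(&pdev->dev), host);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, host);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_host *host = platform_get_drvdata(pdev);

	writel(0, host->regs + FOO_IRQ_EN);	/* quiesce the hardware */
	free_irq(host->irq, host);		/* then drop the handler */
	return 0;
}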
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 8afeaf81ae66..833ef0590af8 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
846 846
847 if (timing == MMC_TIMING_MMC_HS400 && 847 if (timing == MMC_TIMING_MMC_HS400 &&
848 host->dev_comp->hs400_tune) 848 host->dev_comp->hs400_tune)
849 sdr_set_field(host->base + PAD_CMD_TUNE, 849 sdr_set_field(host->base + tune_reg,
850 MSDC_PAD_TUNE_CMDRRDLY, 850 MSDC_PAD_TUNE_CMDRRDLY,
851 host->hs400_cmd_int_delay); 851 host->hs400_cmd_int_delay);
852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, 852 dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock,
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 0db99057c44f..9d12c06c7fd6 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -296,7 +296,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
296 296
297 iproc_host->data = iproc_data; 297 iproc_host->data = iproc_data;
298 298
299 mmc_of_parse(host->mmc); 299 ret = mmc_of_parse(host->mmc);
300 if (ret)
301 goto err;
302
300 sdhci_get_property(pdev); 303 sdhci_get_property(pdev);
301 304
302 host->mmc->caps |= iproc_host->data->mmc_caps; 305 host->mmc->caps |= iproc_host->data->mmc_caps;
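The sdhci-iproc change applies a general rule: mmc_of_parse() can fail, among other things with -EPROBE_DEFER while a card-detect GPIO controller is still probing, so its return value must be checked and propagated. A minimal sketch, with foo_parse_dt() as an assumed helper name:

#include <linux/mmc/host.h>

static int foo_parse_dt(struct mmc_host *mmc)
{
	int ret;

	/* Reads bus-width, cd-gpios, non-removable, ... from DT and
	 * may fail; silently ignoring the result loses the error. */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	return 0;
}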
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a22e11a65658..eba9bcc92ad3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3763,8 +3763,9 @@ int sdhci_setup_host(struct sdhci_host *host)
3763 * Use zalloc to zero the reserved high 32-bits of 128-bit 3763 * Use zalloc to zero the reserved high 32-bits of 128-bit
3764 * descriptors so that they never need to be written. 3764 * descriptors so that they never need to be written.
3765 */ 3765 */
3766 buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz + 3766 buf = dma_alloc_coherent(mmc_dev(mmc),
3767 host->adma_table_sz, &dma, GFP_KERNEL); 3767 host->align_buffer_sz + host->adma_table_sz,
3768 &dma, GFP_KERNEL);
3768 if (!buf) { 3769 if (!buf) {
3769 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 3770 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
3770 mmc_hostname(mmc)); 3771 mmc_hostname(mmc));
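The dma_zalloc_coherent() to dma_alloc_coherent() conversions here, and in the greth, slicoss and ena hunks further down, are mechanical: as of this kernel, dma_alloc_coherent() returns zeroed memory, so the zalloc wrapper became redundant. Sketch, with foo_alloc_ring() as an assumed name:

#include <linux/dma-mapping.h>

static void *foo_alloc_ring(struct device *dev, size_t sz, dma_addr_t *dma)
{
	/* No memset() needed: the returned buffer is already zeroed,
	 * which is exactly why dma_zalloc_coherent() could be dropped. */
	return dma_alloc_coherent(dev, sz, dma, GFP_KERNEL);
}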
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 279e326e397e..70fadc976795 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
1399 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | 1399 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
1400 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; 1400 MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
1401 1401
1402 if (host->cfg->clk_delays || host->use_new_timings) 1402 /*
1403 * Some H5 devices do not have signal traces precise enough to
1404 * use HS DDR mode for their eMMC chips.
1405 *
1406 * We still enable HS DDR modes for all the other controller
1407 * variants that support them.
1408 */
1409 if ((host->cfg->clk_delays || host->use_new_timings) &&
1410 !of_device_is_compatible(pdev->dev.of_node,
1411 "allwinner,sun50i-h5-emmc"))
1403 mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; 1412 mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR;
1404 1413
1405 ret = mmc_of_parse(mmc); 1414 ret = mmc_of_parse(mmc);
1406 if (ret) 1415 if (ret)
1407 goto error_free_dma; 1416 goto error_free_dma;
1408 1417
1418 /*
1419 * If we don't support delay chains in the SoC, we can't use any
1420 * of the higher speed modes. Mask them out in case the device
1421 * tree specifies the properties for them, which gets added to
1422 * the caps by mmc_of_parse() above.
1423 */
1424 if (!(host->cfg->clk_delays || host->use_new_timings)) {
1425 mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
1426 MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
1427 mmc->caps2 &= ~MMC_CAP2_HS200;
1428 }
1429
1430 /* TODO: This driver doesn't support HS400 mode yet */
1431 mmc->caps2 &= ~MMC_CAP2_HS400;
1432
1409 ret = sunxi_mmc_init_host(host); 1433 ret = sunxi_mmc_init_host(host);
1410 if (ret) 1434 if (ret)
1411 goto error_free_dma; 1435 goto error_free_dma;
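The sunxi hunk illustrates an ordering constraint: mmc_of_parse() may OR capabilities from the device tree into mmc->caps, so modes the controller variant cannot time correctly have to be masked after that call, not before. Condensed sketch, with the has_delay_chains flag standing in for the host->cfg->clk_delays || host->use_new_timings test:

#include <linux/mmc/host.h>

static int foo_setup_caps(struct mmc_host *mmc, bool has_delay_chains)
{
	int ret;

	ret = mmc_of_parse(mmc);	/* may add DDR/UHS caps from DT */
	if (ret)
		return ret;

	/* Masking afterwards wins over whatever DT requested. */
	if (!has_delay_chains) {
		mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
			       MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
		mmc->caps2 &= ~MMC_CAP2_HS200;
	}

	return 0;
}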
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 21e3cdc04036..999b705769a8 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -522,7 +522,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd)
522 mtd->nvmem = nvmem_register(&config); 522 mtd->nvmem = nvmem_register(&config);
523 if (IS_ERR(mtd->nvmem)) { 523 if (IS_ERR(mtd->nvmem)) {
524 /* Just ignore if there is no NVMEM support in the kernel */ 524 /* Just ignore if there is no NVMEM support in the kernel */
525 if (PTR_ERR(mtd->nvmem) == -ENOSYS) { 525 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
526 mtd->nvmem = NULL; 526 mtd->nvmem = NULL;
527 } else { 527 } else {
528 dev_err(&mtd->dev, "Failed to register NVMEM device\n"); 528 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
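The errno change tracks the nvmem core: with CONFIG_NVMEM disabled, the nvmem_register() stub now appears to return ERR_PTR(-EOPNOTSUPP) instead of -ENOSYS, and mtdcore filters exactly that value to treat nvmem as optional. The stub pattern, sketched under that assumption:

#include <linux/err.h>

#if IS_ENABLED(CONFIG_NVMEM)
struct nvmem_device *nvmem_register(const struct nvmem_config *config);
#else
static inline struct nvmem_device *
nvmem_register(const struct nvmem_config *config)
{
	/* Subsystem compiled out; callers that can live without it
	 * check for this specific errno and carry on. */
	return ERR_PTR(-EOPNOTSUPP);
}
#endif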
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 9887bda317cd..b31c868019ad 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -7,7 +7,7 @@
7extern struct mutex mtd_table_mutex; 7extern struct mutex mtd_table_mutex;
8 8
9struct mtd_info *__mtd_next_device(int i); 9struct mtd_info *__mtd_next_device(int i);
10int add_mtd_device(struct mtd_info *mtd); 10int __must_check add_mtd_device(struct mtd_info *mtd);
11int del_mtd_device(struct mtd_info *mtd); 11int del_mtd_device(struct mtd_info *mtd);
12int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); 12int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
13int del_mtd_partitions(struct mtd_info *); 13int del_mtd_partitions(struct mtd_info *);
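__must_check expands to __attribute__((warn_unused_result)), so after this one-line header change the compiler flags every caller that drops the result of add_mtd_device(), which is what drives the mtdpart.c error-handling additions below. For example:

int __must_check add_mtd_device(struct mtd_info *mtd);

static void careless(struct mtd_info *mtd)
{
	add_mtd_device(mtd);		/* warning: ignoring return value */
}

static int careful(struct mtd_info *mtd)
{
	return add_mtd_device(mtd);	/* fine: the result is propagated */
}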
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index b6af41b04622..37f174ccbcec 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
480 /* let's register it anyway to preserve ordering */ 480 /* let's register it anyway to preserve ordering */
481 slave->offset = 0; 481 slave->offset = 0;
482 slave->mtd.size = 0; 482 slave->mtd.size = 0;
483
484 /* Initialize ->erasesize to make add_mtd_device() happy. */
485 slave->mtd.erasesize = parent->erasesize;
486
483 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", 487 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
484 part->name); 488 part->name);
485 goto out_register; 489 goto out_register;
@@ -618,10 +622,21 @@ int mtd_add_partition(struct mtd_info *parent, const char *name,
618 list_add(&new->list, &mtd_partitions); 622 list_add(&new->list, &mtd_partitions);
619 mutex_unlock(&mtd_partitions_mutex); 623 mutex_unlock(&mtd_partitions_mutex);
620 624
621 add_mtd_device(&new->mtd); 625 ret = add_mtd_device(&new->mtd);
626 if (ret)
627 goto err_remove_part;
622 628
623 mtd_add_partition_attrs(new); 629 mtd_add_partition_attrs(new);
624 630
631 return 0;
632
633err_remove_part:
634 mutex_lock(&mtd_partitions_mutex);
635 list_del(&new->list);
636 mutex_unlock(&mtd_partitions_mutex);
637
638 free_partition(new);
639
625 return ret; 640 return ret;
626} 641}
627EXPORT_SYMBOL_GPL(mtd_add_partition); 642EXPORT_SYMBOL_GPL(mtd_add_partition);
@@ -712,22 +727,31 @@ int add_mtd_partitions(struct mtd_info *master,
712{ 727{
713 struct mtd_part *slave; 728 struct mtd_part *slave;
714 uint64_t cur_offset = 0; 729 uint64_t cur_offset = 0;
715 int i; 730 int i, ret;
716 731
717 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); 732 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
718 733
719 for (i = 0; i < nbparts; i++) { 734 for (i = 0; i < nbparts; i++) {
720 slave = allocate_partition(master, parts + i, i, cur_offset); 735 slave = allocate_partition(master, parts + i, i, cur_offset);
721 if (IS_ERR(slave)) { 736 if (IS_ERR(slave)) {
722 del_mtd_partitions(master); 737 ret = PTR_ERR(slave);
723 return PTR_ERR(slave); 738 goto err_del_partitions;
724 } 739 }
725 740
726 mutex_lock(&mtd_partitions_mutex); 741 mutex_lock(&mtd_partitions_mutex);
727 list_add(&slave->list, &mtd_partitions); 742 list_add(&slave->list, &mtd_partitions);
728 mutex_unlock(&mtd_partitions_mutex); 743 mutex_unlock(&mtd_partitions_mutex);
729 744
730 add_mtd_device(&slave->mtd); 745 ret = add_mtd_device(&slave->mtd);
746 if (ret) {
747 mutex_lock(&mtd_partitions_mutex);
748 list_del(&slave->list);
749 mutex_unlock(&mtd_partitions_mutex);
750
751 free_partition(slave);
752 goto err_del_partitions;
753 }
754
731 mtd_add_partition_attrs(slave); 755 mtd_add_partition_attrs(slave);
732 /* Look for subpartitions */ 756 /* Look for subpartitions */
733 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL); 757 parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);
@@ -736,6 +760,11 @@ int add_mtd_partitions(struct mtd_info *master,
736 } 760 }
737 761
738 return 0; 762 return 0;
763
764err_del_partitions:
765 del_mtd_partitions(master);
766
767 return ret;
739} 768}
740 769
741static DEFINE_SPINLOCK(part_parser_lock); 770static DEFINE_SPINLOCK(part_parser_lock);
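Both mtdpart hunks follow the kernel's standard unwind idiom: on failure, undo in reverse order exactly the steps that succeeded, taking the same lock that published the entry. Reduced to its skeleton, with the foo_* names and foo_lock as assumptions:

static int foo_add(struct foo *new)
{
	int ret;

	mutex_lock(&foo_lock);
	list_add(&new->list, &foo_list);	/* step 1: publish */
	mutex_unlock(&foo_lock);

	ret = foo_register(new);		/* step 2: may fail */
	if (ret)
		goto err_unlink;

	return 0;

err_unlink:
	mutex_lock(&foo_lock);			/* undo step 1 */
	list_del(&new->list);
	mutex_unlock(&foo_lock);
	foo_free(new);
	return ret;
}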
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index eebac35304c6..6e8edc9375dd 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -1322,7 +1322,7 @@ int denali_init(struct denali_nand_info *denali)
1322 } 1322 }
1323 1323
1324 /* clk rate info is needed for setup_data_interface */ 1324 /* clk rate info is needed for setup_data_interface */
1325 if (denali->clk_rate && denali->clk_x_rate) 1325 if (!denali->clk_rate || !denali->clk_x_rate)
1326 chip->options |= NAND_KEEP_TIMINGS; 1326 chip->options |= NAND_KEEP_TIMINGS;
1327 1327
1328 chip->legacy.dummy_controller.ops = &denali_controller_ops; 1328 chip->legacy.dummy_controller.ops = &denali_controller_ops;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 325b4414dccc..c9149a37f8f0 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -593,23 +593,6 @@ static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); 593 dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
594} 594}
595 595
596/* fsmc_select_chip - assert or deassert nCE */
597static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
598{
599 u32 pc = readl(host->regs_va + FSMC_PC);
600
601 if (!assert)
602 writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
603 else
604 writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
605
606 /*
607 * nCE line changes must be applied before returning from this
608 * function.
609 */
610 mb();
611}
612
613/* 596/*
614 * fsmc_exec_op - hook called by the core to execute NAND operations 597 * fsmc_exec_op - hook called by the core to execute NAND operations
615 * 598 *
@@ -627,8 +610,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
627 610
628 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs); 611 pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
629 612
630 fsmc_ce_ctrl(host, true);
631
632 for (op_id = 0; op_id < op->ninstrs; op_id++) { 613 for (op_id = 0; op_id < op->ninstrs; op_id++) {
633 instr = &op->instrs[op_id]; 614 instr = &op->instrs[op_id];
634 615
@@ -686,8 +667,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
686 } 667 }
687 } 668 }
688 669
689 fsmc_ce_ctrl(host, false);
690
691 return ret; 670 return ret;
692} 671}
693 672
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index bd4cfac6b5aa..a4768df5083f 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
155 155
156 /* 156 /*
157 * Reset BCH here, too. We got failures otherwise :( 157 * Reset BCH here, too. We got failures otherwise :(
158 * See later BCH reset for explanation of MX23 handling 158 * See later BCH reset for explanation of MX23 and MX28 handling
159 */ 159 */
160 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 160 ret = gpmi_reset_block(r->bch_regs,
161 GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
161 if (ret) 162 if (ret)
162 goto err_out; 163 goto err_out;
163 164
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
263 /* 264 /*
264 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this 265 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
265 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. 266 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
266 * On the other hand, the MX28 needs the reset, because one case has been 267 * and MX28.
267 * seen where the BCH produced ECC errors constantly after 10000
268 * consecutive reboots. The latter case has not been seen on the MX23
269 * yet, still we don't know if it could happen there as well.
270 */ 268 */
271 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); 269 ret = gpmi_reset_block(r->bch_regs,
270 GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
272 if (ret) 271 if (ret)
273 goto err_out; 272 goto err_out;
274 273
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index f92ae5aa2a54..9526d5b23c80 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -260,7 +260,7 @@ static int jz_nand_correct_ecc_rs(struct nand_chip *chip, uint8_t *dat,
260} 260}
261 261
262static int jz_nand_ioremap_resource(struct platform_device *pdev, 262static int jz_nand_ioremap_resource(struct platform_device *pdev,
263 const char *name, struct resource **res, void *__iomem *base) 263 const char *name, struct resource **res, void __iomem **base)
264{ 264{
265 int ret; 265 int ret;
266 266
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index cca4b24d2ffa..839494ac457c 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip)
410 410
411/** 411/**
412 * nand_fill_oob - [INTERN] Transfer client buffer to oob 412 * nand_fill_oob - [INTERN] Transfer client buffer to oob
413 * @chip: NAND chip object
413 * @oob: oob data buffer 414 * @oob: oob data buffer
414 * @len: oob data write length 415 * @len: oob data write length
415 * @ops: oob ops structure 416 * @ops: oob ops structure
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 1b722fe9213c..19a2b563acdf 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
158 158
159/** 159/**
160 * read_bbt - [GENERIC] Read the bad block table starting from page 160 * read_bbt - [GENERIC] Read the bad block table starting from page
161 * @chip: NAND chip object 161 * @this: NAND chip object
162 * @buf: temporary buffer 162 * @buf: temporary buffer
163 * @page: the starting page 163 * @page: the starting page
164 * @num: the number of bbt descriptors to read 164 * @num: the number of bbt descriptors to read
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 46c62a31fa46..920e7375084f 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2833,6 +2833,16 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
2833 if (ret) 2833 if (ret)
2834 return ret; 2834 return ret;
2835 2835
2836 if (nandc->props->is_bam) {
2837 free_bam_transaction(nandc);
2838 nandc->bam_txn = alloc_bam_transaction(nandc);
2839 if (!nandc->bam_txn) {
2840 dev_err(nandc->dev,
2841 "failed to allocate bam transaction\n");
2842 return -ENOMEM;
2843 }
2844 }
2845
2836 ret = mtd_device_register(mtd, NULL, 0); 2846 ret = mtd_device_register(mtd, NULL, 0);
2837 if (ret) 2847 if (ret)
2838 nand_cleanup(chip); 2848 nand_cleanup(chip);
@@ -2847,16 +2857,6 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
2847 struct qcom_nand_host *host; 2857 struct qcom_nand_host *host;
2848 int ret; 2858 int ret;
2849 2859
2850 if (nandc->props->is_bam) {
2851 free_bam_transaction(nandc);
2852 nandc->bam_txn = alloc_bam_transaction(nandc);
2853 if (!nandc->bam_txn) {
2854 dev_err(nandc->dev,
2855 "failed to allocate bam transaction\n");
2856 return -ENOMEM;
2857 }
2858 }
2859
2860 for_each_available_child_of_node(dn, child) { 2860 for_each_available_child_of_node(dn, child) {
2861 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); 2861 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2862 if (!host) { 2862 if (!host) {
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 479c2f2cf17f..fa87ae28cdfe 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
304 struct nand_device *nand = spinand_to_nand(spinand); 304 struct nand_device *nand = spinand_to_nand(spinand);
305 struct mtd_info *mtd = nanddev_to_mtd(nand); 305 struct mtd_info *mtd = nanddev_to_mtd(nand);
306 struct nand_page_io_req adjreq = *req; 306 struct nand_page_io_req adjreq = *req;
307 unsigned int nbytes = 0; 307 void *buf = spinand->databuf;
308 void *buf = NULL; 308 unsigned int nbytes;
309 u16 column = 0; 309 u16 column = 0;
310 int ret; 310 int ret;
311 311
312 memset(spinand->databuf, 0xff, 312 /*
313 nanddev_page_size(nand) + 313 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
314 nanddev_per_page_oobsize(nand)); 314 * the cache content to 0xFF (depends on vendor implementation), so we
315 * must fill the page cache entirely even if we only want to program
316 * the data portion of the page, otherwise we might corrupt the BBM or
317 * user data previously programmed in OOB area.
318 */
319 nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
320 memset(spinand->databuf, 0xff, nbytes);
321 adjreq.dataoffs = 0;
322 adjreq.datalen = nanddev_page_size(nand);
323 adjreq.databuf.out = spinand->databuf;
324 adjreq.ooblen = nanddev_per_page_oobsize(nand);
325 adjreq.ooboffs = 0;
326 adjreq.oobbuf.out = spinand->oobbuf;
315 327
316 if (req->datalen) { 328 if (req->datalen)
317 memcpy(spinand->databuf + req->dataoffs, req->databuf.out, 329 memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
318 req->datalen); 330 req->datalen);
319 adjreq.dataoffs = 0;
320 adjreq.datalen = nanddev_page_size(nand);
321 adjreq.databuf.out = spinand->databuf;
322 nbytes = adjreq.datalen;
323 buf = spinand->databuf;
324 }
325 331
326 if (req->ooblen) { 332 if (req->ooblen) {
327 if (req->mode == MTD_OPS_AUTO_OOB) 333 if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
332 else 338 else
333 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, 339 memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
334 req->ooblen); 340 req->ooblen);
335
336 adjreq.ooblen = nanddev_per_page_oobsize(nand);
337 adjreq.ooboffs = 0;
338 nbytes += nanddev_per_page_oobsize(nand);
339 if (!buf) {
340 buf = spinand->oobbuf;
341 column = nanddev_page_size(nand);
342 }
343 } 341 }
344 342
345 spinand_cache_op_adjust_colum(spinand, &adjreq, &column); 343 spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
370 368
371 /* 369 /*
372 * We need to use the RANDOM LOAD CACHE operation if there's 370 * We need to use the RANDOM LOAD CACHE operation if there's
373 * more than one iteration, because the LOAD operation resets 371 * more than one iteration, because the LOAD operation might
374 * the cache to 0xff. 372 * reset the cache to 0xff.
375 */ 373 */
376 if (nbytes) { 374 if (nbytes) {
377 column = op.addr.val; 375 column = op.addr.val;
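The rewritten spinand path leans on a NAND property worth spelling out: programming can only clear bits (1 to 0), so 0xff bytes program as a no-op. Filling the whole cache buffer with 0xff and always transferring a full page means any byte the request did not supply leaves the flash untouched instead of inheriting stale cache content. Roughly, with foo_fill_cache() as an assumed helper:

#include <linux/string.h>
#include <linux/types.h>

static void foo_fill_cache(u8 *databuf, size_t page_size, size_t oob_size,
			   const u8 *data, size_t dataoffs, size_t datalen)
{
	/* 0xff everywhere first: unwritten bytes cannot corrupt the BBM
	 * or OOB data already programmed on the page. */
	memset(databuf, 0xff, page_size + oob_size);
	if (datalen)
		memcpy(databuf + dataoffs, data, datalen);
	/* ... OOB copy, then one full-page PROGRAM LOAD ... */
}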
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand)
1018 for (i = 0; i < nand->memorg.ntargets; i++) { 1016 for (i = 0; i < nand->memorg.ntargets; i++) {
1019 ret = spinand_select_target(spinand, i); 1017 ret = spinand_select_target(spinand, i);
1020 if (ret) 1018 if (ret)
1021 goto err_free_bufs; 1019 goto err_manuf_cleanup;
1022 1020
1023 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); 1021 ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1024 if (ret) 1022 if (ret)
1025 goto err_free_bufs; 1023 goto err_manuf_cleanup;
1026 } 1024 }
1027 1025
1028 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); 1026 ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6371958dd170..21bf8ac78380 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -197,9 +197,9 @@ config VXLAN
197 197
198config GENEVE 198config GENEVE
199 tristate "Generic Network Virtualization Encapsulation" 199 tristate "Generic Network Virtualization Encapsulation"
200 depends on INET && NET_UDP_TUNNEL 200 depends on INET
201 depends on IPV6 || !IPV6 201 depends on IPV6 || !IPV6
202 select NET_IP_TUNNEL 202 select NET_UDP_TUNNEL
203 select GRO_CELLS 203 select GRO_CELLS
204 ---help--- 204 ---help---
205 This allows one to create geneve virtual interfaces that provide 205 This allows one to create geneve virtual interfaces that provide
@@ -519,7 +519,7 @@ config NET_FAILOVER
519 and destroy a failover master netdev and manages a primary and 519 and destroy a failover master netdev and manages a primary and
520 standby slave netdevs that get registered via the generic failover 520 standby slave netdevs that get registered via the generic failover
521 infrastructure. This can be used by paravirtual drivers to enable 521 infrastructure. This can be used by paravirtual drivers to enable
522 an alternate low latency datapath. It alsoenables live migration of 522 an alternate low latency datapath. It also enables live migration of
523 a VM with direct attached VF by failing over to the paravirtual 523 a VM with direct attached VF by failing over to the paravirtual
524 datapath when the VF is unplugged. 524 datapath when the VF is unplugged.
525 525
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a9d597f28023..485462d3087f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1963,6 +1963,9 @@ static int __bond_release_one(struct net_device *bond_dev,
1963 if (!bond_has_slaves(bond)) { 1963 if (!bond_has_slaves(bond)) {
1964 bond_set_carrier(bond); 1964 bond_set_carrier(bond);
1965 eth_hw_addr_random(bond_dev); 1965 eth_hw_addr_random(bond_dev);
1966 bond->nest_level = SINGLE_DEPTH_NESTING;
1967 } else {
1968 bond->nest_level = dev_get_nest_level(bond_dev) + 1;
1966 } 1969 }
1967 1970
1968 unblock_netpoll_tx(); 1971 unblock_netpoll_tx();
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index a0f954f36c09..44e6c7b1b222 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser)
257 if (skb->len == 0) { 257 if (skb->len == 0) {
258 struct sk_buff *tmp = skb_dequeue(&ser->head); 258 struct sk_buff *tmp = skb_dequeue(&ser->head);
259 WARN_ON(tmp != skb); 259 WARN_ON(tmp != skb);
260 if (in_interrupt()) 260 dev_consume_skb_any(skb);
261 dev_kfree_skb_irq(skb);
262 else
263 kfree_skb(skb);
264 } 261 }
265 } 262 }
266 /* Send flow off if queue is empty */ 263 /* Send flow off if queue is empty */
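dev_consume_skb_any() replaces the open-coded context test: it is safe from both hardirq and process context, and it records the skb as consumed (successfully handed off) rather than dropped, which keeps packet-drop monitors quiet. The two forms side by side:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void tx_done_old(struct sk_buff *skb)
{
	if (in_interrupt())
		dev_kfree_skb_irq(skb);	/* counted as a drop */
	else
		kfree_skb(skb);		/* likewise */
}

static void tx_done_new(struct sk_buff *skb)
{
	dev_consume_skb_any(skb);	/* any context, not a drop */
}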
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3b3f88ffab53..c05e4d50d43d 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) 480struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
481{ 481{
482 struct can_priv *priv = netdev_priv(dev); 482 struct can_priv *priv = netdev_priv(dev);
483 struct sk_buff *skb = priv->echo_skb[idx];
484 struct canfd_frame *cf;
485 483
486 if (idx >= priv->echo_skb_max) { 484 if (idx >= priv->echo_skb_max) {
487 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", 485 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
@@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
489 return NULL; 487 return NULL;
490 } 488 }
491 489
492 if (!skb) { 490 if (priv->echo_skb[idx]) {
493 netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", 491 /* Using "struct canfd_frame::len" for the frame
494 __func__, idx); 492 * length is supported on both CAN and CANFD frames.
495 return NULL; 493 */
496 } 494 struct sk_buff *skb = priv->echo_skb[idx];
495 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
496 u8 len = cf->len;
497 497
498 /* Using "struct canfd_frame::len" for the frame 498 *len_ptr = len;
499 * length is supported on both CAN and CANFD frames. 499 priv->echo_skb[idx] = NULL;
500 */
501 cf = (struct canfd_frame *)skb->data;
502 *len_ptr = cf->len;
503 priv->echo_skb[idx] = NULL;
504 500
505 return skb; 501 return skb;
502 }
503
504 return NULL;
506} 505}
507 506
508/* 507/*
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 0f36eafe3ac1..1c66fb2ad76b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1106,7 +1106,7 @@ static int flexcan_chip_start(struct net_device *dev)
1106 } 1106 }
1107 } else { 1107 } else {
1108 /* clear and invalidate unused mailboxes first */ 1108 /* clear and invalidate unused mailboxes first */
1109 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) { 1109 for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
1110 mb = flexcan_get_mb(priv, i); 1110 mb = flexcan_get_mb(priv, i);
1111 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, 1111 priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
1112 &mb->can_ctrl); 1112 &mb->can_ctrl);
@@ -1432,7 +1432,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
1432 gpr_np = of_find_node_by_phandle(phandle); 1432 gpr_np = of_find_node_by_phandle(phandle);
1433 if (!gpr_np) { 1433 if (!gpr_np) {
1434 dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); 1434 dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
1435 return PTR_ERR(gpr_np); 1435 return -ENODEV;
1436 } 1436 }
1437 1437
1438 priv = netdev_priv(dev); 1438 priv = netdev_priv(dev);
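The flexcan one-liner fixes a classic pitfall: of_find_node_by_phandle() signals failure with NULL, not with an ERR_PTR(), and PTR_ERR(NULL) evaluates to 0, so the old code reported success on its failure path. NULL-returning OF lookups need an explicit errno, sketched here with an assumed foo_check_gpr() wrapper:

#include <linux/errno.h>
#include <linux/of.h>

static int foo_check_gpr(u32 phandle)
{
	struct device_node *gpr_np;

	gpr_np = of_find_node_by_phandle(phandle);
	if (!gpr_np)
		return -ENODEV;	/* not PTR_ERR(gpr_np): that is 0, success */

	of_node_put(gpr_np);	/* the lookup took a reference */
	return 0;
}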
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 90f514252987..d9c56a779c08 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
511 /* Clear all pending interrupts */ 511 /* Clear all pending interrupts */
512 writel(0xffffffff, priv->regs + B53_SRAB_INTR); 512 writel(0xffffffff, priv->regs + B53_SRAB_INTR);
513 513
514 if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
515 return;
516
517 for (i = 0; i < B53_N_PORTS; i++) { 514 for (i = 0; i < B53_N_PORTS; i++) {
518 port = &priv->port_intrs[i]; 515 port = &priv->port_intrs[i];
519 516
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 361fbde76654..17ec32b0a1cc 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
690 * port, the other ones have already been disabled during 690 * port, the other ones have already been disabled during
691 * bcm_sf2_sw_setup 691 * bcm_sf2_sw_setup
692 */ 692 */
693 for (port = 0; port < DSA_MAX_PORTS; port++) { 693 for (port = 0; port < ds->num_ports; port++) {
694 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) 694 if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port))
695 bcm_sf2_port_disable(ds, port, NULL); 695 bcm_sf2_port_disable(ds, port, NULL);
696 } 696 }
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 3b12e2dcff31..8a5111f9414c 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -7,7 +7,6 @@
7 7
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/gpio.h>
11#include <linux/gpio/consumer.h> 10#include <linux/gpio/consumer.h>
12#include <linux/kernel.h> 11#include <linux/kernel.h>
13#include <linux/module.h> 12#include <linux/module.h>
@@ -15,7 +14,6 @@
15#include <linux/phy.h> 14#include <linux/phy.h>
16#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
17#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
18#include <linux/of_gpio.h>
19#include <linux/of_net.h> 17#include <linux/of_net.h>
20#include <net/dsa.h> 18#include <net/dsa.h>
21#include <net/switchdev.h> 19#include <net/switchdev.h>
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 74547f43b938..a8a2c728afba 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -18,7 +18,6 @@
18#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21#include <linux/of_gpio.h>
22#include <linux/of_mdio.h> 21#include <linux/of_mdio.h>
23#include <linux/of_net.h> 22#include <linux/of_net.h>
24#include <linux/of_platform.h> 23#include <linux/of_platform.h>
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8a517d8fb9d1..12fd7ce3f1ff 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
261 unsigned int sub_irq; 261 unsigned int sub_irq;
262 unsigned int n; 262 unsigned int n;
263 u16 reg; 263 u16 reg;
264 u16 ctl1;
264 int err; 265 int err;
265 266
266 mutex_lock(&chip->reg_lock); 267 mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
270 if (err) 271 if (err)
271 goto out; 272 goto out;
272 273
273 for (n = 0; n < chip->g1_irq.nirqs; ++n) { 274 do {
274 if (reg & (1 << n)) { 275 for (n = 0; n < chip->g1_irq.nirqs; ++n) {
275 sub_irq = irq_find_mapping(chip->g1_irq.domain, n); 276 if (reg & (1 << n)) {
276 handle_nested_irq(sub_irq); 277 sub_irq = irq_find_mapping(chip->g1_irq.domain,
277 ++nhandled; 278 n);
279 handle_nested_irq(sub_irq);
280 ++nhandled;
281 }
278 } 282 }
279 } 283
284 mutex_lock(&chip->reg_lock);
285 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
286 if (err)
287 goto unlock;
288 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
289unlock:
290 mutex_unlock(&chip->reg_lock);
291 if (err)
292 goto out;
293 ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
294 } while (reg & ctl1);
295
280out: 296out:
281 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); 297 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
282} 298}
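The new do/while in the g1 handler addresses interrupt loss on an edge-triggered parent line: a source that re-asserts while the thread is still running produces no new edge, so the handler must re-sample the pending status and keep dispatching until nothing enabled remains set. The generic shape, with read_pending()/read_enabled() standing in for the MV88E6XXX_G1_STS and _CTL1 reads done under reg_lock:

	unsigned long pending;
	unsigned int n;

	for (;;) {
		pending = read_pending(chip) & read_enabled(chip);
		if (!pending)
			break;
		for_each_set_bit(n, &pending, chip->g1_irq.nirqs)
			handle_nested_irq(irq_find_mapping(chip->g1_irq.domain,
							   n));
	}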
@@ -2403,6 +2419,107 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
2403 return mv88e6xxx_g1_stats_clear(chip); 2419 return mv88e6xxx_g1_stats_clear(chip);
2404} 2420}
2405 2421
2422/* The mv88e6390 has some hidden registers used for debug and
 2423 * development. The errata workaround also makes use of them.
2424 */
2425static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
2426 int reg, u16 val)
2427{
2428 u16 ctrl;
2429 int err;
2430
2431 err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
2432 PORT_RESERVED_1A, val);
2433 if (err)
2434 return err;
2435
2436 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
2437 PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2438 reg;
2439
2440 return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2441 PORT_RESERVED_1A, ctrl);
2442}
2443
2444static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
2445{
2446 return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
2447 PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
2448}
2449
2450
2451static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
2452 int reg, u16 *val)
2453{
2454 u16 ctrl;
2455 int err;
2456
2457 ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
2458 PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
2459 reg;
2460
2461 err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
2462 PORT_RESERVED_1A, ctrl);
2463 if (err)
2464 return err;
2465
2466 err = mv88e6390_hidden_wait(chip);
2467 if (err)
2468 return err;
2469
2470 return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
2471 PORT_RESERVED_1A, val);
2472}
2473
2474/* Check if the errata has already been applied. */
2475static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
2476{
2477 int port;
2478 int err;
2479 u16 val;
2480
2481 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2482 err = mv88e6390_hidden_read(chip, port, 0, &val);
2483 if (err) {
2484 dev_err(chip->dev,
2485 "Error reading hidden register: %d\n", err);
2486 return false;
2487 }
2488 if (val != 0x01c0)
2489 return false;
2490 }
2491
2492 return true;
2493}
2494
2495/* The 6390 copper ports have an erratum which requires poking magic
2496 * values into undocumented hidden registers and then performing a
2497 * software reset.
2498 */
2499static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
2500{
2501 int port;
2502 int err;
2503
2504 if (mv88e6390_setup_errata_applied(chip))
2505 return 0;
2506
2507 /* Set the ports into blocking mode */
2508 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2509 err = mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED);
2510 if (err)
2511 return err;
2512 }
2513
2514 for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
2515 err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
2516 if (err)
2517 return err;
2518 }
2519
2520 return mv88e6xxx_software_reset(chip);
2521}
2522
2406static int mv88e6xxx_setup(struct dsa_switch *ds) 2523static int mv88e6xxx_setup(struct dsa_switch *ds)
2407{ 2524{
2408 struct mv88e6xxx_chip *chip = ds->priv; 2525 struct mv88e6xxx_chip *chip = ds->priv;
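The hidden-register helpers above implement a common indirect access scheme: a data word goes to a data port, a control word with a BUSY flag kicks the operation, and reads poll BUSY away before fetching the result. Condensed, with the names shortened from the PORT_RESERVED_1A_* defines and write16()/read16()/wait_busy_clear() assumed:

static int hidden_read(struct mv88e6xxx_chip *chip, int port, int reg,
		       u16 *val)
{
	u16 ctrl = BUSY | READ_OP | BLOCK | port << PORT_SHIFT | reg;
	int err;

	err = write16(chip, CTRL_PORT, RESERVED_1A, ctrl);	/* kick */
	if (err)
		return err;

	err = wait_busy_clear(chip);	/* poll until BUSY drops */
	if (err)
		return err;

	return read16(chip, DATA_PORT, RESERVED_1A, val);	/* fetch */
}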
@@ -2415,6 +2532,12 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
2415 2532
2416 mutex_lock(&chip->reg_lock); 2533 mutex_lock(&chip->reg_lock);
2417 2534
2535 if (chip->info->ops->setup_errata) {
2536 err = chip->info->ops->setup_errata(chip);
2537 if (err)
2538 goto unlock;
2539 }
2540
2418 /* Cache the cmode of each port. */ 2541 /* Cache the cmode of each port. */
2419 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) { 2542 for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
2420 if (chip->info->ops->port_get_cmode) { 2543 if (chip->info->ops->port_get_cmode) {
@@ -3226,6 +3349,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
3226 3349
3227static const struct mv88e6xxx_ops mv88e6190_ops = { 3350static const struct mv88e6xxx_ops mv88e6190_ops = {
3228 /* MV88E6XXX_FAMILY_6390 */ 3351 /* MV88E6XXX_FAMILY_6390 */
3352 .setup_errata = mv88e6390_setup_errata,
3229 .irl_init_all = mv88e6390_g2_irl_init_all, 3353 .irl_init_all = mv88e6390_g2_irl_init_all,
3230 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3354 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3231 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3355 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3269,6 +3393,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
3269 3393
3270static const struct mv88e6xxx_ops mv88e6190x_ops = { 3394static const struct mv88e6xxx_ops mv88e6190x_ops = {
3271 /* MV88E6XXX_FAMILY_6390 */ 3395 /* MV88E6XXX_FAMILY_6390 */
3396 .setup_errata = mv88e6390_setup_errata,
3272 .irl_init_all = mv88e6390_g2_irl_init_all, 3397 .irl_init_all = mv88e6390_g2_irl_init_all,
3273 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3398 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3274 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3399 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3312,6 +3437,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
3312 3437
3313static const struct mv88e6xxx_ops mv88e6191_ops = { 3438static const struct mv88e6xxx_ops mv88e6191_ops = {
3314 /* MV88E6XXX_FAMILY_6390 */ 3439 /* MV88E6XXX_FAMILY_6390 */
3440 .setup_errata = mv88e6390_setup_errata,
3315 .irl_init_all = mv88e6390_g2_irl_init_all, 3441 .irl_init_all = mv88e6390_g2_irl_init_all,
3316 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3442 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3317 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3443 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3404,6 +3530,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
3404 3530
3405static const struct mv88e6xxx_ops mv88e6290_ops = { 3531static const struct mv88e6xxx_ops mv88e6290_ops = {
3406 /* MV88E6XXX_FAMILY_6390 */ 3532 /* MV88E6XXX_FAMILY_6390 */
3533 .setup_errata = mv88e6390_setup_errata,
3407 .irl_init_all = mv88e6390_g2_irl_init_all, 3534 .irl_init_all = mv88e6390_g2_irl_init_all,
3408 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3535 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3409 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3536 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3709,6 +3836,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
3709 3836
3710static const struct mv88e6xxx_ops mv88e6390_ops = { 3837static const struct mv88e6xxx_ops mv88e6390_ops = {
3711 /* MV88E6XXX_FAMILY_6390 */ 3838 /* MV88E6XXX_FAMILY_6390 */
3839 .setup_errata = mv88e6390_setup_errata,
3712 .irl_init_all = mv88e6390_g2_irl_init_all, 3840 .irl_init_all = mv88e6390_g2_irl_init_all,
3713 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3841 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3714 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3842 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
@@ -3756,6 +3884,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
3756 3884
3757static const struct mv88e6xxx_ops mv88e6390x_ops = { 3885static const struct mv88e6xxx_ops mv88e6390x_ops = {
3758 /* MV88E6XXX_FAMILY_6390 */ 3886 /* MV88E6XXX_FAMILY_6390 */
3887 .setup_errata = mv88e6390_setup_errata,
3759 .irl_init_all = mv88e6390_g2_irl_init_all, 3888 .irl_init_all = mv88e6390_g2_irl_init_all,
3760 .get_eeprom = mv88e6xxx_g2_get_eeprom8, 3889 .get_eeprom = mv88e6xxx_g2_get_eeprom8,
3761 .set_eeprom = mv88e6xxx_g2_set_eeprom8, 3890 .set_eeprom = mv88e6xxx_g2_set_eeprom8,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f9ecb7872d32..546651d8c3e1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -300,6 +300,11 @@ struct mv88e6xxx_mdio_bus {
300}; 300};
301 301
302struct mv88e6xxx_ops { 302struct mv88e6xxx_ops {
303 /* Switch Setup Errata, called early in the switch setup to
304 * allow any errata actions to be performed
305 */
306 int (*setup_errata)(struct mv88e6xxx_chip *chip);
307
303 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); 308 int (*ieee_pri_map)(struct mv88e6xxx_chip *chip);
304 int (*ip_pri_map)(struct mv88e6xxx_chip *chip); 309 int (*ip_pri_map)(struct mv88e6xxx_chip *chip);
305 310
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 5200e4bdce93..ea243840ee0f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
314{ 314{
315 struct mv88e6xxx_chip *chip = dev_id; 315 struct mv88e6xxx_chip *chip = dev_id;
316 struct mv88e6xxx_atu_entry entry; 316 struct mv88e6xxx_atu_entry entry;
317 int spid;
317 int err; 318 int err;
318 u16 val; 319 u16 val;
319 320
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
336 if (err) 337 if (err)
337 goto out; 338 goto out;
338 339
340 spid = entry.state;
341
339 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { 342 if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
340 dev_err_ratelimited(chip->dev, 343 dev_err_ratelimited(chip->dev,
341 "ATU age out violation for %pM\n", 344 "ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
344 347
345 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 348 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
346 dev_err_ratelimited(chip->dev, 349 dev_err_ratelimited(chip->dev,
347 "ATU member violation for %pM portvec %x\n", 350 "ATU member violation for %pM portvec %x spid %d\n",
348 entry.mac, entry.portvec); 351 entry.mac, entry.portvec, spid);
349 chip->ports[entry.portvec].atu_member_violation++; 352 chip->ports[spid].atu_member_violation++;
350 } 353 }
351 354
352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { 355 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 356 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 357 "ATU miss violation for %pM portvec %x spid %d\n",
355 entry.mac, entry.portvec); 358 entry.mac, entry.portvec, spid);
356 chip->ports[entry.portvec].atu_miss_violation++; 359 chip->ports[spid].atu_miss_violation++;
357 } 360 }
358 361
359 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { 362 if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
360 dev_err_ratelimited(chip->dev, 363 dev_err_ratelimited(chip->dev,
361 "ATU full violation for %pM portvec %x\n", 364 "ATU full violation for %pM portvec %x spid %d\n",
362 entry.mac, entry.portvec); 365 entry.mac, entry.portvec, spid);
363 chip->ports[entry.portvec].atu_full_violation++; 366 chip->ports[spid].atu_full_violation++;
364 } 367 }
365 mutex_unlock(&chip->reg_lock); 368 mutex_unlock(&chip->reg_lock);
366 369
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 0d81866d0e4a..e583641de758 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -251,6 +251,16 @@
251/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */ 251/* Offset 0x19: Port IEEE Priority Remapping Registers (4-7) */
252#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19 252#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
253 253
254/* Offset 0x1a: Magic undocumented errata register */
255#define PORT_RESERVED_1A 0x1a
256#define PORT_RESERVED_1A_BUSY BIT(15)
257#define PORT_RESERVED_1A_WRITE BIT(14)
258#define PORT_RESERVED_1A_READ 0
259#define PORT_RESERVED_1A_PORT_SHIFT 5
260#define PORT_RESERVED_1A_BLOCK (0xf << 10)
261#define PORT_RESERVED_1A_CTRL_PORT 4
262#define PORT_RESERVED_1A_DATA_PORT 5
263
254int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, 264int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
255 u16 *val); 265 u16 *val);
256int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, 266int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2caa8c8b4b55..1bfc5ff8d81d 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -664,7 +664,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
664 if (port < 9) 664 if (port < 9)
665 return 0; 665 return 0;
666 666
667 return mv88e6390_serdes_irq_setup(chip, port); 667 return mv88e6390x_serdes_irq_setup(chip, port);
668} 668}
669 669
670void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) 670void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
index b4b839a1d095..ad41ec63cc9f 100644
--- a/drivers/net/dsa/realtek-smi.c
+++ b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
347 struct device_node *mdio_np; 347 struct device_node *mdio_np;
348 int ret; 348 int ret;
349 349
350 mdio_np = of_find_compatible_node(smi->dev->of_node, NULL, 350 mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
351 "realtek,smi-mdio");
352 if (!mdio_np) { 351 if (!mdio_np) {
353 dev_err(smi->dev, "no MDIO bus node\n"); 352 dev_err(smi->dev, "no MDIO bus node\n");
354 return -ENODEV; 353 return -ENODEV;
355 } 354 }
356 355
357 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev); 356 smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
358 if (!smi->slave_mii_bus) 357 if (!smi->slave_mii_bus) {
359 return -ENOMEM; 358 ret = -ENOMEM;
359 goto err_put_node;
360 }
360 smi->slave_mii_bus->priv = smi; 361 smi->slave_mii_bus->priv = smi;
361 smi->slave_mii_bus->name = "SMI slave MII"; 362 smi->slave_mii_bus->name = "SMI slave MII";
362 smi->slave_mii_bus->read = realtek_smi_mdio_read; 363 smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
371 if (ret) { 372 if (ret) {
372 dev_err(smi->dev, "unable to register MDIO bus %s\n", 373 dev_err(smi->dev, "unable to register MDIO bus %s\n",
373 smi->slave_mii_bus->id); 374 smi->slave_mii_bus->id);
374 of_node_put(mdio_np); 375 goto err_put_node;
375 } 376 }
376 377
377 return 0; 378 return 0;
379
380err_put_node:
381 of_node_put(mdio_np);
382
383 return ret;
378} 384}
379 385
380static int realtek_smi_probe(struct platform_device *pdev) 386static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
457 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); 463 struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
458 464
459 dsa_unregister_switch(smi->ds); 465 dsa_unregister_switch(smi->ds);
466 if (smi->slave_mii_bus)
467 of_node_put(smi->slave_mii_bus->dev.of_node);
460 gpiod_set_value(smi->reset, 1); 468 gpiod_set_value(smi->reset, 1);
461 469
462 return 0; 470 return 0;
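The realtek-smi changes are all about the reference attached to OF nodes: of_get_compatible_child() returns the child with an elevated refcount, so every exit path, including the late teardown in remove(), has to balance it with of_node_put(). The canonical shape, with foo_register_bus() assumed:

#include <linux/device.h>
#include <linux/of.h>

static int foo_setup_mdio(struct device *dev)
{
	struct device_node *mdio_np;
	int ret;

	mdio_np = of_get_compatible_child(dev->of_node, "vendor,mdio");
	if (!mdio_np)
		return -ENODEV;

	ret = foo_register_bus(dev, mdio_np);	/* assumed helper */
	if (ret)
		goto err_put_node;

	return 0;	/* reference intentionally held while registered */

err_put_node:
	of_node_put(mdio_np);
	return ret;
}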
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 91fc64c1145e..47e5984f16fb 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1433,18 +1433,18 @@ static int greth_of_probe(struct platform_device *ofdev)
1433 } 1433 }
1434 1434
1435 /* Allocate TX descriptor ring in coherent memory */ 1435 /* Allocate TX descriptor ring in coherent memory */
1436 greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1436 greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1437 &greth->tx_bd_base_phys, 1437 &greth->tx_bd_base_phys,
1438 GFP_KERNEL); 1438 GFP_KERNEL);
1439 if (!greth->tx_bd_base) { 1439 if (!greth->tx_bd_base) {
1440 err = -ENOMEM; 1440 err = -ENOMEM;
1441 goto error3; 1441 goto error3;
1442 } 1442 }
1443 1443
1444 /* Allocate RX descriptor ring in coherent memory */ 1444 /* Allocate RX descriptor ring in coherent memory */
1445 greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024, 1445 greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1446 &greth->rx_bd_base_phys, 1446 &greth->rx_bd_base_phys,
1447 GFP_KERNEL); 1447 GFP_KERNEL);
1448 if (!greth->rx_bd_base) { 1448 if (!greth->rx_bd_base) {
1449 err = -ENOMEM; 1449 err = -ENOMEM;
1450 goto error4; 1450 goto error4;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 0b60921c392f..16477aa6d61f 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -795,8 +795,8 @@ static int slic_init_stat_queue(struct slic_device *sdev)
795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK; 795 size = stq->len * sizeof(*descs) + DESC_ALIGN_MASK;
796 796
797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) { 797 for (i = 0; i < SLIC_NUM_STAT_DESC_ARRAYS; i++) {
798 descs = dma_zalloc_coherent(&sdev->pdev->dev, size, &paddr, 798 descs = dma_alloc_coherent(&sdev->pdev->dev, size, &paddr,
799 GFP_KERNEL); 799 GFP_KERNEL);
800 if (!descs) { 800 if (!descs) {
801 netdev_err(sdev->netdev, 801 netdev_err(sdev->netdev,
802 "failed to allocate status descriptors\n"); 802 "failed to allocate status descriptors\n");
@@ -1240,8 +1240,8 @@ static int slic_init_shmem(struct slic_device *sdev)
1240 struct slic_shmem_data *sm_data; 1240 struct slic_shmem_data *sm_data;
1241 dma_addr_t paddr; 1241 dma_addr_t paddr;
1242 1242
1243 sm_data = dma_zalloc_coherent(&sdev->pdev->dev, sizeof(*sm_data), 1243 sm_data = dma_alloc_coherent(&sdev->pdev->dev, sizeof(*sm_data),
1244 &paddr, GFP_KERNEL); 1244 &paddr, GFP_KERNEL);
1245 if (!sm_data) { 1245 if (!sm_data) {
1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n"); 1246 dev_err(&sdev->pdev->dev, "failed to allocate shared memory\n");
1247 return -ENOMEM; 1247 return -ENOMEM;
@@ -1621,8 +1621,8 @@ static int slic_read_eeprom(struct slic_device *sdev)
1621 int err = 0; 1621 int err = 0;
1622 u8 *mac[2]; 1622 u8 *mac[2];
1623 1623
1624 eeprom = dma_zalloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, 1624 eeprom = dma_alloc_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE,
1625 &paddr, GFP_KERNEL); 1625 &paddr, GFP_KERNEL);
1626 if (!eeprom) 1626 if (!eeprom)
1627 return -ENOMEM; 1627 return -ENOMEM;
1628 1628
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 4f11f98347ed..1827ef1f6d55 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev,
2059 if (skb) { 2059 if (skb) {
2060 dev->stats.tx_packets++; 2060 dev->stats.tx_packets++;
2061 dev->stats.tx_bytes += skb->len; 2061 dev->stats.tx_bytes += skb->len;
2062 dev_kfree_skb_irq(skb); 2062 dev_consume_skb_irq(skb);
2063 info->skb = NULL; 2063 info->skb = NULL;
2064 } 2064 }
2065 2065
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986ba3290..0ae723f75341 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
145 & 0xffff; 145 & 0xffff;
146 146
147 if (inuse) { /* Tx FIFO is not empty */ 147 if (inuse) { /* Tx FIFO is not empty */
148 ready = priv->tx_prod - priv->tx_cons - inuse - 1; 148 ready = max_t(int,
149 priv->tx_prod - priv->tx_cons - inuse - 1, 0);
149 } else { 150 } else {
150 /* Check for buffered last packet */ 151 /* Check for buffered last packet */
151 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); 152 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
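The msgdma fix guards the producer/consumer arithmetic: when the FIFO is momentarily over-committed the expression goes negative, and with unsigned operands it would wrap to a huge "ready" count. max_t() casts both arguments to the named type first, so the comparison happens as signed and the result is floored at zero:

	/* tx_prod/tx_cons/inuse are assumed unsigned ring counters */
	ready = max_t(int, priv->tx_prod - priv->tx_cons - inuse - 1, 0);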
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 02921d877c08..aa1d1f5339d2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
714 714
715 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link, 715 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
716 priv->phy_iface); 716 priv->phy_iface);
717 if (IS_ERR(phydev)) 717 if (IS_ERR(phydev)) {
718 netdev_err(dev, "Could not attach to PHY\n"); 718 netdev_err(dev, "Could not attach to PHY\n");
719 phydev = NULL;
720 }
719 721
720 } else { 722 } else {
721 int ret; 723 int ret;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 420cede41ca4..b17d435de09f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -111,8 +111,8 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
111 struct ena_com_admin_sq *sq = &queue->sq; 111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth); 112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113 113
114 sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr, 114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 GFP_KERNEL); 115 GFP_KERNEL);
116 116
117 if (!sq->entries) { 117 if (!sq->entries) {
118 pr_err("memory allocation failed"); 118 pr_err("memory allocation failed");
@@ -133,8 +133,8 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
133 struct ena_com_admin_cq *cq = &queue->cq; 133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth); 134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135 135
136 cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr, 136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 GFP_KERNEL); 137 GFP_KERNEL);
138 138
139 if (!cq->entries) { 139 if (!cq->entries) {
140 pr_err("memory allocation failed"); 140 pr_err("memory allocation failed");
@@ -156,8 +156,8 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
156 156
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; 157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); 158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr, 159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 GFP_KERNEL); 160 GFP_KERNEL);
161 161
162 if (!aenq->entries) { 162 if (!aenq->entries) {
163 pr_err("memory allocation failed"); 163 pr_err("memory allocation failed");
@@ -344,15 +344,15 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
344 dev_node = dev_to_node(ena_dev->dmadev); 344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node); 345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr = 346 io_sq->desc_addr.virt_addr =
347 dma_zalloc_coherent(ena_dev->dmadev, size, 347 dma_alloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr, 348 &io_sq->desc_addr.phys_addr,
349 GFP_KERNEL); 349 GFP_KERNEL);
350 set_dev_node(ena_dev->dmadev, dev_node); 350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) { 351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr = 352 io_sq->desc_addr.virt_addr =
353 dma_zalloc_coherent(ena_dev->dmadev, size, 353 dma_alloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr, 354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL); 355 GFP_KERNEL);
356 } 356 }
357 357
358 if (!io_sq->desc_addr.virt_addr) { 358 if (!io_sq->desc_addr.virt_addr) {
@@ -425,14 +425,14 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
425 prev_node = dev_to_node(ena_dev->dmadev); 425 prev_node = dev_to_node(ena_dev->dmadev);
426 set_dev_node(ena_dev->dmadev, ctx->numa_node); 426 set_dev_node(ena_dev->dmadev, ctx->numa_node);
427 io_cq->cdesc_addr.virt_addr = 427 io_cq->cdesc_addr.virt_addr =
428 dma_zalloc_coherent(ena_dev->dmadev, size, 428 dma_alloc_coherent(ena_dev->dmadev, size,
429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); 429 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
430 set_dev_node(ena_dev->dmadev, prev_node); 430 set_dev_node(ena_dev->dmadev, prev_node);
431 if (!io_cq->cdesc_addr.virt_addr) { 431 if (!io_cq->cdesc_addr.virt_addr) {
432 io_cq->cdesc_addr.virt_addr = 432 io_cq->cdesc_addr.virt_addr =
433 dma_zalloc_coherent(ena_dev->dmadev, size, 433 dma_alloc_coherent(ena_dev->dmadev, size,
434 &io_cq->cdesc_addr.phys_addr, 434 &io_cq->cdesc_addr.phys_addr,
435 GFP_KERNEL); 435 GFP_KERNEL);
436 } 436 }
437 437
438 if (!io_cq->cdesc_addr.virt_addr) { 438 if (!io_cq->cdesc_addr.virt_addr) {
@@ -1026,8 +1026,8 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1026 struct ena_rss *rss = &ena_dev->rss; 1026 struct ena_rss *rss = &ena_dev->rss;
1027 1027
1028 rss->hash_key = 1028 rss->hash_key =
1029 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), 1029 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1030 &rss->hash_key_dma_addr, GFP_KERNEL); 1030 &rss->hash_key_dma_addr, GFP_KERNEL);
1031 1031
1032 if (unlikely(!rss->hash_key)) 1032 if (unlikely(!rss->hash_key))
1033 return -ENOMEM; 1033 return -ENOMEM;
@@ -1050,8 +1050,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1050 struct ena_rss *rss = &ena_dev->rss; 1050 struct ena_rss *rss = &ena_dev->rss;
1051 1051
1052 rss->hash_ctrl = 1052 rss->hash_ctrl =
1053 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), 1053 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1054 &rss->hash_ctrl_dma_addr, GFP_KERNEL); 1054 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1055 1055
1056 if (unlikely(!rss->hash_ctrl)) 1056 if (unlikely(!rss->hash_ctrl))
1057 return -ENOMEM; 1057 return -ENOMEM;
@@ -1094,8 +1094,8 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1094 sizeof(struct ena_admin_rss_ind_table_entry); 1094 sizeof(struct ena_admin_rss_ind_table_entry);
1095 1095
1096 rss->rss_ind_tbl = 1096 rss->rss_ind_tbl =
1097 dma_zalloc_coherent(ena_dev->dmadev, tbl_size, 1097 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1098 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); 1098 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1099 if (unlikely(!rss->rss_ind_tbl)) 1099 if (unlikely(!rss->rss_ind_tbl))
1100 goto mem_err1; 1100 goto mem_err1;
1101 1101
@@ -1649,9 +1649,9 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1649 1649
1650 spin_lock_init(&mmio_read->lock); 1650 spin_lock_init(&mmio_read->lock);
1651 mmio_read->read_resp = 1651 mmio_read->read_resp =
1652 dma_zalloc_coherent(ena_dev->dmadev, 1652 dma_alloc_coherent(ena_dev->dmadev,
1653 sizeof(*mmio_read->read_resp), 1653 sizeof(*mmio_read->read_resp),
1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL); 1654 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1655 if (unlikely(!mmio_read->read_resp)) 1655 if (unlikely(!mmio_read->read_resp))
1656 goto err; 1656 goto err;
1657 1657
@@ -2623,8 +2623,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2624 2624
2625 host_attr->host_info = 2625 host_attr->host_info =
2626 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, 2626 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2627 &host_attr->host_info_dma_addr, GFP_KERNEL); 2627 &host_attr->host_info_dma_addr, GFP_KERNEL);
2628 if (unlikely(!host_attr->host_info)) 2628 if (unlikely(!host_attr->host_info))
2629 return -ENOMEM; 2629 return -ENOMEM;
2630 2630
@@ -2641,8 +2641,9 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr; 2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2642 2642
2643 host_attr->debug_area_virt_addr = 2643 host_attr->debug_area_virt_addr =
2644 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, 2644 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2645 &host_attr->debug_area_dma_addr, GFP_KERNEL); 2645 &host_attr->debug_area_dma_addr,
2646 GFP_KERNEL);
2646 if (unlikely(!host_attr->debug_area_virt_addr)) { 2647 if (unlikely(!host_attr->debug_area_virt_addr)) {
2647 host_attr->debug_area_size = 0; 2648 host_attr->debug_area_size = 0;
2648 return -ENOMEM; 2649 return -ENOMEM;
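
For context on the mechanical dma_zalloc_coherent() -> dma_alloc_coherent() conversion running through this file (and many of the files below): dma_alloc_coherent() now returns zeroed memory itself, so the zeroing wrapper became redundant and was removed tree-wide. A minimal sketch of what the old wrapper did, assuming the historical definition from include/linux/dma-mapping.h (reconstructed here from memory, not part of this patch):

    /* dma_zalloc_coherent() only forced zeroing on top of
     * dma_alloc_coherent(); once the core DMA code zeroes
     * unconditionally, both spellings are equivalent.
     */
    static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                            dma_addr_t *dma_handle, gfp_t flag)
    {
            return dma_alloc_coherent(dev, size, dma_handle,
                                      flag | __GFP_ZERO);
    }
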
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a70bb1bb90e7..a6eacf2099c3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
2663 goto err_device_destroy; 2663 goto err_device_destroy;
2664 } 2664 }
2665 2665
2666 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2667 /* Make sure we don't have a race with AENQ Links state handler */
2668 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2669 netif_carrier_on(adapter->netdev);
2670
2671 rc = ena_enable_msix_and_set_admin_interrupts(adapter, 2666 rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2672 adapter->num_queues); 2667 adapter->num_queues);
2673 if (rc) { 2668 if (rc) {
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
2684 } 2679 }
2685 2680
2686 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); 2681 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2682
2683 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2684 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2685 netif_carrier_on(adapter->netdev);
2686
2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); 2687 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2688 dev_err(&pdev->dev, 2688 dev_err(&pdev->dev,
2689 "Device reset completed successfully, Driver info: %s\n", 2689 "Device reset completed successfully, Driver info: %s\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index dc8b6173d8d8..63870072cbbd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
45 45
46#define DRV_MODULE_VER_MAJOR 2 46#define DRV_MODULE_VER_MAJOR 2
47#define DRV_MODULE_VER_MINOR 0 47#define DRV_MODULE_VER_MINOR 0
48#define DRV_MODULE_VER_SUBMINOR 2 48#define DRV_MODULE_VER_SUBMINOR 3
49 49
50#define DRV_MODULE_NAME "ena" 50#define DRV_MODULE_NAME "ena"
51#ifndef DRV_MODULE_VERSION 51#ifndef DRV_MODULE_VERSION
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a90080f12e67..e548c0ae2e00 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev)
666 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], 666 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
667 lp->tx_skbuff[tx_index]->len, 667 lp->tx_skbuff[tx_index]->len,
668 PCI_DMA_TODEVICE); 668 PCI_DMA_TODEVICE);
669 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); 669 dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
670 lp->tx_skbuff[tx_index] = NULL; 670 lp->tx_skbuff[tx_index] = NULL;
671 lp->tx_dma_addr[tx_index] = 0; 671 lp->tx_dma_addr[tx_index] = 0;
672 } 672 }
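
dev_consume_skb_irq() and dev_kfree_skb_irq() free the skb the same way; the distinction is purely for tracing. The consume variant marks a packet that was delivered successfully, the kfree variant marks a drop, so after this change tools such as dropwatch and the skb:kfree_skb tracepoint stop reporting ordinary TX completion as packet loss. The same substitution recurs in several drivers below (bmac, b44, sb1250-mac, de2104x). A minimal TX-reclaim sketch under that convention, using hypothetical ring fields:

    /* Sketch only: free buffers whose transmission completed.
     * These skbs reached the wire, so this is a consume, not a drop.
     */
    static void tx_reclaim(struct my_ring *r)
    {
            while (r->dirty != r->head) {
                    struct sk_buff *skb = r->skb[r->dirty];

                    if (skb) {
                            dev_consume_skb_irq(skb);
                            r->skb[r->dirty] = NULL;
                    }
                    r->dirty = (r->dirty + 1) & (r->size - 1);
            }
    }
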
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index d272dc6984ac..b40d4377cc71 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,8 +431,6 @@
431#define MAC_MDIOSCAR_PA_WIDTH 5 431#define MAC_MDIOSCAR_PA_WIDTH 5
432#define MAC_MDIOSCAR_RA_INDEX 0 432#define MAC_MDIOSCAR_RA_INDEX 0
433#define MAC_MDIOSCAR_RA_WIDTH 16 433#define MAC_MDIOSCAR_RA_WIDTH 16
434#define MAC_MDIOSCAR_REG_INDEX 0
435#define MAC_MDIOSCAR_REG_WIDTH 21
436#define MAC_MDIOSCCDR_BUSY_INDEX 22 434#define MAC_MDIOSCCDR_BUSY_INDEX 22
437#define MAC_MDIOSCCDR_BUSY_WIDTH 1 435#define MAC_MDIOSCCDR_BUSY_WIDTH 1
438#define MAC_MDIOSCCDR_CMD_INDEX 16 436#define MAC_MDIOSCCDR_CMD_INDEX 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 1e929a1e4ca7..4666084eda16 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
1284 } 1284 }
1285} 1285}
1286 1286
1287static unsigned int xgbe_create_mdio_sca(int port, int reg)
1288{
1289 unsigned int mdio_sca, da;
1290
1291 da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
1292
1293 mdio_sca = 0;
1294 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);
1297
1298 return mdio_sca;
1299}
1300
1287static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, 1301static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1288 int reg, u16 val) 1302 int reg, u16 val)
1289{ 1303{
@@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1291 1305
1292 reinit_completion(&pdata->mdio_complete); 1306 reinit_completion(&pdata->mdio_complete);
1293 1307
1294 mdio_sca = 0; 1308 mdio_sca = xgbe_create_mdio_sca(addr, reg);
1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1297 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1309 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1298 1310
1299 mdio_sccd = 0; 1311 mdio_sccd = 0;
@@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
1317 1329
1318 reinit_completion(&pdata->mdio_complete); 1330 reinit_completion(&pdata->mdio_complete);
1319 1331
1320 mdio_sca = 0; 1332 mdio_sca = xgbe_create_mdio_sca(addr, reg);
1321 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
1322 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
1323 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1333 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
1324 1334
1325 mdio_sccd = 0; 1335 mdio_sccd = 0;
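
xgbe_create_mdio_sca() deduplicates the SCA register setup in the read and write paths and, unlike the removed open-coded version, handles clause-45 addressing: in the kernel MDIO convention a C45 register index carries the MMD device address in its upper bits, roughly reg = MII_ADDR_C45 | (devad << 16) | regnum, which is why the helper derives da from reg >> 16 (the XGMAC_SET_BITS() field width masks it down) and programs only the low 16 bits into RA; clause-22 accesses keep da == 0. Hypothetical usage for a C45 access to the PMA/PMD control register (port_addr is a placeholder, not from this patch):

    unsigned int sca = xgbe_create_mdio_sca(port_addr,
                                            MII_ADDR_C45 |
                                            (MDIO_MMD_PMAPMD << 16) |
                                            MDIO_CTRL1);
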
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 0f2ad50f3bd7..87b142a312e0 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -206,8 +206,8 @@ static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
206 } 206 }
207 207
208 /* Packet buffers should be 64B aligned */ 208 /* Packet buffers should be 64B aligned */
209 pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr, 209 pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
210 GFP_ATOMIC); 210 GFP_ATOMIC);
211 if (unlikely(!pkt_buf)) { 211 if (unlikely(!pkt_buf)) {
212 dev_kfree_skb_any(skb); 212 dev_kfree_skb_any(skb);
213 return NETDEV_TX_OK; 213 return NETDEV_TX_OK;
@@ -428,8 +428,8 @@ static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
428 ring->ndev = ndev; 428 ring->ndev = ndev;
429 429
430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC; 430 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
431 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr, 431 ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
432 GFP_KERNEL); 432 GFP_KERNEL);
433 if (!ring->desc_addr) 433 if (!ring->desc_addr)
434 goto err; 434 goto err;
435 435
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 6a8e2567f2bd..4d3855ceb500 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
777 777
778 if (bp->tx_bufs[bp->tx_empty]) { 778 if (bp->tx_bufs[bp->tx_empty]) {
779 ++dev->stats.tx_packets; 779 ++dev->stats.tx_packets;
780 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); 780 dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]);
781 } 781 }
782 bp->tx_bufs[bp->tx_empty] = NULL; 782 bp->tx_bufs[bp->tx_empty] = NULL;
783 bp->tx_fullup = 0; 783 bp->tx_fullup = 0;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index c131cfc1b79d..e3538ba7d0e7 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -660,10 +660,9 @@ static int alx_alloc_rings(struct alx_priv *alx)
660 alx->num_txq + 660 alx->num_txq +
661 sizeof(struct alx_rrd) * alx->rx_ringsz + 661 sizeof(struct alx_rrd) * alx->rx_ringsz +
662 sizeof(struct alx_rfd) * alx->rx_ringsz; 662 sizeof(struct alx_rfd) * alx->rx_ringsz;
663 alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev, 663 alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
664 alx->descmem.size, 664 alx->descmem.size,
665 &alx->descmem.dma, 665 &alx->descmem.dma, GFP_KERNEL);
666 GFP_KERNEL);
667 if (!alx->descmem.virt) 666 if (!alx->descmem.virt)
668 return -ENOMEM; 667 return -ENOMEM;
669 668
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 7087b88550db..3a3b35b5df67 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1019,8 +1019,8 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1019 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
1020 8 * 4; 1020 8 * 4;
1021 1021
1022 ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, 1022 ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
1023 &ring_header->dma, GFP_KERNEL); 1023 &ring_header->dma, GFP_KERNEL);
1024 if (unlikely(!ring_header->desc)) { 1024 if (unlikely(!ring_header->desc)) {
1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); 1025 dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1026 goto err_nomem; 1026 goto err_nomem;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f44808959ff3..97ab0dd25552 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp)
638 bytes_compl += skb->len; 638 bytes_compl += skb->len;
639 pkts_compl++; 639 pkts_compl++;
640 640
641 dev_kfree_skb_irq(skb); 641 dev_consume_skb_irq(skb);
642 } 642 }
643 643
644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); 644 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
@@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1012 } 1012 }
1013 1013
1014 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); 1014 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1015 dev_kfree_skb_any(skb); 1015 dev_consume_skb_any(skb);
1016 skb = bounce_skb; 1016 skb = bounce_skb;
1017 } 1017 }
1018 1018
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 6bae973d4dce..09cd188826b1 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -936,7 +936,7 @@ static int bcm_enet_open(struct net_device *dev)
936 936
937 /* allocate rx dma ring */ 937 /* allocate rx dma ring */
938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 938 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
939 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 939 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
940 if (!p) { 940 if (!p) {
941 ret = -ENOMEM; 941 ret = -ENOMEM;
942 goto out_freeirq_tx; 942 goto out_freeirq_tx;
@@ -947,7 +947,7 @@ static int bcm_enet_open(struct net_device *dev)
947 947
948 /* allocate tx dma ring */ 948 /* allocate tx dma ring */
949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 949 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
950 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 950 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
951 if (!p) { 951 if (!p) {
952 ret = -ENOMEM; 952 ret = -ENOMEM;
953 goto out_free_rx_ring; 953 goto out_free_rx_ring;
@@ -2120,7 +2120,7 @@ static int bcm_enetsw_open(struct net_device *dev)
2120 2120
2121 /* allocate rx dma ring */ 2121 /* allocate rx dma ring */
2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); 2122 size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
2123 p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); 2123 p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
2124 if (!p) { 2124 if (!p) {
2125 dev_err(kdev, "cannot allocate rx ring %u\n", size); 2125 dev_err(kdev, "cannot allocate rx ring %u\n", size);
2126 ret = -ENOMEM; 2126 ret = -ENOMEM;
@@ -2132,7 +2132,7 @@ static int bcm_enetsw_open(struct net_device *dev)
2132 2132
2133 /* allocate tx dma ring */ 2133 /* allocate tx dma ring */
2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); 2134 size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
2135 p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); 2135 p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
2136 if (!p) { 2136 if (!p) {
2137 dev_err(kdev, "cannot allocate tx ring\n"); 2137 dev_err(kdev, "cannot allocate tx ring\n");
2138 ret = -ENOMEM; 2138 ret = -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4574275ef445..28c9b0bdf2f6 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
520 struct ethtool_wolinfo *wol) 520 struct ethtool_wolinfo *wol)
521{ 521{
522 struct bcm_sysport_priv *priv = netdev_priv(dev); 522 struct bcm_sysport_priv *priv = netdev_priv(dev);
523 u32 reg;
524 523
525 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; 524 wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
526 wol->wolopts = priv->wolopts; 525 wol->wolopts = priv->wolopts;
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
528 if (!(priv->wolopts & WAKE_MAGICSECURE)) 527 if (!(priv->wolopts & WAKE_MAGICSECURE))
529 return; 528 return;
530 529
531 /* Return the programmed SecureOn password */ 530 memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
532 reg = umac_readl(priv, UMAC_PSW_MS);
533 put_unaligned_be16(reg, &wol->sopass[0]);
534 reg = umac_readl(priv, UMAC_PSW_LS);
535 put_unaligned_be32(reg, &wol->sopass[2]);
536} 531}
537 532
538static int bcm_sysport_set_wol(struct net_device *dev, 533static int bcm_sysport_set_wol(struct net_device *dev,
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
548 if (wol->wolopts & ~supported) 543 if (wol->wolopts & ~supported)
549 return -EINVAL; 544 return -EINVAL;
550 545
551 /* Program the SecureOn password */ 546 if (wol->wolopts & WAKE_MAGICSECURE)
552 if (wol->wolopts & WAKE_MAGICSECURE) { 547 memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
553 umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
554 UMAC_PSW_MS);
555 umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
556 UMAC_PSW_LS);
557 }
558 548
559 /* Flag the device and relevant IRQ as wakeup capable */ 549 /* Flag the device and relevant IRQ as wakeup capable */
560 if (wol->wolopts) { 550 if (wol->wolopts) {
@@ -1506,8 +1496,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1506 /* We just need one DMA descriptor which is DMA-able, since writing to 1496 /* We just need one DMA descriptor which is DMA-able, since writing to
1507 * the port will allocate a new descriptor in its internal linked-list 1497 * the port will allocate a new descriptor in its internal linked-list
1508 */ 1498 */
1509 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma, 1499 p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
1510 GFP_KERNEL); 1500 GFP_KERNEL);
1511 if (!p) { 1501 if (!p) {
1512 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1502 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1513 return -ENOMEM; 1503 return -ENOMEM;
@@ -2649,13 +2639,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2649 unsigned int index, i = 0; 2639 unsigned int index, i = 0;
2650 u32 reg; 2640 u32 reg;
2651 2641
2652 /* Password has already been programmed */
2653 reg = umac_readl(priv, UMAC_MPD_CTRL); 2642 reg = umac_readl(priv, UMAC_MPD_CTRL);
2654 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) 2643 if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
2655 reg |= MPD_EN; 2644 reg |= MPD_EN;
2656 reg &= ~PSW_EN; 2645 reg &= ~PSW_EN;
2657 if (priv->wolopts & WAKE_MAGICSECURE) 2646 if (priv->wolopts & WAKE_MAGICSECURE) {
2647 /* Program the SecureOn password */
2648 umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
2649 UMAC_PSW_MS);
2650 umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
2651 UMAC_PSW_LS);
2658 reg |= PSW_EN; 2652 reg |= PSW_EN;
2653 }
2659 umac_writel(priv, reg, UMAC_MPD_CTRL); 2654 umac_writel(priv, reg, UMAC_MPD_CTRL);
2660 2655
2661 if (priv->wolopts & WAKE_FILTER) { 2656 if (priv->wolopts & WAKE_FILTER) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 0887e6356649..0b192fea9c5d 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
12#define __BCM_SYSPORT_H 12#define __BCM_SYSPORT_H
13 13
14#include <linux/bitmap.h> 14#include <linux/bitmap.h>
15#include <linux/ethtool.h>
15#include <linux/if_vlan.h> 16#include <linux/if_vlan.h>
16#include <linux/net_dim.h> 17#include <linux/net_dim.h>
17 18
@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
778 unsigned int crc_fwd:1; 779 unsigned int crc_fwd:1;
779 u16 rev; 780 u16 rev;
780 u32 wolopts; 781 u32 wolopts;
782 u8 sopass[SOPASS_MAX];
781 unsigned int wol_irq_disabled:1; 783 unsigned int wol_irq_disabled:1;
782 784
783 /* MIB related fields */ 785 /* MIB related fields */
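
The bcmsysport change caches the SecureOn password in the new priv->sopass[] field (SOPASS_MAX is 6 bytes, from linux/ethtool.h) instead of pushing it into the UMAC_PSW_MS/LS registers at ethtool-set time; the registers are programmed only on the actual suspend-to-WoL path, where PSW_EN is raised. The ethtool get/set handlers reduce to a memcpy of the cached copy. Sketch of the two halves:

    /* ethtool set: remember the password, touch no hardware yet */
    if (wol->wolopts & WAKE_MAGICSECURE)
            memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

    /* suspend-to-WoL: high 16 bits, then low 32 bits */
    umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), UMAC_PSW_MS);
    umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), UMAC_PSW_LS);
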
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index cabc8e49ad24..2d3a44c40221 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -634,9 +634,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
634 634
635 /* Alloc ring of descriptors */ 635 /* Alloc ring of descriptors */
636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 636 size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
637 ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 637 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
638 &ring->dma_base, 638 &ring->dma_base,
639 GFP_KERNEL); 639 GFP_KERNEL);
640 if (!ring->cpu_base) { 640 if (!ring->cpu_base) {
641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", 641 dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
642 ring->mmio_base); 642 ring->mmio_base);
@@ -659,9 +659,9 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
659 659
660 /* Alloc ring of descriptors */ 660 /* Alloc ring of descriptors */
661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc); 661 size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
662 ring->cpu_base = dma_zalloc_coherent(dma_dev, size, 662 ring->cpu_base = dma_alloc_coherent(dma_dev, size,
663 &ring->dma_base, 663 &ring->dma_base,
664 GFP_KERNEL); 664 GFP_KERNEL);
665 if (!ring->cpu_base) { 665 if (!ring->cpu_base) {
666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", 666 dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
667 ring->mmio_base); 667 ring->mmio_base);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index bbb247116045..d63371d70bce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -844,8 +844,8 @@ bnx2_alloc_stats_blk(struct net_device *dev)
844 BNX2_SBLK_MSIX_ALIGN_SIZE); 844 BNX2_SBLK_MSIX_ALIGN_SIZE);
845 bp->status_stats_size = status_blk_size + 845 bp->status_stats_size = status_blk_size +
846 sizeof(struct statistics_block); 846 sizeof(struct statistics_block);
847 status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, 847 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
848 &bp->status_blk_mapping, GFP_KERNEL); 848 &bp->status_blk_mapping, GFP_KERNEL);
849 if (!status_blk) 849 if (!status_blk)
850 return -ENOMEM; 850 return -ENOMEM;
851 851
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 5cd3135dfe30..03d131f777bc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -2081,7 +2081,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
2081 bool is_pf); 2081 bool is_pf);
2082 2082
2083#define BNX2X_ILT_ZALLOC(x, y, size) \ 2083#define BNX2X_ILT_ZALLOC(x, y, size) \
2084 x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL) 2084 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
2085 2085
2086#define BNX2X_ILT_FREE(x, y, size) \ 2086#define BNX2X_ILT_FREE(x, y, size) \
2087 do { \ 2087 do { \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 142bc11b9fbb..2462e7aa0c5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -52,7 +52,7 @@ extern int bnx2x_num_queues;
52 52
53#define BNX2X_PCI_ALLOC(y, size) \ 53#define BNX2X_PCI_ALLOC(y, size) \
54({ \ 54({ \
55 void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \ 55 void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
56 if (x) \ 56 if (x) \
57 DP(NETIF_MSG_HW, \ 57 DP(NETIF_MSG_HW, \
58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \ 58 "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 3aa80da973d7..8bc7e495b027 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3449,10 +3449,10 @@ alloc_ext_stats:
3449 goto alloc_tx_ext_stats; 3449 goto alloc_tx_ext_stats;
3450 3450
3451 bp->hw_rx_port_stats_ext = 3451 bp->hw_rx_port_stats_ext =
3452 dma_zalloc_coherent(&pdev->dev, 3452 dma_alloc_coherent(&pdev->dev,
3453 sizeof(struct rx_port_stats_ext), 3453 sizeof(struct rx_port_stats_ext),
3454 &bp->hw_rx_port_stats_ext_map, 3454 &bp->hw_rx_port_stats_ext_map,
3455 GFP_KERNEL); 3455 GFP_KERNEL);
3456 if (!bp->hw_rx_port_stats_ext) 3456 if (!bp->hw_rx_port_stats_ext)
3457 return 0; 3457 return 0;
3458 3458
@@ -3462,10 +3462,10 @@ alloc_tx_ext_stats:
3462 3462
3463 if (bp->hwrm_spec_code >= 0x10902) { 3463 if (bp->hwrm_spec_code >= 0x10902) {
3464 bp->hw_tx_port_stats_ext = 3464 bp->hw_tx_port_stats_ext =
3465 dma_zalloc_coherent(&pdev->dev, 3465 dma_alloc_coherent(&pdev->dev,
3466 sizeof(struct tx_port_stats_ext), 3466 sizeof(struct tx_port_stats_ext),
3467 &bp->hw_tx_port_stats_ext_map, 3467 &bp->hw_tx_port_stats_ext_map,
3468 GFP_KERNEL); 3468 GFP_KERNEL);
3469 } 3469 }
3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT; 3470 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3471 } 3471 }
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 4973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; 4974 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4975 u32 map_idx = ring->map_idx; 4975 u32 map_idx = ring->map_idx;
4976 unsigned int vector;
4976 4977
4978 vector = bp->irq_tbl[map_idx].vector;
4979 disable_irq_nosync(vector);
4977 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); 4980 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4978 if (rc) 4981 if (rc) {
4982 enable_irq(vector);
4979 goto err_out; 4983 goto err_out;
4984 }
4980 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); 4985 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4981 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); 4986 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4987 enable_irq(vector);
4982 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; 4988 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4983 4989
4984 if (!i) { 4990 if (!i) {
@@ -5601,7 +5607,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5601 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 5607 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5602 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 5608 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5603 if (bp->flags & BNXT_FLAG_CHIP_P5) 5609 if (bp->flags & BNXT_FLAG_CHIP_P5)
5604 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; 5610 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5611 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5605 else 5612 else
5606 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; 5613 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5607 } 5614 }
@@ -6221,9 +6228,12 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6221 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; 6228 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6222 rmem->depth = 1; 6229 rmem->depth = 1;
6223 rmem->nr_pages = MAX_CTX_PAGES; 6230 rmem->nr_pages = MAX_CTX_PAGES;
6224 if (i == (nr_tbls - 1)) 6231 if (i == (nr_tbls - 1)) {
6225 rmem->nr_pages = ctx_pg->nr_pages % 6232 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6226 MAX_CTX_PAGES; 6233
6234 if (rem)
6235 rmem->nr_pages = rem;
6236 }
6227 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); 6237 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6228 if (rc) 6238 if (rc)
6229 break; 6239 break;
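
Three bnxt fixes above. The NQ/CP ring allocation is now bracketed with disable_irq_nosync()/enable_irq() so the firmware cannot raise the ring's vector before bnxt_set_db()/bnxt_db_nq() have armed the doorbell. bnxt_hwrm_check_pf_rings() additionally asks firmware to test NQ assets on P5 chips, paired with the new FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST bit in bnxt_hsi.h below. And the context-memory page-table sizing no longer collapses to zero when nr_pages is an exact multiple of MAX_CTX_PAGES; a worked example of the off-by-modulo:

    /* If ctx_pg->nr_pages == 2 * MAX_CTX_PAGES (so nr_tbls == 2):
     *   old: last table sized nr_pages % MAX_CTX_PAGES == 0  -> broken
     *   new: rem == 0, so the MAX_CTX_PAGES default is kept
     */
    rmem->nr_pages = MAX_CTX_PAGES;
    if (i == nr_tbls - 1) {
            int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

            if (rem)
                    rmem->nr_pages = rem;
    }
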
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 15c7041e937b..70775158c8c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -316,8 +316,8 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
316 316
317 n = IEEE_8021QAZ_MAX_TCS; 317 n = IEEE_8021QAZ_MAX_TCS;
318 data_len = sizeof(*data) + sizeof(*fw_app) * n; 318 data_len = sizeof(*data) + sizeof(*fw_app) * n;
319 data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, 319 data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping,
320 GFP_KERNEL); 320 GFP_KERNEL);
321 if (!data) 321 if (!data)
322 return -ENOMEM; 322 return -ENOMEM;
323 323
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 140dbd62106d..7f56032e44ac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -85,8 +85,8 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
85 return -EFAULT; 85 return -EFAULT;
86 } 86 }
87 87
88 data_addr = dma_zalloc_coherent(&bp->pdev->dev, bytesize, 88 data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
89 &data_dma_addr, GFP_KERNEL); 89 &data_dma_addr, GFP_KERNEL);
90 if (!data_addr) 90 if (!data_addr)
91 return -ENOMEM; 91 return -ENOMEM;
92 92
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f1aaac8e6268..0a0995894ddb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -386,8 +386,8 @@ struct hwrm_err_output {
386#define HWRM_VERSION_MAJOR 1 386#define HWRM_VERSION_MAJOR 1
387#define HWRM_VERSION_MINOR 10 387#define HWRM_VERSION_MINOR 10
388#define HWRM_VERSION_UPDATE 0 388#define HWRM_VERSION_UPDATE 0
389#define HWRM_VERSION_RSVD 33 389#define HWRM_VERSION_RSVD 35
390#define HWRM_VERSION_STR "1.10.0.33" 390#define HWRM_VERSION_STR "1.10.0.35"
391 391
392/* hwrm_ver_get_input (size:192b/24B) */ 392/* hwrm_ver_get_input (size:192b/24B) */
393struct hwrm_ver_get_input { 393struct hwrm_ver_get_input {
@@ -1184,6 +1184,7 @@ struct hwrm_func_cfg_input {
1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL 1184 #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL 1185 #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL 1186 #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
1187 #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
1187 __le32 enables; 1188 __le32 enables;
1188 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL 1189 #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
1189 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL 1190 #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 5db9f4158e62..134ae2862efa 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
1288 * for transmits, we just free buffers. 1288 * for transmits, we just free buffers.
1289 */ 1289 */
1290 1290
1291 dev_kfree_skb_irq(sb); 1291 dev_consume_skb_irq(sb);
1292 1292
1293 /* 1293 /*
1294 * .. and advance to the next buffer. 1294 * .. and advance to the next buffer.
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b1397af81f7..b1627dd5f2fd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8712,10 +8712,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8712 if (!i && tg3_flag(tp, ENABLE_RSS)) 8712 if (!i && tg3_flag(tp, ENABLE_RSS))
8713 continue; 8713 continue;
8714 8714
8715 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev, 8715 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8716 TG3_RX_RCB_RING_BYTES(tp), 8716 TG3_RX_RCB_RING_BYTES(tp),
8717 &tnapi->rx_rcb_mapping, 8717 &tnapi->rx_rcb_mapping,
8718 GFP_KERNEL); 8718 GFP_KERNEL);
8719 if (!tnapi->rx_rcb) 8719 if (!tnapi->rx_rcb)
8720 goto err_out; 8720 goto err_out;
8721 } 8721 }
@@ -8768,9 +8768,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8768{ 8768{
8769 int i; 8769 int i;
8770 8770
8771 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev, 8771 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8772 sizeof(struct tg3_hw_stats), 8772 sizeof(struct tg3_hw_stats),
8773 &tp->stats_mapping, GFP_KERNEL); 8773 &tp->stats_mapping, GFP_KERNEL);
8774 if (!tp->hw_stats) 8774 if (!tp->hw_stats)
8775 goto err_out; 8775 goto err_out;
8776 8776
@@ -8778,10 +8778,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8778 struct tg3_napi *tnapi = &tp->napi[i]; 8778 struct tg3_napi *tnapi = &tp->napi[i];
8779 struct tg3_hw_status *sblk; 8779 struct tg3_hw_status *sblk;
8780 8780
8781 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev, 8781 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8782 TG3_HW_STATUS_SIZE, 8782 TG3_HW_STATUS_SIZE,
8783 &tnapi->status_mapping, 8783 &tnapi->status_mapping,
8784 GFP_KERNEL); 8784 GFP_KERNEL);
8785 if (!tnapi->hw_status) 8785 if (!tnapi->hw_status)
8786 goto err_out; 8786 goto err_out;
8787 8787
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c92cf6..9bbaad9f3d63 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
643#define MACB_CAPS_JUMBO 0x00000020 643#define MACB_CAPS_JUMBO 0x00000020
644#define MACB_CAPS_GEM_HAS_PTP 0x00000040 644#define MACB_CAPS_GEM_HAS_PTP 0x00000040
645#define MACB_CAPS_BD_RD_PREFETCH 0x00000080 645#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
646#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
646#define MACB_CAPS_FIFO_MODE 0x10000000 647#define MACB_CAPS_FIFO_MODE 0x10000000
647#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 648#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
648#define MACB_CAPS_SG_DISABLED 0x40000000 649#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1214,6 +1215,8 @@ struct macb {
1214 1215
1215 int rx_bd_rd_prefetch; 1216 int rx_bd_rd_prefetch;
1216 int tx_bd_rd_prefetch; 1217 int tx_bd_rd_prefetch;
1218
1219 u32 rx_intr_mask;
1217}; 1220};
1218 1221
1219#ifdef CONFIG_MACB_USE_HWSTAMP 1222#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b126926ef7f5..2b2882615e8b 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -56,8 +56,7 @@
56/* level of occupied TX descriptors under which we wake up TX process */ 56/* level of occupied TX descriptors under which we wake up TX process */
57#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) 57#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
58 58
59#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 59#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
60 | MACB_BIT(ISR_ROVR))
61#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ 60#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
62 | MACB_BIT(ISR_RLE) \ 61 | MACB_BIT(ISR_RLE) \
63 | MACB_BIT(TXERR)) 62 | MACB_BIT(TXERR))
@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
1270 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1269 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1271 napi_reschedule(napi); 1270 napi_reschedule(napi);
1272 } else { 1271 } else {
1273 queue_writel(queue, IER, MACB_RX_INT_FLAGS); 1272 queue_writel(queue, IER, bp->rx_intr_mask);
1274 } 1273 }
1275 } 1274 }
1276 1275
@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data)
1288 u32 ctrl; 1287 u32 ctrl;
1289 1288
1290 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1289 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1291 queue_writel(queue, IDR, MACB_RX_INT_FLAGS | 1290 queue_writel(queue, IDR, bp->rx_intr_mask |
1292 MACB_TX_INT_FLAGS | 1291 MACB_TX_INT_FLAGS |
1293 MACB_BIT(HRESP)); 1292 MACB_BIT(HRESP));
1294 } 1293 }
@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data)
1318 1317
1319 /* Enable interrupts */ 1318 /* Enable interrupts */
1320 queue_writel(queue, IER, 1319 queue_writel(queue, IER,
1321 MACB_RX_INT_FLAGS | 1320 bp->rx_intr_mask |
1322 MACB_TX_INT_FLAGS | 1321 MACB_TX_INT_FLAGS |
1323 MACB_BIT(HRESP)); 1322 MACB_BIT(HRESP));
1324 } 1323 }
@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1372 (unsigned int)(queue - bp->queues), 1371 (unsigned int)(queue - bp->queues),
1373 (unsigned long)status); 1372 (unsigned long)status);
1374 1373
1375 if (status & MACB_RX_INT_FLAGS) { 1374 if (status & bp->rx_intr_mask) {
1376 /* There's no point taking any more interrupts 1375 /* There's no point taking any more interrupts
1377 * until we have processed the buffers. The 1376 * until we have processed the buffers. The
1378 * scheduling call may fail if the poll routine 1377 * scheduling call may fail if the poll routine
1379 * is already scheduled, so disable interrupts 1378 * is already scheduled, so disable interrupts
1380 * now. 1379 * now.
1381 */ 1380 */
1382 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); 1381 queue_writel(queue, IDR, bp->rx_intr_mask);
1383 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) 1382 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1384 queue_writel(queue, ISR, MACB_BIT(RCOMP)); 1383 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1385 1384
@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
1412 /* There is a hardware issue under heavy load where DMA can 1411 /* There is a hardware issue under heavy load where DMA can
1413 * stop, this causes endless "used buffer descriptor read" 1412 * stop, this causes endless "used buffer descriptor read"
1414 * interrupts but it can be cleared by re-enabling RX. See 1413 * interrupts but it can be cleared by re-enabling RX. See
1415 * the at91 manual, section 41.3.1 or the Zynq manual 1414 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
1416 * section 16.7.4 for details. 1415 * section 16.7.4 for details. RXUBR is only enabled for
1416 * these two versions.
1417 */ 1417 */
1418 if (status & MACB_BIT(RXUBR)) { 1418 if (status & MACB_BIT(RXUBR)) {
1419 ctrl = macb_readl(bp, NCR); 1419 ctrl = macb_readl(bp, NCR);
@@ -1738,12 +1738,8 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1738 *skb = nskb; 1738 *skb = nskb;
1739 } 1739 }
1740 1740
1741 if (padlen) { 1741 if (padlen > ETH_FCS_LEN)
1742 if (padlen >= ETH_FCS_LEN) 1742 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1743 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1744 else
1745 skb_trim(*skb, ETH_FCS_LEN - padlen);
1746 }
1747 1743
1748add_fcs: 1744add_fcs:
1749 /* set FCS to packet */ 1745 /* set FCS to packet */
@@ -2263,7 +2259,7 @@ static void macb_init_hw(struct macb *bp)
2263 2259
2264 /* Enable interrupts */ 2260 /* Enable interrupts */
2265 queue_writel(queue, IER, 2261 queue_writel(queue, IER,
2266 MACB_RX_INT_FLAGS | 2262 bp->rx_intr_mask |
2267 MACB_TX_INT_FLAGS | 2263 MACB_TX_INT_FLAGS |
2268 MACB_BIT(HRESP)); 2264 MACB_BIT(HRESP));
2269 } 2265 }
@@ -3911,6 +3907,7 @@ static const struct macb_config sama5d4_config = {
3911}; 3907};
3912 3908
3913static const struct macb_config emac_config = { 3909static const struct macb_config emac_config = {
3910 .caps = MACB_CAPS_NEEDS_RSTONUBR,
3914 .clk_init = at91ether_clk_init, 3911 .clk_init = at91ether_clk_init,
3915 .init = at91ether_init, 3912 .init = at91ether_init,
3916}; 3913};
@@ -3932,7 +3929,8 @@ static const struct macb_config zynqmp_config = {
3932}; 3929};
3933 3930
3934static const struct macb_config zynq_config = { 3931static const struct macb_config zynq_config = {
3935 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, 3932 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
3933 MACB_CAPS_NEEDS_RSTONUBR,
3936 .dma_burst_length = 16, 3934 .dma_burst_length = 16,
3937 .clk_init = macb_clk_init, 3935 .clk_init = macb_clk_init,
3938 .init = macb_init, 3936 .init = macb_init,
@@ -4087,6 +4085,10 @@ static int macb_probe(struct platform_device *pdev)
4087 macb_dma_desc_get_size(bp); 4085 macb_dma_desc_get_size(bp);
4088 } 4086 }
4089 4087
4088 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
4089 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
4090 bp->rx_intr_mask |= MACB_BIT(RXUBR);
4091
4090 mac = of_get_mac_address(np); 4092 mac = of_get_mac_address(np);
4091 if (mac) { 4093 if (mac) {
4092 ether_addr_copy(bp->dev->dev_addr, mac); 4094 ether_addr_copy(bp->dev->dev_addr, mac);
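
Two independent macb fixes above. The RXUBR re-enable-RX workaround becomes opt-in: only the at91rm9200 EMAC and Zynq configurations set MACB_CAPS_NEEDS_RSTONUBR, so other MACB/GEM versions stop taking used-buffer-descriptor-read interrupts at all, via the per-device rx_intr_mask built at probe time. Separately, macb_pad_and_fcs() stops trimming: reading the surrounding code, padlen appears to include the 4 FCS bytes appended later at add_fcs, so padlen <= ETH_FCS_LEN means no zero padding is needed, and the removed skb_trim(*skb, ETH_FCS_LEN - padlen) truncated a valid frame down to a few bytes. The surviving logic, for reference:

    /* pad only when more than the FCS room is missing */
    if (padlen > ETH_FCS_LEN)
            skb_put_zero(*skb, padlen - ETH_FCS_LEN);
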
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 5f03199a3acf..05f4a3b21e29 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -54,7 +54,6 @@ config CAVIUM_PTP
54 tristate "Cavium PTP coprocessor as PTP clock" 54 tristate "Cavium PTP coprocessor as PTP clock"
55 depends on 64BIT && PCI 55 depends on 64BIT && PCI
56 imply PTP_1588_CLOCK 56 imply PTP_1588_CLOCK
57 default y
58 ---help--- 57 ---help---
59 This driver adds support for the Precision Time Protocol Clocks and 58 This driver adds support for the Precision Time Protocol Clocks and
60 Timestamping coprocessor (PTP) found on Cavium processors. 59 Timestamping coprocessor (PTP) found on Cavium processors.
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index fcaf18fa3904..5b4d3badcb73 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -59,7 +59,7 @@ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
59 dmem->q_len = q_len; 59 dmem->q_len = q_len;
60 dmem->size = (desc_size * q_len) + align_bytes; 60 dmem->size = (desc_size * q_len) + align_bytes;
61 /* Save address, need it while freeing */ 61 /* Save address, need it while freeing */
62 dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size, 62 dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
63 &dmem->dma, GFP_KERNEL); 63 &dmem->dma, GFP_KERNEL);
64 if (!dmem->unalign_base) 64 if (!dmem->unalign_base)
65 return -ENOMEM; 65 return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 20b6e1b3f5e3..89db739b7819 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
620{ 620{
621 size_t len = nelem * elem_size; 621 size_t len = nelem * elem_size;
622 void *s = NULL; 622 void *s = NULL;
623 void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); 623 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
624 624
625 if (!p) 625 if (!p)
626 return NULL; 626 return NULL;
@@ -2381,7 +2381,7 @@ no_mem:
2381 lro_add_page(adap, qs, fl, 2381 lro_add_page(adap, qs, fl,
2382 G_RSPD_LEN(len), 2382 G_RSPD_LEN(len),
2383 flags & F_RSPD_EOP); 2383 flags & F_RSPD_EOP);
2384 goto next_fl; 2384 goto next_fl;
2385 } 2385 }
2386 2386
2387 skb = get_packet_pg(adap, fl, q, 2387 skb = get_packet_pg(adap, fl, q,
@@ -3214,11 +3214,13 @@ void t3_start_sge_timers(struct adapter *adap)
3214 for (i = 0; i < SGE_QSETS; ++i) { 3214 for (i = 0; i < SGE_QSETS; ++i) {
3215 struct sge_qset *q = &adap->sge.qs[i]; 3215 struct sge_qset *q = &adap->sge.qs[i];
3216 3216
3217 if (q->tx_reclaim_timer.function) 3217 if (q->tx_reclaim_timer.function)
3218 mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD); 3218 mod_timer(&q->tx_reclaim_timer,
3219 jiffies + TX_RECLAIM_PERIOD);
3219 3220
3220 if (q->rx_reclaim_timer.function) 3221 if (q->rx_reclaim_timer.function)
3221 mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD); 3222 mod_timer(&q->rx_reclaim_timer,
3223 jiffies + RX_RECLAIM_PERIOD);
3222 } 3224 }
3223} 3225}
3224 3226
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 080918af773c..0a9f2c596624 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1082,7 +1082,7 @@ int t3_check_fw_version(struct adapter *adapter)
1082 CH_WARN(adapter, "found newer FW version(%u.%u), " 1082 CH_WARN(adapter, "found newer FW version(%u.%u), "
1083 "driver compiled for version %u.%u\n", major, minor, 1083 "driver compiled for version %u.%u\n", major, minor,
1084 FW_VERSION_MAJOR, FW_VERSION_MINOR); 1084 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1085 return 0; 1085 return 0;
1086 } 1086 }
1087 return -EINVAL; 1087 return -EINVAL;
1088} 1088}
@@ -3619,7 +3619,7 @@ int t3_reset_adapter(struct adapter *adapter)
3619 3619
3620static int init_parity(struct adapter *adap) 3620static int init_parity(struct adapter *adap)
3621{ 3621{
3622 int i, err, addr; 3622 int i, err, addr;
3623 3623
3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY) 3624 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3625 return -EBUSY; 3625 return -EBUSY;
@@ -3806,6 +3806,6 @@ int t3_replay_prep_adapter(struct adapter *adapter)
3806 p->phy.ops->power_down(&p->phy, 1); 3806 p->phy.ops->power_down(&p->phy, 1);
3807 } 3807 }
3808 3808
3809return 0; 3809 return 0;
3810} 3810}
3811 3811
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 9f9d6cae39d5..58a039c3224a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -378,10 +378,10 @@ static void cxgb4_init_ptp_timer(struct adapter *adapter)
378 int err; 378 int err;
379 379
380 memset(&c, 0, sizeof(c)); 380 memset(&c, 0, sizeof(c));
381 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) | 381 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
382 FW_CMD_REQUEST_F | 382 FW_CMD_REQUEST_F |
383 FW_CMD_WRITE_F | 383 FW_CMD_WRITE_F |
384 FW_PTP_CMD_PORTID_V(0)); 384 FW_PTP_CMD_PORTID_V(0));
385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16)); 385 c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER; 386 c.u.scmd.sc = FW_PTP_SC_INIT_TIMER;
387 387
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 9a6065a3fa46..c041f44324db 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -78,7 +78,7 @@ static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx)
78 unsigned long flags; 78 unsigned long flags;
79 79
80 spin_lock_irqsave(&bmap->lock, flags); 80 spin_lock_irqsave(&bmap->lock, flags);
81 __clear_bit(msix_idx, bmap->msix_bmap); 81 __clear_bit(msix_idx, bmap->msix_bmap);
82 spin_unlock_irqrestore(&bmap->lock, flags); 82 spin_unlock_irqrestore(&bmap->lock, flags);
83} 83}
84 84
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b90188401d4a..fc0bc6458e84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
694{ 694{
695 size_t len = nelem * elem_size + stat_size; 695 size_t len = nelem * elem_size + stat_size;
696 void *s = NULL; 696 void *s = NULL;
697 void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); 697 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
698 698
699 if (!p) 699 if (!p)
700 return NULL; 700 return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e8c34292a0ec..2b03f6187a24 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3794,7 +3794,7 @@ int t4_load_phy_fw(struct adapter *adap,
3794 /* If we have version number support, then check to see if the adapter 3794 /* If we have version number support, then check to see if the adapter
3795 * already has up-to-date PHY firmware loaded. 3795 * already has up-to-date PHY firmware loaded.
3796 */ 3796 */
3797 if (phy_fw_version) { 3797 if (phy_fw_version) {
3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size); 3798 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver); 3799 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3800 if (ret < 0) 3800 if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 3007e1ac1e61..1d534f0baa69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
756 * Allocate the hardware ring and PCI DMA bus address space for said. 756 * Allocate the hardware ring and PCI DMA bus address space for said.
757 */ 757 */
758 size_t hwlen = nelem * hwsize + stat_size; 758 size_t hwlen = nelem * hwsize + stat_size;
759 void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); 759 void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
760 760
761 if (!hwring) 761 if (!hwring)
762 return NULL; 762 return NULL;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 60641e202534..9a7f70db20c7 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1434 * csum is correct or is zero. 1434 * csum is correct or is zero.
1435 */ 1435 */
1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && 1436 if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
1437 tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { 1437 tcp_udp_csum_ok && outer_csum_ok &&
1438 (ipv4_csum_ok || ipv6)) {
1438 skb->ip_summed = CHECKSUM_UNNECESSARY; 1439 skb->ip_summed = CHECKSUM_UNNECESSARY;
1439 skb->csum_level = encap; 1440 skb->csum_level = encap;
1440 } 1441 }
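
The enic RX checksum fix: hardware validates the TCP/UDP checksum for both address families, but ipv4_csum_ok can only ever be set for IPv4, since IPv6 has no IP-header checksum. The old condition therefore never set CHECKSUM_UNNECESSARY on IPv6 and forced a software re-checksum of every IPv6 packet. Flattened form of the new predicate:

    /* L4 checksum must be good either way; the IP-header check
     * applies only where such a checksum exists (IPv4).
     */
    bool csum_ok = tcp_udp_csum_ok && outer_csum_ok &&
                   (ipv4_csum_ok || ipv6);

    if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && csum_ok) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            skb->csum_level = encap;
    }
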
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 13430f75496c..f1a2da15dd0a 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de)
585 netif_dbg(de, tx_done, de->dev, 585 netif_dbg(de, tx_done, de->dev,
586 "tx done, slot %d\n", tx_tail); 586 "tx done, slot %d\n", tx_tail);
587 } 587 }
588 dev_kfree_skb_irq(skb); 588 dev_consume_skb_irq(skb);
589 } 589 }
590 590
591next: 591next:
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1e9d882c04ef..59a7f0b99069 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1808,9 +1808,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
1808 total_size = buf_len; 1808 total_size = buf_len;
1809 1809
1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; 1810 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1811 get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, 1811 get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
1812 get_fat_cmd.size, 1812 get_fat_cmd.size,
1813 &get_fat_cmd.dma, GFP_ATOMIC); 1813 &get_fat_cmd.dma, GFP_ATOMIC);
1814 if (!get_fat_cmd.va) 1814 if (!get_fat_cmd.va)
1815 return -ENOMEM; 1815 return -ENOMEM;
1816 1816
@@ -2302,8 +2302,8 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2302 return -EINVAL; 2302 return -EINVAL;
2303 2303
2304 cmd.size = sizeof(struct be_cmd_resp_port_type); 2304 cmd.size = sizeof(struct be_cmd_resp_port_type);
2305 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2305 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2306 GFP_ATOMIC); 2306 GFP_ATOMIC);
2307 if (!cmd.va) { 2307 if (!cmd.va) {
2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2308 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2309 return -ENOMEM; 2309 return -ENOMEM;
@@ -3066,8 +3066,8 @@ int lancer_fw_download(struct be_adapter *adapter,
3066 3066
3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3067 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3068 + LANCER_FW_DOWNLOAD_CHUNK; 3068 + LANCER_FW_DOWNLOAD_CHUNK;
3069 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, 3069 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3070 &flash_cmd.dma, GFP_KERNEL); 3070 GFP_KERNEL);
3071 if (!flash_cmd.va) 3071 if (!flash_cmd.va)
3072 return -ENOMEM; 3072 return -ENOMEM;
3073 3073
@@ -3184,8 +3184,8 @@ int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3184 } 3184 }
3185 3185
3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 3186 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3187 flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3187 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3188 GFP_KERNEL); 3188 GFP_KERNEL);
 	if (!flash_cmd.va)
 		return -ENOMEM;
 
@@ -3435,8 +3435,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
 		goto err;
 	}
 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				      GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -3522,9 +3522,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 
 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					     attribs_cmd.size,
-					     &attribs_cmd.dma, GFP_ATOMIC);
+	attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					    attribs_cmd.size,
+					    &attribs_cmd.dma, GFP_ATOMIC);
 	if (!attribs_cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
@@ -3699,10 +3699,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-						  get_mac_list_cmd.size,
-						  &get_mac_list_cmd.dma,
-						  GFP_ATOMIC);
+	get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+						 get_mac_list_cmd.size,
+						 &get_mac_list_cmd.dma,
+						 GFP_ATOMIC);
 
 	if (!get_mac_list_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -3829,8 +3829,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				      GFP_KERNEL);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -4035,8 +4035,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				      GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		status = -ENOMEM;
@@ -4089,9 +4089,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    extfat_cmd.size, &extfat_cmd.dma,
-					    GFP_ATOMIC);
+	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   extfat_cmd.size, &extfat_cmd.dma,
+					   GFP_ATOMIC);
 	if (!extfat_cmd.va)
 		return -ENOMEM;
 
@@ -4127,9 +4127,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 
 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    extfat_cmd.size, &extfat_cmd.dma,
-					    GFP_ATOMIC);
+	extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   extfat_cmd.size, &extfat_cmd.dma,
+					   GFP_ATOMIC);
 
 	if (!extfat_cmd.va) {
 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -4354,8 +4354,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				      GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 		status = -ENOMEM;
@@ -4452,8 +4452,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				      GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -4539,8 +4539,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 
 	memset(&cmd, 0, sizeof(struct be_dma_mem));
 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				      GFP_ATOMIC);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);
 	if (!cmd.va)
 		return -ENOMEM;
 
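All of the be_cmds.c hunks above are the same mechanical conversion: once dma_alloc_coherent() started returning zeroed memory, the dma_zalloc_coherent() wrapper became redundant and was phased out tree-wide, so each call site drops the "z" and re-wraps its continuation lines. A minimal sketch of the pattern (dev, size and handle are placeholders, not names from this patch):

	/* before: explicit zeroing wrapper */
	buf = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);

	/* after: dma_alloc_coherent() already returns zeroed memory */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;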
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 3f6749fc889f..4c218341c51b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -274,8 +274,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
 	int status = 0;
 
 	read_cmd.size = LANCER_READ_FILE_CHUNK;
-	read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
-					  &read_cmd.dma, GFP_ATOMIC);
+	read_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, read_cmd.size,
+					 &read_cmd.dma, GFP_ATOMIC);
 
 	if (!read_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -815,7 +815,7 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	}
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+	cmd.va = dma_alloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -851,9 +851,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
 	};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-	ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    ddrdma_cmd.size, &ddrdma_cmd.dma,
-					    GFP_KERNEL);
+	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   ddrdma_cmd.size, &ddrdma_cmd.dma,
+					   GFP_KERNEL);
 	if (!ddrdma_cmd.va)
 		return -ENOMEM;
 
@@ -1014,9 +1014,9 @@ static int be_read_eeprom(struct net_device *netdev,
 
 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-	eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
-					    eeprom_cmd.size, &eeprom_cmd.dma,
-					    GFP_KERNEL);
+	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+					   eeprom_cmd.size, &eeprom_cmd.dma,
+					   GFP_KERNEL);
 
 	if (!eeprom_cmd.va)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 852f5bfe5f6d..d5026909dec5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -167,8 +167,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
-				      GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
+				     &mem->dma, GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
@@ -5766,9 +5766,9 @@ static int be_drv_init(struct be_adapter *adapter)
 	int status = 0;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
-						 &mbox_mem_alloc->dma,
-						 GFP_KERNEL);
+	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
+						&mbox_mem_alloc->dma,
+						GFP_KERNEL);
 	if (!mbox_mem_alloc->va)
 		return -ENOMEM;
 
@@ -5777,8 +5777,8 @@ static int be_drv_init(struct be_adapter *adapter)
 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
 
 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
-	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
-					    &rx_filter->dma, GFP_KERNEL);
+	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
+					   &rx_filter->dma, GFP_KERNEL);
 	if (!rx_filter->va) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -5792,8 +5792,8 @@ static int be_drv_init(struct be_adapter *adapter)
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
 	else
 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
-	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
-					    &stats_cmd->dma, GFP_KERNEL);
+	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
+					   &stats_cmd->dma, GFP_KERNEL);
 	if (!stats_cmd->va) {
 		status = -ENOMEM;
 		goto free_rx_filter;
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 4d673225ed3e..3e5e97186fc4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -935,16 +935,14 @@ static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
 		return -ENOMEM;
 
 	/* Allocate descriptors */
-	priv->rxdes = dma_zalloc_coherent(priv->dev,
-					  MAX_RX_QUEUE_ENTRIES *
-					  sizeof(struct ftgmac100_rxdes),
-					  &priv->rxdes_dma, GFP_KERNEL);
+	priv->rxdes = dma_alloc_coherent(priv->dev,
+					 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
+					 &priv->rxdes_dma, GFP_KERNEL);
 	if (!priv->rxdes)
 		return -ENOMEM;
-	priv->txdes = dma_zalloc_coherent(priv->dev,
-					  MAX_TX_QUEUE_ENTRIES *
-					  sizeof(struct ftgmac100_txdes),
-					  &priv->txdes_dma, GFP_KERNEL);
+	priv->txdes = dma_alloc_coherent(priv->dev,
+					 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
+					 &priv->txdes_dma, GFP_KERNEL);
 	if (!priv->txdes)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 084f24daf2b5..2a0e820526dc 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -734,10 +734,9 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
 {
 	int i;
 
-	priv->descs = dma_zalloc_coherent(priv->dev,
-					  sizeof(struct ftmac100_descs),
-					  &priv->descs_dma_addr,
-					  GFP_KERNEL);
+	priv->descs = dma_alloc_coherent(priv->dev,
+					 sizeof(struct ftmac100_descs),
+					 &priv->descs_dma_addr, GFP_KERNEL);
 	if (!priv->descs)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f53090cde041..dfebc30c4841 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2051,6 +2051,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 	bool nonlinear = skb_is_nonlinear(skb);
 	struct rtnl_link_stats64 *percpu_stats;
 	struct dpaa_percpu_priv *percpu_priv;
+	struct netdev_queue *txq;
 	struct dpaa_priv *priv;
 	struct qm_fd fd;
 	int offset = 0;
@@ -2100,6 +2101,11 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 	if (unlikely(err < 0))
 		goto skb_to_fd_failed;
 
+	txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+	/* LLTX requires to do our own update of trans_start */
+	txq->trans_start = jiffies;
+
 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
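The dpaa hunk above matters because drivers flagged NETIF_F_LLTX bypass the core's per-queue TX locking, and with it the stack's own refresh of the queue's trans_start; if the driver never updates that timestamp itself, the TX watchdog can misread an idle-looking queue as hung. A hedged sketch of the idiom in a generic ndo_start_xmit (foo_start_xmit and the queue-selection call are illustrative, not from this patch):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* LLTX drivers must refresh trans_start themselves so the
	 * TX watchdog never sees a stale timestamp.
	 */
	txq->trans_start = jiffies;

	/* ... hand the frame to the hardware ... */
	return NETDEV_TX_OK;
}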
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index 809a155eb193..f6d244c663fd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -9,8 +9,9 @@ config FSL_DPAA2_ETH
 
 config FSL_DPAA2_PTP_CLOCK
 	tristate "Freescale DPAA2 PTP Clock"
-	depends on FSL_DPAA2_ETH && POSIX_TIMERS
-	select PTP_1588_CLOCK
+	depends on FSL_DPAA2_ETH
+	imply PTP_1588_CLOCK
+	default y
 	help
 	  This driver adds support for using the DPAA2 1588 timer module
 	  as a PTP clock.
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ae0f88bce9aa..697c2427f2b7 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
 	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
 	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+static __u32 fec_enet_register_version = 2;
 static u32 fec_enet_register_offset[] = {
 	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
 	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = {
 	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
 };
 #else
+static __u32 fec_enet_register_version = 1;
 static u32 fec_enet_register_offset[] = {
 	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
 	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
 	u32 *buf = (u32 *)regbuf;
 	u32 i, off;
 
+	regs->version = fec_enet_register_version;
+
 	memset(buf, 0, regs->len);
 
 	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
@@ -3467,7 +3471,7 @@ fec_probe(struct platform_device *pdev)
 	if (ret)
 		goto failed_clk_ipg;
 
-	fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
+	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
 	if (!IS_ERR(fep->reg_phy)) {
 		ret = regulator_enable(fep->reg_phy);
 		if (ret) {
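The regulator change in fec_probe() is about failure semantics: devm_regulator_get() hands back a dummy regulator when no "phy" supply is described, so IS_ERR() never fires and the driver ends up toggling a stub; devm_regulator_get_optional() instead returns an error pointer (typically -ENODEV) for an absent supply, which the existing IS_ERR() branch treats as "no PHY regulator, skip it". A sketch of that contract (hedged, with error handling trimmed):

	struct regulator *reg;
	int ret;

	/* absent supply => ERR_PTR(-ENODEV), not a dummy regulator */
	reg = devm_regulator_get_optional(&pdev->dev, "phy");
	if (IS_ERR(reg)) {
		reg = NULL;		/* run without a PHY regulator */
	} else {
		ret = regulator_enable(reg);
		if (ret)
			return ret;	/* a real supply that fails is fatal */
	}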
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index b90bab72efdb..c1968b3ecec8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 		dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
 				 DMA_TO_DEVICE);
 
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 	}
 	spin_unlock(&priv->lock);
 
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c3d539e209ed..eb3e65e8868f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
 	u16 i, j;
 	u8 __iomem *bd;
 
+	netdev_reset_queue(ugeth->ndev);
+
 	ug_info = ugeth->ug_info;
 	uf_info = &ug_info->uf_info;
 
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 471805ea363b..e5d853b7b454 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1006,8 +1006,8 @@ static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
 
 	for (i = 0; i < QUEUE_NUMS; i++) {
 		size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
-		virt_addr = dma_zalloc_coherent(dev, size, &phys_addr,
-						GFP_KERNEL);
+		virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+					       GFP_KERNEL);
 		if (virt_addr == NULL)
 			goto error_free_pool;
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ad1779fc410e..a78bfafd212c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
 	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
 	int i;
 
-	vf_cb->mac_cb = NULL;
-
-	kfree(vf_cb);
-
 	for (i = 0; i < handle->q_num; i++)
 		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+	kfree(vf_cb);
 }
 
 static int hns_ae_wait_flow_down(struct hnae_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 3b9e74be5fbd..b8155f5e71b4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
 	dsaf_dev = dev_get_drvdata(&pdev->dev);
 	if (!dsaf_dev) {
 		dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+		put_device(&pdev->dev);
 		return -ENODEV;
 	}
 
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
 	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
 		dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
 			dsaf_dev->ae_dev.name);
+		put_device(&pdev->dev);
 		return -ENODEV;
 	}
 
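Both put_device() additions balance the reference taken when the dsaf platform device was looked up from its fwnode earlier in hns_dsaf_roce_reset(); returning early without dropping it leaks the ref. The general shape, sketched with an illustrative lookup helper (the exact helper used by this driver is not shown in the hunk):

	struct platform_device *pdev;

	pdev = of_find_device_by_node(np);	/* lookup takes a device reference */
	if (!pdev)
		return -ENODEV;

	dsaf_dev = dev_get_drvdata(&pdev->dev);
	if (!dsaf_dev) {
		put_device(&pdev->dev);		/* every early return must drop it */
		return -ENODEV;
	}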
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 5748d3f722f6..60e7d7ae3787 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1170,6 +1170,13 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (!h->phy_dev)
 		return 0;
 
+	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+	linkmode_copy(phy_dev->advertising, phy_dev->supported);
+
+	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+		phy_dev->autoneg = false;
+
 	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
 		phy_dev->dev_flags = 0;
 
@@ -1181,16 +1188,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (unlikely(ret))
 		return -ENODEV;
 
-	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
-	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
-	linkmode_copy(phy_dev->advertising, phy_dev->supported);
-
-	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
-		phy_dev->autoneg = false;
-
-	if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
-		phy_stop(phy_dev);
-
 	return 0;
 }
 
@@ -2421,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 out_notify_fail:
 	(void)cancel_work_sync(&priv->service_task);
 out_read_prop_fail:
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
 	free_netdev(ndev);
 	return ret;
 }
@@ -2450,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
 	set_bit(NIC_STATE_REMOVING, &priv->state);
 	(void)cancel_work_sync(&priv->service_task);
 
+	/* safe for ACPI FW */
+	of_node_put(to_of_node(priv->fwnode));
+
 	free_netdev(ndev);
 	return 0;
 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 8e9b95871d30..ce15d2350db9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
  */
 static int hns_nic_nway_reset(struct net_device *netdev)
 {
-	int ret = 0;
 	struct phy_device *phy = netdev->phydev;
 
-	if (netif_running(netdev)) {
-		/* if autoneg is disabled, don't restart auto-negotiation */
-		if (phy && phy->autoneg == AUTONEG_ENABLE)
-			ret = genphy_restart_aneg(phy);
-	}
+	if (!netif_running(netdev))
+		return 0;
 
-	return ret;
+	if (!phy)
+		return -EOPNOTSUPP;
+
+	if (phy->autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+
+	return genphy_restart_aneg(phy);
 }
 
 static u32
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 07cd58798083..1bf7a5f116a0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2041,9 +2041,8 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
 {
 	int size = ring->desc_num * sizeof(ring->desc[0]);
 
-	ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
-					 &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8af0cef5609b..e483a6e730e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -39,9 +39,8 @@ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclge_desc);
 
-	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
-					 size, &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index d5765c8cf3a3..4e78e8812a04 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -115,9 +115,8 @@ static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
 
-	ring->desc = dma_zalloc_coherent(cmq_ring_to_dev(ring),
-					 size, &ring->desc_dma_addr,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+					&ring->desc_dma_addr, GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e08452d8c..baf5cc251f32 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
 		}
 
 		hns_mdio_cmd_write(mdio_dev, is_c45,
-				   MDIO_C45_WRITE_ADDR, phy_id, devad);
+				   MDIO_C45_READ, phy_id, devad);
 	}
 
 	/* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
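The hns_mdio fix is about the clause-45 access sequence: a C45 read is two bus operations, an address write (MDIO_C45_WRITE_ADDR) that latches the register address, then a read opcode (MDIO_C45_READ) that actually fetches the data. The old code issued WRITE_ADDR for both steps, so the second operation never performed a read. Schematically (a hedged sketch; the polling between steps is elided):

	/* step A: latch the register address on (phy_id, devad) */
	hns_mdio_cmd_write(mdio_dev, is_c45, MDIO_C45_WRITE_ADDR, phy_id, devad);
	/* ... write the address, wait for mdio_start to clear ... */

	/* step B: issue the read on the same (phy_id, devad) pair */
	hns_mdio_cmd_write(mdio_dev, is_c45, MDIO_C45_READ, phy_id, devad);
	/* ... wait again, then fetch the result from the read-data register */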
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index c40603a183df..b4fefb4c3064 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -613,8 +613,8 @@ static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
 	u8 *cmd_vaddr;
 	int err = 0;
 
-	cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
-					&cmd_paddr, GFP_KERNEL);
+	cmd_vaddr = dma_alloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
+				       &cmd_paddr, GFP_KERNEL);
 	if (!cmd_vaddr) {
 		dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
 		return -ENOMEM;
@@ -663,8 +663,8 @@ static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
 	dma_addr_t node_paddr;
 	int err;
 
-	node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
-				   &node_paddr, GFP_KERNEL);
+	node = dma_alloc_coherent(&pdev->dev, chain->cell_size, &node_paddr,
+				  GFP_KERNEL);
 	if (!node) {
 		dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
 		return -ENOMEM;
@@ -821,10 +821,10 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
 	if (!chain->cell_ctxt)
 		return -ENOMEM;
 
-	chain->wb_status = dma_zalloc_coherent(&pdev->dev,
-					       sizeof(*chain->wb_status),
-					       &chain->wb_status_paddr,
-					       GFP_KERNEL);
+	chain->wb_status = dma_alloc_coherent(&pdev->dev,
+					      sizeof(*chain->wb_status),
+					      &chain->wb_status_paddr,
+					      GFP_KERNEL);
 	if (!chain->wb_status) {
 		dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index 7cb8b9b94726..683e67515016 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -593,10 +593,10 @@ static int alloc_eq_pages(struct hinic_eq *eq)
 	}
 
 	for (pg = 0; pg < eq->num_pages; pg++) {
-		eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
-							eq->page_size,
-							&eq->dma_addr[pg],
-							GFP_KERNEL);
+		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
+						       eq->page_size,
+						       &eq->dma_addr[pg],
+						       GFP_KERNEL);
 		if (!eq->virt_addr[pg]) {
 			err = -ENOMEM;
 			goto err_dma_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 8e5897669a3a..a322a22d9357 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -355,9 +355,9 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
 		goto err_sq_db;
 	}
 
-	ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
-					   &func_to_io->ci_dma_base,
-					   GFP_KERNEL);
+	ci_addr_base = dma_alloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
+					  &func_to_io->ci_dma_base,
+					  GFP_KERNEL);
 	if (!ci_addr_base) {
 		dev_err(&pdev->dev, "Failed to allocate CI area\n");
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index bbf9bdd0ee3e..d62cf509646a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -336,9 +336,9 @@ static int alloc_rq_cqe(struct hinic_rq *rq)
 		goto err_cqe_dma_arr_alloc;
 
 	for (i = 0; i < wq->q_depth; i++) {
-		rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
-						 sizeof(*rq->cqe[i]),
-						 &rq->cqe_dma[i], GFP_KERNEL);
+		rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
+						sizeof(*rq->cqe[i]),
+						&rq->cqe_dma[i], GFP_KERNEL);
 		if (!rq->cqe[i])
 			goto err_cqe_alloc;
 	}
@@ -415,8 +415,8 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
 
 	/* HW requirements: Must be at least 32 bit */
 	pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
-	rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
-					       &rq->pi_dma_addr, GFP_KERNEL);
+	rq->pi_virt_addr = dma_alloc_coherent(&pdev->dev, pi_size,
+					      &rq->pi_dma_addr, GFP_KERNEL);
 	if (!rq->pi_virt_addr) {
 		dev_err(&pdev->dev, "Failed to allocate PI address\n");
 		err = -ENOMEM;
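The "at least 32 bit" sizing above leans on ALIGN(), which rounds its first argument up to the next multiple of the second (a power of two). Worked values, as a sketch:

	/* ALIGN(x, a): round x up to a multiple of a */
	ALIGN(2, sizeof(u32));	/* -> 4: a 2-byte PI still occupies a 32-bit slot */
	ALIGN(4, sizeof(u32));	/* -> 4: already aligned, unchanged */
	ALIGN(5, 4);		/* -> 8 */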
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 1dfa7eb05c10..cb66e7024659 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -114,8 +114,8 @@ static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
 	struct pci_dev *pdev = hwif->pdev;
 	dma_addr_t dma_addr;
 
-	*vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
-				     GFP_KERNEL);
+	*vaddr = dma_alloc_coherent(&pdev->dev, page_sz, &dma_addr,
+				    GFP_KERNEL);
 	if (!*vaddr) {
 		dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
 		return -ENOMEM;
@@ -482,8 +482,8 @@ static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
 		u64 *paddr = &wq->block_vaddr[i];
 		dma_addr_t dma_addr;
 
-		*vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
-					     &dma_addr, GFP_KERNEL);
+		*vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size,
+					    &dma_addr, GFP_KERNEL);
 		if (!*vaddr) {
 			dev_err(&pdev->dev, "Failed to allocate wq page\n");
 			goto err_alloc_wq_pages;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index d719668a6684..92929750f832 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
 				dev->stats.tx_aborted_errors++;
 			}
 
-			dev_kfree_skb_irq(skb);
+			dev_consume_skb_irq(skb);
 
 			tx_cmd->cmd.command = 0; /* Mark free */
 			break;
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fff09dcf9e34..787d5aca5278 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -636,8 +636,8 @@ static int mal_probe(struct platform_device *ofdev)
 	bd_size = sizeof(struct mal_descriptor) *
 		(NUM_TX_BUFF * mal->num_tx_chans +
 		 NUM_RX_BUFF * mal->num_rx_chans);
-	mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
-					   GFP_KERNEL);
+	mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+					  GFP_KERNEL);
 	if (mal->bd_virt == NULL) {
 		err = -ENOMEM;
 		goto fail_unmap;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 098d8764c0ea..dd71d5db7274 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1313,7 +1313,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	unsigned long lpar_rc;
 	u16 mss = 0;
 
-restart_poll:
 	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1401,7 +1400,6 @@ restart_poll:
 		    napi_reschedule(napi)) {
 			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
 					       VIO_IRQ_DISABLE);
-			goto restart_poll;
 		}
 	}
 
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 31fb76ee9d82..a1246e89aad4 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -159,7 +159,7 @@ config IXGBE
 	tristate "Intel(R) 10GbE PCI Express adapters support"
 	depends on PCI
 	select MDIO
-	select MDIO_DEVICE
+	select PHYLIB
 	imply PTP_1588_CLOCK
 	---help---
 	  This driver supports Intel(R) 10GbE PCI Express family of
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2569a168334c..a41008523c98 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -993,8 +993,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
-	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-					 GFP_KERNEL);
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_KERNEL);
 	if (!txdr->desc) {
 		ret_val = 2;
 		goto err_nomem;
@@ -1051,8 +1051,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	}
 
 	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
-	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					 GFP_KERNEL);
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					GFP_KERNEL);
 	if (!rxdr->desc) {
 		ret_val = 6;
 		goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 308c006cb41d..189f231075c2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2305,8 +2305,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
 {
 	struct pci_dev *pdev = adapter->pdev;
 
-	ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
-					 GFP_KERNEL);
+	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					GFP_KERNEL);
 	if (!ring->desc)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4d40878e395a..f52e2c46e6a7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -109,8 +109,8 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
 	struct i40e_pf *pf = (struct i40e_pf *)hw->back;
 
 	mem->size = ALIGN(size, alignment);
-	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
-				      &mem->pa, GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index fe1592ae8769..ca54e268d157 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
 	/* OS defined structs */
 	struct pci_dev *pdev;
 
-	struct mutex stats64_lock;
+	spinlock_t stats64_lock;
 	struct rtnl_link_stats64 stats64;
 
 	/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7426060b678f..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 	int i, j;
 	char *p;
 
-	mutex_lock(&adapter->stats64_lock);
+	spin_lock(&adapter->stats64_lock);
 	igb_update_stats(adapter);
 
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 		i += IGB_RX_QUEUE_STATS_LEN;
 	}
-	mutex_unlock(&adapter->stats64_lock);
+	spin_unlock(&adapter->stats64_lock);
 }
 
 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 87bdf1604ae2..7137e7f9c7f3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2203,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
 	del_timer_sync(&adapter->phy_info_timer);
 
 	/* record the stats before reset*/
-	mutex_lock(&adapter->stats64_lock);
+	spin_lock(&adapter->stats64_lock);
 	igb_update_stats(adapter);
-	mutex_unlock(&adapter->stats64_lock);
+	spin_unlock(&adapter->stats64_lock);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -3840,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
 	spin_lock_init(&adapter->nfc_lock);
-	mutex_init(&adapter->stats64_lock);
+	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
 	switch (hw->mac.type) {
 	case e1000_82576:
@@ -5406,9 +5406,9 @@ no_wait:
 		}
 	}
 
-	mutex_lock(&adapter->stats64_lock);
+	spin_lock(&adapter->stats64_lock);
 	igb_update_stats(adapter);
-	mutex_unlock(&adapter->stats64_lock);
+	spin_unlock(&adapter->stats64_lock);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6235,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	mutex_lock(&adapter->stats64_lock);
+	spin_lock(&adapter->stats64_lock);
 	igb_update_stats(adapter);
 	memcpy(stats, &adapter->stats64, sizeof(*stats));
-	mutex_unlock(&adapter->stats64_lock);
+	spin_unlock(&adapter->stats64_lock);
 }
 
 /**
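The igb hunks replace the stats64 mutex with a spinlock. The usual motivation for this kind of swap (hedged, since the commit message is not reproduced here) is that the stats path can be entered from contexts that must not sleep, and mutex_lock() may sleep while spin_lock() only busy-waits:

	/* in a non-sleeping context (e.g. under another spinlock or RCU): */
	mutex_lock(&adapter->stats64_lock);	/* may sleep: potential bug here */

	/* a spinlock never sleeps, so the same section becomes safe: */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);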
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1d4d1686909a..e5ac2d3fd816 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -680,8 +680,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
 	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
 	txdr->size = ALIGN(txdr->size, 4096);
 
-	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
-					 GFP_KERNEL);
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_KERNEL);
 	if (!txdr->desc) {
 		vfree(txdr->buffer_info);
 		return -ENOMEM;
@@ -763,8 +763,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
 	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
 	rxdr->size = ALIGN(rxdr->size, 4096);
 
-	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-					 GFP_KERNEL);
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					GFP_KERNEL);
 
 	if (!rxdr->desc) {
 		vfree(rxdr->buffer_info);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e0875476a780..16066c2d5b3a 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2044,9 +2044,9 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
 	u32 txq_dma;
 
 	/* Allocate memory for TX descriptors */
-	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
-				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
-				&aggr_txq->descs_dma, GFP_KERNEL);
+	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+				&aggr_txq->descs_dma, GFP_KERNEL);
 	if (!aggr_txq->descs)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 742f0c1f60df..6d55e3d0b7ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -825,7 +825,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!cgx->cgx_cmd_workq) {
 		dev_err(dev, "alloc workqueue failed for cgx cmd");
 		err = -ENOMEM;
-		goto err_release_regions;
+		goto err_free_irq_vectors;
 	}
 
 	list_add(&cgx->cgx_list, &cgx_list);
@@ -841,6 +841,8 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_release_lmac:
 	cgx_lmac_exit(cgx);
 	list_del(&cgx->cgx_list);
+err_free_irq_vectors:
+	pci_free_irq_vectors(pdev);
 err_release_regions:
 	pci_release_regions(pdev);
 err_disable_device:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index ec50a21c5aaf..e332e82fc066 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -64,7 +64,7 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q,
 
 	qmem->entry_sz = entry_sz;
 	qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN;
-	qmem->base = dma_zalloc_coherent(dev, qmem->alloc_sz,
-					 &qmem->iova, GFP_KERNEL);
+	qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz,
+					&qmem->iova, GFP_KERNEL);
 	if (!qmem->base)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 0bd4351b2a49..f8a6d6e3cb7a 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -557,9 +557,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
 	 * table is full.
 	 */
 	if (!pep->htpr) {
-		pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
-						HASH_ADDR_TABLE_SIZE,
-						&pep->htpr_dma, GFP_KERNEL);
+		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
+					       HASH_ADDR_TABLE_SIZE,
+					       &pep->htpr_dma, GFP_KERNEL);
 		if (!pep->htpr)
 			return -ENOMEM;
 	} else {
@@ -1044,9 +1044,9 @@ static int rxq_init(struct net_device *dev)
 	pep->rx_desc_count = 0;
 	size = pep->rx_ring_size * sizeof(struct rx_desc);
 	pep->rx_desc_area_size = size;
-	pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
-						  &pep->rx_desc_dma,
-						  GFP_KERNEL);
+	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+						 &pep->rx_desc_dma,
+						 GFP_KERNEL);
 	if (!pep->p_rx_desc_area)
 		goto out;
 
@@ -1103,9 +1103,9 @@ static int txq_init(struct net_device *dev)
 	pep->tx_desc_count = 0;
 	size = pep->tx_ring_size * sizeof(struct tx_desc);
 	pep->tx_desc_area_size = size;
-	pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
-						  &pep->tx_desc_dma,
-						  GFP_KERNEL);
+	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
+						 &pep->tx_desc_dma,
+						 GFP_KERNEL);
 	if (!pep->p_tx_desc_area)
 		goto out;
 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 04fd1f135011..654ac534b10e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	memset(p, 0, regs->len);
 	memcpy_fromio(p, io, B3_RAM_ADDR);
 
-	memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
-		      regs->len - B3_RI_WTO_R1);
+	if (regs->len > B3_RI_WTO_R1) {
+		memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+			      regs->len - B3_RI_WTO_R1);
+	}
 }
 
 /* Wake on Lan only supported on Yukon chips with rev 1 or above */
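The skge guard is an unsigned-underflow fix: regs->len is a u32 and B3_RI_WTO_R1 is a register offset, so a dump request shorter than the offset makes regs->len - B3_RI_WTO_R1 wrap to a huge positive count and memcpy_fromio() runs far past both buffers. Sketch of the failure mode (the offset value below is illustrative, not the real B3_RI_WTO_R1):

	u32 len = 16;		/* caller-requested dump length */
	u32 off = 0x180;	/* illustrative register-block offset */

	u32 n = len - off;	/* wraps to ~4 billion instead of going negative */

	if (len > off)		/* the fix: copy the tail only when it exists */
		memcpy_fromio(p + off, io + off, len - off);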
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 399f565dd85a..49f926b7a91c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -258,11 +258,6 @@ static void mtk_phy_link_adjust(struct net_device *dev)
 
 	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 
-	if (dev->phydev->link)
-		netif_carrier_on(dev);
-	else
-		netif_carrier_off(dev);
-
 	if (!of_phy_is_fixed_link(mac->of_node))
 		phy_print_status(dev->phydev);
 }
@@ -347,17 +342,6 @@ static int mtk_phy_connect(struct net_device *dev)
 	if (mtk_phy_connect_node(eth, mac, np))
 		goto err_phy;
 
-	dev->phydev->autoneg = AUTONEG_ENABLE;
-	dev->phydev->speed = 0;
-	dev->phydev->duplex = 0;
-
-	phy_set_max_speed(dev->phydev, SPEED_1000);
-	phy_support_asym_pause(dev->phydev);
-	linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
-	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
-			 dev->phydev->advertising);
-	phy_start_aneg(dev->phydev);
-
 	of_node_put(np);
 
 	return 0;
@@ -598,10 +582,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 	dma_addr_t dma_addr;
 	int i;
 
-	eth->scratch_ring = dma_zalloc_coherent(eth->dev,
-						cnt * sizeof(struct mtk_tx_dma),
-						&eth->phy_scratch_ring,
-						GFP_ATOMIC);
+	eth->scratch_ring = dma_alloc_coherent(eth->dev,
+					       cnt * sizeof(struct mtk_tx_dma),
+					       &eth->phy_scratch_ring,
+					       GFP_ATOMIC);
 	if (unlikely(!eth->scratch_ring))
 		return -ENOMEM;
 
@@ -1213,8 +1197,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 	if (!ring->buf)
 		goto no_tx_mem;
 
-	ring->dma = dma_zalloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
-					&ring->phys, GFP_ATOMIC);
+	ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+				       &ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		goto no_tx_mem;
 
@@ -1310,9 +1294,9 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 		return -ENOMEM;
 	}
 
-	ring->dma = dma_zalloc_coherent(eth->dev,
-					rx_dma_size * sizeof(*ring->dma),
-					&ring->phys, GFP_ATOMIC);
+	ring->dma = dma_alloc_coherent(eth->dev,
+				       rx_dma_size * sizeof(*ring->dma),
+				       &ring->phys, GFP_ATOMIC);
 	if (!ring->dma)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 9af34e03892c..dbc483e4a2ef 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -584,8 +584,8 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
 	buf->npages = 1;
 	buf->page_shift = get_order(size) + PAGE_SHIFT;
 	buf->direct.buf =
-		dma_zalloc_coherent(&dev->persist->pdev->dev,
-				    size, &t, GFP_KERNEL);
+		dma_alloc_coherent(&dev->persist->pdev->dev, size, &t,
+				   GFP_KERNEL);
 	if (!buf->direct.buf)
 		return -ENOMEM;
 
@@ -624,8 +624,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 
 	for (i = 0; i < buf->nbufs; ++i) {
 		buf->page_list[i].buf =
-			dma_zalloc_coherent(&dev->persist->pdev->dev,
-					    PAGE_SIZE, &t, GFP_KERNEL);
+			dma_alloc_coherent(&dev->persist->pdev->dev,
+					   PAGE_SIZE, &t, GFP_KERNEL);
 		if (!buf->page_list[i].buf)
 			goto err_free;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index db909b6069b5..65f8a4b6ed0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -306,14 +306,16 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
 
 	if (entries_per_copy < entries) {
 		for (i = 0; i < entries / entries_per_copy; i++) {
-			err = copy_to_user(buf, init_ents, PAGE_SIZE);
+			err = copy_to_user((void __user *)buf, init_ents, PAGE_SIZE) ?
+				-EFAULT : 0;
 			if (err)
 				goto out;
 
 			buf += PAGE_SIZE;
 		}
 	} else {
-		err = copy_to_user(buf, init_ents, entries * cqe_size);
+		err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+			-EFAULT : 0;
 	}
 
 out:
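The cq.c change corrects a classic copy_to_user() misuse: copy_to_user() returns the number of bytes left uncopied (0 on success), never a negative errno, so storing that count in err and returning it would hand callers a positive value where 0/-EFAULT is expected. The conventional idiom (ubuf, kbuf and len are placeholders):

	/* copy_to_user() returns bytes NOT copied, so test and translate */
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return 0;

The mlx4 patch keeps its err variable across both branches, hence the equivalent "? -EFAULT : 0" form.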
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9a0881cb7f51..6c01314e87b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 }
 #endif

+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
 /* We reach this function only after checking that any of
  * the (IPv4 | IPv6) bits are set in cqe->status.
  */
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
                      netdev_features_t dev_features)
 {
        __wsum hw_checksum = 0;
+       void *hdr;
+
+       /* CQE csum doesn't cover padding octets in short ethernet
+        * frames. And the pad field is appended prior to calculating
+        * and appending the FCS field.
+        *
+        * Detecting these padded frames requires to verify and parse
+        * IP headers, so we simply force all those small frames to skip
+        * checksum complete.
+        */
+       if (short_frame(skb->len))
+               return -EINVAL;

-       void *hdr = (u8 *)va + sizeof(struct ethhdr);
-
+       hdr = (u8 *)va + sizeof(struct ethhdr);
        hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

        if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -819,6 +832,11 @@ xdp_drop_no_cnt:
                skb_record_rx_queue(skb, cq_ring);

                if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       /* TODO: For IP non TCP/UDP packets when csum complete is
+                        * not an option (not supported or any other reason) we can
+                        * actually check cqe IPOK status bit and report
+                        * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
+                        */
                        if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
                                                       MLX4_CQE_STATUS_UDP)) &&
                            (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
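For the short_frame() guard above, the arithmetic is: ETH_ZLEN (60, the minimum frame length excluding FCS) plus ETH_FCS_LEN (4) gives 64 bytes, so any received skb of 64 bytes or less may have been padded up to the Ethernet minimum. Per the patch's own comment, the CQE checksum does not cover those pad octets, so a checksum-complete value derived from it would be wrong for padded frames; since detecting padding would require parsing IP headers, the driver opts all such small frames out of checksum complete. A reduced sketch of the guard in a receive path (names are illustrative):

    #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

    if (short_frame(skb->len))
            return -EINVAL; /* let the stack verify this frame itself */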
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7df728f1e5b5..6e501af0e532 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -2067,9 +2067,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 {
        struct mlx4_cmd_mailbox *mailbox;
        __be32 *outbox;
+       u64 qword_field;
        u32 dword_field;
-       int err;
+       u16 word_field;
        u8 byte_field;
+       int err;
        static const u8 a0_dmfs_query_hw_steering[] = {
                [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
                [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -2097,19 +2099,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,

        /* QPC/EEC/CQC/EQC/RDMARC attributes */

-       MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
-       MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
-       MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
-       MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
-       MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
-       MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
-       MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
-       MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
-       MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
-       MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
-       MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+       MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+       param->qpc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+       param->log_num_qps = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+       param->srqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+       param->log_num_srqs = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+       param->cqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+       param->log_num_cqs = byte_field & 0x1f;
+       MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+       param->altc_base = qword_field;
+       MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+       param->auxc_base = qword_field;
+       MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+       param->eqc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+       param->log_num_eqs = byte_field & 0x1f;
+       MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+       param->num_sys_eqs = word_field & 0xfff;
+       MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+       param->rdmarc_base = qword_field & ~((u64)0x1f);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+       param->log_rd_per_qp = byte_field & 0x7;

        MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
        if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -2128,22 +2143,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        /* steering attributes */
        if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
-               MLX4_GET(param->log_mc_entry_sz, outbox,
-                        INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
-               MLX4_GET(param->log_mc_table_sz, outbox,
-                        INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
-               MLX4_GET(byte_field, outbox,
-                        INIT_HCA_FS_A0_OFFSET);
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+               param->log_mc_entry_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+               param->log_mc_table_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
                param->dmfs_high_steer_mode =
                        a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
        } else {
                MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
-               MLX4_GET(param->log_mc_entry_sz, outbox,
-                        INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
-               MLX4_GET(param->log_mc_hash_sz, outbox,
-                        INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-               MLX4_GET(param->log_mc_table_sz, outbox,
-                        INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+               param->log_mc_entry_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+               param->log_mc_hash_sz = byte_field & 0x1f;
+               MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+               param->log_mc_table_sz = byte_field & 0x1f;
        }

        /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2167,15 +2181,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        /* TPT attributes */

        MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
-       MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
-       MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+       param->mw_enabled = byte_field >> 7;
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+       param->log_mpt_sz = byte_field & 0x3f;
        MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
        MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

        /* UAR attributes */

        MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
-       MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+       MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+       param->log_uar_sz = byte_field & 0xf;

        /* phv_check enable */
        MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
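A note on the mlx4_QUERY_HCA() rework above: MLX4_GET() sizes its mailbox read by the type of its destination, so, as I read the change, pulling a narrow firmware field directly into a wider param member also captured adjacent mailbox bytes. The series therefore reads each field into a local of the exact wire width (u8/u16/u32/u64) and masks off only the architected bits before assigning. The idiom, sketched with a hypothetical offset name:

    u8 byte_field;

    /* read exactly one byte, keep only the 5 defined bits */
    MLX4_GET(byte_field, outbox, HYPOTHETICAL_LOG_SZ_OFFSET);
    param->log_sz = byte_field & 0x1f;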
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 4b4351141b94..d89a3da89e5a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
        int i;

        if (chunk->nsg > 0)
-               pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
-                            PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_sg(&dev->persist->pdev->dev, chunk->sg, chunk->npages,
+                            DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
-               __free_pages(sg_page(&chunk->mem[i]),
-                            get_order(chunk->mem[i].length));
+               __free_pages(sg_page(&chunk->sg[i]),
+                            get_order(chunk->sg[i].length));
 }

@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->persist->pdev->dev,
-                                 chunk->mem[i].length,
-                                 lowmem_page_address(sg_page(&chunk->mem[i])),
-                                 sg_dma_address(&chunk->mem[i]));
+                                 chunk->buf[i].size,
+                                 chunk->buf[i].addr,
+                                 chunk->buf[i].dma_addr);
 }

 void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
        return 0;
 }

-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
                                   int order, gfp_t gfp_mask)
 {
-       void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
-                                      &sg_dma_address(mem), gfp_mask);
-       if (!buf)
+       buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
+                                      &buf->dma_addr, gfp_mask);
+       if (!buf->addr)
                return -ENOMEM;

-       if (offset_in_page(buf)) {
-               dma_free_coherent(dev, PAGE_SIZE << order,
-                                 buf, sg_dma_address(mem));
+       if (offset_in_page(buf->addr)) {
+               dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
+                                 buf->dma_addr);
                return -ENOMEM;
        }

-       sg_set_buf(mem, buf, PAGE_SIZE << order);
-       sg_dma_len(mem) = PAGE_SIZE << order;
+       buf->size = PAGE_SIZE << order;
        return 0;
 }

@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,

        while (npages > 0) {
                if (!chunk) {
-                       chunk = kmalloc_node(sizeof(*chunk),
+                       chunk = kzalloc_node(sizeof(*chunk),
                                             gfp_mask & ~(__GFP_HIGHMEM |
                                                          __GFP_NOWARN),
                                             dev->numa_node);
                        if (!chunk) {
-                               chunk = kmalloc(sizeof(*chunk),
+                               chunk = kzalloc(sizeof(*chunk),
                                                gfp_mask & ~(__GFP_HIGHMEM |
                                                             __GFP_NOWARN));
                                if (!chunk)
                                        goto fail;
                        }
+                       chunk->coherent = coherent;

-                       sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
-                       chunk->npages = 0;
-                       chunk->nsg    = 0;
+                       if (!coherent)
+                               sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
-                                                     &chunk->mem[chunk->npages],
+                                                     &chunk->buf[chunk->npages],
                                                      cur_order, mask);
                else
-                       ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+                       ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
                                                   cur_order, mask,
                                                   dev->numa_node);

@@ -205,9 +204,9 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                if (coherent)
                        ++chunk->nsg;
                else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-                       chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
-                                               chunk->npages,
-                                               PCI_DMA_BIDIRECTIONAL);
+                       chunk->nsg = dma_map_sg(&dev->persist->pdev->dev,
+                                               chunk->sg, chunk->npages,
+                                               DMA_BIDIRECTIONAL);

                        if (chunk->nsg <= 0)
                                goto fail;
@@ -220,9 +219,8 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
        }

        if (!coherent && chunk) {
-               chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
-                                       chunk->npages,
-                                       PCI_DMA_BIDIRECTIONAL);
+               chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
+                                       chunk->npages, DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
@@ -320,7 +318,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
        u64 idx;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
-       struct page *page = NULL;
+       void *addr = NULL;

        if (!table->lowmem)
                return NULL;
@@ -336,28 +334,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
+                       dma_addr_t dma_addr;
+                       size_t len;
+
+                       if (table->coherent) {
+                               len = chunk->buf[i].size;
+                               dma_addr = chunk->buf[i].dma_addr;
+                               addr = chunk->buf[i].addr;
+                       } else {
+                               struct page *page;
+
+                               len = sg_dma_len(&chunk->sg[i]);
+                               dma_addr = sg_dma_address(&chunk->sg[i]);
+
+                               /* XXX: we should never do this for highmem
+                                * allocation. This function either needs
+                                * to be split, or the kernel virtual address
+                                * return needs to be made optional.
+                                */
+                               page = sg_page(&chunk->sg[i]);
+                               addr = lowmem_page_address(page);
+                       }
+
                        if (dma_handle && dma_offset >= 0) {
-                               if (sg_dma_len(&chunk->mem[i]) > dma_offset)
-                                       *dma_handle = sg_dma_address(&chunk->mem[i]) +
-                                               dma_offset;
-                               dma_offset -= sg_dma_len(&chunk->mem[i]);
+                               if (len > dma_offset)
+                                       *dma_handle = dma_addr + dma_offset;
+                               dma_offset -= len;
                        }
+
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we found the page, dma_handle has already
                         * been assigned to.
                         */
-                       if (chunk->mem[i].length > offset) {
-                               page = sg_page(&chunk->mem[i]);
+                       if (len > offset)
                                goto out;
-                       }
-                       offset -= chunk->mem[i].length;
+                       offset -= len;
                }
        }

+       addr = NULL;
out:
        mutex_unlock(&table->mutex);
-       return page ? lowmem_page_address(page) + offset : NULL;
+       return addr ? addr + offset : NULL;
 }

 int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a490557..d199874b1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum {
        MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
 };

+struct mlx4_icm_buf {
+       void            *addr;
+       size_t          size;
+       dma_addr_t      dma_addr;
+};
+
 struct mlx4_icm_chunk {
        struct list_head list;
        int npages;
        int nsg;
-       struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
+       bool coherent;
+       union {
+               struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
+               struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
+       };
 };

 struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)

 static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
 {
-       return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+       if (iter->chunk->coherent)
+               return iter->chunk->buf[iter->page_idx].dma_addr;
+       else
+               return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
 }

 static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
 {
-       return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+       if (iter->chunk->coherent)
+               return iter->chunk->buf[iter->page_idx].size;
+       else
+               return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
 }

 int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
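Summarizing the icm changes above: a chunk now holds either a scatterlist (streaming DMA mappings) or an array of struct mlx4_icm_buf (coherent allocations), overlaid in a union and discriminated by chunk->coherent, so every accessor must branch on that flag exactly as mlx4_icm_addr()/mlx4_icm_size() now do. A sketch of the same pattern for the kernel virtual address, as a hypothetical helper that is not part of the patch:

    static inline void *mlx4_icm_virt(struct mlx4_icm_chunk *chunk, int i)
    {
            if (chunk->coherent)
                    return chunk->buf[i].addr;       /* coherent buffer */
            return lowmem_page_address(sg_page(&chunk->sg[i]));
    }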
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 456f30007ad6..421b9c3c8bf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -63,8 +63,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
        mutex_lock(&priv->alloc_mutex);
        original_node = dev_to_node(&dev->pdev->dev);
        set_dev_node(&dev->pdev->dev, node);
-       cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
-                                        dma_handle, GFP_KERNEL);
+       cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+                                       GFP_KERNEL);
        set_dev_node(&dev->pdev->dev, original_node);
        mutex_unlock(&priv->alloc_mutex);
        return cpu_handle;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d3125cdf69db..e267ff93e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1583,6 +1583,24 @@ no_trig:
        spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
 }

+void mlx5_cmd_flush(struct mlx5_core_dev *dev)
+{
+       struct mlx5_cmd *cmd = &dev->cmd;
+       int i;
+
+       for (i = 0; i < cmd->max_reg_cmds; i++)
+               while (down_trylock(&cmd->sem))
+                       mlx5_cmd_trigger_completions(dev);
+
+       while (down_trylock(&cmd->pages_sem))
+               mlx5_cmd_trigger_completions(dev);
+
+       /* Unlock cmdif */
+       up(&cmd->pages_sem);
+       for (i = 0; i < cmd->max_reg_cmds; i++)
+               up(&cmd->sem);
+}
+
 static int status_to_err(u8 status)
 {
        return status ? -1 : 0; /* TBD more meaningful codes */
@@ -1789,8 +1807,8 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
 {
        struct device *ddev = &dev->pdev->dev;

-       cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
-                                                &cmd->alloc_dma, GFP_KERNEL);
+       cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+                                               &cmd->alloc_dma, GFP_KERNEL);
        if (!cmd->cmd_alloc_buf)
                return -ENOMEM;

@@ -1804,9 +1822,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)

        dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
                          cmd->alloc_dma);
-       cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
-                                                2 * MLX5_ADAPTER_PAGE_SIZE - 1,
-                                                &cmd->alloc_dma, GFP_KERNEL);
+       cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+                                               2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+                                               &cmd->alloc_dma, GFP_KERNEL);
        if (!cmd->cmd_alloc_buf)
                return -ENOMEM;

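For context on mlx5_cmd_flush() above: each command-interface slot is guarded by a counting semaphore, so spinning on down_trylock() both claims a slot and, on every failed attempt, forces completion of whatever command currently owns it. Once all of cmd->sem and cmd->pages_sem are held, nothing can be in flight, and the closing up() calls reopen the interface. The drain idiom in isolation, with generic names:

    /* quiesce: take every slot, flushing stuck work on contention */
    for (i = 0; i < nslots; i++)
            while (down_trylock(&slot_sem))
                    complete_stuck_commands();
    /* ... command interface now idle ... */
    for (i = 0; i < nslots; i++)
            up(&slot_sem);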
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8fa8fdd30b85..448a92561567 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats {
 enum {
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
+       MLX5E_STATE_XDP_TX_ENABLED,
 };

 struct mlx5e_rqt {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 046948ead152..f3c7ab6faea5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
+       e->route_dev = route_dev;

        /* It's important to add the neigh to the hash table before checking
         * the neigh validity state. So if we'll get a notification, in case the
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
        e->m_neigh.family = n->ops->family;
        memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
        e->out_dev = out_dev;
+       e->route_dev = route_dev;

        /* It's importent to add the neigh to the hash table before checking
         * the neigh validity state. So if we'll get a notification, in case the
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
-                      void *headers_v)
+                      void *headers_v, u8 *match_level)
 {
        int tunnel_type;
        int err = 0;

        tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
        if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+               *match_level = MLX5_MATCH_L4;
                err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
                                               headers_c, headers_v);
        } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+               *match_level = MLX5_MATCH_L3;
                err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
                                                headers_c, headers_v);
        } else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 706ce7bf15e7..b63f15de899d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
                       struct mlx5_flow_spec *spec,
                       struct tc_cls_flower_offload *f,
                       void *headers_c,
-                      void *headers_v);
+                      void *headers_v, u8 *match_level);

 #endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 3740177eed09..03b2a9f9c589 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        int sq_num;
        int i;

-       if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+       /* this flag is sufficient, no need to test internal sq state */
+       if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
                return -ENETDOWN;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,

        sq = &priv->channels.c[sq_num]->xdpsq;

-       if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-               return -ENETDOWN;
-
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                struct mlx5e_xdp_info xdpi;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 3a67cb3cd179..ee27a7c8cd87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                    u32 flags);

+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
+{
+       set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+{
+       clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+       /* let other device's napi(s) see our new state */
+       synchronize_rcu();
+}
+
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+{
+       return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 {
        if (sq->doorbell_cseg) {
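The three xdp.h helpers above form a lockless enable-flag handshake: the redirect path only tests MLX5E_STATE_XDP_TX_ENABLED, while the teardown path clears the bit and then calls synchronize_rcu() so any NAPI poll that might still have observed the old value finishes before the SQs are destroyed. Compressed to its essentials, with generic names standing in for the driver's state:

    /* teardown side */
    clear_bit(FLAG_TX_ENABLED, &state);
    synchronize_rcu();      /* wait out in-flight readers */
    destroy_queues();

    /* hot path */
    if (unlikely(!test_bit(FLAG_TX_ENABLED, &state)))
            return -ENETDOWN;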
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index c9df08133718..47233b9a4f81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
        new_channels.params = priv->channels.params;
        new_channels.params.num_channels = count;
-       if (!netif_is_rxfh_configured(priv->netdev))
-               mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
-                                             MLX5E_INDIR_RQT_SIZE, count);

        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
        if (arfs_enabled)
                mlx5e_arfs_disable(priv);

+       if (!netif_is_rxfh_configured(priv->netdev))
+               mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
+                                             MLX5E_INDIR_RQT_SIZE, count);
+
        /* Switch to new channels, set new parameters and close old ones */
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);

@@ -844,9 +845,12 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
                                             Autoneg);

-       if (get_fec_supported_advertised(mdev, link_ksettings))
+       err = get_fec_supported_advertised(mdev, link_ksettings);
+       if (err) {
                netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
                           __func__, err);
+               err = 0; /* don't fail caps query because of FEC error */
+       }

        if (!an_disable_admin)
                ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 8cfd2ec7c0a2..93e50ccd44c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -950,7 +950,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
        if (params->rx_dim_enabled)
                __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

-       if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
+       if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
                __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);

        return 0;
@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)

        mlx5e_build_tx2sq_maps(priv);
        mlx5e_activate_channels(&priv->channels);
+       mlx5e_xdp_tx_enable(priv);
        netif_tx_start_all_queues(priv->netdev);

        if (mlx5e_is_vport_rep(priv))
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
         */
        netif_tx_stop_all_queues(priv->netdev);
        netif_tx_disable(priv->netdev);
+       mlx5e_xdp_tx_disable(priv);
        mlx5e_deactivate_channels(&priv->channels);
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 96cc0c6a4014..ef9e472daffb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -58,7 +58,8 @@ struct mlx5e_rep_indr_block_priv {
        struct list_head list;
 };

-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+                                           struct net_device *netdev);

 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
@@ -179,6 +180,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes += sq_stats->bytes;
+                       s->tx_queue_dropped += sq_stats->dropped;
                }
        }
 }
@@ -594,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);
+               /* Update the encap source mac, in case that we delete
+                * the flows when encap source mac changed.
+                */
+               ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

                mlx5e_tc_encap_flows_add(priv, e);
        }
@@ -663,7 +669,7 @@ static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
        struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

        list_for_each_entry_safe(cb_priv, temp, head, list) {
-               mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+               mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
                kfree(cb_priv);
        }
 }
@@ -735,7 +741,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,

                err = tcf_block_cb_register(f->block,
                                            mlx5e_rep_indr_setup_block_cb,
-                                           netdev, indr_priv, f->extack);
+                                           indr_priv, indr_priv, f->extack);
                if (err) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
@@ -743,14 +749,15 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,

                return err;
        case TC_BLOCK_UNBIND:
+               indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+               if (!indr_priv)
+                       return -ENOENT;
+
                tcf_block_cb_unregister(f->block,
                                        mlx5e_rep_indr_setup_block_cb,
-                                       netdev);
-               indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
-               if (indr_priv) {
-                       list_del(&indr_priv->list);
-                       kfree(indr_priv);
-               }
+                                       indr_priv);
+               list_del(&indr_priv->list);
+               kfree(indr_priv);

                return 0;
        default:
@@ -779,7 +786,7 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,

        err = __tc_indr_block_cb_register(netdev, rpriv,
                                          mlx5e_rep_indr_setup_tc_cb,
-                                         netdev);
+                                         rpriv);
        if (err) {
                struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

@@ -789,10 +796,11 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
        return err;
 }

-static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
+                                           struct net_device *netdev)
 {
        __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
-                                     netdev);
+                                     rpriv);
 }

 static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -811,7 +819,7 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
                mlx5e_rep_indr_register_block(rpriv, netdev);
                break;
        case NETDEV_UNREGISTER:
-               mlx5e_rep_indr_unregister_block(netdev);
+               mlx5e_rep_indr_unregister_block(rpriv, netdev);
                break;
        }
        return NOTIFY_OK;
@@ -1122,9 +1130,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
-       int ret;
+       int ret, pf_num;
+
+       ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num);
+       if (ret)
+               return ret;
+
+       if (rep->vport == FDB_UPLINK_VPORT)
+               ret = snprintf(buf, len, "p%d", pf_num);
+       else
+               ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1);

-       ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

@@ -1281,6 +1297,18 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
        return 0;
 }

+static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+                                       __be16 vlan_proto)
+{
+       netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
+
+       if (vlan != 0)
+               return -EOPNOTSUPP;
+
+       /* allow setting 0-vid for compatibility with libvirt */
+       return 0;
+}
+
 static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get = mlx5e_attr_get,
 };
@@ -1315,6 +1343,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
        .ndo_set_vf_rate = mlx5e_set_vf_rate,
        .ndo_get_vf_config = mlx5e_get_vf_config,
        .ndo_get_vf_stats = mlx5e_get_vf_stats,
+       .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
 };

 bool mlx5e_eswitch_rep(struct net_device *netdev)
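On the phys_port_name change above: representors now report names in the switchdev convention, "p<pf>" for the uplink and "pf<pf>vf<vf>" for VF representors, instead of a bare vport index. For illustration, with pf_num = 0 the uplink yields "p0" and the representor of vport 4 (VF 3) yields "pf0vf3":

    /* illustrative values only: pf_num = 0, rep->vport = 4 */
    snprintf(buf, len, "p%d", 0);         /* uplink rep -> "p0"     */
    snprintf(buf, len, "pf%dvf%d", 0, 3); /* VF rep     -> "pf0vf3" */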
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index edd722824697..36eafc877e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry {
        unsigned char h_dest[ETH_ALEN]; /* destination eth addr */

        struct net_device *out_dev;
+       struct net_device *route_dev;
        int tunnel_type;
        int tunnel_hlen;
        int reformat_type;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d0bb5ff8c26..f86e4804e83e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -732,6 +732,8 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
 }

+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
@@ -754,6 +756,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
        if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
                goto csum_unnecessary;

+       /* CQE csum doesn't cover padding octets in short ethernet
+        * frames. And the pad field is appended prior to calculating
+        * and appending the FCS field.
+        *
+        * Detecting these padded frames requires to verify and parse
+        * IP headers, so we simply force all those small frames to be
+        * CHECKSUM_UNNECESSARY even if they are not padded.
+        */
+       if (short_frame(skb->len))
+               goto csum_unnecessary;
+
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
                if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index cae6c6d48984..b5c1b039375a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr {
        struct net_device *filter_dev;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
+       int max_mod_hdr_actions;
        void *mod_hdr_actions;
        int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
 };
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f,
-                            struct net_device *filter_dev)
+                            struct net_device *filter_dev, u8 *match_level)
 {
        struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
        int err = 0;

        err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
-                                headers_c, headers_v);
+                                headers_c, headers_v, match_level);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "failed to parse tunnel attributes");
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              struct net_device *filter_dev,
-                             u8 *match_level)
+                             u8 *match_level, u8 *tunnel_match_level)
 {
        struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-                       if (parse_tunnel_attr(priv, spec, f, filter_dev))
+                       if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
                                return -EOPNOTSUPP;
                        break;
                default:
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
+       u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
        struct mlx5_eswitch_rep *rep;
-       u8 match_level;
        int err;

-       err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
+       err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);

        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
                rep = rpriv->rep;
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                }
        }

-       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                flow->esw_attr->match_level = match_level;
-       else
+               flow->esw_attr->tunnel_match_level = tunnel_match_level;
+       } else {
                flow->nic_attr->match_level = match_level;
+       }

        return err;
 }
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = {
        OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
 };

-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, it says how many HW actions were
- * actually parsed.
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
+ * says how many HW actions were actually parsed.
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
        add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
-       action = parse_attr->mod_hdr_actions;
-       max_actions = parse_attr->num_mod_hdr_actions;
-       nactions = 0;
+       action = parse_attr->mod_hdr_actions +
+                parse_attr->num_mod_hdr_actions * action_size;
+
+       max_actions = parse_attr->max_mod_hdr_actions;
+       nactions = parse_attr->num_mod_hdr_actions;

        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
        if (!parse_attr->mod_hdr_actions)
                return -ENOMEM;

-       parse_attr->num_mod_hdr_actions = max_actions;
+       parse_attr->max_mod_hdr_actions = max_actions;
        return 0;
 }

@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                goto out_err;
        }

-       err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
-       if (err)
-               goto out_err;
+       if (!parse_attr->mod_hdr_actions) {
+               err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+               if (err)
+                       goto out_err;
+       }

        err = offload_pedit_fields(masks, vals, parse_attr, extack);
        if (err < 0)
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,

 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                                          struct tcf_exts *exts,
+                                         u32 actions,
                                          struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        u16 ethertype;
        int nkeys, i;

-       headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+       if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
+       else
+               headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+
        ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

        /* for non-IP we only re-write MACs, so we're okay */
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,

        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                return modify_header_match_supported(&parse_attr->spec, exts,
-                                                    extack);
+                                                    actions, extack);

        return true;
 }
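The max/num split threaded through en_tc.c above turns offload_pedit_fields() into an append-style writer: the buffer is allocated once, the next free entry starts at mod_hdr_actions + num_mod_hdr_actions * action_size, and a second pedit action in the same flow continues after the first instead of overwriting it. The cursor arithmetic in isolation, with generic names rather than the driver's API:

    /* resume writing after the entries already emitted */
    char *action = buf + nemitted * entry_size;

    while (more_fields() && nemitted < max_entries) {
            emit_action(action);
            action += entry_size;
            nemitted++;
    }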
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 598ad7e4d5c9..0e55cd1f2e98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+#ifdef CONFIG_MLX5_EN_IPSEC
+               struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
+#endif
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
                mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+#ifdef CONFIG_MLX5_EN_IPSEC
+               wqe->eth = cur_eth;
+#endif
        }

        /* fill wqe */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index a44ea7b85614..5b492b67f4e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1134,13 +1134,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
        int err = 0;
        u8 *smac_v;

-       if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-               mlx5_core_warn(esw->dev,
-                              "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-                              vport->vport);
-               return -EPERM;
-       }
-
        esw_vport_cleanup_ingress_rules(esw, vport);

        if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
@@ -1728,7 +1721,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
        int vport_num;
        int err;

-       if (!MLX5_ESWITCH_MANAGER(dev))
+       if (!MLX5_VPORT_MANAGER(dev))
                return 0;

        esw_info(dev,
@@ -1797,7 +1790,7 @@ abort:

 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 {
-       if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
+       if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
                return;

        esw_info(esw->dev, "cleanup\n");
@@ -1827,13 +1820,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];

-       if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+       if (evport->info.spoofchk && !is_valid_ether_addr(mac))
                mlx5_core_warn(esw->dev,
-                              "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+                              "Set invalid MAC while spoofchk is on, vport(%d)\n",
                               vport);
-               err = -EPERM;
-               goto unlock;
-       }

        err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
        if (err) {
@@ -1979,6 +1969,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
        evport = &esw->vports[vport];
        pschk = evport->info.spoofchk;
        evport->info.spoofchk = spoofchk;
+       if (pschk && !is_valid_ether_addr(evport->info.mac))
+               mlx5_core_warn(esw->dev,
+                              "Spoofchk in set while MAC is invalid, vport(%d)\n",
+                              evport->vport);
        if (evport->enabled && esw->mode == SRIOV_LEGACY)
                err = esw_vport_ingress_config(esw, evport);
        if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 9c89eea9b2c3..748ff178a1d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr {
        } dests[MLX5_MAX_FLOW_FWD_VPORTS];
        u32 mod_hdr_id;
        u8 match_level;
+       u8 tunnel_match_level;
        struct mlx5_fc *counter;
        u32 chain;
        u16 prio;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 53065b6ae593..d4e6fe5b9300 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

-       if (attr->match_level == MLX5_MATCH_NONE)
-               spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
-       else
-               spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
-                                             MLX5_MATCH_MISC_PARAMETERS;
-
-       if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
-               spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+       spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+       if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+               if (attr->tunnel_match_level != MLX5_MATCH_NONE)
+                       spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+               if (attr->match_level != MLX5_MATCH_NONE)
+                       spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+       } else if (attr->match_level != MLX5_MATCH_NONE) {
+               spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+       }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_id = attr->mod_hdr_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index fbc42b7252a9..503035469d2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
 	enum port_module_event_status_type module_status;
 	enum port_module_event_error_type error_type;
 	struct mlx5_eqe_port_module *module_event_eqe;
-	const char *status_str, *error_str;
+	const char *status_str;
 	u8 module_num;
 
 	module_event_eqe = &eqe->data.port_module;
-	module_num = module_event_eqe->module;
 	module_status = module_event_eqe->module_status &
 		PORT_MODULE_EVENT_MODULE_STATUS_MASK;
 	error_type = module_event_eqe->error_type &
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
 
 	if (module_status < MLX5_MODULE_STATUS_NUM)
 		events->pme_stats.status_counters[module_status]++;
-	status_str = mlx5_pme_status_to_string(module_status);
 
-	if (module_status == MLX5_MODULE_STATUS_ERROR) {
+	if (module_status == MLX5_MODULE_STATUS_ERROR)
 		if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
 			events->pme_stats.error_counters[error_type]++;
-		error_str = mlx5_pme_error_to_string(error_type);
-	}
 
 	if (!printk_ratelimit())
 		return NOTIFY_OK;
 
-	if (module_status == MLX5_MODULE_STATUS_ERROR)
+	module_num = module_event_eqe->module;
+	status_str = mlx5_pme_status_to_string(module_status);
+	if (module_status == MLX5_MODULE_STATUS_ERROR) {
+		const char *error_str = mlx5_pme_error_to_string(error_type);
+
 		mlx5_core_err(events->dev,
 			      "Port module event[error]: module %u, %s, %s\n",
 			      module_num, status_str, error_str);
-	else
+	} else {
 		mlx5_core_info(events->dev,
 			      "Port module event: module %u, %s\n",
 			      module_num, status_str);
+	}
 
 	return NOTIFY_OK;
 }
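Note on the port_module() rework above: the human-readable strings are now built only after printk_ratelimit() has agreed to emit a message, and error_str is scoped to the error branch so it can no longer be read uninitialized on the non-error path. A minimal sketch of the same pattern; status_to_str(), error_to_str() and STATUS_ERROR are hypothetical stand-ins for the mlx5_pme_*_to_string() lookups:

    /* Do no formatting work unless the message will actually be printed;
     * the statistics counters are updated unconditionally before this.
     */
    static void log_module_event(struct device *dev, u8 status, u8 error)
    {
            if (!printk_ratelimit())
                    return;                 /* skip the string lookups entirely */

            if (status == STATUS_ERROR)     /* illustrative constant */
                    dev_err(dev, "Port module event[error]: %s, %s\n",
                            status_to_str(status), error_to_str(error));
            else
                    dev_info(dev, "Port module event: %s\n",
                             status_to_str(status));
    }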
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 196c07383082..cb9fa3430c53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
 	mlx5_core_err(dev, "start\n");
 	if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
 		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-		mlx5_cmd_trigger_completions(dev);
+		mlx5_cmd_flush(dev);
 	}
 
 	mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
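Note: the fatal-error path now calls mlx5_cmd_flush() instead of bare mlx5_cmd_trigger_completions(); the new helper is declared in mlx5_core.h below, but its body is not part of this excerpt. The following is therefore only an inferred sketch of what a flush wrapper of this shape typically does, with every name invented for illustration:

    /* Inferred sketch only: fail everything in flight, then keep the
     * command interface quiesced so nothing new is posted to dead HW.
     */
    void example_cmd_flush(struct example_dev *dev)
    {
            example_cmd_trigger_completions(dev);   /* complete in-flight cmds */
            example_cmd_drain(dev);                 /* hypothetical: wait for idle */
    }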
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 3a6baed722d8..2d223385dc81 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -616,6 +616,27 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	}
 }
 
+int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num)
+{
+	struct mlx5_lag *ldev;
+	int n;
+
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev) {
+		mlx5_core_warn(dev, "no lag device, can't get pf num\n");
+		return -EINVAL;
+	}
+
+	for (n = 0; n < MLX5_MAX_PORTS; n++)
+		if (ldev->pf[n].dev == dev) {
+			*pf_num = n;
+			return 0;
+		}
+
+	mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n");
+	return -EINVAL;
+}
+
 /* Must be called with intf_mutex held */
 void mlx5_lag_remove(struct mlx5_core_dev *dev)
 {
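The new mlx5_lag_get_pf_num() maps a mlx5_core_dev back to its slot in the LAG's PF array. A hedged usage sketch; only the helper itself comes from this patch, the caller is hypothetical:

    static int example_pick_pf_slot(struct mlx5_core_dev *dev)
    {
            int pf_num;
            int err;

            err = mlx5_lag_get_pf_num(dev, &pf_num);
            if (err)
                    return err;     /* device is not part of a LAG */

            /* pf_num is now 0 .. MLX5_MAX_PORTS - 1 */
            return pf_num;
    }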
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c68dcea5985b..4fdac020b795 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
 			      struct ptp_system_timestamp *sts);
 
 void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
+void mlx5_cmd_flush(struct mlx5_core_dev *dev);
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 
@@ -187,6 +188,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
 	       MLX5_CAP_GEN(dev, lag_master);
 }
 
+int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num);
+
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
 void mlx5_lag_update(struct mlx5_core_dev *dev);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 388f205a497f..370ca94b6775 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *
 mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
 {
 	struct mlx5_core_rsc_common *common;
+	unsigned long flags;
 
-	spin_lock(&table->lock);
+	spin_lock_irqsave(&table->lock, flags);
 
 	common = radix_tree_lookup(&table->tree, rsn);
 	if (common)
 		atomic_inc(&common->refcount);
 
-	spin_unlock(&table->lock);
+	spin_unlock_irqrestore(&table->lock, flags);
 
 	return common;
 }
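Switching mlx5_get_rsc() to spin_lock_irqsave() makes the lookup safe when the same lock can also be taken from interrupt context: with plain spin_lock(), an IRQ landing on the owning CPU and contending for the lock would deadlock. The generic shape of the pattern, with the table helpers as hypothetical stand-ins:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(table_lock);

    static void *lookup_and_get(unsigned long key)
    {
            unsigned long flags;
            void *obj;

            spin_lock_irqsave(&table_lock, flags);  /* IRQs off, state saved */
            obj = table_lookup(key);                /* hypothetical lookup */
            if (obj)
                    obj_get(obj);                   /* hypothetical ref bump */
            spin_unlock_irqrestore(&table_lock, flags);

            return obj;
    }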
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 080ddd1942ec..b9a25aed5d11 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,7 @@ config MLXSW_SPECTRUM
 	depends on IPV6 || IPV6=n
 	depends on NET_IPGRE || NET_IPGRE=n
 	depends on IPV6_GRE || IPV6_GRE=n
+	depends on VXLAN || VXLAN=n
 	select GENERIC_ALLOCATOR
 	select PARMAN
 	select OBJAGG
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 66b8098c6fd2..a2321fe8d6a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+
+		memcpy(ncqe, cqe, q->elem_size);
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
 
 		if (sendq) {
 			struct mlxsw_pci_queue *sdq;
 
 			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
-						 wqe_counter, cqe);
+						 wqe_counter, ncqe);
 			q->u.cq.comp_sdq_count++;
 		} else {
 			struct mlxsw_pci_queue *rdq;
 
 			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, q->u.cq.v, cqe);
+						 wqe_counter, q->u.cq.v, ncqe);
 			q->u.cq.comp_rdq_count++;
 		}
 		if (++items == credits)
 			break;
 	}
-	if (items) {
-		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	if (items)
 		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
-	}
 }
 
 static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
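In the tasklet above each CQE is now copied to an on-stack buffer and the consumer doorbell is rung before the completion is handled; once the doorbell is rung the device may overwrite the original ring element, so all further parsing goes through the private copy (ncqe), sized by the new MLXSW_PCI_CQE_SIZE_MAX for the largest CQE format. The copy-then-release shape in isolation (types and helpers illustrative):

    /* Snapshot the element, return the slot to the producer, then
     * process the stable copy at leisure.
     */
    static void consume_one(struct ring *r, const void *elem, size_t elem_size)
    {
            char snapshot[MAX_ELEM_SIZE];   /* upper bound over all formats */

            memcpy(snapshot, elem, elem_size);
            ring_doorbell_consumer(r);      /* slot may be reused from here on */
            process_element(r, snapshot);   /* never touch *elem again */
    }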
@@ -1365,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
 		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
 
 		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
-			break;
+			return 0;
 		cond_resched();
 	} while (time_before(jiffies, end));
-	return 0;
+	return -EBUSY;
 }
 
 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
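mlxsw_pci_sw_reset() previously fell through to return 0 even when the FW_READY magic never showed up; it now returns 0 from inside the loop and -EBUSY only after the deadline passes (with the timeout itself raised to 13 s in pci_hw.h below). The poll-until-deadline idiom this fixes, with the readiness test as a hypothetical stand-in:

    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static int wait_fw_ready(unsigned long timeout_ms)
    {
            unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

            do {
                    if (hw_is_ready())      /* hypothetical readiness check */
                            return 0;       /* success reported in-loop */
                    cond_resched();
            } while (time_before(jiffies, end));

            return -EBUSY;                  /* timeout must be an error */
    }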
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index bb99f6d41fe0..ffee38e36ce8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	13000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
@@ -53,6 +53,7 @@
 #define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
 #define MLXSW_PCI_CQE01_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_CQE2_SIZE	32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE_MAX	MLXSW_PCI_CQE2_SIZE
 #define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
 #define MLXSW_PCI_CQE01_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eed1045e4d96..32519c93df17 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -5005,12 +5005,15 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
 							     lower_dev,
 							     upper_dev);
 		} else if (netif_is_lag_master(upper_dev)) {
-			if (info->linking)
+			if (info->linking) {
 				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
 							     upper_dev);
-			else
+			} else {
+				mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
+							    false);
 				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
 							upper_dev);
+			}
 		} else if (netif_is_ovs_master(upper_dev)) {
 			if (info->linking)
 				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index b0f2d8e8ded0..ac222833a5cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -72,7 +72,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
 	act_set = mlxsw_afa_block_first_set(rulei->act_block);
 	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
 
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+	if (err)
+		goto err_ptce2_write;
+
+	return 0;
+
+err_ptce2_write:
+	cregion->ops->entry_remove(cregion, centry);
+	return err;
 }
 
 static void
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 1c19feefa5f2..2941967e1cc5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1022,7 +1022,6 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
 {
 	struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
 
-	ASSERT_RTNL();
 	objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
 }
 
@@ -1054,7 +1053,6 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
 	const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
 	unsigned int erp_bank;
 
-	ASSERT_RTNL();
 	if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
 		return;
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 055cc6943b34..9d9aa28684af 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -997,8 +997,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = {
 static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
 	.type			= MLXSW_SP_FID_TYPE_DUMMY,
 	.fid_size		= sizeof(struct mlxsw_sp_fid),
-	.start_index		= MLXSW_SP_RFID_BASE - 1,
-	.end_index		= MLXSW_SP_RFID_BASE - 1,
+	.start_index		= VLAN_N_VID - 1,
+	.end_index		= VLAN_N_VID - 1,
 	.ops			= &mlxsw_sp_fid_dummy_ops,
 };
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 0a31fff2516e..fb1c48c698f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -816,14 +816,14 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
 	ops = nve->nve_ops_arr[params->type];
 
 	if (!ops->can_offload(nve, params->dev, extack))
-		return -EOPNOTSUPP;
+		return -EINVAL;
 
 	memset(&config, 0, sizeof(config));
 	ops->nve_config(nve, params->dev, &config);
 	if (nve->num_nve_tunnels &&
 	    memcmp(&config, &nve->config, sizeof(config))) {
 		NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
-		return -EINVAL;
+		return -EINVAL;
 	}
 
 	err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 1bd2c6e15f8d..c772109b638d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1078,8 +1078,7 @@ static int
 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_bridge_port *bridge_port,
 			      u16 vid, bool is_untagged, bool is_pvid,
-			      struct netlink_ext_ack *extack,
-			      struct switchdev_trans *trans)
+			      struct netlink_ext_ack *extack)
 {
 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
@@ -1095,9 +1094,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
 	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
 		return -EEXIST;
 
-	if (switchdev_trans_ph_prepare(trans))
-		return 0;
-
 	if (!mlxsw_sp_port_vlan) {
 		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
 							       vid);
@@ -1188,6 +1184,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 		return err;
 	}
 
+	if (switchdev_trans_ph_commit(trans))
+		return 0;
+
 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
 	if (WARN_ON(!bridge_port))
 		return -EINVAL;
@@ -1200,7 +1199,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
 
 		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
 						    vid, flag_untagged,
-						    flag_pvid, extack, trans);
+						    flag_pvid, extack);
 		if (err)
 			return err;
 	}
@@ -1234,7 +1233,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
 {
 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
-			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
 }
 
 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1291,7 +1290,7 @@ out:
 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     const char *mac, u16 fid, bool adding,
 				     enum mlxsw_reg_sfd_rec_action action,
-				     bool dynamic)
+				     enum mlxsw_reg_sfd_rec_policy policy)
 {
 	char *sfd_pl;
 	u8 num_rec;
@@ -1302,8 +1301,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 		return -ENOMEM;
 
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
-	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
-			      mac, fid, action, local_port);
+	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
 	if (err)
@@ -1322,7 +1320,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				   bool dynamic)
 {
 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
-					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+					 MLXSW_REG_SFD_REC_ACTION_NOP,
+					 mlxsw_sp_sfd_rec_policy(dynamic));
 }
 
 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1330,7 +1329,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
 {
 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
-					 false);
+					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
 }
 
 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
@@ -1808,7 +1807,7 @@ static void
 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 {
-	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
+	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 
 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -3207,7 +3206,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
 				  struct mlxsw_sp_bridge_device *bridge_device,
 				  const struct net_device *vxlan_dev, u16 vid,
 				  bool flag_untagged, bool flag_pvid,
-				  struct switchdev_trans *trans,
 				  struct netlink_ext_ack *extack)
 {
 	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
@@ -3225,9 +3223,6 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
 	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
 		return -EINVAL;
 
-	if (switchdev_trans_ph_prepare(trans))
-		return 0;
-
 	if (!netif_running(vxlan_dev))
 		return 0;
 
@@ -3345,6 +3340,9 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
 
 	port_obj_info->handled = true;
 
+	if (switchdev_trans_ph_commit(trans))
+		return 0;
+
 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 	if (!bridge_device)
 		return -EINVAL;
@@ -3358,8 +3356,7 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
 		err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
 							vxlan_dev, vid,
 							flag_untagged,
-							flag_pvid, trans,
-							extack);
+							flag_pvid, extack);
 		if (err)
 			return err;
 	}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 20c9377e99cb..310807ef328b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
 
 	memset(&ksettings, 0, sizeof(ksettings));
 	phy_ethtool_get_link_ksettings(netdev, &ksettings);
-	local_advertisement = phy_read(phydev, MII_ADVERTISE);
-	if (local_advertisement < 0)
-		return;
-
-	remote_advertisement = phy_read(phydev, MII_LPA);
-	if (remote_advertisement < 0)
-		return;
+	local_advertisement =
+		linkmode_adv_to_mii_adv_t(phydev->advertising);
+	remote_advertisement =
+		linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
 
 	lan743x_phy_update_flowcontrol(adapter,
 				       ksettings.base.duplex,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 5f384f73007d..19ce0e605096 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3604,9 +3604,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
 	for (i = 0; i < mgp->num_slices; i++) {
 		ss = &mgp->ss[i];
 		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
-		ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
-							&ss->rx_done.bus,
-							GFP_KERNEL);
+		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+						       &ss->rx_done.bus,
+						       GFP_KERNEL);
 		if (ss->rx_done.entry == NULL)
 			goto abort;
 		bytes = sizeof(*ss->fw_stats);
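This hunk and the similar ones that follow (nfp, nixge, pch_gbe, pasemi, qed, qlcnic) are part of a tree-wide cleanup: dma_alloc_coherent() itself returns zeroed memory, which left the dma_zalloc_coherent() wrapper with nothing to add, so callers are converted back to the base API with continuation lines re-indented. The calling convention is otherwise unchanged:

    #include <linux/dma-mapping.h>

    /* dma_alloc_coherent() hands back zeroed, coherent memory; no
     * separate memset() and no *_zalloc_* variant is required.
     */
    static void *alloc_desc_ring(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }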
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index e97636d2e6ee..7d2d4241498f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2170,9 +2170,9 @@ nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
 	tx_ring->cnt = dp->txd_cnt;
 
 	tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
-	tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
-					    &tx_ring->dma,
-					    GFP_KERNEL | __GFP_NOWARN);
+	tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
+					   &tx_ring->dma,
+					   GFP_KERNEL | __GFP_NOWARN);
 	if (!tx_ring->txds) {
 		netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
 			    tx_ring->cnt);
@@ -2328,9 +2328,9 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
 
 	rx_ring->cnt = dp->rxd_cnt;
 	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
-	rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
-					    &rx_ring->dma,
-					    GFP_KERNEL | __GFP_NOWARN);
+	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
+					   &rx_ring->dma,
+					   GFP_KERNEL | __GFP_NOWARN);
 	if (!rx_ring->rxds) {
 		netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
 			    rx_ring->cnt);
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 0611f2335b4a..1e408d1a9b5f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -287,9 +287,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
 	priv->rx_bd_ci = 0;
 
 	/* Allocate the Tx and Rx buffer descriptors. */
-	priv->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					    sizeof(*priv->tx_bd_v) * TX_BD_NUM,
-					    &priv->tx_bd_p, GFP_KERNEL);
+	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
+					   &priv->tx_bd_p, GFP_KERNEL);
 	if (!priv->tx_bd_v)
 		goto out;
 
@@ -299,9 +299,9 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev)
 	if (!priv->tx_skb)
 		goto out;
 
-	priv->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					    sizeof(*priv->rx_bd_v) * RX_BD_NUM,
-					    &priv->rx_bd_p, GFP_KERNEL);
+	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
+					   &priv->rx_bd_p, GFP_KERNEL);
 	if (!priv->rx_bd_v)
 		goto out;
 
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 43c0c10dfeb7..552d930e3940 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1440,8 +1440,8 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 
 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
 	rx_ring->rx_buff_pool =
-		dma_zalloc_coherent(&pdev->dev, size,
-				    &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
+		dma_alloc_coherent(&pdev->dev, size,
+				   &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
 	if (!rx_ring->rx_buff_pool)
 		return -ENOMEM;
 
@@ -1755,8 +1755,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 
 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 
-	tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
-					    &tx_ring->dma, GFP_KERNEL);
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc) {
 		vfree(tx_ring->buffer_info);
 		return -ENOMEM;
@@ -1798,8 +1798,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 		return -ENOMEM;
 
 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
-	rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
-					    &rx_ring->dma, GFP_KERNEL);
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
 	if (!rx_ring->desc) {
 		vfree(rx_ring->buffer_info);
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 8a31a02c9f47..d21041554507 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -401,9 +401,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
 	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
 		goto out_ring_desc;
 
-	ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
-					    RX_RING_SIZE * sizeof(u64),
-					    &ring->buf_dma, GFP_KERNEL);
+	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
+					   RX_RING_SIZE * sizeof(u64),
+					   &ring->buf_dma, GFP_KERNEL);
 	if (!ring->buffers)
 		goto out_ring_desc;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 24a90163775e..2d8a77cc156b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -53,7 +53,7 @@
 extern const struct qed_common_ops qed_common_ops_pass;
 
 #define QED_MAJOR_VERSION		8
-#define QED_MINOR_VERSION		33
+#define QED_MINOR_VERSION		37
 #define QED_REVISION_VERSION		0
 #define QED_ENGINEERING_VERSION		20
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dc1c1b616084..c2ad405b2f50 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -936,9 +936,9 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
 		u32 size = min_t(u32, total_size, psz);
 		void **p_virt = &p_mngr->t2[i].p_virt;
 
-		*p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
-					      size, &p_mngr->t2[i].p_phys,
-					      GFP_KERNEL);
+		*p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+					     &p_mngr->t2[i].p_phys,
+					     GFP_KERNEL);
 		if (!p_mngr->t2[i].p_virt) {
 			rc = -ENOMEM;
 			goto t2_fail;
@@ -1054,8 +1054,8 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
 		u32 size;
 
 		size = min_t(u32, sz_left, p_blk->real_size_in_page);
-		p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size,
-					     &p_phys, GFP_KERNEL);
+		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+					    &p_phys, GFP_KERNEL);
 		if (!p_virt)
 			return -ENOMEM;
 
@@ -2306,9 +2306,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		goto out0;
 	}
 
-	p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev,
-				     p_blk->real_size_in_page, &p_phys,
-				     GFP_KERNEL);
+	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+				    p_blk->real_size_in_page, &p_phys,
+				    GFP_KERNEL);
 	if (!p_virt) {
 		rc = -ENOMEM;
 		goto out1;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 8f6551421945..2ecaaaa4469a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
 
 /* get pq index according to PQ_FLAGS */
 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
-					   u32 pq_flags)
+					   unsigned long pq_flags)
 {
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
 	/* Can't have multiple flags set here */
-	if (bitmap_weight((unsigned long *)&pq_flags,
+	if (bitmap_weight(&pq_flags,
 			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
-		DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
 		goto err;
 	}
 
 	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
-		DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
 		goto err;
 	}
 
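The old qed_init_qm_get_idx_from_flags() cast the address of a 32-bit pq_flags to unsigned long * for bitmap_weight(); bitmaps are arrays of unsigned long, so that cast only happens to work on little-endian machines and inspects the wrong half of the word on 64-bit big-endian. Widening the parameter keeps the bitmap helper inside its contract, and the format strings move from %x to %lx to match:

    #include <linux/bitmap.h>

    /* Correct: the storage really is an unsigned long, so bitmap_weight()
     * may legitimately read a full word from it.
     */
    static bool exactly_one_flag_set(unsigned long flags)
    {
            return bitmap_weight(&flags, sizeof(flags) * BITS_PER_BYTE) == 1;
    }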
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93906..58be1c4c6668 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
 		  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
 		   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
 
+	SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+		  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+		   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
 	SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
 		  !!(accept_filter & QED_ACCEPT_BCAST));
 
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 		return rc;
 	}
 
+	if (p_params->update_ctl_frame_check) {
+		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+	}
+
 	/* Update mcast bins for VFs, PF doesn't use this functionality */
 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
 
@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 	u16 num_queues = 0;
 
 	/* Since the feature controls only queue-zones,
-	 * make sure we have the contexts [rx, tx, xdp] to
+	 * make sure we have the contexts [rx, xdp, tcs] to
 	 * match.
 	 */
 	for_each_hwfn(cdev, i) {
@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
 		u16 cids;
 
 		cids = hwfn->pf_params.eth_pf_params.num_cons;
-		num_queues += min_t(u16, l2_queues, cids / 3);
+		cids /= (2 + info->num_tc);
+		num_queues += min_t(u16, l2_queues, cids);
 	}
 
 	/* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
 						 QED_ACCEPT_MCAST_UNMATCHED;
-		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+						 QED_ACCEPT_MCAST_UNMATCHED;
 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
@@ -2860,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
 	p_hwfn = p_cid->p_owner;
 	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
 	if (rc)
-		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
+		DP_VERBOSE(cdev, QED_MSG_DEBUG,
+			   "Unable to read queue coalescing\n");
 
 	return rc;
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f1095d17..7127d5aaac42 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
 	struct qed_rss_params		*rss_params;
 	struct qed_filter_accept_flags	accept_flags;
 	struct qed_sge_tpa_params	*sge_tpa_params;
+	u8				update_ctl_frame_check;
+	u8				mac_chk_en;
+	u8				ethtype_chk_en;
 };
 
 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 90afd514ffe1..b5f419b71287 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1619,6 +1619,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
 	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
 	rx_prod.bd_prod = cpu_to_le16(bd_prod);
 	rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+
+	/* Make sure chain element is updated before ringing the doorbell */
+	dma_wmb();
+
 	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
 }
 
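The dma_wmb() added above orders the CPU's writes that fill the chain element against the doorbell write that tells the device to fetch it; without the barrier the doorbell could become visible first and the NIC could read a stale descriptor. The canonical fill/barrier/doorbell sequence, with the queue types purely illustrative:

    #include <linux/io.h>

    static void post_descriptor(struct txq *q, const struct txd *d)
    {
            q->ring[q->prod & q->mask] = *d;  /* 1. fill the element */
            dma_wmb();                        /* 2. element before doorbell */
            writel(++q->prod, q->doorbell);   /* 3. notify the device */
    }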
@@ -2447,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 {
 	struct qed_ll2_tx_pkt_info pkt;
 	const skb_frag_t *frag;
+	u8 flags = 0, nr_frags;
 	int rc = -EINVAL, i;
 	dma_addr_t mapping;
 	u16 vlan = 0;
-	u8 flags = 0;
 
 	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
 		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
 		return -EINVAL;
 	}
 
-	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+	/* Cache number of fragments from SKB since SKB may be freed by
+	 * the completion routine after calling qed_ll2_prepare_tx_packet()
+	 */
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
 		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
-		       1 + skb_shinfo(skb)->nr_frags);
+		       1 + nr_frags);
 		return -EINVAL;
 	}
 
@@ -2481,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	}
 
 	memset(&pkt, 0, sizeof(pkt));
-	pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+	pkt.num_of_bds = 1 + nr_frags;
 	pkt.vlan = vlan;
 	pkt.bd_flags = flags;
 	pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2492,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
 	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
 		pkt.remove_stag = true;
 
+	/* qed_ll2_prepare_tx_packet() may actually send the packet if
+	 * there are no fragments in the skb and subsequently the completion
+	 * routine may run and free the SKB, so no dereferencing the SKB
+	 * beyond this point unless skb has any fragments.
+	 */
 	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
 				       &pkt, 1);
 	if (rc)
 		goto err;
 
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+	for (i = 0; i < nr_frags; i++) {
 		frag = &skb_shinfo(skb)->frags[i];
 
 		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
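The qed_ll2_start_xmit() changes close a use-after-free: for an unfragmented skb, qed_ll2_prepare_tx_packet() can transmit immediately and the completion path may free the skb before the caller returns, so every later use of skb_shinfo(skb)->nr_frags must come from a value cached up front. The shape of the fix, with the handoff and mapping helpers hypothetical:

    static int xmit_one(struct sk_buff *skb)
    {
            u8 nr_frags = skb_shinfo(skb)->nr_frags; /* cache before handoff */
            int rc;

            /* May send and complete immediately when nr_frags == 0,
             * after which skb must not be dereferenced.
             */
            rc = hand_off_to_hw(skb);
            if (rc)
                    return rc;

            /* Reached only when fragments remain to be attached, so the
             * completion cannot have freed the skb yet; the loop bound
             * still has to be the cached count.
             */
            return map_fragments(skb, nr_frags);
    }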
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 4179c9013fc6..96ab77ae6af5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
  * @param p_hwfn
  */
 void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
 
 /**
  * @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 888274fa208b..5a495fda9e9d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
 
 	p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
 	p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
+	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+		p_ent->ramrod.pf_update.mf_vlan |=
+			cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
 
 	return qed_spq_post(p_hwfn, p_ent, NULL);
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index eb88bbc6b193..ba64ff9bedbd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 
 	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
 
+	/* Attempt to post pending requests */
+	spin_lock_bh(&p_hwfn->p_spq->lock);
+	rc = qed_spq_pend_post(p_hwfn);
+	spin_unlock_bh(&p_hwfn->p_spq->lock);
+
 	return rc;
 }
 
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
 		return 0;
 }
 
-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
 	struct qed_spq_entry *p_ent = NULL;
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_spq_entry *tmp;
 	struct qed_spq_entry *found = NULL;
-	int rc;
 
 	if (!p_hwfn)
 		return -EINVAL;
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 	 */
 	qed_spq_return_entry(p_hwfn, found);
 
-	/* Attempt to post pending requests */
-	spin_lock_bh(&p_spq->lock);
-	rc = qed_spq_pend_post(p_hwfn);
-	spin_unlock_bh(&p_spq->lock);
-
-	return rc;
+	return 0;
 }
 
 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
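Net effect of the qed_spq.c changes: pending slow-path entries are no longer reposted from every qed_spq_completion() call but once per event-queue pass in qed_eq_completion(), which is why qed_spq_pend_post() loses its static qualifier and gains a declaration in qed_sp.h above. The repost still has to run under the SPQ lock, since completions and new posts share the pending list:

    static void example_eq_pass_done(struct qed_hwfn *p_hwfn)
    {
            spin_lock_bh(&p_hwfn->p_spq->lock);
            qed_spq_pend_post(p_hwfn);
            spin_unlock_bh(&p_hwfn->p_spq->lock);
    }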
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290fa0f30..71a7af134dd8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
 	params.vport_id = vf->vport_id;
 	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
 	params.mtu = vf->mtu;
-	params.check_mac = true;
+
+	/* Non trusted VFs should enable control frame filtering */
+	params.check_mac = !vf->p_vf_info.is_trusted_configured;
 
 	rc = qed_sp_eth_vport_start(p_hwfn, &params);
 	if (rc) {
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 		params.opaque_fid = vf->opaque_fid;
 		params.vport_id = vf->vport_id;
 
+		params.update_ctl_frame_check = 1;
+		params.mac_chk_en = !vf_info->is_trusted_configured;
+
 		if (vf_info->rx_accept_mode & mask) {
 			flags->update_rx_mode_config = 1;
 			flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 		}
 
 		if (flags->update_rx_mode_config ||
-		    flags->update_tx_mode_config)
+		    flags->update_tx_mode_config ||
+		    params.update_ctl_frame_check)
 			qed_sp_vport_update(hwfn, &params,
 					    QED_SPQ_MODE_EBLOCK, NULL);
 	}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index b6cccf44bf40..5dda547772c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
 	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
 	struct vf_pf_resc_request *p_resc;
+	u8 retry_cnt = VF_ACQUIRE_THRESH;
 	bool resources_acquired = false;
 	struct vfpf_acquire_tlv *req;
 	int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
 
 		/* send acquire request */
 		rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+		/* Re-try acquire in case of vf-pf hw channel timeout */
+		if (retry_cnt && rc == -EBUSY) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF retrying to acquire due to VPC timeout\n");
+			retry_cnt--;
+			continue;
+		}
+
 		if (rc)
 			goto exit;
 
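The acquire handshake now tolerates a bounded number of -EBUSY results from the VF-PF channel instead of failing on the first timeout; retry_cnt starts at VF_ACQUIRE_THRESH and is decremented on each retried attempt. The bounded-retry idiom on its own, with illustrative names:

    #define RETRY_LIMIT 3   /* illustrative; the driver uses VF_ACQUIRE_THRESH */

    static int acquire_with_retry(struct chan *ch)
    {
            u8 retries = RETRY_LIMIT;
            int rc;

            do {
                    rc = send_acquire(ch);  /* hypothetical request */
            } while (rc == -EBUSY && retries--);

            return rc;
    }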
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 613249d1e967..730997b13747 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -56,7 +56,7 @@
 #include <net/tc_act/tc_gact.h>
 
 #define QEDE_MAJOR_VERSION		8
-#define QEDE_MINOR_VERSION		33
+#define QEDE_MINOR_VERSION		37
 #define QEDE_REVISION_VERSION		0
 #define QEDE_ENGINEERING_VERSION	20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -494,6 +494,9 @@ struct qede_reload_args {
 
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      struct net_device *sb_dev,
+		      select_queue_fallback_t fallback);
 netdev_features_t qede_features_check(struct sk_buff *skb,
 				      struct net_device *dev,
 				      netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index bdf816fe5a16..31b046e24565 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }
 
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+		      struct net_device *sb_dev,
+		      select_queue_fallback_t fallback)
+{
+	struct qede_dev *edev = netdev_priv(dev);
+	int total_txq;
+
+	total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
+	return QEDE_TSS_COUNT(edev) ?
+		fallback(dev, skb, NULL) % total_txq : 0;
+}
+
 /* 8B udp header + 8B base tunnel header + 32B option length */
 #define QEDE_MAX_TUN_HDR_LEN 48
 
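With multiple traffic classes, each TSS queue fans out into num_tc hardware TX queues, so the queue index the stack picks must be folded into QEDE_TSS_COUNT(edev) * num_tc; qede_select_queue() does exactly that via the core's fallback picker, and the qede_main.c hunks below wire it into all three netdev_ops tables. A generic version of the same fold (the count helper is hypothetical):

    static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
                                    struct net_device *sb_dev,
                                    select_queue_fallback_t fallback)
    {
            int total = real_txq_count(dev);    /* hypothetical count helper */

            return total ? fallback(dev, skb, NULL) % total : 0;
    }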
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 5a74fcbdbc2b..9790f26d17c4 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
 	.ndo_open		= qede_open,
 	.ndo_stop		= qede_close,
 	.ndo_start_xmit		= qede_start_xmit,
+	.ndo_select_queue	= qede_select_queue,
 	.ndo_set_rx_mode	= qede_set_rx_mode,
 	.ndo_set_mac_address	= qede_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
 	.ndo_open		= qede_open,
 	.ndo_stop		= qede_close,
 	.ndo_start_xmit		= qede_start_xmit,
+	.ndo_select_queue	= qede_select_queue,
 	.ndo_set_rx_mode	= qede_set_rx_mode,
 	.ndo_set_mac_address	= qede_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
 	.ndo_open		= qede_open,
 	.ndo_stop		= qede_close,
 	.ndo_start_xmit		= qede_start_xmit,
+	.ndo_select_queue	= qede_select_queue,
 	.ndo_set_rx_mode	= qede_set_rx_mode,
 	.ndo_set_mac_address	= qede_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d344e9d43832..af38d3d73291 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -434,14 +434,14 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
 	*(tx_ring->hw_consumer) = 0;
 
 	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
-	rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
-				      &rq_phys_addr, GFP_KERNEL);
+	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+				     &rq_phys_addr, GFP_KERNEL);
 	if (!rq_addr)
 		return -ENOMEM;
 
 	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
-	rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
-				       &rsp_phys_addr, GFP_KERNEL);
+	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+				      &rsp_phys_addr, GFP_KERNEL);
 	if (!rsp_addr) {
 		err = -ENOMEM;
 		goto out_free_rq;
@@ -855,8 +855,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
 	struct qlcnic_cmd_args cmd;
 	size_t nic_size = sizeof(struct qlcnic_info_le);
 
-	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
-					    &nic_dma_t, GFP_KERNEL);
+	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+					   &nic_dma_t, GFP_KERNEL);
 	if (!nic_info_addr)
 		return -ENOMEM;
 
@@ -909,8 +909,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
 	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
 		return err;
 
-	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
-					    &nic_dma_t, GFP_KERNEL);
+	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+					   &nic_dma_t, GFP_KERNEL);
 	if (!nic_info_addr)
 		return -ENOMEM;
 
@@ -964,8 +964,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
 	void *pci_info_addr;
 	int err = 0, i;
 
-	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
-					    &pci_info_dma_t, GFP_KERNEL);
+	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+					   &pci_info_dma_t, GFP_KERNEL);
 	if (!pci_info_addr)
 		return -ENOMEM;
 
@@ -1078,8 +1078,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
 		return -EIO;
 	}
 
-	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
-					 &stats_dma_t, GFP_KERNEL);
+	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+					&stats_dma_t, GFP_KERNEL);
 	if (!stats_addr)
 		return -ENOMEM;
 
@@ -1134,8 +1134,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
 	if (mac_stats == NULL)
 		return -ENOMEM;
 
-	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
-					 &stats_dma_t, GFP_KERNEL);
+	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+					&stats_dma_t, GFP_KERNEL);
 	if (!stats_addr)
 		return -ENOMEM;
 
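The qlcnic hunks above are part of a mechanical tree-wide conversion: dma_alloc_coherent() now returns zeroed memory unconditionally, so the dma_zalloc_coherent() wrapper is redundant and only the function name and argument alignment change. The usage pattern stays the same, sketched here with generic names:

	void *buf;
	dma_addr_t dma_handle;

	buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;	/* on success the buffer is already zeroed */
	/* ... use buf and dma_handle ... */
	dma_free_coherent(dev, size, buf, dma_handle);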
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 031f6e6ee9c1..8d790313ee3d 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -776,7 +776,7 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
 			      8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
 
 	ring_header->used = 0;
-	ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
-						  &ring_header->dma_addr,
-						  GFP_KERNEL);
+	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
+						 &ring_header->dma_addr,
+						 GFP_KERNEL);
 	if (!ring_header->v_addr)
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 44f6e4873aad..4f910c4f67b0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp)
 		}
 		bytes_compl += skb->len;
 		pkts_compl++;
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 	}
 
 	cp->tx_skb[tx_tail] = NULL;
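dev_kfree_skb_irq() and dev_consume_skb_irq() free the skb identically; the difference is accounting. The kfree variant reports the packet as dropped to the kernel's drop monitor, while the consume variant marks a normal, successful free. TX-completion paths like cp_tx() therefore switch to the consume variant so tools such as dropwatch only flag real losses. A sketch of the split, with illustrative ring fields:

static void mynic_tx_complete(struct mynic_ring *ring)
{
	while (ring->dirty != ring->cur) {
		struct sk_buff *skb = ring->skb[ring->dirty];

		if (skb) {
			/* Sent on the wire: consume, don't count as drop. */
			dev_consume_skb_irq(skb);
			ring->skb[ring->dirty] = NULL;
		}
		ring->dirty = (ring->dirty + 1) & (MYNIC_RING_SIZE - 1);
	}
}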
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 298930d39b79..6e36b88ca7c9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -205,6 +205,8 @@ enum cfg_version {
 };
 
 static const struct pci_device_id rtl8169_pci_tbl[] = {
+	{ PCI_VDEVICE(REALTEK,	0x2502), RTL_CFG_1 },
+	{ PCI_VDEVICE(REALTEK,	0x2600), RTL_CFG_1 },
 	{ PCI_VDEVICE(REALTEK,	0x8129), RTL_CFG_0 },
 	{ PCI_VDEVICE(REALTEK,	0x8136), RTL_CFG_2 },
 	{ PCI_VDEVICE(REALTEK,	0x8161), RTL_CFG_1 },
@@ -706,6 +708,7 @@ module_param(use_dac, int, 0);
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_SOFTDEP("pre: realtek");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -1283,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
 {
 	RTL_W16(tp, IntrStatus, bits);
+	mmiowb();
 }
 
 static void rtl_irq_disable(struct rtl8169_private *tp)
 {
 	RTL_W16(tp, IntrMask, 0);
+	mmiowb();
 }
 
 #define RTL_EVENT_NAPI_RX	(RxOK | RxErr)
@@ -1679,11 +1684,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
 
 static bool rtl8169_update_counters(struct rtl8169_private *tp)
 {
+	u8 val = RTL_R8(tp, ChipCmd);
+
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
-	 * is disabled.
+	 * is disabled. If 0xff chip may be in a PCI power-save state.
 	 */
-	if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
+	if (!(val & CmdRxEnb) || val == 0xff)
 		return true;
 
 	return rtl8169_do_counters(tp, CounterDump);
@@ -6067,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 	struct device *d = tp_to_dev(tp);
 	dma_addr_t mapping;
 	u32 opts[2], len;
-	bool stop_queue;
 	int frags;
 
 	if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -6109,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	txd->opts2 = cpu_to_le32(opts[1]);
 
+	netdev_sent_queue(dev, skb->len);
+
 	skb_tx_timestamp(skb);
 
 	/* Force memory writes to complete before releasing descriptor */
@@ -6121,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	tp->cur_tx += frags + 1;
 
-	stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
-	if (unlikely(stop_queue))
-		netif_stop_queue(dev);
+	RTL_W8(tp, TxPoll, NPQ);
 
-	if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
-		RTL_W8(tp, TxPoll, NPQ);
+	mmiowb();
 
-	if (unlikely(stop_queue)) {
+	if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
+		 * not miss a ring update when it notices a stopped queue.
+		 */
+		smp_wmb();
+		netif_stop_queue(dev);
 		/* Sync with rtl_tx:
 		 * - publish queue status and cur_tx ring index (write barrier)
 		 * - refresh dirty_tx ring index (read barrier).
@@ -6478,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
 
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
+
 		rtl_irq_enable(tp);
+		mmiowb();
 	}
 
 	return work_done;
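The r8169 hunks revert the driver to its pre-BQL transmit flow: the doorbell (TxPoll) is written unconditionally after the descriptor is published, and only then is the full-ring case handled. The key ordering constraint is that cur_tx must be visible to the completion path before the queue is marked stopped, hence the smp_wmb() ahead of netif_stop_queue(). A generic sketch of that producer-side pattern, assuming a hypothetical slots_available() helper:

	tp->cur_tx += frags + 1;

	if (!slots_available(tp)) {
		/* Publish cur_tx before the stopped state so the
		 * completion path cannot miss the ring update.
		 */
		smp_wmb();
		netif_stop_queue(dev);
		/* Re-check: completion may have freed slots meanwhile. */
		smp_mb();
		if (slots_available(tp))
			netif_start_queue(dev);
	}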
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ffc1ada4e6da..d28c8f9ca55b 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -343,7 +343,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	int i;
 
 	priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
-		ETH_HLEN + VLAN_HLEN;
+		ETH_HLEN + VLAN_HLEN + sizeof(__sum16);
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -524,13 +524,15 @@ static void ravb_rx_csum(struct sk_buff *skb)
 {
 	u8 *hw_csum;
 
-	/* The hardware checksum is 2 bytes appended to packet data */
-	if (unlikely(skb->len < 2))
+	/* The hardware checksum is contained in sizeof(__sum16) (2) bytes
+	 * appended to packet data
+	 */
+	if (unlikely(skb->len < sizeof(__sum16)))
 		return;
-	hw_csum = skb_tail_pointer(skb) - 2;
+	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
 	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
 	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb_trim(skb, skb->len - 2);
+	skb_trim(skb, skb->len - sizeof(__sum16));
 }
 
 /* Packet receive function for Ethernet AVB */
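The EtherAVB MAC appends the frame's 16-bit checksum after the payload, which is why ravb_ring_init() must reserve sizeof(__sum16) extra bytes per RX buffer and ravb_rx_csum() must strip the trailer again. Because skb->ip_summed is set to CHECKSUM_COMPLETE, skb->csum has to carry the raw ones-complement sum that the stack then folds and verifies itself; csum_unfold() converts the 16-bit trailer into that form. The trailer handling, reduced to its essentials:

	/* hw_csum points at the 2-byte sum the MAC appended */
	hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
	skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - sizeof(__sum16));	/* drop the trailer */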
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 690aee88f0eb..6d22dd500790 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -400,9 +400,9 @@ static int init_tx_ring(struct device *dev, u8 queue_no,
 	}
 
 	/* allocate memory for TX descriptors */
-	tx_ring->dma_tx = dma_zalloc_coherent(dev,
-					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
-					      &tx_ring->dma_tx_phy, GFP_KERNEL);
+	tx_ring->dma_tx = dma_alloc_coherent(dev,
+					     tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
+					     &tx_ring->dma_tx_phy, GFP_KERNEL);
 	if (!tx_ring->dma_tx)
 		return -ENOMEM;
 
@@ -479,9 +479,9 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
 	rx_ring->queue_no = queue_no;
 
 	/* allocate memory for RX descriptors */
-	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
-					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
-					      &rx_ring->dma_rx_phy, GFP_KERNEL);
+	rx_ring->dma_rx = dma_alloc_coherent(priv->device,
+					     rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+					     &rx_ring->dma_rx_phy, GFP_KERNEL);
 
 	if (rx_ring->dma_rx == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index b6a50058bb8d..c08034154a9a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6046,22 +6046,25 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
 	{ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,	0, 0, "sfc_uefi" },
 	{ NVRAM_PARTITION_TYPE_STATUS,		0, 0, "sfc_status" }
 };
+#define EF10_NVRAM_PARTITION_COUNT	ARRAY_SIZE(efx_ef10_nvram_types)
 
 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
 					struct efx_mcdi_mtd_partition *part,
-					unsigned int type)
+					unsigned int type,
+					unsigned long *found)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
 	const struct efx_ef10_nvram_type_info *info;
 	size_t size, erase_size, outlen;
+	int type_idx = 0;
 	bool protected;
 	int rc;
 
-	for (info = efx_ef10_nvram_types; ; info++) {
-		if (info ==
-		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+	for (type_idx = 0; ; type_idx++) {
+		if (type_idx == EF10_NVRAM_PARTITION_COUNT)
 			return -ENODEV;
+		info = efx_ef10_nvram_types + type_idx;
 		if ((type & ~info->type_mask) == info->type)
 			break;
 	}
@@ -6074,6 +6077,13 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
 	if (protected)
 		return -ENODEV; /* hide it */
 
+	/* If we've already exposed a partition of this type, hide this
+	 * duplicate. All operations on MTDs are keyed by the type anyway,
+	 * so we can't act on the duplicate.
+	 */
+	if (__test_and_set_bit(type_idx, found))
+		return -EEXIST;
+
 	part->nvram_type = type;
 
 	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
@@ -6105,6 +6115,7 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
 static int efx_ef10_mtd_probe(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
 	struct efx_mcdi_mtd_partition *parts;
 	size_t outlen, n_parts_total, i, n_parts;
 	unsigned int type;
@@ -6133,11 +6144,13 @@ static int efx_ef10_mtd_probe(struct efx_nic *efx)
 	for (i = 0; i < n_parts_total; i++) {
 		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
 					i);
-		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
-		if (rc == 0)
-			n_parts++;
-		else if (rc != -ENODEV)
+		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
+						  found);
+		if (rc == -EEXIST || rc == -ENODEV)
+			continue;
+		if (rc)
 			goto fail;
+		n_parts++;
 	}
 
 	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
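Duplicate suppression in the sfc hunk is a fixed-size bitmap keyed by the index into efx_ef10_nvram_types: __test_and_set_bit() returns the bit's previous value, so the second sighting of a type is rejected in O(1). The non-atomic __ variant is fine here because MTD probing is single-threaded. The core of the idiom:

	DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };

	/* First caller for type_idx reads 0 and claims the bit; later
	 * callers see it already set and back off.
	 */
	if (__test_and_set_bit(type_idx, found))
		return -EEXIST;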
diff --git a/drivers/net/ethernet/sfc/falcon/nic.c b/drivers/net/ethernet/sfc/falcon/nic.c
index a8ecb33390da..9c07b5175581 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.c
+++ b/drivers/net/ethernet/sfc/falcon/nic.c
@@ -33,8 +33,8 @@
 int ef4_nic_alloc_buffer(struct ef4_nic *efx, struct ef4_buffer *buffer,
 			 unsigned int len, gfp_t gfp_flags)
 {
-	buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
-					   &buffer->dma_addr, gfp_flags);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, gfp_flags);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aa1945a858d5..c2d45a40eb48 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -34,8 +34,8 @@
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len, gfp_t gfp_flags)
 {
-	buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
-					   &buffer->dma_addr, gfp_flags);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, gfp_flags);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 703fbbefea44..0e1b7e960b98 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -211,8 +211,8 @@ static void meth_check_link(struct net_device *dev)
 static int meth_init_tx_ring(struct meth_private *priv)
 {
 	/* Init TX ring */
-	priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
-					    &priv->tx_ring_dma, GFP_ATOMIC);
+	priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+					   &priv->tx_ring_dma, GFP_ATOMIC);
 	if (!priv->tx_ring)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 15c62c160953..be47d864f8b9 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
 		skb = ep->tx_skbuff[entry];
 		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
 				 skb->len, PCI_DMA_TODEVICE);
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 		ep->tx_skbuff[entry] = NULL;
 	}
 
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 05a0948ad929..a18149720aa2 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1029,8 +1029,8 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
 	struct netsec_desc_ring *dring = &priv->desc_ring[id];
 	int i;
 
-	dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
-					   &dring->desc_dma, GFP_KERNEL);
+	dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
+					  &dring->desc_dma, GFP_KERNEL);
 	if (!dring->vaddr)
 		goto err;
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b923362ee55..3b174eae77c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
 	}
 
 	ret = phy_power_on(bsp_priv, true);
-	if (ret)
+	if (ret) {
+		gmac_clk_enable(bsp_priv, false);
 		return ret;
+	}
 
 	pm_runtime_enable(dev);
 	pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 6c5092e7771c..c5e25580a43f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -263,6 +263,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
 				   struct stmmac_extra_stats *x, u32 chan)
 {
 	u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+	u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
 	int ret = 0;
 
 	/* ABNORMAL interrupts */
@@ -282,8 +283,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
 		x->normal_irq_n++;
 
 		if (likely(intr_status & XGMAC_RI)) {
-			u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
-			if (likely(value & XGMAC_RIE)) {
+			if (likely(intr_en & XGMAC_RIE)) {
 				x->rx_normal_irq_n++;
 				ret |= handle_rx;
 			}
@@ -295,7 +295,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
 	}
 
 	/* Clear interrupts */
-	writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+	writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
 
 	return ret;
 }
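Writing ~0 to the DMA channel status register acknowledged every pending event, including ones whose interrupts were masked, so a flag that another code path was polling for could silently vanish. The fix acknowledges only bits that are both pending and enabled. The idiom, with illustrative register names:

	u32 status = readl(ioaddr + CH_STATUS);	/* what fired */
	u32 enabled = readl(ioaddr + CH_INT_EN);	/* what we may handle */

	/* Ack only our own events; leave masked ones pending for
	 * whoever polls them.
	 */
	writel(enabled & status, ioaddr + CH_STATUS);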
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1f61c25d82b..5d85742a2be0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -721,8 +721,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
 {
 	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-	if (!clk)
-		return 0;
+	if (!clk) {
+		clk = priv->plat->clk_ref_rate;
+		if (!clk)
+			return 0;
+	}
 
 	return (usec * (clk / 1000000)) / 256;
 }
@@ -731,8 +734,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
 {
 	unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
-	if (!clk)
-		return 0;
+	if (!clk) {
+		clk = priv->plat->clk_ref_rate;
+		if (!clk)
+			return 0;
+	}
 
 	return (riwt * 256) / (clk / 1000000);
 }
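The RIWT field of the RX watchdog counts in units of 256 CSR clock cycles, which is where usec * (clk / 1000000) / 256 comes from; falling back to clk_ref_rate merely keeps the conversion defined when the CSR clock is not modeled. A worked example at an assumed 250 MHz CSR clock:

	unsigned long clk = 250000000UL;		/* assumed CSR clock */
	u32 usec = 100;					/* requested coalescing */
	u32 riwt = (usec * (clk / 1000000)) / 256;	/* 25000 / 256 = 97 */
	u32 back = (riwt * 256) / (clk / 1000000);	/* 24832 / 250 = 99 us */

The round trip loses a microsecond to integer truncation, which is harmless at these granularities.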
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0e0a0789c2ed..685d20472358 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1549,22 +1549,18 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 			goto err_dma;
 
 		if (priv->extend_desc) {
-			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
-							    DMA_RX_SIZE *
-							    sizeof(struct
-							    dma_extended_desc),
-							    &rx_q->dma_rx_phy,
-							    GFP_KERNEL);
+			rx_q->dma_erx = dma_alloc_coherent(priv->device,
+							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
 			if (!rx_q->dma_erx)
 				goto err_dma;
 
 		} else {
-			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
-							   DMA_RX_SIZE *
-							   sizeof(struct
-							   dma_desc),
-							   &rx_q->dma_rx_phy,
-							   GFP_KERNEL);
+			rx_q->dma_rx = dma_alloc_coherent(priv->device,
+							  DMA_RX_SIZE * sizeof(struct dma_desc),
+							  &rx_q->dma_rx_phy,
+							  GFP_KERNEL);
 			if (!rx_q->dma_rx)
 				goto err_dma;
 		}
@@ -1612,21 +1608,17 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 			goto err_dma;
 
 		if (priv->extend_desc) {
-			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
-							    DMA_TX_SIZE *
-							    sizeof(struct
-							    dma_extended_desc),
-							    &tx_q->dma_tx_phy,
-							    GFP_KERNEL);
+			tx_q->dma_etx = dma_alloc_coherent(priv->device,
+							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
+							   &tx_q->dma_tx_phy,
+							   GFP_KERNEL);
 			if (!tx_q->dma_etx)
 				goto err_dma;
 		} else {
-			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
-							   DMA_TX_SIZE *
-							   sizeof(struct
-							   dma_desc),
-							   &tx_q->dma_tx_phy,
-							   GFP_KERNEL);
+			tx_q->dma_tx = dma_alloc_coherent(priv->device,
+							  DMA_TX_SIZE * sizeof(struct dma_desc),
+							  &tx_q->dma_tx_phy,
+							  GFP_KERNEL);
 			if (!tx_q->dma_tx)
 				goto err_dma;
 		}
@@ -3031,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_q = &priv->tx_queue[queue];
 
+	if (priv->tx_path_in_lpi_mode)
+		stmmac_disable_eee_mode(priv);
+
 	/* Manage oversized TCP frames for GMAC4 device */
 	if (skb_is_gso(skb) && priv->tso) {
-		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+		if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+			/*
+			 * There is no way to determine the number of TSO
+			 * capable Queues. Let's use always the Queue 0
+			 * because if TSO is supported then at least this
+			 * one will be capable.
+			 */
+			skb_set_queue_mapping(skb, 0);
+
 			return stmmac_tso_xmit(skb, dev);
+		}
 	}
 
 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3049,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	if (priv->tx_path_in_lpi_mode)
-		stmmac_disable_eee_mode(priv);
-
 	entry = tx_q->cur_tx;
 	first_entry = entry;
 	WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3525,27 +3526,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
 	struct stmmac_channel *ch =
 		container_of(napi, struct stmmac_channel, napi);
 	struct stmmac_priv *priv = ch->priv_data;
-	int work_done = 0, work_rem = budget;
+	int work_done, rx_done = 0, tx_done = 0;
 	u32 chan = ch->index;
 
 	priv->xstats.napi_poll++;
 
-	if (ch->has_tx) {
-		int done = stmmac_tx_clean(priv, work_rem, chan);
+	if (ch->has_tx)
+		tx_done = stmmac_tx_clean(priv, budget, chan);
+	if (ch->has_rx)
+		rx_done = stmmac_rx(priv, budget, chan);
 
-		work_done += done;
-		work_rem -= done;
-	}
+	work_done = max(rx_done, tx_done);
+	work_done = min(work_done, budget);
 
-	if (ch->has_rx) {
-		int done = stmmac_rx(priv, work_rem, chan);
+	if (work_done < budget && napi_complete_done(napi, work_done)) {
+		int stat;
 
-		work_done += done;
-		work_rem -= done;
-	}
-
-	if (work_done < budget && napi_complete_done(napi, work_done))
 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+		stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+						   &priv->xstats, chan);
+		if (stat && napi_reschedule(napi))
+			stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+	}
 
 	return work_done;
 }
@@ -4168,6 +4170,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
 		return ret;
 	}
 
+	/* Rx Watchdog is available in the COREs newer than the 3.40.
+	 * In some case, for example on bugged HW this feature
+	 * has to be disable and this can be done by passing the
+	 * riwt_off field from the platform.
+	 */
+	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+		priv->use_riwt = 1;
+		dev_info(priv->device,
+			 "Enable RX Mitigation via HW Watchdog Timer\n");
+	}
+
 	return 0;
 }
 
@@ -4300,18 +4314,6 @@ int stmmac_dvr_probe(struct device *device,
 	if (flow_ctrl)
 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
 
-	/* Rx Watchdog is available in the COREs newer than the 3.40.
-	 * In some case, for example on bugged HW this feature
-	 * has to be disable and this can be done by passing the
-	 * riwt_off field from the platform.
-	 */
-	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
-	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
-		priv->use_riwt = 1;
-		dev_info(priv->device,
-			 "Enable RX Mitigation via HW Watchdog Timer\n");
-	}
-
 	/* Setup channels NAPI */
 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
 
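The reworked stmmac poll function shows the canonical way to close the race between napi_complete_done() and the interrupt-enable write: charge RX and TX against the same budget, complete only when under budget, then re-read the hardware status and reschedule if events landed in the window. A generic sketch with illustrative helper names:

static int mynic_poll(struct napi_struct *napi, int budget)
{
	struct mynic_chan *ch = container_of(napi, struct mynic_chan, napi);
	int tx_done = mynic_tx_clean(ch, budget);
	int rx_done = mynic_rx(ch, budget);
	int work = min(max(rx_done, tx_done), budget);

	if (work < budget && napi_complete_done(napi, work)) {
		mynic_irq_enable(ch);
		/* Anything that fired before the enable above would be
		 * lost until the next interrupt; poll once more instead.
		 */
		if (mynic_pending(ch) && napi_reschedule(napi))
			mynic_irq_disable(ch);
	}
	return work;
}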
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c54a50dbd5ac..d819e8eaba12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
  */
 static void stmmac_pci_remove(struct pci_dev *pdev)
 {
+	int i;
+
 	stmmac_dvr_remove(&pdev->dev);
+
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		pcim_iounmap_regions(pdev, BIT(i));
+		break;
+	}
+
 	pci_disable_device(pdev);
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 531294f4978b..58ea18af9813 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
 	/* Queue 0 is not AVB capable */
 	if (queue <= 0 || queue >= tx_queues_count)
 		return -EINVAL;
+	if (!priv->dma_cap.av)
+		return -EOPNOTSUPP;
 	if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
 		return -EOPNOTSUPP;
 
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 9020b084b953..6fc05c106afc 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,22 +1,9 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * This driver uses the sungem driver (c) David Miller
  * (davem@redhat.com) as its basis.
  *
@@ -1911,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
 			cp->net_stats[ring].tx_packets++;
 			cp->net_stats[ring].tx_bytes += skb->len;
 			spin_unlock(&cp->stat_lock[ring]);
-			dev_kfree_skb_irq(skb);
+			dev_consume_skb_irq(skb);
 		}
 		cp->tx_old[ring] = entry;
 
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 13f3860496a8..ae5f05f03f88 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,23 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
  * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
  *
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com)
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- *
  * vendor id: 0x108E (Sun Microsystems, Inc.)
  * device id: 0xabba (Cassini)
  * revision ids: 0x01 = Cassini
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 720b7ac77f3b..e9b757b03b56 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp)
 
 			DTX(("skb(%p) ", skb));
 			bp->tx_skbs[elem] = NULL;
-			dev_kfree_skb_irq(skb);
+			dev_consume_skb_irq(skb);
 
 			elem = NEXT_TX(elem);
 		}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index ff641cf30a4e..d007dfeba5c3 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp)
 			this = &txbase[elem];
 		}
 
-		dev_kfree_skb_irq(skb);
+		dev_consume_skb_irq(skb);
 		dev->stats.tx_packets++;
 	}
 	hp->tx_old = elem;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dc966ddb6d81..b24c11187017 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
 		tx_level -= db->rptr->len;	/* '-' koz len is negative */
 
 		/* now should come skb pointer - free it */
-		dev_kfree_skb_irq(db->rptr->addr.skb);
+		dev_consume_skb_irq(db->rptr->addr.skb);
 		bdx_tx_db_inc_rptr(db);
 	}
 
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 810dfc7de1f9..e2d47b24a869 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
 		netdev_dbg(dev, "sent 0x%p, len=%d\n",
 			   desc->skb, desc->skb->len);
 
-		dev_kfree_skb_irq(desc->skb);
+		dev_consume_skb_irq(desc->skb);
 		desc->skb = NULL;
 		if (__netif_subqueue_stopped(dev, queue))
 			netif_wake_subqueue(dev, queue);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index edcd1e60b30d..37925a1d58de 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1311,13 +1311,13 @@ static int tsi108_open(struct net_device *dev)
 			data->id, dev->irq, dev->name);
 	}
 
-	data->rxring = dma_zalloc_coherent(&data->pdev->dev, rxring_size,
-					   &data->rxdma, GFP_KERNEL);
+	data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
+					  &data->rxdma, GFP_KERNEL);
 	if (!data->rxring)
 		return -ENOMEM;
 
-	data->txring = dma_zalloc_coherent(&data->pdev->dev, txring_size,
-					   &data->txdma, GFP_KERNEL);
+	data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
+					  &data->txdma, GFP_KERNEL);
 	if (!data->txring) {
 		dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
 				  data->rxdma);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 82412691ee66..27f6cf140845 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
 		dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
 				 le16_to_cpu(pktlen), DMA_TO_DEVICE);
 	}
-	dev_kfree_skb_irq(skb);
+	dev_consume_skb_irq(skb);
 	tdinfo->skb = NULL;
 }
 
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2241f9897092..15bb058db392 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
 
 	/* allocate the tx and rx ring buffer descriptors. */
 	/* returns a virtual address and a physical address. */
-	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-					  &lp->tx_bd_p, GFP_KERNEL);
+	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
 		goto out;
 
-	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-					  &lp->rx_bd_p, GFP_KERNEL);
+	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+					 &lp->rx_bd_p, GFP_KERNEL);
 	if (!lp->rx_bd_v)
 		goto out;
 
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 12a14609ec47..0789d8af7d72 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -199,15 +199,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 	lp->rx_bd_ci = 0;
 
 	/* Allocate the Tx and Rx buffer descriptors. */
-	lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
-					  &lp->tx_bd_p, GFP_KERNEL);
+	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+					 &lp->tx_bd_p, GFP_KERNEL);
 	if (!lp->tx_bd_v)
 		goto out;
 
-	lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
-					  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
-					  &lp->rx_bd_p, GFP_KERNEL);
+	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+					 &lp->rx_bd_p, GFP_KERNEL);
 	if (!lp->rx_bd_v)
 		goto out;
 
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 61fceee73c1b..56b7791911bf 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1139,9 +1139,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
 #endif
 			       sizeof(PI_CONSUMER_BLOCK) +
 			       (PI_ALIGN_K_DESC_BLK - 1);
-	bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
-						    &bp->kmalloced_dma,
-						    GFP_ATOMIC);
+	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
+						   &bp->kmalloced_dma,
+						   GFP_ATOMIC);
 	if (top_v == NULL)
 		return DFX_K_FAILURE;
 
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp)
 				 bp->descr_block_virt->xmt_data[comp].long_1,
 				 p_xmt_drv_descr->p_skb->len,
 				 DMA_TO_DEVICE);
-		dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
+		dev_consume_skb_irq(p_xmt_drv_descr->p_skb);
 
 		/*
 		 * Move to start of next packet by updating completion index
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 72433f3efc74..5d661f60b101 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -409,10 +409,10 @@ static int skfp_driver_init(struct net_device *dev)
 	if (bp->SharedMemSize > 0) {
 		bp->SharedMemSize += 16;	// for descriptor alignment
 
-		bp->SharedMemAddr = dma_zalloc_coherent(&bp->pdev.dev,
-							bp->SharedMemSize,
-							&bp->SharedMemDMA,
-							GFP_ATOMIC);
+		bp->SharedMemAddr = dma_alloc_coherent(&bp->pdev.dev,
+						       bp->SharedMemSize,
+						       &bp->SharedMemDMA,
+						       GFP_ATOMIC);
 		if (!bp->SharedMemAddr) {
 			printk("could not allocate mem for ");
 			printk("hardware module: %ld byte\n",
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 58bbba8582b0..3377ac66a347 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev,
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6: {
-		struct rt6_info *rt = rt6_lookup(geneve->net,
-						 &info->key.u.ipv6.dst, NULL, 0,
-						 NULL, 0);
+		struct rt6_info *rt;
+
+		if (!__in6_dev_get(dev))
+			break;
+
+		rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
+				NULL, 0);
 
 		if (rt && rt->dst.dev)
 			ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
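rt6_lookup() touches per-device IPv6 state, so when IPv6 is administratively disabled on the lower device (net.ipv6.conf.<dev>.disable_ipv6=1) there is nothing to look up and the call is unsafe; __in6_dev_get() returning NULL is the cheap test for that condition, and the MTU simply keeps its previous value. The guard in isolation (geneve_link_config() runs under RTNL, which is what makes the lockless __ variant safe here):

	struct inet6_dev *idev = __in6_dev_get(dev);

	if (!idev)
		return;		/* IPv6 disabled on dev: leave the MTU alone */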
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ef6f766f6389..e859ae2e42d5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@ struct hv_netvsc_packet {
 	u32 total_data_buflen;
 };
 
+#define NETVSC_HASH_KEYLEN 40
+
 struct netvsc_device_info {
 	unsigned char mac_adr[ETH_ALEN];
 	u32 num_chn;
@@ -151,6 +153,8 @@ struct netvsc_device_info {
 	u32 recv_sections;
 	u32 send_section_size;
 	u32 recv_section_size;
+
+	u8 rss_key[NETVSC_HASH_KEYLEN];
 };
 
 enum rndis_device_state {
@@ -160,8 +164,6 @@ enum rndis_device_state {
 	RNDIS_DEV_DATAINITIALIZED,
 };
 
-#define NETVSC_HASH_KEYLEN 40
-
 struct rndis_device {
 	struct net_device *ndev;
 
@@ -209,7 +211,9 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+int rndis_set_subchannel(struct net_device *ndev,
+			 struct netvsc_device *nvdev,
+			 struct netvsc_device_info *dev_info);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
@@ -1177,7 +1181,7 @@ enum ndis_per_pkt_info_type {
 
 enum rndis_per_pkt_info_interal_type {
 	RNDIS_PKTINFO_ID = 1,
-	/* Add more memebers here */
+	/* Add more members here */
 
 	RNDIS_PKTINFO_MAX
 };
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 922054c1d544..813d195bbd57 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
 
 	rdev = nvdev->extension;
 	if (rdev) {
-		ret = rndis_set_subchannel(rdev->ndev, nvdev);
+		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
 		if (ret == 0) {
 			netif_device_attach(rdev->ndev);
 		} else {
@@ -1331,7 +1331,7 @@ void netvsc_channel_cb(void *context)
 	prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);
 
 	if (napi_schedule_prep(&nvchan->napi)) {
-		/* disable interupts from host */
+		/* disable interrupts from host */
 		hv_begin_read(rbi);
 
 		__napi_schedule_irqoff(&nvchan->napi);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 91ed15ea5883..256adbd044f5 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -370,7 +370,7 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
 {
 	int j = 0;
 
-	/* Deal with compund pages by ignoring unused part
+	/* Deal with compound pages by ignoring unused part
 	 * of the page.
 	 */
 	page += (offset >> PAGE_SHIFT);
@@ -858,6 +858,39 @@ static void netvsc_get_channels(struct net_device *net,
 	}
 }
 
+/* Alloc struct netvsc_device_info, and initialize it from either existing
+ * struct netvsc_device, or from default values.
+ */
+static struct netvsc_device_info *netvsc_devinfo_get
+			(struct netvsc_device *nvdev)
+{
+	struct netvsc_device_info *dev_info;
+
+	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+	if (!dev_info)
+		return NULL;
+
+	if (nvdev) {
+		dev_info->num_chn = nvdev->num_chn;
+		dev_info->send_sections = nvdev->send_section_cnt;
+		dev_info->send_section_size = nvdev->send_section_size;
+		dev_info->recv_sections = nvdev->recv_section_cnt;
+		dev_info->recv_section_size = nvdev->recv_section_size;
+
+		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+		       NETVSC_HASH_KEYLEN);
+	} else {
+		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+		dev_info->send_sections = NETVSC_DEFAULT_TX;
+		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+		dev_info->recv_sections = NETVSC_DEFAULT_RX;
+		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+	}
+
+	return dev_info;
+}
+
 static int netvsc_detach(struct net_device *ndev,
 			 struct netvsc_device *nvdev)
 {
@@ -909,7 +942,7 @@ static int netvsc_attach(struct net_device *ndev,
 		return PTR_ERR(nvdev);
 
 	if (nvdev->num_chn > 1) {
-		ret = rndis_set_subchannel(ndev, nvdev);
+		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
 
 		/* if unavailable, just proceed with one queue */
 		if (ret) {
@@ -943,7 +976,7 @@ static int netvsc_set_channels(struct net_device *net,
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
 	unsigned int orig, count = channels->combined_count;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	int ret;
 
 	/* We do not support separate count for rx, tx, or other */
@@ -962,24 +995,26 @@ static int netvsc_set_channels(struct net_device *net,
 
 	orig = nvdev->num_chn;
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = count;
-	device_info.send_sections = nvdev->send_section_cnt;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = nvdev->recv_section_cnt;
-	device_info.recv_section_size = nvdev->recv_section_size;
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
+	device_info->num_chn = count;
 
 	ret = netvsc_detach(net, nvdev);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = netvsc_attach(net, &device_info);
+	ret = netvsc_attach(net, device_info);
 	if (ret) {
-		device_info.num_chn = orig;
-		if (netvsc_attach(net, &device_info))
+		device_info->num_chn = orig;
+		if (netvsc_attach(net, device_info))
 			netdev_err(net, "restoring channel setting failed\n");
 	}
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -1048,48 +1083,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
 	int orig_mtu = ndev->mtu;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	int ret = 0;
 
 	if (!nvdev || nvdev->destroy)
 		return -ENODEV;
 
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
 	/* Change MTU of underlying VF netdev first. */
 	if (vf_netdev) {
 		ret = dev_set_mtu(vf_netdev, mtu);
 		if (ret)
-			return ret;
+			goto out;
 	}
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = nvdev->num_chn;
-	device_info.send_sections = nvdev->send_section_cnt;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = nvdev->recv_section_cnt;
-	device_info.recv_section_size = nvdev->recv_section_size;
-
 	ret = netvsc_detach(ndev, nvdev);
 	if (ret)
 		goto rollback_vf;
 
 	ndev->mtu = mtu;
 
-	ret = netvsc_attach(ndev, &device_info);
-	if (ret)
-		goto rollback;
-
-	return 0;
+	ret = netvsc_attach(ndev, device_info);
+	if (!ret)
+		goto out;
 
-rollback:
 	/* Attempt rollback to original MTU */
 	ndev->mtu = orig_mtu;
 
-	if (netvsc_attach(ndev, &device_info))
+	if (netvsc_attach(ndev, device_info))
 		netdev_err(ndev, "restoring mtu failed\n");
 rollback_vf:
 	if (vf_netdev)
 		dev_set_mtu(vf_netdev, orig_mtu);
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -1674,7 +1706,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
 	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info;
 	struct ethtool_ringparam orig;
 	u32 new_tx, new_rx;
 	int ret = 0;
@@ -1694,26 +1726,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 	    new_rx == orig.rx_pending)
 		return 0; /* no change */
 
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = nvdev->num_chn;
-	device_info.send_sections = new_tx;
-	device_info.send_section_size = nvdev->send_section_size;
-	device_info.recv_sections = new_rx;
-	device_info.recv_section_size = nvdev->recv_section_size;
+	device_info = netvsc_devinfo_get(nvdev);
+
+	if (!device_info)
+		return -ENOMEM;
+
+	device_info->send_sections = new_tx;
+	device_info->recv_sections = new_rx;
 
 	ret = netvsc_detach(ndev, nvdev);
 	if (ret)
-		return ret;
+		goto out;
 
-	ret = netvsc_attach(ndev, &device_info);
+	ret = netvsc_attach(ndev, device_info);
 	if (ret) {
-		device_info.send_sections = orig.tx_pending;
-		device_info.recv_sections = orig.rx_pending;
+		device_info->send_sections = orig.tx_pending;
+		device_info->recv_sections = orig.rx_pending;
 
-		if (netvsc_attach(ndev, &device_info))
+		if (netvsc_attach(ndev, device_info))
 			netdev_err(ndev, "restoring ringparam failed");
 	}
 
+out:
+	kfree(device_info);
 	return ret;
 }
 
@@ -2088,7 +2123,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
 	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
 		return NOTIFY_DONE;
 
-	/* if syntihetic interface is a different namespace,
+	/* if synthetic interface is a different namespace,
 	 * then move the VF to that namespace; join will be
 	 * done again in that context.
 	 */
@@ -2167,7 +2202,7 @@ static int netvsc_probe(struct hv_device *dev,
 {
 	struct net_device *net = NULL;
 	struct net_device_context *net_device_ctx;
-	struct netvsc_device_info device_info;
+	struct netvsc_device_info *device_info = NULL;
 	struct netvsc_device *nvdev;
 	int ret = -ENOMEM;
 
@@ -2214,21 +2249,21 @@ static int netvsc_probe(struct hv_device *dev,
 	netif_set_real_num_rx_queues(net, 1);
 
 	/* Notify the netvsc driver of the new device */
-	memset(&device_info, 0, sizeof(device_info));
-	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
-	device_info.send_sections = NETVSC_DEFAULT_TX;
-	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
-	device_info.recv_sections = NETVSC_DEFAULT_RX;
+	device_info = netvsc_devinfo_get(NULL);
+
+	if (!device_info) {
+		ret = -ENOMEM;
+		goto devinfo_failed;
2222 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE; 2257 }
2223 2258
2224 nvdev = rndis_filter_device_add(dev, &device_info); 2259 nvdev = rndis_filter_device_add(dev, device_info);
2225 if (IS_ERR(nvdev)) { 2260 if (IS_ERR(nvdev)) {
2226 ret = PTR_ERR(nvdev); 2261 ret = PTR_ERR(nvdev);
2227 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); 2262 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
2228 goto rndis_failed; 2263 goto rndis_failed;
2229 } 2264 }
2230 2265
2231 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2266 memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
2232 2267
2233 /* We must get rtnl lock before scheduling nvdev->subchan_work, 2268 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2234 * otherwise netvsc_subchan_work() can get rtnl lock first and wait 2269 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2236,7 +2271,7 @@ static int netvsc_probe(struct hv_device *dev,
2236 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer() 2271 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
2237 * -> ... -> device_add() -> ... -> __device_attach() can't get 2272 * -> ... -> device_add() -> ... -> __device_attach() can't get
2238 * the device lock, so all the subchannels can't be processed -- 2273 * the device lock, so all the subchannels can't be processed --
2239 * finally netvsc_subchan_work() hangs for ever. 2274 * finally netvsc_subchan_work() hangs forever.
2240 */ 2275 */
2241 rtnl_lock(); 2276 rtnl_lock();
2242 2277
@@ -2266,12 +2301,16 @@ static int netvsc_probe(struct hv_device *dev,
2266 2301
2267 list_add(&net_device_ctx->list, &netvsc_dev_list); 2302 list_add(&net_device_ctx->list, &netvsc_dev_list);
2268 rtnl_unlock(); 2303 rtnl_unlock();
2304
2305 kfree(device_info);
2269 return 0; 2306 return 0;
2270 2307
2271register_failed: 2308register_failed:
2272 rtnl_unlock(); 2309 rtnl_unlock();
2273 rndis_filter_device_remove(dev, nvdev); 2310 rndis_filter_device_remove(dev, nvdev);
2274rndis_failed: 2311rndis_failed:
2312 kfree(device_info);
2313devinfo_failed:
2275 free_percpu(net_device_ctx->vf_stats); 2314 free_percpu(net_device_ctx->vf_stats);
2276no_stats: 2315no_stats:
2277 hv_set_drvdata(dev, NULL); 2316 hv_set_drvdata(dev, NULL);
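
The recurring edit across netvsc_set_channels(), netvsc_change_mtu(), netvsc_set_ringparam() and netvsc_probe() above is the same fix: struct netvsc_device_info is too large for the kernel stack, so each path now snapshots the live configuration into a kzalloc'd copy via the new netvsc_devinfo_get() helper and frees it on every exit path. A minimal sketch of the shape, assuming a helper like the one this patch adds (locking and some error handling elided):

	static int example_set_channels(struct net_device *net,
					struct netvsc_device *nvdev, u32 count)
	{
		struct netvsc_device_info *info;
		u32 orig = nvdev->num_chn;
		int ret;

		info = netvsc_devinfo_get(nvdev);	/* heap, not stack */
		if (!info)
			return -ENOMEM;

		info->num_chn = count;

		ret = netvsc_detach(net, nvdev);
		if (ret)
			goto out;

		ret = netvsc_attach(net, info);
		if (ret) {
			info->num_chn = orig;		/* attempt rollback */
			if (netvsc_attach(net, info))
				netdev_err(net, "restoring channel setting failed\n");
		}
	out:
		kfree(info);	/* freed on success, failure and rollback alike */
		return ret;
	}
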
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8b537a049c1e..73b60592de06 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -774,8 +774,8 @@ cleanup:
774 return ret; 774 return ret;
775} 775}
776 776
777int rndis_filter_set_rss_param(struct rndis_device *rdev, 777static int rndis_set_rss_param_msg(struct rndis_device *rdev,
778 const u8 *rss_key) 778 const u8 *rss_key, u16 flag)
779{ 779{
780 struct net_device *ndev = rdev->ndev; 780 struct net_device *ndev = rdev->ndev;
781 struct rndis_request *request; 781 struct rndis_request *request;
@@ -804,7 +804,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
804 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; 804 rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
805 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; 805 rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
806 rssp->hdr.size = sizeof(struct ndis_recv_scale_param); 806 rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
807 rssp->flag = 0; 807 rssp->flag = flag;
808 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | 808 rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
809 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | 809 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
810 NDIS_HASH_TCP_IPV6; 810 NDIS_HASH_TCP_IPV6;
@@ -829,9 +829,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
829 829
830 wait_for_completion(&request->wait_event); 830 wait_for_completion(&request->wait_event);
831 set_complete = &request->response_msg.msg.set_complete; 831 set_complete = &request->response_msg.msg.set_complete;
832 if (set_complete->status == RNDIS_STATUS_SUCCESS) 832 if (set_complete->status == RNDIS_STATUS_SUCCESS) {
833 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); 833 if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
834 else { 834 !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
835 memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
836
837 } else {
835 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", 838 netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
836 set_complete->status); 839 set_complete->status);
837 ret = -EINVAL; 840 ret = -EINVAL;
@@ -842,6 +845,16 @@ cleanup:
842 return ret; 845 return ret;
843} 846}
844 847
848int rndis_filter_set_rss_param(struct rndis_device *rdev,
849 const u8 *rss_key)
850{
851 /* Disable RSS before change */
852 rndis_set_rss_param_msg(rdev, rss_key,
853 NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
854
855 return rndis_set_rss_param_msg(rdev, rss_key, 0);
856}
857
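
The reworked RSS path does two things at once: rndis_filter_set_rss_param() is now a wrapper that first quiesces RSS with NDIS_RSS_PARAM_FLAG_DISABLE_RSS and only then programs the new key, and the cached copy in rdev->rss_key is updated only when a message actually installs a key. A hedged sketch of that second condition, with the flag test factored into an illustrative helper that is not in the patch:

	/* Illustrative helper only: rdev->rss_key must track what the host
	 * really hashes with, so a disable message or a key-unchanged
	 * update must not overwrite the cache.
	 */
	static bool rss_msg_installs_key(u16 flag)
	{
		return !(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
		       !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED);
	}
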
845static int rndis_filter_query_device_link_status(struct rndis_device *dev, 858static int rndis_filter_query_device_link_status(struct rndis_device *dev,
846 struct netvsc_device *net_device) 859 struct netvsc_device *net_device)
847{ 860{
@@ -1121,7 +1134,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1121 * This breaks overlap of processing the host message for the 1134 * This breaks overlap of processing the host message for the
1122 * new primary channel with the initialization of sub-channels. 1135 * new primary channel with the initialization of sub-channels.
1123 */ 1136 */
1124int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev) 1137int rndis_set_subchannel(struct net_device *ndev,
1138 struct netvsc_device *nvdev,
1139 struct netvsc_device_info *dev_info)
1125{ 1140{
1126 struct nvsp_message *init_packet = &nvdev->channel_init_pkt; 1141 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1127 struct net_device_context *ndev_ctx = netdev_priv(ndev); 1142 struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1161,8 +1176,11 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
1161 wait_event(nvdev->subchan_open, 1176 wait_event(nvdev->subchan_open,
1162 atomic_read(&nvdev->open_chn) == nvdev->num_chn); 1177 atomic_read(&nvdev->open_chn) == nvdev->num_chn);
1163 1178
1164 /* ignore failues from setting rss parameters, still have channels */ 1179 /* ignore failures from setting rss parameters, still have channels */
1165 rndis_filter_set_rss_param(rdev, netvsc_hash_key); 1180 if (dev_info)
1181 rndis_filter_set_rss_param(rdev, dev_info->rss_key);
1182 else
1183 rndis_filter_set_rss_param(rdev, netvsc_hash_key);
1166 1184
1167 netif_set_real_num_tx_queues(ndev, nvdev->num_chn); 1185 netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
1168 netif_set_real_num_rx_queues(ndev, nvdev->num_chn); 1186 netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 44de81e5f140..c589f5ae75bb 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context)
905 } 905 }
906 break; 906 break;
907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): 907 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
908 /* rx is starting */ 908 /* rx is starting */
909 dev_dbg(printdev(lp), "RX is starting\n"); 909 dev_dbg(printdev(lp), "RX is starting\n");
910 mcr20a_handle_rx(lp); 910 mcr20a_handle_rx(lp);
911 break; 911 break;
912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): 912 case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
913 if (lp->is_tx) { 913 if (lp->is_tx) {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 19bdde60680c..7cdac77d0c68 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
100 err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); 100 err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
101 if (!err) { 101 if (!err) {
102 mdev->l3mdev_ops = &ipvl_l3mdev_ops; 102 mdev->l3mdev_ops = &ipvl_l3mdev_ops;
103 mdev->priv_flags |= IFF_L3MDEV_MASTER; 103 mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
104 } else 104 } else
105 goto fail; 105 goto fail;
106 } else if (port->mode == IPVLAN_MODE_L3S) { 106 } else if (port->mode == IPVLAN_MODE_L3S) {
107 /* Old mode was L3S */ 107 /* Old mode was L3S */
108 mdev->priv_flags &= ~IFF_L3MDEV_MASTER; 108 mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
109 ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); 109 ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
110 mdev->l3mdev_ops = NULL; 110 mdev->l3mdev_ops = NULL;
111 } 111 }
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
167 struct sk_buff *skb; 167 struct sk_buff *skb;
168 168
169 if (port->mode == IPVLAN_MODE_L3S) { 169 if (port->mode == IPVLAN_MODE_L3S) {
170 dev->priv_flags &= ~IFF_L3MDEV_MASTER; 170 dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
171 ipvlan_unregister_nf_hook(dev_net(dev)); 171 ipvlan_unregister_nf_hook(dev_net(dev));
172 dev->l3mdev_ops = NULL; 172 dev->l3mdev_ops = NULL;
173 } 173 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fc726ce4c164..6d067176320f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,7 +337,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
337 337
338 if (src) 338 if (src)
339 dev_put(src->dev); 339 dev_put(src->dev);
340 kfree_skb(skb); 340 consume_skb(skb);
341 } 341 }
342} 342}
343 343
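
The macvlan kfree_skb() to consume_skb() swap is about drop accounting, not memory: kfree_skb() logs the packet as dropped (visible to dropwatch and the skb:kfree_skb tracepoint), while consume_skb() marks a normal end of life. Since the broadcast worker has finished delivering clones at this point, the master skb is consumed, not dropped. A sketch of the convention, with a hypothetical delivery flag:

	/* Sketch only (not macvlan code): free an skb with the call that
	 * matches its fate, so drop monitoring stays meaningful.
	 */
	static void free_by_fate(struct sk_buff *skb, bool delivered)
	{
		if (delivered)
			consume_skb(skb);	/* normal completion */
		else
			kfree_skb(skb);		/* counted as a drop */
	}
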
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
index 8ebe7f5484ae..f14ba5366b91 100644
--- a/drivers/net/phy/asix.c
+++ b/drivers/net/phy/asix.c
@@ -1,13 +1,7 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0+
2/* Driver for Asix PHYs 2/* Driver for Asix PHYs
3 * 3 *
4 * Author: Michael Schmitz <schmitzmic@gmail.com> 4 * Author: Michael Schmitz <schmitzmic@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */ 5 */
12#include <linux/kernel.h> 6#include <linux/kernel.h>
13#include <linux/errno.h> 7#include <linux/errno.h>
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 1b350183bffb..a271239748f2 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -197,6 +197,7 @@ static struct phy_driver bcm87xx_driver[] = {
197 .phy_id = PHY_ID_BCM8706, 197 .phy_id = PHY_ID_BCM8706,
198 .phy_id_mask = 0xffffffff, 198 .phy_id_mask = 0xffffffff,
199 .name = "Broadcom BCM8706", 199 .name = "Broadcom BCM8706",
200 .features = PHY_10GBIT_FEC_FEATURES,
200 .config_init = bcm87xx_config_init, 201 .config_init = bcm87xx_config_init,
201 .config_aneg = bcm87xx_config_aneg, 202 .config_aneg = bcm87xx_config_aneg,
202 .read_status = bcm87xx_read_status, 203 .read_status = bcm87xx_read_status,
@@ -208,6 +209,7 @@ static struct phy_driver bcm87xx_driver[] = {
208 .phy_id = PHY_ID_BCM8727, 209 .phy_id = PHY_ID_BCM8727,
209 .phy_id_mask = 0xffffffff, 210 .phy_id_mask = 0xffffffff,
210 .name = "Broadcom BCM8727", 211 .name = "Broadcom BCM8727",
212 .features = PHY_10GBIT_FEC_FEATURES,
211 .config_init = bcm87xx_config_init, 213 .config_init = bcm87xx_config_init,
212 .config_aneg = bcm87xx_config_aneg, 214 .config_aneg = bcm87xx_config_aneg,
213 .read_status = bcm87xx_read_status, 215 .read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 8022cd317f62..1a4d04afb7f0 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -88,6 +88,7 @@ static struct phy_driver cortina_driver[] = {
88 .phy_id = PHY_ID_CS4340, 88 .phy_id = PHY_ID_CS4340,
89 .phy_id_mask = 0xffffffff, 89 .phy_id_mask = 0xffffffff,
90 .name = "Cortina CS4340", 90 .name = "Cortina CS4340",
91 .features = PHY_10GBIT_FEATURES,
91 .config_init = gen10g_config_init, 92 .config_init = gen10g_config_init,
92 .config_aneg = gen10g_config_aneg, 93 .config_aneg = gen10g_config_aneg,
93 .read_status = cortina_read_status, 94 .read_status = cortina_read_status,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 18b41bc345ab..6e8807212aa3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640,
898 struct phy_txts *phy_txts) 898 struct phy_txts *phy_txts)
899{ 899{
900 struct skb_shared_hwtstamps shhwtstamps; 900 struct skb_shared_hwtstamps shhwtstamps;
901 struct dp83640_skb_info *skb_info;
901 struct sk_buff *skb; 902 struct sk_buff *skb;
902 u64 ns;
903 u8 overflow; 903 u8 overflow;
904 u64 ns;
904 905
905 /* We must already have the skb that triggered this. */ 906 /* We must already have the skb that triggered this. */
906 907again:
907 skb = skb_dequeue(&dp83640->tx_queue); 908 skb = skb_dequeue(&dp83640->tx_queue);
908
909 if (!skb) { 909 if (!skb) {
910 pr_debug("have timestamp but tx_queue empty\n"); 910 pr_debug("have timestamp but tx_queue empty\n");
911 return; 911 return;
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640,
920 } 920 }
921 return; 921 return;
922 } 922 }
923 skb_info = (struct dp83640_skb_info *)skb->cb;
924 if (time_after(jiffies, skb_info->tmo)) {
925 kfree_skb(skb);
926 goto again;
927 }
923 928
924 ns = phy2txts(phy_txts); 929 ns = phy2txts(phy_txts);
925 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 930 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1472static void dp83640_txtstamp(struct phy_device *phydev, 1477static void dp83640_txtstamp(struct phy_device *phydev,
1473 struct sk_buff *skb, int type) 1478 struct sk_buff *skb, int type)
1474{ 1479{
1480 struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
1475 struct dp83640_private *dp83640 = phydev->priv; 1481 struct dp83640_private *dp83640 = phydev->priv;
1476 1482
1477 switch (dp83640->hwts_tx_en) { 1483 switch (dp83640->hwts_tx_en) {
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1484 /* fall through */ 1490 /* fall through */
1485 case HWTSTAMP_TX_ON: 1491 case HWTSTAMP_TX_ON:
1486 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1492 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1493 skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
1487 skb_queue_tail(&dp83640->tx_queue, skb); 1494 skb_queue_tail(&dp83640->tx_queue, skb);
1488 break; 1495 break;
1489 1496
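
The dp83640 change pairs every timestamp-pending TX skb with a deadline stashed in skb->cb (skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT), so decode_txts() can discard skbs whose hardware timestamp never arrived rather than mis-matching a stale skb with a newer timestamp. A minimal sketch of that reap loop, assuming a cb-resident struct like the driver's dp83640_skb_info:

	struct ts_skb_info {		/* lives in skb->cb, as in the patch */
		unsigned long tmo;
	};

	/* Sketch: drop expired entries, return the first live skb. */
	static struct sk_buff *dequeue_live(struct sk_buff_head *q)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(q)) != NULL) {
			struct ts_skb_info *info = (struct ts_skb_info *)skb->cb;

			if (time_after(jiffies, info->tmo)) {
				kfree_skb(skb);	/* timestamp never came */
				continue;
			}
			return skb;
		}
		return NULL;
	}
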
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index a9c7c7f41b0c..abb7876a8776 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
847 847
848 /* SGMII-to-Copper mode initialization */ 848 /* SGMII-to-Copper mode initialization */
849 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { 849 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
850
851 /* Select page 18 */ 850 /* Select page 18 */
852 err = marvell_set_page(phydev, 18); 851 err = marvell_set_page(phydev, 18);
853 if (err < 0) 852 if (err < 0)
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
870 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); 869 err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
871 if (err < 0) 870 if (err < 0)
872 return err; 871 return err;
873
874 /* There appears to be a bug in the 88e1512 when used in
875 * SGMII to copper mode, where the AN advertisement register
876 * clears the pause bits each time a negotiation occurs.
877 * This means we can never be truely sure what was advertised,
878 * so disable Pause support.
879 */
880 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
881 phydev->supported);
882 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
883 phydev->supported);
884 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
885 phydev->advertising);
886 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
887 phydev->advertising);
888 } 872 }
889 873
890 return m88e1318_config_init(phydev); 874 return m88e1318_config_init(phydev);
@@ -1046,6 +1030,39 @@ static int m88e1145_config_init(struct phy_device *phydev)
1046 return 0; 1030 return 0;
1047} 1031}
1048 1032
1033/* The VOD can be out of specification on link up. Poke an
1034 * undocumented register, in an undocumented page, with a magic value
1035 * to fix this.
1036 */
1037static int m88e6390_errata(struct phy_device *phydev)
1038{
1039 int err;
1040
1041 err = phy_write(phydev, MII_BMCR,
1042 BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX);
1043 if (err)
1044 return err;
1045
1046 usleep_range(300, 400);
1047
1048 err = phy_write_paged(phydev, 0xf8, 0x08, 0x36);
1049 if (err)
1050 return err;
1051
1052 return genphy_soft_reset(phydev);
1053}
1054
1055static int m88e6390_config_aneg(struct phy_device *phydev)
1056{
1057 int err;
1058
1059 err = m88e6390_errata(phydev);
1060 if (err)
1061 return err;
1062
1063 return m88e1510_config_aneg(phydev);
1064}
1065
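
Worth noting for the errata helper above: phy_write_paged() bundles the select-page, write, restore-page sequence under the MDIO bus lock, which is why m88e6390_errata() can poke page 0xf8 without saving and restoring the current page by hand. As a usage sketch (0x08 and 0x36 are the magic register and value from the errata):

	/* Sketch: one locked, page-switched write. */
	static int poke_errata_reg(struct phy_device *phydev)
	{
		return phy_write_paged(phydev, 0xf8, 0x08, 0x36);
	}
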
1049/** 1066/**
1050 * fiber_lpa_mod_linkmode_lpa_t 1067 * fiber_lpa_mod_linkmode_lpa_t
1051 * @advertising: the linkmode advertisement settings 1068 * @advertising: the linkmode advertisement settings
@@ -1402,7 +1419,7 @@ static int m88e1318_set_wol(struct phy_device *phydev,
1402 * before enabling it if !phy_interrupt_is_valid() 1419 * before enabling it if !phy_interrupt_is_valid()
1403 */ 1420 */
1404 if (!phy_interrupt_is_valid(phydev)) 1421 if (!phy_interrupt_is_valid(phydev))
1405 phy_read(phydev, MII_M1011_IEVENT); 1422 __phy_read(phydev, MII_M1011_IEVENT);
1406 1423
1407 /* Enable the WOL interrupt */ 1424 /* Enable the WOL interrupt */
1408 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, 1425 err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
@@ -2283,7 +2300,7 @@ static struct phy_driver marvell_drivers[] = {
2283 .features = PHY_GBIT_FEATURES, 2300 .features = PHY_GBIT_FEATURES,
2284 .probe = m88e6390_probe, 2301 .probe = m88e6390_probe,
2285 .config_init = &marvell_config_init, 2302 .config_init = &marvell_config_init,
2286 .config_aneg = &m88e1510_config_aneg, 2303 .config_aneg = &m88e6390_config_aneg,
2287 .read_status = &marvell_read_status, 2304 .read_status = &marvell_read_status,
2288 .ack_interrupt = &marvell_ack_interrupt, 2305 .ack_interrupt = &marvell_ack_interrupt,
2289 .config_intr = &marvell_config_intr, 2306 .config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c
index b03fedd6c1d8..287f3ccf1da1 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/phy/mdio-hisi-femac.c
@@ -1,20 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0+
1/* 2/*
2 * Hisilicon Fast Ethernet MDIO Bus Driver 3 * Hisilicon Fast Ethernet MDIO Bus Driver
3 * 4 *
4 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. 5 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 6 */
19 7
20#include <linux/clk.h> 8#include <linux/clk.h>
@@ -163,4 +151,4 @@ module_platform_driver(hisi_femac_mdio_driver);
163 151
164MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); 152MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver");
165MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); 153MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
166MODULE_LICENSE("GPL v2"); 154MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e59a8419b17..66b9cfe692fc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -390,6 +390,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
390 if (IS_ERR(gpiod)) { 390 if (IS_ERR(gpiod)) {
391 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", 391 dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
392 bus->id); 392 bus->id);
393 device_del(&bus->dev);
393 return PTR_ERR(gpiod); 394 return PTR_ERR(gpiod);
394 } else if (gpiod) { 395 } else if (gpiod) {
395 bus->reset_gpiod = gpiod; 396 bus->reset_gpiod = gpiod;
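
The one-line mdio_bus fix is a classic unwind bug: by the time the reset-GPIO lookup can fail, __mdiobus_register() has already called device_register(), so returning without device_del() leaks a half-registered device. A schematic of the rule, with a hypothetical later step:

	/* Sketch: every failure after device_register() must unwind it.
	 * (A failed device_register() itself wants put_device() instead,
	 * elided here.)
	 */
	static int example_bus_register(struct mii_bus *bus)
	{
		int err;

		err = device_register(&bus->dev);
		if (err)
			return err;

		err = acquire_reset_gpio(bus);	/* hypothetical later step */
		if (err) {
			device_del(&bus->dev);	/* undo device_register() */
			return err;
		}
		return 0;
	}
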
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index b03bcf2c388a..3ddaf9595697 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -233,6 +233,7 @@ static struct phy_driver meson_gxl_phy[] = {
233 .name = "Meson GXL Internal PHY", 233 .name = "Meson GXL Internal PHY",
234 .features = PHY_BASIC_FEATURES, 234 .features = PHY_BASIC_FEATURES,
235 .flags = PHY_IS_INTERNAL, 235 .flags = PHY_IS_INTERNAL,
236 .soft_reset = genphy_soft_reset,
236 .config_init = meson_gxl_config_init, 237 .config_init = meson_gxl_config_init,
237 .aneg_done = genphy_aneg_done, 238 .aneg_done = genphy_aneg_done,
238 .read_status = meson_gxl_read_status, 239 .read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c33384710d26..b1f959935f50 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1070,6 +1070,7 @@ static struct phy_driver ksphy_driver[] = {
1070 .driver_data = &ksz9021_type, 1070 .driver_data = &ksz9021_type,
1071 .probe = kszphy_probe, 1071 .probe = kszphy_probe,
1072 .config_init = ksz9031_config_init, 1072 .config_init = ksz9031_config_init,
1073 .soft_reset = genphy_soft_reset,
1073 .read_status = ksz9031_read_status, 1074 .read_status = ksz9031_read_status,
1074 .ack_interrupt = kszphy_ack_interrupt, 1075 .ack_interrupt = kszphy_ack_interrupt,
1075 .config_intr = kszphy_config_intr, 1076 .config_intr = kszphy_config_intr,
@@ -1098,6 +1099,7 @@ static struct phy_driver ksphy_driver[] = {
1098 .phy_id = PHY_ID_KSZ8873MLL, 1099 .phy_id = PHY_ID_KSZ8873MLL,
1099 .phy_id_mask = MICREL_PHY_ID_MASK, 1100 .phy_id_mask = MICREL_PHY_ID_MASK,
1100 .name = "Micrel KSZ8873MLL Switch", 1101 .name = "Micrel KSZ8873MLL Switch",
1102 .features = PHY_BASIC_FEATURES,
1101 .config_init = kszphy_config_init, 1103 .config_init = kszphy_config_init,
1102 .config_aneg = ksz8873mll_config_aneg, 1104 .config_aneg = ksz8873mll_config_aneg,
1103 .read_status = ksz8873mll_read_status, 1105 .read_status = ksz8873mll_read_status,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d33e7b3caf03..c5675df5fc6f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -543,13 +543,6 @@ int phy_start_aneg(struct phy_device *phydev)
543 543
544 mutex_lock(&phydev->lock); 544 mutex_lock(&phydev->lock);
545 545
546 if (!__phy_is_started(phydev)) {
547 WARN(1, "called from state %s\n",
548 phy_state_to_str(phydev->state));
549 err = -EBUSY;
550 goto out_unlock;
551 }
552
553 if (AUTONEG_DISABLE == phydev->autoneg) 546 if (AUTONEG_DISABLE == phydev->autoneg)
554 phy_sanitize_settings(phydev); 547 phy_sanitize_settings(phydev);
555 548
@@ -560,11 +553,13 @@ int phy_start_aneg(struct phy_device *phydev)
560 if (err < 0) 553 if (err < 0)
561 goto out_unlock; 554 goto out_unlock;
562 555
563 if (phydev->autoneg == AUTONEG_ENABLE) { 556 if (phy_is_started(phydev)) {
564 err = phy_check_link_status(phydev); 557 if (phydev->autoneg == AUTONEG_ENABLE) {
565 } else { 558 err = phy_check_link_status(phydev);
566 phydev->state = PHY_FORCING; 559 } else {
567 phydev->link_timeout = PHY_FORCE_TIMEOUT; 560 phydev->state = PHY_FORCING;
561 phydev->link_timeout = PHY_FORCE_TIMEOUT;
562 }
568 } 563 }
569 564
570out_unlock: 565out_unlock:
@@ -714,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev)
714 cancel_delayed_work_sync(&phydev->state_queue); 709 cancel_delayed_work_sync(&phydev->state_queue);
715 710
716 mutex_lock(&phydev->lock); 711 mutex_lock(&phydev->lock);
717 if (__phy_is_started(phydev)) 712 if (phy_is_started(phydev))
718 phydev->state = PHY_UP; 713 phydev->state = PHY_UP;
719 mutex_unlock(&phydev->lock); 714 mutex_unlock(&phydev->lock);
720} 715}
@@ -767,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
767{ 762{
768 struct phy_device *phydev = phy_dat; 763 struct phy_device *phydev = phy_dat;
769 764
770 if (!phy_is_started(phydev))
771 return IRQ_NONE; /* It can't be ours. */
772
773 if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) 765 if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
774 return IRQ_NONE; 766 return IRQ_NONE;
775 767
@@ -847,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts);
847 */ 839 */
848void phy_stop(struct phy_device *phydev) 840void phy_stop(struct phy_device *phydev)
849{ 841{
850 mutex_lock(&phydev->lock); 842 if (!phy_is_started(phydev)) {
851
852 if (!__phy_is_started(phydev)) {
853 WARN(1, "called from state %s\n", 843 WARN(1, "called from state %s\n",
854 phy_state_to_str(phydev->state)); 844 phy_state_to_str(phydev->state));
855 mutex_unlock(&phydev->lock);
856 return; 845 return;
857 } 846 }
858 847
848 mutex_lock(&phydev->lock);
849
859 if (phy_interrupt_is_valid(phydev)) 850 if (phy_interrupt_is_valid(phydev))
860 phy_disable_interrupts(phydev); 851 phy_disable_interrupts(phydev);
861 852
@@ -994,8 +985,10 @@ void phy_state_machine(struct work_struct *work)
994 * state machine would be pointless and possibly error prone when 985 * state machine would be pointless and possibly error prone when
995 * called from phy_disconnect() synchronously. 986 * called from phy_disconnect() synchronously.
996 */ 987 */
988 mutex_lock(&phydev->lock);
997 if (phy_polling_mode(phydev) && phy_is_started(phydev)) 989 if (phy_polling_mode(phydev) && phy_is_started(phydev))
998 phy_queue_state_machine(phydev, PHY_STATE_TIME); 990 phy_queue_state_machine(phydev, PHY_STATE_TIME);
991 mutex_unlock(&phydev->lock);
999} 992}
1000 993
1001/** 994/**
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 51990002d495..46c86725a693 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(phy_gbit_all_ports_features);
61__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; 61__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init;
62EXPORT_SYMBOL_GPL(phy_10gbit_features); 62EXPORT_SYMBOL_GPL(phy_10gbit_features);
63 63
64__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init;
65EXPORT_SYMBOL_GPL(phy_10gbit_fec_features);
66
64static const int phy_basic_ports_array[] = { 67static const int phy_basic_ports_array[] = {
65 ETHTOOL_LINK_MODE_Autoneg_BIT, 68 ETHTOOL_LINK_MODE_Autoneg_BIT,
66 ETHTOOL_LINK_MODE_TP_BIT, 69 ETHTOOL_LINK_MODE_TP_BIT,
@@ -109,6 +112,11 @@ const int phy_10gbit_features_array[1] = {
109}; 112};
110EXPORT_SYMBOL_GPL(phy_10gbit_features_array); 113EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
111 114
115const int phy_10gbit_fec_features_array[1] = {
116 ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
117};
118EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
119
112__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; 120__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
113EXPORT_SYMBOL_GPL(phy_10gbit_full_features); 121EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
114 122
@@ -191,6 +199,10 @@ static void features_init(void)
191 linkmode_set_bit_array(phy_10gbit_full_features_array, 199 linkmode_set_bit_array(phy_10gbit_full_features_array,
192 ARRAY_SIZE(phy_10gbit_full_features_array), 200 ARRAY_SIZE(phy_10gbit_full_features_array),
193 phy_10gbit_full_features); 201 phy_10gbit_full_features);
202 /* 10G FEC only */
203 linkmode_set_bit_array(phy_10gbit_fec_features_array,
204 ARRAY_SIZE(phy_10gbit_fec_features_array),
205 phy_10gbit_fec_features);
194} 206}
195 207
196void phy_device_free(struct phy_device *phydev) 208void phy_device_free(struct phy_device *phydev)
@@ -2243,6 +2255,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
2243{ 2255{
2244 int retval; 2256 int retval;
2245 2257
2258 if (WARN_ON(!new_driver->features)) {
2259 pr_err("%s: Driver features are missing\n", new_driver->name);
2260 return -EINVAL;
2261 }
2262
2246 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY; 2263 new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
2247 new_driver->mdiodrv.driver.name = new_driver->name; 2264 new_driver->mdiodrv.driver.name = new_driver->name;
2248 new_driver->mdiodrv.driver.bus = &mdio_bus_type; 2265 new_driver->mdiodrv.driver.bus = &mdio_bus_type;
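
The new WARN_ON(!new_driver->features) in phy_driver_register() is what ties together the scattered ".features = ..." additions in this series (bcm87xx, cortina, micrel, teranetics, plus the new PHY_10GBIT_FEC_FEATURES mask): a PHY driver without a features mask is now rejected at registration time instead of misbehaving later. The minimal conforming shape, as a sketch with an invented ID:

	static struct phy_driver example_driver[] = {
	{
		.phy_id		= 0x00112233,	/* illustrative only */
		.phy_id_mask	= 0xffffffff,
		.name		= "Example 10G PHY",
		.features	= PHY_10GBIT_FEC_FEATURES,	/* now mandatory */
		.config_init	= gen10g_config_init,
		.config_aneg	= gen10g_config_aneg,
	},
	};
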
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e7becc7379d7..938803237d7f 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl)
474 queue_work(system_power_efficient_wq, &pl->resolve); 474 queue_work(system_power_efficient_wq, &pl->resolve);
475} 475}
476 476
477static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
478{
479 unsigned long state = pl->phylink_disable_state;
480
481 set_bit(bit, &pl->phylink_disable_state);
482 if (state == 0) {
483 queue_work(system_power_efficient_wq, &pl->resolve);
484 flush_work(&pl->resolve);
485 }
486}
487
477static void phylink_fixed_poll(struct timer_list *t) 488static void phylink_fixed_poll(struct timer_list *t)
478{ 489{
479 struct phylink *pl = container_of(t, struct phylink, link_poll); 490 struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl)
924 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) 935 if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
925 del_timer_sync(&pl->link_poll); 936 del_timer_sync(&pl->link_poll);
926 937
927 set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); 938 phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
928 queue_work(system_power_efficient_wq, &pl->resolve);
929 flush_work(&pl->resolve);
930} 939}
931EXPORT_SYMBOL_GPL(phylink_stop); 940EXPORT_SYMBOL_GPL(phylink_stop);
932 941
@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream)
1632 1641
1633 ASSERT_RTNL(); 1642 ASSERT_RTNL();
1634 1643
1635 set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); 1644 phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
1636 queue_work(system_power_efficient_wq, &pl->resolve);
1637 flush_work(&pl->resolve);
1638} 1645}
1639 1646
1640static void phylink_sfp_link_up(void *upstream) 1647static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index f1da70b9b55f..95abf7072f32 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+
1/** 2/**
2 * drivers/net/phy/rockchip.c 3 * drivers/net/phy/rockchip.c
3 * 4 *
@@ -6,12 +7,6 @@
6 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd 7 * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd
7 * 8 *
8 * David Wu <david.wu@rock-chips.com> 9 * David Wu <david.wu@rock-chips.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 */ 10 */
16 11
17#include <linux/ethtool.h> 12#include <linux/ethtool.h>
@@ -229,4 +224,4 @@ MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl);
229 224
230MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>"); 225MODULE_AUTHOR("David Wu <david.wu@rock-chips.com>");
231MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); 226MODULE_DESCRIPTION("Rockchip Ethernet PHY driver");
232MODULE_LICENSE("GPL v2"); 227MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ad9db652874d..fef701bfad62 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
347 return ret; 347 return ret;
348 } 348 }
349 } 349 }
350 bus->socket_ops->attach(bus->sfp);
350 if (bus->started) 351 if (bus->started)
351 bus->socket_ops->start(bus->sfp); 352 bus->socket_ops->start(bus->sfp);
352 bus->netdev->sfp_bus = bus; 353 bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
362 if (bus->registered) { 363 if (bus->registered) {
363 if (bus->started) 364 if (bus->started)
364 bus->socket_ops->stop(bus->sfp); 365 bus->socket_ops->stop(bus->sfp);
366 bus->socket_ops->detach(bus->sfp);
365 if (bus->phydev && ops && ops->disconnect_phy) 367 if (bus->phydev && ops && ops->disconnect_phy)
366 ops->disconnect_phy(bus->upstream); 368 ops->disconnect_phy(bus->upstream);
367 } 369 }
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb998ae52..68c8fbf099f8 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -184,6 +184,7 @@ struct sfp {
184 184
185 struct gpio_desc *gpio[GPIO_MAX]; 185 struct gpio_desc *gpio[GPIO_MAX];
186 186
187 bool attached;
187 unsigned int state; 188 unsigned int state;
188 struct delayed_work poll; 189 struct delayed_work poll;
189 struct delayed_work timeout; 190 struct delayed_work timeout;
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
1475 */ 1476 */
1476 switch (sfp->sm_mod_state) { 1477 switch (sfp->sm_mod_state) {
1477 default: 1478 default:
1478 if (event == SFP_E_INSERT) { 1479 if (event == SFP_E_INSERT && sfp->attached) {
1479 sfp_module_tx_disable(sfp); 1480 sfp_module_tx_disable(sfp);
1480 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); 1481 sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
1481 } 1482 }
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
1607 mutex_unlock(&sfp->sm_mutex); 1608 mutex_unlock(&sfp->sm_mutex);
1608} 1609}
1609 1610
1611static void sfp_attach(struct sfp *sfp)
1612{
1613 sfp->attached = true;
1614 if (sfp->state & SFP_F_PRESENT)
1615 sfp_sm_event(sfp, SFP_E_INSERT);
1616}
1617
1618static void sfp_detach(struct sfp *sfp)
1619{
1620 sfp->attached = false;
1621 sfp_sm_event(sfp, SFP_E_REMOVE);
1622}
1623
1610static void sfp_start(struct sfp *sfp) 1624static void sfp_start(struct sfp *sfp)
1611{ 1625{
1612 sfp_sm_event(sfp, SFP_E_DEV_UP); 1626 sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
1667} 1681}
1668 1682
1669static const struct sfp_socket_ops sfp_module_ops = { 1683static const struct sfp_socket_ops sfp_module_ops = {
1684 .attach = sfp_attach,
1685 .detach = sfp_detach,
1670 .start = sfp_start, 1686 .start = sfp_start,
1671 .stop = sfp_stop, 1687 .stop = sfp_stop,
1672 .module_info = sfp_module_info, 1688 .module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
1834 dev_info(sfp->dev, "Host maximum power %u.%uW\n", 1850 dev_info(sfp->dev, "Host maximum power %u.%uW\n",
1835 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); 1851 sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
1836 1852
1837 sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
1838 if (!sfp->sfp_bus)
1839 return -ENOMEM;
1840
1841 /* Get the initial state, and always signal TX disable, 1853 /* Get the initial state, and always signal TX disable,
1842 * since the network interface will not be up. 1854 * since the network interface will not be up.
1843 */ 1855 */
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
1848 sfp->state |= SFP_F_RATE_SELECT; 1860 sfp->state |= SFP_F_RATE_SELECT;
1849 sfp_set_state(sfp, sfp->state); 1861 sfp_set_state(sfp, sfp->state);
1850 sfp_module_tx_disable(sfp); 1862 sfp_module_tx_disable(sfp);
1851 rtnl_lock();
1852 if (sfp->state & SFP_F_PRESENT)
1853 sfp_sm_event(sfp, SFP_E_INSERT);
1854 rtnl_unlock();
1855 1863
1856 for (i = 0; i < GPIO_MAX; i++) { 1864 for (i = 0; i < GPIO_MAX; i++) {
1857 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) 1865 if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
1884 dev_warn(sfp->dev, 1892 dev_warn(sfp->dev,
1885 "No tx_disable pin: SFP modules will always be emitting.\n"); 1893 "No tx_disable pin: SFP modules will always be emitting.\n");
1886 1894
1895 sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
1896 if (!sfp->sfp_bus)
1897 return -ENOMEM;
1898
1887 return 0; 1899 return 0;
1888} 1900}
1889 1901
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 31b0acf337e2..64f54b0bbd8c 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -7,6 +7,8 @@
7struct sfp; 7struct sfp;
8 8
9struct sfp_socket_ops { 9struct sfp_socket_ops {
10 void (*attach)(struct sfp *sfp);
11 void (*detach)(struct sfp *sfp);
10 void (*start)(struct sfp *sfp); 12 void (*start)(struct sfp *sfp);
11 void (*stop)(struct sfp *sfp); 13 void (*stop)(struct sfp *sfp);
12 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); 14 int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index 22f3bdd8206c..91247182bc52 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -80,6 +80,7 @@ static struct phy_driver teranetics_driver[] = {
80 .phy_id = PHY_ID_TN2020, 80 .phy_id = PHY_ID_TN2020,
81 .phy_id_mask = 0xffffffff, 81 .phy_id_mask = 0xffffffff,
82 .name = "Teranetics TN2020", 82 .name = "Teranetics TN2020",
83 .features = PHY_10GBIT_FEATURES,
83 .soft_reset = gen10g_no_soft_reset, 84 .soft_reset = gen10g_no_soft_reset,
84 .aneg_done = teranetics_aneg_done, 85 .aneg_done = teranetics_aneg_done,
85 .config_init = gen10g_config_init, 86 .config_init = gen10g_config_init,
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 62dc564b251d..f22639f0116a 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
445 if (pskb_trim_rcsum(skb, len)) 445 if (pskb_trim_rcsum(skb, len))
446 goto drop; 446 goto drop;
447 447
448 ph = pppoe_hdr(skb);
448 pn = pppoe_pernet(dev_net(dev)); 449 pn = pppoe_pernet(dev_net(dev));
449 450
450 /* Note that get_item does a sock_hold(), so sk_pppox(po) 451 /* Note that get_item does a sock_hold(), so sk_pppox(po)
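
The pppoe_rcv() one-liner is the canonical fix for a pointer-reuse bug: pskb_trim_rcsum(), like pskb_may_pull(), may reallocate the skb header, so a pointer taken into the packet beforehand (here ph = pppoe_hdr(skb)) must be re-derived afterwards. A condensed sketch of the rule, not the exact pppoe_rcv() flow:

	static int example_rcv(struct sk_buff *skb, unsigned int len)
	{
		struct pppoe_hdr *ph;

		if (pskb_trim_rcsum(skb, len))	/* may reallocate skb->data */
			return -ENOMEM;

		ph = pppoe_hdr(skb);	/* re-derive: old pointer may be stale */
		return ph->ver == 1 ? 0 : -EINVAL;
	}
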
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index afd9d25d1992..958f1cf67282 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
256 } 256 }
257} 257}
258 258
259static bool __team_option_inst_tmp_find(const struct list_head *opts,
260 const struct team_option_inst *needle)
261{
262 struct team_option_inst *opt_inst;
263
264 list_for_each_entry(opt_inst, opts, tmp_list)
265 if (opt_inst == needle)
266 return true;
267 return false;
268}
269
270static int __team_options_register(struct team *team, 259static int __team_options_register(struct team *team,
271 const struct team_option *option, 260 const struct team_option *option,
272 size_t option_count) 261 size_t option_count)
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2460 int err = 0; 2449 int err = 0;
2461 int i; 2450 int i;
2462 struct nlattr *nl_option; 2451 struct nlattr *nl_option;
2463 LIST_HEAD(opt_inst_list);
2464 2452
2465 rtnl_lock(); 2453 rtnl_lock();
2466 2454
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2480 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; 2468 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2481 struct nlattr *attr; 2469 struct nlattr *attr;
2482 struct nlattr *attr_data; 2470 struct nlattr *attr_data;
2471 LIST_HEAD(opt_inst_list);
2483 enum team_option_type opt_type; 2472 enum team_option_type opt_type;
2484 int opt_port_ifindex = 0; /* != 0 for per-port options */ 2473 int opt_port_ifindex = 0; /* != 0 for per-port options */
2485 u32 opt_array_index = 0; 2474 u32 opt_array_index = 0;
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2584 if (err) 2573 if (err)
2585 goto team_put; 2574 goto team_put;
2586 opt_inst->changed = true; 2575 opt_inst->changed = true;
2587
2588 /* dumb/evil user-space can send us duplicate opt,
2589 * keep only the last one
2590 */
2591 if (__team_option_inst_tmp_find(&opt_inst_list,
2592 opt_inst))
2593 continue;
2594
2595 list_add(&opt_inst->tmp_list, &opt_inst_list); 2576 list_add(&opt_inst->tmp_list, &opt_inst_list);
2596 } 2577 }
2597 if (!opt_found) { 2578 if (!opt_found) {
2598 err = -ENOENT; 2579 err = -ENOENT;
2599 goto team_put; 2580 goto team_put;
2600 } 2581 }
2601 }
2602 2582
2603 err = team_nl_send_event_options_get(team, &opt_inst_list); 2583 err = team_nl_send_event_options_get(team, &opt_inst_list);
2584 if (err)
2585 break;
2586 }
2604 2587
2605team_put: 2588team_put:
2606 team_nl_team_put(team); 2589 team_nl_team_put(team);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a4fdad475594..fed298c0cb39 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -856,10 +856,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
856 err = 0; 856 err = 0;
857 } 857 }
858 858
859 rcu_assign_pointer(tfile->tun, tun);
860 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
861 tun->numqueues++;
862
863 if (tfile->detached) { 859 if (tfile->detached) {
864 tun_enable_queue(tfile); 860 tun_enable_queue(tfile);
865 } else { 861 } else {
@@ -870,12 +866,18 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
870 if (rtnl_dereference(tun->xdp_prog)) 866 if (rtnl_dereference(tun->xdp_prog))
871 sock_set_flag(&tfile->sk, SOCK_XDP); 867 sock_set_flag(&tfile->sk, SOCK_XDP);
872 868
873 tun_set_real_num_queues(tun);
874
875 /* device is allowed to go away first, so no need to hold extra 869 /* device is allowed to go away first, so no need to hold extra
876 * refcnt. 870 * refcnt.
877 */ 871 */
878 872
873 /* Publish tfile->tun and tun->tfiles only after we've fully
874 * initialized tfile; otherwise we risk using half-initialized
875 * object.
876 */
877 rcu_assign_pointer(tfile->tun, tun);
878 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
879 tun->numqueues++;
880 tun_set_real_num_queues(tun);
879out: 881out:
880 return err; 882 return err;
881} 883}
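
The tun_attach() reshuffle is an RCU publication-ordering fix, and the new comment in the hunk states the rule: rcu_assign_pointer() is the publication point, so tfile must be fully initialized before it becomes reachable through tfile->tun or tun->tfiles[]. The general shape, as a self-contained sketch:

	struct conn { int state; };

	static struct conn __rcu *active_conn;

	/* Sketch: readers under rcu_read_lock() may see the pointer the
	 * instant it is assigned, so initialize first, publish last.
	 */
	static void publish_conn(struct conn *c)
	{
		c->state = 1;				/* 1. finish init */
		rcu_assign_pointer(active_conn, c);	/* 2. then publish */
	}
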
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 57f1c94fca0b..820a2fe7d027 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1287,6 +1287,20 @@ static const struct driver_info asix112_info = {
1287 1287
1288#undef ASIX112_DESC 1288#undef ASIX112_DESC
1289 1289
1290static const struct driver_info trendnet_info = {
1291 .description = "USB-C 3.1 to 5GBASE-T Ethernet Adapter",
1292 .bind = aqc111_bind,
1293 .unbind = aqc111_unbind,
1294 .status = aqc111_status,
1295 .link_reset = aqc111_link_reset,
1296 .reset = aqc111_reset,
1297 .stop = aqc111_stop,
1298 .flags = FLAG_ETHER | FLAG_FRAMING_AX |
1299 FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
1300 .rx_fixup = aqc111_rx_fixup,
1301 .tx_fixup = aqc111_tx_fixup,
1302};
1303
1290static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) 1304static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
1291{ 1305{
1292 struct usbnet *dev = usb_get_intfdata(intf); 1306 struct usbnet *dev = usb_get_intfdata(intf);
@@ -1440,6 +1454,7 @@ static const struct usb_device_id products[] = {
1440 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, 1454 {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
1441 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, 1455 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
1442 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, 1456 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
1457 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
1443 { },/* END */ 1458 { },/* END */
1444}; 1459};
1445MODULE_DEVICE_TABLE(usb, products); 1460MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05b2ccd..3d93993e74da 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
739 asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); 739 asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
740 chipcode &= AX_CHIPCODE_MASK; 740 chipcode &= AX_CHIPCODE_MASK;
741 741
742 (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) : 742 ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
743 ax88772a_hw_reset(dev, 0); 743 ax88772a_hw_reset(dev, 0);
744
745 if (ret < 0) {
746 netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
747 return ret;
748 }
744 749
745 /* Read PHYID register *AFTER* the PHY was reset properly */ 750 /* Read PHYID register *AFTER* the PHY was reset properly */
746 phyid = asix_get_phyid(dev); 751 phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index b3b3c05903a1..5512a1038721 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -179,10 +179,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
179 * probed with) and a slave/data interface; union 179 * probed with) and a slave/data interface; union
180 * descriptors sort this all out. 180 * descriptors sort this all out.
181 */ 181 */
182 info->control = usb_ifnum_to_if(dev->udev, 182 info->control = usb_ifnum_to_if(dev->udev, info->u->bMasterInterface0);
183 info->u->bMasterInterface0); 183 info->data = usb_ifnum_to_if(dev->udev, info->u->bSlaveInterface0);
184 info->data = usb_ifnum_to_if(dev->udev,
185 info->u->bSlaveInterface0);
186 if (!info->control || !info->data) { 184 if (!info->control || !info->data) {
187 dev_dbg(&intf->dev, 185 dev_dbg(&intf->dev,
188 "master #%u/%p slave #%u/%p\n", 186 "master #%u/%p slave #%u/%p\n",
@@ -216,18 +214,16 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
216 /* a data interface altsetting does the real i/o */ 214 /* a data interface altsetting does the real i/o */
217 d = &info->data->cur_altsetting->desc; 215 d = &info->data->cur_altsetting->desc;
218 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) { 216 if (d->bInterfaceClass != USB_CLASS_CDC_DATA) {
219 dev_dbg(&intf->dev, "slave class %u\n", 217 dev_dbg(&intf->dev, "slave class %u\n", d->bInterfaceClass);
220 d->bInterfaceClass);
221 goto bad_desc; 218 goto bad_desc;
222 } 219 }
223skip: 220skip:
224 if ( rndis && 221 if (rndis && header.usb_cdc_acm_descriptor &&
225 header.usb_cdc_acm_descriptor && 222 header.usb_cdc_acm_descriptor->bmCapabilities) {
226 header.usb_cdc_acm_descriptor->bmCapabilities) { 223 dev_dbg(&intf->dev,
227 dev_dbg(&intf->dev, 224 "ACM capabilities %02x, not really RNDIS?\n",
228 "ACM capabilities %02x, not really RNDIS?\n", 225 header.usb_cdc_acm_descriptor->bmCapabilities);
229 header.usb_cdc_acm_descriptor->bmCapabilities); 226 goto bad_desc;
230 goto bad_desc;
231 } 227 }
232 228
233 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) { 229 if (header.usb_cdc_ether_desc && info->ether->wMaxSegmentSize) {
@@ -238,7 +234,7 @@ skip:
238 } 234 }
239 235
240 if (header.usb_cdc_mdlm_desc && 236 if (header.usb_cdc_mdlm_desc &&
241 memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) { 237 memcmp(header.usb_cdc_mdlm_desc->bGUID, mbm_guid, 16)) {
242 dev_dbg(&intf->dev, "GUID doesn't match\n"); 238 dev_dbg(&intf->dev, "GUID doesn't match\n");
243 goto bad_desc; 239 goto bad_desc;
244 } 240 }
@@ -302,7 +298,7 @@ skip:
302 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) { 298 if (info->control->cur_altsetting->desc.bNumEndpoints == 1) {
303 struct usb_endpoint_descriptor *desc; 299 struct usb_endpoint_descriptor *desc;
304 300
305 dev->status = &info->control->cur_altsetting->endpoint [0]; 301 dev->status = &info->control->cur_altsetting->endpoint[0];
306 desc = &dev->status->desc; 302 desc = &dev->status->desc;
307 if (!usb_endpoint_is_int_in(desc) || 303 if (!usb_endpoint_is_int_in(desc) ||
308 (le16_to_cpu(desc->wMaxPacketSize) 304 (le16_to_cpu(desc->wMaxPacketSize)
@@ -847,6 +843,14 @@ static const struct usb_device_id products[] = {
847 .driver_info = 0, 843 .driver_info = 0,
848}, 844},
849 845
846/* USB-C 3.1 to 5GBASE-T Ethernet Adapter (based on AQC111U) */
847{
848 USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0xe05a, USB_CLASS_COMM,
849 USB_CDC_SUBCLASS_ETHERNET,
850 USB_CDC_PROTO_NONE),
851 .driver_info = 0,
852},
853
850/* WHITELIST!!! 854/* WHITELIST!!!
851 * 855 *
852 * CDC Ether uses two interfaces, not necessarily consecutive. 856 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 774e1ff01c9a..735ad838e2ba 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -123,6 +123,7 @@ static void qmimux_setup(struct net_device *dev)
123 dev->addr_len = 0; 123 dev->addr_len = 0;
124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
125 dev->netdev_ops = &qmimux_netdev_ops; 125 dev->netdev_ops = &qmimux_netdev_ops;
126 dev->mtu = 1500;
126 dev->needs_free_netdev = true; 127 dev->needs_free_netdev = true;
127} 128}
128 129
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 023725086046..4cfceb789eea 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
57#define VIRTIO_XDP_TX BIT(0) 57#define VIRTIO_XDP_TX BIT(0)
58#define VIRTIO_XDP_REDIR BIT(1) 58#define VIRTIO_XDP_REDIR BIT(1)
59 59
60#define VIRTIO_XDP_FLAG BIT(0)
61
60/* RX packet size EWMA. The average packet size is used to determine the packet 62/* RX packet size EWMA. The average packet size is used to determine the packet
61 * buffer size when refilling RX rings. As the entire RX ring may be refilled 63 * buffer size when refilling RX rings. As the entire RX ring may be refilled
62 * at once, the weight is chosen so that the EWMA will be insensitive to short- 64 * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -252,6 +254,21 @@ struct padded_vnet_hdr {
252 char padding[4]; 254 char padding[4];
253}; 255};
254 256
257static bool is_xdp_frame(void *ptr)
258{
259 return (unsigned long)ptr & VIRTIO_XDP_FLAG;
260}
261
262static void *xdp_to_ptr(struct xdp_frame *ptr)
263{
264 return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
265}
266
267static struct xdp_frame *ptr_to_xdp(void *ptr)
268{
269 return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
270}
271
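
The VIRTIO_XDP_FLAG helpers are a tagged-pointer trick: kmalloc'd objects are at least pointer-aligned, so bit 0 of an sk_buff or xdp_frame address is always zero and free to carry a type flag. Tagging XDP frames on enqueue lets one TX virtqueue carry both object types, and lets the completion paths below free each with the right routine. A generic sketch of the technique, outside any driver:

	#define PTR_TYPE_FLAG	0x1UL	/* safe iff objects are >= 2-byte aligned */

	static inline void *tag_ptr(void *p)
	{
		return (void *)((unsigned long)p | PTR_TYPE_FLAG);
	}

	static inline bool ptr_is_tagged(void *p)
	{
		return (unsigned long)p & PTR_TYPE_FLAG;
	}

	static inline void *untag_ptr(void *p)
	{
		return (void *)((unsigned long)p & ~PTR_TYPE_FLAG);
	}
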
255/* Converting between virtqueue no. and kernel tx/rx queue no. 272/* Converting between virtqueue no. and kernel tx/rx queue no.
256 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq 273 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
257 */ 274 */
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
462 479
463 sg_init_one(sq->sg, xdpf->data, xdpf->len); 480 sg_init_one(sq->sg, xdpf->data, xdpf->len);
464 481
465 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); 482 err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
483 GFP_ATOMIC);
466 if (unlikely(err)) 484 if (unlikely(err))
467 return -ENOSPC; /* Caller handle free/refcnt */ 485 return -ENOSPC; /* Caller handle free/refcnt */
468 486
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev,
482{ 500{
483 struct virtnet_info *vi = netdev_priv(dev); 501 struct virtnet_info *vi = netdev_priv(dev);
484 struct receive_queue *rq = vi->rq; 502 struct receive_queue *rq = vi->rq;
485 struct xdp_frame *xdpf_sent;
486 struct bpf_prog *xdp_prog; 503 struct bpf_prog *xdp_prog;
487 struct send_queue *sq; 504 struct send_queue *sq;
488 unsigned int len; 505 unsigned int len;
506 int packets = 0;
507 int bytes = 0;
489 int drops = 0; 508 int drops = 0;
490 int kicks = 0; 509 int kicks = 0;
491 int ret, err; 510 int ret, err;
511 void *ptr;
492 int i; 512 int i;
493 513
494 sq = virtnet_xdp_sq(vi);
495
496 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
497 ret = -EINVAL;
498 drops = n;
499 goto out;
500 }
501
502 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this 514 /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
503 * indicate XDP resources have been successfully allocated. 515 * indicate XDP resources have been successfully allocated.
504 */ 516 */
505 xdp_prog = rcu_dereference(rq->xdp_prog); 517 xdp_prog = rcu_dereference(rq->xdp_prog);
506 if (!xdp_prog) { 518 if (!xdp_prog)
507 ret = -ENXIO; 519 return -ENXIO;
520
521 sq = virtnet_xdp_sq(vi);
522
523 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
524 ret = -EINVAL;
508 drops = n; 525 drops = n;
509 goto out; 526 goto out;
510 } 527 }
511 528
512 /* Free up any pending old buffers before queueing new ones. */ 529 /* Free up any pending old buffers before queueing new ones. */
513 while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) 530 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
514 xdp_return_frame(xdpf_sent); 531 if (likely(is_xdp_frame(ptr))) {
532 struct xdp_frame *frame = ptr_to_xdp(ptr);
533
534 bytes += frame->len;
535 xdp_return_frame(frame);
536 } else {
537 struct sk_buff *skb = ptr;
538
539 bytes += skb->len;
540 napi_consume_skb(skb, false);
541 }
542 packets++;
543 }
515 544
516 for (i = 0; i < n; i++) { 545 for (i = 0; i < n; i++) {
517 struct xdp_frame *xdpf = frames[i]; 546 struct xdp_frame *xdpf = frames[i];
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
530 } 559 }
531out: 560out:
532 u64_stats_update_begin(&sq->stats.syncp); 561 u64_stats_update_begin(&sq->stats.syncp);
562 sq->stats.bytes += bytes;
563 sq->stats.packets += packets;
533 sq->stats.xdp_tx += n; 564 sq->stats.xdp_tx += n;
534 sq->stats.xdp_tx_drops += drops; 565 sq->stats.xdp_tx_drops += drops;
535 sq->stats.kicks += kicks; 566 sq->stats.kicks += kicks;
@@ -1330,20 +1361,28 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1330 return stats.packets; 1361 return stats.packets;
1331} 1362}
1332 1363
1333static void free_old_xmit_skbs(struct send_queue *sq) 1364static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
1334{ 1365{
1335 struct sk_buff *skb;
1336 unsigned int len; 1366 unsigned int len;
1337 unsigned int packets = 0; 1367 unsigned int packets = 0;
1338 unsigned int bytes = 0; 1368 unsigned int bytes = 0;
1369 void *ptr;
1339 1370
1340 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { 1371 while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1341 pr_debug("Sent skb %p\n", skb); 1372 if (likely(!is_xdp_frame(ptr))) {
1373 struct sk_buff *skb = ptr;
1342 1374
1343 bytes += skb->len; 1375 pr_debug("Sent skb %p\n", skb);
1344 packets++; 1376
1377 bytes += skb->len;
1378 napi_consume_skb(skb, in_napi);
1379 } else {
1380 struct xdp_frame *frame = ptr_to_xdp(ptr);
1345 1381
1346 dev_consume_skb_any(skb); 1382 bytes += frame->len;
1383 xdp_return_frame(frame);
1384 }
1385 packets++;
1347 } 1386 }
1348 1387
1349 /* Avoid overhead when no packets have been processed 1388 /* Avoid overhead when no packets have been processed
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
1358 u64_stats_update_end(&sq->stats.syncp); 1397 u64_stats_update_end(&sq->stats.syncp);
1359} 1398}
1360 1399
1400static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1401{
1402 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1403 return false;
1404 else if (q < vi->curr_queue_pairs)
1405 return true;
1406 else
1407 return false;
1408}
1409
1361static void virtnet_poll_cleantx(struct receive_queue *rq) 1410static void virtnet_poll_cleantx(struct receive_queue *rq)
1362{ 1411{
1363 struct virtnet_info *vi = rq->vq->vdev->priv; 1412 struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1365,11 +1414,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
1365 struct send_queue *sq = &vi->sq[index]; 1414 struct send_queue *sq = &vi->sq[index];
1366 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); 1415 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1367 1416
1368 if (!sq->napi.weight) 1417 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1369 return; 1418 return;
1370 1419
1371 if (__netif_tx_trylock(txq)) { 1420 if (__netif_tx_trylock(txq)) {
1372 free_old_xmit_skbs(sq); 1421 free_old_xmit_skbs(sq, true);
1373 __netif_tx_unlock(txq); 1422 __netif_tx_unlock(txq);
1374 } 1423 }
1375 1424
@@ -1442,10 +1491,18 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
1442{ 1491{
1443 struct send_queue *sq = container_of(napi, struct send_queue, napi); 1492 struct send_queue *sq = container_of(napi, struct send_queue, napi);
1444 struct virtnet_info *vi = sq->vq->vdev->priv; 1493 struct virtnet_info *vi = sq->vq->vdev->priv;
1445 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); 1494 unsigned int index = vq2txq(sq->vq);
1495 struct netdev_queue *txq;
1446 1496
1497 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1498 /* We don't need to enable cb for XDP */
1499 napi_complete_done(napi, 0);
1500 return 0;
1501 }
1502
1503 txq = netdev_get_tx_queue(vi->dev, index);
1447 __netif_tx_lock(txq, raw_smp_processor_id()); 1504 __netif_tx_lock(txq, raw_smp_processor_id());
1448 free_old_xmit_skbs(sq); 1505 free_old_xmit_skbs(sq, true);
1449 __netif_tx_unlock(txq); 1506 __netif_tx_unlock(txq);
1450 1507
1451 virtqueue_napi_complete(napi, sq->vq, 0); 1508 virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1514,7 +1571,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1514 bool use_napi = sq->napi.weight; 1571 bool use_napi = sq->napi.weight;
1515 1572
1516 /* Free up any pending old buffers before queueing new ones. */ 1573 /* Free up any pending old buffers before queueing new ones. */
1517 free_old_xmit_skbs(sq); 1574 free_old_xmit_skbs(sq, false);
1518 1575
1519 if (use_napi && kick) 1576 if (use_napi && kick)
1520 virtqueue_enable_cb_delayed(sq->vq); 1577 virtqueue_enable_cb_delayed(sq->vq);
@@ -1557,7 +1614,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1557 if (!use_napi && 1614 if (!use_napi &&
1558 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1615 unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1559 /* More just got used, free them then recheck. */ 1616 /* More just got used, free them then recheck. */
1560 free_old_xmit_skbs(sq); 1617 free_old_xmit_skbs(sq, false);
1561 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { 1618 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1562 netif_start_subqueue(dev, qnum); 1619 netif_start_subqueue(dev, qnum);
1563 virtqueue_disable_cb(sq->vq); 1620 virtqueue_disable_cb(sq->vq);
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2395 return -ENOMEM; 2452 return -ENOMEM;
2396 } 2453 }
2397 2454
2455 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2456 if (!prog && !old_prog)
2457 return 0;
2458
2398 if (prog) { 2459 if (prog) {
2399 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); 2460 prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
2400 if (IS_ERR(prog)) 2461 if (IS_ERR(prog))
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2402 } 2463 }
2403 2464
2404 /* Make sure NAPI is not using any XDP TX queues for RX. */ 2465 /* Make sure NAPI is not using any XDP TX queues for RX. */
2405 if (netif_running(dev)) 2466 if (netif_running(dev)) {
2406 for (i = 0; i < vi->max_queue_pairs; i++) 2467 for (i = 0; i < vi->max_queue_pairs; i++) {
2407 napi_disable(&vi->rq[i].napi); 2468 napi_disable(&vi->rq[i].napi);
2469 virtnet_napi_tx_disable(&vi->sq[i].napi);
2470 }
2471 }
2472
2473 if (!prog) {
2474 for (i = 0; i < vi->max_queue_pairs; i++) {
2475 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2476 if (i == 0)
2477 virtnet_restore_guest_offloads(vi);
2478 }
2479 synchronize_net();
2480 }
2408 2481
2409 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2410 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); 2482 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2411 if (err) 2483 if (err)
2412 goto err; 2484 goto err;
2485 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2413 vi->xdp_queue_pairs = xdp_qp; 2486 vi->xdp_queue_pairs = xdp_qp;
2414 2487
2415 for (i = 0; i < vi->max_queue_pairs; i++) { 2488 if (prog) {
2416 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); 2489 for (i = 0; i < vi->max_queue_pairs; i++) {
2417 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); 2490 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2418 if (i == 0) { 2491 if (i == 0 && !old_prog)
2419 if (!old_prog)
2420 virtnet_clear_guest_offloads(vi); 2492 virtnet_clear_guest_offloads(vi);
2421 if (!prog)
2422 virtnet_restore_guest_offloads(vi);
2423 } 2493 }
2494 }
2495
2496 for (i = 0; i < vi->max_queue_pairs; i++) {
2424 if (old_prog) 2497 if (old_prog)
2425 bpf_prog_put(old_prog); 2498 bpf_prog_put(old_prog);
2426 if (netif_running(dev)) 2499 if (netif_running(dev)) {
2427 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2500 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2501 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2502 &vi->sq[i].napi);
2503 }
2428 } 2504 }
2429 2505
2430 return 0; 2506 return 0;
2431 2507
2432err: 2508err:
2433 for (i = 0; i < vi->max_queue_pairs; i++) 2509 if (!prog) {
2434 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); 2510 virtnet_clear_guest_offloads(vi);
2511 for (i = 0; i < vi->max_queue_pairs; i++)
2512 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2513 }
2514
2515 if (netif_running(dev)) {
2516 for (i = 0; i < vi->max_queue_pairs; i++) {
2517 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2518 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2519 &vi->sq[i].napi);
2520 }
2521 }
2435 if (prog) 2522 if (prog)
2436 bpf_prog_sub(prog, vi->max_queue_pairs - 1); 2523 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2437 return err; 2524 return err;
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
2613 put_page(vi->rq[i].alloc_frag.page); 2700 put_page(vi->rq[i].alloc_frag.page);
2614} 2701}
2615 2702
2616static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
2617{
2618 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
2619 return false;
2620 else if (q < vi->curr_queue_pairs)
2621 return true;
2622 else
2623 return false;
2624}
2625
2626static void free_unused_bufs(struct virtnet_info *vi) 2703static void free_unused_bufs(struct virtnet_info *vi)
2627{ 2704{
2628 void *buf; 2705 void *buf;
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
2631 for (i = 0; i < vi->max_queue_pairs; i++) { 2708 for (i = 0; i < vi->max_queue_pairs; i++) {
2632 struct virtqueue *vq = vi->sq[i].vq; 2709 struct virtqueue *vq = vi->sq[i].vq;
2633 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { 2710 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2634 if (!is_xdp_raw_buffer_queue(vi, i)) 2711 if (!is_xdp_frame(buf))
2635 dev_kfree_skb(buf); 2712 dev_kfree_skb(buf);
2636 else 2713 else
2637 put_page(virt_to_head_page(buf)); 2714 xdp_return_frame(ptr_to_xdp(buf));
2638 } 2715 }
2639 } 2716 }
2640 2717
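Editor's note: the virtio_net changes above let one TX virtqueue carry two kinds of completion cookie, sk_buffs and xdp_frames, and tell them apart by setting bit 0 of the pointer for XDP frames; this is safe because both objects are at least pointer-aligned, so bit 0 is otherwise always clear. A minimal userspace sketch of the same tagging trick (the type names here are made up; only the bit manipulation mirrors the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define XDP_FLAG 0x1UL /* mirrors VIRTIO_XDP_FLAG: lowest pointer bit */

struct fake_skb { int len; };       /* stand-in for struct sk_buff */
struct fake_xdp_frame { int len; }; /* stand-in for struct xdp_frame */

static int is_xdp(void *ptr)
{
	return (uintptr_t)ptr & XDP_FLAG;
}

static void *xdp_to_ptr(struct fake_xdp_frame *f)
{
	/* allocations are at least pointer-aligned, so bit 0 is free */
	assert(((uintptr_t)f & XDP_FLAG) == 0);
	return (void *)((uintptr_t)f | XDP_FLAG);
}

static struct fake_xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct fake_xdp_frame *)((uintptr_t)ptr & ~XDP_FLAG);
}

int main(void)
{
	struct fake_skb *skb = malloc(sizeof(*skb));
	struct fake_xdp_frame *xf = malloc(sizeof(*xf));
	void *cookies[2] = { skb, xdp_to_ptr(xf) };

	for (int i = 0; i < 2; i++) {
		if (is_xdp(cookies[i]))
			printf("cookie %d: xdp_frame %p\n", i,
			       (void *)ptr_to_xdp(cookies[i]));
		else
			printf("cookie %d: skb %p\n", i, cookies[i]);
	}
	free(skb);
	free(xf);
	return 0;
}

The per-buffer decode is what lets free_old_xmit_skbs() account both kinds of completion and lets free_unused_bufs() stop guessing the buffer type from the queue index.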
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e454dfc9ad8f..89984fcab01e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -535,8 +535,8 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
535 } 535 }
536 536
537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); 537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
538 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, 538 tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
539 &tq->buf_info_pa, GFP_KERNEL); 539 &tq->buf_info_pa, GFP_KERNEL);
540 if (!tq->buf_info) 540 if (!tq->buf_info)
541 goto err; 541 goto err;
542 542
@@ -1815,8 +1815,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1815 1815
1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1816 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1817 rq->rx_ring[1].size); 1817 rq->rx_ring[1].size);
1818 bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa, 1818 bi = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
1819 GFP_KERNEL); 1819 GFP_KERNEL);
1820 if (!bi) 1820 if (!bi)
1821 goto err; 1821 goto err;
1822 1822
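Editor's note: this vmxnet3 hunk is one instance of a mechanical conversion that recurs through the rest of this diff (fsl_ucc_hdlc, ath10k, wcn36xx, wil6210, b43, brcmfmac, iwlwifi, rt2x00, the switchtec NTB, nvme): dma_alloc_coherent() now returns zeroed memory itself, so the dma_zalloc_coherent() wrapper is gone and callers are renamed with no behavior change. A kernel-style sketch of the resulting call shape, with a made-up descriptor type:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Sketch only, not from any driver above. dma_alloc_coherent()
 * already zeroes the buffer, so no memset() is needed after it.
 */
struct my_desc {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

static struct my_desc *alloc_ring(struct device *dev, int nentries,
				  dma_addr_t *dma)
{
	/* was: dma_zalloc_coherent(dev, sz, dma, GFP_KERNEL); */
	return dma_alloc_coherent(dev, nentries * sizeof(struct my_desc),
				  dma, GFP_KERNEL);
}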
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5209ee9aac47..2aae11feff0c 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2219 struct pcpu_sw_netstats *tx_stats, *rx_stats; 2219 struct pcpu_sw_netstats *tx_stats, *rx_stats;
2220 union vxlan_addr loopback; 2220 union vxlan_addr loopback;
2221 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; 2221 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
2222 struct net_device *dev = skb->dev; 2222 struct net_device *dev;
2223 int len = skb->len; 2223 int len = skb->len;
2224 2224
2225 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); 2225 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2239#endif 2239#endif
2240 } 2240 }
2241 2241
2242 rcu_read_lock();
2243 dev = skb->dev;
2244 if (unlikely(!(dev->flags & IFF_UP))) {
2245 kfree_skb(skb);
2246 goto drop;
2247 }
2248
2242 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) 2249 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
2243 vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, 2250 vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
2244 vni);
2245 2251
2246 u64_stats_update_begin(&tx_stats->syncp); 2252 u64_stats_update_begin(&tx_stats->syncp);
2247 tx_stats->tx_packets++; 2253 tx_stats->tx_packets++;
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
2254 rx_stats->rx_bytes += len; 2260 rx_stats->rx_bytes += len;
2255 u64_stats_update_end(&rx_stats->syncp); 2261 u64_stats_update_end(&rx_stats->syncp);
2256 } else { 2262 } else {
2263drop:
2257 dev->stats.rx_dropped++; 2264 dev->stats.rx_dropped++;
2258 } 2265 }
2266 rcu_read_unlock();
2259} 2267}
2260 2268
2261static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, 2269static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
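Editor's note: the vxlan hunk takes rcu_read_lock() and re-checks IFF_UP on the loopback target before delivering, so the encap-bypass path cannot feed packets to a device that is concurrently going down; the new drop: label folds the failure into the existing rx_dropped accounting. A kernel-style sketch of that guard, with a hypothetical delivery helper:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: guard loopback delivery against a device being torn down.
 * The caller owns the skb, so the failure path must free it.
 */
static void deliver_loopback(struct sk_buff *skb)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = skb->dev;
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);			/* drop, don't deliver */
		dev->stats.rx_dropped++;
		goto out;
	}
	netif_rx(skb);				/* normal receive path */
out:
	rcu_read_unlock();
}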
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c0b0f525c87c..27decf8ae840 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1575,7 +1575,7 @@ try:
1575 dev->stats.tx_packets++; 1575 dev->stats.tx_packets++;
1576 dev->stats.tx_bytes += skb->len; 1576 dev->stats.tx_bytes += skb->len;
1577 } 1577 }
1578 dev_kfree_skb_irq(skb); 1578 dev_consume_skb_irq(skb);
1579 dpriv->tx_skbuff[cur] = NULL; 1579 dpriv->tx_skbuff[cur] = NULL;
1580 ++dpriv->tx_dirty; 1580 ++dpriv->tx_dirty;
1581 } else { 1581 } else {
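Editor's note: here, and in the fsl_ucc_hdlc hunk below, the TX-completion free switches from dev_kfree_skb_irq() to dev_consume_skb_irq(). Both free the skb from hardirq context; the difference is bookkeeping: "consume" marks a successfully transmitted skb while "kfree" marks a drop, which keeps drop-monitoring tooling pointed at real losses. A sketch of the convention (tx_ok is a hypothetical descriptor status bit):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: TX-completion handler illustrating the consume/kfree split. */
static void tx_complete(struct net_device *dev, struct sk_buff *skb,
			bool tx_ok)
{
	if (tx_ok) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_consume_skb_irq(skb);	/* sent fine: not a drop */
	} else {
		dev->stats.tx_errors++;
		dev_kfree_skb_irq(skb);		/* error: counts as a drop */
	}
}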
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 839fa7715709..a08f04c3f644 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -279,10 +279,9 @@ static int uhdlc_init(struct ucc_hdlc_private *priv)
279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); 279 iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
280 280
281 /* Get BD buffer */ 281 /* Get BD buffer */
282 bd_buffer = dma_zalloc_coherent(priv->dev, 282 bd_buffer = dma_alloc_coherent(priv->dev,
283 (RX_BD_RING_LEN + TX_BD_RING_LEN) * 283 (RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
284 MAX_RX_BUF_LENGTH, 284 &bd_dma_addr, GFP_KERNEL);
285 &bd_dma_addr, GFP_KERNEL);
286 285
287 if (!bd_buffer) { 286 if (!bd_buffer) {
288 dev_err(priv->dev, "Could not allocate buffer descriptors\n"); 287 dev_err(priv->dev, "Could not allocate buffer descriptors\n");
@@ -483,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
483 memset(priv->tx_buffer + 482 memset(priv->tx_buffer +
484 (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 483 (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
485 0, skb->len); 484 0, skb->len);
486 dev_kfree_skb_irq(skb); 485 dev_consume_skb_irq(skb);
487 486
488 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 487 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
489 priv->skb_dirtytx = 488 priv->skb_dirtytx =
@@ -1057,6 +1056,54 @@ static const struct net_device_ops uhdlc_ops = {
1057 .ndo_tx_timeout = uhdlc_tx_timeout, 1056 .ndo_tx_timeout = uhdlc_tx_timeout,
1058}; 1057};
1059 1058
1059static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
1060{
1061 struct device_node *np;
1062 struct platform_device *pdev;
1063 struct resource *res;
1064 static int siram_init_flag;
1065 int ret = 0;
1066
1067 np = of_find_compatible_node(NULL, NULL, name);
1068 if (!np)
1069 return -EINVAL;
1070
1071 pdev = of_find_device_by_node(np);
1072 if (!pdev) {
1073 pr_err("%pOFn: failed to lookup pdev\n", np);
1074 of_node_put(np);
1075 return -EINVAL;
1076 }
1077
1078 of_node_put(np);
1079 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1080 if (!res) {
1081 ret = -EINVAL;
1082 goto error_put_device;
1083 }
1084 *ptr = ioremap(res->start, resource_size(res));
1085 if (!*ptr) {
1086 ret = -ENOMEM;
1087 goto error_put_device;
1088 }
1089
1090 /* We've remapped the addresses, and we don't need the device any
1091 * more, so we should release it.
1092 */
1093 put_device(&pdev->dev);
1094
1095 if (init_flag && siram_init_flag == 0) {
1096 memset_io(*ptr, 0, resource_size(res));
1097 siram_init_flag = 1;
1098 }
1099 return 0;
1100
1101error_put_device:
1102 put_device(&pdev->dev);
1103
1104 return ret;
1105}
1106
1060static int ucc_hdlc_probe(struct platform_device *pdev) 1107static int ucc_hdlc_probe(struct platform_device *pdev)
1061{ 1108{
1062 struct device_node *np = pdev->dev.of_node; 1109 struct device_node *np = pdev->dev.of_node;
@@ -1151,6 +1198,15 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1151 ret = ucc_of_parse_tdm(np, utdm, ut_info); 1198 ret = ucc_of_parse_tdm(np, utdm, ut_info);
1152 if (ret) 1199 if (ret)
1153 goto free_utdm; 1200 goto free_utdm;
1201
1202 ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
1203 (void __iomem **)&utdm->si_regs);
1204 if (ret)
1205 goto free_utdm;
1206 ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
1207 (void __iomem **)&utdm->siram);
1208 if (ret)
1209 goto unmap_si_regs;
1154 } 1210 }
1155 1211
1156 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) 1212 if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
@@ -1159,7 +1215,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1159 ret = uhdlc_init(uhdlc_priv); 1215 ret = uhdlc_init(uhdlc_priv);
1160 if (ret) { 1216 if (ret) {
1161 dev_err(&pdev->dev, "Failed to init uhdlc\n"); 1217 dev_err(&pdev->dev, "Failed to init uhdlc\n");
1162 goto free_utdm; 1218 goto undo_uhdlc_init;
1163 } 1219 }
1164 1220
1165 dev = alloc_hdlcdev(uhdlc_priv); 1221 dev = alloc_hdlcdev(uhdlc_priv);
@@ -1188,6 +1244,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
1188free_dev: 1244free_dev:
1189 free_netdev(dev); 1245 free_netdev(dev);
1190undo_uhdlc_init: 1246undo_uhdlc_init:
1247 iounmap(utdm->siram);
1248unmap_si_regs:
1249 iounmap(utdm->si_regs);
1191free_utdm: 1250free_utdm:
1192 if (uhdlc_priv->tsa) 1251 if (uhdlc_priv->tsa)
1193 kfree(utdm); 1252 kfree(utdm);
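Editor's note: the new hdlc_map_iomem() helper above resolves a platform device by compatible string, ioremaps its first MEM resource, and drops the node and device references once the mapping is live; the probe error ladder gains matching iounmap labels. The reference discipline is the reusable part, condensed into a sketch (the helper name is invented):

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Sketch: map the first MEM resource of the device matching 'compat'.
 * Every acquired reference (node, device) is released on all paths;
 * the mapping itself outlives the device reference.
 */
static void __iomem *map_by_compatible(const char *compat)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct resource *res;
	void __iomem *base = NULL;

	np = of_find_compatible_node(NULL, NULL, compat);
	if (!np)
		return NULL;

	pdev = of_find_device_by_node(np);
	of_node_put(np);			/* node ref no longer needed */
	if (!pdev)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		base = ioremap(res->start, resource_size(res));

	put_device(&pdev->dev);
	return base;
}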
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index f6d3ecbdd3a3..2a5668b4f6bc 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1553,10 +1553,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1553 * coherent DMA are unsupported 1553 * coherent DMA are unsupported
1554 */ 1554 */
1555 dest_ring->base_addr_owner_space_unaligned = 1555 dest_ring->base_addr_owner_space_unaligned =
1556 dma_zalloc_coherent(ar->dev, 1556 dma_alloc_coherent(ar->dev,
1557 (nentries * sizeof(struct ce_desc) + 1557 (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
1558 CE_DESC_RING_ALIGN), 1558 &base_addr, GFP_KERNEL);
1559 &base_addr, GFP_KERNEL);
1560 if (!dest_ring->base_addr_owner_space_unaligned) { 1559 if (!dest_ring->base_addr_owner_space_unaligned) {
1561 kfree(dest_ring); 1560 kfree(dest_ring);
1562 return ERR_PTR(-ENOMEM); 1561 return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 399b501f3c3c..e8891f5fc83a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
548 { 548 {
549 .id = WCN3990_HW_1_0_DEV_VERSION, 549 .id = WCN3990_HW_1_0_DEV_VERSION,
550 .dev_id = 0, 550 .dev_id = 0,
551 .bus = ATH10K_BUS_PCI, 551 .bus = ATH10K_BUS_SNOC,
552 .name = "wcn3990 hw1.0", 552 .name = "wcn3990 hw1.0",
553 .continuous_frag_desc = true, 553 .continuous_frag_desc = true,
554 .tx_chain_mask = 0x7, 554 .tx_chain_mask = 0x7,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e49b36752ba2..49758490eaba 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5169,10 +5169,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
5169 if (vif->type == NL80211_IFTYPE_ADHOC || 5169 if (vif->type == NL80211_IFTYPE_ADHOC ||
5170 vif->type == NL80211_IFTYPE_MESH_POINT || 5170 vif->type == NL80211_IFTYPE_MESH_POINT ||
5171 vif->type == NL80211_IFTYPE_AP) { 5171 vif->type == NL80211_IFTYPE_AP) {
5172 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 5172 arvif->beacon_buf = dma_alloc_coherent(ar->dev,
5173 IEEE80211_MAX_FRAME_LEN, 5173 IEEE80211_MAX_FRAME_LEN,
5174 &arvif->beacon_paddr, 5174 &arvif->beacon_paddr,
5175 GFP_ATOMIC); 5175 GFP_ATOMIC);
5176 if (!arvif->beacon_buf) { 5176 if (!arvif->beacon_buf) {
5177 ret = -ENOMEM; 5177 ret = -ENOMEM;
5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5178 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 01b4edb00e9e..39e0b1cc2a12 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -936,8 +936,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
936 */ 936 */
937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
938 938
939 data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev, 939 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes,
940 alloc_nbytes,
941 &ce_data_base, 940 &ce_data_base,
942 GFP_ATOMIC); 941 GFP_ATOMIC);
943 942
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index ba837403e266..8e236d158ca6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -5193,7 +5193,7 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
5193 void *vaddr; 5193 void *vaddr;
5194 5194
5195 pool_size = num_units * round_up(unit_len, 4); 5195 pool_size = num_units * round_up(unit_len, 4);
5196 vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL); 5196 vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
5197 5197
5198 if (!vaddr) 5198 if (!vaddr)
5199 return -ENOMEM; 5199 return -ENOMEM;
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 5ab3e31c9ffa..bab30f7a443c 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -174,9 +174,8 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
174 int i; 174 int i;
175 175
176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc); 176 size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
177 wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size, 177 wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
178 &wcn_ch->dma_addr, 178 GFP_KERNEL);
179 GFP_KERNEL);
180 if (!wcn_ch->cpu_addr) 179 if (!wcn_ch->cpu_addr)
181 return -ENOMEM; 180 return -ENOMEM;
182 181
@@ -627,9 +626,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
627 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 626 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
628 627
629 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H; 628 s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
630 cpu_addr = dma_zalloc_coherent(wcn->dev, s, 629 cpu_addr = dma_alloc_coherent(wcn->dev, s,
631 &wcn->mgmt_mem_pool.phy_addr, 630 &wcn->mgmt_mem_pool.phy_addr,
632 GFP_KERNEL); 631 GFP_KERNEL);
633 if (!cpu_addr) 632 if (!cpu_addr)
634 goto out_err; 633 goto out_err;
635 634
@@ -642,9 +641,9 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
642 16 - (WCN36XX_BD_CHUNK_SIZE % 8); 641 16 - (WCN36XX_BD_CHUNK_SIZE % 8);
643 642
644 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L; 643 s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
645 cpu_addr = dma_zalloc_coherent(wcn->dev, s, 644 cpu_addr = dma_alloc_coherent(wcn->dev, s,
646 &wcn->data_mem_pool.phy_addr, 645 &wcn->data_mem_pool.phy_addr,
647 GFP_KERNEL); 646 GFP_KERNEL);
648 if (!cpu_addr) 647 if (!cpu_addr)
649 goto out_err; 648 goto out_err;
650 649
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 05a8348bd7b9..3380aaef456c 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -99,7 +99,7 @@ static int wil_sring_alloc(struct wil6210_priv *wil,
99 /* Status messages are allocated and initialized to 0. This is necessary 99 /* Status messages are allocated and initialized to 0. This is necessary
100 * since DR bit should be initialized to 0. 100 * since DR bit should be initialized to 0.
101 */ 101 */
102 sring->va = dma_zalloc_coherent(dev, sz, &sring->pa, GFP_KERNEL); 102 sring->va = dma_alloc_coherent(dev, sz, &sring->pa, GFP_KERNEL);
103 if (!sring->va) 103 if (!sring->va)
104 return -ENOMEM; 104 return -ENOMEM;
105 105
@@ -381,15 +381,15 @@ static int wil_ring_alloc_desc_ring(struct wil6210_priv *wil,
381 if (!ring->ctx) 381 if (!ring->ctx)
382 goto err; 382 goto err;
383 383
384 ring->va = dma_zalloc_coherent(dev, sz, &ring->pa, GFP_KERNEL); 384 ring->va = dma_alloc_coherent(dev, sz, &ring->pa, GFP_KERNEL);
385 if (!ring->va) 385 if (!ring->va)
386 goto err_free_ctx; 386 goto err_free_ctx;
387 387
388 if (ring->is_rx) { 388 if (ring->is_rx) {
389 sz = sizeof(*ring->edma_rx_swtail.va); 389 sz = sizeof(*ring->edma_rx_swtail.va);
390 ring->edma_rx_swtail.va = 390 ring->edma_rx_swtail.va =
391 dma_zalloc_coherent(dev, sz, &ring->edma_rx_swtail.pa, 391 dma_alloc_coherent(dev, sz, &ring->edma_rx_swtail.pa,
392 GFP_KERNEL); 392 GFP_KERNEL);
393 if (!ring->edma_rx_swtail.va) 393 if (!ring->edma_rx_swtail.va)
394 goto err_free_va; 394 goto err_free_va;
395 } 395 }
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index dfc4c34298d4..b34e51933257 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ? 431 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE; 432 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
433 433
434 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
435 ring_mem_size, &(ring->dmabase), 435 ring_mem_size, &(ring->dmabase),
436 GFP_KERNEL); 436 GFP_KERNEL);
437 if (!ring->descbase) 437 if (!ring->descbase)
438 return -ENOMEM; 438 return -ENOMEM;
439 439
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 1b1da7d83652..2ce1537d983c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -331,9 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
331static int alloc_ringmemory(struct b43legacy_dmaring *ring) 331static int alloc_ringmemory(struct b43legacy_dmaring *ring)
332{ 332{
333 /* GFP flags must match the flags in free_ringmemory()! */ 333 /* GFP flags must match the flags in free_ringmemory()! */
334 ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev, 334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
335 B43legacy_DMA_RINGMEMSIZE, 335 B43legacy_DMA_RINGMEMSIZE,
336 &(ring->dmabase), GFP_KERNEL); 336 &(ring->dmabase), GFP_KERNEL);
337 if (!ring->descbase) 337 if (!ring->descbase)
338 return -ENOMEM; 338 return -ENOMEM;
339 339
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 16d7dda965d8..0f69b3fa296e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1281,10 +1281,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1281 u32 addr; 1281 u32 addr;
1282 1282
1283 devinfo->shared.scratch = 1283 devinfo->shared.scratch =
1284 dma_zalloc_coherent(&devinfo->pdev->dev, 1284 dma_alloc_coherent(&devinfo->pdev->dev,
1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN, 1285 BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
1286 &devinfo->shared.scratch_dmahandle, 1286 &devinfo->shared.scratch_dmahandle,
1287 GFP_KERNEL); 1287 GFP_KERNEL);
1288 if (!devinfo->shared.scratch) 1288 if (!devinfo->shared.scratch)
1289 goto fail; 1289 goto fail;
1290 1290
@@ -1298,10 +1298,10 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN); 1298 brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
1299 1299
1300 devinfo->shared.ringupd = 1300 devinfo->shared.ringupd =
1301 dma_zalloc_coherent(&devinfo->pdev->dev, 1301 dma_alloc_coherent(&devinfo->pdev->dev,
1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN, 1302 BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
1303 &devinfo->shared.ringupd_dmahandle, 1303 &devinfo->shared.ringupd_dmahandle,
1304 GFP_KERNEL); 1304 GFP_KERNEL);
1305 if (!devinfo->shared.ringupd) 1305 if (!devinfo->shared.ringupd)
1306 goto fail; 1306 goto fail;
1307 1307
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 491ca3c8b43c..83d5bceea08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
1config IWLWIFI 1config IWLWIFI
2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " 2 tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
3 depends on PCI && HAS_IOMEM 3 depends on PCI && HAS_IOMEM && CFG80211
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 Select to build the driver supporting the: 6 Select to build the driver supporting the:
@@ -47,6 +47,7 @@ if IWLWIFI
47config IWLWIFI_LEDS 47config IWLWIFI_LEDS
48 bool 48 bool
49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI 49 depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
50 depends on IWLMVM || IWLDVM
50 select LEDS_TRIGGERS 51 select LEDS_TRIGGERS
51 select MAC80211_LEDS 52 select MAC80211_LEDS
52 default y 53 default y
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e965cc588850..9e850c25877b 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -711,30 +711,24 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
711 * Allocate the circular buffer of Read Buffer Descriptors 711 * Allocate the circular buffer of Read Buffer Descriptors
712 * (RBDs) 712 * (RBDs)
713 */ 713 */
714 rxq->bd = dma_zalloc_coherent(dev, 714 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
715 free_size * rxq->queue_size, 715 &rxq->bd_dma, GFP_KERNEL);
716 &rxq->bd_dma, GFP_KERNEL);
717 if (!rxq->bd) 716 if (!rxq->bd)
718 goto err; 717 goto err;
719 718
720 if (trans->cfg->mq_rx_supported) { 719 if (trans->cfg->mq_rx_supported) {
721 rxq->used_bd = dma_zalloc_coherent(dev, 720 rxq->used_bd = dma_alloc_coherent(dev,
722 (use_rx_td ? 721 (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
723 sizeof(*rxq->cd) : 722 &rxq->used_bd_dma,
724 sizeof(__le32)) * 723 GFP_KERNEL);
725 rxq->queue_size,
726 &rxq->used_bd_dma,
727 GFP_KERNEL);
728 if (!rxq->used_bd) 724 if (!rxq->used_bd)
729 goto err; 725 goto err;
730 } 726 }
731 727
732 /* Allocate the driver's pointer to receive buffer status */ 728 /* Allocate the driver's pointer to receive buffer status */
733 rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? 729 rxq->rb_stts = dma_alloc_coherent(dev,
734 sizeof(__le16) : 730 use_rx_td ? sizeof(__le16) : sizeof(struct iwl_rb_status),
735 sizeof(struct iwl_rb_status), 731 &rxq->rb_stts_dma, GFP_KERNEL);
736 &rxq->rb_stts_dma,
737 GFP_KERNEL);
738 if (!rxq->rb_stts) 732 if (!rxq->rb_stts)
739 goto err; 733 goto err;
740 734
@@ -742,16 +736,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
742 return 0; 736 return 0;
743 737
744 /* Allocate the driver's pointer to TR tail */ 738 /* Allocate the driver's pointer to TR tail */
745 rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 739 rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
746 &rxq->tr_tail_dma, 740 &rxq->tr_tail_dma, GFP_KERNEL);
747 GFP_KERNEL);
748 if (!rxq->tr_tail) 741 if (!rxq->tr_tail)
749 goto err; 742 goto err;
750 743
751 /* Allocate the driver's pointer to CR tail */ 744 /* Allocate the driver's pointer to CR tail */
752 rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), 745 rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
753 &rxq->cr_tail_dma, 746 &rxq->cr_tail_dma, GFP_KERNEL);
754 GFP_KERNEL);
755 if (!rxq->cr_tail) 747 if (!rxq->cr_tail)
756 goto err; 748 goto err;
757 /* 749 /*
@@ -1947,9 +1939,8 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
1947 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1939 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1948 1940
1949 trans_pcie->ict_tbl = 1941 trans_pcie->ict_tbl =
1950 dma_zalloc_coherent(trans->dev, ICT_SIZE, 1942 dma_alloc_coherent(trans->dev, ICT_SIZE,
1951 &trans_pcie->ict_tbl_dma, 1943 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
1952 GFP_KERNEL);
1953 if (!trans_pcie->ict_tbl) 1944 if (!trans_pcie->ict_tbl)
1954 return -ENOMEM; 1945 return -ENOMEM;
1955 1946
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3a4b8786f7ea..320edcac4699 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2761,6 +2761,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2761 BIT(NL80211_CHAN_WIDTH_160); 2761 BIT(NL80211_CHAN_WIDTH_160);
2762 } 2762 }
2763 2763
2764 if (!n_limits) {
2765 err = -EINVAL;
2766 goto failed_hw;
2767 }
2768
2764 data->if_combination.n_limits = n_limits; 2769 data->if_combination.n_limits = n_limits;
2765 data->if_combination.max_interfaces = 2048; 2770 data->if_combination.max_interfaces = 2048;
2766 data->if_combination.limits = data->if_limits; 2771 data->if_combination.limits = data->if_limits;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 497e762978cc..b2cabce1d74d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
212 mt76x02_add_rate_power_offset(t, delta); 212 mt76x02_add_rate_power_offset(t, delta);
213} 213}
214 214
215void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) 215void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
216{ 216{
217 struct mt76x0_chan_map { 217 struct mt76x0_chan_map {
218 u8 chan; 218 u8 chan;
219 u8 offset; 219 u8 offset;
220 } chan_map[] = { 220 } chan_map[] = {
221 { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, 221 { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 },
222 { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, 222 { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 },
223 { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, 223 { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 },
224 { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, 224 { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
225 { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, 225 { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
226 { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, 226 { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
227 { 167, 17 }, { 171, 18 }, { 173, 19 }, 227 { 167, 34 }, { 171, 36 }, { 175, 38 },
228 }; 228 };
229 struct ieee80211_channel *chan = dev->mt76.chandef.chan; 229 struct ieee80211_channel *chan = dev->mt76.chandef.chan;
230 u8 offset, addr; 230 u8 offset, addr;
231 int i, idx = 0;
231 u16 data; 232 u16 data;
232 int i;
233 233
234 if (mt76x0_tssi_enabled(dev)) { 234 if (mt76x0_tssi_enabled(dev)) {
235 s8 target_power; 235 s8 target_power;
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
239 else 239 else
240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); 240 data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; 241 target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
242 info[0] = target_power + mt76x0_get_delta(dev); 242 *tp = target_power + mt76x0_get_delta(dev);
243 info[1] = 0;
244 243
245 return; 244 return;
246 } 245 }
247 246
248 for (i = 0; i < ARRAY_SIZE(chan_map); i++) { 247 for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
249 if (chan_map[i].chan <= chan->hw_value) { 248 if (chan->hw_value <= chan_map[i].chan) {
249 idx = (chan->hw_value == chan_map[i].chan);
250 offset = chan_map[i].offset; 250 offset = chan_map[i].offset;
251 break; 251 break;
252 } 252 }
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset; 258 addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
259 } else { 259 } else {
260 switch (chan->hw_value) { 260 switch (chan->hw_value) {
261 case 42:
262 offset = 2;
263 break;
261 case 58: 264 case 58:
262 offset = 8; 265 offset = 8;
263 break; 266 break;
264 case 106: 267 case 106:
265 offset = 14; 268 offset = 14;
266 break; 269 break;
267 case 112: 270 case 122:
268 offset = 20; 271 offset = 20;
269 break; 272 break;
270 case 155: 273 case 155:
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
277 } 280 }
278 281
279 data = mt76x02_eeprom_get(dev, addr); 282 data = mt76x02_eeprom_get(dev, addr);
280 283 *tp = data >> (8 * idx);
281 info[0] = data; 284 if (*tp < 0 || *tp > 0x3f)
282 if (!info[0] || info[0] > 0x3f) 285 *tp = 5;
283 info[0] = 5;
284
285 info[1] = data >> 8;
286 if (!info[1] || info[1] > 0x3f)
287 info[1] = 5;
288} 286}
289 287
290static int mt76x0_check_eeprom(struct mt76x02_dev *dev) 288static int mt76x0_check_eeprom(struct mt76x02_dev *dev)
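Editor's note: the mt76x0 rework returns one signed per-channel power value instead of two raw bytes: the chan_map offsets become byte offsets into the EEPROM (hence the doubling), idx selects the low or high byte of the 16-bit word depending on which of the two channels sharing an entry matched, and the range check now treats the value as signed. A standalone sketch of the byte selection, using a made-up two-entry table and EEPROM word:

#include <stdint.h>
#include <stdio.h>

struct chan_map { uint8_t chan; uint8_t offset; };

/* Sketch: one 16-bit EEPROM word stores power for two channels;
 * an exact match picks the high byte, the earlier channel the low one.
 */
static int8_t get_power(uint8_t hw_value, uint16_t eeprom_word,
			const struct chan_map *map, int n)
{
	int idx = 0;

	for (int i = 0; i < n; i++) {
		if (hw_value <= map[i].chan) {
			idx = (hw_value == map[i].chan);
			break;
		}
	}
	return (int8_t)(eeprom_word >> (8 * idx));
}

int main(void)
{
	const struct chan_map map[] = { { 2, 0 }, { 4, 2 } };

	/* word 0x0A05: low byte 0x05 (chan 1), high byte 0x0A (chan 2) */
	printf("chan 1 -> %d\n", get_power(1, 0x0A05, map, 2)); /* 5 */
	printf("chan 2 -> %d\n", get_power(2, 0x0A05, map, 2)); /* 10 */
	return 0;
}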
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
index ee9ade9f3c8b..42b259f90b6d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h
@@ -26,7 +26,7 @@ struct mt76x02_dev;
26int mt76x0_eeprom_init(struct mt76x02_dev *dev); 26int mt76x0_eeprom_init(struct mt76x02_dev *dev);
27void mt76x0_read_rx_gain(struct mt76x02_dev *dev); 27void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
28void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); 28void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
29void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); 29void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp);
30 30
31static inline s8 s6_to_s8(u32 val) 31static inline s8 s6_to_s8(u32 val)
32{ 32{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 1eb1a802ed20..b6166703ad76 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
845void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) 845void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
846{ 846{
847 struct mt76_rate_power *t = &dev->mt76.rate_power; 847 struct mt76_rate_power *t = &dev->mt76.rate_power;
848 u8 info[2]; 848 s8 info;
849 849
850 mt76x0_get_tx_power_per_rate(dev); 850 mt76x0_get_tx_power_per_rate(dev);
851 mt76x0_get_power_info(dev, info); 851 mt76x0_get_power_info(dev, &info);
852 852
853 mt76x02_add_rate_power_offset(t, info[0]); 853 mt76x02_add_rate_power_offset(t, info);
854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); 854 mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); 855 dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
856 mt76x02_add_rate_power_offset(t, -info[0]); 856 mt76x02_add_rate_power_offset(t, -info);
857 857
858 mt76x02_phy_set_txpower(dev, info[0], info[1]); 858 mt76x02_phy_set_txpower(dev, info, info);
859} 859}
860 860
861void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) 861void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
index 528cb0401df1..4956a54151cb 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
@@ -119,9 +119,9 @@ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
119 /* 119 /*
120 * Allocate DMA memory for descriptor and buffer. 120 * Allocate DMA memory for descriptor and buffer.
121 */ 121 */
122 addr = dma_zalloc_coherent(rt2x00dev->dev, 122 addr = dma_alloc_coherent(rt2x00dev->dev,
123 queue->limit * queue->desc_size, &dma, 123 queue->limit * queue->desc_size, &dma,
124 GFP_KERNEL); 124 GFP_KERNEL);
125 if (!addr) 125 if (!addr)
126 return -ENOMEM; 126 return -ENOMEM;
127 127
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index bd10165d7eec..4d4b07701149 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
164 } 164 }
165 165
166 sdio_claim_host(func); 166 sdio_claim_host(func);
167 /*
168 * To guarantee that the SDIO card is power cycled, as required to make
169 * the FW programming to succeed, let's do a brute force HW reset.
170 */
171 mmc_hw_reset(card->host);
172
167 sdio_enable_func(func); 173 sdio_enable_func(func);
168 sdio_release_host(func); 174 sdio_release_host(func);
169 175
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
174{ 180{
175 struct sdio_func *func = dev_to_sdio_func(glue->dev); 181 struct sdio_func *func = dev_to_sdio_func(glue->dev);
176 struct mmc_card *card = func->card; 182 struct mmc_card *card = func->card;
177 int error;
178 183
179 sdio_claim_host(func); 184 sdio_claim_host(func);
180 sdio_disable_func(func); 185 sdio_disable_func(func);
181 sdio_release_host(func); 186 sdio_release_host(func);
182 187
183 /* Let runtime PM know the card is powered off */ 188 /* Let runtime PM know the card is powered off */
184 error = pm_runtime_put(&card->dev); 189 pm_runtime_put(&card->dev);
185 if (error < 0 && error != -EBUSY) {
186 dev_err(&card->dev, "%s failed: %i\n", __func__, error);
187
188 return error;
189 }
190
191 return 0; 190 return 0;
192} 191}
193 192
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 64b218699656..3a93e4d9828b 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
530 SET_NETDEV_DEV(dev, &priv->lowerdev->dev); 530 SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
531 dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); 531 dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
532 532
533 if (!dev->ieee80211_ptr) 533 if (!dev->ieee80211_ptr) {
534 err = -ENOMEM;
534 goto remove_handler; 535 goto remove_handler;
536 }
535 537
536 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; 538 dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
537 dev->ieee80211_ptr->wiphy = common_wiphy; 539 dev->ieee80211_ptr->wiphy = common_wiphy;
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 5ee5f40b4dfc..f1eaa3c4d46a 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -1339,10 +1339,10 @@ static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1339 int rc; 1339 int rc;
1340 1340
1341 sndev->nr_rsvd_luts++; 1341 sndev->nr_rsvd_luts++;
1342 sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, 1342 sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
1343 LUT_SIZE, 1343 LUT_SIZE,
1344 &sndev->self_shared_dma, 1344 &sndev->self_shared_dma,
1345 GFP_KERNEL); 1345 GFP_KERNEL);
1346 if (!sndev->self_shared) { 1346 if (!sndev->self_shared) {
1347 dev_err(&sndev->stdev->dev, 1347 dev_err(&sndev->stdev->dev,
1348 "unable to allocate memory for shared mw\n"); 1348 "unable to allocate memory for shared mw\n");
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 0cf58cabc9ed..3cf50274fadb 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -26,6 +26,12 @@ static int nvdimm_probe(struct device *dev)
26 struct nvdimm_drvdata *ndd; 26 struct nvdimm_drvdata *ndd;
27 int rc; 27 int rc;
28 28
29 rc = nvdimm_security_setup_events(dev);
30 if (rc < 0) {
31 dev_err(dev, "security event setup failed: %d\n", rc);
32 return rc;
33 }
34
29 rc = nvdimm_check_config_data(dev); 35 rc = nvdimm_check_config_data(dev);
30 if (rc) { 36 if (rc) {
31 /* not required for non-aliased nvdimm, ex. NVDIMM-N */ 37 /* not required for non-aliased nvdimm, ex. NVDIMM-N */
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 4890310df874..efe412a6b5b9 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -578,13 +578,25 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
578} 578}
579EXPORT_SYMBOL_GPL(__nvdimm_create); 579EXPORT_SYMBOL_GPL(__nvdimm_create);
580 580
581int nvdimm_security_setup_events(struct nvdimm *nvdimm) 581static void shutdown_security_notify(void *data)
582{ 582{
583 nvdimm->sec.overwrite_state = sysfs_get_dirent(nvdimm->dev.kobj.sd, 583 struct nvdimm *nvdimm = data;
584 "security"); 584
585 sysfs_put(nvdimm->sec.overwrite_state);
586}
587
588int nvdimm_security_setup_events(struct device *dev)
589{
590 struct nvdimm *nvdimm = to_nvdimm(dev);
591
592 if (nvdimm->sec.state < 0 || !nvdimm->sec.ops
593 || !nvdimm->sec.ops->overwrite)
594 return 0;
595 nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
585 if (!nvdimm->sec.overwrite_state) 596 if (!nvdimm->sec.overwrite_state)
586 return -ENODEV; 597 return -ENOMEM;
587 return 0; 598
599 return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
588} 600}
589EXPORT_SYMBOL_GPL(nvdimm_security_setup_events); 601EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);
590 602
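Editor's note: nvdimm_security_setup_events() now early-outs for DIMMs without overwrite support and registers its sysfs_put() teardown through devm_add_action_or_reset(), so the sysfs reference is released automatically on driver detach, or immediately if registration fails. The devm action pattern in miniature (resource and names invented):

#include <linux/device.h>
#include <linux/slab.h>

/* Sketch: pair an acquisition with an automatic devm teardown. */
static void my_buf_release(void *data)
{
	kfree(data);	/* runs on detach, or at once if the
			 * registration below fails */
}

static int my_setup(struct device *dev)
{
	void *buf = kzalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* on error this calls my_buf_release(buf) before returning */
	return devm_add_action_or_reset(dev, my_buf_release, buf);
}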
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 2b2cf4e554d3..e5ffd5733540 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -54,12 +54,12 @@ struct nvdimm {
54}; 54};
55 55
56static inline enum nvdimm_security_state nvdimm_security_state( 56static inline enum nvdimm_security_state nvdimm_security_state(
57 struct nvdimm *nvdimm, bool master) 57 struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
58{ 58{
59 if (!nvdimm->sec.ops) 59 if (!nvdimm->sec.ops)
60 return -ENXIO; 60 return -ENXIO;
61 61
62 return nvdimm->sec.ops->state(nvdimm, master); 62 return nvdimm->sec.ops->state(nvdimm, ptype);
63} 63}
64int nvdimm_security_freeze(struct nvdimm *nvdimm); 64int nvdimm_security_freeze(struct nvdimm *nvdimm);
65#if IS_ENABLED(CONFIG_NVDIMM_KEYS) 65#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index cfde992684e7..379bf4305e61 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -250,6 +250,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
250void nvdimm_set_aliasing(struct device *dev); 250void nvdimm_set_aliasing(struct device *dev);
251void nvdimm_set_locked(struct device *dev); 251void nvdimm_set_locked(struct device *dev);
252void nvdimm_clear_locked(struct device *dev); 252void nvdimm_clear_locked(struct device *dev);
253int nvdimm_security_setup_events(struct device *dev);
253#if IS_ENABLED(CONFIG_NVDIMM_KEYS) 254#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
254int nvdimm_security_unlock(struct device *dev); 255int nvdimm_security_unlock(struct device *dev);
255#else 256#else
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 08f2c92602f4..6a9dd68c0f4f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1253 * effects say only one namespace is affected. 1253 * effects say only one namespace is affected.
1254 */ 1254 */
1255 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { 1255 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1256 mutex_lock(&ctrl->scan_lock);
1256 nvme_start_freeze(ctrl); 1257 nvme_start_freeze(ctrl);
1257 nvme_wait_freeze(ctrl); 1258 nvme_wait_freeze(ctrl);
1258 } 1259 }
@@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
1281 */ 1282 */
1282 if (effects & NVME_CMD_EFFECTS_LBCC) 1283 if (effects & NVME_CMD_EFFECTS_LBCC)
1283 nvme_update_formats(ctrl); 1284 nvme_update_formats(ctrl);
1284 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) 1285 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
1285 nvme_unfreeze(ctrl); 1286 nvme_unfreeze(ctrl);
1287 mutex_unlock(&ctrl->scan_lock);
1288 }
1286 if (effects & NVME_CMD_EFFECTS_CCC) 1289 if (effects & NVME_CMD_EFFECTS_CCC)
1287 nvme_init_identify(ctrl); 1290 nvme_init_identify(ctrl);
1288 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) 1291 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -2173,18 +2176,20 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
2173 size_t nqnlen; 2176 size_t nqnlen;
2174 int off; 2177 int off;
2175 2178
2176 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); 2179 if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) {
2177 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { 2180 nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
2178 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); 2181 if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
2179 return; 2182 strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
2180 } 2183 return;
2184 }
2181 2185
2182 if (ctrl->vs >= NVME_VS(1, 2, 1)) 2186 if (ctrl->vs >= NVME_VS(1, 2, 1))
2183 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); 2187 dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n");
2188 }
2184 2189
2185 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ 2190 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
2186 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, 2191 off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
2187 "nqn.2014.08.org.nvmexpress:%4x%4x", 2192 "nqn.2014.08.org.nvmexpress:%04x%04x",
2188 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); 2193 le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
2189 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); 2194 memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
2190 off += sizeof(id->sn); 2195 off += sizeof(id->sn);
@@ -2500,7 +2505,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
2500 ctrl->oaes = le32_to_cpu(id->oaes); 2505 ctrl->oaes = le32_to_cpu(id->oaes);
2501 atomic_set(&ctrl->abort_limit, id->acl + 1); 2506 atomic_set(&ctrl->abort_limit, id->acl + 1);
2502 ctrl->vwc = id->vwc; 2507 ctrl->vwc = id->vwc;
2503 ctrl->cntlid = le16_to_cpup(&id->cntlid);
2504 if (id->mdts) 2508 if (id->mdts)
2505 max_hw_sectors = 1 << (id->mdts + page_shift - 9); 2509 max_hw_sectors = 1 << (id->mdts + page_shift - 9);
2506 else 2510 else
@@ -3400,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work)
3400 if (nvme_identify_ctrl(ctrl, &id)) 3404 if (nvme_identify_ctrl(ctrl, &id))
3401 return; 3405 return;
3402 3406
3407 mutex_lock(&ctrl->scan_lock);
3403 nn = le32_to_cpu(id->nn); 3408 nn = le32_to_cpu(id->nn);
3404 if (ctrl->vs >= NVME_VS(1, 1, 0) && 3409 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
3405 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { 3410 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3408,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work)
3408 } 3413 }
3409 nvme_scan_ns_sequential(ctrl, nn); 3414 nvme_scan_ns_sequential(ctrl, nn);
3410out_free_id: 3415out_free_id:
3416 mutex_unlock(&ctrl->scan_lock);
3411 kfree(id); 3417 kfree(id);
3412 down_write(&ctrl->namespaces_rwsem); 3418 down_write(&ctrl->namespaces_rwsem);
3413 list_sort(NULL, &ctrl->namespaces, ns_cmp); 3419 list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3651,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
3651 3657
3652 ctrl->state = NVME_CTRL_NEW; 3658 ctrl->state = NVME_CTRL_NEW;
3653 spin_lock_init(&ctrl->lock); 3659 spin_lock_init(&ctrl->lock);
3660 mutex_init(&ctrl->scan_lock);
3654 INIT_LIST_HEAD(&ctrl->namespaces); 3661 INIT_LIST_HEAD(&ctrl->namespaces);
3655 init_rwsem(&ctrl->namespaces_rwsem); 3662 init_rwsem(&ctrl->namespaces_rwsem);
3656 ctrl->dev = dev; 3663 ctrl->dev = dev;
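Editor's note: the new ctrl->scan_lock serializes nvme_scan_work() against passthrough commands whose effects can change LBA formats or the namespace inventory; the command path holds it across the freeze/unfreeze window, so a rescan never observes half-updated namespaces. Reduced to a kernel-style sketch (the struct here is invented, not the real struct nvme_ctrl):

#include <linux/mutex.h>

struct ctrl {
	struct mutex scan_lock;
};

static void ctrl_init(struct ctrl *c)
{
	mutex_init(&c->scan_lock);
}

static void passthru_with_effects(struct ctrl *c)
{
	mutex_lock(&c->scan_lock);	/* hold rescans off ... */
	/* freeze I/O queues, issue the command, revalidate, unfreeze */
	mutex_unlock(&c->scan_lock);	/* ... until state has settled */
}

static void scan_work(struct ctrl *c)
{
	mutex_lock(&c->scan_lock);
	/* walk namespaces: no format change can be in flight here */
	mutex_unlock(&c->scan_lock);
}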
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index b2ab213f43de..3eb908c50e1a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -874,6 +874,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
874 if (opts->discovery_nqn) { 874 if (opts->discovery_nqn) {
875 opts->kato = 0; 875 opts->kato = 0;
876 opts->nr_io_queues = 0; 876 opts->nr_io_queues = 0;
877 opts->nr_write_queues = 0;
878 opts->nr_poll_queues = 0;
877 opts->duplicate_connect = true; 879 opts->duplicate_connect = true;
878 } 880 }
879 if (ctrl_loss_tmo < 0) 881 if (ctrl_loss_tmo < 0)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 183ec17ba067..b9fff3b8ed1b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
545 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); 545 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
546 ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + 546 ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
547 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); 547 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
548 if (!(ctrl->anacap & (1 << 6))) 548 ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
549 ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
550 549
551 if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { 550 if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
552 dev_err(ctrl->device, 551 dev_err(ctrl->device,
@@ -570,6 +569,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
570 return 0; 569 return 0;
571out_free_ana_log_buf: 570out_free_ana_log_buf:
572 kfree(ctrl->ana_log_buf); 571 kfree(ctrl->ana_log_buf);
572 ctrl->ana_log_buf = NULL;
573out: 573out:
574 return error; 574 return error;
575} 575}
@@ -577,5 +577,6 @@ out:
577void nvme_mpath_uninit(struct nvme_ctrl *ctrl) 577void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
578{ 578{
579 kfree(ctrl->ana_log_buf); 579 kfree(ctrl->ana_log_buf);
580 ctrl->ana_log_buf = NULL;
580} 581}
581 582
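Editor's note: both multipath hunks pair kfree(ctrl->ana_log_buf) with nulling the pointer, so the init error path and a later uninit cannot double-free or reuse a stale buffer. The idiom in isolation (struct and field names invented):

#include <linux/slab.h>

struct mpath_state {
	void *ana_log;	/* hypothetical stand-in for ana_log_buf */
};

/* Sketch: clear the pointer right after kfree() so teardown can run
 * more than once; kfree(NULL) is a no-op, making re-entry safe.
 */
static void mpath_free_log(struct mpath_state *s)
{
	kfree(s->ana_log);
	s->ana_log = NULL;
}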
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 2b36ac922596..c4a1bb41abf0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -90,6 +90,11 @@ enum nvme_quirks {
90 * Set MEDIUM priority on SQ creation 90 * Set MEDIUM priority on SQ creation
91 */ 91 */
92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), 92 NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
93
94 /*
95 * Ignore device provided subnqn.
96 */
97 NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
93}; 98};
94 99
95/* 100/*
@@ -149,6 +154,7 @@ struct nvme_ctrl {
149 enum nvme_ctrl_state state; 154 enum nvme_ctrl_state state;
150 bool identified; 155 bool identified;
151 spinlock_t lock; 156 spinlock_t lock;
157 struct mutex scan_lock;
152 const struct nvme_ctrl_ops *ops; 158 const struct nvme_ctrl_ops *ops;
153 struct request_queue *admin_q; 159 struct request_queue *admin_q;
154 struct request_queue *connect_q; 160 struct request_queue *connect_q;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5a0bf6a24d50..7fee665ec45e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -95,6 +95,7 @@ struct nvme_dev;
 struct nvme_queue;
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
 
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
@@ -1019,9 +1020,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
 
 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
 {
-	if (++nvmeq->cq_head == nvmeq->q_depth) {
+	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
 		nvmeq->cq_head = 0;
 		nvmeq->cq_phase = !nvmeq->cq_phase;
+	} else {
+		nvmeq->cq_head++;
 	}
 }
 
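The rewritten nvme_update_cq_head() above never lets cq_head hold the out-of-range value q_depth, not even transiently: it compares first and then mutates. A minimal userspace sketch of the same ring-cursor semantics (illustrative types only, not the driver's struct nvme_queue):

#include <assert.h>
#include <stdbool.h>

/* Model of an NVMe completion-queue cursor: the head index wraps at
 * q_depth and each wrap toggles the expected phase bit. */
struct cq_cursor {
	unsigned int head;
	unsigned int depth;
	bool phase;
};

static void cq_advance(struct cq_cursor *cq)
{
	/* compare first, then mutate, as the patched kernel code does */
	if (cq->head == cq->depth - 1) {
		cq->head = 0;
		cq->phase = !cq->phase;
	} else {
		cq->head++;
	}
}

int main(void)
{
	struct cq_cursor cq = { .head = 0, .depth = 4, .phase = true };
	unsigned int i;

	for (i = 0; i < 8; i++)
		cq_advance(&cq);
	assert(cq.head == 0 && cq.phase);	/* two full wraps */
	return 0;
}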
@@ -1420,6 +1423,14 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	return 0;
 }
 
+static void nvme_suspend_io_queues(struct nvme_dev *dev)
+{
+	int i;
+
+	for (i = dev->ctrl.queue_count - 1; i > 0; i--)
+		nvme_suspend_queue(&dev->queues[i]);
+}
+
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
 	struct nvme_queue *nvmeq = &dev->queues[0];
@@ -1485,8 +1496,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
-	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
+	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
 					  &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
@@ -1885,8 +1896,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 		struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
 		size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
 
-		dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
-				le64_to_cpu(desc->addr));
+		dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+			       le64_to_cpu(desc->addr),
+			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 	}
 
 	kfree(dev->host_mem_desc_bufs);
@@ -1915,8 +1927,8 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries)
 		max_entries = dev->ctrl.hmmaxd;
 
-	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+	descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs),
 			&descs_dma, GFP_KERNEL);
 	if (!descs)
 		goto out;
 
@@ -1952,8 +1964,9 @@ out_free_bufs:
 	while (--i >= 0) {
 		size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
 
-		dma_free_coherent(dev->dev, size, bufs[i],
-				le64_to_cpu(descs[i].addr));
+		dma_free_attrs(dev->dev, size, bufs[i],
+			       le64_to_cpu(descs[i].addr),
+			       DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 	}
 
 	kfree(bufs);
@@ -2028,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	return ret;
 }
 
+/* irq_queues covers admin queue */
 static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
 {
 	unsigned int this_w_queues = write_queues;
 
+	WARN_ON(!irq_queues);
+
 	/*
-	 * Setup read/write queue split
+	 * Setup read/write queue split, assign admin queue one independent
+	 * irq vector if irq_queues is > 1.
 	 */
-	if (irq_queues == 1) {
+	if (irq_queues <= 2) {
 		dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
 		dev->io_queues[HCTX_TYPE_READ] = 0;
 		return;
@@ -2043,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
 
 	/*
 	 * If 'write_queues' is set, ensure it leaves room for at least
-	 * one read queue
+	 * one read queue and one admin queue
 	 */
 	if (this_w_queues >= irq_queues)
-		this_w_queues = irq_queues - 1;
+		this_w_queues = irq_queues - 2;
 
 	/*
 	 * If 'write_queues' is set to zero, reads and writes will share
 	 * a queue set.
 	 */
 	if (!this_w_queues) {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
+		dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
 		dev->io_queues[HCTX_TYPE_READ] = 0;
 	} else {
 		dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
-		dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+		dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
 	}
 }
 
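The arithmetic change in nvme_calc_io_queues() reserves one vector for the admin queue whenever more than two are available. A standalone model of the patched split, with the behavior pinned down by a few checks (illustrative only):

#include <assert.h>

static void calc_io_queues(unsigned int irq_queues, unsigned int write_queues,
			   unsigned int *def, unsigned int *read)
{
	unsigned int w = write_queues;

	if (irq_queues <= 2) {		/* admin shares with the one I/O queue */
		*def = 1;
		*read = 0;
		return;
	}
	if (w >= irq_queues)		/* leave room for a read + admin vector */
		w = irq_queues - 2;
	if (!w) {			/* reads and writes share one queue set */
		*def = irq_queues - 1;
		*read = 0;
	} else {
		*def = w;
		*read = irq_queues - w - 1;
	}
}

int main(void)
{
	unsigned int def, read;

	calc_io_queues(8, 0, &def, &read);	/* shared set: 7 I/O + admin */
	assert(def == 7 && read == 0);
	calc_io_queues(8, 3, &def, &read);	/* split: 3 write, 4 read */
	assert(def == 3 && read == 4);
	calc_io_queues(8, 10, &def, &read);	/* clamped: 6 write, 1 read */
	assert(def == 6 && read == 1);
	return 0;
}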
@@ -2082,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
 	} else {
-		irq_queues = nr_io_queues - this_p_queues;
+		irq_queues = nr_io_queues - this_p_queues + 1;
 	}
 	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
@@ -2102,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		 * If we got a failure and we're down to asking for just
 		 * 1 + 1 queues, just ask for a single vector. We'll share
 		 * that between the single IO queue and the admin queue.
+		 * Otherwise, we assign one independent vector to admin queue.
 		 */
-		if (result >= 0 && irq_queues > 1)
+		if (irq_queues > 1)
 			irq_queues = irq_sets[0] + irq_sets[1] + 1;
 
 		result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
@@ -2132,6 +2150,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	return result;
 }
 
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+	if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq))
+		__nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct nvme_queue *adminq = &dev->queues[0];
@@ -2168,6 +2192,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	} while (1);
 	adminq->q_db = dev->dbs;
 
+ retry:
 	/* Deregister the admin queue's interrupt */
 	pci_free_irq(pdev, 0, adminq);
 
@@ -2185,25 +2210,34 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	result = max(result - 1, 1);
 	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
 
-	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
-					dev->io_queues[HCTX_TYPE_DEFAULT],
-					dev->io_queues[HCTX_TYPE_READ],
-					dev->io_queues[HCTX_TYPE_POLL]);
-
 	/*
 	 * Should investigate if there's a performance win from allocating
 	 * more queues than interrupt vectors; it might allow the submission
 	 * path to scale better, even if the receive path is limited by the
 	 * number of interrupts.
 	 */
-
 	result = queue_request_irq(adminq);
 	if (result) {
 		adminq->cq_vector = -1;
 		return result;
 	}
 	set_bit(NVMEQ_ENABLED, &adminq->flags);
-	return nvme_create_io_queues(dev);
+
+	result = nvme_create_io_queues(dev);
+	if (result || dev->online_queues < 2)
+		return result;
+
+	if (dev->online_queues - 1 < dev->max_qid) {
+		nr_io_queues = dev->online_queues - 1;
+		nvme_disable_io_queues(dev);
+		nvme_suspend_io_queues(dev);
+		goto retry;
+	}
+	dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
+		 dev->io_queues[HCTX_TYPE_DEFAULT],
+		 dev->io_queues[HCTX_TYPE_READ],
+		 dev->io_queues[HCTX_TYPE_POLL]);
+	return 0;
 }
 
 static void nvme_del_queue_end(struct request *req, blk_status_t error)
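The retry added to nvme_setup_io_queues() resizes the request when the controller brings fewer queues online than the tag set was sized for. A standalone model of that loop's shape (the bring_online() stub is a stand-in for nvme_create_io_queues(); a sketch, not the driver):

#include <assert.h>

static unsigned int bring_online(unsigned int requested)
{
	return requested < 4 ? requested : 4;	/* device caps us at 4 */
}

static unsigned int setup_io_queues(unsigned int nr_io_queues)
{
	unsigned int online, max_qid;

	for (;;) {
		max_qid = nr_io_queues;
		online = bring_online(nr_io_queues);
		if (online >= max_qid)
			return online;	/* got everything we sized for */
		/* fewer queues than vectors: retry sized to what we got */
		nr_io_queues = online;
	}
}

int main(void)
{
	assert(setup_io_queues(16) == 4);	/* shrinks once, then settles */
	assert(setup_io_queues(2) == 2);
	return 0;
}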
@@ -2248,7 +2282,7 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	return 0;
 }
 
-static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
+static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
 {
 	int nr_queues = dev->online_queues - 1, sent = 0;
 	unsigned long timeout;
@@ -2294,7 +2328,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	dev->tagset.nr_maps = 2; /* default + read */
 	if (dev->io_queues[HCTX_TYPE_POLL])
 		dev->tagset.nr_maps++;
-	dev->tagset.nr_maps = HCTX_MAX_TYPES;
 	dev->tagset.timeout = NVME_IO_TIMEOUT;
 	dev->tagset.numa_node = dev_to_node(dev->dev);
 	dev->tagset.queue_depth =
@@ -2410,7 +2443,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-	int i;
 	bool dead = true;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
@@ -2437,13 +2469,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 	nvme_stop_queues(&dev->ctrl);
 
 	if (!dead && dev->ctrl.queue_count > 0) {
-		if (nvme_disable_io_queues(dev, nvme_admin_delete_sq))
-			nvme_disable_io_queues(dev, nvme_admin_delete_cq);
+		nvme_disable_io_queues(dev);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
-	for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
-		nvme_suspend_queue(&dev->queues[i]);
-
+	nvme_suspend_io_queues(dev);
+	nvme_suspend_queue(&dev->queues[0]);
 	nvme_pci_disable(dev);
 
 	blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
@@ -2527,27 +2557,18 @@ static void nvme_reset_work(struct work_struct *work)
 	if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
 		nvme_dev_disable(dev, false);
 
-	/*
-	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
-	 * initializing procedure here.
-	 */
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
-		dev_warn(dev->ctrl.device,
-			"failed to mark controller CONNECTING\n");
-		goto out;
-	}
-
+	mutex_lock(&dev->shutdown_lock);
 	result = nvme_pci_enable(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	result = nvme_pci_configure_admin_queue(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	result = nvme_alloc_admin_tags(dev);
 	if (result)
-		goto out;
+		goto out_unlock;
 
 	/*
 	 * Limit the max command size to prevent iod->sg allocations going
@@ -2555,6 +2576,17 @@ static void nvme_reset_work(struct work_struct *work)
 	 */
 	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
 	dev->ctrl.max_segments = NVME_MAX_SEGS;
+	mutex_unlock(&dev->shutdown_lock);
+
+	/*
+	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
+	 * initializing procedure here.
+	 */
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
+		dev_warn(dev->ctrl.device,
+			"failed to mark controller CONNECTING\n");
+		goto out;
+	}
 
 	result = nvme_init_identify(&dev->ctrl);
 	if (result)
@@ -2619,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work)
 	nvme_start_ctrl(&dev->ctrl);
 	return;
 
+ out_unlock:
+	mutex_unlock(&dev->shutdown_lock);
  out:
 	nvme_remove_dead_ctrl(dev, result);
 }
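The two reset-work hunks above move the PCI bring-up steps under shutdown_lock so a concurrent nvme_dev_disable() cannot race them, and defer the CONNECTING state change until the lock is dropped. A minimal sketch of that ordering, with hypothetical stand-ins for the driver calls:

#include <pthread.h>
#include <assert.h>

static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
static int enabled;

static int pci_enable_step(void)
{
	enabled = 1;	/* admin queue/tags setup would happen here too */
	return 0;
}

static int reset_work(void)
{
	int result;

	pthread_mutex_lock(&shutdown_lock);	/* serialize vs. disable path */
	result = pci_enable_step();
	pthread_mutex_unlock(&shutdown_lock);
	if (result)
		return result;

	/* only now mark the controller CONNECTING and continue init */
	return 0;
}

int main(void)
{
	assert(reset_work() == 0 && enabled);
	return 0;
}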
@@ -2946,6 +2980,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
 				NVME_QUIRK_MEDIUM_PRIO_SQ },
+	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
+		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0a2fd2949ad7..52abc3a6de12 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
 
 	struct nvme_ctrl	ctrl;
 	bool			use_inline_data;
+	u32			io_queues[HCTX_MAX_TYPES];
 };
 
 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
 {
 	return nvme_rdma_queue_idx(queue) >
-		queue->ctrl->ctrl.opts->nr_io_queues +
-		queue->ctrl->ctrl.opts->nr_write_queues;
+		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+		queue->ctrl->io_queues[HCTX_TYPE_READ];
 }
 
 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	nr_io_queues = min_t(unsigned int, nr_io_queues,
 				ibdev->num_comp_vectors);
 
-	nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
-	nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
+	if (opts->nr_write_queues) {
+		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_write_queues, nr_io_queues);
+		nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
+	}
+
+	ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
+
+	if (opts->nr_poll_queues) {
+		ctrl->io_queues[HCTX_TYPE_POLL] =
+			min(opts->nr_poll_queues, num_online_cpus());
+		nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
+	}
 
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret)
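The point of the hunk above is that the per-type counts are recorded in io_queues[] as they are reserved, so the queue-mapping code later uses what was actually allocated rather than the raw connect options. A standalone model mirroring the hunk's order of assignments (illustrative, not the driver code):

#include <assert.h>

enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

static unsigned int account_queues(unsigned int nr_io_queues,
				   unsigned int nr_write_queues,
				   unsigned int nr_poll_queues,
				   unsigned int online_cpus,
				   unsigned int io_queues[HCTX_MAX_TYPES])
{
	if (nr_write_queues) {
		io_queues[HCTX_TYPE_DEFAULT] = nr_write_queues < nr_io_queues ?
					       nr_write_queues : nr_io_queues;
		nr_io_queues += io_queues[HCTX_TYPE_DEFAULT];
	} else {
		io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
	}
	io_queues[HCTX_TYPE_READ] = nr_io_queues;
	if (nr_poll_queues) {
		io_queues[HCTX_TYPE_POLL] = nr_poll_queues < online_cpus ?
					    nr_poll_queues : online_cpus;
		nr_io_queues += io_queues[HCTX_TYPE_POLL];
	}
	return nr_io_queues;	/* total requested from the controller */
}

int main(void)
{
	unsigned int q[HCTX_MAX_TYPES] = { 0 };

	/* no separate write queues: default and read share the same count */
	assert(account_queues(8, 0, 0, 4, q) == 8);
	assert(q[HCTX_TYPE_DEFAULT] == 8 && q[HCTX_TYPE_READ] == 8);
	return 0;
}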
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-	dev_warn(req->queue->ctrl->ctrl.device,
-		 "I/O %d QID %d timeout, reset controller\n",
-		 rq->tag, nvme_rdma_queue_idx(req->queue));
+	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/* queue error recovery */
-	nvme_rdma_error_recovery(req->queue->ctrl);
+	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_rdma_teardown_io_queues(ctrl, false);
+		nvme_rdma_teardown_admin_queue(ctrl, false);
+		return BLK_EH_DONE;
+	}
 
-	/* fail with DNR on cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	nvme_rdma_error_recovery(ctrl);
 
-	return BLK_EH_DONE;
+	return BLK_EH_RESET_TIMER;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
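Both the RDMA handler above and the TCP handler later in this series converge on the same timeout decision: if the controller is not LIVE, tear down and complete the request; if it is, kick error recovery and rearm the timer. A small model of that branch (enum names mimic the kernel's, but this is an illustration, not the driver code):

#include <assert.h>

enum ctrl_state { CTRL_NEW, CTRL_CONNECTING, CTRL_LIVE };
enum eh_verdict { EH_DONE, EH_RESET_TIMER };

static enum eh_verdict timeout(enum ctrl_state state, int *recovering)
{
	if (state != CTRL_LIVE) {
		/* teardown path: outstanding requests complete on shutdown */
		return EH_DONE;
	}
	*recovering = 1;	/* queue error recovery */
	return EH_RESET_TIMER;
}

int main(void)
{
	int rec = 0;

	assert(timeout(CTRL_CONNECTING, &rec) == EH_DONE && !rec);
	assert(timeout(CTRL_LIVE, &rec) == EH_RESET_TIMER && rec);
	return 0;
}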
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 
 	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-	set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+	set->map[HCTX_TYPE_DEFAULT].nr_queues =
+		ctrl->io_queues[HCTX_TYPE_DEFAULT];
+	set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
 	if (ctrl->ctrl.opts->nr_write_queues) {
 		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->ctrl.opts->nr_write_queues;
 		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->ctrl.opts->nr_write_queues;
+			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 	} else {
 		/* mixed read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->ctrl.opts->nr_io_queues;
 		set->map[HCTX_TYPE_READ].queue_offset = 0;
 	}
 	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 
 	if (ctrl->ctrl.opts->nr_poll_queues) {
 		set->map[HCTX_TYPE_POLL].nr_queues =
-			ctrl->ctrl.opts->nr_poll_queues;
+			ctrl->io_queues[HCTX_TYPE_POLL];
 		set->map[HCTX_TYPE_POLL].queue_offset =
-			ctrl->ctrl.opts->nr_io_queues;
+			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 		if (ctrl->ctrl.opts->nr_write_queues)
 			set->map[HCTX_TYPE_POLL].queue_offset +=
-				ctrl->ctrl.opts->nr_write_queues;
+				ctrl->io_queues[HCTX_TYPE_READ];
 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
 	}
 	return 0;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index de174912445e..5f0a00425242 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1565,8 +1565,7 @@ static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_io_queues(ctrl);
 	if (remove) {
-		if (ctrl->ops->flags & NVME_F_FABRICS)
-			blk_cleanup_queue(ctrl->connect_q);
+		blk_cleanup_queue(ctrl->connect_q);
 		blk_mq_free_tag_set(ctrl->tagset);
 	}
 	nvme_tcp_free_io_queues(ctrl);
@@ -1587,12 +1586,10 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 			goto out_free_io_queues;
 		}
 
-		if (ctrl->ops->flags & NVME_F_FABRICS) {
-			ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
-			if (IS_ERR(ctrl->connect_q)) {
-				ret = PTR_ERR(ctrl->connect_q);
-				goto out_free_tag_set;
-			}
+		ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+		if (IS_ERR(ctrl->connect_q)) {
+			ret = PTR_ERR(ctrl->connect_q);
+			goto out_free_tag_set;
 		}
 	} else {
 		blk_mq_update_nr_hw_queues(ctrl->tagset,
@@ -1606,7 +1603,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
 	return 0;
 
 out_cleanup_connect_q:
-	if (new && (ctrl->ops->flags & NVME_F_FABRICS))
+	if (new)
 		blk_cleanup_queue(ctrl->connect_q);
 out_free_tag_set:
 	if (new)
@@ -1620,7 +1617,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 {
 	nvme_tcp_stop_queue(ctrl, 0);
 	if (remove) {
-		free_opal_dev(ctrl->opal_dev);
 		blk_cleanup_queue(ctrl->admin_q);
 		blk_mq_free_tag_set(ctrl->admin_tagset);
 	}
@@ -1952,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
-	dev_dbg(ctrl->ctrl.device,
+	dev_warn(ctrl->ctrl.device,
 		"queue %d: timeout request %#x type %d\n",
-		nvme_tcp_queue_id(req->queue), rq->tag,
-		pdu->hdr.type);
+		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
 
 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
-		union nvme_result res = {};
-
-		nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
-		nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
+		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
 		return BLK_EH_DONE;
 	}
 
-	/* queue error recovery */
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
 	nvme_tcp_error_recovery(&ctrl->ctrl);
 
 	return BLK_EH_RESET_TIMER;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index a8d23eb80192..a884e3a0e8af 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+				struct nvmet_rdma_rsp *r);
 
 static const struct nvmet_fabrics_ops nvmet_rdma_ops;
 
@@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}
 
@@ -197,6 +209,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 	unsigned long flags;
 
 	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}
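The two nvmet-rdma hunks pair an alloc with a free on the emergency path: a response taken from the preallocated pool just goes back to it, while a dynamically allocated one must release the extra resources it set up before being freed. A userspace model of that symmetry (illustrative names, not the target driver's structures):

#include <stdlib.h>
#include <stdbool.h>
#include <assert.h>

struct rsp {
	bool allocated;		/* true when it came from the fallback path */
	void *payload;		/* stands in for the command/SGL resources */
};

static struct rsp *get_rsp(struct rsp *pool_rsp)
{
	struct rsp *rsp = pool_rsp;	/* pool hit: nothing to set up */

	if (!rsp) {
		rsp = calloc(1, sizeof(*rsp));
		if (!rsp)
			return NULL;
		rsp->payload = malloc(64);	/* alloc_rsp() equivalent */
		if (!rsp->payload) {		/* undo on failure, as patched */
			free(rsp);
			return NULL;
		}
		rsp->allocated = true;
	}
	return rsp;
}

static void put_rsp(struct rsp *rsp)
{
	if (rsp->allocated) {
		free(rsp->payload);	/* free_rsp() equivalent: the fix */
		free(rsp);
		return;
	}
	/* pool responses just go back on the free list (omitted) */
}

int main(void)
{
	struct rsp *rsp = get_rsp(NULL);	/* force the fallback path */

	assert(rsp && rsp->allocated);
	put_rsp(rsp);
	return 0;
}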
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 44b37b202e39..ad0df786fe93 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1089,7 +1089,7 @@ out:
 
 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
 {
-	int result;
+	int result = 0;
 
 	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
 		return 0;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index a09c1c3cf831..49b16f76d78e 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -207,11 +207,8 @@ static void __of_attach_node(struct device_node *np)
 
 	if (!of_node_check_flag(np, OF_OVERLAY)) {
 		np->name = __of_get_property(np, "name", NULL);
-		np->type = __of_get_property(np, "device_type", NULL);
 		if (!np->name)
 			np->name = "<NULL>";
-		if (!np->type)
-			np->type = "<NULL>";
 
 		phandle = __of_get_property(np, "phandle", &sz);
 		if (!phandle)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7099c652c6a5..9cc1461aac7d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,12 +314,8 @@ static bool populate_node(const void *blob,
 	populate_properties(blob, offset, mem, np, pathp, dryrun);
 	if (!dryrun) {
 		np->name = of_get_property(np, "name", NULL);
-		np->type = of_get_property(np, "device_type", NULL);
-
 		if (!np->name)
 			np->name = "<NULL>";
-		if (!np->type)
-			np->type = "<NULL>";
 	}
 
 	*pnp = np;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 2b5ac43a5690..c423e94baf0f 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -423,12 +423,9 @@ static int add_changeset_node(struct overlay_changeset *ovcs,
 
 	tchild->parent = target->np;
 	tchild->name = __of_get_property(node, "name", NULL);
-	tchild->type = __of_get_property(node, "device_type", NULL);
 
 	if (!tchild->name)
 		tchild->name = "<NULL>";
-	if (!tchild->type)
-		tchild->type = "<NULL>";
 
 	/* ignore obsolete "linux,phandle" */
 	phandle = __of_get_property(node, "phandle", &size);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index d3185063d369..7eda43c66c91 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -155,7 +155,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
 	dp->parent = parent;
 
 	dp->name = of_pdt_get_one_property(node, "name");
-	dp->type = of_pdt_get_one_property(node, "device_type");
 	dp->phandle = node;
 
 	dp->properties = of_pdt_build_prop_list(node);
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 08430031bd28..8631efa1daa1 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
 
 	if (!of_device_is_available(remote)) {
 		pr_debug("not available for remote node\n");
+		of_node_put(remote);
 		return NULL;
 	}
 
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e5507add8f04..4e00301060cf 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -533,9 +533,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 	return ret;
 }
 
-static inline int
-_generic_set_opp_clk_only(struct device *dev, struct clk *clk,
-			  unsigned long old_freq, unsigned long freq)
+static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
+					    unsigned long freq)
 {
 	int ret;
 
@@ -572,7 +571,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	}
 
 	/* Change frequency */
-	ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq);
+	ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq);
 	if (ret)
 		goto restore_voltage;
 
@@ -586,7 +585,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
 	return 0;
 
 restore_freq:
-	if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq))
+	if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq))
 		dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 			__func__, old_freq);
 restore_voltage:
@@ -759,7 +758,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 					      opp->supplies);
 	} else {
 		/* Only frequency scaling */
-		ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+		ret = _generic_set_opp_clk_only(dev, clk, freq);
 	}
 
 	/* Scaling down? Configure required OPPs after frequency */
@@ -988,11 +987,9 @@ void _opp_free(struct dev_pm_opp *opp)
 	kfree(opp);
 }
 
-static void _opp_kref_release(struct kref *kref)
+static void _opp_kref_release(struct dev_pm_opp *opp,
+			      struct opp_table *opp_table)
 {
-	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
-	struct opp_table *opp_table = opp->opp_table;
-
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
@@ -1002,7 +999,22 @@ static void _opp_kref_release(struct kref *kref)
 	opp_debug_remove_one(opp);
 	list_del(&opp->node);
 	kfree(opp);
+}
 
+static void _opp_kref_release_unlocked(struct kref *kref)
+{
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	_opp_kref_release(opp, opp_table);
+}
+
+static void _opp_kref_release_locked(struct kref *kref)
+{
+	struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref);
+	struct opp_table *opp_table = opp->opp_table;
+
+	_opp_kref_release(opp, opp_table);
 	mutex_unlock(&opp_table->lock);
 }
 
@@ -1013,10 +1025,16 @@ void dev_pm_opp_get(struct dev_pm_opp *opp)
 
 void dev_pm_opp_put(struct dev_pm_opp *opp)
 {
-	kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock);
+	kref_put_mutex(&opp->kref, _opp_kref_release_locked,
+		       &opp->opp_table->lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put);
 
+static void dev_pm_opp_put_unlocked(struct dev_pm_opp *opp)
+{
+	kref_put(&opp->kref, _opp_kref_release_unlocked);
+}
+
 /**
  * dev_pm_opp_remove() - Remove an OPP from OPP table
  * @dev:	device for which we do this operation
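The split into _locked/_unlocked release flavors exists because the new bulk-removal path below walks the OPP list with opp_table->lock already held, so the final put must not re-acquire it. A simplified model of the two entry points (a plain counter stands in for struct kref, and unlike kref_put_mutex() the locked path here always takes the mutex; sketch only):

#include <pthread.h>
#include <assert.h>

struct opp {
	int refcount;
	pthread_mutex_t *table_lock;
};

static void release(struct opp *opp)
{
	/* list_del(), debugfs removal, kfree() would happen here */
	opp->refcount = -1;
}

static void opp_put(struct opp *opp)		/* lock not held by caller */
{
	pthread_mutex_lock(opp->table_lock);
	if (--opp->refcount == 0)
		release(opp);
	pthread_mutex_unlock(opp->table_lock);
}

static void opp_put_unlocked(struct opp *opp)	/* caller holds the lock */
{
	if (--opp->refcount == 0)
		release(opp);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct opp a = { .refcount = 1, .table_lock = &lock };
	struct opp b = { .refcount = 1, .table_lock = &lock };

	opp_put(&a);			/* takes and drops the lock itself */
	pthread_mutex_lock(&lock);	/* e.g. while walking the OPP list */
	opp_put_unlocked(&b);
	pthread_mutex_unlock(&lock);
	assert(a.refcount == -1 && b.refcount == -1);
	return 0;
}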
@@ -1060,6 +1078,40 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+/**
+ * dev_pm_opp_remove_all_dynamic() - Remove all dynamically created OPPs
+ * @dev:	device for which we do this operation
+ *
+ * This function removes all dynamically created OPPs from the opp table.
+ */
+void dev_pm_opp_remove_all_dynamic(struct device *dev)
+{
+	struct opp_table *opp_table;
+	struct dev_pm_opp *opp, *temp;
+	int count = 0;
+
+	opp_table = _find_opp_table(dev);
+	if (IS_ERR(opp_table))
+		return;
+
+	mutex_lock(&opp_table->lock);
+	list_for_each_entry_safe(opp, temp, &opp_table->opp_list, node) {
+		if (opp->dynamic) {
+			dev_pm_opp_put_unlocked(opp);
+			count++;
+		}
+	}
+	mutex_unlock(&opp_table->lock);
+
+	/* Drop the references taken by dev_pm_opp_add() */
+	while (count--)
+		dev_pm_opp_put_opp_table(opp_table);
+
+	/* Drop the reference taken by _find_opp_table() */
+	dev_pm_opp_put_opp_table(opp_table);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_all_dynamic);
+
 struct dev_pm_opp *_opp_allocate(struct opp_table *table)
 {
 	struct dev_pm_opp *opp;
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 06f0f632ec47..cd58959e5158 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -20,6 +20,7 @@
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/energy_model.h>
 
 #include "opp.h"
 
@@ -1047,3 +1048,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
 	return of_node_get(opp->np);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
+
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @CPU at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz
+ * otherwise (see dev_pm_opp_find_freq_ceil()). This function updates @kHz
+ * to the ceiled frequency and @mW to the associated power. The power is
+ * estimated as P = C * V^2 * f with C being the CPU's capacitance and V
+ * and f respectively the voltage and frequency of the OPP.
+ *
+ * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power
+ * calculation failed because of missing parameters, 0 otherwise.
+ */
+static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz,
+					 int cpu)
+{
+	struct device *cpu_dev;
+	struct dev_pm_opp *opp;
+	struct device_node *np;
+	unsigned long mV, Hz;
+	u32 cap;
+	u64 tmp;
+	int ret;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return -ENODEV;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret)
+		return -EINVAL;
+
+	Hz = *kHz * 1000;
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+	if (IS_ERR(opp))
+		return -EINVAL;
+
+	mV = dev_pm_opp_get_voltage(opp) / 1000;
+	dev_pm_opp_put(opp);
+	if (!mV)
+		return -EINVAL;
+
+	tmp = (u64)cap * mV * mV * (Hz / 1000000);
+	do_div(tmp, 1000000000);
+
+	*mW = (unsigned long)tmp;
+	*kHz = Hz / 1000;
+
+	return 0;
+}
+
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @cpus	: CPUs for which an Energy Model has to be registered
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ */
+void dev_pm_opp_of_register_em(struct cpumask *cpus)
+{
+	struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power);
+	int ret, nr_opp, cpu = cpumask_first(cpus);
+	struct device *cpu_dev;
+	struct device_node *np;
+	u32 cap;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return;
+
+	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
+	if (nr_opp <= 0)
+		return;
+
+	np = of_node_get(cpu_dev->of_node);
+	if (!np)
+		return;
+
+	/*
+	 * Register an EM only if the 'dynamic-power-coefficient' property is
+	 * set in devicetree. It is assumed the voltage values are known if
+	 * that property is set since it is useless otherwise. If voltages
+	 * are not known, just let the EM registration fail with an error to
+	 * alert the user about the inconsistent configuration.
+	 */
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret || !cap)
+		return;
+
+	em_register_perf_domain(cpus, nr_opp, &em_cb);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
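To make _get_cpu_power()'s fixed-point arithmetic concrete, here is the formula it implements with made-up numbers (a coefficient C = 105 uW/MHz/V^2, an OPP at 1100 mV and 1500 MHz; these values are illustrative, not from any real devicetree):

\[
P_{\text{mW}} = \frac{C \cdot V_{\text{mV}}^2 \cdot f_{\text{MHz}}}{10^9}
             = \frac{105 \cdot 1100^2 \cdot 1500}{10^9}
             = \frac{190\,575\,000\,000}{10^9} \approx 190\ \text{mW}
\]

The divisor 10^9 folds the mV-to-V conversion (squared, 10^6) and the uW-to-mW conversion (10^3) into one do_div(), which truncates, so the kernel reports 190 mW here.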
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4310c7a4212e..2ab92409210a 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -21,13 +21,14 @@ menuconfig PCI
 	  support for PCI-X and the foundations for PCI Express support.
 	  Say 'Y' here unless you know what you are doing.
 
+if PCI
+
 config PCI_DOMAINS
 	bool
 	depends on PCI
 
 config PCI_DOMAINS_GENERIC
 	bool
-	depends on PCI
 	select PCI_DOMAINS
 
 config PCI_SYSCALL
@@ -37,7 +38,6 @@ source "drivers/pci/pcie/Kconfig"
 
 config PCI_MSI
 	bool "Message Signaled Interrupts (MSI and MSI-X)"
-	depends on PCI
 	select GENERIC_MSI_IRQ
 	help
 	   This allows device drivers to enable MSI (Message Signaled
@@ -59,7 +59,6 @@ config PCI_MSI_IRQ_DOMAIN
 config PCI_QUIRKS
 	default y
 	bool "Enable PCI quirk workarounds" if EXPERT
-	depends on PCI
 	help
 	  This enables workarounds for various PCI chipset bugs/quirks.
 	  Disable this only if your target machine is unaffected by PCI
@@ -67,7 +66,7 @@ config PCI_QUIRKS
 
 config PCI_DEBUG
 	bool "PCI Debugging"
-	depends on PCI && DEBUG_KERNEL
+	depends on DEBUG_KERNEL
 	help
 	  Say Y here if you want the PCI core to produce a bunch of debug
 	  messages to the system log.  Select this if you are having a
@@ -77,7 +76,6 @@ config PCI_DEBUG
 
 config PCI_REALLOC_ENABLE_AUTO
 	bool "Enable PCI resource re-allocation detection"
-	depends on PCI
 	depends on PCI_IOV
 	help
 	  Say Y here if you want the PCI core to detect if PCI resource
@@ -90,7 +88,6 @@ config PCI_REALLOC_ENABLE_AUTO
 
 config PCI_STUB
 	tristate "PCI Stub driver"
-	depends on PCI
 	help
 	  Say Y or M here if you want be able to reserve a PCI device
 	  when it is going to be assigned to a guest operating system.
@@ -99,7 +96,6 @@ config PCI_STUB
 
 config PCI_PF_STUB
 	tristate "PCI PF Stub driver"
-	depends on PCI
 	depends on PCI_IOV
 	help
 	  Say Y or M here if you want to enable support for devices that
@@ -111,7 +107,7 @@ config PCI_PF_STUB
 
 config XEN_PCIDEV_FRONTEND
 	tristate "Xen PCI Frontend"
-	depends on PCI && X86 && XEN
+	depends on X86 && XEN
 	select PCI_XEN
 	select XEN_XENBUS_FRONTEND
 	default y
@@ -133,7 +129,6 @@ config PCI_BRIDGE_EMUL
 
 config PCI_IOV
 	bool "PCI IOV support"
-	depends on PCI
 	select PCI_ATS
 	help
 	  I/O Virtualization is a PCI feature supported by some devices
@@ -144,7 +139,6 @@ config PCI_IOV
 
 config PCI_PRI
 	bool "PCI PRI support"
-	depends on PCI
 	select PCI_ATS
 	help
 	  PRI is the PCI Page Request Interface. It allows PCI devices that are
@@ -154,7 +148,6 @@ config PCI_PRI
 
 config PCI_PASID
 	bool "PCI PASID support"
-	depends on PCI
 	select PCI_ATS
 	help
 	  Process Address Space Identifiers (PASIDs) can be used by PCI devices
@@ -167,7 +160,7 @@ config PCI_PASID
 
 config PCI_P2PDMA
 	bool "PCI peer-to-peer transfer support"
-	depends on PCI && ZONE_DEVICE
+	depends on ZONE_DEVICE
 	select GENERIC_ALLOCATOR
 	help
 	  Enables drivers to do PCI peer-to-peer transactions to and from
@@ -184,12 +177,11 @@ config PCI_P2PDMA
 
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
-	depends on PCI
 	select NLS
 
 config PCI_HYPERV
 	tristate "Hyper-V PCI Frontend"
-	depends on PCI && X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+	depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
 	help
 	  The PCI device frontend driver allows the kernel to import arbitrary
 	  PCI devices from a PCI backend to support PCI driver domains.
@@ -198,3 +190,5 @@ source "drivers/pci/hotplug/Kconfig"
 source "drivers/pci/controller/Kconfig"
 source "drivers/pci/endpoint/Kconfig"
 source "drivers/pci/switch/Kconfig"
+
+endif
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..80f843030e36 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev)
 	imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie");
 	if (IS_ERR(imx6_pcie->pd_pcie))
 		return PTR_ERR(imx6_pcie->pd_pcie);
+	/* Do nothing when power domain missing */
+	if (!imx6_pcie->pd_pcie)
+		return 0;
 	link = device_link_add(dev, imx6_pcie->pd_pcie,
 			DL_FLAG_STATELESS |
 			DL_FLAG_PM_RUNTIME |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev)
 	if (IS_ERR(imx6_pcie->pd_pcie_phy))
 		return PTR_ERR(imx6_pcie->pd_pcie_phy);
 
-	device_link_add(dev, imx6_pcie->pd_pcie_phy,
+	link = device_link_add(dev, imx6_pcie->pd_pcie_phy,
 			DL_FLAG_STATELESS |
 			DL_FLAG_PM_RUNTIME |
 			DL_FLAG_RPM_ACTIVE);
-	if (IS_ERR(link)) {
-		dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link));
-		return PTR_ERR(link);
+	if (!link) {
+		dev_err(dev, "Failed to add device_link to pcie_phy pd.\n");
+		return -EINVAL;
 	}
 
 	return 0;
diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
index 241ebe0c4505..e35e9eaa50ee 100644
--- a/drivers/pci/controller/dwc/pci-meson.c
+++ b/drivers/pci/controller/dwc/pci-meson.c
@@ -8,6 +8,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/pci.h>
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c
index b171b6bc15c8..0c389a30ef5d 100644
--- a/drivers/pci/controller/dwc/pcie-armada8k.c
+++ b/drivers/pci/controller/dwc/pcie-armada8k.c
@@ -22,7 +22,6 @@
 #include <linux/resource.h>
 #include <linux/of_pci.h>
 #include <linux/of_irq.h>
-#include <linux/gpio/consumer.h>
 
 #include "pcie-designware.h"
 
@@ -30,7 +29,6 @@ struct armada8k_pcie {
 	struct dw_pcie *pci;
 	struct clk *clk;
 	struct clk *clk_reg;
-	struct gpio_desc *reset_gpio;
 };
 
 #define PCIE_VENDOR_REGS_OFFSET		0x8000
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp)
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct armada8k_pcie *pcie = to_armada8k_pcie(pci);
 
-	if (pcie->reset_gpio) {
-		/* assert and then deassert the reset signal */
-		gpiod_set_value_cansleep(pcie->reset_gpio, 1);
-		msleep(100);
-		gpiod_set_value_cansleep(pcie->reset_gpio, 0);
-	}
 	dw_pcie_setup_rc(pp);
 	armada8k_pcie_establish_link(pcie);
 
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev)
 		goto fail_clkreg;
 	}
 
-	/* Get reset gpio signal and hold asserted (logically high) */
-	pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset",
-						   GPIOD_OUT_HIGH);
-	if (IS_ERR(pcie->reset_gpio)) {
-		ret = PTR_ERR(pcie->reset_gpio);
-		goto fail_clkreg;
-	}
-
 	platform_set_drvdata(pdev, pcie);
 
 	ret = armada8k_add_pcie_port(pcie, pdev);
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c
index 9deb56989d72..cb3401a931f8 100644
--- a/drivers/pci/controller/pcie-iproc-msi.c
+++ b/drivers/pci/controller/pcie-iproc-msi.c
@@ -602,9 +602,9 @@ int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
 	}
 
 	/* Reserve memory for event queue and make sure memories are zeroed */
-	msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
+	msi->eq_cpu = dma_alloc_coherent(pcie->dev,
 					 msi->nr_eq_region * EQ_MEM_REGION_SIZE,
 					 &msi->eq_dma, GFP_KERNEL);
 	if (!msi->eq_cpu) {
 		ret = -ENOMEM;
 		goto free_irqs;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a1c8a09efa5..4c0b47867258 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -1168,7 +1168,8 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 				   const struct irq_affinity *affd)
 {
 	static const struct irq_affinity msi_default_affd;
-	int vecs = -ENOSPC;
+	int msix_vecs = -ENOSPC;
+	int msi_vecs = -ENOSPC;
 
 	if (flags & PCI_IRQ_AFFINITY) {
 		if (!affd)
@@ -1179,16 +1180,17 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 	}
 
 	if (flags & PCI_IRQ_MSIX) {
-		vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
-				affd);
-		if (vecs > 0)
-			return vecs;
+		msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs,
+						    max_vecs, affd);
+		if (msix_vecs > 0)
+			return msix_vecs;
 	}
 
 	if (flags & PCI_IRQ_MSI) {
-		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
-		if (vecs > 0)
-			return vecs;
+		msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs,
+						  affd);
+		if (msi_vecs > 0)
+			return msi_vecs;
 	}
 
 	/* use legacy irq if allowed */
@@ -1199,7 +1201,9 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 		}
 	}
 
-	return vecs;
+	if (msix_vecs == -ENOSPC)
+		return -ENOSPC;
+	return msi_vecs;
 }
 EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
 
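The msi.c change tracks the MSI-X and MSI outcomes separately so that when both fail, -ENOSPC is preferred over a fatal MSI error code, which lets callers retry with fewer vectors. A standalone model of the fallback (the enable_* stubs are stand-ins for the PCI internals; a sketch, not the kernel function):

#include <assert.h>
#include <errno.h>

static int enable_msix(int want) { (void)want; return -ENOSPC; }
static int enable_msi(int want)  { (void)want; return -EINVAL; }

static int alloc_irq_vectors(int want, int allow_msix, int allow_msi)
{
	int msix_vecs = -ENOSPC;
	int msi_vecs = -ENOSPC;

	if (allow_msix) {
		msix_vecs = enable_msix(want);
		if (msix_vecs > 0)
			return msix_vecs;
	}
	if (allow_msi) {
		msi_vecs = enable_msi(want);
		if (msi_vecs > 0)
			return msi_vecs;
	}
	/* report -ENOSPC if MSI-X hit it, even if MSI failed differently */
	if (msix_vecs == -ENOSPC)
		return -ENOSPC;
	return msi_vecs;
}

int main(void)
{
	assert(alloc_irq_vectors(8, 1, 1) == -ENOSPC);	/* retryable */
	assert(alloc_irq_vectors(8, 0, 1) == -EINVAL);	/* hard failure */
	return 0;
}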
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9d8e3c837de..c25acace7d91 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
 	} else if (!strncmp(str, "pcie_scan_all", 13)) {
 		pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 	} else if (!strncmp(str, "disable_acs_redir=", 18)) {
-		disable_acs_redir_param = str + 18;
+		disable_acs_redir_param =
+			kstrdup(str + 18, GFP_KERNEL);
 	} else {
 		printk(KERN_ERR "PCI: Unknown option `%s'\n",
 				str);
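The kstrdup() here exists because the command-line buffer that early __init code parses does not outlive boot, so an option value that is consulted later must be copied rather than pointed into the buffer. A userspace model of the bug and the fix, with the freed buffer simulated explicitly (illustrative only):

#include <stdlib.h>
#include <string.h>
#include <assert.h>

static char *saved_param;

static void parse_boot_arg(char *arg)
{
	/* before the fix: saved_param = arg + 18;  (dangles after free) */
	saved_param = strdup(arg + 18);		/* kstrdup() equivalent */
}

int main(void)
{
	char *cmdline = strdup("disable_acs_redir=0000:00:1c.0");

	parse_boot_arg(cmdline);
	free(cmdline);				/* initmem released */
	assert(strcmp(saved_param, "0000:00:1c.0") == 0);
	free(saved_param);
	return 0;
}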
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b0a413f3f7ca..e2a879e93d86 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev)
 		break;
 	}
 }
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
-			 quirk_synopsys_haps);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
+			       PCI_CLASS_SERIAL_USB_XHCI, 0,
+			       quirk_synopsys_haps);
 
 /*
  * Let's make the southbridge information explicit instead of having to
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 6c5536d3d42a..e22766c79fe9 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1373,10 +1373,10 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
 	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
 		return 0;
 
-	stdev->dma_mrpc = dma_zalloc_coherent(&stdev->pdev->dev,
+	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
 					     sizeof(*stdev->dma_mrpc),
 					     &stdev->dma_mrpc_dma_addr,
 					     GFP_KERNEL);
 	if (stdev->dma_mrpc == NULL)
 		return -ENOMEM;
 
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index a91fc67fc4e0..d70ba9bc42d9 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -32,7 +32,7 @@
 
 /* register 0x01 */
 #define REF_FREF_SEL_25		BIT(0)
-#define PHY_MODE_SATA		(0x0 << 5)
+#define PHY_BERLIN_MODE_SATA	(0x0 << 5)
 
 /* register 0x02 */
 #define USE_MAX_PLL_RATE	BIT(12)
@@ -102,7 +102,8 @@ static int phy_berlin_sata_power_on(struct phy *phy)
 
 	/* set PHY mode and ref freq to 25 MHz */
 	phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x01,
-				    0x00ff, REF_FREF_SEL_25 | PHY_MODE_SATA);
+				    0x00ff,
+				    REF_FREF_SEL_25 | PHY_BERLIN_MODE_SATA);
 
 	/* set PHY up to 6 Gbps */
 	phy_berlin_sata_reg_setbits(ctrl_reg, priv->phy_base, 0x25,
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07ab345..09a77e556ece 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
 
 	err = reset_control_deassert(priv->reset);
 	if (err && priv->no_suspend_override)
-		reset_control_assert(priv->no_suspend_override);
+		reset_control_deassert(priv->no_suspend_override);
 
 	return err;
 }
@@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
-	priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
+	priv->reset = devm_reset_control_get(&pdev->dev, "phy");
 	if (IS_ERR(priv->reset))
 		return PTR_ERR(priv->reset);
 
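The first hunk fixes an error-unwind bug: the suspend-override line is asserted before the main reset is released, so when reset_control_deassert() on the main line fails, the cleanup must deassert the override again rather than assert it a second time. A generic sketch of that balanced unwind (names hypothetical, not the driver's actual code):

#include <linux/reset.h>

static int example_power_on(struct reset_control *core_rst,
			    struct reset_control *override_rst)
{
	int err;

	err = reset_control_assert(override_rst);
	if (err)
		return err;

	err = reset_control_deassert(core_rst);
	if (err)
		reset_control_deassert(override_rst);	/* roll back */

	return err;
}
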
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index f137e0107764..c4709ed7fb0e 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -82,6 +82,7 @@ config PHY_TI_GMII_SEL
 	default y if TI_CPSW=y
 	depends on TI_CPSW || COMPILE_TEST
 	select GENERIC_PHY
+	select REGMAP
 	default m
 	help
 	  This driver supports configuring of the TI CPSW Port mode depending on
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 77fdaa551977..a52c5bb35033 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -204,11 +204,11 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev,
 
 	if (args->args_count < 1)
 		return ERR_PTR(-EINVAL);
+	if (!priv || !priv->if_phys)
+		return ERR_PTR(-ENODEV);
 	if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) &&
 	    args->args_count < 2)
 		return ERR_PTR(-EINVAL);
-	if (!priv || !priv->if_phys)
-		return ERR_PTR(-ENODEV);
 	if (phy_id > priv->soc_data->num_ports)
 		return ERR_PTR(-EINVAL);
 	if (phy_id != priv->if_phys[phy_id - 1].id)
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 05044e323ea5..03ec7a5d9d0b 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
 		},
 	},
 	{}
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index 1817786ab6aa..a005cbccb4f7 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -45,12 +45,14 @@ config PINCTRL_MT2701
 config PINCTRL_MT7623
 	bool "Mediatek MT7623 pin control with generic binding"
 	depends on MACH_MT7623 || COMPILE_TEST
+	depends on OF
 	default MACH_MT7623
 	select PINCTRL_MTK_MOORE
 
 config PINCTRL_MT7629
 	bool "Mediatek MT7629 pin control"
 	depends on MACH_MT7629 || COMPILE_TEST
+	depends on OF
 	default MACH_MT7629
 	select PINCTRL_MTK_MOORE
 
@@ -92,6 +94,7 @@ config PINCTRL_MT6797
 
 config PINCTRL_MT7622
 	bool "MediaTek MT7622 pin control"
+	depends on OF
 	depends on ARM64 || COMPILE_TEST
 	default ARM64 && ARCH_MEDIATEK
 	select PINCTRL_MTK_MOORE
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index b03481ef99a1..98905d4a79ca 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
 		break;
 
 	case MCP_TYPE_S18:
+		one_regmap_config =
+			devm_kmemdup(dev, &mcp23x17_regmap,
+				     sizeof(struct regmap_config), GFP_KERNEL);
+		if (!one_regmap_config)
+			return -ENOMEM;
 		mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
-					       &mcp23x17_regmap);
+					       one_regmap_config);
 		mcp->reg_shift = 1;
 		mcp->chip.ngpio = 16;
 		mcp->chip.label = "mcp23s18";
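The S18 variant needs its own regmap_config instance rather than the shared mcp23x17_regmap template, so the template is duplicated with devm_kmemdup() before being handed to regmap. A stripped-down sketch of that duplicate-a-template idiom (helper name hypothetical):

#include <linux/device.h>
#include <linux/regmap.h>

/* Copy a const template so per-instance fields can be patched safely. */
static struct regmap_config *example_dup_config(struct device *dev,
				const struct regmap_config *template_cfg)
{
	return devm_kmemdup(dev, template_cfg, sizeof(*template_cfg),
			    GFP_KERNEL);
}
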
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b58125568..ef4268cc6227 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
 static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
 	.pins = h6_pins,
 	.npins = ARRAY_SIZE(h6_pins),
-	.irq_banks = 3,
+	.irq_banks = 4,
 	.irq_bank_map = h6_irq_bank_map,
 	.irq_read_needs_mux = true,
 };
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 5d9184d18c16..0e7fa69e93df 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
 {
 	struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
 	unsigned short bank = offset / PINS_PER_BANK;
-	struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank];
-	struct regulator *reg;
+	unsigned short bank_offset = bank - pctl->desc->pin_base /
+				     PINS_PER_BANK;
+	struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
+	struct regulator *reg = s_reg->regulator;
+	char supply[16];
 	int ret;
 
-	reg = s_reg->regulator;
-	if (!reg) {
-		char supply[16];
-
-		snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
-		reg = regulator_get(pctl->dev, supply);
-		if (IS_ERR(reg)) {
-			dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
-				'A' + bank);
-			return PTR_ERR(reg);
-		}
-
-		s_reg->regulator = reg;
-		refcount_set(&s_reg->refcount, 1);
-	} else {
+	if (reg) {
 		refcount_inc(&s_reg->refcount);
+		return 0;
+	}
+
+	snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank);
+	reg = regulator_get(pctl->dev, supply);
+	if (IS_ERR(reg)) {
+		dev_err(pctl->dev, "Couldn't get bank P%c regulator\n",
+			'A' + bank);
+		return PTR_ERR(reg);
 	}
 
 	ret = regulator_enable(reg);
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset)
 		goto out;
 	}
 
+	s_reg->regulator = reg;
+	refcount_set(&s_reg->refcount, 1);
+
 	return 0;
 
 out:
-	if (refcount_dec_and_test(&s_reg->refcount)) {
-		regulator_put(s_reg->regulator);
-		s_reg->regulator = NULL;
-	}
+	regulator_put(s_reg->regulator);
 
 	return ret;
 }
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset)
 {
 	struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
 	unsigned short bank = offset / PINS_PER_BANK;
-	struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank];
+	unsigned short bank_offset = bank - pctl->desc->pin_base /
+				     PINS_PER_BANK;
+	struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset];
 
 	if (!refcount_dec_and_test(&s_reg->refcount))
 		return 0;
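Beyond the bank_offset indexing fix, the reordered sunxi_pmx_request() follows a publish-last refcounting discipline: the regulator is only stored in the shared slot, with its refcount set to 1, after regulator_enable() has succeeded, so the error path merely puts the local reference and never has to unpublish shared state. A condensed sketch of that get-or-create shape (types and callbacks hypothetical):

#include <linux/errno.h>
#include <linux/refcount.h>

struct example_slot {
	void *resource;
	refcount_t refcount;
};

static int example_get(struct example_slot *slot,
		       void *(*acquire)(void), int (*enable)(void *),
		       void (*release)(void *))
{
	void *res;
	int ret;

	if (slot->resource) {
		refcount_inc(&slot->refcount);
		return 0;
	}

	res = acquire();
	if (!res)
		return -ENODEV;

	ret = enable(res);
	if (ret) {
		release(res);	/* slot never published, just drop it */
		return ret;
	}

	/* publish only after every fallible step succeeded */
	slot->resource = res;
	refcount_set(&slot->refcount, 1);
	return 0;
}
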
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index e340d2a24b44..034c0317c8d6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -136,7 +136,7 @@ struct sunxi_pinctrl {
 	struct gpio_chip *chip;
 	const struct sunxi_pinctrl_desc *desc;
 	struct device *dev;
-	struct sunxi_pinctrl_regulator regulators[12];
+	struct sunxi_pinctrl_regulator regulators[9];
 	struct irq_domain *domain;
 	struct sunxi_pinctrl_function *functions;
 	unsigned nfunctions;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e3b62c2ee8d1..b5e9db85e881 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -905,6 +905,7 @@ config TOSHIBA_WMI
 config ACPI_CMPC
 	tristate "CMPC Laptop Extras"
 	depends on ACPI && INPUT
+	depends on BACKLIGHT_LCD_SUPPORT
 	depends on RFKILL || RFKILL=n
 	select BACKLIGHT_CLASS_DEVICE
 	help
@@ -1009,7 +1010,7 @@ config INTEL_MFLD_THERMAL
 
 config INTEL_IPS
 	tristate "Intel Intelligent Power Sharing"
-	depends on ACPI
+	depends on ACPI && PCI
 	---help---
 	  Intel Calpella platforms support dynamic power sharing between the
 	  CPU and GPU, maximizing performance in a given TDP. This driver,
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL
 config SAMSUNG_Q10
 	tristate "Samsung Q10 Extras"
 	depends on ACPI
+	depends on BACKLIGHT_LCD_SUPPORT
 	select BACKLIGHT_CLASS_DEVICE
 	---help---
 	  This driver provides support for backlight control on Samsung Q10
@@ -1135,7 +1137,7 @@ config SAMSUNG_Q10
 
 config APPLE_GMUX
 	tristate "Apple Gmux Driver"
-	depends on ACPI
+	depends on ACPI && PCI
 	depends on PNP
 	depends on BACKLIGHT_CLASS_DEVICE
 	depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
@@ -1174,7 +1176,7 @@ config INTEL_SMARTCONNECT
 
 config INTEL_PMC_IPC
 	tristate "Intel PMC IPC Driver"
-	depends on ACPI
+	depends on ACPI && PCI
 	---help---
 	  This driver provides support for PMC control on some Intel platforms.
 	  The PMC is an ARC processor which defines IPC commands for communication
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 797fab33bb98..7cbea796652a 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -224,7 +224,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 			extoff = NULL;
 			break;
 		}
-		if (extoff->n_samples > PTP_MAX_SAMPLES) {
+		if (extoff->n_samples > PTP_MAX_SAMPLES
+		    || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
 			err = -EINVAL;
 			break;
 		}
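Rejecting nonzero reserved words is what lets a UAPI struct grow later: today's kernel must fail requests that set bits it does not understand, otherwise old userspace garbage becomes tomorrow's accidental ABI. A minimal sketch of the rule (struct illustrative, not the real PTP layout):

#include <linux/errno.h>
#include <linux/types.h>

struct example_req {
	__u32 n_samples;
	__u32 rsv[3];	/* must be zero today, usable tomorrow */
};

static int example_check(const struct example_req *req, __u32 max_samples)
{
	if (req->n_samples > max_samples ||
	    req->rsv[0] || req->rsv[1] || req->rsv[2])
		return -EINVAL;
	return 0;
}
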
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index bb655854713d..b64c56c33c3b 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -1382,9 +1382,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
 	INIT_WORK(&priv->idb_work, tsi721_db_dpc);
 
 	/* Allocate buffer for inbound doorbells queue */
-	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
-				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
-				&priv->idb_dma, GFP_KERNEL);
+	priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+					    IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
+					    &priv->idb_dma, GFP_KERNEL);
 	if (!priv->idb_base)
 		return -ENOMEM;
 
@@ -1447,9 +1447,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
 	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
 
 	/* Allocate space for DMA descriptors */
-	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
-				bd_num * sizeof(struct tsi721_dma_desc),
-				&bd_phys, GFP_KERNEL);
+	bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+				    bd_num * sizeof(struct tsi721_dma_desc),
+				    &bd_phys, GFP_KERNEL);
 	if (!bd_ptr)
 		return -ENOMEM;
 
@@ -1464,7 +1464,7 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv)
 	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
 					bd_num : TSI721_DMA_MINSTSSZ;
 	sts_size = roundup_pow_of_two(sts_size);
-	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
+	sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
 				     sts_size * sizeof(struct tsi721_dma_sts),
 				     &sts_phys, GFP_KERNEL);
 	if (!sts_ptr) {
@@ -1939,10 +1939,10 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 
 	/* Outbound message descriptor status FIFO allocation */
 	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
-	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
-			priv->omsg_ring[mbox].sts_size *
-						sizeof(struct tsi721_dma_sts),
-			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
+	priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+			priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
+			&priv->omsg_ring[mbox].sts_phys,
+			GFP_KERNEL);
 	if (priv->omsg_ring[mbox].sts_base == NULL) {
 		tsi_debug(OMSG, &priv->pdev->dev,
 			"ENOMEM for OB_MSG_%d status FIFO", mbox);
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 006ea5a45020..7f5d4436f594 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -90,9 +90,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
 	 * Allocate space for DMA descriptors
 	 * (add an extra element for link descriptor)
 	 */
-	bd_ptr = dma_zalloc_coherent(dev,
-				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
-				&bd_phys, GFP_ATOMIC);
+	bd_ptr = dma_alloc_coherent(dev,
+				    (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+				    &bd_phys, GFP_ATOMIC);
 	if (!bd_ptr)
 		return -ENOMEM;
 
@@ -108,7 +108,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
 	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
 					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
 	sts_size = roundup_pow_of_two(sts_size);
-	sts_ptr = dma_zalloc_coherent(dev,
+	sts_ptr = dma_alloc_coherent(dev,
 				     sts_size * sizeof(struct tsi721_dma_sts),
 				     &sts_phys, GFP_ATOMIC);
 	if (!sts_ptr) {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 183fc42a510a..2d7cd344f3bf 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -153,10 +153,15 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
 			  const bool * ctx,
 			  struct irq_affinity *desc)
 {
-	int i, ret;
+	int i, ret, queue_idx = 0;
 
 	for (i = 0; i < nvqs; ++i) {
-		vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i],
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
 				    ctx ? ctx[i] : false);
 		if (IS_ERR(vqs[i])) {
 			ret = PTR_ERR(vqs[i]);
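names[] may now contain NULL holes for virtqueues the driver declines, while the transport still hands out contiguous hardware queue indices via a separate counter; the virtio_ccw hunk further down applies the same renumbering. A simplified sketch with the types reduced to essentials (not the real transport signature):

#include <linux/errno.h>

static int example_find_vqs(int nvqs, const char * const names[],
			    void *vqs[],
			    void *(*setup_vq)(int hw_idx, const char *name))
{
	int i, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;	/* hole: consumes no hw queue */
			continue;
		}
		vqs[i] = setup_vq(queue_idx++, names[i]);
		if (!vqs[i])
			return -ENOMEM;
	}
	return 0;
}
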
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index c21da9fe51ec..2e01bd833ffd 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -109,7 +109,7 @@ config RESET_QCOM_PDC
 
 config RESET_SIMPLE
 	bool "Simple Reset Controller Driver" if COMPILE_TEST
-	default ARCH_SOCFPGA || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
+	default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED
 	help
 	  This enables a simple reset controller driver for reset lines that
 	  that can be asserted and deasserted by toggling bits in a contiguous,
@@ -128,6 +128,14 @@ config RESET_STM32MP157
 	help
 	  This enables the RCC reset controller driver for STM32 MPUs.
 
+config RESET_SOCFPGA
+	bool "SoCFPGA Reset Driver" if COMPILE_TEST && !ARCH_SOCFPGA
+	default ARCH_SOCFPGA
+	select RESET_SIMPLE
+	help
+	  This enables the reset driver for the SoCFPGA ARMv7 platforms. This
+	  driver gets initialized early during platform init calls.
+
 config RESET_SUNXI
 	bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI
 	default ARCH_SUNXI
@@ -163,15 +171,15 @@ config RESET_UNIPHIER
 	  Say Y if you want to control reset signals provided by System Control
 	  block, Media I/O block, Peripheral Block.
 
-config RESET_UNIPHIER_USB3
-	tristate "USB3 reset driver for UniPhier SoCs"
+config RESET_UNIPHIER_GLUE
+	tristate "Reset driver in glue layer for UniPhier SoCs"
 	depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
 	default ARCH_UNIPHIER
 	select RESET_SIMPLE
 	help
-	  Support for the USB3 core reset on UniPhier SoCs.
-	  Say Y if you want to control reset signals provided by
-	  USB3 glue layer.
+	  Support for peripheral core reset included in its own glue layer
+	  on UniPhier SoCs. Say Y if you want to control reset signals
+	  provided by the glue layer.
 
 config RESET_ZYNQ
 	bool "ZYNQ Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d08e8b90046a..dc7874df78d9 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -19,10 +19,11 @@ obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o
 obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o
 obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o
 obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o
+obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o
 obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o
 obj-$(CONFIG_RESET_TI_SCI) += reset-ti-sci.o
 obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o
 obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o
-obj-$(CONFIG_RESET_UNIPHIER_USB3) += reset-uniphier-usb3.o
+obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o
 obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o
 
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index d1887c0ed5d3..9582efb70025 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -795,3 +795,45 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
 	return rstc;
 }
 EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
+
+static int reset_control_get_count_from_lookup(struct device *dev)
+{
+	const struct reset_control_lookup *lookup;
+	const char *dev_id;
+	int count = 0;
+
+	if (!dev)
+		return -EINVAL;
+
+	dev_id = dev_name(dev);
+	mutex_lock(&reset_lookup_mutex);
+
+	list_for_each_entry(lookup, &reset_lookup_list, list) {
+		if (!strcmp(lookup->dev_id, dev_id))
+			count++;
+	}
+
+	mutex_unlock(&reset_lookup_mutex);
+
+	if (count == 0)
+		count = -ENOENT;
+
+	return count;
+}
+
+/**
+ * reset_control_get_count - Count number of resets available with a device
+ *
+ * @dev: device for which to return the number of resets
+ *
+ * Returns positive reset count on success, or error number on failure and
+ * on count being zero.
+ */
+int reset_control_get_count(struct device *dev)
+{
+	if (dev->of_node)
+		return of_reset_control_get_count(dev->of_node);
+
+	return reset_control_get_count_from_lookup(dev);
+}
+EXPORT_SYMBOL_GPL(reset_control_get_count);
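A hypothetical consumer of the new reset_control_get_count() API, e.g. sizing a per-line array before requesting each reset individually (not from this patch):

#include <linux/device.h>
#include <linux/reset.h>

static int example_count_resets(struct device *dev)
{
	int count = reset_control_get_count(dev);

	if (count < 0)
		return count;	/* -ENOENT when no DT entry or lookup matches */

	dev_dbg(dev, "%d reset lines available\n", count);
	return count;
}
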
diff --git a/drivers/reset/reset-hsdk.c b/drivers/reset/reset-hsdk.c
index 8bce391c6943..4c7b8647b49c 100644
--- a/drivers/reset/reset-hsdk.c
+++ b/drivers/reset/reset-hsdk.c
@@ -86,6 +86,7 @@ static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
 
 static const struct reset_control_ops hsdk_reset_ops = {
 	.reset	= hsdk_reset_reset,
+	.deassert = hsdk_reset_reset,
 };
 
 static int hsdk_reset_probe(struct platform_device *pdev)
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index a91107fc9e27..77fbba3100c8 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -109,7 +109,7 @@ struct reset_simple_devdata {
 #define SOCFPGA_NR_BANKS	8
 
 static const struct reset_simple_devdata reset_simple_socfpga = {
-	.reg_offset = 0x10,
+	.reg_offset = 0x20,
 	.nr_resets = SOCFPGA_NR_BANKS * 32,
 	.status_active_low = true,
 };
@@ -120,7 +120,8 @@ static const struct reset_simple_devdata reset_simple_active_low = {
 };
 
 static const struct of_device_id reset_simple_dt_ids[] = {
-	{ .compatible = "altr,rst-mgr", .data = &reset_simple_socfpga },
+	{ .compatible = "altr,stratix10-rst-mgr",
+		.data = &reset_simple_socfpga },
 	{ .compatible = "st,stm32-rcc", },
 	{ .compatible = "allwinner,sun6i-a31-clock-reset",
 		.data = &reset_simple_active_low },
@@ -166,14 +167,6 @@ static int reset_simple_probe(struct platform_device *pdev)
 		data->status_active_low = devdata->status_active_low;
 	}
 
-	if (of_device_is_compatible(dev->of_node, "altr,rst-mgr") &&
-	    of_property_read_u32(dev->of_node, "altr,modrst-offset",
-				 &reg_offset)) {
-		dev_warn(dev,
-			 "missing altr,modrst-offset property, assuming 0x%x!\n",
-			 reg_offset);
-	}
-
 	data->membase += reg_offset;
 
 	return devm_reset_controller_register(dev, &data->rcdev);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
new file mode 100644
index 000000000000..318cfc51c441
--- /dev/null
+++ b/drivers/reset/reset-socfpga.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Intel Corporation
+ * Copied from reset-sunxi.c
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "reset-simple.h"
+
+#define SOCFPGA_NR_BANKS	8
+void __init socfpga_reset_init(void);
+
+static int a10_reset_init(struct device_node *np)
+{
+	struct reset_simple_data *data;
+	struct resource res;
+	resource_size_t size;
+	int ret;
+	u32 reg_offset = 0x10;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret)
+		goto err_alloc;
+
+	size = resource_size(&res);
+	if (!request_mem_region(res.start, size, np->name)) {
+		ret = -EBUSY;
+		goto err_alloc;
+	}
+
+	data->membase = ioremap(res.start, size);
+	if (!data->membase) {
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
+		pr_warn("missing altr,modrst-offset property, assuming 0x10\n");
+	data->membase += reg_offset;
+
+	spin_lock_init(&data->lock);
+
+	data->rcdev.owner = THIS_MODULE;
+	data->rcdev.nr_resets = SOCFPGA_NR_BANKS * 32;
+	data->rcdev.ops = &reset_simple_ops;
+	data->rcdev.of_node = np;
+	data->status_active_low = true;
+
+	return reset_controller_register(&data->rcdev);
+
+err_alloc:
+	kfree(data);
+	return ret;
+};
+
+/*
+ * These are the reset controller we need to initialize early on in
+ * our system, before we can even think of using a regular device
+ * driver for it.
+ * The controllers that we can register through the regular device
+ * model are handled by the simple reset driver directly.
+ */
+static const struct of_device_id socfpga_early_reset_dt_ids[] __initconst = {
+	{ .compatible = "altr,rst-mgr", },
+	{ /* sentinel */ },
+};
+
+void __init socfpga_reset_init(void)
+{
+	struct device_node *np;
+
+	for_each_matching_node(np, socfpga_early_reset_dt_ids)
+		a10_reset_init(np);
+}
diff --git a/drivers/reset/reset-uniphier-usb3.c b/drivers/reset/reset-uniphier-glue.c
index ffa1b19b594d..a45923f4df6d 100644
--- a/drivers/reset/reset-uniphier-usb3.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 //
-// reset-uniphier-usb3.c - USB3 reset driver for UniPhier
+// reset-uniphier-glue.c - Glue layer reset driver for UniPhier
 // Copyright 2018 Socionext Inc.
 // Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 
@@ -15,24 +15,24 @@
 #define MAX_CLKS	2
 #define MAX_RSTS	2
 
-struct uniphier_usb3_reset_soc_data {
+struct uniphier_glue_reset_soc_data {
 	int nclks;
 	const char * const *clock_names;
 	int nrsts;
 	const char * const *reset_names;
 };
 
-struct uniphier_usb3_reset_priv {
+struct uniphier_glue_reset_priv {
 	struct clk_bulk_data clk[MAX_CLKS];
 	struct reset_control *rst[MAX_RSTS];
 	struct reset_simple_data rdata;
-	const struct uniphier_usb3_reset_soc_data *data;
+	const struct uniphier_glue_reset_soc_data *data;
 };
 
-static int uniphier_usb3_reset_probe(struct platform_device *pdev)
+static int uniphier_glue_reset_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
-	struct uniphier_usb3_reset_priv *priv;
+	struct uniphier_glue_reset_priv *priv;
 	struct resource *res;
 	resource_size_t size;
 	const char *name;
@@ -100,9 +100,9 @@ out_rst_assert:
 	return ret;
 }
 
-static int uniphier_usb3_reset_remove(struct platform_device *pdev)
+static int uniphier_glue_reset_remove(struct platform_device *pdev)
 {
-	struct uniphier_usb3_reset_priv *priv = platform_get_drvdata(pdev);
+	struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
 	int i;
 
 	for (i = 0; i < priv->data->nrsts; i++)
@@ -117,7 +117,7 @@ static const char * const uniphier_pro4_clock_reset_names[] = {
 	"gio", "link",
 };
 
-static const struct uniphier_usb3_reset_soc_data uniphier_pro4_data = {
+static const struct uniphier_glue_reset_soc_data uniphier_pro4_data = {
 	.nclks = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
 	.clock_names = uniphier_pro4_clock_reset_names,
 	.nrsts = ARRAY_SIZE(uniphier_pro4_clock_reset_names),
@@ -128,14 +128,14 @@ static const char * const uniphier_pxs2_clock_reset_names[] = {
 	"link",
 };
 
-static const struct uniphier_usb3_reset_soc_data uniphier_pxs2_data = {
+static const struct uniphier_glue_reset_soc_data uniphier_pxs2_data = {
 	.nclks = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
 	.clock_names = uniphier_pxs2_clock_reset_names,
 	.nrsts = ARRAY_SIZE(uniphier_pxs2_clock_reset_names),
 	.reset_names = uniphier_pxs2_clock_reset_names,
 };
 
-static const struct of_device_id uniphier_usb3_reset_match[] = {
+static const struct of_device_id uniphier_glue_reset_match[] = {
 	{
 		.compatible = "socionext,uniphier-pro4-usb3-reset",
 		.data = &uniphier_pro4_data,
@@ -152,20 +152,32 @@ static const struct of_device_id uniphier_usb3_reset_match[] = {
 		.compatible = "socionext,uniphier-pxs3-usb3-reset",
 		.data = &uniphier_pxs2_data,
 	},
+	{
+		.compatible = "socionext,uniphier-pro4-ahci-reset",
+		.data = &uniphier_pro4_data,
+	},
+	{
+		.compatible = "socionext,uniphier-pxs2-ahci-reset",
+		.data = &uniphier_pxs2_data,
+	},
+	{
+		.compatible = "socionext,uniphier-pxs3-ahci-reset",
+		.data = &uniphier_pxs2_data,
+	},
 	{ /* Sentinel */ }
 };
-MODULE_DEVICE_TABLE(of, uniphier_usb3_reset_match);
+MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
 
-static struct platform_driver uniphier_usb3_reset_driver = {
-	.probe = uniphier_usb3_reset_probe,
-	.remove = uniphier_usb3_reset_remove,
+static struct platform_driver uniphier_glue_reset_driver = {
+	.probe = uniphier_glue_reset_probe,
+	.remove = uniphier_glue_reset_remove,
 	.driver = {
-		.name = "uniphier-usb3-reset",
-		.of_match_table = uniphier_usb3_reset_match,
+		.name = "uniphier-glue-reset",
+		.of_match_table = uniphier_glue_reset_match,
 	},
 };
-module_platform_driver(uniphier_usb3_reset_driver);
+module_platform_driver(uniphier_glue_reset_driver);
 
 MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
-MODULE_DESCRIPTION("UniPhier USB3 Reset Driver");
+MODULE_DESCRIPTION("UniPhier Glue layer reset driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 4e7b55a14b1a..6e294b4d3635 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
 		usrparm.psf_data &= 0x7fffffffULL;
 		usrparm.rssd_result &= 0x7fffffffULL;
 	}
+	/* at least 2 bytes are accessed and should be allocated */
+	if (usrparm.psf_data_len < 2) {
+		DBF_DEV_EVENT(DBF_WARNING, device,
+			      "Symmetrix ioctl invalid data length %d",
+			      usrparm.psf_data_len);
+		rc = -EINVAL;
+		goto out;
+	}
 	/* alloc I/O data area */
 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 194ffd5c8580..039b2074db7e 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 
 static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
+	lock_device_hotplug();
 	smp_rescan_cpus();
+	unlock_device_hotplug();
 }
 
 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48ea0004a56d..5a699746c357 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
 static inline int ap_test_config_card_id(unsigned int id)
 {
 	if (!ap_configuration)	/* QCI not supported */
-		return 1;
+		/* only ids 0...3F may be probed */
+		return id < 0x40 ? 1 : 0;
 	return ap_test_config(ap_configuration->apm, id);
 }
 
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dcbf5c857743..ed8e58f09054 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -89,8 +89,8 @@ static int register_sba(struct ism_dev *ism)
 	dma_addr_t dma_handle;
 	struct ism_sba *sba;
 
-	sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
-				  &dma_handle, GFP_KERNEL);
+	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+				 GFP_KERNEL);
 	if (!sba)
 		return -ENOMEM;
 
@@ -116,8 +116,8 @@ static int register_ieq(struct ism_dev *ism)
 	dma_addr_t dma_handle;
 	struct ism_eq *ieq;
 
-	ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
-				  &dma_handle, GFP_KERNEL);
+	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
+				 GFP_KERNEL);
 	if (!ieq)
 		return -ENOMEM;
 
@@ -234,10 +234,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
 	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
 		return -EINVAL;
 
-	dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
-					    &dmb->dma_addr, GFP_KERNEL |
-					    __GFP_NOWARN | __GFP_NOMEMALLOC |
-					    __GFP_COMP | __GFP_NORETRY);
+	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
+					   &dmb->dma_addr,
+					   GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
 	if (!dmb->cpu_addr)
 		clear_bit(dmb->sba_idx, ism->sba_bitmap);
 
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 0ee026947f20..122059ecad84 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
 #include <linux/hashtable.h>
 #include <linux/ip.h>
 #include <linux/refcount.h>
+#include <linux/workqueue.h>
 
 #include <net/ipv6.h>
 #include <net/if_inet6.h>
@@ -789,6 +790,7 @@ struct qeth_card {
 	struct qeth_seqno seqno;
 	struct qeth_card_options options;
 
+	struct workqueue_struct *event_wq;
 	wait_queue_head_t wait_q;
 	spinlock_t mclock;
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
 extern const struct attribute_group qeth_device_attr_group;
 extern const struct attribute_group qeth_device_blkt_group;
 extern const struct device_type qeth_generic_devtype;
-extern struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *);
 const char *qeth_get_cardname_short(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e63e03143ca7..89f912213e62 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
-struct workqueue_struct *qeth_wq;
-EXPORT_SYMBOL_GPL(qeth_wq);
+static struct workqueue_struct *qeth_wq;
 
 int qeth_card_hw_is_reachable(struct qeth_card *card)
 {
@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
 		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
 				 rc, CARD_DEVID(card));
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		card->read_or_write_problem = 1;
 		qeth_schedule_recovery(card);
 		wake_up(&card->wait_q);
@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 		rc = qeth_get_problem(card, cdev, irb);
 		if (rc) {
 			card->read_or_write_problem = 1;
+			if (iob)
+				qeth_release_buffer(iob->channel, iob);
 			qeth_clear_ipacmd_list(card);
 			qeth_schedule_recovery(card);
 			goto out;
@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
 	CARD_RDEV(card) = gdev->cdev[0];
 	CARD_WDEV(card) = gdev->cdev[1];
 	CARD_DDEV(card) = gdev->cdev[2];
+
+	card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+	if (!card->event_wq)
+		goto out_wq;
 	if (qeth_setup_channel(&card->read, true))
 		goto out_ip;
 	if (qeth_setup_channel(&card->write, true))
@@ -1481,6 +1487,8 @@ out_data:
 out_channel:
 	qeth_clean_channel(&card->read);
 out_ip:
+	destroy_workqueue(card->event_wq);
+out_wq:
 	dev_set_drvdata(&gdev->dev, NULL);
 	kfree(card);
 out:
@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
 		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		wake_up(&card->wait_q);
 		return rc;
 	}
@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
 			rc);
 		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
 		atomic_set(&channel->irq_pending, 0);
+		qeth_release_buffer(channel, iob);
 		wake_up(&card->wait_q);
 		return rc;
 	}
@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	}
 	reply = qeth_alloc_reply(card);
 	if (!reply) {
+		qeth_release_buffer(channel, iob);
 		return -ENOMEM;
 	}
 	reply->callback = reply_cb;
@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
 	return 0;
 }
 
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
 {
 	if (!q)
 		return;
 
+	qeth_clear_outq_buffers(q, 1);
 	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
 	kfree(q);
 }
@@ -2467,10 +2479,8 @@ out_freeoutqbufs:
 		card->qdio.out_qs[i]->bufs[j] = NULL;
 	}
 out_freeoutq:
-	while (i > 0) {
-		qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
-		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-	}
+	while (i > 0)
+		qeth_free_output_queue(card->qdio.out_qs[--i]);
 	kfree(card->qdio.out_qs);
 	card->qdio.out_qs = NULL;
 out_freepool:
@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
 	qeth_free_buffer_pool(card);
 	/* free outbound qdio_qs */
 	if (card->qdio.out_qs) {
-		for (i = 0; i < card->qdio.no_out_queues; ++i) {
-			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
-			qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
-		}
+		for (i = 0; i < card->qdio.no_out_queues; i++)
+			qeth_free_output_queue(card->qdio.out_qs[i]);
 		kfree(card->qdio.out_qs);
 		card->qdio.out_qs = NULL;
 	}
@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card)
 	qeth_clean_channel(&card->read);
 	qeth_clean_channel(&card->write);
 	qeth_clean_channel(&card->data);
+	destroy_workqueue(card->event_wq);
 	qeth_free_qdio_buffers(card);
 	unregister_service_level(&card->qeth_service_level);
 	dev_set_drvdata(&card->gdev->dev, NULL);
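These hunks replace the single global qeth_wq with a per-card ordered workqueue, so bridge events for one card are serialized and can be flushed or destroyed together with that card (see the flush_workqueue() and cancel_work_sync() additions in qeth_l2 and qeth_l3 below). A lifecycle sketch of the pattern (function names hypothetical):

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq_create(const char *devname)
{
	/* ordered: events for one device never run concurrently */
	return alloc_ordered_workqueue("%s", 0, devname);
}

static void example_wq_teardown(struct workqueue_struct *wq)
{
	flush_workqueue(wq);	/* let queued events drain first */
	destroy_workqueue(wq);
}
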
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f108d4b44605..a43de2f9bcac 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
 		qeth_clear_cmd_buffers(&card->read);
 		qeth_clear_cmd_buffers(&card->write);
 	}
+
+	flush_workqueue(card->event_wq);
 }
 
 static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 
 	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l2_set_offline(cgdev);
+
+	cancel_work_sync(&card->close_dev_work);
 	if (qeth_netdev_is_registered(card->dev))
 		unregister_netdev(card->dev);
 }
@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
 	data->card = card;
 	memcpy(&data->qports, qports,
 	       sizeof(struct qeth_sbp_state_change) + extrasize);
-	queue_work(qeth_wq, &data->worker);
+	queue_work(card->event_wq, &data->worker);
 }
 
 struct qeth_bridge_host_data {
@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
 	data->card = card;
 	memcpy(&data->hostevs, hostevs,
 	       sizeof(struct qeth_ipacmd_addr_change) + extrasize);
-	queue_work(qeth_wq, &data->worker);
+	queue_work(card->event_wq, &data->worker);
 }
 
 /* SETBRIDGEPORT support; sending commands */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 42a7cdc59b76..df34bff4ac31 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
 		qeth_clear_cmd_buffers(&card->read);
 		qeth_clear_cmd_buffers(&card->write);
 	}
+
+	flush_workqueue(card->event_wq);
 }
 
 /*
@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
 	if (cgdev->state == CCWGROUP_ONLINE)
 		qeth_l3_set_offline(cgdev);
 
+	cancel_work_sync(&card->close_dev_work);
 	if (qeth_netdev_is_registered(card->dev))
 		unregister_netdev(card->dev);
 	qeth_l3_clear_ip_htable(card, 0);
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cf30d124b9e..e390f8c6d5f3 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 		goto failed;
 
 	/* report size limit per scatter-gather segment */
-	adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
 	adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
 
 	adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 00acc7144bbc..f4f6a07c5222 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
 	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
 				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
 				   /* GCD, adjusted later */
+	/* report size limit per scatter-gather segment */
+	.max_segment_size	 = ZFCP_QDIO_SBALE_LEN,
 	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
 	.shost_attrs		 = zfcp_sysfs_shost_attrs,
 	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
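Together with the zfcp_aux.c hunk above and the aacraid change later in this section, this moves the per-segment DMA limit from dma_set_max_seg_size() on the parent device into the SCSI host template, where the midlayer can apply it when the host is added. A sketch of the declarative form (driver name and values illustrative):

#include <scsi/scsi_host.h>

static struct scsi_host_template example_sht = {
	.name			= "example",
	/* largest scatter-gather segment the hardware accepts, in bytes */
	.max_segment_size	= 4096,
	.dma_boundary		= 4096 - 1,
};
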
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index fc9dbad476c0..ae1d56da671d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -635,7 +635,7 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
 	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
 	unsigned long *indicatorp = NULL;
-	int ret, i;
+	int ret, i, queue_idx = 0;
 	struct ccw1 *ccw;
 
 	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
@@ -643,8 +643,14 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 		return -ENOMEM;
 
 	for (i = 0; i < nvqs; ++i) {
-		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
-					     ctx ? ctx[i] : false, ccw);
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		}
+
+		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
+					     names[i], ctx ? ctx[i] : false,
+					     ccw);
 		if (IS_ERR(vqs[i])) {
 			ret = PTR_ERR(vqs[i]);
 			vqs[i] = NULL;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index e8f5f7c63190..cd096104bcec 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -646,8 +646,9 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 	unsigned long *cpu_addr;
 	int retval = 1;
 
-	cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
-			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+				      size * TW_Q_LENGTH, &dma_handle,
+				      GFP_KERNEL);
 	if (!cpu_addr) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 		goto out;
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 128d658d472a..16957d7ac414 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	if(tpnt->sdev_attrs == NULL)
 		tpnt->sdev_attrs = NCR_700_dev_attrs;
 
-	memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+	memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
 				 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
 	if(memory == NULL) {
 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
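
The 53c700 fix is an ordering bug: at this point in NCR_700_detect() the hostdata->dev field has not been assigned yet, so the allocation must use the device pointer the caller handed in. A sketch of the rule, with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_hostdata {
	struct device *dev;	/* only valid after probe publishes it */
	void *script;
	dma_addr_t script_dma;
};

static int demo_detect(struct demo_hostdata *hd, struct device *dev,
		       size_t size)
{
	/* hd->dev may still be NULL here; use the parameter instead */
	hd->script = dma_alloc_coherent(dev, size, &hd->script_dma,
					GFP_KERNEL);
	if (!hd->script)
		return -ENOMEM;
	hd->dev = dev;		/* publish only once it is usable */
	return 0;
}
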
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index ff53fd0d12f2..66c514310f3c 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1123,8 +1123,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
 
 	/* Get total memory needed for SCB */
 	sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
-	host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
-			GFP_KERNEL);
+	host->scb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->scb_phys,
+					    GFP_KERNEL);
 	if (!host->scb_virt) {
 		printk("inia100: SCB memory allocation error\n");
 		goto out_host_put;
@@ -1132,8 +1132,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
 
 	/* Get total memory needed for ESCB */
 	sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
-	host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
-			GFP_KERNEL);
+	host->escb_virt = dma_alloc_coherent(&pdev->dev, sz, &host->escb_phys,
+					     GFP_KERNEL);
 	if (!host->escb_virt) {
 		printk("inia100: ESCB memory allocation error\n");
 		goto out_free_scb_array;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 634ddb90e7aa..7e56a11836c1 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,11 +1747,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
 	}
 
-	error = dma_set_max_seg_size(&pdev->dev,
-		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
-			(shost->max_sectors << 9) : 65536);
-	if (error)
-		goto out_deinit;
+	if (aac->adapter_info.options & AAC_OPT_NEW_COMM)
+		shost->max_segment_size = shost->max_sectors << 9;
+	else
+		shost->max_segment_size = 65536;
 
 	/*
 	 * Firmware printf works only with older firmware.
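
aacraid now states its per-segment cap through the Scsi_Host rather than calling dma_set_max_seg_size() on the PCI device directly; the SCSI midlayer folds shost->max_segment_size together with the DMA layer's own limit when it builds the request queue, and the probe path loses an error case. A sketch (the 65536 fallback mirrors the driver's legacy limit; demo_* is hypothetical):

#include <scsi/scsi_host.h>

static void demo_set_segment_limit(struct Scsi_Host *shost, bool new_comm)
{
	if (new_comm)	/* modern firmware: one segment per max-size I/O */
		shost->max_segment_size = shost->max_sectors << 9;
	else		/* legacy firmware: historical 64 KiB cap */
		shost->max_segment_size = 65536;
}
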
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f83f79b07b50..07efcb9b5b94 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			asd_dev_rev[asd_ha->revision_id]);
 }
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
 
 static ssize_t asd_show_dev_bios_build(struct device *dev,
 					struct device_attribute *attr,char *buf)
@@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
 {
 	int err;
 
-	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	if (err)
 		return err;
 
@@ -499,13 +499,13 @@ err_update_bios:
 err_biosb:
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 err_rev:
-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	return err;
 }
 
 static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
 {
-	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
 	device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
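
The rename matters because the PCI core already publishes a 'revision' attribute for every PCI device, so a second device_create_file() with the same name on the same device fails and breaks probe. Any driver-private duplicate needs a distinct name; a sketch with a placeholder value:

#include <linux/device.h>

static ssize_t demo_rev_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	/* "B1" is a placeholder for the adapter-specific revision */
	return snprintf(buf, PAGE_SIZE, "%s\n", "B1");
}
/* 'aic_revision', not 'revision': the PCI core owns the latter */
static DEVICE_ATTR(aic_revision, 0444, demo_rev_show, NULL);
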
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0f6751b0a633..57c6fa388bf6 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -587,8 +587,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg;
 		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
-		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
-			&dma_coherent_handle, GFP_KERNEL);
+		dma_coherent = dma_alloc_coherent(&pdev->dev,
+						  acb->roundup_ccbsize,
+						  &dma_coherent_handle,
+						  GFP_KERNEL);
 		if (!dma_coherent) {
 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 			return false;
@@ -617,8 +619,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
 		struct MessageUnit_D *reg;
 
 		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
-		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
-			&dma_coherent_handle, GFP_KERNEL);
+		dma_coherent = dma_alloc_coherent(&pdev->dev,
+						  acb->roundup_ccbsize,
+						  &dma_coherent_handle,
+						  GFP_KERNEL);
 		if (!dma_coherent) {
 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 			return false;
@@ -659,8 +663,10 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
 		uint32_t completeQ_size;
 		completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128;
 		acb->roundup_ccbsize = roundup(completeQ_size, 32);
-		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
-			&dma_coherent_handle, GFP_KERNEL);
+		dma_coherent = dma_alloc_coherent(&pdev->dev,
+						  acb->roundup_ccbsize,
+						  &dma_coherent_handle,
+						  GFP_KERNEL);
 		if (!dma_coherent){
 			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
 			return false;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 39f3820572b4..74e260027c7d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3321,8 +3321,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
-			GFP_KERNEL);
+	mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -ENOMEM;
 	return 0;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index ca7b7bbc8371..d4febaadfaa3 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -293,8 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
 				 struct be_dma_mem *cmd,
 				 u8 subsystem, u8 opcode, u32 size)
 {
-	cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
-			GFP_KERNEL);
+	cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+				     GFP_KERNEL);
 	if (!cmd->va) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 			    "BG_%d : Failed to allocate memory for if info\n");
@@ -1510,10 +1510,9 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
 		return -EINVAL;
 
 	nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
-	nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
-					    nonemb_cmd.size,
-					    &nonemb_cmd.dma,
-					    GFP_KERNEL);
+	nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
+					   nonemb_cmd.size, &nonemb_cmd.dma,
+					   GFP_KERNEL);
 	if (!nonemb_cmd.va) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
 			    "BM_%d : invldt_cmds_params alloc failed\n");
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 5d163ca1b366..d8e6d7480f35 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3264,9 +3264,9 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
 		/* Allocate dma coherent memory */
 		buf_info = buf_base;
 		buf_info->size = payload_len;
-		buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
-					buf_info->size, &buf_info->phys,
-					GFP_KERNEL);
+		buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev,
+						    buf_info->size, &buf_info->phys,
+						    GFP_KERNEL);
 		if (!buf_info->virt)
 			goto out_free_mem;
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index e8ae4d671d23..039328d9ef13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1857,10 +1857,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	 * entries. Hence the limit with one page is 8192 task context
 	 * entries.
 	 */
-	hba->task_ctx_bd_tbl = dma_zalloc_coherent(&hba->pcidev->dev,
-						   PAGE_SIZE,
-						   &hba->task_ctx_bd_dma,
-						   GFP_KERNEL);
+	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+						  PAGE_SIZE,
+						  &hba->task_ctx_bd_dma,
+						  GFP_KERNEL);
 	if (!hba->task_ctx_bd_tbl) {
 		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
 		rc = -1;
@@ -1894,10 +1894,10 @@ int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
 	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
 	for (i = 0; i < task_ctx_arr_sz; i++) {
 
-		hba->task_ctx[i] = dma_zalloc_coherent(&hba->pcidev->dev,
-						       PAGE_SIZE,
-						       &hba->task_ctx_dma[i],
-						       GFP_KERNEL);
+		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+						      PAGE_SIZE,
+						      &hba->task_ctx_dma[i],
+						      GFP_KERNEL);
 		if (!hba->task_ctx[i]) {
 			printk(KERN_ERR PFX "unable to alloc task context\n");
 			rc = -1;
@@ -2031,19 +2031,19 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
 	}
 
 	for (i = 0; i < segment_count; ++i) {
-		hba->hash_tbl_segments[i] = dma_zalloc_coherent(&hba->pcidev->dev,
-						BNX2FC_HASH_TBL_CHUNK_SIZE,
-						&dma_segment_array[i],
-						GFP_KERNEL);
+		hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev,
+						BNX2FC_HASH_TBL_CHUNK_SIZE,
+						&dma_segment_array[i],
+						GFP_KERNEL);
 		if (!hba->hash_tbl_segments[i]) {
 			printk(KERN_ERR PFX "hash segment alloc failed\n");
 			goto cleanup_dma;
 		}
 	}
 
-	hba->hash_tbl_pbl = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
-						&hba->hash_tbl_pbl_dma,
-						GFP_KERNEL);
+	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->hash_tbl_pbl_dma,
+					       GFP_KERNEL);
 	if (!hba->hash_tbl_pbl) {
 		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
 		goto cleanup_dma;
@@ -2104,10 +2104,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
 		return -ENOMEM;
 
 	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
-	hba->t2_hash_tbl_ptr = dma_zalloc_coherent(&hba->pcidev->dev,
-						   mem_size,
-						   &hba->t2_hash_tbl_ptr_dma,
-						   GFP_KERNEL);
+	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+						  &hba->t2_hash_tbl_ptr_dma,
+						  GFP_KERNEL);
 	if (!hba->t2_hash_tbl_ptr) {
 		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
 		bnx2fc_free_fw_resc(hba);
@@ -2116,9 +2115,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
 
 	mem_size = BNX2FC_NUM_MAX_SESS *
 		   sizeof(struct fcoe_t2_hash_table_entry);
-	hba->t2_hash_tbl = dma_zalloc_coherent(&hba->pcidev->dev, mem_size,
-					       &hba->t2_hash_tbl_dma,
-					       GFP_KERNEL);
+	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+					      &hba->t2_hash_tbl_dma,
+					      GFP_KERNEL);
 	if (!hba->t2_hash_tbl) {
 		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
 		bnx2fc_free_fw_resc(hba);
@@ -2140,9 +2139,9 @@ int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
 		return -ENOMEM;
 	}
 
-	hba->stats_buffer = dma_zalloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
-						&hba->stats_buf_dma,
-						GFP_KERNEL);
+	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					       &hba->stats_buf_dma,
+					       GFP_KERNEL);
 	if (!hba->stats_buffer) {
 		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
 		bnx2fc_free_fw_resc(hba);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c13a5b..bc9f2a2365f4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
 		return NULL;
 	}
 
+	cmgr->hba = hba;
 	cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
 				  GFP_KERNEL);
 	if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
 		goto mem_err;
 	}
 
-	cmgr->hba = hba;
 	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
 
 	for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
 
 	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
 	mem_size = num_ios * sizeof(struct io_bdt *);
-	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+	cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
 	if (!cmgr->io_bdt_pool) {
 		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
 		goto mem_err;
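
Two init-path rules are restored here: fields that the error path dereferences must be populated before the first 'goto mem_err', and pointer arrays walked by the cleanup loop must be zero-allocated so unfilled slots read as NULL rather than garbage. A sketch under those assumptions (demo_* names are hypothetical):

#include <linux/slab.h>

struct demo_hba;

struct demo_mgr {
	struct demo_hba *hba;	/* dereferenced by the cleanup path */
	void **pool;
};

static struct demo_mgr *demo_mgr_alloc(struct demo_hba *hba, int num)
{
	struct demo_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return NULL;
	mgr->hba = hba;		/* set before any failure exit */
	/* zeroed array: cleanup can tell unused slots (NULL) apart */
	mgr->pool = kcalloc(num, sizeof(*mgr->pool), GFP_KERNEL);
	if (!mgr->pool) {
		kfree(mgr);
		return NULL;
	}
	return mgr;
}
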
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index e3d1c7c440c8..d735e87e416a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -672,8 +672,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->sq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
-				      &tgt->sq_dma, GFP_KERNEL);
+	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+				     &tgt->sq_dma, GFP_KERNEL);
 	if (!tgt->sq) {
 		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
 			tgt->sq_mem_size);
@@ -685,8 +685,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->cq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
-				      &tgt->cq_dma, GFP_KERNEL);
+	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+				     &tgt->cq_dma, GFP_KERNEL);
 	if (!tgt->cq) {
 		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
 			tgt->cq_mem_size);
@@ -698,8 +698,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->rq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
-				      &tgt->rq_dma, GFP_KERNEL);
+	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+				     &tgt->rq_dma, GFP_KERNEL);
 	if (!tgt->rq) {
 		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
 			tgt->rq_mem_size);
@@ -710,8 +710,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
 			   CNIC_PAGE_MASK;
 
-	tgt->rq_pbl = dma_zalloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
-					  &tgt->rq_pbl_dma, GFP_KERNEL);
+	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+					 &tgt->rq_pbl_dma, GFP_KERNEL);
 	if (!tgt->rq_pbl) {
 		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
 			tgt->rq_pbl_size);
@@ -735,9 +735,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			      CNIC_PAGE_MASK;
 
-	tgt->xferq = dma_zalloc_coherent(&hba->pcidev->dev,
-					 tgt->xferq_mem_size, &tgt->xferq_dma,
-					 GFP_KERNEL);
+	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
+					tgt->xferq_mem_size, &tgt->xferq_dma,
+					GFP_KERNEL);
 	if (!tgt->xferq) {
 		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
 			tgt->xferq_mem_size);
@@ -749,9 +749,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			      CNIC_PAGE_MASK;
 
-	tgt->confq = dma_zalloc_coherent(&hba->pcidev->dev,
-					 tgt->confq_mem_size, &tgt->confq_dma,
-					 GFP_KERNEL);
+	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
+					tgt->confq_mem_size, &tgt->confq_dma,
+					GFP_KERNEL);
 	if (!tgt->confq) {
 		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
 			tgt->confq_mem_size);
@@ -763,9 +763,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->confq_pbl_size =
 		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
 
-	tgt->confq_pbl = dma_zalloc_coherent(&hba->pcidev->dev,
-					     tgt->confq_pbl_size,
-					     &tgt->confq_pbl_dma, GFP_KERNEL);
+	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+					    tgt->confq_pbl_size,
+					    &tgt->confq_pbl_dma, GFP_KERNEL);
 	if (!tgt->confq_pbl) {
 		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
 			tgt->confq_pbl_size);
@@ -787,9 +787,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	/* Allocate and map ConnDB */
 	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
 
-	tgt->conn_db = dma_zalloc_coherent(&hba->pcidev->dev,
-					   tgt->conn_db_mem_size,
-					   &tgt->conn_db_dma, GFP_KERNEL);
+	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+					  tgt->conn_db_mem_size,
+					  &tgt->conn_db_dma, GFP_KERNEL);
 	if (!tgt->conn_db) {
 		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
 			tgt->conn_db_mem_size);
@@ -802,8 +802,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
 	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
 			    CNIC_PAGE_MASK;
 
-	tgt->lcq = dma_zalloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
-				       &tgt->lcq_dma, GFP_KERNEL);
+	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+				      &tgt->lcq_dma, GFP_KERNEL);
 
 	if (!tgt->lcq) {
 		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91f5316aa3ab..fae6f71e677d 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1070,8 +1070,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 
 	/* Allocate memory area for actual SQ element */
 	ep->qp.sq_virt =
-		dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
-				    &ep->qp.sq_phys, GFP_KERNEL);
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+				   &ep->qp.sq_phys, GFP_KERNEL);
 	if (!ep->qp.sq_virt) {
 		printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
 		       ep->qp.sq_mem_size);
@@ -1106,8 +1106,8 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
 
 	/* Allocate memory area for actual CQ element */
 	ep->qp.cq_virt =
-		dma_zalloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
-				    &ep->qp.cq_phys, GFP_KERNEL);
+		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+				   &ep->qp.cq_phys, GFP_KERNEL);
 	if (!ep->qp.cq_virt) {
 		printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
 		       ep->qp.cq_mem_size);
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a004036e3d7..9bd2bd8dc2be 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
 	}
 
 	fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+	ln->fc_vport = fc_vport;
 
 	if (csio_fcoe_alloc_vnp(hw, ln))
 		goto error;
 
 	*(struct csio_lnode **)fc_vport->dd_data = ln;
-	ln->fc_vport = fc_vport;
 	if (!fc_vport->node_name)
 		fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
 	if (!fc_vport->port_name)
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index dc12933533d5..66bbd21819ae 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -233,8 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
 
 	q = wrm->q_arr[free_idx];
 
-	q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
-			GFP_KERNEL);
+	q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+				       GFP_KERNEL);
 	if (!q->vstart) {
 		csio_err(hw,
 			 "Failed to allocate DMA memory for "
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 8a20411699d9..75e1273a44b3 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
 }
 
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
-				unsigned int tid, int pg_idx, bool reply)
+				unsigned int tid, int pg_idx)
 {
 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
 				       GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->reply = V_NO_REPLY(1);
 	req->cpu_idx = 0;
 	req->word = htons(31);
 	req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
  * @tid: connection id
  * @hcrc: header digest enabled
  * @dcrc: data digest enabled
- * @reply: request reply from h/w
  * set up the iscsi digest settings for a connection identified by tid
  */
 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
-				 int hcrc, int dcrc, int reply)
+				 int hcrc, int dcrc)
 {
 	struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
 				       GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply = V_NO_REPLY(reply ? 0 : 1);
+	req->reply = V_NO_REPLY(1);
 	req->cpu_idx = 0;
 	req->word = htons(31);
 	req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 49f8028ac524..d26f50af00ea 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1548,16 +1548,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
 	struct cxgbi_sock *csk;
 
 	csk = lookup_tid(t, tid);
-	if (!csk)
+	if (!csk) {
 		pr_err("can't find conn. for tid %u.\n", tid);
+		return;
+	}
 
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		  "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
 		  csk, csk->state, csk->flags, csk->tid, rpl->status);
 
-	if (rpl->status != CPL_ERR_NONE)
+	if (rpl->status != CPL_ERR_NONE) {
 		pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
 		       csk, tid, rpl->status);
+		csk->err = -EINVAL;
+	}
+
+	complete(&csk->cmpl);
 
 	__kfree_skb(skb);
 }
@@ -1983,7 +1989,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
 }
 
 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
-				int pg_idx, bool reply)
+				int pg_idx)
 {
 	struct sk_buff *skb;
 	struct cpl_set_tcb_field *req;
@@ -1999,7 +2005,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, csk->tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
-	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 8);
 	req->val = cpu_to_be64(pg_idx << 8);
@@ -2008,12 +2014,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		  "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
 
+	reinit_completion(&csk->cmpl);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
-	return 0;
+	wait_for_completion(&csk->cmpl);
+
+	return csk->err;
 }
 
 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
-				 int hcrc, int dcrc, int reply)
+				 int hcrc, int dcrc)
 {
 	struct sk_buff *skb;
 	struct cpl_set_tcb_field *req;
@@ -2031,7 +2040,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 	req = (struct cpl_set_tcb_field *)skb->head;
 	INIT_TP_WR(req, tid);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
-	req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
 	req->word_cookie = htons(0);
 	req->mask = cpu_to_be64(0x3 << 4);
 	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -2041,8 +2050,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
 	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
 		  "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
 
+	reinit_completion(&csk->cmpl);
 	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
-	return 0;
+	wait_for_completion(&csk->cmpl);
+
+	return csk->err;
 }
 
 static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 75f876409fb9..245742557c03 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
 	skb_queue_head_init(&csk->receive_queue);
 	skb_queue_head_init(&csk->write_queue);
 	timer_setup(&csk->retry_timer, NULL, 0);
+	init_completion(&csk->cmpl);
 	rwlock_init(&csk->callback_lock);
 	csk->cdev = cdev;
 	csk->flags = 0;
@@ -2251,14 +2252,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 		if (!err && conn->hdrdgst_en)
 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
 							conn->hdrdgst_en,
-							conn->datadgst_en, 0);
+							conn->datadgst_en);
 		break;
 	case ISCSI_PARAM_DATADGST_EN:
 		err = iscsi_set_param(cls_conn, param, buf, buflen);
 		if (!err && conn->datadgst_en)
 			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
 							conn->hdrdgst_en,
-							conn->datadgst_en, 0);
+							conn->datadgst_en);
 		break;
 	case ISCSI_PARAM_MAX_R2T:
 		return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2384,7 +2385,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
 
 	ppm = csk->cdev->cdev2ppm(csk->cdev);
 	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
-					     ppm->tformat.pgsz_idx_dflt, 0);
+					     ppm->tformat.pgsz_idx_dflt);
 	if (err < 0)
 		return err;
 
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 5d5d8b50d842..1917ff57651d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -149,6 +149,7 @@ struct cxgbi_sock {
 	struct sk_buff_head receive_queue;
 	struct sk_buff_head write_queue;
 	struct timer_list retry_timer;
+	struct completion cmpl;
 	int err;
 	rwlock_t callback_lock;
 	void *user_data;
@@ -490,9 +491,9 @@ struct cxgbi_device {
 				  struct cxgbi_ppm *,
 				  struct cxgbi_task_tag_info *);
 	int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
-				    unsigned int, int, int, int);
+				    unsigned int, int, int);
 	int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
-				   unsigned int, int, bool);
+				   unsigned int, int);
 
 	void (*csk_release_offload_resources)(struct cxgbi_sock *);
 	int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
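
Taken together, the cxgb3i/cxgb4i/libcxgbi hunks turn the SET_TCB_FIELD setup calls into synchronous operations: the sender asks the hardware for a reply, parks on a per-socket completion, and returns the status the reply handler recorded. A sketch of that request/reply shape under hedged assumptions (demo_* names stand in; the completion is assumed init_completion()'d when the socket is created, and the send stub stands in for cxgb4_ofld_send()):

#include <linux/completion.h>

struct demo_sock {
	struct completion cmpl;	/* one outstanding request at a time */
	int err;		/* status recorded by the reply handler */
};

static void demo_send_set_tcb(struct demo_sock *csk)
{
	/* stand-in: queue the firmware work request here */
}

static int demo_set_tcb_sync(struct demo_sock *csk)
{
	reinit_completion(&csk->cmpl);
	demo_send_set_tcb(csk);
	wait_for_completion(&csk->cmpl);	/* reply path wakes us */
	return csk->err;
}

/* called from the receive path when the SET_TCB_RPL arrives */
static void demo_set_tcb_rpl(struct demo_sock *csk, int status)
{
	csk->err = status ? -EINVAL : 0;
	complete(&csk->cmpl);
}
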
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index bfa13e3b191c..c8bad2c093b8 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
 
 	cfg = shost_priv(host);
+	cfg->state = STATE_PROBING;
 	cfg->host = host;
 	rc = alloc_mem(cfg);
 	if (rc) {
@@ -3775,6 +3776,7 @@ out:
 	return rc;
 
 out_remove:
+	cfg->state = STATE_PROBED;
 	cxlflash_remove(pdev);
 	goto out;
 }
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index e2420a810e99..c92b3822c408 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2507,6 +2507,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
 	}
 
+	if (hisi_hba->prot_mask) {
+		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
+			 prot_mask);
+		scsi_host_set_prot(hisi_hba->shost, prot_mask);
+	}
+
 	rc = scsi_add_host(shost, dev);
 	if (rc)
 		goto err_out_ha;
@@ -2519,12 +2525,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		goto err_out_register_ha;
 
-	if (hisi_hba->prot_mask) {
-		dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
-			 prot_mask);
-		scsi_host_set_prot(hisi_hba->shost, prot_mask);
-	}
-
 	scsi_scan_host(shost);
 
 	return 0;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 68b90c4f79a3..1727d0c71b12 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -576,6 +576,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
 	shost->max_lun = ~0;
 	shost->max_cmd_len = MAX_COMMAND_SIZE;
 
+	/* turn on DIF support */
+	scsi_host_set_prot(shost,
+			   SHOST_DIF_TYPE1_PROTECTION |
+			   SHOST_DIF_TYPE2_PROTECTION |
+			   SHOST_DIF_TYPE3_PROTECTION);
+	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
 	err = scsi_add_host(shost, &pdev->dev);
 	if (err)
 		goto err_shost;
@@ -663,13 +670,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 			goto err_host_alloc;
 		}
 		pci_info->hosts[i] = h;
-
-		/* turn on DIF support */
-		scsi_host_set_prot(to_shost(h),
-				   SHOST_DIF_TYPE1_PROTECTION |
-				   SHOST_DIF_TYPE2_PROTECTION |
-				   SHOST_DIF_TYPE3_PROTECTION);
-		scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
 	}
 
 	err = isci_setup_interrupts(pdev);
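
Both the hisi_sas and isci hunks move the same call for the same reason: protection (DIF/DIX) capabilities have to be declared before scsi_add_host(), which consumes them while setting up the host's request-queue limits; setting them afterwards is too late. A sketch of the ordering:

#include <scsi/scsi_host.h>

static int demo_register_host(struct Scsi_Host *shost, struct device *parent)
{
	/* capability bits must precede scsi_add_host() */
	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
				  SHOST_DIF_TYPE2_PROTECTION |
				  SHOST_DIF_TYPE3_PROTECTION);
	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	return scsi_add_host(shost, parent);	/* limits locked in here */
}
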
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590ed955..ff943f477d6f 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
 		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	flp = fc_frame_payload_get(fp, sizeof(*flp));
 	if (!flp) {
 		FC_LPORT_DBG(lport, "FLOGI bad response\n");
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
 		FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
 			     "lport->mfs:%hu\n", mfs, lport->mfs);
 		fc_lport_error(lport, fp);
-		goto err;
+		goto out;
 	}
 
 	if (mfs <= lport->mfs) {
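
These three retargeted gotos all fix the same leak class: once a response frame has been received, every early exit has to reach the label that releases it. A minimal sketch of the shape, with hypothetical demo_* helpers:

struct demo_frame { int ok; };

static void demo_frame_free(struct demo_frame *fp)
{
	(void)fp;	/* stand-in for the real frame release */
}

static void demo_resp(struct demo_frame *fp)
{
	if (!fp->ok)
		goto out;	/* was 'goto err', which skipped the free */

	/* ... normal processing of the payload ... */
out:
	demo_frame_free(fp);
}
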
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 9192a1d9dec6..dfba4921b265 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
 	struct fc_rport_priv *rdata;
 
 	rdata = container_of(kref, struct fc_rport_priv, kref);
-	WARN_ON(!list_empty(&rdata->peers));
 	kfree_rcu(rdata, rcu);
 }
 EXPORT_SYMBOL(fc_rport_destroy);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 8698af86485d..2dc564e59430 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2730,8 +2730,8 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
 	INIT_LIST_HEAD(&dmabuf->list);
 
 	/* now, allocate dma buffer */
-	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
-					   &(dmabuf->phys), GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+					  &(dmabuf->phys), GFP_KERNEL);
 
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c1c36812c3d2..bede11e16349 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6973,9 +6973,9 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
 	if (!dmabuf)
 		return NULL;
 
-	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
-					   LPFC_HDR_TEMPLATE_SIZE,
-					   &dmabuf->phys, GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+					  LPFC_HDR_TEMPLATE_SIZE,
+					  &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		rpi_hdr = NULL;
 		goto err_free_dmabuf;
@@ -7397,8 +7397,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
 	}
 
 	/* Allocate memory for SLI-2 structures */
-	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-						&phba->slim2p.phys, GFP_KERNEL);
+	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+					       &phba->slim2p.phys, GFP_KERNEL);
 	if (!phba->slim2p.virt)
 		goto out_iounmap;
 
@@ -7816,8 +7816,8 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
 	 * plus an alignment restriction of 16 bytes.
 	 */
 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
-	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
-					   &dmabuf->phys, GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
+					  &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f6a5083a621e..4d3b94317515 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1827,9 +1827,9 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
 	 * page, this is used as a priori size of SLI4_PAGE_SIZE for
 	 * the later DMA memory free.
 	 */
-	viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
-				      SLI4_PAGE_SIZE, &phyaddr,
-				      GFP_KERNEL);
+	viraddr = dma_alloc_coherent(&phba->pcidev->dev,
+				     SLI4_PAGE_SIZE, &phyaddr,
+				     GFP_KERNEL);
 	/* In case of malloc fails, proceed with whatever we have */
 	if (!viraddr)
 		break;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 4c66b19e6199..8c9f79042228 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
 					  lport);
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&lport->lport_unreg_done);
+	if (lport->vport->localport)
+		complete(lport->lport_unreg_cmp);
 }
 
 /* lpfc_nvme_remoteport_delete
@@ -2545,7 +2546,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
  */
 void
 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
-			   struct lpfc_nvme_lport *lport)
+			   struct lpfc_nvme_lport *lport,
+			   struct completion *lport_unreg_cmp)
 {
 #if (IS_ENABLED(CONFIG_NVME_FC))
 	u32 wait_tmo;
@@ -2557,8 +2559,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 	 */
 	wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
 	while (true) {
-		ret = wait_for_completion_timeout(&lport->lport_unreg_done,
-						  wait_tmo);
+		ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
 		if (unlikely(!ret)) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
 					 "6176 Lport %p Localport %p wait "
@@ -2592,12 +2593,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_ctrl_stat *cstat;
 	int ret;
+	DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
 
 	if (vport->nvmei_support == 0)
 		return;
 
 	localport = vport->localport;
-	vport->localport = NULL;
 	lport = (struct lpfc_nvme_lport *)localport->private;
 	cstat = lport->cstat;
 
@@ -2608,13 +2609,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	/* lport's rport list is clear. Unregister
 	 * lport and release resources.
 	 */
-	init_completion(&lport->lport_unreg_done);
+	lport->lport_unreg_cmp = &lport_unreg_cmp;
 	ret = nvme_fc_unregister_localport(localport);
 
 	/* Wait for completion. This either blocks
 	 * indefinitely or succeeds
 	 */
-	lpfc_nvme_lport_unreg_wait(vport, lport);
+	lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+	vport->localport = NULL;
 	kfree(cstat);
 
 	/* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719be25c..b234d0298994 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
 /* Declare nvme-based local and remote port definitions. */
 struct lpfc_nvme_lport {
 	struct lpfc_vport *vport;
-	struct completion lport_unreg_done;
+	struct completion *lport_unreg_cmp;
 	/* Add stats counters here */
 	struct lpfc_nvme_ctrl_stat *cstat;
 	atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 6245f442d784..95fee83090eb 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
 	struct lpfc_nvmet_tgtport *tport = targetport->private;
 
 	/* release any threads waiting for the unreg to complete */
-	complete(&tport->tport_unreg_done);
+	if (tport->phba->targetport)
+		complete(tport->tport_unreg_cmp);
 }
 
 static void
@@ -1692,6 +1693,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_queue *wq;
 	uint32_t qidx;
+	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
 
 	if (phba->nvmet_support == 0)
 		return;
@@ -1701,9 +1703,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 			wq = phba->sli4_hba.nvme_wq[qidx];
 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
 		}
-		init_completion(&tgtp->tport_unreg_done);
+		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
 		nvmet_fc_unregister_targetport(phba->targetport);
-		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+		wait_for_completion_timeout(&tport_unreg_cmp, 5);
 		lpfc_nvmet_cleanup_io_context(phba);
 	}
 	phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63f1f41..0ec1082ce7ef 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
 /* Used for NVME Target */
 struct lpfc_nvmet_tgtport {
 	struct lpfc_hba *phba;
-	struct completion tport_unreg_done;
+	struct completion *tport_unreg_cmp;
 
 	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
 	atomic_t rcv_ls_req_in;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 30734caf77e1..2242e9b3ca12 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5362,8 +5362,8 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 	 * mailbox command.
 	 */
 	dma_size = *vpd_size;
-	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
-					   &dmabuf->phys, GFP_KERNEL);
+	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
+					  &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
@@ -6300,10 +6300,9 @@ lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
 			goto free_mem;
 		}
 
-		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
-						   LPFC_RAS_MAX_ENTRY_SIZE,
-						   &dmabuf->phys,
-						   GFP_KERNEL);
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  LPFC_RAS_MAX_ENTRY_SIZE,
+						  &dmabuf->phys, GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			rc = -ENOMEM;
@@ -9408,6 +9407,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		cmnd = CMD_XMIT_SEQUENCE64_CR;
 		if (phba->link_flag & LS_LOOPBACK_MODE)
 			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
+		/* fall through */
 	case CMD_XMIT_SEQUENCE64_CR:
 		/* word3 iocb=io_tag32 wqe=reserved */
 		wqe->xmit_sequence.rsvd3 = 0;
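
The fall-through hunks in lpfc_sli.c, here and below, exist because GCC's -Wimplicit-fallthrough only silences a fall-through it can recognise: ad-hoc wordings like "Drop thru" or "Fall Thru" had to become one of the accepted comment forms so the warning can flag the genuinely accidental cases. A sketch of the annotated shape (demo_decode() and its values are illustrative):

#include <linux/errno.h>

static int demo_decode(int status)
{
	int rc = 0;

	switch (status) {
	case 1:
		rc = 1;
		/* fall through */	/* recognised comment, no warning */
	case 2:
		rc += 2;
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}
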
@@ -13529,6 +13529,7 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2537 Receive Frame Truncated!!\n");
+		/* fall through */
 	case FC_STATUS_RQ_SUCCESS:
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		lpfc_sli4_rq_release(hrq, drq);
@@ -13938,7 +13939,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"6126 Receive Frame Truncated!!\n");
-		/* Drop thru */
+		/* fall through */
 	case FC_STATUS_RQ_SUCCESS:
 		spin_lock_irqsave(&phba->hbalock, iflags);
 		lpfc_sli4_rq_release(hrq, drq);
@@ -14613,9 +14614,9 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 		if (!dmabuf)
 			goto out_fail;
-		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
-						   hw_page_size, &dmabuf->phys,
-						   GFP_KERNEL);
+		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+						  hw_page_size, &dmabuf->phys,
+						  GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
@@ -14850,7 +14851,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
 			       eq->entry_count);
 		if (eq->entry_count < 256)
 			return -EINVAL;
-		/* otherwise default to smallest count (drop through) */
+		/* fall through - otherwise default to smallest count */
 	case 256:
 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
 		       LPFC_EQ_CNT_256);
@@ -14981,7 +14982,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			       LPFC_CQ_CNT_WORD7);
 			break;
 		}
-		/* Fall Thru */
+		/* fall through */
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"0361 Unsupported CQ count: "
@@ -14992,7 +14993,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 			status = -EINVAL;
 			goto out;
 		}
-		/* otherwise default to smallest count (drop through) */
+		/* fall through - otherwise default to smallest count */
 	case 256:
 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
 		       LPFC_CQ_CNT_256);
@@ -15152,7 +15153,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
 			       LPFC_CQ_CNT_WORD7);
 			break;
 		}
-		/* Fall Thru */
+		/* fall through */
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3118 Bad CQ count. (%d)\n",
@@ -15161,7 +15162,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15161 status = -EINVAL; 15162 status = -EINVAL;
15162 goto out; 15163 goto out;
15163 } 15164 }
15164 /* otherwise default to smallest (drop thru) */ 15165 /* fall through - otherwise default to smallest */
15165 case 256: 15166 case 256:
15166 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15167 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15167 &cq_set->u.request, LPFC_CQ_CNT_256); 15168 &cq_set->u.request, LPFC_CQ_CNT_256);
@@ -15433,7 +15434,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15433 status = -EINVAL; 15434 status = -EINVAL;
15434 goto out; 15435 goto out;
15435 } 15436 }
15436 /* otherwise default to smallest count (drop through) */ 15437 /* fall through - otherwise default to smallest count */
15437 case 16: 15438 case 16:
15438 bf_set(lpfc_mq_context_ring_size, 15439 bf_set(lpfc_mq_context_ring_size,
15439 &mq_create_ext->u.request.context, 15440 &mq_create_ext->u.request.context,
@@ -15852,7 +15853,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15852 status = -EINVAL; 15853 status = -EINVAL;
15853 goto out; 15854 goto out;
15854 } 15855 }
15855 /* otherwise default to smallest count (drop through) */ 15856 /* fall through - otherwise default to smallest count */
15856 case 512: 15857 case 512:
15857 bf_set(lpfc_rq_context_rqe_count, 15858 bf_set(lpfc_rq_context_rqe_count,
15858 &rq_create->u.request.context, 15859 &rq_create->u.request.context,
@@ -15989,7 +15990,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15989 status = -EINVAL; 15990 status = -EINVAL;
15990 goto out; 15991 goto out;
15991 } 15992 }
15992 /* otherwise default to smallest count (drop through) */ 15993 /* fall through - otherwise default to smallest count */
15993 case 512: 15994 case 512:
15994 bf_set(lpfc_rq_context_rqe_count, 15995 bf_set(lpfc_rq_context_rqe_count,
15995 &rq_create->u.request.context, 15996 &rq_create->u.request.context,
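The comment churn in the switch statements above ("Drop thru", "Fall Thru", "drop through") is not cosmetic: GCC's -Wimplicit-fallthrough=3, which the kernel build enables, only suppresses its warning for a fixed set of spellings such as "fall through" placed immediately before the next case label. A minimal sketch of the accepted form (the function and ring sizes here are illustrative, not lpfc code):

    static void pick_ring_size(unsigned int count)
    {
        switch (count) {
        case 1024:
            setup_ring_1024();      /* hypothetical helper */
            break;
        default:
            pr_warn("unsupported count %u, using smallest\n", count);
            /* fall through - otherwise default to smallest count */
        case 256:
            setup_ring_256();       /* reached directly or via default */
            break;
        }
    }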
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e836392b75e8..f112458023ff 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -967,9 +967,10 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
      * Allocate the common 16-byte aligned memory for the handshake
      * mailbox.
      */
-    raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
-            sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
-            GFP_KERNEL);
+    raid_dev->una_mbox64 = dma_alloc_coherent(&adapter->pdev->dev,
+                          sizeof(mbox64_t),
+                          &raid_dev->una_mbox64_dma,
+                          GFP_KERNEL);
 
     if (!raid_dev->una_mbox64) {
         con_log(CL_ANN, (KERN_WARNING
@@ -995,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
             align;
 
     // Allocate memory for commands issued internally
-    adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
-            &adapter->ibuf_dma_h, GFP_KERNEL);
+    adapter->ibuf = dma_alloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+                       &adapter->ibuf_dma_h, GFP_KERNEL);
     if (!adapter->ibuf) {
 
         con_log(CL_ANN, (KERN_WARNING
@@ -2897,8 +2898,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
      * Issue an ENQUIRY3 command to find out certain adapter parameters,
      * e.g., max channels, max commands etc.
      */
-    pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
-            &pinfo_dma_h, GFP_KERNEL);
+    pinfo = dma_alloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+                   &pinfo_dma_h, GFP_KERNEL);
     if (pinfo == NULL) {
         con_log(CL_ANN, (KERN_WARNING
             "megaraid: out of memory, %s %d\n", __func__,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f7bdd783360a..fcbff83c0097 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2273,9 +2273,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
                 sizeof(struct MR_LD_VF_AFFILIATION_111));
     else {
         new_affiliation_111 =
-            dma_zalloc_coherent(&instance->pdev->dev,
-                        sizeof(struct MR_LD_VF_AFFILIATION_111),
-                        &new_affiliation_111_h, GFP_KERNEL);
+            dma_alloc_coherent(&instance->pdev->dev,
+                       sizeof(struct MR_LD_VF_AFFILIATION_111),
+                       &new_affiliation_111_h, GFP_KERNEL);
         if (!new_affiliation_111) {
             dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
                    "memory for new affiliation for scsi%d\n",
@@ -2380,10 +2380,9 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
                 sizeof(struct MR_LD_VF_AFFILIATION));
     else {
         new_affiliation =
-            dma_zalloc_coherent(&instance->pdev->dev,
-                        (MAX_LOGICAL_DRIVES + 1) *
-                        sizeof(struct MR_LD_VF_AFFILIATION),
-                        &new_affiliation_h, GFP_KERNEL);
+            dma_alloc_coherent(&instance->pdev->dev,
+                       (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
+                       &new_affiliation_h, GFP_KERNEL);
         if (!new_affiliation) {
             dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
                    "memory for new affiliation for scsi%d\n",
@@ -2546,9 +2545,10 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
 
     if (initial) {
         instance->hb_host_mem =
-            dma_zalloc_coherent(&instance->pdev->dev,
-                        sizeof(struct MR_CTRL_HB_HOST_MEM),
-                        &instance->hb_host_mem_h, GFP_KERNEL);
+            dma_alloc_coherent(&instance->pdev->dev,
+                       sizeof(struct MR_CTRL_HB_HOST_MEM),
+                       &instance->hb_host_mem_h,
+                       GFP_KERNEL);
         if (!instance->hb_host_mem) {
             dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
                    " memory for heartbeat host memory for scsi%d\n",
@@ -5816,9 +5816,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
     }
 
     dcmd = &cmd->frame->dcmd;
-    el_info = dma_zalloc_coherent(&instance->pdev->dev,
-                      sizeof(struct megasas_evt_log_info), &el_info_h,
-                      GFP_KERNEL);
+    el_info = dma_alloc_coherent(&instance->pdev->dev,
+                     sizeof(struct megasas_evt_log_info),
+                     &el_info_h, GFP_KERNEL);
     if (!el_info) {
         megasas_return_cmd(instance, cmd);
         return -ENOMEM;
@@ -6236,7 +6236,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
         instance->consistent_mask_64bit = true;
 
     dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
-         ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
+         ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
          (instance->consistent_mask_64bit ? "63" : "32"));
 
     return 0;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 211c17c33aa0..647f48a28f85 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -175,7 +175,8 @@ megasas_clear_intr_fusion(struct megasas_instance *instance)
     /*
      * Check if it is our interrupt
      */
-    status = readl(&regs->outbound_intr_status);
+    status = megasas_readl(instance,
+                   &regs->outbound_intr_status);
 
     if (status & 1) {
         writel(status, &regs->outbound_intr_status);
@@ -689,8 +690,9 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
     array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
              MAX_MSIX_QUEUES_FUSION;
 
-    fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
-            array_size, &fusion->rdpq_phys, GFP_KERNEL);
+    fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
+                           array_size, &fusion->rdpq_phys,
+                           GFP_KERNEL);
     if (!fusion->rdpq_virt) {
         dev_err(&instance->pdev->dev,
             "Failed from %s %d\n", __func__, __LINE__);
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index f3e182eb0970..c9dc7740e9e7 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,9 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
     /* We use the PCI APIs for now until the generic one gets fixed
      * enough or until we get some macio-specific versions
      */
-    dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
-            ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
+    dma_cmd_space = dma_alloc_coherent(&macio_get_pci_dev(mdev)->dev,
+                       ms->dma_cmd_size, &dma_cmd_bus,
+                       GFP_KERNEL);
     if (dma_cmd_space == NULL) {
         printk(KERN_ERR "mesh: can't allocate DMA table\n");
         goto out_unmap;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index dbe753fba486..36f64205ecfa 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,9 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 
     case RESOURCE_UNCACHED_MEMORY:
         size = round_up(size, 8);
-        res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
-                &res->bus_addr, GFP_KERNEL);
+        res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
+                            &res->bus_addr,
+                            GFP_KERNEL);
         if (!res->virt_addr) {
             dev_err(&mhba->pdev->dev,
                 "unable to allocate consistent mem,"
@@ -246,8 +247,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
     if (size == 0)
         return 0;
 
-    virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
-            GFP_KERNEL);
+    virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+                       GFP_KERNEL);
     if (!virt_addr)
         return -1;
 
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b3be49d41375..084f2fcced0a 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
     u64 align_offset = 0;
     if (align)
         align_offset = (dma_addr_t)align - 1;
-    mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
-            &mem_dma_handle, GFP_KERNEL);
+    mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
+                        &mem_dma_handle, GFP_KERNEL);
     if (!mem_virt_alloc) {
         pm8001_printk("memory allocation error\n");
         return -1;
@@ -657,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
         if (dev->dev_type == SAS_SATA_DEV) {
             pm8001_device->attached_phy =
                 dev->rphy->identify.phy_identifier;
-            flag = 1; /* directly sata*/
+            flag = 1; /* directly sata */
         }
     } /*register this device to HBA*/
     PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index edcaf4b0cb0b..9bbc19fc190b 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1050,16 +1050,17 @@ static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
         sizeof(void *);
     fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
 
-    fcport->sq = dma_zalloc_coherent(&qedf->pdev->dev,
-        fcport->sq_mem_size, &fcport->sq_dma, GFP_KERNEL);
+    fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+                    &fcport->sq_dma, GFP_KERNEL);
     if (!fcport->sq) {
         QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
         rval = 1;
         goto out;
     }
 
-    fcport->sq_pbl = dma_zalloc_coherent(&qedf->pdev->dev,
-        fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+    fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+                        fcport->sq_pbl_size,
+                        &fcport->sq_pbl_dma, GFP_KERNEL);
     if (!fcport->sq_pbl) {
         QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
         rval = 1;
@@ -2680,8 +2681,10 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
     }
 
     /* Allocate list of PBL pages */
-    qedf->bdq_pbl_list = dma_zalloc_coherent(&qedf->pdev->dev,
-        QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+    qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+                        QEDF_PAGE_SIZE,
+                        &qedf->bdq_pbl_list_dma,
+                        GFP_KERNEL);
     if (!qedf->bdq_pbl_list) {
         QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
         return -ENOMEM;
@@ -2770,9 +2773,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
             ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
 
         qedf->global_queues[i]->cq =
-            dma_zalloc_coherent(&qedf->pdev->dev,
-                qedf->global_queues[i]->cq_mem_size,
-                &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+            dma_alloc_coherent(&qedf->pdev->dev,
+                       qedf->global_queues[i]->cq_mem_size,
+                       &qedf->global_queues[i]->cq_dma,
+                       GFP_KERNEL);
 
         if (!qedf->global_queues[i]->cq) {
             QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
@@ -2781,9 +2785,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
         }
 
         qedf->global_queues[i]->cq_pbl =
-            dma_zalloc_coherent(&qedf->pdev->dev,
-                qedf->global_queues[i]->cq_pbl_size,
-                &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+            dma_alloc_coherent(&qedf->pdev->dev,
+                       qedf->global_queues[i]->cq_pbl_size,
+                       &qedf->global_queues[i]->cq_pbl_dma,
+                       GFP_KERNEL);
 
         if (!qedf->global_queues[i]->cq_pbl) {
             QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 4da660c1c431..6d6d6013e35b 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -953,6 +953,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
 
     qedi_ep = ep->dd_data;
     if (qedi_ep->state == EP_STATE_IDLE ||
+        qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
         qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
         return -1;
 
@@ -1035,6 +1036,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
 
     switch (qedi_ep->state) {
     case EP_STATE_OFLDCONN_START:
+    case EP_STATE_OFLDCONN_NONE:
         goto ep_release_conn;
     case EP_STATE_OFLDCONN_FAILED:
         break;
@@ -1225,6 +1227,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 
     if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
         QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+        qedi_ep->state = EP_STATE_OFLDCONN_NONE;
         ret = -EIO;
         goto set_path_exit;
     }
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 11260776212f..892d70d54553 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@ enum {
     EP_STATE_OFLDCONN_FAILED = 0x2000,
     EP_STATE_CONNECT_FAILED = 0x4000,
     EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+    EP_STATE_OFLDCONN_NONE = 0x10000,
 };
 
 struct qedi_conn;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5c53409a8cea..e74a62448ba4 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1394,10 +1394,9 @@ static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
     struct qedi_nvm_iscsi_image nvm_image;
 
-    qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
-                        sizeof(nvm_image),
-                        &qedi->nvm_buf_dma,
-                        GFP_KERNEL);
+    qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
+                           sizeof(nvm_image),
+                           &qedi->nvm_buf_dma, GFP_KERNEL);
     if (!qedi->iscsi_image) {
         QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
         return -ENOMEM;
@@ -1510,10 +1509,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
     }
 
     /* Allocate list of PBL pages */
-    qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev,
-                         QEDI_PAGE_SIZE,
-                         &qedi->bdq_pbl_list_dma,
-                         GFP_KERNEL);
+    qedi->bdq_pbl_list = dma_alloc_coherent(&qedi->pdev->dev,
+                        QEDI_PAGE_SIZE,
+                        &qedi->bdq_pbl_list_dma,
+                        GFP_KERNEL);
     if (!qedi->bdq_pbl_list) {
         QEDI_ERR(&qedi->dbg_ctx,
              "Could not allocate list of PBL pages.\n");
@@ -1609,10 +1608,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
             (qedi->global_queues[i]->cq_pbl_size +
             (QEDI_PAGE_SIZE - 1));
 
-        qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
-                        qedi->global_queues[i]->cq_mem_size,
-                        &qedi->global_queues[i]->cq_dma,
-                        GFP_KERNEL);
+        qedi->global_queues[i]->cq = dma_alloc_coherent(&qedi->pdev->dev,
+                        qedi->global_queues[i]->cq_mem_size,
+                        &qedi->global_queues[i]->cq_dma,
+                        GFP_KERNEL);
 
         if (!qedi->global_queues[i]->cq) {
             QEDI_WARN(&qedi->dbg_ctx,
@@ -1620,10 +1619,10 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
             status = -ENOMEM;
             goto mem_alloc_failure;
         }
-        qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
-                        qedi->global_queues[i]->cq_pbl_size,
-                        &qedi->global_queues[i]->cq_pbl_dma,
-                        GFP_KERNEL);
+        qedi->global_queues[i]->cq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
+                        qedi->global_queues[i]->cq_pbl_size,
+                        &qedi->global_queues[i]->cq_pbl_dma,
+                        GFP_KERNEL);
 
         if (!qedi->global_queues[i]->cq_pbl) {
             QEDI_WARN(&qedi->dbg_ctx,
@@ -1691,16 +1690,16 @@ int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
     ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
     ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;
 
-    ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
-                     &ep->sq_dma, GFP_KERNEL);
+    ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
+                    &ep->sq_dma, GFP_KERNEL);
     if (!ep->sq) {
         QEDI_WARN(&qedi->dbg_ctx,
               "Could not allocate send queue.\n");
         rval = -ENOMEM;
         goto out;
     }
-    ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
-                     &ep->sq_pbl_dma, GFP_KERNEL);
+    ep->sq_pbl = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
+                    &ep->sq_pbl_dma, GFP_KERNEL);
     if (!ep->sq_pbl) {
         QEDI_WARN(&qedi->dbg_ctx,
               "Could not allocate send queue PBL.\n");
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index a414f51302b7..6856dfdfa473 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -4248,7 +4248,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
     ha->devnum = devnum;    /* specifies microcode load address */
 
 #ifdef QLA_64BIT_PTR
-    if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+    if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
         if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
             printk(KERN_WARNING "scsi(%li): Unable to set a "
                    "suitable DMA mask - aborting\n", ha->host_no);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 00444dc79756..ac504a1ff0ff 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2415,8 +2415,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
     if (qla2x00_chip_is_down(vha))
         goto done;
 
-    stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
-                    &stats_dma, GFP_KERNEL);
+    stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+                   GFP_KERNEL);
     if (!stats) {
         ql_log(ql_log_warn, vha, 0x707d,
             "Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4a9fd8d944d6..17d42658ad9a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2312,8 +2312,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job)
     if (!IS_FWI2_CAPABLE(ha))
         return -EPERM;
 
-    stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
-                    &stats_dma, GFP_KERNEL);
+    stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
+                   GFP_KERNEL);
     if (!stats) {
         ql_log(ql_log_warn, vha, 0x70e2,
             "Failed to allocate memory for stats.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 26b93c563f92..d1fc4958222a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -4394,6 +4394,8 @@ typedef struct scsi_qla_host {
     uint16_t n2n_id;
     struct list_head gpnid_list;
     struct fab_scan scan;
+
+    unsigned int irq_offset;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 90cfa394f942..cbc3bc49d4d1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -4147,9 +4147,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
         return rval;
     }
 
-    sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
-        &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
-        &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+    sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+                            sizeof(struct ct_sns_pkt),
+                            &sp->u.iocb_cmd.u.ctarg.req_dma,
+                            GFP_KERNEL);
     sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
     if (!sp->u.iocb_cmd.u.ctarg.req) {
         ql_log(ql_log_warn, vha, 0xffff,
@@ -4165,9 +4166,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
         ((vha->hw->max_fibre_devices - 1) *
             sizeof(struct ct_sns_gpn_ft_data));
 
-    sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
-        &vha->hw->pdev->dev, rspsz,
-        &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+    sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+                            rspsz,
+                            &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+                            GFP_KERNEL);
     sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
     if (!sp->u.iocb_cmd.u.ctarg.rsp) {
         ql_log(ql_log_warn, vha, 0xffff,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 364bb52ed2a6..8d1acc802a67 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
         /* Issue Marker IOCB */
         qla2x00_marker(vha, vha->hw->req_q_map[0],
-            vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
+            vha->hw->rsp_q_map[0], fcport->loop_id, lun,
             flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
     }
 
 done_free_sp:
     sp->free(sp);
-    sp->fcport->flags &= ~FCF_ASYNC_SENT;
+    fcport->flags &= ~FCF_ASYNC_SENT;
 done:
     return rval;
 }
@@ -3099,8 +3099,8 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
                  FCE_SIZE, ha->fce, ha->fce_dma);
 
     /* Allocate memory for Fibre Channel Event Buffer. */
-    tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
-                 GFP_KERNEL);
+    tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+                GFP_KERNEL);
     if (!tc) {
         ql_log(ql_log_warn, vha, 0x00be,
                "Unable to allocate (%d KB) for FCE.\n",
@@ -3131,8 +3131,8 @@ try_eft:
                  EFT_SIZE, ha->eft, ha->eft_dma);
 
     /* Allocate memory for Extended Trace Buffer. */
-    tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
-                 GFP_KERNEL);
+    tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+                GFP_KERNEL);
     if (!tc) {
         ql_log(ql_log_warn, vha, 0x00c1,
                "Unable to allocate (%d KB) for EFT.\n",
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 30d3090842f8..8507c43b918c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3446,6 +3446,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3446 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3447 } 3447 }
3448 } 3448 }
3449 vha->irq_offset = desc.pre_vectors;
3449 ha->msix_entries = kcalloc(ha->msix_count, 3450 ha->msix_entries = kcalloc(ha->msix_count,
3450 sizeof(struct qla_msix_entry), 3451 sizeof(struct qla_msix_entry),
3451 GFP_KERNEL); 3452 GFP_KERNEL);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index ea69dafc9774..c6ef83d0d99b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6939,7 +6939,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
     if (USER_CTRL_IRQ(vha->hw))
         rc = blk_mq_map_queues(qmap);
     else
-        rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
+        rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
     return rc;
 }
6945 6945
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1ef74aa2d00a..2bf5e3e639e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -153,8 +153,8 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
     dma_addr_t sys_info_dma;
     int status = QLA_ERROR;
 
-    sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
-                       &sys_info_dma, GFP_KERNEL);
+    sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+                      &sys_info_dma, GFP_KERNEL);
     if (sys_info == NULL) {
         DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
                   ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 5d56904687b9..dac9a7013208 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -625,9 +625,9 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
     uint32_t mbox_sts[MBOX_REG_COUNT];
     int status = QLA_ERROR;
 
-    init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
-                     sizeof(struct addr_ctrl_blk),
-                     &init_fw_cb_dma, GFP_KERNEL);
+    init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+                    sizeof(struct addr_ctrl_blk),
+                    &init_fw_cb_dma, GFP_KERNEL);
     if (init_fw_cb == NULL) {
         DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
                   ha->host_no, __func__));
@@ -709,9 +709,9 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
     uint32_t mbox_cmd[MBOX_REG_COUNT];
     uint32_t mbox_sts[MBOX_REG_COUNT];
 
-    init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
-                     sizeof(struct addr_ctrl_blk),
-                     &init_fw_cb_dma, GFP_KERNEL);
+    init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+                    sizeof(struct addr_ctrl_blk),
+                    &init_fw_cb_dma, GFP_KERNEL);
     if (init_fw_cb == NULL) {
         printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
                __func__);
@@ -1340,9 +1340,9 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
     uint32_t mbox_sts[MBOX_REG_COUNT];
     int status = QLA_ERROR;
 
-    about_fw = dma_zalloc_coherent(&ha->pdev->dev,
-                       sizeof(struct about_fw_info),
-                       &about_fw_dma, GFP_KERNEL);
+    about_fw = dma_alloc_coherent(&ha->pdev->dev,
+                      sizeof(struct about_fw_info),
+                      &about_fw_dma, GFP_KERNEL);
     if (!about_fw) {
         DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Unable to alloc memory "
                   "for about_fw\n", __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index d2b333d629be..5a31877c9d04 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4052,8 +4052,8 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
     dma_addr_t sys_info_dma;
     int status = QLA_ERROR;
 
-    sys_info = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
-                       &sys_info_dma, GFP_KERNEL);
+    sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
+                      &sys_info_dma, GFP_KERNEL);
     if (sys_info == NULL) {
         DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
                   ha->host_no, __func__));
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 949e186cc5d7..a77bfb224248 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -2704,9 +2704,9 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
     uint32_t rem = len;
     struct nlattr *attr;
 
-    init_fw_cb = dma_zalloc_coherent(&ha->pdev->dev,
-                     sizeof(struct addr_ctrl_blk),
-                     &init_fw_cb_dma, GFP_KERNEL);
+    init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
+                    sizeof(struct addr_ctrl_blk),
+                    &init_fw_cb_dma, GFP_KERNEL);
     if (!init_fw_cb) {
         ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
                __func__);
@@ -4206,8 +4206,8 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
               sizeof(struct shadow_regs) +
               MEM_ALIGN_VALUE +
               (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
-    ha->queues = dma_zalloc_coherent(&ha->pdev->dev, ha->queues_len,
-                     &ha->queues_dma, GFP_KERNEL);
+    ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len,
+                    &ha->queues_dma, GFP_KERNEL);
     if (ha->queues == NULL) {
         ql4_printk(KERN_WARNING, ha,
             "Memory Allocation failed - queues.\n");
@@ -7232,6 +7232,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
 
     rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
                        fw_ddb_entry);
+    if (rc)
+        goto free_sess;
 
     ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
            __func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 661512bec3ac..e27f4df24021 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
 
 /* make sure inq_product_rev string corresponds to this version */
 #define SDEBUG_VERSION "0188"   /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
 
 #define MY_NAME "scsi_debug"
 
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
         (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
 }
 
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
 {
     lba = do_div(lba, sdebug_store_sectors);
 
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
     return ret;
 }
 
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
  * return false. */
 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
 {
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
         if (sdt->app_tag == cpu_to_be16(0xffff))
             continue;
 
-        ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+        ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
         if (ret) {
             dif_errors++;
             return ret;
@@ -3261,10 +3261,12 @@ err_out:
 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
                u32 ei_lba, bool unmap, bool ndob)
 {
+    int ret;
     unsigned long iflags;
     unsigned long long i;
-    int ret;
-    u64 lba_off;
+    u32 lb_size = sdebug_sector_size;
+    u64 block, lbaa;
+    u8 *fs1p;
 
     ret = check_device_access_params(scp, lba, num);
     if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
         unmap_region(lba, num);
         goto out;
     }
-
-    lba_off = lba * sdebug_sector_size;
+    lbaa = lba;
+    block = do_div(lbaa, sdebug_store_sectors);
     /* if ndob then zero 1 logical block, else fetch 1 logical block */
+    fs1p = fake_storep + (block * lb_size);
     if (ndob) {
-        memset(fake_storep + lba_off, 0, sdebug_sector_size);
+        memset(fs1p, 0, lb_size);
         ret = 0;
     } else
-        ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
-                      sdebug_sector_size);
+        ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
 
     if (-1 == ret) {
         write_unlock_irqrestore(&atomic_rw, iflags);
         return DID_ERROR << 16;
-    } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+    } else if (sdebug_verbose && !ndob && (ret < lb_size))
         sdev_printk(KERN_INFO, scp->device,
                 "%s: %s: lb size=%u, IO sent=%d bytes\n",
-                my_name, "write same",
-                sdebug_sector_size, ret);
+                my_name, "write same", lb_size, ret);
 
     /* Copy first sector to remaining blocks */
-    for (i = 1 ; i < num ; i++)
-        memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
-               fake_storep + lba_off,
-               sdebug_sector_size);
-
+    for (i = 1 ; i < num ; i++) {
+        lbaa = lba + i;
+        block = do_div(lbaa, sdebug_store_sectors);
+        memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+    }
     if (scsi_debug_lbp())
         map_region(lba, num);
 out:
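The resp_write_same() rework above is a bounds fix: with a large virtual_gb the logical address space is bigger than the backing store, so every LBA must be folded back into fake_storep with do_div() before it is dereferenced, and memmove() replaces memcpy() because source and destination blocks can alias once addresses wrap. The mapping in isolation (lba2store is an illustrative helper for this sketch, not part of scsi_debug):

    #include <asm/div64.h>      /* do_div() */

    /* Fold a logical block address onto a smaller backing store.
     * do_div() takes a 32-bit divisor, leaves the quotient in its
     * first argument, and returns the remainder. */
    static unsigned char *lba2store(unsigned char *store, u64 lba,
                                    u32 store_sectors, u32 lb_size)
    {
        u64 tmp = lba;
        u32 block = do_div(tmp, store_sectors);

        return store + (u64)block * lb_size;
    }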
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b13cc9288ba0..6d65ac584eba 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1842,8 +1842,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
     blk_queue_segment_boundary(q, shost->dma_boundary);
     dma_set_seg_boundary(dev, shost->dma_boundary);
 
-    blk_queue_max_segment_size(q,
-        min(shost->max_segment_size, dma_get_max_seg_size(dev)));
+    blk_queue_max_segment_size(q, shost->max_segment_size);
+    dma_set_max_seg_size(dev, shost->max_segment_size);
 
     /*
      * Set a reasonable default alignment: The larger of 32-byte (dword),
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index a2b4179bfdf7..7639df91b110 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -80,8 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
 
     if (err == 0) {
         pm_runtime_disable(dev);
-        pm_runtime_set_active(dev);
+        err = pm_runtime_set_active(dev);
         pm_runtime_enable(dev);
+
+        /*
+         * Forcibly set runtime PM status of request queue to "active"
+         * to make sure we can again get requests from the queue
+         * (see also blk_pm_peek_request()).
+         *
+         * The resume hook will correct runtime PM status of the disk.
+         */
+        if (!err && scsi_is_sdev_device(dev)) {
+            struct scsi_device *sdev = to_scsi_device(dev);
+
+            if (sdev->request_queue->dev)
+                blk_set_runtime_active(sdev->request_queue);
+        }
     }
 
     return err;
@@ -140,16 +154,6 @@ static int scsi_bus_resume_common(struct device *dev,
     else
         fn = NULL;
 
-    /*
-     * Forcibly set runtime PM status of request queue to "active" to
-     * make sure we can again get requests from the queue (see also
-     * blk_pm_peek_request()).
-     *
-     * The resume hook will correct runtime PM status of the disk.
-     */
-    if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
-        blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
     if (fn) {
         async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a1a44f52e0e8..5464d467e23e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -206,6 +206,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
     sp = buffer_data[0] & 0x80 ? 1 : 0;
     buffer_data[0] &= ~0x80;
 
+    /*
+     * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+     * received mode parameter buffer before doing MODE SELECT.
+     */
+    data.device_specific = 0;
+
     if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
                  SD_MAX_RETRIES, &data, &sshdr)) {
         if (scsi_sense_valid(&sshdr))
@@ -2945,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
     if (rot == 1) {
         blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
         blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
-    } else {
-        blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
-        blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
     }
 
     if (sdkp->device->type == TYPE_ZBC) {
@@ -3084,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
     if (sdkp->media_present) {
         sd_read_capacity(sdkp, buffer);
 
+        /*
+         * set the default to rotational.  All non-rotational devices
+         * support the block characteristics VPD page, which will
+         * cause this to be updated correctly and any device which
+         * doesn't support it should be treated as rotational.
+         */
+        blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+        blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+
         if (scsi_device_supports_vpd(sdp)) {
             sd_read_block_provisioning(sdkp);
             sd_read_block_limits(sdkp);
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 83365b29a4d8..fff86940388b 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -462,12 +462,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
     sdkp->device->use_10_for_rw = 0;
 
     /*
-     * If something changed, revalidate the disk zone bitmaps once we have
-     * the capacity, that is on the second revalidate execution during disk
-     * scan and always during normal revalidate.
+     * Revalidate the disk zone bitmaps once the block device capacity is
+     * set on the second revalidate execution during disk scan and if
+     * something changed when executing a normal revalidate.
      */
-    if (sdkp->first_scan)
+    if (sdkp->first_scan) {
+        sdkp->zone_blocks = zone_blocks;
+        sdkp->nr_zones = nr_zones;
         return 0;
+    }
+
     if (sdkp->zone_blocks != zone_blocks ||
         sdkp->nr_zones != nr_zones ||
         disk->queue->nr_zones != nr_zones) {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index e2fa3f476227..f564af8949e8 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -323,7 +323,7 @@ static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
 static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
                     struct pqi_scsi_dev *device)
 {
-    return device->in_remove & !ctrl_info->in_shutdown;
+    return device->in_remove && !ctrl_info->in_shutdown;
 }
 
 static inline void pqi_schedule_rescan_worker_with_delay(
@@ -3576,9 +3576,9 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
         alloc_length += PQI_EXTRA_SGL_MEMORY;
 
     ctrl_info->queue_memory_base =
-        dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-            alloc_length,
-            &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+        dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+                   &ctrl_info->queue_memory_base_dma_handle,
+                   GFP_KERNEL);
 
     if (!ctrl_info->queue_memory_base)
         return -ENOMEM;
@@ -3715,10 +3715,9 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
         PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
 
     ctrl_info->admin_queue_memory_base =
-        dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-            alloc_length,
-            &ctrl_info->admin_queue_memory_base_dma_handle,
-            GFP_KERNEL);
+        dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
+                   &ctrl_info->admin_queue_memory_base_dma_handle,
+                   GFP_KERNEL);
 
     if (!ctrl_info->admin_queue_memory_base)
         return -ENOMEM;
@@ -4602,9 +4601,10 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
 
 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
 {
-    ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
-        ctrl_info->error_buffer_length,
-        &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+    ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
+                             ctrl_info->error_buffer_length,
+                             &ctrl_info->error_buffer_dma_handle,
+                             GFP_KERNEL);
 
     if (!ctrl_info->error_buffer)
         return -ENOMEM;
@@ -7487,8 +7487,8 @@ static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
         dma_addr_t dma_handle;
 
         ctrl_info->pqi_ofa_chunk_virt_addr[i] =
-            dma_zalloc_coherent(dev, chunk_size, &dma_handle,
-                        GFP_KERNEL);
+            dma_alloc_coherent(dev, chunk_size, &dma_handle,
+                       GFP_KERNEL);
 
         if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
             break;
@@ -7545,10 +7545,10 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
     struct device *dev;
 
     dev = &ctrl_info->pci_dev->dev;
-    pqi_ofa_memory = dma_zalloc_coherent(dev,
-                         PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
-                         &ctrl_info->pqi_ofa_mem_dma_handle,
-                         GFP_KERNEL);
+    pqi_ofa_memory = dma_alloc_coherent(dev,
+                        PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
+                        &ctrl_info->pqi_ofa_mem_dma_handle,
+                        GFP_KERNEL);
 
     if (!pqi_ofa_memory)
         return;
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index dd65fea07687..6d176815e6ce 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -195,7 +195,7 @@ enum ufs_desc_def_size {
     QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
     QUERY_DESC_UNIT_DEF_SIZE = 0x23,
     QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
-    QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+    QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
     QUERY_DESC_POWER_DEF_SIZE = 0x62,
     QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
 };
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9ba7671b84f8..2ddf24466a62 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -108,13 +108,19 @@
108int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, 108int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
109 const char *prefix) 109 const char *prefix)
110{ 110{
111 u8 *regs; 111 u32 *regs;
112 size_t pos;
113
114 if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
115 return -EINVAL;
112 116
113 regs = kzalloc(len, GFP_KERNEL); 117 regs = kzalloc(len, GFP_KERNEL);
114 if (!regs) 118 if (!regs)
115 return -ENOMEM; 119 return -ENOMEM;
116 120
117 memcpy_fromio(regs, hba->mmio_base + offset, len); 121 for (pos = 0; pos < len; pos += 4)
122 regs[pos / 4] = ufshcd_readl(hba, offset + pos);
123
118 ufshcd_hex_dump(prefix, regs, len); 124 ufshcd_hex_dump(prefix, regs, len);
119 kfree(regs); 125 kfree(regs);
120 126
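[Editor's note] ufshcd_dump_regs() switches from memcpy_fromio() on a u8 buffer to explicit 4-byte reads: as the added comment says ("keep readl happy"), memcpy_fromio() may issue sub-word MMIO accesses that some UFS host controllers do not tolerate. A hedged sketch of the dword-wise copy, using the generic readl() in place of the driver's ufshcd_readl() wrapper:

	/* Sketch: copy a register window with 32-bit reads only. */
	#include <linux/io.h>
	#include <linux/slab.h>

	static u32 *dump_window(void __iomem *base, size_t offset, size_t len)
	{
		u32 *buf;
		size_t pos;

		if (offset % 4 || len % 4)	/* enforce dword alignment */
			return NULL;

		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return NULL;

		for (pos = 0; pos < len; pos += 4)
			buf[pos / 4] = readl(base + offset + pos);

		return buf;	/* caller kfree()s after dumping */
	}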
@@ -8001,6 +8007,8 @@ out:
8001 trace_ufshcd_system_resume(dev_name(hba->dev), ret, 8007 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
8002 ktime_to_us(ktime_sub(ktime_get(), start)), 8008 ktime_to_us(ktime_sub(ktime_get(), start)),
8003 hba->curr_dev_pwr_mode, hba->uic_link_state); 8009 hba->curr_dev_pwr_mode, hba->uic_link_state);
8010 if (!ret)
8011 hba->is_sys_suspended = false;
8004 return ret; 8012 return ret;
8005} 8013}
8006EXPORT_SYMBOL(ufshcd_system_resume); 8014EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 9436aa83ff1b..e6d48dccb8d5 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -62,7 +62,7 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
62 return -ENODEV; 62 return -ENODEV;
63 } 63 }
64 64
65 if (!dma_zalloc_coherent(dev, *size, addr, 0)) { 65 if (!dma_alloc_coherent(dev, *size, addr, 0)) {
66 dev_err(dev, "DMA Alloc memory failed\n"); 66 dev_err(dev, "DMA Alloc memory failed\n");
67 return -ENODEV; 67 return -ENODEV;
68 } 68 }
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 52c153cd795a..636f83f781f5 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work);
1143static irqreturn_t portal_isr(int irq, void *ptr) 1143static irqreturn_t portal_isr(int irq, void *ptr)
1144{ 1144{
1145 struct qman_portal *p = ptr; 1145 struct qman_portal *p = ptr;
1146
1147 u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
1148 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; 1146 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
1147 u32 clear = 0;
1149 1148
1150 if (unlikely(!is)) 1149 if (unlikely(!is))
1151 return IRQ_NONE; 1150 return IRQ_NONE;
1152 1151
1153 /* DQRR-handling if it's interrupt-driven */ 1152 /* DQRR-handling if it's interrupt-driven */
1154 if (is & QM_PIRQ_DQRI) 1153 if (is & QM_PIRQ_DQRI) {
1155 __poll_portal_fast(p, QMAN_POLL_LIMIT); 1154 __poll_portal_fast(p, QMAN_POLL_LIMIT);
1155 clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
1156 }
1156 /* Handling of anything else that's interrupt-driven */ 1157 /* Handling of anything else that's interrupt-driven */
1157 clear |= __poll_portal_slow(p, is); 1158 clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
1158 qm_out(&p->p, QM_REG_ISR, clear); 1159 qm_out(&p->p, QM_REG_ISR, clear);
1159 return IRQ_HANDLED; 1160 return IRQ_HANDLED;
1160} 1161}
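[Editor's note] The portal ISR now acknowledges only the sources it actually serviced instead of unconditionally pre-loading QM_DQAVAIL_MASK into the clear mask: acking a bit whose work was never done can drop an interrupt. A generic sketch of the ack-what-you-handled pattern — the register and bit names below are illustrative, not QMan's:

	/* Sketch: accumulate the ack mask per handled source. */
	#include <linux/bits.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>

	#define REG_ISR		0x00	/* write-1-to-clear status */
	#define SRC_RX		BIT(0)
	#define SRC_ERR		BIT(1)

	static void handle_rx(void) { /* drain the ring */ }
	static void handle_err(void) { /* log and recover */ }

	static irqreturn_t demo_isr(void __iomem *regs, u32 enabled)
	{
		u32 pending = readl(regs + REG_ISR) & enabled;
		u32 ack = 0;

		if (!pending)
			return IRQ_NONE;

		if (pending & SRC_RX) {
			handle_rx();
			ack |= SRC_RX;	/* ack only after servicing */
		}
		if (pending & SRC_ERR) {
			handle_err();
			ack |= SRC_ERR;
		}

		writel(ack, regs + REG_ISR);
		return IRQ_HANDLED;
	}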
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f78c34647ca2..76480df195a8 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -44,10 +44,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
44 const char *sprop; 44 const char *sprop;
45 int ret = 0; 45 int ret = 0;
46 u32 val; 46 u32 val;
47 struct resource *res;
48 struct device_node *np2;
49 static int siram_init_flag;
50 struct platform_device *pdev;
51 47
52 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL); 48 sprop = of_get_property(np, "fsl,rx-sync-clock", NULL);
53 if (sprop) { 49 if (sprop) {
@@ -124,57 +120,6 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
124 utdm->siram_entry_id = val; 120 utdm->siram_entry_id = val;
125 121
126 set_si_param(utdm, ut_info); 122 set_si_param(utdm, ut_info);
127
128 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-si");
129 if (!np2)
130 return -EINVAL;
131
132 pdev = of_find_device_by_node(np2);
133 if (!pdev) {
134 pr_err("%pOFn: failed to lookup pdev\n", np2);
135 of_node_put(np2);
136 return -EINVAL;
137 }
138
139 of_node_put(np2);
140 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
141 utdm->si_regs = devm_ioremap_resource(&pdev->dev, res);
142 if (IS_ERR(utdm->si_regs)) {
143 ret = PTR_ERR(utdm->si_regs);
144 goto err_miss_siram_property;
145 }
146
147 np2 = of_find_compatible_node(NULL, NULL, "fsl,t1040-qe-siram");
148 if (!np2) {
149 ret = -EINVAL;
150 goto err_miss_siram_property;
151 }
152
153 pdev = of_find_device_by_node(np2);
154 if (!pdev) {
155 ret = -EINVAL;
156 pr_err("%pOFn: failed to lookup pdev\n", np2);
157 of_node_put(np2);
158 goto err_miss_siram_property;
159 }
160
161 of_node_put(np2);
162 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
163 utdm->siram = devm_ioremap_resource(&pdev->dev, res);
164 if (IS_ERR(utdm->siram)) {
165 ret = PTR_ERR(utdm->siram);
166 goto err_miss_siram_property;
167 }
168
169 if (siram_init_flag == 0) {
170 memset_io(utdm->siram, 0, resource_size(res));
171 siram_init_flag = 1;
172 }
173
174 return ret;
175
176err_miss_siram_property:
177 devm_iounmap(&pdev->dev, utdm->si_regs);
178 return ret; 123 return ret;
179} 124}
180EXPORT_SYMBOL(ucc_of_parse_tdm); 125EXPORT_SYMBOL(ucc_of_parse_tdm);
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 4d8012e1205c..68bfca6f20dd 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -44,7 +44,7 @@ config ARCH_RZN1
44 bool 44 bool
45 select ARM_AMBA 45 select ARM_AMBA
46 46
47if ARM 47if ARM && ARCH_RENESAS
48 48
49#comment "Renesas ARM SoCs System Type" 49#comment "Renesas ARM SoCs System Type"
50 50
diff --git a/drivers/soc/renesas/r8a774c0-sysc.c b/drivers/soc/renesas/r8a774c0-sysc.c
index e1ac4c0f6640..11050e17ea81 100644
--- a/drivers/soc/renesas/r8a774c0-sysc.c
+++ b/drivers/soc/renesas/r8a774c0-sysc.c
@@ -28,19 +28,6 @@ static struct rcar_sysc_area r8a774c0_areas[] __initdata = {
28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A }, 28 { "3dg-b", 0x100, 1, R8A774C0_PD_3DG_B, R8A774C0_PD_3DG_A },
29}; 29};
30 30
31static void __init rcar_sysc_fix_parent(struct rcar_sysc_area *areas,
32 unsigned int num_areas, u8 id,
33 int new_parent)
34{
35 unsigned int i;
36
37 for (i = 0; i < num_areas; i++)
38 if (areas[i].isr_bit == id) {
39 areas[i].parent = new_parent;
40 return;
41 }
42}
43
44/* Fixups for RZ/G2E ES1.0 revision */ 31/* Fixups for RZ/G2E ES1.0 revision */
45static const struct soc_device_attribute r8a774c0[] __initconst = { 32static const struct soc_device_attribute r8a774c0[] __initconst = {
46 { .soc_id = "r8a774c0", .revision = "ES1.0" }, 33 { .soc_id = "r8a774c0", .revision = "ES1.0" },
@@ -50,12 +37,10 @@ static const struct soc_device_attribute r8a774c0[] __initconst = {
50static int __init r8a774c0_sysc_init(void) 37static int __init r8a774c0_sysc_init(void)
51{ 38{
52 if (soc_device_match(r8a774c0)) { 39 if (soc_device_match(r8a774c0)) {
53 rcar_sysc_fix_parent(r8a774c0_areas, 40 /* Fix incorrect 3DG hierarchy */
54 ARRAY_SIZE(r8a774c0_areas), 41 swap(r8a774c0_areas[6], r8a774c0_areas[7]);
55 R8A774C0_PD_3DG_A, R8A774C0_PD_3DG_B); 42 r8a774c0_areas[6].parent = R8A774C0_PD_ALWAYS_ON;
56 rcar_sysc_fix_parent(r8a774c0_areas, 43 r8a774c0_areas[7].parent = R8A774C0_PD_3DG_B;
57 ARRAY_SIZE(r8a774c0_areas),
58 R8A774C0_PD_3DG_B, R8A774C0_PD_ALWAYS_ON);
59 } 44 }
60 45
61 return 0; 46 return 0;
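[Editor's note] The RZ/G2E fixup drops the lookup helper in favour of swap() on the two 3DG entries plus explicit parent reassignment. swap() (from <linux/kernel.h> in this era) exchanges two lvalues of the same type; a toy sketch with hypothetical data:

	/* Sketch: swap() plus parent fixup; values are illustrative. */
	#include <linux/kernel.h>

	struct area {
		const char *name;
		int parent;
	};

	static void fix_hierarchy(struct area *areas)
	{
		swap(areas[6], areas[7]);  /* entries emitted in wrong order */
		areas[6].parent = 0;       /* e.g. the always-on domain */
		areas[7].parent = 6;       /* hypothetical new parent id */
	}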
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
index d7e4e18ec3df..1ae9af5f17ec 100644
--- a/drivers/spi/spi-pic32-sqi.c
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -466,9 +466,9 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
466 int i; 466 int i;
467 467
468 /* allocate coherent DMAable memory for hardware buffer descriptors. */ 468 /* allocate coherent DMAable memory for hardware buffer descriptors. */
469 sqi->bd = dma_zalloc_coherent(&sqi->master->dev, 469 sqi->bd = dma_alloc_coherent(&sqi->master->dev,
470 sizeof(*bd) * PESQI_BD_COUNT, 470 sizeof(*bd) * PESQI_BD_COUNT,
471 &sqi->bd_dma, GFP_KERNEL); 471 &sqi->bd_dma, GFP_KERNEL);
472 if (!sqi->bd) { 472 if (!sqi->bd) {
473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n"); 473 dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
474 return -ENOMEM; 474 return -ENOMEM;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index a0802de8c3a1..6f5afab7c1a1 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -248,10 +248,10 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
248 struct ion_dma_buf_attachment *a = attachment->priv; 248 struct ion_dma_buf_attachment *a = attachment->priv;
249 struct ion_buffer *buffer = dmabuf->priv; 249 struct ion_buffer *buffer = dmabuf->priv;
250 250
251 free_duped_table(a->table);
252 mutex_lock(&buffer->lock); 251 mutex_lock(&buffer->lock);
253 list_del(&a->list); 252 list_del(&a->list);
254 mutex_unlock(&buffer->lock); 253 mutex_unlock(&buffer->lock);
254 free_duped_table(a->table);
255 255
256 kfree(a); 256 kfree(a);
257} 257}
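[Editor's note] The ion detach path now unlinks the attachment under the lock and frees the duplicated table only afterwards, so the (potentially heavy) free runs outside the critical section and never races another lock holder walking the list. A generic sketch of unlink-then-free:

	/* Sketch: unlink under the lock, free after dropping it. */
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct attachment {
		struct list_head list;
		void *table;	/* stand-in for the duplicated sg_table */
	};

	static void detach(struct mutex *lock, struct attachment *a)
	{
		mutex_lock(lock);
		list_del(&a->list);	/* now unreachable via the list */
		mutex_unlock(lock);

		kfree(a->table);	/* safe: nobody else can find it */
		kfree(a);
	}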
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
index 21a76a8ccc26..6027b19f7bc2 100644
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ b/drivers/staging/mt7621-eth/mtk_eth_soc.c
@@ -1396,8 +1396,7 @@ static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
1396 if (!ring->tx_buf) 1396 if (!ring->tx_buf)
1397 goto no_tx_mem; 1397 goto no_tx_mem;
1398 1398
1399 ring->tx_dma = dma_zalloc_coherent(eth->dev, 1399 ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
1400 ring->tx_ring_size * sz,
1401 &ring->tx_phys, 1400 &ring->tx_phys,
1402 GFP_ATOMIC | __GFP_ZERO); 1401 GFP_ATOMIC | __GFP_ZERO);
1403 if (!ring->tx_dma) 1402 if (!ring->tx_dma)
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 2848fa71a33d..d6248eecf123 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
170 return -ENODEV; 170 return -ENODEV;
171 171
172 priv->last_link = 0; 172 priv->last_link = 0;
173 phy_start_aneg(phydev); 173 phy_start(phydev);
174 174
175 return 0; 175 return 0;
176no_phy: 176no_phy:
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index 364d6ea14bf8..2f90f60f1681 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -154,7 +154,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
154 154
155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset; 155 pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
156 156
157 crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 157 crypto_ops = lib80211_get_crypto_ops("WEP");
158 158
159 if (!crypto_ops) 159 if (!crypto_ops)
160 return; 160 return;
@@ -210,7 +210,7 @@ int rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
210 void *crypto_private = NULL; 210 void *crypto_private = NULL;
211 int status = _SUCCESS; 211 int status = _SUCCESS;
212 const int keyindex = prxattrib->key_index; 212 const int keyindex = prxattrib->key_index;
213 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("WEP"), "lib80211_crypt_wep"); 213 struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("WEP");
214 char iv[4], icv[4]; 214 char iv[4], icv[4];
215 215
216 if (!crypto_ops) { 216 if (!crypto_ops) {
@@ -1291,7 +1291,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt; 1291 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
1292 void *crypto_private = NULL; 1292 void *crypto_private = NULL;
1293 u8 *key, *pframe = skb->data; 1293 u8 *key, *pframe = skb->data;
1294 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("CCMP"), "lib80211_crypt_ccmp"); 1294 struct lib80211_crypto_ops *crypto_ops = lib80211_get_crypto_ops("CCMP");
1295 struct security_priv *psecuritypriv = &padapter->securitypriv; 1295 struct security_priv *psecuritypriv = &padapter->securitypriv;
1296 char iv[8], icv[8]; 1296 char iv[8], icv[8];
1297 1297
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 28cbd6b3d26c..dfee6985efa6 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
35 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 35 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
36 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 36 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
37 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ 37 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
38 {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
38 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ 39 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
39 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 40 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
40 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ 41 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa8e672..9efb4dcb9d3a 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@ enum ieee80211_state {
850#define IP_FMT "%pI4" 850#define IP_FMT "%pI4"
851#define IP_ARG(x) (x) 851#define IP_ARG(x) (x)
852 852
853extern __inline int is_multicast_mac_addr(const u8 *addr) 853static inline int is_multicast_mac_addr(const u8 *addr)
854{ 854{
855 return ((addr[0] != 0xff) && (0x01 & addr[0])); 855 return ((addr[0] != 0xff) && (0x01 & addr[0]));
856} 856}
857 857
858extern __inline int is_broadcast_mac_addr(const u8 *addr) 858static inline int is_broadcast_mac_addr(const u8 *addr)
859{ 859{
860 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \ 860 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
861 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff)); 861 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
862} 862}
863 863
864extern __inline int is_zero_mac_addr(const u8 *addr) 864static inline int is_zero_mac_addr(const u8 *addr)
865{ 865{
866 return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \ 866 return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
867 (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00)); 867 (addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
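[Editor's note] `extern __inline` in a header is a GNU89-ism: depending on the compiler mode, each includer may emit an external definition and the link fails with duplicate symbols. `static inline` is the kernel idiom for header helpers — every translation unit gets its own internal copy. A minimal sketch:

	/* Sketch: header-safe helper; no out-of-line symbol is emitted. */
	#include <linux/types.h>

	static inline int is_locally_administered(const u8 *addr)
	{
		return addr[0] & 0x02;	/* U/L bit of the first octet */
	}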
diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
index 7c03b69b8ed3..6d02904de63f 100644
--- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
+++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c
@@ -22,7 +22,7 @@ static const struct sdio_device_id sdio_ids[] =
22 { SDIO_DEVICE(0x024c, 0xb723), }, 22 { SDIO_DEVICE(0x024c, 0xb723), },
23 { /* end: all zeroes */ }, 23 { /* end: all zeroes */ },
24}; 24};
25static const struct acpi_device_id acpi_ids[] __used = { 25static const struct acpi_device_id acpi_ids[] = {
26 {"OBDA8723", 0x0000}, 26 {"OBDA8723", 0x0000},
27 {} 27 {}
28}; 28};
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index c92bbd05516e..005de0024dd4 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
265 return; 265 return;
266 } 266 }
267 267
268 speakup_tty->ops->send_xchar(speakup_tty, ch); 268 if (speakup_tty->ops->send_xchar)
269 speakup_tty->ops->send_xchar(speakup_tty, ch);
269 mutex_unlock(&speakup_tty_mutex); 270 mutex_unlock(&speakup_tty_mutex);
270} 271}
271 272
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
277 return; 278 return;
278 } 279 }
279 280
280 speakup_tty->ops->tiocmset(speakup_tty, set, clear); 281 if (speakup_tty->ops->tiocmset)
282 speakup_tty->ops->tiocmset(speakup_tty, set, clear);
281 mutex_unlock(&speakup_tty_mutex); 283 mutex_unlock(&speakup_tty_mutex);
282} 284}
283 285
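[Editor's note] Not every tty driver implements send_xchar or tiocmset, so the ops pointers must be tested before the call; dereferencing a NULL op oopses under the mutex. The general pattern for optional entries in an ops table:

	/* Sketch: guard an optional function pointer before calling it. */
	struct demo_ops {
		void (*send_xchar)(void *ctx, char ch);
	};

	static void demo_send_xchar(struct demo_ops *ops, void *ctx, char ch)
	{
		if (ops->send_xchar)	/* op is optional */
			ops->send_xchar(ctx, ch);
		/* else: nothing to do; there is no fallback path here */
	}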
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index 338b6e952515..dd4898861b83 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -407,10 +407,8 @@ create_pagelist(char __user *buf, size_t count, unsigned short type)
407 /* Allocate enough storage to hold the page pointers and the page 407 /* Allocate enough storage to hold the page pointers and the page
408 * list 408 * list
409 */ 409 */
410 pagelist = dma_zalloc_coherent(g_dev, 410 pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
411 pagelist_size, 411 GFP_KERNEL);
412 &dma_addr,
413 GFP_KERNEL);
414 412
415 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist); 413 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
416 414
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 9e17ec651bde..53f5a1cb4636 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -446,6 +446,7 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
446static inline void 446static inline void
447remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) 447remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
448{ 448{
449 event->fired = 1;
449 event->armed = 0; 450 event->armed = 0;
450 wake_up_all(wq); 451 wake_up_all(wq);
451} 452}
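[Editor's note] Setting event->fired before wake_up_all() makes the signal visible to a waiter that re-checks its condition after waking; waking first risks the waiter testing the flag before it is set and going back to sleep. A generic sketch of the publish-then-wake pairing (wait_event() re-evaluates the condition, and the wake/wait pair supplies the needed ordering):

	/* Sketch: publish the condition, then wake; always wait on the
	 * condition, never on the wakeup alone. Assumes the waitqueue
	 * was set up with init_waitqueue_head(). */
	#include <linux/wait.h>

	struct demo_event {
		wait_queue_head_t wq;
		int fired;
	};

	static void demo_signal(struct demo_event *ev)
	{
		ev->fired = 1;		/* condition first... */
		wake_up_all(&ev->wq);	/* ...then the wakeup */
	}

	static void demo_wait(struct demo_event *ev)
	{
		wait_event(ev->wq, ev->fired);
	}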
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1ab0e8562d40..c9097e7367d8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -440,12 +440,9 @@ static bool device_init_rings(struct vnt_private *priv)
440 void *vir_pool; 440 void *vir_pool;
441 441
442 /*allocate all RD/TD rings a single pool*/ 442 /*allocate all RD/TD rings a single pool*/
443 vir_pool = dma_zalloc_coherent(&priv->pcid->dev, 443 vir_pool = dma_alloc_coherent(&priv->pcid->dev,
444 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + 444 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc) + priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) + priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
445 priv->opts.rx_descs1 * sizeof(struct vnt_rx_desc) + 445 &priv->pool_dma, GFP_ATOMIC);
446 priv->opts.tx_descs[0] * sizeof(struct vnt_tx_desc) +
447 priv->opts.tx_descs[1] * sizeof(struct vnt_tx_desc),
448 &priv->pool_dma, GFP_ATOMIC);
449 if (!vir_pool) { 446 if (!vir_pool) {
450 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n"); 447 dev_err(&priv->pcid->dev, "allocate desc dma memory failed\n");
451 return false; 448 return false;
@@ -459,13 +456,9 @@ static bool device_init_rings(struct vnt_private *priv)
459 priv->rd1_pool_dma = priv->rd0_pool_dma + 456 priv->rd1_pool_dma = priv->rd0_pool_dma +
460 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc); 457 priv->opts.rx_descs0 * sizeof(struct vnt_rx_desc);
461 458
462 priv->tx0_bufs = dma_zalloc_coherent(&priv->pcid->dev, 459 priv->tx0_bufs = dma_alloc_coherent(&priv->pcid->dev,
463 priv->opts.tx_descs[0] * PKT_BUF_SZ + 460 priv->opts.tx_descs[0] * PKT_BUF_SZ + priv->opts.tx_descs[1] * PKT_BUF_SZ + CB_BEACON_BUF_SIZE + CB_MAX_BUF_SIZE,
464 priv->opts.tx_descs[1] * PKT_BUF_SZ + 461 &priv->tx_bufs_dma0, GFP_ATOMIC);
465 CB_BEACON_BUF_SIZE +
466 CB_MAX_BUF_SIZE,
467 &priv->tx_bufs_dma0,
468 GFP_ATOMIC);
469 if (!priv->tx0_bufs) { 462 if (!priv->tx0_bufs) {
470 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n"); 463 dev_err(&priv->pcid->dev, "allocate buf dma memory failed\n");
471 464
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 70c854d939ce..3d0badc34825 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -36,7 +36,7 @@ struct wilc_op_mode {
36struct wilc_reg_frame { 36struct wilc_reg_frame {
37 bool reg; 37 bool reg;
38 u8 reg_id; 38 u8 reg_id;
39 __le32 frame_type; 39 __le16 frame_type;
40} __packed; 40} __packed;
41 41
42struct wilc_drv_handler { 42struct wilc_drv_handler {
@@ -1744,7 +1744,6 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
1744 result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, 1744 result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list,
1745 ARRAY_SIZE(wid_list), 1745 ARRAY_SIZE(wid_list),
1746 wilc_get_vif_idx(vif)); 1746 wilc_get_vif_idx(vif));
1747 kfree(gtk_key);
1748 } else if (mode == WILC_STATION_MODE) { 1747 } else if (mode == WILC_STATION_MODE) {
1749 struct wid wid; 1748 struct wid wid;
1750 1749
@@ -1754,9 +1753,9 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
1754 wid.val = (u8 *)gtk_key; 1753 wid.val = (u8 *)gtk_key;
1755 result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1, 1754 result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1,
1756 wilc_get_vif_idx(vif)); 1755 wilc_get_vif_idx(vif));
1757 kfree(gtk_key);
1758 } 1756 }
1759 1757
1758 kfree(gtk_key);
1760 return result; 1759 return result;
1761} 1760}
1762 1761
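[Editor's note] The two branch-local kfree(gtk_key) calls covered only the AP and station modes; any other mode fell through and leaked the key. Hoisting a single kfree() onto the common exit path covers every branch. A sketch with hypothetical helpers:

	/* Sketch: free on the common exit path so no branch can leak. */
	#include <linux/slab.h>
	#include <linux/string.h>

	static int push_key_to_fw(void *k, size_t n)  { return 0; } /* stub */
	static int push_key_to_sta(void *k, size_t n) { return 0; } /* stub */

	static int demo_set_key(int mode, const void *src, size_t len)
	{
		void *key = kmemdup(src, len, GFP_KERNEL);
		int ret = 0;

		if (!key)
			return -ENOMEM;

		if (mode == 1)
			ret = push_key_to_fw(key, len);
		else if (mode == 2)
			ret = push_key_to_sta(key, len);
		/* any other mode: nothing sent, but key still freed below */

		kfree(key);
		return ret;
	}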
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 3c5e9e030cad..489e5a5038f8 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -1252,21 +1252,22 @@ static u32 init_chip(struct net_device *dev)
1252 ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg); 1252 ret = wilc->hif_func->hif_read_reg(wilc, 0x1118, &reg);
1253 if (!ret) { 1253 if (!ret) {
1254 netdev_err(dev, "fail read reg 0x1118\n"); 1254 netdev_err(dev, "fail read reg 0x1118\n");
1255 return ret; 1255 goto release;
1256 } 1256 }
1257 reg |= BIT(0); 1257 reg |= BIT(0);
1258 ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg); 1258 ret = wilc->hif_func->hif_write_reg(wilc, 0x1118, reg);
1259 if (!ret) { 1259 if (!ret) {
1260 netdev_err(dev, "fail write reg 0x1118\n"); 1260 netdev_err(dev, "fail write reg 0x1118\n");
1261 return ret; 1261 goto release;
1262 } 1262 }
1263 ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71); 1263 ret = wilc->hif_func->hif_write_reg(wilc, 0xc0000, 0x71);
1264 if (!ret) { 1264 if (!ret) {
1265 netdev_err(dev, "fail write reg 0xc0000\n"); 1265 netdev_err(dev, "fail write reg 0xc0000\n");
1266 return ret; 1266 goto release;
1267 } 1267 }
1268 } 1268 }
1269 1269
1270release:
1270 release_bus(wilc, WILC_BUS_RELEASE_ONLY); 1271 release_bus(wilc, WILC_BUS_RELEASE_ONLY);
1271 1272
1272 return ret; 1273 return ret;
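[Editor's note] init_chip() holds the bus across these register accesses; the early returns skipped release_bus() and left the bus claimed on error. Converting every error exit to `goto release` is the kernel's standard single-exit cleanup pattern:

	/* Sketch: one exit path that undoes the acquire on every result. */
	#include <linux/types.h>

	struct demo_bus { int claimed; };

	static void acquire_bus(struct demo_bus *b) { b->claimed = 1; }
	static void release_bus(struct demo_bus *b) { b->claimed = 0; }
	static int write_reg(struct demo_bus *b, u32 addr, u32 val)
	{
		return 0;	/* stub */
	}

	static int demo_init(struct demo_bus *b)
	{
		int ret;

		acquire_bus(b);

		ret = write_reg(b, 0x1118, 0x1);
		if (ret)
			goto release;

		ret = write_reg(b, 0xc0000, 0x71);

	release:
		release_bus(b);	/* runs on success and every error path */
		return ret;
	}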
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 984941e036c8..bd15a564fe24 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -714,7 +714,7 @@ static int __init iscsi_target_init_module(void)
714 sizeof(struct iscsi_queue_req), 714 sizeof(struct iscsi_queue_req),
715 __alignof__(struct iscsi_queue_req), 0, NULL); 715 __alignof__(struct iscsi_queue_req), 0, NULL);
716 if (!lio_qr_cache) { 716 if (!lio_qr_cache) {
717 pr_err("nable to kmem_cache_create() for" 717 pr_err("Unable to kmem_cache_create() for"
718 " lio_qr_cache\n"); 718 " lio_qr_cache\n");
719 goto bitmap_out; 719 goto bitmap_out;
720 } 720 }
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 72016d0dfca5..8e7fffbb8802 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item,
852 return count; 852 return count;
853} 853}
854 854
855/* always zero, but attr needs to remain RW to avoid userspace breakage */
856static ssize_t pi_prot_format_show(struct config_item *item, char *page)
857{
858 return snprintf(page, PAGE_SIZE, "0\n");
859}
860
855static ssize_t pi_prot_format_store(struct config_item *item, 861static ssize_t pi_prot_format_store(struct config_item *item,
856 const char *page, size_t count) 862 const char *page, size_t count)
857{ 863{
@@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc);
1132CONFIGFS_ATTR(, emulate_pr); 1138CONFIGFS_ATTR(, emulate_pr);
1133CONFIGFS_ATTR(, pi_prot_type); 1139CONFIGFS_ATTR(, pi_prot_type);
1134CONFIGFS_ATTR_RO(, hw_pi_prot_type); 1140CONFIGFS_ATTR_RO(, hw_pi_prot_type);
1135CONFIGFS_ATTR_WO(, pi_prot_format); 1141CONFIGFS_ATTR(, pi_prot_format);
1136CONFIGFS_ATTR(, pi_prot_verify); 1142CONFIGFS_ATTR(, pi_prot_verify);
1137CONFIGFS_ATTR(, enforce_pr_isids); 1143CONFIGFS_ATTR(, enforce_pr_isids);
1138CONFIGFS_ATTR(, is_nonrot); 1144CONFIGFS_ATTR(, is_nonrot);
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 1e6d24943565..5831e0eecea1 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@ struct tcmu_dev {
148 size_t ring_size; 148 size_t ring_size;
149 149
150 struct mutex cmdr_lock; 150 struct mutex cmdr_lock;
151 struct list_head cmdr_queue; 151 struct list_head qfull_queue;
152 152
153 uint32_t dbi_max; 153 uint32_t dbi_max;
154 uint32_t dbi_thresh; 154 uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
159 159
160 struct timer_list cmd_timer; 160 struct timer_list cmd_timer;
161 unsigned int cmd_time_out; 161 unsigned int cmd_time_out;
162 struct list_head inflight_queue;
162 163
163 struct timer_list qfull_timer; 164 struct timer_list qfull_timer;
164 int qfull_time_out; 165 int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
179struct tcmu_cmd { 180struct tcmu_cmd {
180 struct se_cmd *se_cmd; 181 struct se_cmd *se_cmd;
181 struct tcmu_dev *tcmu_dev; 182 struct tcmu_dev *tcmu_dev;
182 struct list_head cmdr_queue_entry; 183 struct list_head queue_entry;
183 184
184 uint16_t cmd_id; 185 uint16_t cmd_id;
185 186
@@ -192,6 +193,7 @@ struct tcmu_cmd {
192 unsigned long deadline; 193 unsigned long deadline;
193 194
194#define TCMU_CMD_BIT_EXPIRED 0 195#define TCMU_CMD_BIT_EXPIRED 0
196#define TCMU_CMD_BIT_INFLIGHT 1
195 unsigned long flags; 197 unsigned long flags;
196}; 198};
197/* 199/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
586 if (!tcmu_cmd) 588 if (!tcmu_cmd)
587 return NULL; 589 return NULL;
588 590
589 INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); 591 INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
590 tcmu_cmd->se_cmd = se_cmd; 592 tcmu_cmd->se_cmd = se_cmd;
591 tcmu_cmd->tcmu_dev = udev; 593 tcmu_cmd->tcmu_dev = udev;
592 594
@@ -915,11 +917,13 @@ setup_timer:
915 return 0; 917 return 0;
916 918
917 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 919 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
918 mod_timer(timer, tcmu_cmd->deadline); 920 if (!timer_pending(timer))
921 mod_timer(timer, tcmu_cmd->deadline);
922
919 return 0; 923 return 0;
920} 924}
921 925
922static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) 926static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
923{ 927{
924 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 928 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
925 unsigned int tmo; 929 unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
942 if (ret) 946 if (ret)
943 return ret; 947 return ret;
944 948
945 list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); 949 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
946 pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 950 pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
947 tcmu_cmd->cmd_id, udev->name); 951 tcmu_cmd->cmd_id, udev->name);
948 return 0; 952 return 0;
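[Editor's note] With many commands sharing one timer, calling mod_timer() for every queued command keeps pushing the expiry out to the newest command's deadline, so the oldest command never times out. The timer_pending() guard arms the timer only when idle (i.e. for the earliest deadline), and the new tcmu_set_next_deadline() helper below re-arms it from the queue head as commands retire. The guard in isolation:

	/* Sketch: one timer tracks a FIFO of deadlines; never push out
	 * an already-armed, earlier expiry. */
	#include <linux/timer.h>

	static void queue_deadline(struct timer_list *t, unsigned long deadline)
	{
		if (!timer_pending(t))
			mod_timer(t, deadline);
	}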
@@ -999,7 +1003,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
999 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 1003 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
1000 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1004 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1001 1005
1002 if (!list_empty(&udev->cmdr_queue)) 1006 if (!list_empty(&udev->qfull_queue))
1003 goto queue; 1007 goto queue;
1004 1008
1005 mb = udev->mb_addr; 1009 mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
1096 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1100 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1097 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1101 tcmu_flush_dcache_range(mb, sizeof(*mb));
1098 1102
1103 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1104 set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
1105
1099 /* TODO: only if FLUSH and FUA? */ 1106 /* TODO: only if FLUSH and FUA? */
1100 uio_event_notify(&udev->uio_info); 1107 uio_event_notify(&udev->uio_info);
1101 1108
1102 return 0; 1109 return 0;
1103 1110
1104queue: 1111queue:
1105 if (add_to_cmdr_queue(tcmu_cmd)) { 1112 if (add_to_qfull_queue(tcmu_cmd)) {
1106 *scsi_err = TCM_OUT_OF_RESOURCES; 1113 *scsi_err = TCM_OUT_OF_RESOURCES;
1107 return -1; 1114 return -1;
1108 } 1115 }
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
1145 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1152 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
1146 goto out; 1153 goto out;
1147 1154
1155 list_del_init(&cmd->queue_entry);
1156
1148 tcmu_cmd_reset_dbi_cur(cmd); 1157 tcmu_cmd_reset_dbi_cur(cmd);
1149 1158
1150 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 1159 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ out:
1194 tcmu_free_cmd(cmd); 1203 tcmu_free_cmd(cmd);
1195} 1204}
1196 1205
1206static void tcmu_set_next_deadline(struct list_head *queue,
1207 struct timer_list *timer)
1208{
1209 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1210 unsigned long deadline = 0;
1211
1212 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
1213 if (!time_after(jiffies, tcmu_cmd->deadline)) {
1214 deadline = tcmu_cmd->deadline;
1215 break;
1216 }
1217 }
1218
1219 if (deadline)
1220 mod_timer(timer, deadline);
1221 else
1222 del_timer(timer);
1223}
1224
1197static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1225static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1198{ 1226{
1199 struct tcmu_mailbox *mb; 1227 struct tcmu_mailbox *mb;
1228 struct tcmu_cmd *cmd;
1200 int handled = 0; 1229 int handled = 0;
1201 1230
1202 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1231 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1210 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1239 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1211 1240
1212 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 1241 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
1213 struct tcmu_cmd *cmd;
1214 1242
1215 tcmu_flush_dcache_range(entry, sizeof(*entry)); 1243 tcmu_flush_dcache_range(entry, sizeof(*entry));
1216 1244
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1243 /* no more pending commands */ 1271 /* no more pending commands */
1244 del_timer(&udev->cmd_timer); 1272 del_timer(&udev->cmd_timer);
1245 1273
1246 if (list_empty(&udev->cmdr_queue)) { 1274 if (list_empty(&udev->qfull_queue)) {
1247 /* 1275 /*
1248 * no more pending or waiting commands so try to 1276 * no more pending or waiting commands so try to
1249 * reclaim blocks if needed. 1277 * reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
1252 tcmu_global_max_blocks) 1280 tcmu_global_max_blocks)
1253 schedule_delayed_work(&tcmu_unmap_work, 0); 1281 schedule_delayed_work(&tcmu_unmap_work, 0);
1254 } 1282 }
1283 } else if (udev->cmd_time_out) {
1284 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1255 } 1285 }
1256 1286
1257 return handled; 1287 return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1271 if (!time_after(jiffies, cmd->deadline)) 1301 if (!time_after(jiffies, cmd->deadline))
1272 return 0; 1302 return 0;
1273 1303
1274 is_running = list_empty(&cmd->cmdr_queue_entry); 1304 is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
1275 se_cmd = cmd->se_cmd; 1305 se_cmd = cmd->se_cmd;
1276 1306
1277 if (is_running) { 1307 if (is_running) {
@@ -1287,9 +1317,9 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
1287 * target_complete_cmd will translate this to LUN COMM FAILURE 1317 * target_complete_cmd will translate this to LUN COMM FAILURE
1288 */ 1318 */
1289 scsi_status = SAM_STAT_CHECK_CONDITION; 1319 scsi_status = SAM_STAT_CHECK_CONDITION;
1320 list_del_init(&cmd->queue_entry);
1290 } else { 1321 } else {
1291 list_del_init(&cmd->cmdr_queue_entry); 1322 list_del_init(&cmd->queue_entry);
1292
1293 idr_remove(&udev->commands, id); 1323 idr_remove(&udev->commands, id);
1294 tcmu_free_cmd(cmd); 1324 tcmu_free_cmd(cmd);
1295 scsi_status = SAM_STAT_TASK_SET_FULL; 1325 scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -1372,7 +1402,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1372 1402
1373 INIT_LIST_HEAD(&udev->node); 1403 INIT_LIST_HEAD(&udev->node);
1374 INIT_LIST_HEAD(&udev->timedout_entry); 1404 INIT_LIST_HEAD(&udev->timedout_entry);
1375 INIT_LIST_HEAD(&udev->cmdr_queue); 1405 INIT_LIST_HEAD(&udev->qfull_queue);
1406 INIT_LIST_HEAD(&udev->inflight_queue);
1376 idr_init(&udev->commands); 1407 idr_init(&udev->commands);
1377 1408
1378 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1409 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1414,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1383 return &udev->se_dev; 1414 return &udev->se_dev;
1384} 1415}
1385 1416
1386static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail) 1417static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
1387{ 1418{
1388 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1419 struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1389 LIST_HEAD(cmds); 1420 LIST_HEAD(cmds);
@@ -1391,15 +1422,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1391 sense_reason_t scsi_ret; 1422 sense_reason_t scsi_ret;
1392 int ret; 1423 int ret;
1393 1424
1394 if (list_empty(&udev->cmdr_queue)) 1425 if (list_empty(&udev->qfull_queue))
1395 return true; 1426 return true;
1396 1427
1397 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1428 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1398 1429
1399 list_splice_init(&udev->cmdr_queue, &cmds); 1430 list_splice_init(&udev->qfull_queue, &cmds);
1400 1431
1401 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) { 1432 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1402 list_del_init(&tcmu_cmd->cmdr_queue_entry); 1433 list_del_init(&tcmu_cmd->queue_entry);
1403 1434
1404 pr_debug("removing cmd %u on dev %s from queue\n", 1435 pr_debug("removing cmd %u on dev %s from queue\n",
1405 tcmu_cmd->cmd_id, udev->name); 1436 tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1468,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
1437 * cmd was requeued, so just put all cmds back in 1468 * cmd was requeued, so just put all cmds back in
1438 * the queue 1469 * the queue
1439 */ 1470 */
1440 list_splice_tail(&cmds, &udev->cmdr_queue); 1471 list_splice_tail(&cmds, &udev->qfull_queue);
1441 drained = false; 1472 drained = false;
1442 goto done; 1473 break;
1443 } 1474 }
1444 } 1475 }
1445 if (list_empty(&udev->cmdr_queue)) 1476
1446 del_timer(&udev->qfull_timer); 1477 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1447done:
1448 return drained; 1478 return drained;
1449} 1479}
1450 1480
@@ -1454,7 +1484,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1454 1484
1455 mutex_lock(&udev->cmdr_lock); 1485 mutex_lock(&udev->cmdr_lock);
1456 tcmu_handle_completions(udev); 1486 tcmu_handle_completions(udev);
1457 run_cmdr_queue(udev, false); 1487 run_qfull_queue(udev, false);
1458 mutex_unlock(&udev->cmdr_lock); 1488 mutex_unlock(&udev->cmdr_lock);
1459 1489
1460 return 0; 1490 return 0;
@@ -1982,7 +2012,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
1982 /* complete IO that has executed successfully */ 2012 /* complete IO that has executed successfully */
1983 tcmu_handle_completions(udev); 2013 tcmu_handle_completions(udev);
1984 /* fail IO waiting to be queued */ 2014 /* fail IO waiting to be queued */
1985 run_cmdr_queue(udev, true); 2015 run_qfull_queue(udev, true);
1986 2016
1987unlock: 2017unlock:
1988 mutex_unlock(&udev->cmdr_lock); 2018 mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2027,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
1997 mutex_lock(&udev->cmdr_lock); 2027 mutex_lock(&udev->cmdr_lock);
1998 2028
1999 idr_for_each_entry(&udev->commands, cmd, i) { 2029 idr_for_each_entry(&udev->commands, cmd, i) {
2000 if (!list_empty(&cmd->cmdr_queue_entry)) 2030 if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
2001 continue; 2031 continue;
2002 2032
2003 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 2033 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2036,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2006 2036
2007 idr_remove(&udev->commands, i); 2037 idr_remove(&udev->commands, i);
2008 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2038 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
2039 list_del_init(&cmd->queue_entry);
2009 if (err_level == 1) { 2040 if (err_level == 1) {
2010 /* 2041 /*
2011 * Userspace was not able to start the 2042 * Userspace was not able to start the
@@ -2666,6 +2697,10 @@ static void check_timedout_devices(void)
2666 2697
2667 mutex_lock(&udev->cmdr_lock); 2698 mutex_lock(&udev->cmdr_lock);
2668 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 2699 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
2700
2701 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
2702 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
2703
2669 mutex_unlock(&udev->cmdr_lock); 2704 mutex_unlock(&udev->cmdr_lock);
2670 2705
2671 spin_lock_bh(&timed_out_udevs_lock); 2706 spin_lock_bh(&timed_out_udevs_lock);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 30323426902e..58bb7d72dc2b 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -152,6 +152,7 @@ config CPU_THERMAL
152 bool "generic cpu cooling support" 152 bool "generic cpu cooling support"
153 depends on CPU_FREQ 153 depends on CPU_FREQ
154 depends on THERMAL_OF 154 depends on THERMAL_OF
155 depends on THERMAL=y
155 help 156 help
156 This implements the generic cpu cooling mechanism through frequency 157 This implements the generic cpu cooling mechanism through frequency
157 reduction. An ACPI version of this already exists 158 reduction. An ACPI version of this already exists
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index dfd23245f778..6fff16113628 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy)
774 774
775 cdev = __cpufreq_cooling_register(np, policy, capacitance); 775 cdev = __cpufreq_cooling_register(np, policy, capacitance);
776 if (IS_ERR(cdev)) { 776 if (IS_ERR(cdev)) {
777 pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", 777 pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n",
778 policy->cpu, PTR_ERR(cdev)); 778 policy->cpu, PTR_ERR(cdev));
779 cdev = NULL; 779 cdev = NULL;
780 } 780 }
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 0582bd12a239..0ca908d12750 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -4,7 +4,7 @@
4 4
5config INT340X_THERMAL 5config INT340X_THERMAL
6 tristate "ACPI INT340X thermal drivers" 6 tristate "ACPI INT340X thermal drivers"
7 depends on X86 && ACPI 7 depends on X86 && ACPI && PCI
8 select THERMAL_GOV_USER_SPACE 8 select THERMAL_GOV_USER_SPACE
9 select ACPI_THERMAL_REL 9 select ACPI_THERMAL_REL
10 select ACPI_FAN 10 select ACPI_FAN
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 284cf2c5a8fd..8e1cf4d789be 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
84 struct pci_dev *pci_dev; \ 84 struct pci_dev *pci_dev; \
85 struct platform_device *pdev; \ 85 struct platform_device *pdev; \
86 struct proc_thermal_device *proc_dev; \ 86 struct proc_thermal_device *proc_dev; \
87\ 87 \
88 if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
89 dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
90 return 0; \
91 } \
92 \
88 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \ 93 if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
89 pdev = to_platform_device(dev); \ 94 pdev = to_platform_device(dev); \
90 proc_dev = platform_get_drvdata(pdev); \ 95 proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
298 *priv = proc_priv; 303 *priv = proc_priv;
299 304
300 ret = proc_thermal_read_ppcc(proc_priv); 305 ret = proc_thermal_read_ppcc(proc_priv);
301 if (!ret) {
302 ret = sysfs_create_group(&dev->kobj,
303 &power_limit_attribute_group);
304
305 }
306 if (ret) 306 if (ret)
307 return ret; 307 return ret;
308 308
@@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
316 316
317 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); 317 proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
318 if (IS_ERR(proc_priv->int340x_zone)) { 318 if (IS_ERR(proc_priv->int340x_zone)) {
319 ret = PTR_ERR(proc_priv->int340x_zone); 319 return PTR_ERR(proc_priv->int340x_zone);
320 goto remove_group;
321 } else 320 } else
322 ret = 0; 321 ret = 0;
323 322
@@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
331 330
332remove_zone: 331remove_zone:
333 int340x_thermal_zone_remove(proc_priv->int340x_zone); 332 int340x_thermal_zone_remove(proc_priv->int340x_zone);
334remove_group:
335 sysfs_remove_group(&proc_priv->dev->kobj,
336 &power_limit_attribute_group);
337 333
338 return ret; 334 return ret;
339} 335}
@@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
364 platform_set_drvdata(pdev, proc_priv); 360 platform_set_drvdata(pdev, proc_priv);
365 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV; 361 proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
366 362
367 return 0; 363 dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
364
365 return sysfs_create_group(&pdev->dev.kobj,
366 &power_limit_attribute_group);
368} 367}
369 368
370static int int3401_remove(struct platform_device *pdev) 369static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
423 proc_priv->soc_dts = intel_soc_dts_iosf_init( 422 proc_priv->soc_dts = intel_soc_dts_iosf_init(
424 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0); 423 INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
425 424
426 if (proc_priv->soc_dts && pdev->irq) { 425 if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
427 ret = pci_enable_msi(pdev); 426 ret = pci_enable_msi(pdev);
428 if (!ret) { 427 if (!ret) {
429 ret = request_threaded_irq(pdev->irq, NULL, 428 ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
441 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n"); 440 dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
442 } 441 }
443 442
444 return 0; 443 dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
444
445 return sysfs_create_group(&pdev->dev.kobj,
446 &power_limit_attribute_group);
445} 447}
446 448
447static void proc_thermal_pci_remove(struct pci_dev *pdev) 449static void proc_thermal_pci_remove(struct pci_dev *pdev)
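[Editor's note] intel_soc_dts_iosf_init() reports failure with an ERR_PTR()-encoded errno, never NULL, so the old truthiness test `if (proc_priv->soc_dts && ...)` treated an error pointer as success. IS_ERR()/PTR_ERR() is the correct idiom for such returns; a sketch with a hypothetical demo_init():

	/* Sketch: ERR_PTR-returning initializers are tested with IS_ERR(),
	 * not against NULL. demo_init() is hypothetical. */
	#include <linux/err.h>

	struct demo_ctx;
	struct demo_ctx *demo_init(void);	/* returns ERR_PTR on failure */

	static int demo_attach(void)
	{
		struct demo_ctx *ctx = demo_init();

		if (IS_ERR(ctx))
			return PTR_ERR(ctx);	/* propagate the errno */

		/* ctx is a valid pointer here */
		return 0;
	}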
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4bfdb4a1e47d..2df059cc07e2 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np)
867 867
868 ret = of_property_read_u32(np, "polling-delay-passive", &prop); 868 ret = of_property_read_u32(np, "polling-delay-passive", &prop);
869 if (ret < 0) { 869 if (ret < 0) {
870 pr_err("missing polling-delay-passive property\n"); 870 pr_err("%pOFn: missing polling-delay-passive property\n", np);
871 goto free_tz; 871 goto free_tz;
872 } 872 }
873 tz->passive_delay = prop; 873 tz->passive_delay = prop;
874 874
875 ret = of_property_read_u32(np, "polling-delay", &prop); 875 ret = of_property_read_u32(np, "polling-delay", &prop);
876 if (ret < 0) { 876 if (ret < 0) {
877 pr_err("missing polling-delay property\n"); 877 pr_err("%pOFn: missing polling-delay property\n", np);
878 goto free_tz; 878 goto free_tz;
879 } 879 }
880 tz->polling_delay = prop; 880 tz->polling_delay = prop;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 4164414d4c64..8bdf42bc8fc8 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
597 /* too large for caller's buffer */ 597 /* too large for caller's buffer */
598 ret = -EOVERFLOW; 598 ret = -EOVERFLOW;
599 } else { 599 } else {
600 __set_current_state(TASK_RUNNING);
600 if (copy_to_user(buf, rbuf->buf, rbuf->count)) 601 if (copy_to_user(buf, rbuf->buf, rbuf->count))
601 ret = -EFAULT; 602 ret = -EFAULT;
602 else 603 else
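[Editor's note] The read loop sleeps in TASK_INTERRUPTIBLE while waiting for a frame; reaching copy_to_user() — which may fault and schedule — in that state trips the "do not call blocking ops when !TASK_RUNNING" debug warning. Resetting the state first is the standard fix:

	/* Sketch: restore TASK_RUNNING before any call that may sleep. */
	#include <linux/sched.h>
	#include <linux/uaccess.h>

	static ssize_t demo_read(char __user *buf, const char *src, size_t n)
	{
		/* woken from a prepare_to_wait()-style loop; task state
		 * may still be TASK_INTERRUPTIBLE here */
		__set_current_state(TASK_RUNNING);

		if (copy_to_user(buf, src, n))	/* may fault and sleep */
			return -EFAULT;
		return n;
	}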
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 189ab1212d9a..e441221e04b9 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1070,15 +1070,16 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
1070 1070
1071 ret = 0; 1071 ret = 0;
1072 } 1072 }
1073 }
1074 1073
1075 /* Initialise interrupt backoff work if required */ 1074 /* Initialise interrupt backoff work if required */
1076 if (up->overrun_backoff_time_ms > 0) { 1075 if (up->overrun_backoff_time_ms > 0) {
1077 uart->overrun_backoff_time_ms = up->overrun_backoff_time_ms; 1076 uart->overrun_backoff_time_ms =
1078 INIT_DELAYED_WORK(&uart->overrun_backoff, 1077 up->overrun_backoff_time_ms;
1079 serial_8250_overrun_backoff_work); 1078 INIT_DELAYED_WORK(&uart->overrun_backoff,
1080 } else { 1079 serial_8250_overrun_backoff_work);
1081 uart->overrun_backoff_time_ms = 0; 1080 } else {
1081 uart->overrun_backoff_time_ms = 0;
1082 }
1082 } 1083 }
1083 1084
1084 mutex_unlock(&serial_mutex); 1085 mutex_unlock(&serial_mutex);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index e2c407656fa6..c1fdbc0b6840 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
357 if (dmacnt == 2) { 357 if (dmacnt == 2) {
358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), 358 data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma),
359 GFP_KERNEL); 359 GFP_KERNEL);
360 if (!data->dma)
361 return -ENOMEM;
362
360 data->dma->fn = mtk8250_dma_filter; 363 data->dma->fn = mtk8250_dma_filter;
361 data->dma->rx_size = MTK_UART_RX_SIZE; 364 data->dma->rx_size = MTK_UART_RX_SIZE;
362 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; 365 data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER;
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index f80a300b5d68..48bd694a5fa1 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3420,6 +3420,11 @@ static int
3420serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) 3420serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
3421{ 3421{
3422 int num_iomem, num_port, first_port = -1, i; 3422 int num_iomem, num_port, first_port = -1, i;
3423 int rc;
3424
3425 rc = serial_pci_is_class_communication(dev);
3426 if (rc)
3427 return rc;
3423 3428
3424 /* 3429 /*
3425 * Should we try to make guesses for multiport serial devices later? 3430 * Should we try to make guesses for multiport serial devices later?
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
3647 3652
3648 board = &pci_boards[ent->driver_data]; 3653 board = &pci_boards[ent->driver_data];
3649 3654
3650 rc = serial_pci_is_class_communication(dev);
3651 if (rc)
3652 return rc;
3653
3654 rc = serial_pci_is_blacklisted(dev); 3655 rc = serial_pci_is_blacklisted(dev);
3655 if (rc) 3656 if (rc)
3656 return rc; 3657 return rc;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 67b9bf3b500e..089a6f285d5e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -85,6 +85,18 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
85 with "earlycon=smh" on the kernel command line. The console is 85 with "earlycon=smh" on the kernel command line. The console is
86 enabled when early_param is processed. 86 enabled when early_param is processed.
87 87
88config SERIAL_EARLYCON_RISCV_SBI
89 bool "Early console using RISC-V SBI"
90 depends on RISCV
91 select SERIAL_CORE
92 select SERIAL_CORE_CONSOLE
93 select SERIAL_EARLYCON
94 help
95 Support for early debug console using RISC-V SBI. This enables
96 the console before standard serial driver is probed. This is enabled
97 with "earlycon=sbi" on the kernel command line. The console is
98 enabled when early_param is processed.
99
88config SERIAL_SB1250_DUART 100config SERIAL_SB1250_DUART
89 tristate "BCM1xxx on-chip DUART serial support" 101 tristate "BCM1xxx on-chip DUART serial support"
90 depends on SIBYTE_SB1xxx_SOC=y 102 depends on SIBYTE_SB1xxx_SOC=y
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8c303736b7e8..1511e8a9f856 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SERIAL_CORE) += serial_core.o
7 7
8obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o 8obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
9obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o 9obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
10obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o
10 11
11# These Sparc drivers have to appear before others such as 8250 12# These Sparc drivers have to appear before others such as 8250
12# which share ttySx minor node space. Otherwise console device 13# which share ttySx minor node space. Otherwise console device
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c
new file mode 100644
index 000000000000..ce81523c3113
--- /dev/null
+++ b/drivers/tty/serial/earlycon-riscv-sbi.c
@@ -0,0 +1,31 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * RISC-V SBI based earlycon
4 *
5 * Copyright (C) 2018 Anup Patel <anup@brainfault.org>
6 */
7#include <linux/kernel.h>
8#include <linux/console.h>
9#include <linux/init.h>
10#include <linux/serial_core.h>
11#include <asm/sbi.h>
12
13static void sbi_putc(struct uart_port *port, int c)
14{
15 sbi_console_putchar(c);
16}
17
18static void sbi_console_write(struct console *con,
19 const char *s, unsigned n)
20{
21 struct earlycon_device *dev = con->data;
22 uart_console_write(&dev->port, s, n, sbi_putc);
23}
24
25static int __init early_sbi_setup(struct earlycon_device *device,
26 const char *opt)
27{
28 device->con->write = sbi_console_write;
29 return 0;
30}
31EARLYCON_DECLARE(sbi, early_sbi_setup);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 241a48e5052c..debdd1b9e01a 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1697,7 +1697,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
1697 } 1697 }
1698 1698
1699 /* ask the core to calculate the divisor */ 1699 /* ask the core to calculate the divisor */
1700 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); 1700 baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
1701 1701
1702 spin_lock_irqsave(&sport->port.lock, flags); 1702 spin_lock_irqsave(&sport->port.lock, flags);
1703 1703
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index e052b69ceb98..9de9f0f239a1 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -114,9 +114,9 @@ struct ltq_uart_port {
114 114
115static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg) 115static inline void asc_update_bits(u32 clear, u32 set, void __iomem *reg)
116{ 116{
117 u32 tmp = readl(reg); 117 u32 tmp = __raw_readl(reg);
118 118
119 writel((tmp & ~clear) | set, reg); 119 __raw_writel((tmp & ~clear) | set, reg);
120} 120}
121 121
122static inline struct 122static inline struct
@@ -144,7 +144,7 @@ lqasc_start_tx(struct uart_port *port)
144static void 144static void
145lqasc_stop_rx(struct uart_port *port) 145lqasc_stop_rx(struct uart_port *port)
146{ 146{
147 writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); 147 __raw_writel(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
148} 148}
149 149
150static int 150static int
@@ -153,11 +153,12 @@ lqasc_rx_chars(struct uart_port *port)
153 struct tty_port *tport = &port->state->port; 153 struct tty_port *tport = &port->state->port;
154 unsigned int ch = 0, rsr = 0, fifocnt; 154 unsigned int ch = 0, rsr = 0, fifocnt;
155 155
156 fifocnt = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; 156 fifocnt = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
157 ASCFSTAT_RXFFLMASK;
157 while (fifocnt--) { 158 while (fifocnt--) {
158 u8 flag = TTY_NORMAL; 159 u8 flag = TTY_NORMAL;
159 ch = readb(port->membase + LTQ_ASC_RBUF); 160 ch = readb(port->membase + LTQ_ASC_RBUF);
160 rsr = (readl(port->membase + LTQ_ASC_STATE) 161 rsr = (__raw_readl(port->membase + LTQ_ASC_STATE)
161 & ASCSTATE_ANY) | UART_DUMMY_UER_RX; 162 & ASCSTATE_ANY) | UART_DUMMY_UER_RX;
162 tty_flip_buffer_push(tport); 163 tty_flip_buffer_push(tport);
163 port->icount.rx++; 164 port->icount.rx++;
@@ -217,7 +218,7 @@ lqasc_tx_chars(struct uart_port *port)
217 return; 218 return;
218 } 219 }
219 220
220 while (((readl(port->membase + LTQ_ASC_FSTAT) & 221 while (((__raw_readl(port->membase + LTQ_ASC_FSTAT) &
221 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { 222 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
222 if (port->x_char) { 223 if (port->x_char) {
223 writeb(port->x_char, port->membase + LTQ_ASC_TBUF); 224 writeb(port->x_char, port->membase + LTQ_ASC_TBUF);
@@ -245,7 +246,7 @@ lqasc_tx_int(int irq, void *_port)
245 unsigned long flags; 246 unsigned long flags;
246 struct uart_port *port = (struct uart_port *)_port; 247 struct uart_port *port = (struct uart_port *)_port;
247 spin_lock_irqsave(&ltq_asc_lock, flags); 248 spin_lock_irqsave(&ltq_asc_lock, flags);
248 writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); 249 __raw_writel(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
249 spin_unlock_irqrestore(&ltq_asc_lock, flags); 250 spin_unlock_irqrestore(&ltq_asc_lock, flags);
250 lqasc_start_tx(port); 251 lqasc_start_tx(port);
251 return IRQ_HANDLED; 252 return IRQ_HANDLED;
@@ -270,7 +271,7 @@ lqasc_rx_int(int irq, void *_port)
270 unsigned long flags; 271 unsigned long flags;
271 struct uart_port *port = (struct uart_port *)_port; 272 struct uart_port *port = (struct uart_port *)_port;
272 spin_lock_irqsave(&ltq_asc_lock, flags); 273 spin_lock_irqsave(&ltq_asc_lock, flags);
273 writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); 274 __raw_writel(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
274 lqasc_rx_chars(port); 275 lqasc_rx_chars(port);
275 spin_unlock_irqrestore(&ltq_asc_lock, flags); 276 spin_unlock_irqrestore(&ltq_asc_lock, flags);
276 return IRQ_HANDLED; 277 return IRQ_HANDLED;
@@ -280,7 +281,8 @@ static unsigned int
280lqasc_tx_empty(struct uart_port *port) 281lqasc_tx_empty(struct uart_port *port)
281{ 282{
282 int status; 283 int status;
283 status = readl(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; 284 status = __raw_readl(port->membase + LTQ_ASC_FSTAT) &
285 ASCFSTAT_TXFFLMASK;
284 return status ? 0 : TIOCSER_TEMT; 286 return status ? 0 : TIOCSER_TEMT;
285} 287}
286 288
@@ -313,12 +315,12 @@ lqasc_startup(struct uart_port *port)
313 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), 315 asc_update_bits(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
314 port->membase + LTQ_ASC_CLC); 316 port->membase + LTQ_ASC_CLC);
315 317
316 writel(0, port->membase + LTQ_ASC_PISEL); 318 __raw_writel(0, port->membase + LTQ_ASC_PISEL);
317 writel( 319 __raw_writel(
318 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | 320 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
319 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, 321 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
320 port->membase + LTQ_ASC_TXFCON); 322 port->membase + LTQ_ASC_TXFCON);
321 writel( 323 __raw_writel(
322 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) 324 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
323 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, 325 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
324 port->membase + LTQ_ASC_RXFCON); 326 port->membase + LTQ_ASC_RXFCON);
@@ -350,7 +352,7 @@ lqasc_startup(struct uart_port *port)
350 goto err2; 352 goto err2;
351 } 353 }
352 354
353 writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, 355 __raw_writel(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
354 port->membase + LTQ_ASC_IRNREN); 356 port->membase + LTQ_ASC_IRNREN);
355 return 0; 357 return 0;
356 358
@@ -369,7 +371,7 @@ lqasc_shutdown(struct uart_port *port)
369 free_irq(ltq_port->rx_irq, port); 371 free_irq(ltq_port->rx_irq, port);
370 free_irq(ltq_port->err_irq, port); 372 free_irq(ltq_port->err_irq, port);
371 373
372 writel(0, port->membase + LTQ_ASC_CON); 374 __raw_writel(0, port->membase + LTQ_ASC_CON);
373 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, 375 asc_update_bits(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
374 port->membase + LTQ_ASC_RXFCON); 376 port->membase + LTQ_ASC_RXFCON);
375 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, 377 asc_update_bits(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
@@ -461,13 +463,13 @@ lqasc_set_termios(struct uart_port *port,
461 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); 463 asc_update_bits(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
462 464
463 /* now we can write the new baudrate into the register */ 465 /* now we can write the new baudrate into the register */
464 writel(divisor, port->membase + LTQ_ASC_BG); 466 __raw_writel(divisor, port->membase + LTQ_ASC_BG);
465 467
466 /* turn the baudrate generator back on */ 468 /* turn the baudrate generator back on */
467 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON); 469 asc_update_bits(0, ASCCON_R, port->membase + LTQ_ASC_CON);
468 470
469 /* enable rx */ 471 /* enable rx */
470 writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); 472 __raw_writel(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
471 473
472 spin_unlock_irqrestore(&ltq_asc_lock, flags); 474 spin_unlock_irqrestore(&ltq_asc_lock, flags);
473 475
@@ -578,7 +580,7 @@ lqasc_console_putchar(struct uart_port *port, int ch)
578 return; 580 return;
579 581
580 do { 582 do {
581 fifofree = (readl(port->membase + LTQ_ASC_FSTAT) 583 fifofree = (__raw_readl(port->membase + LTQ_ASC_FSTAT)
582 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; 584 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
583 } while (fifofree == 0); 585 } while (fifofree == 0);
584 writeb(ch, port->membase + LTQ_ASC_TBUF); 586 writeb(ch, port->membase + LTQ_ASC_TBUF);
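The lantiq conversion above swaps readl()/writel() for the __raw_*() accessors, which perform native-endian accesses without the byte-swapping or implied barriers of the non-raw variants; asc_update_bits() is the usual read-modify-write helper around them. A minimal userspace model of that helper, with a plain uint32_t standing in for the MMIO register (names hypothetical, not the driver code):

#include <stdint.h>
#include <stdio.h>

static inline uint32_t raw_read32(const volatile uint32_t *reg)
{
    return *reg;
}

static inline void raw_write32(uint32_t val, volatile uint32_t *reg)
{
    *reg = val;
}

/* Clear 'clear' bits, then set 'set' bits, in one read-modify-write. */
static void update_bits(uint32_t clear, uint32_t set, volatile uint32_t *reg)
{
    uint32_t tmp = raw_read32(reg);

    raw_write32((tmp & ~clear) | set, reg);
}

int main(void)
{
    uint32_t fake_reg = 0xF0F0;

    update_bits(0x00F0, 0x000A, &fake_reg);
    printf("0x%04X\n", (unsigned)fake_reg); /* 0xF00A */
    return 0;
}

The raw accessors are only safe where the bus is native-endian and ordering is otherwise guaranteed, which appears to be the premise of this patch.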
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index a72d6d9fb983..38016609c7fa 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -225,7 +225,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
225 unsigned int mctrl = TIOCM_DSR | TIOCM_CAR; 225 unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
226 u32 geni_ios; 226 u32 geni_ios;
227 227
228 if (uart_console(uport) || !uart_cts_enabled(uport)) { 228 if (uart_console(uport)) {
229 mctrl |= TIOCM_CTS; 229 mctrl |= TIOCM_CTS;
230 } else { 230 } else {
231 geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS); 231 geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -241,7 +241,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
241{ 241{
242 u32 uart_manual_rfr = 0; 242 u32 uart_manual_rfr = 0;
243 243
244 if (uart_console(uport) || !uart_cts_enabled(uport)) 244 if (uart_console(uport))
245 return; 245 return;
246 246
247 if (!(mctrl & TIOCM_RTS)) 247 if (!(mctrl & TIOCM_RTS))
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d4cca5bdaf1c..556f50aa1b58 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
130 struct uart_port *port; 130 struct uart_port *port;
131 unsigned long flags; 131 unsigned long flags;
132 132
133 if (!state)
134 return;
135
133 port = uart_port_lock(state, flags); 136 port = uart_port_lock(state, flags);
134 __uart_start(tty); 137 __uart_start(tty);
135 uart_port_unlock(port, flags); 138 uart_port_unlock(port, flags);
@@ -550,10 +553,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
550 int ret = 0; 553 int ret = 0;
551 554
552 circ = &state->xmit; 555 circ = &state->xmit;
553 if (!circ->buf) 556 port = uart_port_lock(state, flags);
557 if (!circ->buf) {
558 uart_port_unlock(port, flags);
554 return 0; 559 return 0;
560 }
555 561
556 port = uart_port_lock(state, flags);
557 if (port && uart_circ_chars_free(circ) != 0) { 562 if (port && uart_circ_chars_free(circ) != 0) {
558 circ->buf[circ->head] = c; 563 circ->buf[circ->head] = c;
559 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); 564 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
@@ -586,11 +591,13 @@ static int uart_write(struct tty_struct *tty,
586 return -EL3HLT; 591 return -EL3HLT;
587 } 592 }
588 593
594 port = uart_port_lock(state, flags);
589 circ = &state->xmit; 595 circ = &state->xmit;
590 if (!circ->buf) 596 if (!circ->buf) {
597 uart_port_unlock(port, flags);
591 return 0; 598 return 0;
599 }
592 600
593 port = uart_port_lock(state, flags);
594 while (port) { 601 while (port) {
595 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); 602 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
596 if (count < c) 603 if (count < c)
@@ -723,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
723 upstat_t mask = UPSTAT_SYNC_FIFO; 730 upstat_t mask = UPSTAT_SYNC_FIFO;
724 struct uart_port *port; 731 struct uart_port *port;
725 732
733 if (!state)
734 return;
735
726 port = uart_port_ref(state); 736 port = uart_port_ref(state);
727 if (!port) 737 if (!port)
728 return; 738 return;
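The serial_core hunks move the xmit-buffer NULL checks in uart_put_char() and uart_write() under the port lock; tested before locking, the buffer can be freed by a concurrent shutdown between the check and the use. The check-under-lock pattern, reduced to a pthread sketch (userspace model, not the driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *xmit_buf;

static int put_char(char c)
{
    int ret = 0;

    pthread_mutex_lock(&lock);
    if (xmit_buf) {             /* checked while holding the lock */
        xmit_buf[0] = c;
        ret = 1;
    }
    pthread_mutex_unlock(&lock);
    return ret;
}

static void shutdown_port(void)
{
    pthread_mutex_lock(&lock);
    free(xmit_buf);
    xmit_buf = NULL;            /* writers can no longer see a stale pointer */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    xmit_buf = malloc(64);
    printf("wrote: %d\n", put_char('x'));
    shutdown_port();
    printf("wrote after shutdown: %d\n", put_char('y'));
    return 0;
}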
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 8df0fd824520..64bbeb7d7e0c 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1921,7 +1921,7 @@ out_nomem:
1921 1921
1922static void sci_free_irq(struct sci_port *port) 1922static void sci_free_irq(struct sci_port *port)
1923{ 1923{
1924 int i; 1924 int i, j;
1925 1925
1926 /* 1926 /*
1927 * Intentionally in reverse order so we iterate over the muxed 1927 * Intentionally in reverse order so we iterate over the muxed
@@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
1937 if (unlikely(irq < 0)) 1937 if (unlikely(irq < 0))
1938 continue; 1938 continue;
1939 1939
1940 /* Check if already freed (irq was muxed) */
1941 for (j = 0; j < i; j++)
1942 if (port->irqs[j] == irq)
1943 j = i + 1;
1944 if (j > i)
1945 continue;
1946
1940 free_irq(port->irqs[i], port); 1947 free_irq(port->irqs[i], port);
1941 kfree(port->irqstr[i]); 1948 kfree(port->irqstr[i]);
1942 1949
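The sh-sci fix frees each muxed IRQ only once: several slots can hold the same IRQ number, and the committed code breaks out of the scan with a `j = i + 1` sentinel. The same duplicate test reads more directly as a helper; a hypothetical userspace model:

#include <stdio.h>

/* True if irqs[i] already appeared in an earlier slot. */
static int already_freed(const int *irqs, int i)
{
    int j;

    for (j = 0; j < i; j++)
        if (irqs[j] == irqs[i])
            return 1;
    return 0;
}

int main(void)
{
    int irqs[] = { 23, 23, 24, 23 }; /* hypothetical muxed ERI/RXI/TXI/BRI */
    int i;

    for (i = 0; i < 4; i++) {
        if (already_freed(irqs, i))
            continue;
        printf("free_irq(%d)\n", irqs[i]); /* 23 and 24, once each */
    }
    return 0;
}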
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index bfe9ad85b362..21ffcce16927 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1256,7 +1256,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
1256static int tty_reopen(struct tty_struct *tty) 1256static int tty_reopen(struct tty_struct *tty)
1257{ 1257{
1258 struct tty_driver *driver = tty->driver; 1258 struct tty_driver *driver = tty->driver;
1259 int retval; 1259 struct tty_ldisc *ld;
1260 int retval = 0;
1260 1261
1261 if (driver->type == TTY_DRIVER_TYPE_PTY && 1262 if (driver->type == TTY_DRIVER_TYPE_PTY &&
1262 driver->subtype == PTY_TYPE_MASTER) 1263 driver->subtype == PTY_TYPE_MASTER)
@@ -1268,13 +1269,18 @@ static int tty_reopen(struct tty_struct *tty)
1268 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) 1269 if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
1269 return -EBUSY; 1270 return -EBUSY;
1270 1271
1271 retval = tty_ldisc_lock(tty, 5 * HZ); 1272 ld = tty_ldisc_ref_wait(tty);
1272 if (retval) 1273 if (ld) {
1273 return retval; 1274 tty_ldisc_deref(ld);
1275 } else {
1276 retval = tty_ldisc_lock(tty, 5 * HZ);
1277 if (retval)
1278 return retval;
1274 1279
1275 if (!tty->ldisc) 1280 if (!tty->ldisc)
1276 retval = tty_ldisc_reinit(tty, tty->termios.c_line); 1281 retval = tty_ldisc_reinit(tty, tty->termios.c_line);
1277 tty_ldisc_unlock(tty); 1282 tty_ldisc_unlock(tty);
1283 }
1278 1284
1279 if (retval == 0) 1285 if (retval == 0)
1280 tty->count++; 1286 tty->count++;
@@ -2183,7 +2189,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
2183 ld = tty_ldisc_ref_wait(tty); 2189 ld = tty_ldisc_ref_wait(tty);
2184 if (!ld) 2190 if (!ld)
2185 return -EIO; 2191 return -EIO;
2186 ld->ops->receive_buf(tty, &ch, &mbz, 1); 2192 if (ld->ops->receive_buf)
2193 ld->ops->receive_buf(tty, &ch, &mbz, 1);
2187 tty_ldisc_deref(ld); 2194 tty_ldisc_deref(ld);
2188 return 0; 2195 return 0;
2189} 2196}
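Two independent tty_io fixes here: tty_reopen() avoids the 5-second ldisc lock wait when a line discipline is already present, and tiocsti() stops calling receive_buf() unconditionally, since a discipline may leave that op NULL. Guarding an optional function pointer in an ops table, as a self-contained sketch (types hypothetical):

#include <stdio.h>

struct ldisc_ops {
    /* optional: a discipline may leave this NULL */
    void (*receive_buf)(const char *buf, int count);
};

static void echo_rx(const char *buf, int count)
{
    printf("rx %.*s\n", count, buf);
}

static void inject(const struct ldisc_ops *ops, char ch)
{
    if (ops->receive_buf)       /* the tiocsti() guard */
        ops->receive_buf(&ch, 1);
}

int main(void)
{
    struct ldisc_ops with = { .receive_buf = echo_rx };
    struct ldisc_ops without = { 0 }; /* would have dereferenced NULL before */

    inject(&with, 'a');
    inject(&without, 'b');
    return 0;
}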
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 41ec8e5010f3..bba75560d11e 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1272,6 +1272,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1272 if (con_is_visible(vc)) 1272 if (con_is_visible(vc))
1273 update_screen(vc); 1273 update_screen(vc);
1274 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); 1274 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
1275 notify_update(vc);
1275 return err; 1276 return err;
1276} 1277}
1277 1278
@@ -2764,8 +2765,8 @@ rescan_last_byte:
2764 con_flush(vc, draw_from, draw_to, &draw_x); 2765 con_flush(vc, draw_from, draw_to, &draw_x);
2765 vc_uniscr_debug_check(vc); 2766 vc_uniscr_debug_check(vc);
2766 console_conditional_schedule(); 2767 console_conditional_schedule();
2767 console_unlock();
2768 notify_update(vc); 2768 notify_update(vc);
2769 console_unlock();
2769 return n; 2770 return n;
2770} 2771}
2771 2772
@@ -2884,8 +2885,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2884 unsigned char c; 2885 unsigned char c;
2885 static DEFINE_SPINLOCK(printing_lock); 2886 static DEFINE_SPINLOCK(printing_lock);
2886 const ushort *start; 2887 const ushort *start;
2887 ushort cnt = 0; 2888 ushort start_x, cnt;
2888 ushort myx;
2889 int kmsg_console; 2889 int kmsg_console;
2890 2890
2891 /* console busy or not yet initialized */ 2891 /* console busy or not yet initialized */
@@ -2898,10 +2898,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2898 if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) 2898 if (kmsg_console && vc_cons_allocated(kmsg_console - 1))
2899 vc = vc_cons[kmsg_console - 1].d; 2899 vc = vc_cons[kmsg_console - 1].d;
2900 2900
2901 /* read `x' only after setting currcons properly (otherwise
2902 the `x' macro will read the x of the foreground console). */
2903 myx = vc->vc_x;
2904
2905 if (!vc_cons_allocated(fg_console)) { 2901 if (!vc_cons_allocated(fg_console)) {
2906 /* impossible */ 2902 /* impossible */
2907 /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ 2903 /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */
@@ -2916,53 +2912,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
2916 hide_cursor(vc); 2912 hide_cursor(vc);
2917 2913
2918 start = (ushort *)vc->vc_pos; 2914 start = (ushort *)vc->vc_pos;
2919 2915 start_x = vc->vc_x;
2920 /* Contrived structure to try to emulate original need_wrap behaviour 2916 cnt = 0;
2921 * Problems caused when we have need_wrap set on '\n' character */
2922 while (count--) { 2917 while (count--) {
2923 c = *b++; 2918 c = *b++;
2924 if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { 2919 if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
2925 if (cnt > 0) { 2920 if (cnt && con_is_visible(vc))
2926 if (con_is_visible(vc)) 2921 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2927 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); 2922 cnt = 0;
2928 vc->vc_x += cnt;
2929 if (vc->vc_need_wrap)
2930 vc->vc_x--;
2931 cnt = 0;
2932 }
2933 if (c == 8) { /* backspace */ 2923 if (c == 8) { /* backspace */
2934 bs(vc); 2924 bs(vc);
2935 start = (ushort *)vc->vc_pos; 2925 start = (ushort *)vc->vc_pos;
2936 myx = vc->vc_x; 2926 start_x = vc->vc_x;
2937 continue; 2927 continue;
2938 } 2928 }
2939 if (c != 13) 2929 if (c != 13)
2940 lf(vc); 2930 lf(vc);
2941 cr(vc); 2931 cr(vc);
2942 start = (ushort *)vc->vc_pos; 2932 start = (ushort *)vc->vc_pos;
2943 myx = vc->vc_x; 2933 start_x = vc->vc_x;
2944 if (c == 10 || c == 13) 2934 if (c == 10 || c == 13)
2945 continue; 2935 continue;
2946 } 2936 }
2937 vc_uniscr_putc(vc, c);
2947 scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); 2938 scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos);
2948 notify_write(vc, c); 2939 notify_write(vc, c);
2949 cnt++; 2940 cnt++;
2950 if (myx == vc->vc_cols - 1) { 2941 if (vc->vc_x == vc->vc_cols - 1) {
2951 vc->vc_need_wrap = 1;
2952 continue;
2953 }
2954 vc->vc_pos += 2;
2955 myx++;
2956 }
2957 if (cnt > 0) {
2958 if (con_is_visible(vc))
2959 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
2960 vc->vc_x += cnt;
2961 if (vc->vc_x == vc->vc_cols) {
2962 vc->vc_x--;
2963 vc->vc_need_wrap = 1; 2942 vc->vc_need_wrap = 1;
2943 } else {
2944 vc->vc_pos += 2;
2945 vc->vc_x++;
2964 } 2946 }
2965 } 2947 }
2948 if (cnt && con_is_visible(vc))
2949 vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x);
2966 set_cursor(vc); 2950 set_cursor(vc);
2967 notify_update(vc); 2951 notify_update(vc);
2968 2952
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index e81de9ca8729..9b45aa422e69 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -316,7 +316,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
316 if (IS_ERR(data->usbmisc_data)) 316 if (IS_ERR(data->usbmisc_data))
317 return PTR_ERR(data->usbmisc_data); 317 return PTR_ERR(data->usbmisc_data);
318 318
319 if (of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC) { 319 if ((of_usb_get_phy_mode(dev->of_node) == USBPHY_INTERFACE_MODE_HSIC)
320 && data->usbmisc_data) {
320 pdata.flags |= CI_HDRC_IMX_IS_HSIC; 321 pdata.flags |= CI_HDRC_IMX_IS_HSIC;
321 data->usbmisc_data->hsic = 1; 322 data->usbmisc_data->hsic = 1;
322 data->pinctrl = devm_pinctrl_get(dev); 323 data->pinctrl = devm_pinctrl_get(dev);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ed8c62b2d9d1..739f8960811a 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1865,6 +1865,13 @@ static const struct usb_device_id acm_ids[] = {
1865 .driver_info = IGNORE_DEVICE, 1865 .driver_info = IGNORE_DEVICE,
1866 }, 1866 },
1867 1867
1868 { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
1869 .driver_info = SEND_ZERO_PACKET,
1870 },
1871 { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
1872 .driver_info = SEND_ZERO_PACKET,
1873 },
1874
1868 /* control interfaces without any protocol set */ 1875 /* control interfaces without any protocol set */
1869 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1876 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1870 USB_CDC_PROTO_NONE) }, 1877 USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 356b05c82dbc..f713cecc1f41 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -143,9 +143,12 @@ int usb_choose_configuration(struct usb_device *udev)
143 continue; 143 continue;
144 } 144 }
145 145
146 if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) { 146 if (i > 0 && desc && is_audio(desc)) {
147 best = c; 147 if (is_uac3_config(desc)) {
148 break; 148 best = c;
149 break;
150 }
151 continue;
149 } 152 }
150 153
151 /* From the remaining configs, choose the first one whose 154 /* From the remaining configs, choose the first one whose
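usb_choose_configuration() now splits the audio test from the UAC3 test: a UAC3 config still wins immediately, but other audio configs are skipped outright rather than falling through to the generic selection below. The control flow, modeled in plain C (struct and values hypothetical):

#include <stdio.h>

struct cfg {
    int is_audio, is_uac3;
    const char *name;
};

static const char *choose(const struct cfg *c, int n)
{
    const char *best = NULL;
    int i;

    for (i = 0; i < n; i++) {
        if (i > 0 && c[i].is_audio) {
            if (c[i].is_uac3)
                return c[i].name; /* preferred: stop looking */
            continue;             /* legacy audio: never a fallback */
        }
        if (!best)
            best = c[i].name;     /* first remaining candidate */
    }
    return best;
}

int main(void)
{
    struct cfg cfgs[] = { { 1, 0, "uac1" }, { 1, 0, "uac2" }, { 1, 1, "uac3" } };

    printf("chosen: %s\n", choose(cfgs, 3)); /* uac3 */
    return 0;
}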
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index dc7f7fd71684..c12ac56606c3 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -119,11 +119,6 @@ static const struct attribute_group ports_group = {
119 .attrs = ports_attrs, 119 .attrs = ports_attrs,
120}; 120};
121 121
122static const struct attribute_group *ports_groups[] = {
123 &ports_group,
124 NULL
125};
126
127/*************************************** 122/***************************************
128 * Adding & removing ports 123 * Adding & removing ports
129 ***************************************/ 124 ***************************************/
@@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action,
307static int usbport_trig_activate(struct led_classdev *led_cdev) 302static int usbport_trig_activate(struct led_classdev *led_cdev)
308{ 303{
309 struct usbport_trig_data *usbport_data; 304 struct usbport_trig_data *usbport_data;
305 int err;
310 306
311 usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); 307 usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL);
312 if (!usbport_data) 308 if (!usbport_data)
@@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
315 311
316 /* List of ports */ 312 /* List of ports */
317 INIT_LIST_HEAD(&usbport_data->ports); 313 INIT_LIST_HEAD(&usbport_data->ports);
314 err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group);
315 if (err)
316 goto err_free;
318 usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); 317 usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports);
319 usbport_trig_update_count(usbport_data); 318 usbport_trig_update_count(usbport_data);
320 319
@@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev)
322 usbport_data->nb.notifier_call = usbport_trig_notify; 321 usbport_data->nb.notifier_call = usbport_trig_notify;
323 led_set_trigger_data(led_cdev, usbport_data); 322 led_set_trigger_data(led_cdev, usbport_data);
324 usb_register_notify(&usbport_data->nb); 323 usb_register_notify(&usbport_data->nb);
325
326 return 0; 324 return 0;
325
326err_free:
327 kfree(usbport_data);
328 return err;
327} 329}
328 330
329static void usbport_trig_deactivate(struct led_classdev *led_cdev) 331static void usbport_trig_deactivate(struct led_classdev *led_cdev)
@@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev)
335 usbport_trig_remove_port(usbport_data, port); 337 usbport_trig_remove_port(usbport_data, port);
336 } 338 }
337 339
340 sysfs_remove_group(&led_cdev->dev->kobj, &ports_group);
341
338 usb_unregister_notify(&usbport_data->nb); 342 usb_unregister_notify(&usbport_data->nb);
339 343
340 kfree(usbport_data); 344 kfree(usbport_data);
@@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = {
344 .name = "usbport", 348 .name = "usbport",
345 .activate = usbport_trig_activate, 349 .activate = usbport_trig_activate,
346 .deactivate = usbport_trig_deactivate, 350 .deactivate = usbport_trig_deactivate,
347 .groups = ports_groups,
348}; 351};
349 352
350static int __init usbport_trig_init(void) 353static int __init usbport_trig_init(void)
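ledtrig-usbport now manages its sysfs group explicitly from activate()/deactivate() instead of through the trigger's .groups pointer, which gives activation a real error path that unwinds the allocation. The goto-based unwind convention, as a stand-alone sketch (stand-in functions, not the USB code):

#include <stdio.h>
#include <stdlib.h>

static void *trigger_data;

/* Stand-in for sysfs_create_group(); flip to 0 to see the success path. */
static int create_group(void)  { return -1; }
static void remove_group(void) { }

static int activate(void)
{
    int err;

    trigger_data = malloc(64);
    if (!trigger_data)
        return -1;

    err = create_group();
    if (err)
        goto err_free;      /* unwind newest-first, kernel style */
    return 0;

err_free:
    free(trigger_data);
    trigger_data = NULL;
    return err;
}

static void deactivate(void)
{
    remove_group();
    free(trigger_data);
}

int main(void)
{
    if (activate())
        printf("activate failed, allocation released\n");
    else
        deactivate();
    return 0;
}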
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 514c5214ddb2..8bc35d53408b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -394,7 +394,8 @@ static const struct usb_device_id usb_quirk_list[] = {
394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, 394 { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
395 395
396 /* Corsair K70 RGB */ 396 /* Corsair K70 RGB */
397 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 397 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
398 USB_QUIRK_DELAY_CTRL_MSG },
398 399
399 /* Corsair Strafe */ 400 /* Corsair Strafe */
400 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT | 401 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 68ad75a7460d..55ef3cc2701b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -261,7 +261,7 @@ static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
261 261
262 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) { 262 if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
263 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__); 263 dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
264 dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT); 264 dwc2_set_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
265 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG); 265 dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
266 } 266 }
267} 267}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index cb7fcd7c0ad8..c1e9ea621f41 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
78 for (i = 0; i < exynos->num_clks; i++) { 78 for (i = 0; i < exynos->num_clks; i++) {
79 ret = clk_prepare_enable(exynos->clks[i]); 79 ret = clk_prepare_enable(exynos->clks[i]);
80 if (ret) { 80 if (ret) {
81 while (--i > 0) 81 while (i-- > 0)
82 clk_disable_unprepare(exynos->clks[i]); 82 clk_disable_unprepare(exynos->clks[i]);
83 return ret; 83 return ret;
84 } 84 }
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev)
223 for (i = 0; i < exynos->num_clks; i++) { 223 for (i = 0; i < exynos->num_clks; i++) {
224 ret = clk_prepare_enable(exynos->clks[i]); 224 ret = clk_prepare_enable(exynos->clks[i]);
225 if (ret) { 225 if (ret) {
226 while (--i > 0) 226 while (i-- > 0)
227 clk_disable_unprepare(exynos->clks[i]); 227 clk_disable_unprepare(exynos->clks[i]);
228 return ret; 228 return ret;
229 } 229 }
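The dwc3-exynos unwind fix: with `while (--i > 0)` the clock at index 0 is never disabled, so an enable failure leaked one prepared clock; `while (i-- > 0)` walks all previously enabled entries back to index 0. The difference in isolation:

#include <stdio.h>

static void unwind_old(int i)
{
    printf("old (--i > 0):");
    while (--i > 0)
        printf(" clk[%d]", i);
    printf("\n");
}

static void unwind_new(int i)
{
    printf("new (i-- > 0):");
    while (i-- > 0)
        printf(" clk[%d]", i);
    printf("\n");
}

int main(void)
{
    /* clk_prepare_enable() failed at index 3: clks 0..2 are enabled */
    unwind_old(3); /* disables clk[2], clk[1] -- clk[0] leaks */
    unwind_new(3); /* disables clk[2], clk[1], clk[0] */
    return 0;
}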
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 07bd31bb2f8a..6c9b76bcc2e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -177,6 +177,7 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
177 req->started = false; 177 req->started = false;
178 list_del(&req->list); 178 list_del(&req->list);
179 req->remaining = 0; 179 req->remaining = 0;
180 req->needs_extra_trb = false;
180 181
181 if (req->request.status == -EINPROGRESS) 182 if (req->request.status == -EINPROGRESS)
182 req->request.status = status; 183 req->request.status = status;
@@ -1118,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
1118 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1119 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1119 unsigned int rem = length % maxp; 1120 unsigned int rem = length % maxp;
1120 1121
1121 if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { 1122 if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
1122 struct dwc3 *dwc = dep->dwc; 1123 struct dwc3 *dwc = dep->dwc;
1123 struct dwc3_trb *trb; 1124 struct dwc3_trb *trb;
1124 1125
@@ -1984,6 +1985,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
1984 1985
1985 /* begin to receive SETUP packets */ 1986 /* begin to receive SETUP packets */
1986 dwc->ep0state = EP0_SETUP_PHASE; 1987 dwc->ep0state = EP0_SETUP_PHASE;
1988 dwc->link_state = DWC3_LINK_STATE_SS_DIS;
1987 dwc3_ep0_out_start(dwc); 1989 dwc3_ep0_out_start(dwc);
1988 1990
1989 dwc3_gadget_enable_irq(dwc); 1991 dwc3_gadget_enable_irq(dwc);
@@ -3379,6 +3381,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
3379 dwc3_disconnect_gadget(dwc); 3381 dwc3_disconnect_gadget(dwc);
3380 __dwc3_gadget_stop(dwc); 3382 __dwc3_gadget_stop(dwc);
3381 3383
3384 synchronize_irq(dwc->irq_gadget);
3385
3382 return 0; 3386 return 0;
3383} 3387}
3384 3388
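Among the dwc3 gadget changes, the linear-TRB path now also bounces zero-length OUT requests: OUT buffers must be sized in multiples of wMaxPacketSize, and a zero-length request previously got no properly sized TRB at all. The new predicate, in plain C (maxp value hypothetical):

#include <stdio.h>

static int needs_bounce_trb(unsigned int len, unsigned int maxp)
{
    /* zero-length or not maxp-aligned: round up via an extra TRB */
    return len == 0 || (len % maxp) != 0;
}

int main(void)
{
    unsigned int maxp = 512;
    unsigned int cases[] = { 0, 100, 512, 1024, 1030 };
    int i;

    for (i = 0; i < 5; i++)
        printf("len=%-4u extra TRB: %d\n", cases[i],
               needs_bounce_trb(cases[i], maxp));
    return 0;
}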
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef108fb1b..ed68a4860b7d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
838 838
839 ss = kzalloc(sizeof(*ss), GFP_KERNEL); 839 ss = kzalloc(sizeof(*ss), GFP_KERNEL);
840 if (!ss) 840 if (!ss)
841 return NULL; 841 return ERR_PTR(-ENOMEM);
842 842
843 ss_opts = container_of(fi, struct f_ss_opts, func_inst); 843 ss_opts = container_of(fi, struct f_ss_opts, func_inst);
844 844
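source_sink_alloc_func() now returns ERR_PTR(-ENOMEM) rather than NULL, matching the convention that callers of an .alloc_func decode failures with IS_ERR()/PTR_ERR(). A userspace imitation of that encoding (simplified; the real macros live in include/linux/err.h):

#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
    /* the top 4095 addresses are reserved for encoded errnos */
    return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_func(int fail)
{
    void *ss = fail ? NULL : malloc(16);

    if (!ss)
        return ERR_PTR(-12); /* -ENOMEM, visible to IS_ERR() callers */
    return ss;
}

int main(void)
{
    void *p = alloc_func(1);

    if (IS_ERR(p))
        printf("alloc failed: %ld\n", PTR_ERR(p));
    else
        free(p);
    return 0;
}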
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 01b44e159623..ccbd1d34eb2a 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -172,8 +172,9 @@ static int scratchpad_setup(struct bdc *bdc)
172 /* Refer to BDC spec, Table 4 for description of SPB */ 172 /* Refer to BDC spec, Table 4 for description of SPB */
173 sp_buff_size = 1 << (sp_buff_size + 5); 173 sp_buff_size = 1 << (sp_buff_size + 5);
174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size); 174 dev_dbg(bdc->dev, "Allocating %d bytes for scratchpad\n", sp_buff_size);
175 bdc->scratchpad.buff = dma_zalloc_coherent(bdc->dev, sp_buff_size, 175 bdc->scratchpad.buff = dma_alloc_coherent(bdc->dev, sp_buff_size,
176 &bdc->scratchpad.sp_dma, GFP_KERNEL); 176 &bdc->scratchpad.sp_dma,
177 GFP_KERNEL);
177 178
178 if (!bdc->scratchpad.buff) 179 if (!bdc->scratchpad.buff)
179 goto fail; 180 goto fail;
@@ -202,11 +203,9 @@ static int setup_srr(struct bdc *bdc, int interrupter)
202 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST); 203 bdc_writel(bdc->regs, BDC_SRRINT(0), BDC_SRR_RWS | BDC_SRR_RST);
203 bdc->srr.dqp_index = 0; 204 bdc->srr.dqp_index = 0;
204 /* allocate the status report descriptors */ 205 /* allocate the status report descriptors */
205 bdc->srr.sr_bds = dma_zalloc_coherent( 206 bdc->srr.sr_bds = dma_alloc_coherent(bdc->dev,
206 bdc->dev, 207 NUM_SR_ENTRIES * sizeof(struct bdc_bd),
207 NUM_SR_ENTRIES * sizeof(struct bdc_bd), 208 &bdc->srr.dma_addr, GFP_KERNEL);
208 &bdc->srr.dma_addr,
209 GFP_KERNEL);
210 if (!bdc->srr.sr_bds) 209 if (!bdc->srr.sr_bds)
211 return -ENOMEM; 210 return -ENOMEM;
212 211
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 660878a19505..b77f3126580e 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
2083#if defined(PLX_PCI_RDK2) 2083#if defined(PLX_PCI_RDK2)
2084 /* see if PCI int for us by checking irqstat */ 2084 /* see if PCI int for us by checking irqstat */
2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); 2085 intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
2086 if (!intcsr & (1 << NET2272_PCI_IRQ)) { 2086 if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
2087 spin_unlock(&dev->lock); 2087 spin_unlock(&dev->lock);
2088 return IRQ_NONE; 2088 return IRQ_NONE;
2089 } 2089 }
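The net2272 change is a pure operator-precedence fix: `!` binds tighter than `&`, so `!intcsr & (1 << NET2272_PCI_IRQ)` computes `(!intcsr) & mask`, which is zero whenever any interrupt bit is set, and the shared-IRQ check never fired. Demonstrated below (bit position hypothetical; most compilers warn on the buggy form):

#include <stdio.h>

#define PCI_IRQ_BIT 2 /* hypothetical */

int main(void)
{
    unsigned int intcsr = 1u << PCI_IRQ_BIT; /* our interrupt is pending */

    if (!intcsr & (1 << PCI_IRQ_BIT))   /* buggy: (!intcsr) & mask == 0 */
        printf("buggy check: would handle the IRQ\n");
    else
        printf("buggy check: reports IRQ_NONE despite a pending IRQ\n");

    if (!(intcsr & (1 << PCI_IRQ_BIT))) /* fixed: test the masked bit */
        printf("fixed check: IRQ_NONE\n");
    else
        printf("fixed check: handle the IRQ\n");
    return 0;
}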
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index f26109eafdbf..66ec1fdf9fe7 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -302,3 +302,4 @@ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
302MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>"); 302MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
303MODULE_ALIAS("mv-ehci"); 303MODULE_ALIAS("mv-ehci");
304MODULE_LICENSE("GPL"); 304MODULE_LICENSE("GPL");
305MODULE_DEVICE_TABLE(of, ehci_mv_dt_ids);
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 6218bfe54f52..98deb5f64268 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -596,9 +596,9 @@ static int uhci_start(struct usb_hcd *hcd)
596 &uhci_debug_operations); 596 &uhci_debug_operations);
597#endif 597#endif
598 598
599 uhci->frame = dma_zalloc_coherent(uhci_dev(uhci), 599 uhci->frame = dma_alloc_coherent(uhci_dev(uhci),
600 UHCI_NUMFRAMES * sizeof(*uhci->frame), 600 UHCI_NUMFRAMES * sizeof(*uhci->frame),
601 &uhci->frame_dma_handle, GFP_KERNEL); 601 &uhci->frame_dma_handle, GFP_KERNEL);
602 if (!uhci->frame) { 602 if (!uhci->frame) {
603 dev_err(uhci_dev(uhci), 603 dev_err(uhci_dev(uhci),
604 "unable to allocate consistent memory for frame list\n"); 604 "unable to allocate consistent memory for frame list\n");
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 36a3eb8849f1..8067f178fa84 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1672,8 +1672,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); 1672 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
1673 for (i = 0; i < num_sp; i++) { 1673 for (i = 0; i < num_sp; i++) {
1674 dma_addr_t dma; 1674 dma_addr_t dma;
1675 void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, 1675 void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
1676 flags); 1676 flags);
1677 if (!buf) 1677 if (!buf)
1678 goto fail_sp4; 1678 goto fail_sp4;
1679 1679
@@ -1799,8 +1799,8 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
1799 struct xhci_erst_entry *entry; 1799 struct xhci_erst_entry *entry;
1800 1800
1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs; 1801 size = sizeof(struct xhci_erst_entry) * evt_ring->num_segs;
1802 erst->entries = dma_zalloc_coherent(xhci_to_hcd(xhci)->self.sysdev, 1802 erst->entries = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
1803 size, &erst->erst_dma_addr, flags); 1803 size, &erst->erst_dma_addr, flags);
1804 if (!erst->entries) 1804 if (!erst->entries)
1805 return -ENOMEM; 1805 return -ENOMEM;
1806 1806
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index eae8b1b1b45b..ffe462a657b1 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
452 } 452 }
453 453
454 if (request) { 454 if (request) {
455 u8 is_dma = 0;
456 bool short_packet = false;
457 455
458 trace_musb_req_tx(req); 456 trace_musb_req_tx(req);
459 457
460 if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 458 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
461 is_dma = 1;
462 csr |= MUSB_TXCSR_P_WZC_BITS; 459 csr |= MUSB_TXCSR_P_WZC_BITS;
463 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 460 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
464 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); 461 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
476 */ 473 */
477 if ((request->zero && request->length) 474 if ((request->zero && request->length)
478 && (request->length % musb_ep->packet_sz == 0) 475 && (request->length % musb_ep->packet_sz == 0)
479 && (request->actual == request->length)) 476 && (request->actual == request->length)) {
480 short_packet = true;
481 477
482 if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
483 (is_dma && (!dma->desired_mode ||
484 (request->actual &
485 (musb_ep->packet_sz - 1)))))
486 short_packet = true;
487
488 if (short_packet) {
489 /* 478 /*
490 * On DMA completion, FIFO may not be 479 * On DMA completion, FIFO may not be
491 * available yet... 480 * available yet...
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a688f7f87829..5fc6825745f2 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
346 channel->status = MUSB_DMA_STATUS_FREE; 346 channel->status = MUSB_DMA_STATUS_FREE;
347 347
348 /* completed */ 348 /* completed */
349 if ((devctl & MUSB_DEVCTL_HM) 349 if (musb_channel->transmit &&
350 && (musb_channel->transmit) 350 (!channel->desired_mode ||
351 && ((channel->desired_mode == 0) 351 (channel->actual_len %
352 || (channel->actual_len & 352 musb_channel->max_packet_sz))) {
353 (musb_channel->max_packet_sz - 1)))
354 ) {
355 u8 epnum = musb_channel->epnum; 353 u8 epnum = musb_channel->epnum;
356 int offset = musb->io.ep_offset(epnum, 354 int offset = musb->io.ep_offset(epnum,
357 MUSB_TXCSR); 355 MUSB_TXCSR);
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
363 */ 361 */
364 musb_ep_select(mbase, epnum); 362 musb_ep_select(mbase, epnum);
365 txcsr = musb_readw(mbase, offset); 363 txcsr = musb_readw(mbase, offset);
366 txcsr &= ~(MUSB_TXCSR_DMAENAB 364 if (channel->desired_mode == 1) {
365 txcsr &= ~(MUSB_TXCSR_DMAENAB
367 | MUSB_TXCSR_AUTOSET); 366 | MUSB_TXCSR_AUTOSET);
368 musb_writew(mbase, offset, txcsr); 367 musb_writew(mbase, offset, txcsr);
369 /* Send out the packet */ 368 /* Send out the packet */
370 txcsr &= ~MUSB_TXCSR_DMAMODE; 369 txcsr &= ~MUSB_TXCSR_DMAMODE;
370 txcsr |= MUSB_TXCSR_DMAENAB;
371 }
371 txcsr |= MUSB_TXCSR_TXPKTRDY; 372 txcsr |= MUSB_TXCSR_TXPKTRDY;
372 musb_writew(mbase, offset, txcsr); 373 musb_writew(mbase, offset, txcsr);
373 } 374 }
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index d7312eed6088..91ea3083e7ad 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@ config AB8500_USB
21 21
22config FSL_USB2_OTG 22config FSL_USB2_OTG
23 bool "Freescale USB OTG Transceiver Driver" 23 bool "Freescale USB OTG Transceiver Driver"
24 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM 24 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' 25 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
26 select USB_PHY 26 select USB_PHY
27 help 27 help
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 27bdb7222527..f5f0568d8533 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
61 if (ret) 61 if (ret)
62 return ret; 62 return ret;
63 63
64 ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
65 if (ret)
66 return ret;
67 am_phy->usb_phy_gen.phy.init = am335x_init; 64 am_phy->usb_phy_gen.phy.init = am335x_init;
68 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; 65 am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
69 66
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
82 device_set_wakeup_enable(dev, false); 79 device_set_wakeup_enable(dev, false);
83 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); 80 phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
84 81
85 return 0; 82 return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
86} 83}
87 84
88static int am335x_phy_remove(struct platform_device *pdev) 85static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 1ab2a6191013..77ef4c481f3c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1783,6 +1783,10 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
1783 int result; 1783 int result;
1784 u16 val; 1784 u16 val;
1785 1785
1786 result = usb_autopm_get_interface(serial->interface);
1787 if (result)
1788 return result;
1789
1786 val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value; 1790 val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
1787 result = usb_control_msg(serial->dev, 1791 result = usb_control_msg(serial->dev,
1788 usb_sndctrlpipe(serial->dev, 0), 1792 usb_sndctrlpipe(serial->dev, 0),
@@ -1795,6 +1799,8 @@ static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
1795 val, result); 1799 val, result);
1796 } 1800 }
1797 1801
1802 usb_autopm_put_interface(serial->interface);
1803
1798 return result; 1804 return result;
1799} 1805}
1800 1806
@@ -1846,9 +1852,15 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
1846 unsigned char *buf; 1852 unsigned char *buf;
1847 int result; 1853 int result;
1848 1854
1855 result = usb_autopm_get_interface(serial->interface);
1856 if (result)
1857 return result;
1858
1849 buf = kmalloc(1, GFP_KERNEL); 1859 buf = kmalloc(1, GFP_KERNEL);
1850 if (!buf) 1860 if (!buf) {
1861 usb_autopm_put_interface(serial->interface);
1851 return -ENOMEM; 1862 return -ENOMEM;
1863 }
1852 1864
1853 result = usb_control_msg(serial->dev, 1865 result = usb_control_msg(serial->dev,
1854 usb_rcvctrlpipe(serial->dev, 0), 1866 usb_rcvctrlpipe(serial->dev, 0),
@@ -1863,6 +1875,7 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
1863 } 1875 }
1864 1876
1865 kfree(buf); 1877 kfree(buf);
1878 usb_autopm_put_interface(serial->interface);
1866 1879
1867 return result; 1880 return result;
1868} 1881}
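Both ftdi helpers now bracket their control transfers with usb_autopm_get_interface()/usb_autopm_put_interface() so the device is resumed for the I/O, and every error return added after the get also drops the reference, including the kmalloc() failure path. The balanced acquire/release shape in isolation (stand-in functions):

#include <stdio.h>
#include <stdlib.h>

static int  get_resource(void) { puts("get"); return 0; }
static void put_resource(void) { puts("put"); }
static int  do_io(int fail)    { return fail ? -5 : 0; }

static int op(int fail_io)
{
    void *buf;
    int ret = get_resource();

    if (ret)
        return ret;       /* nothing acquired yet: plain return is fine */

    buf = malloc(1);
    if (!buf) {
        put_resource();   /* acquired: must release on *every* exit */
        return -12;
    }

    ret = do_io(fail_io);
    free(buf);
    put_resource();       /* released on success and on I/O failure */
    return ret;
}

int main(void)
{
    printf("op(0) = %d\n", op(0));
    printf("op(1) = %d\n", op(1));
    return 0;
}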
diff --git a/drivers/usb/serial/keyspan_usa26msg.h b/drivers/usb/serial/keyspan_usa26msg.h
index 09e21e84fc4e..a68f1fb25b8a 100644
--- a/drivers/usb/serial/keyspan_usa26msg.h
+++ b/drivers/usb/serial/keyspan_usa26msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa26msg.h 3 usa26msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa28msg.h b/drivers/usb/serial/keyspan_usa28msg.h
index dee454c4609a..a19f3fe5d98d 100644
--- a/drivers/usb/serial/keyspan_usa28msg.h
+++ b/drivers/usb/serial/keyspan_usa28msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa28msg.h 3 usa28msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa49msg.h b/drivers/usb/serial/keyspan_usa49msg.h
index 163b2dea2ec5..8c3970fdd868 100644
--- a/drivers/usb/serial/keyspan_usa49msg.h
+++ b/drivers/usb/serial/keyspan_usa49msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa49msg.h 3 usa49msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa67msg.h b/drivers/usb/serial/keyspan_usa67msg.h
index 20fa3e2f7187..dcf502fdbb44 100644
--- a/drivers/usb/serial/keyspan_usa67msg.h
+++ b/drivers/usb/serial/keyspan_usa67msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa67msg.h 3 usa67msg.h
3 4
diff --git a/drivers/usb/serial/keyspan_usa90msg.h b/drivers/usb/serial/keyspan_usa90msg.h
index 86708ecd8735..c4ca0f631d20 100644
--- a/drivers/usb/serial/keyspan_usa90msg.h
+++ b/drivers/usb/serial/keyspan_usa90msg.h
@@ -1,3 +1,4 @@
1/* SPDX-License-Identifier: BSD-3-Clause */
1/* 2/*
2 usa90msg.h 3 usa90msg.h
3 4
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 98e7a5df0f6d..bb3f9aa4a909 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = {
46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, 46 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, 47 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
48 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, 48 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
49 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
49 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, 50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
50 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, 51 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
51 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), 52 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 4e2554d55362..559941ca884d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -8,6 +8,7 @@
8 8
9#define PL2303_VENDOR_ID 0x067b 9#define PL2303_VENDOR_ID 0x067b
10#define PL2303_PRODUCT_ID 0x2303 10#define PL2303_PRODUCT_ID 0x2303
11#define PL2303_PRODUCT_ID_TB 0x2304
11#define PL2303_PRODUCT_ID_RSAQ2 0x04bb 12#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
12#define PL2303_PRODUCT_ID_DCU11 0x1234 13#define PL2303_PRODUCT_ID_DCU11 0x1234
13#define PL2303_PRODUCT_ID_PHAROS 0xaaa0 14#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
@@ -20,6 +21,7 @@
20#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 21#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
21#define PL2303_PRODUCT_ID_ZTEK 0xe1f1 22#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
22 23
24
23#define ATEN_VENDOR_ID 0x0557 25#define ATEN_VENDOR_ID 0x0557
24#define ATEN_VENDOR_ID2 0x0547 26#define ATEN_VENDOR_ID2 0x0547
25#define ATEN_PRODUCT_ID 0x2008 27#define ATEN_PRODUCT_ID 0x2008
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 4d0273508043..edbbb13d6de6 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS);
85/* Motorola Tetra driver */ 85/* Motorola Tetra driver */
86#define MOTOROLA_TETRA_IDS() \ 86#define MOTOROLA_TETRA_IDS() \
87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ 87 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
88 { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ 88 { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
89 { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
89DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); 90DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
90 91
91/* Novatel Wireless GPS driver */ 92/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index fde2e71a6ade..a73ea495d5a7 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -235,8 +235,12 @@ static int slave_configure(struct scsi_device *sdev)
235 if (!(us->fflags & US_FL_NEEDS_CAP16)) 235 if (!(us->fflags & US_FL_NEEDS_CAP16))
236 sdev->try_rc_10_first = 1; 236 sdev->try_rc_10_first = 1;
237 237
238 /* assume SPC3 or later devices support sense size > 18 */ 238 /*
239 if (sdev->scsi_level > SCSI_SPC_2) 239 * assume SPC3 or later devices support sense size > 18
240 * unless US_FL_BAD_SENSE quirk is specified.
241 */
242 if (sdev->scsi_level > SCSI_SPC_2 &&
243 !(us->fflags & US_FL_BAD_SENSE))
240 us->fflags |= US_FL_SANE_SENSE; 244 us->fflags |= US_FL_SANE_SENSE;
241 245
242 /* 246 /*
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index f7f83b21dc74..ea0d27a94afe 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1266,6 +1266,18 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1266 US_FL_FIX_CAPACITY ), 1266 US_FL_FIX_CAPACITY ),
1267 1267
1268/* 1268/*
1269 * Reported by Icenowy Zheng <icenowy@aosc.io>
1270 * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
1271 * that does not process read/write commands if a long sense is requested,
1272 * so force it to use 18-byte sense.
1273 */
1274UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
1275 "SMI",
1276 "SM3350 UFS-to-USB-Mass-Storage bridge",
1277 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1278 US_FL_BAD_SENSE ),
1279
1280/*
1269 * Reported by Paul Hartman <paul.hartman+linux@gmail.com> 1281 * Reported by Paul Hartman <paul.hartman+linux@gmail.com>
1270 * This card reader returns "Illegal Request, Logical Block Address 1282 * This card reader returns "Illegal Request, Logical Block Address
1271 * Out of Range" for the first READ(10) after a new card is inserted. 1283 * Out of Range" for the first READ(10) after a new card is inserted.
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4bc29b586698..f1c39a3c7534 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
2297 pdo_pps_apdo_max_voltage(snk)); 2297 pdo_pps_apdo_max_voltage(snk));
2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk); 2298 port->pps_data.max_curr = min_pps_apdo_current(src, snk);
2299 port->pps_data.out_volt = min(port->pps_data.max_volt, 2299 port->pps_data.out_volt = min(port->pps_data.max_volt,
2300 port->pps_data.out_volt); 2300 max(port->pps_data.min_volt,
2301 port->pps_data.out_volt));
2301 port->pps_data.op_curr = min(port->pps_data.max_curr, 2302 port->pps_data.op_curr = min(port->pps_data.max_curr,
2302 port->pps_data.op_curr); 2303 port->pps_data.op_curr);
2303 } 2304 }
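The tcpm hunk clamps the previously negotiated PPS output voltage into the new [min_volt, max_volt] window instead of only capping it from above, so a stale value below the new minimum is raised rather than requested verbatim. The nested min(max(...)) clamp on its own (millivolt values hypothetical):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
    unsigned int min_volt = 3300, max_volt = 11000;
    unsigned int out_volt = 2000; /* stale value below the new minimum */

    out_volt = min_u(max_volt, max_u(min_volt, out_volt));
    printf("out_volt = %u\n", out_volt); /* 3300 */
    return 0;
}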
diff --git a/drivers/usb/usbip/README b/drivers/usb/usbip/README
deleted file mode 100644
index 41a2cf2e77a6..000000000000
--- a/drivers/usb/usbip/README
+++ /dev/null
@@ -1,7 +0,0 @@
1TODO:
2 - more discussion about the protocol
3 - testing
4 - review of the userspace interface
5 - document the protocol
6
7Please send patches for this code to Greg Kroah-Hartman <greg@kroah.com>
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
index 228ccdb8d1c8..b2aa986ab9ed 100644
--- a/drivers/vfio/pci/trace.h
+++ b/drivers/vfio/pci/trace.h
@@ -1,13 +1,9 @@
1/* SPDX-License-Identifier: GPL-2.0+ */ 1/* SPDX-License-Identifier: GPL-2.0-only */
2/* 2/*
3 * VFIO PCI mmap/mmap_fault tracepoints 3 * VFIO PCI mmap/mmap_fault tracepoints
4 * 4 *
5 * Copyright (C) 2018 IBM Corp. All rights reserved. 5 * Copyright (C) 2018 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru> 6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 7 */
12 8
13#undef TRACE_SYSTEM 9#undef TRACE_SYSTEM
@@ -94,7 +90,7 @@ TRACE_EVENT(vfio_pci_npu2_mmap,
94#endif /* _TRACE_VFIO_PCI_H */ 90#endif /* _TRACE_VFIO_PCI_H */
95 91
96#undef TRACE_INCLUDE_PATH 92#undef TRACE_INCLUDE_PATH
97#define TRACE_INCLUDE_PATH . 93#define TRACE_INCLUDE_PATH ../../drivers/vfio/pci
98#undef TRACE_INCLUDE_FILE 94#undef TRACE_INCLUDE_FILE
99#define TRACE_INCLUDE_FILE trace 95#define TRACE_INCLUDE_FILE trace
100 96
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 054a2cf9dd8e..32f695ffe128 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -1,14 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0+ 1// SPDX-License-Identifier: GPL-2.0-only
2/* 2/*
3 * VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2. 3 * VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2.
4 * 4 *
5 * Copyright (C) 2018 IBM Corp. All rights reserved. 5 * Copyright (C) 2018 IBM Corp. All rights reserved.
6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru> 6 * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Register an on-GPU RAM region for cacheable access. 8 * Register an on-GPU RAM region for cacheable access.
13 * 9 *
14 * Derived from original vfio_pci_igd.c: 10 * Derived from original vfio_pci_igd.c:
@@ -178,11 +174,11 @@ static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
178 struct vfio_pci_region *region, struct vfio_info_cap *caps) 174 struct vfio_pci_region *region, struct vfio_info_cap *caps)
179{ 175{
180 struct vfio_pci_nvgpu_data *data = region->data; 176 struct vfio_pci_nvgpu_data *data = region->data;
181 struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 }; 177 struct vfio_region_info_cap_nvlink2_ssatgt cap = {
182 178 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
183 cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT; 179 .header.version = 1,
184 cap.header.version = 1; 180 .tgt = data->gpu_tgt
185 cap.tgt = data->gpu_tgt; 181 };
186 182
187 return vfio_info_add_capability(caps, &cap.header, sizeof(cap)); 183 return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
188} 184}
@@ -365,18 +361,18 @@ static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
365 struct vfio_pci_region *region, struct vfio_info_cap *caps) 361 struct vfio_pci_region *region, struct vfio_info_cap *caps)
366{ 362{
367 struct vfio_pci_npu2_data *data = region->data; 363 struct vfio_pci_npu2_data *data = region->data;
368 struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 }; 364 struct vfio_region_info_cap_nvlink2_ssatgt captgt = {
369 struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 }; 365 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT,
366 .header.version = 1,
367 .tgt = data->gpu_tgt
368 };
369 struct vfio_region_info_cap_nvlink2_lnkspd capspd = {
370 .header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD,
371 .header.version = 1,
372 .link_speed = data->link_speed
373 };
370 int ret; 374 int ret;
371 375
372 captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
373 captgt.header.version = 1;
374 captgt.tgt = data->gpu_tgt;
375
376 capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD;
377 capspd.header.version = 1;
378 capspd.link_speed = data->link_speed;
379
380 ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt)); 376 ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
381 if (ret) 377 if (ret)
382 return ret; 378 return ret;
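The nvlink2 cleanup swaps `= { 0 }` plus field assignments for designated initializers; C guarantees that members not named in the initializer list are zero-initialized, so the two forms fill the struct identically. A small comparison (struct layout hypothetical):

#include <stdio.h>

struct cap_header { unsigned short id, version; };
struct cap_tgt    { struct cap_header header; unsigned long long tgt; unsigned int pad; };

int main(void)
{
    struct cap_tgt a = { 0 };
    struct cap_tgt b = {
        .header.id      = 5,
        .header.version = 1,
        .tgt            = 0xdeadbeefULL, /* .pad left out: implicitly zero */
    };

    a.header.id = 5;
    a.header.version = 1;
    a.tgt = 0xdeadbeefULL;

    printf("equal: %d\n",
           a.header.id == b.header.id &&
           a.header.version == b.header.version &&
           a.tgt == b.tgt && a.pad == b.pad); /* 1 */
    return 0;
}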
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 7651cfb14836..73652e21efec 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -878,7 +878,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
878 return -EINVAL; 878 return -EINVAL;
879 if (!unmap->size || unmap->size & mask) 879 if (!unmap->size || unmap->size & mask)
880 return -EINVAL; 880 return -EINVAL;
881 if (unmap->iova + unmap->size < unmap->iova || 881 if (unmap->iova + unmap->size - 1 < unmap->iova ||
882 unmap->size > SIZE_MAX) 882 unmap->size > SIZE_MAX)
883 return -EINVAL; 883 return -EINVAL;
884 884
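The type1 check now compares the inclusive end of the range, `iova + size - 1`, so an unmap that ends exactly at the top of the 64-bit space is no longer misclassified as overflow, while genuinely wrapping sizes still are. Verifiable in a few lines:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* unmap the highest 4 KiB: the range ends at UINT64_MAX exactly */
    uint64_t iova = UINT64_MAX - 0xFFF, size = 0x1000;

    /* old: iova + size wraps to 0, 0 < iova, valid request rejected */
    printf("old rejects: %d\n", iova + size < iova);

    /* new: the inclusive end UINT64_MAX does not wrap */
    printf("new rejects: %d\n", iova + size - 1 < iova);

    /* a truly overflowing size is still caught */
    printf("huge size rejected: %d\n", iova + UINT64_MAX - 1 < iova);
    return 0;
}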
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 36f3d0f49e60..df51a35cf537 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1236,7 +1236,8 @@ static void handle_rx(struct vhost_net *net)
1236 if (nvq->done_idx > VHOST_NET_BATCH) 1236 if (nvq->done_idx > VHOST_NET_BATCH)
1237 vhost_net_signal_used(nvq); 1237 vhost_net_signal_used(nvq);
1238 if (unlikely(vq_log)) 1238 if (unlikely(vq_log))
1239 vhost_log_write(vq, vq_log, log, vhost_len); 1239 vhost_log_write(vq, vq_log, log, vhost_len,
1240 vq->iov, in);
1240 total_len += vhost_len; 1241 total_len += vhost_len;
1241 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { 1242 if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
1242 vhost_poll_queue(&vq->poll); 1243 vhost_poll_queue(&vq->poll);
@@ -1336,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
1336 n->vqs[i].rx_ring = NULL; 1337 n->vqs[i].rx_ring = NULL;
1337 vhost_net_buf_init(&n->vqs[i].rxq); 1338 vhost_net_buf_init(&n->vqs[i].rxq);
1338 } 1339 }
1339 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 1340 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
1341 UIO_MAXIOV + VHOST_NET_BATCH);
1340 1342
1341 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); 1343 vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
1342 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); 1344 vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 8e10ab436d1f..23593cb23dd0 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1127,16 +1127,18 @@ vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
1127 struct vhost_virtqueue *vq, 1127 struct vhost_virtqueue *vq,
1128 struct vhost_scsi_ctx *vc) 1128 struct vhost_scsi_ctx *vc)
1129{ 1129{
1130 struct virtio_scsi_ctrl_tmf_resp __user *resp;
1131 struct virtio_scsi_ctrl_tmf_resp rsp; 1130 struct virtio_scsi_ctrl_tmf_resp rsp;
1131 struct iov_iter iov_iter;
1132 int ret; 1132 int ret;
1133 1133
1134 pr_debug("%s\n", __func__); 1134 pr_debug("%s\n", __func__);
1135 memset(&rsp, 0, sizeof(rsp)); 1135 memset(&rsp, 0, sizeof(rsp));
1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED; 1136 rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
1137 resp = vq->iov[vc->out].iov_base; 1137
1138 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1138 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1139 if (!ret) 1139
1140 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1141 if (likely(ret == sizeof(rsp)))
1140 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1142 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1141 else 1143 else
1142 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n"); 1144 pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
@@ -1147,16 +1149,18 @@ vhost_scsi_send_an_resp(struct vhost_scsi *vs,
1147 struct vhost_virtqueue *vq, 1149 struct vhost_virtqueue *vq,
1148 struct vhost_scsi_ctx *vc) 1150 struct vhost_scsi_ctx *vc)
1149{ 1151{
1150 struct virtio_scsi_ctrl_an_resp __user *resp;
1151 struct virtio_scsi_ctrl_an_resp rsp; 1152 struct virtio_scsi_ctrl_an_resp rsp;
1153 struct iov_iter iov_iter;
1152 int ret; 1154 int ret;
1153 1155
1154 pr_debug("%s\n", __func__); 1156 pr_debug("%s\n", __func__);
1155 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */ 1157 memset(&rsp, 0, sizeof(rsp)); /* event_actual = 0 */
1156 rsp.response = VIRTIO_SCSI_S_OK; 1158 rsp.response = VIRTIO_SCSI_S_OK;
1157 resp = vq->iov[vc->out].iov_base; 1159
1158 ret = __copy_to_user(resp, &rsp, sizeof(rsp)); 1160 iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));
1159 if (!ret) 1161
1162 ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
1163 if (likely(ret == sizeof(rsp)))
1160 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0); 1164 vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
1161 else 1165 else
1162 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n"); 1166 pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
@@ -1623,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1623 vqs[i] = &vs->vqs[i].vq; 1627 vqs[i] = &vs->vqs[i].vq;
1624 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; 1628 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
1625 } 1629 }
1626 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); 1630 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
1627 1631
1628 vhost_scsi_init_inflight(vs, NULL); 1632 vhost_scsi_init_inflight(vs, NULL);
1629 1633
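Both vhost-scsi responses are now written through an iov_iter over the full device-writable iovec array instead of __copy_to_user() to the first segment's base, so a response that spans segments (or lands in a short first segment) is still copied correctly. A userspace stand-in for iov_iter_init() + copy_to_iter() over a struct iovec array:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy len bytes of src across an iovec array, honoring segment sizes. */
static size_t copy_to_iov(const struct iovec *iov, int n,
                          const void *src, size_t len)
{
    size_t done = 0;
    int i;

    for (i = 0; i < n && done < len; i++) {
        size_t c = iov[i].iov_len;

        if (c > len - done)
            c = len - done;
        memcpy(iov[i].iov_base, (const char *)src + done, c);
        done += c;
    }
    return done;
}

int main(void)
{
    char a[3], b[5];
    struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
    const char rsp[8] = "RESPONSE";

    size_t n = copy_to_iov(iov, 2, rsp, sizeof(rsp));
    printf("copied %zu: %.3s%.5s\n", n, a, b); /* copied 8: RESPONSE */
    return 0;
}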
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9f7942cbcbb2..24a129fcdd61 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
390 vq->indirect = kmalloc_array(UIO_MAXIOV, 390 vq->indirect = kmalloc_array(UIO_MAXIOV,
391 sizeof(*vq->indirect), 391 sizeof(*vq->indirect),
392 GFP_KERNEL); 392 GFP_KERNEL);
393 vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), 393 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
394 GFP_KERNEL); 394 GFP_KERNEL);
395 vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads), 395 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
396 GFP_KERNEL); 396 GFP_KERNEL);
397 if (!vq->indirect || !vq->log || !vq->heads) 397 if (!vq->indirect || !vq->log || !vq->heads)
398 goto err_nomem; 398 goto err_nomem;
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
414} 414}
415 415
416void vhost_dev_init(struct vhost_dev *dev, 416void vhost_dev_init(struct vhost_dev *dev,
417 struct vhost_virtqueue **vqs, int nvqs) 417 struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
418{ 418{
419 struct vhost_virtqueue *vq; 419 struct vhost_virtqueue *vq;
420 int i; 420 int i;
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev,
427 dev->iotlb = NULL; 427 dev->iotlb = NULL;
428 dev->mm = NULL; 428 dev->mm = NULL;
429 dev->worker = NULL; 429 dev->worker = NULL;
430 dev->iov_limit = iov_limit;
430 init_llist_head(&dev->work_list); 431 init_llist_head(&dev->work_list);
431 init_waitqueue_head(&dev->wait); 432 init_waitqueue_head(&dev->wait);
432 INIT_LIST_HEAD(&dev->read_list); 433 INIT_LIST_HEAD(&dev->read_list);
@@ -1034,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1034 int type, ret; 1035 int type, ret;
1035 1036
1036 ret = copy_from_iter(&type, sizeof(type), from); 1037 ret = copy_from_iter(&type, sizeof(type), from);
1037 if (ret != sizeof(type)) 1038 if (ret != sizeof(type)) {
1039 ret = -EINVAL;
1038 goto done; 1040 goto done;
1041 }
1039 1042
1040 switch (type) { 1043 switch (type) {
1041 case VHOST_IOTLB_MSG: 1044 case VHOST_IOTLB_MSG:
@@ -1054,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
1054 1057
1055 iov_iter_advance(from, offset); 1058 iov_iter_advance(from, offset);
1056 ret = copy_from_iter(&msg, sizeof(msg), from); 1059 ret = copy_from_iter(&msg, sizeof(msg), from);
1057 if (ret != sizeof(msg)) 1060 if (ret != sizeof(msg)) {
1061 ret = -EINVAL;
1058 goto done; 1062 goto done;
1063 }
1059 if (vhost_process_iotlb_msg(dev, &msg)) { 1064 if (vhost_process_iotlb_msg(dev, &msg)) {
1060 ret = -EFAULT; 1065 ret = -EFAULT;
1061 goto done; 1066 goto done;
@@ -1733,13 +1738,87 @@ static int log_write(void __user *log_base,
1733 return r; 1738 return r;
1734} 1739}
1735 1740
1741static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
1742{
1743 struct vhost_umem *umem = vq->umem;
1744 struct vhost_umem_node *u;
1745 u64 start, end, l, min;
1746 int r;
1747 bool hit = false;
1748
1749 while (len) {
1750 min = len;
1751		/* More than one GPA can be mapped into a single HVA. So
1752		 * iterate over all possible umems here to be safe.
1753 */
1754 list_for_each_entry(u, &umem->umem_list, link) {
1755 if (u->userspace_addr > hva - 1 + len ||
1756 u->userspace_addr - 1 + u->size < hva)
1757 continue;
1758 start = max(u->userspace_addr, hva);
1759 end = min(u->userspace_addr - 1 + u->size,
1760 hva - 1 + len);
1761 l = end - start + 1;
1762 r = log_write(vq->log_base,
1763 u->start + start - u->userspace_addr,
1764 l);
1765 if (r < 0)
1766 return r;
1767 hit = true;
1768 min = min(l, min);
1769 }
1770
1771 if (!hit)
1772 return -EFAULT;
1773
1774 len -= min;
1775 hva += min;
1776 }
1777
1778 return 0;
1779}
1780
1781static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
1782{
1783 struct iovec iov[64];
1784 int i, ret;
1785
1786 if (!vq->iotlb)
1787 return log_write(vq->log_base, vq->log_addr + used_offset, len);
1788
1789 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
1790 len, iov, 64, VHOST_ACCESS_WO);
1791 if (ret)
1792 return ret;
1793
1794 for (i = 0; i < ret; i++) {
1795 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1796 iov[i].iov_len);
1797 if (ret)
1798 return ret;
1799 }
1800
1801 return 0;
1802}
1803
1736int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 1804int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
1737 unsigned int log_num, u64 len) 1805 unsigned int log_num, u64 len, struct iovec *iov, int count)
1738{ 1806{
1739 int i, r; 1807 int i, r;
1740 1808
1741 /* Make sure data written is seen before log. */ 1809 /* Make sure data written is seen before log. */
1742 smp_wmb(); 1810 smp_wmb();
1811
1812 if (vq->iotlb) {
1813 for (i = 0; i < count; i++) {
1814 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
1815 iov[i].iov_len);
1816 if (r < 0)
1817 return r;
1818 }
1819 return 0;
1820 }
1821
1743 for (i = 0; i < log_num; ++i) { 1822 for (i = 0; i < log_num; ++i) {
1744 u64 l = min(log[i].len, len); 1823 u64 l = min(log[i].len, len);
1745 r = log_write(vq->log_base, log[i].addr, l); 1824 r = log_write(vq->log_base, log[i].addr, l);
@@ -1769,9 +1848,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
1769 smp_wmb(); 1848 smp_wmb();
1770 /* Log used flag write. */ 1849 /* Log used flag write. */
1771 used = &vq->used->flags; 1850 used = &vq->used->flags;
1772 log_write(vq->log_base, vq->log_addr + 1851 log_used(vq, (used - (void __user *)vq->used),
1773 (used - (void __user *)vq->used), 1852 sizeof vq->used->flags);
1774 sizeof vq->used->flags);
1775 if (vq->log_ctx) 1853 if (vq->log_ctx)
1776 eventfd_signal(vq->log_ctx, 1); 1854 eventfd_signal(vq->log_ctx, 1);
1777 } 1855 }
@@ -1789,9 +1867,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
1789 smp_wmb(); 1867 smp_wmb();
1790 /* Log avail event write */ 1868 /* Log avail event write */
1791 used = vhost_avail_event(vq); 1869 used = vhost_avail_event(vq);
1792 log_write(vq->log_base, vq->log_addr + 1870 log_used(vq, (used - (void __user *)vq->used),
1793 (used - (void __user *)vq->used), 1871 sizeof *vhost_avail_event(vq));
1794 sizeof *vhost_avail_event(vq));
1795 if (vq->log_ctx) 1872 if (vq->log_ctx)
1796 eventfd_signal(vq->log_ctx, 1); 1873 eventfd_signal(vq->log_ctx, 1);
1797 } 1874 }
@@ -2191,10 +2268,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
2191 /* Make sure data is seen before log. */ 2268 /* Make sure data is seen before log. */
2192 smp_wmb(); 2269 smp_wmb();
2193 /* Log used ring entry write. */ 2270 /* Log used ring entry write. */
2194 log_write(vq->log_base, 2271 log_used(vq, ((void __user *)used - (void __user *)vq->used),
2195 vq->log_addr + 2272 count * sizeof *used);
2196 ((void __user *)used - (void __user *)vq->used),
2197 count * sizeof *used);
2198 } 2273 }
2199 old = vq->last_used_idx; 2274 old = vq->last_used_idx;
2200 new = (vq->last_used_idx += count); 2275 new = (vq->last_used_idx += count);
@@ -2236,9 +2311,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
2236 /* Make sure used idx is seen before log. */ 2311 /* Make sure used idx is seen before log. */
2237 smp_wmb(); 2312 smp_wmb();
2238 /* Log used index update. */ 2313 /* Log used index update. */
2239 log_write(vq->log_base, 2314 log_used(vq, offsetof(struct vring_used, idx),
2240 vq->log_addr + offsetof(struct vring_used, idx), 2315 sizeof vq->used->idx);
2241 sizeof vq->used->idx);
2242 if (vq->log_ctx) 2316 if (vq->log_ctx)
2243 eventfd_signal(vq->log_ctx, 1); 2317 eventfd_signal(vq->log_ctx, 1);
2244 } 2318 }
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 466ef7542291..9490e7ddb340 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -170,9 +170,11 @@ struct vhost_dev {
170 struct list_head read_list; 170 struct list_head read_list;
171 struct list_head pending_list; 171 struct list_head pending_list;
172 wait_queue_head_t wait; 172 wait_queue_head_t wait;
173 int iov_limit;
173}; 174};
174 175
175void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); 176void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
177 int nvqs, int iov_limit);
176long vhost_dev_set_owner(struct vhost_dev *dev); 178long vhost_dev_set_owner(struct vhost_dev *dev);
177bool vhost_dev_has_owner(struct vhost_dev *dev); 179bool vhost_dev_has_owner(struct vhost_dev *dev);
178long vhost_dev_check_owner(struct vhost_dev *); 180long vhost_dev_check_owner(struct vhost_dev *);
@@ -205,7 +207,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
205bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); 207bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
206 208
207int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 209int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
208 unsigned int log_num, u64 len); 210 unsigned int log_num, u64 len,
211 struct iovec *iov, int count);
209int vq_iotlb_prefetch(struct vhost_virtqueue *vq); 212int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
210 213
211struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); 214struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bc42d38ae031..bb5fc0e9fbc2 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
531 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; 531 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
532 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; 532 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
533 533
534 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); 534 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
535 535
536 file->private_data = vsock; 536 file->private_data = vsock;
537 spin_lock_init(&vsock->send_pkt_list_lock); 537 spin_lock_init(&vsock->send_pkt_list_lock);
@@ -642,7 +642,7 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
642 hash_del_rcu(&vsock->hash); 642 hash_del_rcu(&vsock->hash);
643 643
644 vsock->guest_cid = guest_cid; 644 vsock->guest_cid = guest_cid;
645 hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid); 645 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
646 mutex_unlock(&vhost_vsock_mutex); 646 mutex_unlock(&vhost_vsock_mutex);
647 647
648 return 0; 648 return 0;
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 6d8dc2c77520..51e0c4be08df 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -174,7 +174,7 @@ static int pm860x_backlight_dt_init(struct platform_device *pdev,
174 return -ENODEV; 174 return -ENODEV;
175 } 175 }
176 for_each_child_of_node(nproot, np) { 176 for_each_child_of_node(nproot, np) {
177 if (!of_node_cmp(np->name, name)) { 177 if (of_node_name_eq(np, name)) {
178 of_property_read_u32(np, "marvell,88pm860x-iset", 178 of_property_read_u32(np, "marvell,88pm860x-iset",
179 &iset); 179 &iset);
180 data->iset = PM8606_WLED_CURRENT(iset); 180 data->iset = PM8606_WLED_CURRENT(iset);
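
of_node_name_eq() is the accessor-based replacement for open-coded node-name comparisons: it matches the node name up to the '@' unit-address separator and returns false for a NULL node. The offb.c hunk further down makes the same conversion with its prefix-matching sibling of_node_name_prefix(). A sketch of the calling convention, with a hypothetical helper name:

	/* hypothetical predicate over a DT node */
	static bool is_backlight_node(struct device_node *np)
	{
		/* matches the name before any '@unit-address'; NULL-safe */
		return of_node_name_eq(np, "backlight");
	}

	/* prefix form, as used below for the "ATY,..." model names */
	static bool is_aty_node(struct device_node *np)
	{
		return of_node_name_prefix(np, "ATY,");
	}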
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index f9ef0673a083..feb90764a811 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -30,6 +30,7 @@ struct pwm_bl_data {
30 struct device *dev; 30 struct device *dev;
31 unsigned int lth_brightness; 31 unsigned int lth_brightness;
32 unsigned int *levels; 32 unsigned int *levels;
33 bool enabled;
33 struct regulator *power_supply; 34 struct regulator *power_supply;
34 struct gpio_desc *enable_gpio; 35 struct gpio_desc *enable_gpio;
35 unsigned int scale; 36 unsigned int scale;
@@ -50,7 +51,7 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
50 int err; 51 int err;
51 52
52 pwm_get_state(pb->pwm, &state); 53 pwm_get_state(pb->pwm, &state);
53 if (state.enabled) 54 if (pb->enabled)
54 return; 55 return;
55 56
56 err = regulator_enable(pb->power_supply); 57 err = regulator_enable(pb->power_supply);
@@ -65,6 +66,8 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
65 66
66 if (pb->enable_gpio) 67 if (pb->enable_gpio)
67 gpiod_set_value_cansleep(pb->enable_gpio, 1); 68 gpiod_set_value_cansleep(pb->enable_gpio, 1);
69
70 pb->enabled = true;
68} 71}
69 72
70static void pwm_backlight_power_off(struct pwm_bl_data *pb) 73static void pwm_backlight_power_off(struct pwm_bl_data *pb)
@@ -72,7 +75,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
72 struct pwm_state state; 75 struct pwm_state state;
73 76
74 pwm_get_state(pb->pwm, &state); 77 pwm_get_state(pb->pwm, &state);
75 if (!state.enabled) 78 if (!pb->enabled)
76 return; 79 return;
77 80
78 if (pb->enable_gpio) 81 if (pb->enable_gpio)
@@ -86,6 +89,7 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
86 pwm_apply_state(pb->pwm, &state); 89 pwm_apply_state(pb->pwm, &state);
87 90
88 regulator_disable(pb->power_supply); 91 regulator_disable(pb->power_supply);
92 pb->enabled = false;
89} 93}
90 94
91static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) 95static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
@@ -269,6 +273,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
269 memset(data, 0, sizeof(*data)); 273 memset(data, 0, sizeof(*data));
270 274
271 /* 275 /*
276	 * These values are optional and default to 0; the out values
277 * are modified only if a valid u32 value can be decoded.
278 */
279 of_property_read_u32(node, "post-pwm-on-delay-ms",
280 &data->post_pwm_on_delay);
281 of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
282
283 data->enable_gpio = -EINVAL;
284
285 /*
272 * Determine the number of brightness levels, if this property is not 286 * Determine the number of brightness levels, if this property is not
273 * set a default table of brightness levels will be used. 287 * set a default table of brightness levels will be used.
274 */ 288 */
@@ -380,15 +394,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
380 data->max_brightness--; 394 data->max_brightness--;
381 } 395 }
382 396
383 /*
384 * These values are optional and set as 0 by default, the out values
385 * are modified only if a valid u32 value can be decoded.
386 */
387 of_property_read_u32(node, "post-pwm-on-delay-ms",
388 &data->post_pwm_on_delay);
389 of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
390
391 data->enable_gpio = -EINVAL;
392 return 0; 397 return 0;
393} 398}
394 399
@@ -483,6 +488,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
483 pb->check_fb = data->check_fb; 488 pb->check_fb = data->check_fb;
484 pb->exit = data->exit; 489 pb->exit = data->exit;
485 pb->dev = &pdev->dev; 490 pb->dev = &pdev->dev;
491 pb->enabled = false;
486 pb->post_pwm_on_delay = data->post_pwm_on_delay; 492 pb->post_pwm_on_delay = data->post_pwm_on_delay;
487 pb->pwm_off_delay = data->pwm_off_delay; 493 pb->pwm_off_delay = data->pwm_off_delay;
488 494
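
Two independent fixes land in pwm_bl.c. The power-on/power-off paths stop trusting pwm_get_state() to say whether the backlight is logically on, because the hardware PWM may already be running when the kernel boots (a firmware splash screen, for instance), which made pwm_backlight_power_on() return early without ever enabling the regulator or the GPIO; a driver-owned pb->enabled flag now tracks the logical state. Separately, the optional DT delay and enable-gpio defaults move ahead of the brightness-table parsing, which can return before reaching the end of the function, so those defaults are now set on every path. The guard, in sketch form:

	pwm_get_state(pb->pwm, &state);
	if (pb->enabled)	/* not state.enabled: the bootloader may
				 * have left the PWM running */
		return;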
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 09731b2f6815..c6b3bdbbdbc9 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count)
271 271
272static void vgacon_restore_screen(struct vc_data *c) 272static void vgacon_restore_screen(struct vc_data *c)
273{ 273{
274 c->vc_origin = c->vc_visible_origin;
274 vgacon_scrollback_cur->save = 0; 275 vgacon_scrollback_cur->save = 0;
275 276
276 if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { 277 if (!vga_is_gfx && !vgacon_scrollback_cur->restore) {
@@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
287 int start, end, count, soff; 288 int start, end, count, soff;
288 289
289 if (!lines) { 290 if (!lines) {
290 c->vc_visible_origin = c->vc_origin; 291 vgacon_restore_screen(c);
291 vga_set_mem_top(c);
292 return; 292 return;
293 } 293 }
294 294
@@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
298 if (!vgacon_scrollback_cur->save) { 298 if (!vgacon_scrollback_cur->save) {
299 vgacon_cursor(c, CM_ERASE); 299 vgacon_cursor(c, CM_ERASE);
300 vgacon_save_screen(c); 300 vgacon_save_screen(c);
301 c->vc_origin = (unsigned long)c->vc_screenbuf;
301 vgacon_scrollback_cur->save = 1; 302 vgacon_scrollback_cur->save = 1;
302 } 303 }
303 304
@@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines)
335 int copysize; 336 int copysize;
336 337
337 int diff = c->vc_rows - count; 338 int diff = c->vc_rows - count;
338 void *d = (void *) c->vc_origin; 339 void *d = (void *) c->vc_visible_origin;
339 void *s = (void *) c->vc_screenbuf; 340 void *s = (void *) c->vc_screenbuf;
340 341
341 count *= c->vc_size_row; 342 count *= c->vc_size_row;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 8976190b6c1f..bfa1360ec750 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -510,6 +510,13 @@ static int __init fb_console_setup(char *this_opt)
510 continue; 510 continue;
511 } 511 }
512#endif 512#endif
513
514 if (!strncmp(options, "logo-pos:", 9)) {
515 options += 9;
516 if (!strcmp(options, "center"))
517 fb_center_logo = true;
518 continue;
519 }
513 } 520 }
514 return 1; 521 return 1;
515} 522}
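
The fb_center_logo flag consumed here is defined and exported in the fbmem.c hunk below, and the FB_LOGO_CENTER Kconfig option it replaces is deleted from drivers/video/logo/Kconfig further down: logo centering becomes a boot-time decision rather than a build-time one. Usage on the kernel command line:

	fbcon=logo-pos:center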
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 558ed2ed3124..cb43a2258c51 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -53,6 +53,9 @@ EXPORT_SYMBOL(registered_fb);
53int num_registered_fb __read_mostly; 53int num_registered_fb __read_mostly;
54EXPORT_SYMBOL(num_registered_fb); 54EXPORT_SYMBOL(num_registered_fb);
55 55
56bool fb_center_logo __read_mostly;
57EXPORT_SYMBOL(fb_center_logo);
58
56static struct fb_info *get_fb_info(unsigned int idx) 59static struct fb_info *get_fb_info(unsigned int idx)
57{ 60{
58 struct fb_info *fb_info; 61 struct fb_info *fb_info;
@@ -506,8 +509,7 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
506 fb_set_logo(info, logo, logo_new, fb_logo.depth); 509 fb_set_logo(info, logo, logo_new, fb_logo.depth);
507 } 510 }
508 511
509#ifdef CONFIG_FB_LOGO_CENTER 512 if (fb_center_logo) {
510 {
511 int xres = info->var.xres; 513 int xres = info->var.xres;
512 int yres = info->var.yres; 514 int yres = info->var.yres;
513 515
@@ -520,11 +522,11 @@ static int fb_show_logo_line(struct fb_info *info, int rotate,
520 --n; 522 --n;
521 image.dx = (xres - n * (logo->width + 8) - 8) / 2; 523 image.dx = (xres - n * (logo->width + 8) - 8) / 2;
522 image.dy = y ?: (yres - logo->height) / 2; 524 image.dy = y ?: (yres - logo->height) / 2;
525 } else {
526 image.dx = 0;
527 image.dy = y;
523 } 528 }
524#else 529
525 image.dx = 0;
526 image.dy = y;
527#endif
528 image.width = logo->width; 530 image.width = logo->width;
529 image.height = logo->height; 531 image.height = logo->height;
530 532
@@ -684,9 +686,8 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
684 } 686 }
685 687
686 height = fb_logo.logo->height; 688 height = fb_logo.logo->height;
687#ifdef CONFIG_FB_LOGO_CENTER 689 if (fb_center_logo)
688 height += (yres - fb_logo.logo->height) / 2; 690 height += (yres - fb_logo.logo->height) / 2;
689#endif
690 691
691 return fb_prepare_extra_logos(info, height, yres); 692 return fb_prepare_extra_logos(info, height, yres);
692} 693}
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index a74096c53cb5..43f2a4816860 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1446,9 +1446,9 @@ static int fb_probe(struct platform_device *device)
1446 da8xx_fb_fix.line_length - 1; 1446 da8xx_fb_fix.line_length - 1;
1447 1447
1448 /* allocate palette buffer */ 1448 /* allocate palette buffer */
1449 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1449 par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
1450 &par->p_palette_base, 1450 &par->p_palette_base,
1451 GFP_KERNEL | GFP_DMA); 1451 GFP_KERNEL | GFP_DMA);
1452 if (!par->v_palette_base) { 1452 if (!par->v_palette_base) {
1453 dev_err(&device->dev, 1453 dev_err(&device->dev,
1454 "GLCD: kmalloc for palette buffer failed\n"); 1454 "GLCD: kmalloc for palette buffer failed\n");
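
dma_zalloc_coherent() was dropped tree-wide in this kernel generation because dma_alloc_coherent() itself now returns zeroed memory, making the plain call an exact replacement:

	/* dma_alloc_coherent() already zeroes the buffer on return */
	par->v_palette_base = dma_alloc_coherent(NULL, PALETTE_SIZE,
						 &par->p_palette_base,
						 GFP_KERNEL | GFP_DMA);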
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 31f769d67195..057d3cdef92e 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -318,28 +318,28 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
318} 318}
319 319
320static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp, 320static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp,
321 const char *name, unsigned long address) 321 unsigned long address)
322{ 322{
323 struct offb_par *par = (struct offb_par *) info->par; 323 struct offb_par *par = (struct offb_par *) info->par;
324 324
325 if (dp && !strncmp(name, "ATY,Rage128", 11)) { 325 if (of_node_name_prefix(dp, "ATY,Rage128")) {
326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 326 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
327 if (par->cmap_adr) 327 if (par->cmap_adr)
328 par->cmap_type = cmap_r128; 328 par->cmap_type = cmap_r128;
329 } else if (dp && (!strncmp(name, "ATY,RageM3pA", 12) 329 } else if (of_node_name_prefix(dp, "ATY,RageM3pA") ||
330 || !strncmp(name, "ATY,RageM3p12A", 14))) { 330 of_node_name_prefix(dp, "ATY,RageM3p12A")) {
331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 331 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
332 if (par->cmap_adr) 332 if (par->cmap_adr)
333 par->cmap_type = cmap_M3A; 333 par->cmap_type = cmap_M3A;
334 } else if (dp && !strncmp(name, "ATY,RageM3pB", 12)) { 334 } else if (of_node_name_prefix(dp, "ATY,RageM3pB")) {
335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); 335 par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
336 if (par->cmap_adr) 336 if (par->cmap_adr)
337 par->cmap_type = cmap_M3B; 337 par->cmap_type = cmap_M3B;
338 } else if (dp && !strncmp(name, "ATY,Rage6", 9)) { 338 } else if (of_node_name_prefix(dp, "ATY,Rage6")) {
339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff); 339 par->cmap_adr = offb_map_reg(dp, 1, 0, 0x1fff);
340 if (par->cmap_adr) 340 if (par->cmap_adr)
341 par->cmap_type = cmap_radeon; 341 par->cmap_type = cmap_radeon;
342 } else if (!strncmp(name, "ATY,", 4)) { 342 } else if (of_node_name_prefix(dp, "ATY,")) {
343 unsigned long base = address & 0xff000000UL; 343 unsigned long base = address & 0xff000000UL;
344 par->cmap_adr = 344 par->cmap_adr =
345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0; 345 ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
@@ -350,7 +350,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); 350 par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
351 if (par->cmap_adr) 351 if (par->cmap_adr)
352 par->cmap_type = cmap_gxt2000; 352 par->cmap_type = cmap_gxt2000;
353 } else if (dp && !strncmp(name, "vga,Display-", 12)) { 353 } else if (of_node_name_prefix(dp, "vga,Display-")) {
354 /* Look for AVIVO initialized by SLOF */ 354 /* Look for AVIVO initialized by SLOF */
355 struct device_node *pciparent = of_get_parent(dp); 355 struct device_node *pciparent = of_get_parent(dp);
356 const u32 *vid, *did; 356 const u32 *vid, *did;
@@ -438,7 +438,7 @@ static void __init offb_init_fb(const char *name,
438 438
439 par->cmap_type = cmap_unknown; 439 par->cmap_type = cmap_unknown;
440 if (depth == 8) 440 if (depth == 8)
441 offb_init_palette_hacks(info, dp, name, address); 441 offb_init_palette_hacks(info, dp, address);
442 else 442 else
443 fix->visual = FB_VISUAL_TRUECOLOR; 443 fix->visual = FB_VISUAL_TRUECOLOR;
444 444
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 53f93616c671..8e23160ec59f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
609 609
610 int r = 0; 610 int r = 0;
611 611
612 memset(&p, 0, sizeof(p));
613
612 switch (cmd) { 614 switch (cmd) {
613 case OMAPFB_SYNC_GFX: 615 case OMAPFB_SYNC_GFX:
614 DBG("ioctl SYNC_GFX\n"); 616 DBG("ioctl SYNC_GFX\n");
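
omapfb_ioctl() dispatches on cmd and fills only part of the ioctl argument union before some branches copy it back to userspace; zeroing the union up front keeps uninitialised stack bytes from leaking to the caller. The general pattern for ioctl reply buffers, with hypothetical names:

	/* hypothetical ioctl reply: zero everything, fill only some fields */
	struct hyp_resp { __u32 width; __u32 height; __u8 reserved[24]; } r;

	memset(&r, 0, sizeof(r));	/* padding + unset fields read as 0 */
	r.width  = 800;
	r.height = 480;
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;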
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 1e972c4e88b1..d1f6196c8b9a 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,15 +10,6 @@ menuconfig LOGO
10 10
11if LOGO 11if LOGO
12 12
13config FB_LOGO_CENTER
14 bool "Center the logo"
15 depends on FB=y
16 help
17 When this option is selected, the bootup logo is centered both
18 horizontally and vertically. If more than one logo is displayed
19 due to multiple CPUs, the collected line of logos is centered
20 as a whole.
21
22config FB_LOGO_EXTRA 13config FB_LOGO_EXTRA
23 bool 14 bool
24 depends on FB=y 15 depends on FB=y
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 728ecd1eea30..fb12fe205f86 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -61,6 +61,10 @@ enum virtio_balloon_vq {
61 VIRTIO_BALLOON_VQ_MAX 61 VIRTIO_BALLOON_VQ_MAX
62}; 62};
63 63
64enum virtio_balloon_config_read {
65 VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
66};
67
64struct virtio_balloon { 68struct virtio_balloon {
65 struct virtio_device *vdev; 69 struct virtio_device *vdev;
66 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; 70 struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
@@ -77,14 +81,20 @@ struct virtio_balloon {
77 /* Prevent updating balloon when it is being canceled. */ 81 /* Prevent updating balloon when it is being canceled. */
78 spinlock_t stop_update_lock; 82 spinlock_t stop_update_lock;
79 bool stop_update; 83 bool stop_update;
84	/* Bitmap to indicate if reading the related config fields is needed */
85 unsigned long config_read_bitmap;
80 86
81 /* The list of allocated free pages, waiting to be given back to mm */ 87 /* The list of allocated free pages, waiting to be given back to mm */
82 struct list_head free_page_list; 88 struct list_head free_page_list;
83 spinlock_t free_page_list_lock; 89 spinlock_t free_page_list_lock;
84 /* The number of free page blocks on the above list */ 90 /* The number of free page blocks on the above list */
85 unsigned long num_free_page_blocks; 91 unsigned long num_free_page_blocks;
86 /* The cmd id received from host */ 92 /*
87 u32 cmd_id_received; 93 * The cmd id received from host.
94 * Read it via virtio_balloon_cmd_id_received to get the latest value
95 * sent from host.
96 */
97 u32 cmd_id_received_cache;
88 /* The cmd id that is actively in use */ 98 /* The cmd id that is actively in use */
89 __virtio32 cmd_id_active; 99 __virtio32 cmd_id_active;
90 /* Buffer to store the stop sign */ 100 /* Buffer to store the stop sign */
@@ -390,37 +400,31 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
390 return num_returned; 400 return num_returned;
391} 401}
392 402
403static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
404{
405 if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
406 return;
407
408 /* No need to queue the work if the bit was already set. */
409 if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
410 &vb->config_read_bitmap))
411 return;
412
413 queue_work(vb->balloon_wq, &vb->report_free_page_work);
414}
415
393static void virtballoon_changed(struct virtio_device *vdev) 416static void virtballoon_changed(struct virtio_device *vdev)
394{ 417{
395 struct virtio_balloon *vb = vdev->priv; 418 struct virtio_balloon *vb = vdev->priv;
396 unsigned long flags; 419 unsigned long flags;
397 s64 diff = towards_target(vb);
398
399 if (diff) {
400 spin_lock_irqsave(&vb->stop_update_lock, flags);
401 if (!vb->stop_update)
402 queue_work(system_freezable_wq,
403 &vb->update_balloon_size_work);
404 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
405 }
406 420
407 if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) { 421 spin_lock_irqsave(&vb->stop_update_lock, flags);
408 virtio_cread(vdev, struct virtio_balloon_config, 422 if (!vb->stop_update) {
409 free_page_report_cmd_id, &vb->cmd_id_received); 423 queue_work(system_freezable_wq,
410 if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) { 424 &vb->update_balloon_size_work);
411 /* Pass ULONG_MAX to give back all the free pages */ 425 virtio_balloon_queue_free_page_work(vb);
412 return_free_pages_to_mm(vb, ULONG_MAX);
413 } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
414 vb->cmd_id_received !=
415 virtio32_to_cpu(vdev, vb->cmd_id_active)) {
416 spin_lock_irqsave(&vb->stop_update_lock, flags);
417 if (!vb->stop_update) {
418 queue_work(vb->balloon_wq,
419 &vb->report_free_page_work);
420 }
421 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
422 }
423 } 426 }
427 spin_unlock_irqrestore(&vb->stop_update_lock, flags);
424} 428}
425 429
426static void update_balloon_size(struct virtio_balloon *vb) 430static void update_balloon_size(struct virtio_balloon *vb)
@@ -527,6 +531,17 @@ static int init_vqs(struct virtio_balloon *vb)
527 return 0; 531 return 0;
528} 532}
529 533
534static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
535{
536 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
537 &vb->config_read_bitmap))
538 virtio_cread(vb->vdev, struct virtio_balloon_config,
539 free_page_report_cmd_id,
540 &vb->cmd_id_received_cache);
541
542 return vb->cmd_id_received_cache;
543}
544
530static int send_cmd_id_start(struct virtio_balloon *vb) 545static int send_cmd_id_start(struct virtio_balloon *vb)
531{ 546{
532 struct scatterlist sg; 547 struct scatterlist sg;
@@ -537,7 +552,8 @@ static int send_cmd_id_start(struct virtio_balloon *vb)
537 while (virtqueue_get_buf(vq, &unused)) 552 while (virtqueue_get_buf(vq, &unused))
538 ; 553 ;
539 554
540	vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received); 555	vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
556 virtio_balloon_cmd_id_received(vb));
541 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); 557 sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
542 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); 558 err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
543 if (!err) 559 if (!err)
@@ -620,7 +636,8 @@ static int send_free_pages(struct virtio_balloon *vb)
620 * stop the reporting. 636 * stop the reporting.
621 */ 637 */
622 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active); 638 cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
623 if (cmd_id_active != vb->cmd_id_received) 639 if (unlikely(cmd_id_active !=
640 virtio_balloon_cmd_id_received(vb)))
624 break; 641 break;
625 642
626 /* 643 /*
@@ -637,11 +654,9 @@ static int send_free_pages(struct virtio_balloon *vb)
637 return 0; 654 return 0;
638} 655}
639 656
640static void report_free_page_func(struct work_struct *work) 657static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
641{ 658{
642 int err; 659 int err;
643 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
644 report_free_page_work);
645 struct device *dev = &vb->vdev->dev; 660 struct device *dev = &vb->vdev->dev;
646 661
647 /* Start by sending the received cmd id to host with an outbuf. */ 662 /* Start by sending the received cmd id to host with an outbuf. */
@@ -659,6 +674,23 @@ static void report_free_page_func(struct work_struct *work)
659 dev_err(dev, "Failed to send a stop id, err = %d\n", err); 674 dev_err(dev, "Failed to send a stop id, err = %d\n", err);
660} 675}
661 676
677static void report_free_page_func(struct work_struct *work)
678{
679 struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
680 report_free_page_work);
681 u32 cmd_id_received;
682
683 cmd_id_received = virtio_balloon_cmd_id_received(vb);
684 if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
685 /* Pass ULONG_MAX to give back all the free pages */
686 return_free_pages_to_mm(vb, ULONG_MAX);
687 } else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
688 cmd_id_received !=
689 virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
690 virtio_balloon_report_free_page(vb);
691 }
692}
693
662#ifdef CONFIG_BALLOON_COMPACTION 694#ifdef CONFIG_BALLOON_COMPACTION
663/* 695/*
664 * virtballoon_migratepage - perform the balloon page migration on behalf of 696 * virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -885,7 +917,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
885 goto out_del_vqs; 917 goto out_del_vqs;
886 } 918 }
887 INIT_WORK(&vb->report_free_page_work, report_free_page_func); 919 INIT_WORK(&vb->report_free_page_work, report_free_page_func);
888 vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP; 920 vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
889 vb->cmd_id_active = cpu_to_virtio32(vb->vdev, 921 vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
890 VIRTIO_BALLOON_CMD_ID_STOP); 922 VIRTIO_BALLOON_CMD_ID_STOP);
891 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev, 923 vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
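
virtballoon_changed() runs from the config-changed interrupt, so the rework stops reading the free-page command id there: it only records that the field is stale in config_read_bitmap and queues the work item, and the first consumer that calls virtio_balloon_cmd_id_received() performs the actual virtio_cread() and refreshes cmd_id_received_cache. The handshake in generic form, with hypothetical names throughout:

	/* hypothetical device with one cached, lazily-read config field */
	struct hyp_dev {
		unsigned long	  dirty;	/* plays config_read_bitmap */
		u32		  cache;	/* plays cmd_id_received_cache */
		struct work_struct work;
	};
	#define HYP_CMD_ID	0

	/* config-changed interrupt: mark stale, queue only on 0->1 */
	static void hyp_config_changed(struct hyp_dev *d)
	{
		if (!test_and_set_bit(HYP_CMD_ID, &d->dirty))
			schedule_work(&d->work);
	}

	/* any consumer: re-read hardware only if marked stale */
	static u32 hyp_cmd_id(struct hyp_dev *d)
	{
		if (test_and_clear_bit(HYP_CMD_ID, &d->dirty))
			d->cache = hyp_read_hw(d);	/* hypothetical read */
		return d->cache;
	}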
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 4cd9ea5c75be..d9dd0f789279 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -468,7 +468,7 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
468{ 468{
469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 469 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0); 470 unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
471 int i, err; 471 int i, err, queue_idx = 0;
472 472
473 err = request_irq(irq, vm_interrupt, IRQF_SHARED, 473 err = request_irq(irq, vm_interrupt, IRQF_SHARED,
474 dev_name(&vdev->dev), vm_dev); 474 dev_name(&vdev->dev), vm_dev);
@@ -476,7 +476,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
476 return err; 476 return err;
477 477
478 for (i = 0; i < nvqs; ++i) { 478 for (i = 0; i < nvqs; ++i) {
479 vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i], 479 if (!names[i]) {
480 vqs[i] = NULL;
481 continue;
482 }
483
484 vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
480 ctx ? ctx[i] : false); 485 ctx ? ctx[i] : false);
481 if (IS_ERR(vqs[i])) { 486 if (IS_ERR(vqs[i])) {
482 vm_del_vqs(vdev); 487 vm_del_vqs(vdev);
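
vm_find_vqs() (and the two PCI variants changed below) let callers pass NULL entries in names[] to skip optional virtqueues, but the device-side queue index must stay contiguous across the queues actually created; using the loop index i left holes whenever a name was NULL. A separate queue_idx counter now numbers only the created queues. The shape of the fix:

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {		/* optional vq not wanted */
			vqs[i] = NULL;
			continue;		/* consumes no hw queue */
		}
		vqs[i] = setup_vq(vdev, queue_idx++, ...);  /* contiguous */
	}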
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 465a6f5142cc..d0584c040c60 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -285,7 +285,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
285{ 285{
286 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 286 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
287 u16 msix_vec; 287 u16 msix_vec;
288 int i, err, nvectors, allocated_vectors; 288 int i, err, nvectors, allocated_vectors, queue_idx = 0;
289 289
290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 290 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
291 if (!vp_dev->vqs) 291 if (!vp_dev->vqs)
@@ -321,7 +321,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
321 msix_vec = allocated_vectors++; 321 msix_vec = allocated_vectors++;
322 else 322 else
323 msix_vec = VP_MSIX_VQ_VECTOR; 323 msix_vec = VP_MSIX_VQ_VECTOR;
324 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 324 vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
325 ctx ? ctx[i] : false, 325 ctx ? ctx[i] : false,
326 msix_vec); 326 msix_vec);
327 if (IS_ERR(vqs[i])) { 327 if (IS_ERR(vqs[i])) {
@@ -356,7 +356,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
356 const char * const names[], const bool *ctx) 356 const char * const names[], const bool *ctx)
357{ 357{
358 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 358 struct virtio_pci_device *vp_dev = to_vp_device(vdev);
359 int i, err; 359 int i, err, queue_idx = 0;
360 360
361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); 361 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
362 if (!vp_dev->vqs) 362 if (!vp_dev->vqs)
@@ -374,7 +374,7 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
374 vqs[i] = NULL; 374 vqs[i] = NULL;
375 continue; 375 continue;
376 } 376 }
377 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], 377 vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
378 ctx ? ctx[i] : false, 378 ctx ? ctx[i] : false,
379 VIRTIO_MSI_NO_VECTOR); 379 VIRTIO_MSI_NO_VECTOR);
380 if (IS_ERR(vqs[i])) { 380 if (IS_ERR(vqs[i])) {
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cd7e755484e3..a0b07c331255 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -152,7 +152,12 @@ struct vring_virtqueue {
152 /* Available for packed ring */ 152 /* Available for packed ring */
153 struct { 153 struct {
154 /* Actual memory layout for this queue. */ 154 /* Actual memory layout for this queue. */
155 struct vring_packed vring; 155 struct {
156 unsigned int num;
157 struct vring_packed_desc *desc;
158 struct vring_packed_desc_event *driver;
159 struct vring_packed_desc_event *device;
160 } vring;
156 161
157 /* Driver ring wrap counter. */ 162 /* Driver ring wrap counter. */
158 bool avail_wrap_counter; 163 bool avail_wrap_counter;
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
1609 !context; 1614 !context;
1610 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 1615 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1611 1616
1617 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
1618 vq->weak_barriers = false;
1619
1612 vq->packed.ring_dma_addr = ring_dma_addr; 1620 vq->packed.ring_dma_addr = ring_dma_addr;
1613 vq->packed.driver_event_dma_addr = driver_event_dma_addr; 1621 vq->packed.driver_event_dma_addr = driver_event_dma_addr;
1614 vq->packed.device_event_dma_addr = device_event_dma_addr; 1622 vq->packed.device_event_dma_addr = device_event_dma_addr;
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
2079 !context; 2087 !context;
2080 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); 2088 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
2081 2089
2090 if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
2091 vq->weak_barriers = false;
2092
2082 vq->split.queue_dma_addr = 0; 2093 vq->split.queue_dma_addr = 0;
2083 vq->split.queue_size_in_bytes = 0; 2094 vq->split.queue_size_in_bytes = 0;
2084 2095
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev)
2213 break; 2224 break;
2214 case VIRTIO_F_RING_PACKED: 2225 case VIRTIO_F_RING_PACKED:
2215 break; 2226 break;
2227 case VIRTIO_F_ORDER_PLATFORM:
2228 break;
2216 default: 2229 default:
2217 /* We don't understand this bit. */ 2230 /* We don't understand this bit. */
2218 __virtio_clear_bit(vdev, i); 2231 __virtio_clear_bit(vdev, i);
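
VIRTIO_F_ORDER_PLATFORM tells the driver that the device does not honour the relaxed guest-only ordering virtio rings normally rely on, typically because it sits behind real DMA; both ring constructors therefore clear weak_barriers so the ring code uses mandatory memory barriers instead of the cheaper smp_*-style virt_* ones, and the feature bit is whitelisted in vring_transport_features() so it survives feature negotiation. The packed-ring struct change above is independent: the in-kernel ring layout becomes a local anonymous struct rather than the UAPI struct vring_packed, decoupling the implementation from the exported header.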
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764717c4..81208cd3f4ec 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
17#include <linux/watchdog.h> 17#include <linux/watchdog.h>
18#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/mod_devicetable.h>
20 21
21#include <asm/mach-ralink/ralink_regs.h> 22#include <asm/mach-ralink/ralink_regs.h>
22 23
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0a7d10..db7c57d82cfd 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
18#include <linux/watchdog.h> 18#include <linux/watchdog.h>
19#include <linux/moduleparam.h> 19#include <linux/moduleparam.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mod_devicetable.h>
21 22
22#include <asm/mach-ralink/ralink_regs.h> 23#include <asm/mach-ralink/ralink_regs.h>
23 24
diff --git a/drivers/watchdog/tqmx86_wdt.c b/drivers/watchdog/tqmx86_wdt.c
index 0d3a0fbbd7a5..52941207a12a 100644
--- a/drivers/watchdog/tqmx86_wdt.c
+++ b/drivers/watchdog/tqmx86_wdt.c
@@ -79,13 +79,13 @@ static int tqmx86_wdt_probe(struct platform_device *pdev)
79 return -ENOMEM; 79 return -ENOMEM;
80 80
81 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 81 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
82 if (IS_ERR(res)) 82 if (!res)
83 return PTR_ERR(res); 83 return -ENODEV;
84 84
85 priv->io_base = devm_ioport_map(&pdev->dev, res->start, 85 priv->io_base = devm_ioport_map(&pdev->dev, res->start,
86 resource_size(res)); 86 resource_size(res));
87 if (IS_ERR(priv->io_base)) 87 if (!priv->io_base)
88 return PTR_ERR(priv->io_base); 88 return -ENOMEM;
89 89
90 watchdog_set_drvdata(&priv->wdd, priv); 90 watchdog_set_drvdata(&priv->wdd, priv);
91 91
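
platform_get_resource() and devm_ioport_map() both signal failure with NULL, never with an ERR_PTR, so the old IS_ERR() checks could never fire (IS_ERR(NULL) is false) and a failed lookup fell through to a NULL dereference. The corrected contract:

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)			/* NULL on failure, never ERR_PTR */
		return -ENODEV;

	base = devm_ioport_map(&pdev->dev, res->start, resource_size(res));
	if (!base)			/* likewise NULL on failure */
		return -ENOMEM;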
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 93194f3e7540..117e76b2f939 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
1650 xen_have_vector_callback = 0; 1650 xen_have_vector_callback = 0;
1651 return; 1651 return;
1652 } 1652 }
1653 pr_info("Xen HVM callback vector for event delivery is enabled\n"); 1653 pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, 1654 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
1655 xen_hvm_callback_vector); 1655 xen_hvm_callback_vector);
1656 } 1656 }
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 2e5d845b5091..7aa64d1b119c 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
160 160
161 /* write the data, then modify the indexes */ 161 /* write the data, then modify the indexes */
162 virt_wmb(); 162 virt_wmb();
163 if (ret < 0) 163 if (ret < 0) {
164 atomic_set(&map->read, 0);
164 intf->in_error = ret; 165 intf->in_error = ret;
165 else 166 } else
166 intf->in_prod = prod + ret; 167 intf->in_prod = prod + ret;
167 /* update the indexes, then notify the other end */ 168 /* update the indexes, then notify the other end */
168 virt_wmb(); 169 virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
282static void pvcalls_sk_state_change(struct sock *sock) 283static void pvcalls_sk_state_change(struct sock *sock)
283{ 284{
284 struct sock_mapping *map = sock->sk_user_data; 285 struct sock_mapping *map = sock->sk_user_data;
285 struct pvcalls_data_intf *intf;
286 286
287 if (map == NULL) 287 if (map == NULL)
288 return; 288 return;
289 289
290 intf = map->ring; 290 atomic_inc(&map->read);
291 intf->in_error = -ENOTCONN;
292 notify_remote_via_irq(map->irq); 291 notify_remote_via_irq(map->irq);
293} 292}
294 293
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8f3e6f..8a249c95c193 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
31#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE) 31#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
32#define PVCALLS_FRONT_MAX_SPIN 5000 32#define PVCALLS_FRONT_MAX_SPIN 5000
33 33
34static struct proto pvcalls_proto = {
35 .name = "PVCalls",
36 .owner = THIS_MODULE,
37 .obj_size = sizeof(struct sock),
38};
39
34struct pvcalls_bedata { 40struct pvcalls_bedata {
35 struct xen_pvcalls_front_ring ring; 41 struct xen_pvcalls_front_ring ring;
36 grant_ref_t ref; 42 grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
335 return ret; 341 return ret;
336} 342}
337 343
344static void free_active_ring(struct sock_mapping *map)
345{
346 if (!map->active.ring)
347 return;
348
349 free_pages((unsigned long)map->active.data.in,
350 map->active.ring->ring_order);
351 free_page((unsigned long)map->active.ring);
352}
353
354static int alloc_active_ring(struct sock_mapping *map)
355{
356 void *bytes;
357
358 map->active.ring = (struct pvcalls_data_intf *)
359 get_zeroed_page(GFP_KERNEL);
360 if (!map->active.ring)
361 goto out;
362
363 map->active.ring->ring_order = PVCALLS_RING_ORDER;
364 bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
365 PVCALLS_RING_ORDER);
366 if (!bytes)
367 goto out;
368
369 map->active.data.in = bytes;
370 map->active.data.out = bytes +
371 XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
372
373 return 0;
374
375out:
376 free_active_ring(map);
377 return -ENOMEM;
378}
379
338static int create_active(struct sock_mapping *map, int *evtchn) 380static int create_active(struct sock_mapping *map, int *evtchn)
339{ 381{
340 void *bytes; 382 void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
343 *evtchn = -1; 385 *evtchn = -1;
344 init_waitqueue_head(&map->active.inflight_conn_req); 386 init_waitqueue_head(&map->active.inflight_conn_req);
345 387
346 map->active.ring = (struct pvcalls_data_intf *) 388 bytes = map->active.data.in;
347 __get_free_page(GFP_KERNEL | __GFP_ZERO);
348 if (map->active.ring == NULL)
349 goto out_error;
350 map->active.ring->ring_order = PVCALLS_RING_ORDER;
351 bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
352 PVCALLS_RING_ORDER);
353 if (bytes == NULL)
354 goto out_error;
355 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++) 389 for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
356 map->active.ring->ref[i] = gnttab_grant_foreign_access( 390 map->active.ring->ref[i] = gnttab_grant_foreign_access(
357 pvcalls_front_dev->otherend_id, 391 pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
361 pvcalls_front_dev->otherend_id, 395 pvcalls_front_dev->otherend_id,
362 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); 396 pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
363 397
364 map->active.data.in = bytes;
365 map->active.data.out = bytes +
366 XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
367
368 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn); 398 ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
369 if (ret) 399 if (ret)
370 goto out_error; 400 goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
385out_error: 415out_error:
386 if (*evtchn >= 0) 416 if (*evtchn >= 0)
387 xenbus_free_evtchn(pvcalls_front_dev, *evtchn); 417 xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
388 free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
389 free_page((unsigned long)map->active.ring);
390 return ret; 418 return ret;
391} 419}
392 420
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
406 return PTR_ERR(map); 434 return PTR_ERR(map);
407 435
408 bedata = dev_get_drvdata(&pvcalls_front_dev->dev); 436 bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
437 ret = alloc_active_ring(map);
438 if (ret < 0) {
439 pvcalls_exit_sock(sock);
440 return ret;
441 }
409 442
410 spin_lock(&bedata->socket_lock); 443 spin_lock(&bedata->socket_lock);
411 ret = get_request(bedata, &req_id); 444 ret = get_request(bedata, &req_id);
412 if (ret < 0) { 445 if (ret < 0) {
413 spin_unlock(&bedata->socket_lock); 446 spin_unlock(&bedata->socket_lock);
447 free_active_ring(map);
414 pvcalls_exit_sock(sock); 448 pvcalls_exit_sock(sock);
415 return ret; 449 return ret;
416 } 450 }
417 ret = create_active(map, &evtchn); 451 ret = create_active(map, &evtchn);
418 if (ret < 0) { 452 if (ret < 0) {
419 spin_unlock(&bedata->socket_lock); 453 spin_unlock(&bedata->socket_lock);
454 free_active_ring(map);
420 pvcalls_exit_sock(sock); 455 pvcalls_exit_sock(sock);
421 return ret; 456 return ret;
422 } 457 }
@@ -469,8 +504,10 @@ static int __write_ring(struct pvcalls_data_intf *intf,
469 virt_mb(); 504 virt_mb();
470 505
471 size = pvcalls_queued(prod, cons, array_size); 506 size = pvcalls_queued(prod, cons, array_size);
472 if (size >= array_size) 507 if (size > array_size)
473 return -EINVAL; 508 return -EINVAL;
509 if (size == array_size)
510 return 0;
474 if (len > array_size - size) 511 if (len > array_size - size)
475 len = array_size - size; 512 len = array_size - size;
476 513
@@ -560,15 +597,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
560 error = intf->in_error; 597 error = intf->in_error;
561 /* get pointers before reading from the ring */ 598 /* get pointers before reading from the ring */
562 virt_rmb(); 599 virt_rmb();
563 if (error < 0)
564 return error;
565 600
566 size = pvcalls_queued(prod, cons, array_size); 601 size = pvcalls_queued(prod, cons, array_size);
567 masked_prod = pvcalls_mask(prod, array_size); 602 masked_prod = pvcalls_mask(prod, array_size);
568 masked_cons = pvcalls_mask(cons, array_size); 603 masked_cons = pvcalls_mask(cons, array_size);
569 604
570 if (size == 0) 605 if (size == 0)
571 return 0; 606 return error ?: size;
572 607
573 if (len > size) 608 if (len > size)
574 len = size; 609 len = size;
@@ -780,25 +815,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
780 } 815 }
781 } 816 }
782 817
783 spin_lock(&bedata->socket_lock); 818 map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
784 ret = get_request(bedata, &req_id); 819 if (map2 == NULL) {
785 if (ret < 0) {
786 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 820 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
787 (void *)&map->passive.flags); 821 (void *)&map->passive.flags);
788 spin_unlock(&bedata->socket_lock); 822 pvcalls_exit_sock(sock);
823 return -ENOMEM;
824 }
825 ret = alloc_active_ring(map2);
826 if (ret < 0) {
827 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
828 (void *)&map->passive.flags);
829 kfree(map2);
789 pvcalls_exit_sock(sock); 830 pvcalls_exit_sock(sock);
790 return ret; 831 return ret;
791 } 832 }
792 map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); 833 spin_lock(&bedata->socket_lock);
793 if (map2 == NULL) { 834 ret = get_request(bedata, &req_id);
835 if (ret < 0) {
794 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 836 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
795 (void *)&map->passive.flags); 837 (void *)&map->passive.flags);
796 spin_unlock(&bedata->socket_lock); 838 spin_unlock(&bedata->socket_lock);
839 free_active_ring(map2);
840 kfree(map2);
797 pvcalls_exit_sock(sock); 841 pvcalls_exit_sock(sock);
798 return -ENOMEM; 842 return ret;
799 } 843 }
844
800 ret = create_active(map2, &evtchn); 845 ret = create_active(map2, &evtchn);
801 if (ret < 0) { 846 if (ret < 0) {
847 free_active_ring(map2);
802 kfree(map2); 848 kfree(map2);
803 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, 849 clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
804 (void *)&map->passive.flags); 850 (void *)&map->passive.flags);
@@ -839,7 +885,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
839 885
840received: 886received:
841 map2->sock = newsock; 887 map2->sock = newsock;
842 newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL); 888 newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
843 if (!newsock->sk) { 889 if (!newsock->sk) {
844 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID; 890 bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
845 map->passive.inflight_req_id = PVCALLS_INVALID_ID; 891 map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1078,8 @@ int pvcalls_front_release(struct socket *sock)
1032 spin_lock(&bedata->socket_lock); 1078 spin_lock(&bedata->socket_lock);
1033 list_del(&map->list); 1079 list_del(&map->list);
1034 spin_unlock(&bedata->socket_lock); 1080 spin_unlock(&bedata->socket_lock);
1035 if (READ_ONCE(map->passive.inflight_req_id) != 1081 if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
1036 PVCALLS_INVALID_ID) { 1082 READ_ONCE(map->passive.inflight_req_id) != 0) {
1037 pvcalls_front_free_map(bedata, 1083 pvcalls_front_free_map(bedata,
1038 map->passive.accept_map); 1084 map->passive.accept_map);
1039 } 1085 }
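
The pvcalls-front rework pulls every sleeping allocation out from under bedata->socket_lock: the active ring is allocated by alloc_active_ring() with GFP_KERNEL before the lock is taken (and released by free_active_ring() on every error path), create_active() only grants and wires up the pre-allocated pages, and accept() allocates map2 with GFP_KERNEL instead of GFP_ATOMIC inside the lock. The accepted socket also gets a real struct sock from sk_alloc() using the pvcalls_proto added at the top of the file, rather than a bare kzalloc(), so normal sock refcounting and freeing apply. The ring helpers change semantics slightly as well: a completely full write ring now means "wrote nothing" instead of -EINVAL, and __read_ring() drains any queued bytes before surfacing intf->in_error. The locking pattern in sketch form:

	ret = alloc_active_ring(map);	/* GFP_KERNEL: may sleep, no lock held */
	if (ret < 0)
		return ret;

	spin_lock(&bedata->socket_lock);	/* no sleeping past this point */
	ret = get_request(bedata, &req_id);
	if (ret < 0) {
		spin_unlock(&bedata->socket_lock);
		free_active_ring(map);		/* undo the pre-allocation */
		return ret;
	}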
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 989cf872b98c..bb7888429be6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -645,7 +645,7 @@ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
645 void *cpu_addr, dma_addr_t dma_addr, size_t size, 645 void *cpu_addr, dma_addr_t dma_addr, size_t size,
646 unsigned long attrs) 646 unsigned long attrs)
647{ 647{
648#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 648#ifdef CONFIG_ARM
649 if (xen_get_dma_ops(dev)->mmap) 649 if (xen_get_dma_ops(dev)->mmap)
650 return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, 650 return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
651 dma_addr, size, attrs); 651 dma_addr, size, attrs);
@@ -662,7 +662,7 @@ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
662 void *cpu_addr, dma_addr_t handle, size_t size, 662 void *cpu_addr, dma_addr_t handle, size_t size,
663 unsigned long attrs) 663 unsigned long attrs)
664{ 664{
665#if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 665#ifdef CONFIG_ARM
666 if (xen_get_dma_ops(dev)->get_sgtable) { 666 if (xen_get_dma_ops(dev)->get_sgtable) {
667#if 0 667#if 0
668 /* 668 /*