Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_apd.c | 4
-rw-r--r-- drivers/acpi/acpi_lpss.c | 14
-rw-r--r-- drivers/acpi/acpi_watchdog.c | 7
-rw-r--r-- drivers/acpi/acpica/nsxfeval.c | 10
-rw-r--r-- drivers/acpi/ec.c | 56
-rw-r--r-- drivers/acpi/internal.h | 5
-rw-r--r-- drivers/acpi/nfit/core.c | 10
-rw-r--r-- drivers/acpi/numa.c | 2
-rw-r--r-- drivers/acpi/property.c | 2
-rw-r--r-- drivers/acpi/scan.c | 1
-rw-r--r-- drivers/acpi/sleep.c | 6
-rw-r--r-- drivers/acpi/spcr.c | 36
-rw-r--r-- drivers/android/binder.c | 19
-rw-r--r-- drivers/ata/Kconfig | 4
-rw-r--r-- drivers/ata/libata-core.c | 6
-rw-r--r-- drivers/ata/libata-eh.c | 7
-rw-r--r-- drivers/ata/libata-scsi.c | 6
-rw-r--r-- drivers/ata/sata_rcar.c | 8
-rw-r--r-- drivers/atm/zatm.c | 2
-rw-r--r-- drivers/base/dma-coherent.c | 164
-rw-r--r-- drivers/base/dma-mapping.c | 2
-rw-r--r-- drivers/base/firmware_class.c | 49
-rw-r--r-- drivers/base/power/domain.c | 8
-rw-r--r-- drivers/base/regmap/regmap-w1.c | 4
-rw-r--r-- drivers/block/loop.c | 42
-rw-r--r-- drivers/block/loop.h | 1
-rw-r--r-- drivers/block/nbd.c | 20
-rw-r--r-- drivers/block/sunvdc.c | 61
-rw-r--r-- drivers/block/virtio_blk.c | 23
-rw-r--r-- drivers/block/xen-blkfront.c | 31
-rw-r--r-- drivers/block/zram/zram_drv.c | 4
-rw-r--r-- drivers/bus/uniphier-system-bus.c | 14
-rw-r--r-- drivers/char/random.c | 2
-rw-r--r-- drivers/clk/clk-gemini.c | 14
-rw-r--r-- drivers/clk/keystone/sci-clk.c | 66
-rw-r--r-- drivers/clk/meson/clk-mpll.c | 7
-rw-r--r-- drivers/clk/meson/clkc.h | 1
-rw-r--r-- drivers/clk/meson/gxbb.c | 5
-rw-r--r-- drivers/clk/meson/meson8b.c | 5
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 16
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun5i.c | 2
-rw-r--r-- drivers/clk/x86/clk-pmc-atom.c | 7
-rw-r--r-- drivers/clocksource/Kconfig | 2
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r-- drivers/clocksource/em_sti.c | 11
-rw-r--r-- drivers/clocksource/timer-of.c | 16
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 32
-rw-r--r-- drivers/cpuidle/cpuidle-powernv.c | 10
-rw-r--r-- drivers/crypto/Kconfig | 2
-rw-r--r-- drivers/crypto/bcm/spu2.c | 1
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_main.c | 3
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 5
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 8
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 6
-rw-r--r-- drivers/dax/device-dax.h | 2
-rw-r--r-- drivers/dax/device.c | 33
-rw-r--r-- drivers/dax/pmem.c | 12
-rw-r--r-- drivers/dax/super.c | 6
-rw-r--r-- drivers/dma-buf/dma-fence.c | 17
-rw-r--r-- drivers/dma-buf/sync_debug.c | 2
-rw-r--r-- drivers/dma-buf/sync_file.c | 13
-rw-r--r-- drivers/dma/tegra210-adma.c | 4
-rw-r--r-- drivers/fsi/fsi-core.c | 7
-rw-r--r-- drivers/gpio/Kconfig | 1
-rw-r--r-- drivers/gpio/gpio-exar.c | 2
-rw-r--r-- drivers/gpio/gpio-lp87565.c | 46
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 2
-rw-r--r-- drivers/gpio/gpio-mxc.c | 3
-rw-r--r-- drivers/gpio/gpio-tegra.c | 6
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 10
-rw-r--r-- drivers/gpio/gpiolib.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h | 41
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 1
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 12
-rw-r--r-- drivers/gpu/drm/bridge/tc358767.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 11
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 5
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 41
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 6
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c | 14
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_mic.c | 24
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 22
-rw-r--r-- drivers/gpu/drm/i915/gvt/execlist.c | 27
-rw-r--r-- drivers/gpu/drm/i915/gvt/firmware.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gvt/gvt.h | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 38
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_clflush.c | 7
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_clflush.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 24
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_shrinker.c | 11
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 15
-rw-r--r-- drivers/gpu/drm/i915/intel_color.c | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 93
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi_vbt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 23
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_lspcon.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 8
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c | 2
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 181
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 3
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 11
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 14
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 12
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 12
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 12
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_vma.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 31
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c | 35
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kfd.c | 1
-rw-r--r-- drivers/gpu/drm/rockchip/Kconfig | 19
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 12
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 41
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 3
-rw-r--r-- drivers/gpu/drm/stm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_crtc.c | 66
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 24
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 9
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 4
-rw-r--r-- drivers/gpu/host1x/dev.c | 8
-rw-r--r-- drivers/gpu/ipu-v3/Kconfig | 1
-rw-r--r-- drivers/hid/hid-core.c | 1
-rw-r--r-- drivers/hid/hid-ids.h | 1
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 3
-rw-r--r-- drivers/hid/hid-multitouch.c | 16
-rw-r--r-- drivers/hid/hid-ortek.c | 6
-rw-r--r-- drivers/hid/usbhid/hid-core.c | 16
-rw-r--r-- drivers/hv/channel.c | 2
-rw-r--r-- drivers/hwmon/applesmc.c | 13
-rw-r--r-- drivers/i2c/busses/Kconfig | 2
-rw-r--r-- drivers/i2c/busses/i2c-aspeed.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-designware-platdrv.c | 23
-rw-r--r-- drivers/i2c/busses/i2c-designware-slave.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-simtec.c | 6
-rw-r--r-- drivers/i2c/i2c-core-acpi.c | 19
-rw-r--r-- drivers/i2c/i2c-core-base.c | 5
-rw-r--r-- drivers/i2c/i2c-core.h | 9
-rw-r--r-- drivers/i2c/muxes/Kconfig | 2
-rw-r--r-- drivers/ide/ide-timings.c | 18
-rw-r--r-- drivers/iio/accel/bmc150-accel-core.c | 9
-rw-r--r-- drivers/iio/accel/st_accel_core.c | 32
-rw-r--r-- drivers/iio/adc/aspeed_adc.c | 26
-rw-r--r-- drivers/iio/adc/axp288_adc.c | 42
-rw-r--r-- drivers/iio/adc/ina2xx-adc.c | 2
-rw-r--r-- drivers/iio/adc/stm32-adc-core.c | 10
-rw-r--r-- drivers/iio/adc/sun4i-gpadc-iio.c | 3
-rw-r--r-- drivers/iio/adc/vf610_adc.c | 2
-rw-r--r-- drivers/iio/common/hid-sensors/hid-sensor-trigger.c | 8
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_core.c | 29
-rw-r--r-- drivers/iio/imu/adis16480.c | 2
-rw-r--r-- drivers/iio/light/tsl2563.c | 2
-rw-r--r-- drivers/iio/magnetometer/st_magn_core.c | 4
-rw-r--r-- drivers/iio/pressure/bmp280-core.c | 27
-rw-r--r-- drivers/iio/pressure/bmp280.h | 5
-rw-r--r-- drivers/iio/pressure/st_pressure_core.c | 2
-rw-r--r-- drivers/iio/trigger/stm32-timer-trigger.c | 82
-rw-r--r-- drivers/infiniband/core/addr.c | 108
-rw-r--r-- drivers/infiniband/core/cma.c | 34
-rw-r--r-- drivers/infiniband/core/device.c | 5
-rw-r--r-- drivers/infiniband/core/roce_gid_mgmt.c | 11
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 51
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 5
-rw-r--r-- drivers/infiniband/core/verbs.c | 55
-rw-r--r-- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 9
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 119
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 1
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 29
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 16
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.h | 3
-rw-r--r-- drivers/infiniband/hw/cxgb3/iwch_provider.c | 9
-rw-r--r-- drivers/infiniband/hw/cxgb4/cq.c | 1
-rw-r--r-- drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 2
-rw-r--r-- drivers/infiniband/hw/hfi1/chip.c | 7
-rw-r--r-- drivers/infiniband/hw/hfi1/qp.c | 7
-rw-r--r-- drivers/infiniband/hw/hfi1/qp.h | 3
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_ah.c | 4
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 88
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 3
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_cm.c | 5
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 125
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_d.h | 4
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_main.c | 60
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_puda.c | 7
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_status.h | 2
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_uk.c | 8
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_utils.c | 60
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 19
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.h | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/cm.c | 4
-rw-r--r-- drivers/infiniband/hw/mlx4/cq.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/mcg.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
-rw-r--r-- drivers/infiniband/hw/mlx4/qp.c | 40
-rw-r--r-- drivers/infiniband/hw/mlx4/srq.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r-- drivers/infiniband/hw/mlx5/mr.c | 38
-rw-r--r-- drivers/infiniband/hw/mlx5/odp.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 1
-rw-r--r-- drivers/infiniband/hw/nes/nes_hw.c | 4
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/qedr/verbs.c | 16
-rw-r--r-- drivers/infiniband/hw/qib/qib_qp.c | 15
-rw-r--r-- drivers/infiniband/hw/qib/qib_verbs.h | 4
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 17
-rw-r--r-- drivers/infiniband/sw/rdmavt/qp.c | 52
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 3
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_resp.c | 3
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_verbs.c | 5
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib.h | 1
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 21
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 3
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_ib.c | 27
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 51
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 33
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 11
-rw-r--r-- drivers/infiniband/ulp/iser/iser_initiator.c | 6
-rw-r--r-- drivers/infiniband/ulp/iser/iser_verbs.c | 10
-rw-r--r-- drivers/input/misc/soc_button_array.c | 2
-rw-r--r-- drivers/input/mouse/alps.c | 41
-rw-r--r-- drivers/input/mouse/alps.h | 8
-rw-r--r-- drivers/input/mouse/elan_i2c_core.c | 5
-rw-r--r-- drivers/input/mouse/trackpoint.c | 7
-rw-r--r-- drivers/input/mouse/trackpoint.h | 3
-rw-r--r-- drivers/iommu/amd_iommu.c | 1
-rw-r--r-- drivers/iommu/amd_iommu_init.c | 2
-rw-r--r-- drivers/iommu/amd_iommu_types.h | 4
-rw-r--r-- drivers/iommu/arm-smmu.c | 23
-rw-r--r-- drivers/iommu/intel-iommu.c | 4
-rw-r--r-- drivers/iommu/io-pgtable-arm-v7s.c | 6
-rw-r--r-- drivers/iommu/io-pgtable-arm.c | 7
-rw-r--r-- drivers/iommu/io-pgtable.h | 9
-rw-r--r-- drivers/iommu/iommu-sysfs.c | 32
-rw-r--r-- drivers/iommu/mtk_iommu.c | 6
-rw-r--r-- drivers/iommu/mtk_iommu.h | 1
-rw-r--r-- drivers/irqchip/irq-atmel-aic-common.c | 13
-rw-r--r-- drivers/irqchip/irq-atmel-aic-common.h | 4
-rw-r--r-- drivers/irqchip/irq-atmel-aic.c | 14
-rw-r--r-- drivers/irqchip/irq-atmel-aic5.c | 4
-rw-r--r-- drivers/irqchip/irq-brcmstb-l2.c | 1
-rw-r--r-- drivers/irqchip/irq-digicolor.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-realview.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-platform-msi.c | 1
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 40
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 16
-rw-r--r-- drivers/irqchip/irq-gic.c | 14
-rw-r--r-- drivers/irqchip/irq-mips-cpu.c | 2
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r-- drivers/isdn/divert/isdn_divert.c | 25
-rw-r--r-- drivers/isdn/hardware/avm/c4.c | 2
-rw-r--r-- drivers/isdn/hardware/eicon/divasmain.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/avmfritz.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/hfcmulti.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/hfcpci.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/netjet.c | 2
-rw-r--r-- drivers/isdn/hardware/mISDN/w6692.c | 2
-rw-r--r-- drivers/isdn/hisax/config.c | 2
-rw-r--r-- drivers/isdn/hisax/hfc4s8s_l1.c | 2
-rw-r--r-- drivers/isdn/hisax/hisax_fcpcipnp.c | 2
-rw-r--r-- drivers/isdn/hysdn/hysdn_proclog.c | 28
-rw-r--r-- drivers/isdn/i4l/isdn_common.c | 1
-rw-r--r-- drivers/isdn/i4l/isdn_net.c | 5
-rw-r--r-- drivers/isdn/mISDN/fsm.c | 5
-rw-r--r-- drivers/isdn/mISDN/fsm.h | 2
-rw-r--r-- drivers/isdn/mISDN/layer1.c | 3
-rw-r--r-- drivers/isdn/mISDN/layer2.c | 15
-rw-r--r-- drivers/isdn/mISDN/tei.c | 20
-rw-r--r-- drivers/lightnvm/pblk-rb.c | 4
-rw-r--r-- drivers/lightnvm/pblk-read.c | 23
-rw-r--r-- drivers/lightnvm/pblk.h | 2
-rw-r--r-- drivers/mailbox/pcc.c | 2
-rw-r--r-- drivers/md/bitmap.c | 3
-rw-r--r-- drivers/md/dm-bufio.c | 3
-rw-r--r-- drivers/md/dm-integrity.c | 22
-rw-r--r-- drivers/md/dm-raid.c | 29
-rw-r--r-- drivers/md/dm-table.c | 35
-rw-r--r-- drivers/md/dm-verity-fec.c | 21
-rw-r--r-- drivers/md/dm-zoned-metadata.c | 12
-rw-r--r-- drivers/md/dm-zoned-reclaim.c | 2
-rw-r--r-- drivers/md/dm-zoned-target.c | 8
-rw-r--r-- drivers/md/md.c | 7
-rw-r--r-- drivers/md/md.h | 58
-rw-r--r-- drivers/md/raid1-10.c | 81
-rw-r--r-- drivers/md/raid1.c | 68
-rw-r--r-- drivers/md/raid10.c | 25
-rw-r--r-- drivers/md/raid5-cache.c | 61
-rw-r--r-- drivers/md/raid5-ppl.c | 2
-rw-r--r-- drivers/md/raid5.c | 15
-rw-r--r-- drivers/media/cec/cec-adap.c | 2
-rw-r--r-- drivers/media/cec/cec-notifier.c | 6
-rw-r--r-- drivers/media/dvb-core/dvb_ca_en50221.c | 143
-rw-r--r-- drivers/media/dvb-core/dvb_ca_en50221.h | 7
-rw-r--r-- drivers/media/dvb-frontends/cxd2841er.c | 5
-rw-r--r-- drivers/media/dvb-frontends/drx39xyj/drx_driver.h | 15
-rw-r--r-- drivers/media/dvb-frontends/lnbh25.c | 6
-rw-r--r-- drivers/media/dvb-frontends/stv0367.c | 210
-rw-r--r-- drivers/media/i2c/et8ek8/et8ek8_driver.c | 1
-rw-r--r-- drivers/media/i2c/tvp5150.c | 25
-rw-r--r-- drivers/media/pci/ddbridge/ddbridge-core.c | 102
-rw-r--r-- drivers/media/pci/ngene/ngene-core.c | 32
-rw-r--r-- drivers/media/pci/ngene/ngene-i2c.c | 6
-rw-r--r-- drivers/media/pci/ngene/ngene.h | 6
-rw-r--r-- drivers/media/pci/tw5864/tw5864-video.c | 1
-rw-r--r-- drivers/media/platform/Kconfig | 4
-rw-r--r-- drivers/media/platform/coda/coda-bit.c | 8
-rw-r--r-- drivers/media/platform/coda/coda-common.c | 4
-rw-r--r-- drivers/media/platform/coda/coda.h | 2
-rw-r--r-- drivers/media/platform/davinci/ccdc_hw_device.h | 10
-rw-r--r-- drivers/media/platform/davinci/dm355_ccdc.c | 92
-rw-r--r-- drivers/media/platform/davinci/dm644x_ccdc.c | 151
-rw-r--r-- drivers/media/platform/davinci/vpfe_capture.c | 93
-rw-r--r-- drivers/media/platform/davinci/vpif_capture.c | 2
-rw-r--r-- drivers/media/platform/davinci/vpif_display.c | 2
-rw-r--r-- drivers/media/platform/omap/omap_vout_vrfb.c | 133
-rw-r--r-- drivers/media/platform/omap/omap_voutdef.h | 6
-rw-r--r-- drivers/media/platform/qcom/venus/core.c | 18
-rw-r--r-- drivers/media/platform/qcom/venus/core.h | 1
-rw-r--r-- drivers/media/platform/qcom/venus/firmware.c | 76
-rw-r--r-- drivers/media/platform/qcom/venus/firmware.h | 5
-rw-r--r-- drivers/media/platform/qcom/venus/hfi_msgs.c | 11
-rw-r--r-- drivers/media/platform/sti/bdisp/bdisp-debug.c | 14
-rw-r--r-- drivers/media/platform/vimc/vimc-capture.c | 15
-rw-r--r-- drivers/media/platform/vimc/vimc-debayer.c | 15
-rw-r--r-- drivers/media/platform/vimc/vimc-scaler.c | 15
-rw-r--r-- drivers/media/platform/vimc/vimc-sensor.c | 15
-rw-r--r-- drivers/media/radio/radio-wl1273.c | 15
-rw-r--r-- drivers/media/rc/ir-lirc-codec.c | 2
-rw-r--r-- drivers/media/tuners/fc0011.c | 1
-rw-r--r-- drivers/media/tuners/mxl5005s.c | 2
-rw-r--r-- drivers/media/usb/au0828/au0828-input.c | 2
-rw-r--r-- drivers/media/usb/dvb-usb-v2/lmedm04.c | 10
-rw-r--r-- drivers/media/usb/dvb-usb/dib0700_core.c | 38
-rw-r--r-- drivers/media/usb/em28xx/em28xx-cards.c | 18
-rw-r--r-- drivers/media/usb/em28xx/em28xx-dvb.c | 1
-rw-r--r-- drivers/media/usb/em28xx/em28xx-i2c.c | 2
-rw-r--r-- drivers/media/usb/em28xx/em28xx-input.c | 2
-rw-r--r-- drivers/media/usb/em28xx/em28xx.h | 1
-rw-r--r-- drivers/media/usb/pulse8-cec/pulse8-cec.c | 2
-rw-r--r-- drivers/media/usb/rainshadow-cec/rainshadow-cec.c | 18
-rw-r--r-- drivers/media/usb/stkwebcam/stk-sensor.c | 32
-rw-r--r-- drivers/media/usb/stkwebcam/stk-webcam.c | 70
-rw-r--r-- drivers/media/usb/stkwebcam/stk-webcam.h | 6
-rw-r--r-- drivers/media/v4l2-core/tuner-core.c | 2
-rw-r--r-- drivers/memory/atmel-ebi.c | 10
-rw-r--r-- drivers/mfd/atmel-smc.c | 2
-rw-r--r-- drivers/mfd/da9062-core.c | 6
-rw-r--r-- drivers/misc/mei/pci-me.c | 6
-rw-r--r-- drivers/misc/mei/pci-txe.c | 6
-rw-r--r-- drivers/mmc/core/block.c | 52
-rw-r--r-- drivers/mmc/core/mmc.c | 2
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 2
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 13
-rw-r--r-- drivers/mmc/host/sdhci-of-at91.c | 35
-rw-r--r-- drivers/mmc/host/sunxi-mmc.c | 8
-rw-r--r-- drivers/mtd/mtd_blkdevs.c | 1
-rw-r--r-- drivers/mtd/nand/atmel/nand-controller.c | 15
-rw-r--r-- drivers/mtd/nand/atmel/pmecc.c | 21
-rw-r--r-- drivers/mtd/nand/nand_base.c | 13
-rw-r--r-- drivers/mtd/nand/nand_timings.c | 6
-rw-r--r-- drivers/mtd/nand/nandsim.c | 1
-rw-r--r-- drivers/mtd/nand/sunxi_nand.c | 4
-rw-r--r-- drivers/mux/Kconfig | 19
-rw-r--r-- drivers/mux/mux-core.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 17
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 1
-rw-r--r-- drivers/net/dsa/mt7530.c | 38
-rw-r--r-- drivers/net/dsa/mt7530.h | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 1
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 18
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 70
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.h | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 301
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 7
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 29
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 23
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 3
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 58
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 15
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/icm.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/icm.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/srq.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 222
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 37
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 8
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 2
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 8
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 9
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 4
-rw-r--r-- drivers/net/ethernet/sun/sunhme.h | 6
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 49
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 111
-rw-r--r-- drivers/net/ethernet/ti/cpts.h | 2
-rw-r--r-- drivers/net/ethernet/toshiba/tc35815.c | 2
-rw-r--r-- drivers/net/geneve.c | 2
-rw-r--r-- drivers/net/gtp.c | 2
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 3
-rw-r--r-- drivers/net/hyperv/netvsc.c | 3
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 43
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 14
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r-- drivers/net/irda/mcs7780.c | 16
-rw-r--r-- drivers/net/phy/Kconfig | 13
-rw-r--r-- drivers/net/phy/mdio-mux.c | 4
-rw-r--r-- drivers/net/phy/phy.c | 3
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 44
-rw-r--r-- drivers/net/ppp/pptp.c | 2
-rw-r--r-- drivers/net/team/team.c | 8
-rw-r--r-- drivers/net/tun.c | 13
-rw-r--r-- drivers/net/usb/asix.h | 1
-rw-r--r-- drivers/net/usb/asix_common.c | 53
-rw-r--r-- drivers/net/usb/asix_devices.c | 1
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 28
-rw-r--r-- drivers/net/usb/huawei_cdc_ncm.c | 6
-rw-r--r-- drivers/net/usb/lan78xx.c | 18
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 7
-rw-r--r-- drivers/net/usb/smsc95xx.c | 1
-rw-r--r-- drivers/net/virtio_net.c | 7
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 2
-rw-r--r-- drivers/net/vxlan.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/cfg/9000.c | 14
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/file.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 5
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 19
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 22
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 20
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 3
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 1
-rw-r--r-- drivers/ntb/ntb_transport.c | 6
-rw-r--r-- drivers/ntb/test/ntb_tool.c | 2
-rw-r--r-- drivers/nvdimm/core.c | 7
-rw-r--r-- drivers/nvme/host/core.c | 41
-rw-r--r-- drivers/nvme/host/fabrics.c | 3
-rw-r--r-- drivers/nvme/host/fc.c | 121
-rw-r--r-- drivers/nvme/host/pci.c | 72
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 22
-rw-r--r-- drivers/nvme/target/configfs.c | 30
-rw-r--r-- drivers/nvme/target/core.c | 5
-rw-r--r-- drivers/nvme/target/fc.c | 326
-rw-r--r-- drivers/nvme/target/nvmet.h | 2
-rw-r--r-- drivers/nvmem/rockchip-efuse.c | 2
-rw-r--r-- drivers/of/device.c | 8
-rw-r--r-- drivers/of/irq.c | 2
-rw-r--r-- drivers/of/property.c | 17
-rw-r--r-- drivers/parisc/dino.c | 2
-rw-r--r-- drivers/parisc/pdc_stable.c | 8
-rw-r--r-- drivers/pci/msi.c | 13
-rw-r--r-- drivers/pci/pci.c | 37
-rw-r--r-- drivers/pci/probe.c | 43
-rw-r--r-- drivers/pci/quirks.c | 89
-rw-r--r-- drivers/perf/arm_pmu.c | 41
-rw-r--r-- drivers/perf/arm_pmu_platform.c | 9
-rw-r--r-- drivers/perf/qcom_l2_pmu.c | 2
-rw-r--r-- drivers/phy/broadcom/Kconfig | 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cherryview.c | 7
-rw-r--r-- drivers/pinctrl/intel/pinctrl-merrifield.c | 6
-rw-r--r-- drivers/pinctrl/mvebu/pinctrl-armada-37xx.c | 25
-rw-r--r-- drivers/pinctrl/stm32/Kconfig | 9
-rw-r--r-- drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c | 1
-rw-r--r-- drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c | 2
-rw-r--r-- drivers/pinctrl/zte/pinctrl-zx.c | 11
-rw-r--r-- drivers/platform/x86/Kconfig | 1
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 12
-rw-r--r-- drivers/platform/x86/intel-vbtn.c | 4
-rw-r--r-- drivers/platform/x86/wmi.c | 6
-rw-r--r-- drivers/ptp/ptp_clock.c | 42
-rw-r--r-- drivers/ptp/ptp_private.h | 3
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 1
-rw-r--r-- drivers/s390/cio/chp.c | 1
-rw-r--r-- drivers/s390/net/qeth_l3_main.c | 4
-rw-r--r-- drivers/sbus/char/display7seg.c | 4
-rw-r--r-- drivers/sbus/char/flash.c | 4
-rw-r--r-- drivers/sbus/char/uctrl.c | 4
-rw-r--r-- drivers/scsi/Kconfig | 13
-rw-r--r-- drivers/scsi/aacraid/aachba.c | 16
-rw-r--r-- drivers/scsi/aacraid/aacraid.h | 2
-rw-r--r-- drivers/scsi/aic7xxx/Makefile | 12
-rw-r--r-- drivers/scsi/aic7xxx/aicasm/Makefile | 53
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 68
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 45
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_init.c | 64
-rw-r--r-- drivers/scsi/csiostor/csio_hw.c | 4
-rw-r--r-- drivers/scsi/csiostor/csio_init.c | 12
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 3
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 7
-rw-r--r-- drivers/scsi/cxlflash/main.c | 11
-rw-r--r-- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 10
-rw-r--r-- drivers/scsi/hpsa.c | 2
-rw-r--r-- drivers/scsi/ipr.c | 33
-rw-r--r-- drivers/scsi/ipr.h | 2
-rw-r--r-- drivers/scsi/isci/request.c | 14
-rw-r--r-- drivers/scsi/libfc/fc_disc.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 5
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 30
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.h | 1
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 6
-rw-r--r-- drivers/scsi/qedf/qedf.h | 3
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 22
-rw-r--r-- drivers/scsi/qedi/Kconfig | 1
-rw-r--r-- drivers/scsi/qedi/qedi.h | 17
-rw-r--r-- drivers/scsi/qedi/qedi_fw.c | 2
-rw-r--r-- drivers/scsi/qedi/qedi_iscsi.c | 8
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 419
-rw-r--r-- drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h | 210
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.c | 12
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 30
-rw-r--r-- drivers/scsi/scsi.c | 4
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 6
-rw-r--r-- drivers/scsi/sd.c | 3
-rw-r--r-- drivers/scsi/sd_zbc.c | 9
-rw-r--r-- drivers/scsi/ses.c | 2
-rw-r--r-- drivers/scsi/sg.c | 25
-rw-r--r-- drivers/scsi/smartpqi/smartpqi.h | 2
-rw-r--r-- drivers/scsi/st.c | 4
-rw-r--r-- drivers/scsi/virtio_scsi.c | 1
-rw-r--r-- drivers/soc/imx/gpcv2.c | 15
-rw-r--r-- drivers/soc/ti/knav_qmss_queue.c | 3
-rw-r--r-- drivers/soc/ti/ti_sci_pm_domains.c | 2
-rw-r--r-- drivers/soc/zte/Kconfig | 1
-rw-r--r-- drivers/spmi/spmi-pmic-arb.c | 17
-rw-r--r-- drivers/spmi/spmi.c | 12
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/comedi/comedi_fops.c | 3
-rw-r--r-- drivers/staging/comedi/drivers/ni_mio_common.c | 3
-rw-r--r-- drivers/staging/fsl-mc/bus/fsl-mc-allocator.c | 6
-rw-r--r-- drivers/staging/iio/resolver/ad2s1210.c | 2
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 19
-rw-r--r-- drivers/staging/media/atomisp/i2c/ap1302.h | 4
-rw-r--r-- drivers/staging/media/atomisp/i2c/gc0310.h | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/gc2235.h | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/imx/imx.h | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/ov2680.h | 3
-rw-r--r-- drivers/staging/media/atomisp/i2c/ov2722.h | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/ov5693/ov5693.h | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/ov8858.h | 2
-rw-r--r-- drivers/staging/media/atomisp/i2c/ov8858_btns.h | 2
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp2/Makefile | 10
-rw-r--r-- drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h | 2
-rw-r--r-- drivers/staging/media/cxd2099/cxd2099.c | 165
-rw-r--r-- drivers/staging/media/cxd2099/cxd2099.h | 6
-rw-r--r-- drivers/staging/rtl8188eu/core/rtw_cmd.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/usb_intf.c | 2
-rw-r--r-- drivers/staging/sm750fb/ddk750_chip.c | 2
-rw-r--r-- drivers/staging/sm750fb/sm750.c | 24
-rw-r--r-- drivers/staging/speakup/main.c | 2
-rw-r--r-- drivers/staging/speakup/spk_priv.h | 2
-rw-r--r-- drivers/staging/speakup/spk_ttyio.c | 22
-rw-r--r-- drivers/staging/vboxvideo/Kconfig | 12
-rw-r--r-- drivers/staging/vboxvideo/Makefile | 7
-rw-r--r-- drivers/staging/vboxvideo/TODO | 9
-rw-r--r-- drivers/staging/vboxvideo/hgsmi_base.c | 246
-rw-r--r-- drivers/staging/vboxvideo/hgsmi_ch_setup.h | 66
-rw-r--r-- drivers/staging/vboxvideo/hgsmi_channels.h | 53
-rw-r--r-- drivers/staging/vboxvideo/hgsmi_defs.h | 92
-rw-r--r-- drivers/staging/vboxvideo/modesetting.c | 142
-rw-r--r-- drivers/staging/vboxvideo/vbox_drv.c | 286
-rw-r--r-- drivers/staging/vboxvideo/vbox_drv.h | 296
-rw-r--r-- drivers/staging/vboxvideo/vbox_err.h | 50
-rw-r--r-- drivers/staging/vboxvideo/vbox_fb.c | 412
-rw-r--r-- drivers/staging/vboxvideo/vbox_hgsmi.c | 115
-rw-r--r-- drivers/staging/vboxvideo/vbox_irq.c | 197
-rw-r--r-- drivers/staging/vboxvideo/vbox_main.c | 534
-rw-r--r-- drivers/staging/vboxvideo/vbox_mode.c | 877
-rw-r--r-- drivers/staging/vboxvideo/vbox_prime.c | 74
-rw-r--r-- drivers/staging/vboxvideo/vbox_ttm.c | 472
-rw-r--r-- drivers/staging/vboxvideo/vboxvideo.h | 491
-rw-r--r-- drivers/staging/vboxvideo/vboxvideo_guest.h | 95
-rw-r--r-- drivers/staging/vboxvideo/vboxvideo_vbe.h | 84
-rw-r--r-- drivers/staging/vboxvideo/vbva_base.c | 233
-rw-r--r-- drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c | 10
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_cm.c | 16
-rw-r--r-- drivers/target/iscsi/cxgbit/cxgbit_target.c | 12
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 7
-rw-r--r-- drivers/target/target_core_tpg.c | 4
-rw-r--r-- drivers/target/target_core_transport.c | 4
-rw-r--r-- drivers/target/target_core_user.c | 13
-rw-r--r-- drivers/thunderbolt/eeprom.c | 9
-rw-r--r-- drivers/thunderbolt/icm.c | 9
-rw-r--r-- drivers/thunderbolt/switch.c | 11
-rw-r--r-- drivers/thunderbolt/tb.h | 4
-rw-r--r-- drivers/thunderbolt/tb_msgs.h | 12
-rw-r--r-- drivers/tty/pty.c | 119
-rw-r--r-- drivers/tty/serial/8250/8250_core.c | 23
-rw-r--r-- drivers/tty/serial/8250/8250_exar.c | 4
-rw-r--r-- drivers/tty/serial/amba-pl011.c | 37
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 24
-rw-r--r-- drivers/tty/serial/imx.c | 27
-rw-r--r-- drivers/tty/serial/sh-sci.c | 12
-rw-r--r-- drivers/tty/serial/st-asc.c | 1
-rw-r--r-- drivers/tty/tty_io.c | 3
-rw-r--r-- drivers/usb/class/cdc-acm.c | 3
-rw-r--r-- drivers/usb/core/hcd.c | 4
-rw-r--r-- drivers/usb/core/hub.c | 10
-rw-r--r-- drivers/usb/core/quirks.c | 4
-rw-r--r-- drivers/usb/dwc2/gadget.c | 3
-rw-r--r-- drivers/usb/dwc3/core.c | 6
-rw-r--r-- drivers/usb/dwc3/dwc3-omap.c | 18
-rw-r--r-- drivers/usb/dwc3/gadget.c | 41
-rw-r--r-- drivers/usb/gadget/function/f_mass_storage.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_uac1.c | 20
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 25
-rw-r--r-- drivers/usb/gadget/udc/Kconfig | 5
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 35
-rw-r--r-- drivers/usb/gadget/udc/snps_udc_plat.c | 6
-rw-r--r-- drivers/usb/host/pci-quirks.c | 91
-rw-r--r-- drivers/usb/host/pci-quirks.h | 3
-rw-r--r-- drivers/usb/host/xhci-hub.c | 14
-rw-r--r-- drivers/usb/host/xhci-pci.c | 13
-rw-r--r-- drivers/usb/host/xhci-ring.c | 11
-rw-r--r-- drivers/usb/host/xhci.c | 10
-rw-r--r-- drivers/usb/host/xhci.h | 1
-rw-r--r-- drivers/usb/musb/musb_host.c | 1
-rw-r--r-- drivers/usb/phy/phy-msm-usb.c | 17
-rw-r--r-- drivers/usb/renesas_usbhs/common.c | 4
-rw-r--r-- drivers/usb/renesas_usbhs/mod_gadget.c | 26
-rw-r--r-- drivers/usb/renesas_usbhs/rcar3.c | 9
-rw-r--r-- drivers/usb/serial/cp210x.c | 1
-rw-r--r-- drivers/usb/serial/option.c | 2
-rw-r--r-- drivers/usb/serial/pl2303.c | 2
-rw-r--r-- drivers/usb/serial/pl2303.h | 1
-rw-r--r-- drivers/usb/storage/isd200.c | 5
-rw-r--r-- drivers/usb/storage/unusual_uas.h | 4
-rw-r--r-- drivers/usb/storage/usb.c | 18
-rw-r--r-- drivers/usb/typec/ucsi/ucsi.h | 1
-rw-r--r-- drivers/vfio/pci/vfio_pci.c | 9
-rw-r--r-- drivers/vfio/pci/vfio_pci_config.c | 13
-rw-r--r-- drivers/vhost/vhost.c | 28
-rw-r--r-- drivers/vhost/vhost.h | 3
-rw-r--r-- drivers/video/fbdev/efifb.c | 8
-rw-r--r-- drivers/video/fbdev/imxfb.c | 10
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/core.c | 1
-rw-r--r-- drivers/virtio/virtio_balloon.c | 28
-rw-r--r-- drivers/virtio/virtio_pci_common.c | 10
-rw-r--r-- drivers/w1/masters/omap_hdq.c | 3
-rw-r--r-- drivers/w1/w1.c | 4
-rw-r--r-- drivers/xen/Makefile | 3
-rw-r--r-- drivers/xen/balloon.c | 3
-rw-r--r-- drivers/xen/biomerge.c | 3
-rw-r--r-- drivers/xen/events/events_base.c | 15
-rw-r--r-- drivers/xen/grant-table.c | 9
-rw-r--r-- drivers/xen/xen-balloon.c | 22
-rw-r--r-- drivers/xen/xen-selfballoon.c | 4
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 3
-rw-r--r-- drivers/xen/xenfs/super.c | 1
 782 files changed, 12661 insertions(+), 4319 deletions(-)
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index fc6c416f8724..d5999eb41c00 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -180,8 +180,8 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
180 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, 180 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) },
181 { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, 181 { "BRCM900D", APD_ADDR(vulcan_spi_desc) },
182 { "CAV900D", APD_ADDR(vulcan_spi_desc) }, 182 { "CAV900D", APD_ADDR(vulcan_spi_desc) },
183 { "HISI0A21", APD_ADDR(hip07_i2c_desc) }, 183 { "HISI02A1", APD_ADDR(hip07_i2c_desc) },
184 { "HISI0A22", APD_ADDR(hip08_i2c_desc) }, 184 { "HISI02A2", APD_ADDR(hip08_i2c_desc) },
185#endif 185#endif
186 { } 186 { }
187}; 187};
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index e51a1e98e62f..f88caf5aab76 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -85,6 +85,7 @@ static const struct lpss_device_desc lpss_dma_desc = {
 };
 
 struct lpss_private_data {
+	struct acpi_device *adev;
 	void __iomem *mmio_base;
 	resource_size_t mmio_size;
 	unsigned int fixed_clk_rate;
@@ -155,6 +156,12 @@ static struct pwm_lookup byt_pwm_lookup[] = {
 
 static void byt_pwm_setup(struct lpss_private_data *pdata)
 {
+	struct acpi_device *adev = pdata->adev;
+
+	/* Only call pwm_add_table for the first PWM controller */
+	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+		return;
+
 	if (!acpi_dev_present("INT33FD", NULL, -1))
 		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
 }
@@ -180,6 +187,12 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
 
 static void bsw_pwm_setup(struct lpss_private_data *pdata)
 {
+	struct acpi_device *adev = pdata->adev;
+
+	/* Only call pwm_add_table for the first PWM controller */
+	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+		return;
+
 	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
 }
 
@@ -456,6 +469,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
 		goto err_out;
 	}
 
+	pdata->adev = adev;
 	pdata->dev_desc = dev_desc;
 
 	if (dev_desc->setup)
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 8c4e0a18460a..bf22c29d2517 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -86,7 +86,12 @@ void __init acpi_watchdog_init(void)
 
 	found = false;
 	resource_list_for_each_entry(rentry, &resource_list) {
-		if (resource_contains(rentry->res, &res)) {
+		if (rentry->res->flags == res.flags &&
+		    resource_overlaps(rentry->res, &res)) {
+			if (res.start < rentry->res->start)
+				rentry->res->start = res.start;
+			if (res.end > rentry->res->end)
+				rentry->res->end = res.end;
 			found = true;
 			break;
 		}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
 		free_buffer_on_error = TRUE;
 	}
 
-	status = acpi_get_handle(handle, pathname, &target_handle);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+	if (pathname) {
+		status = acpi_get_handle(handle, pathname, &target_handle);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		target_handle = handle;
 	}
 
 	full_pathname = acpi_ns_get_external_pathname(target_handle);
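
Note on the nsxfeval.c hunk: acpi_evaluate_object_typed() can now be called with a NULL pathname and will evaluate the object at the handle itself. A minimal sketch of the calling pattern this enables; the handle variable and the expected return type are illustrative assumptions, not part of this patch:

	/* Sketch: evaluate the handle directly, passing a NULL relative pathname. */
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	status = acpi_evaluate_object_typed(dev_handle, NULL, NULL, &out,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_SUCCESS(status))
		kfree(out.pointer);	/* caller owns the returned buffer */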
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ddb01e9fa5b2..ae3d6d152633 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -151,6 +151,10 @@ static bool ec_freeze_events __read_mostly = false;
 module_param(ec_freeze_events, bool, 0644);
 MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume");
 
+static bool ec_no_wakeup __read_mostly;
+module_param(ec_no_wakeup, bool, 0644);
+MODULE_PARM_DESC(ec_no_wakeup, "Do not wake up from suspend-to-idle");
+
 struct acpi_ec_query_handler {
 	struct list_head node;
 	acpi_ec_query_func func;
@@ -535,6 +539,14 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
 	spin_unlock_irqrestore(&ec->lock, flags);
 	__acpi_ec_flush_event(ec);
 }
+
+void acpi_ec_flush_work(void)
+{
+	if (first_ec)
+		__acpi_ec_flush_event(first_ec);
+
+	flush_scheduled_work();
+}
 #endif /* CONFIG_PM_SLEEP */
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
@@ -1729,7 +1741,7 @@ error:
  * functioning ECDT EC first in order to handle the events.
  * https://bugzilla.kernel.org/show_bug.cgi?id=115021
  */
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
 {
 	acpi_handle handle;
 
@@ -1880,6 +1892,32 @@ static int acpi_ec_suspend(struct device *dev)
 	return 0;
 }
 
+static int acpi_ec_suspend_noirq(struct device *dev)
+{
+	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+	/*
+	 * The SCI handler doesn't run at this point, so the GPE can be
+	 * masked at the low level without side effects.
+	 */
+	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+	    ec->reference_count >= 1)
+		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+
+	return 0;
+}
+
+static int acpi_ec_resume_noirq(struct device *dev)
+{
+	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+
+	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+	    ec->reference_count >= 1)
+		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
+
+	return 0;
+}
+
 static int acpi_ec_resume(struct device *dev)
 {
 	struct acpi_ec *ec =
@@ -1891,6 +1929,7 @@ static int acpi_ec_resume(struct device *dev)
 #endif
 
 static const struct dev_pm_ops acpi_ec_pm = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq)
 	SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume)
 };
 
@@ -1964,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
 int __init acpi_ec_init(void)
 {
 	int result;
+	int ecdt_fail, dsdt_fail;
 
 	/* register workqueue for _Qxx evaluations */
 	result = acpi_ec_query_init();
 	if (result)
-		goto err_exit;
-	/* Now register the driver for the EC */
-	result = acpi_bus_register_driver(&acpi_ec_driver);
-	if (result)
-		goto err_exit;
+		return result;
 
-err_exit:
-	if (result)
-		acpi_ec_query_exit();
-	return result;
+	/* Drivers must be started after acpi_ec_query_init() */
+	ecdt_fail = acpi_ec_ecdt_start();
+	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
 
 /* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9531d3276f65..3f5af4d7a739 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
 int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
@@ -193,6 +192,10 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
 		      void *data);
 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
 
+#ifdef CONFIG_PM_SLEEP
+void acpi_ec_flush_work(void);
+#endif
+
 
 /*--------------------------------------------------------------------------
                                   Suspend/Resume
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b75b734ee73a..19182d091587 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = {
 
 static __init int nfit_init(void)
 {
+	int ret;
+
 	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
 	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
@@ -3187,8 +3189,14 @@ static __init int nfit_init(void)
 		return -ENOMEM;
 
 	nfit_mce_register();
+	ret = acpi_bus_register_driver(&acpi_nfit_driver);
+	if (ret) {
+		nfit_mce_unregister();
+		destroy_workqueue(nfit_wq);
+	}
+
+	return ret;
 
-	return acpi_bus_register_driver(&acpi_nfit_driver);
 }
 
 static __exit void nfit_exit(void)
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index edb0c79f7c64..917f1cc0fda4 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -443,7 +443,7 @@ int __init acpi_numa_init(void)
 	 * So go over all cpu entries in SRAT to get apicid to node mapping.
 	 */
 
-	/* SRAT: Static Resource Affinity Table */
+	/* SRAT: System Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		struct acpi_subtable_proc srat_proc[3];
 
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..476a52c60cf3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
 	fwnode_for_each_child_node(fwnode, child) {
 		u32 nr;
 
-		if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+		if (fwnode_property_read_u32(child, prop_name, &nr))
 			continue;
 
 		if (val == nr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..70fd5502c284 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void)
 
 	acpi_gpe_apply_masked_gpes();
 	acpi_update_all_gpes();
-	acpi_ec_ecdt_start();
 
 	acpi_scan_initialized = true;
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index be17664736b2..fa8243c5c062 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -777,11 +777,11 @@ static void acpi_freeze_sync(void)
 	/*
 	 * Process all pending events in case there are any wakeup ones.
 	 *
-	 * The EC driver uses the system workqueue, so that one needs to be
-	 * flushed too.
+	 * The EC driver uses the system workqueue and an additional special
+	 * one, so those need to be flushed too.
 	 */
+	acpi_ec_flush_work();
 	acpi_os_wait_events_complete();
-	flush_scheduled_work();
 	s2idle_wakeup = false;
 }
 
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 4ac3e06b41d8..98aa8c808a33 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -17,6 +17,16 @@
 #include <linux/serial_core.h>
 
 /*
+ * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
+ * occasionally getting stuck as 1. To avoid the potential for a hang, check
+ * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
+ * implementations, so only do so if an affected platform is detected in
+ * parse_spcr().
+ */
+bool qdf2400_e44_present;
+EXPORT_SYMBOL(qdf2400_e44_present);
+
+/*
  * Some Qualcomm Datacenter Technologies SoCs have a defective UART BUSY bit.
  * Detect them by examining the OEM fields in the SPCR header, similiar to PCI
  * quirk detection in pci_mcfg.c.
@@ -147,8 +157,30 @@ int __init parse_spcr(bool earlycon)
 		goto done;
 	}
 
-	if (qdf2400_erratum_44_present(&table->header))
-		uart = "qdf2400_e44";
+	/*
+	 * If the E44 erratum is required, then we need to tell the pl011
+	 * driver to implement the work-around.
+	 *
+	 * The global variable is used by the probe function when it
+	 * creates the UARTs, whether or not they're used as a console.
+	 *
+	 * If the user specifies "traditional" earlycon, the qdf2400_e44
+	 * console name matches the EARLYCON_DECLARE() statement, and
+	 * SPCR is not used. Parameter "earlycon" is false.
+	 *
+	 * If the user specifies "SPCR" earlycon, then we need to update
+	 * the console name so that it also says "qdf2400_e44". Parameter
+	 * "earlycon" is true.
+	 *
+	 * For consistency, if we change the console name, then we do it
+	 * for everyone, not just earlycon.
+	 */
+	if (qdf2400_erratum_44_present(&table->header)) {
+		qdf2400_e44_present = true;
+		if (earlycon)
+			uart = "qdf2400_e44";
+	}
+
 	if (xgene_8250_erratum_present(table))
 		iotype = "mmio32";
 
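
Note on the spcr.c hunks: the new qdf2400_e44_present global lets the pl011 driver substitute a TXFE-based "still busy" test for the erratum-affected BUSY bit. A hedged sketch of how a consumer might key off the flag; the helper function is hypothetical, while the register masks are the standard pl011 definitions from include/linux/amba/serial.h:

	#include <linux/amba/serial.h>

	extern bool qdf2400_e44_present;	/* set by parse_spcr() above */

	/* Hypothetical helper: erratum-safe "transmitter still busy?" test. */
	static bool uart_tx_busy(unsigned int fr)
	{
		if (qdf2400_e44_present)
			return !(fr & UART011_FR_TXFE);	/* E44: trust TXFE, not BUSY */
		return fr & UART01x_FR_BUSY;
	}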
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index aae4d8d4be36..831cdd7d197d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc,
 	list_add_tail(&t->work.entry, target_list);
 	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
 	list_add_tail(&tcomplete->entry, &thread->todo);
-	if (target_wait)
-		wake_up_interruptible(target_wait);
+	if (target_wait) {
+		if (reply || !(t->flags & TF_ONE_WAY))
+			wake_up_interruptible_sync(target_wait);
+		else
+			wake_up_interruptible(target_wait);
+	}
 	return;
 
 err_translate_failed:
@@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
 			proc->pid, current->pid, cmd, arg);*/
 
-	if (unlikely(current->mm != proc->vma_vm_mm)) {
-		pr_err("current mm mismatch proc mm\n");
-		return -EINVAL;
-	}
 	trace_binder_ioctl(cmd, arg);
 
 	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
-	if (proc->tsk != current)
+	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
@@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
-	get_task_struct(current);
-	proc->tsk = current;
-	proc->vma_vm_mm = current->mm;
+	get_task_struct(current->group_leader);
+	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
 	init_waitqueue_head(&proc->wait);
 	proc->default_priority = task_nice(current);
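
Note on the first binder.c hunk: synchronous transactions (replies, or two-way calls) now use wake_up_interruptible_sync(), which hints to the scheduler that the waker is about to block, so the woken thread should not preempt it on this CPU. A minimal sketch of the pattern; the names here are illustrative, not binder's:

	if (synchronous)
		wake_up_interruptible_sync(&target_waitq);	/* waker sleeps next */
	else
		wake_up_interruptible(&target_waitq);		/* one-way, no reply awaited */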
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 948fc86980a1..363fc5330c21 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -215,7 +215,7 @@ config SATA_FSL
 
 config SATA_GEMINI
 	tristate "Gemini SATA bridge support"
-	depends on PATA_FTIDE010
+	depends on ARCH_GEMINI || COMPILE_TEST
 	default ARCH_GEMINI
 	help
 	  This enabled support for the FTIDE010 to SATA bridge
@@ -613,7 +613,7 @@ config PATA_FTIDE010
 	tristate "Faraday Technology FTIDE010 PATA support"
 	depends on OF
 	depends on ARM
-	default ARCH_GEMINI
+	depends on SATA_GEMINI
 	help
 	  This option enables support for the Faraday FTIDE010
 	  PATA controller found in the Cortina Gemini SoCs.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8453f9a4682f..fa7dd4394c02 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2083,7 +2083,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 retry:
 	ata_tf_init(dev, &tf);
 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
-	    !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
 		tf.protocol = ATA_PROT_DMA;
 		dma = true;
@@ -2102,8 +2102,8 @@ retry:
 				     buf, sectors * ATA_SECT_SIZE, 0);
 
 	if (err_mask && dma) {
-		dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
-		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
+		dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
+		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
 		goto retry;
 	}
 
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b70bcf6d2914..3dbd05532c09 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1434,7 +1434,7 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
 
 /**
  * ata_eh_done - EH action complete
- * @ap: target ATA port
+ * @link: ATA link for which EH actions are complete
  * @dev: target ATA dev for per-dev action (can be NULL)
  * @action: action just completed
  *
@@ -1576,7 +1576,7 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
 
 /**
  * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- * @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+ * @qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
  * @cmd: scsi command for which the sense code should be set
  *
  * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
@@ -4175,7 +4175,6 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 	struct ata_link *link;
 	struct ata_device *dev;
 	unsigned long flags;
-	int rc = 0;
 
 	/* are we resuming? */
 	spin_lock_irqsave(ap->lock, flags);
@@ -4202,7 +4201,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 	ata_acpi_set_state(ap, ap->pm_mesg);
 
 	if (ap->ops->port_resume)
-		rc = ap->ops->port_resume(ap);
+		ap->ops->port_resume(ap);
 
 	/* tell ACPI that we're resuming */
 	ata_acpi_on_resume(ap);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d462c5a3a7ef..44ba292f2cd7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3030,10 +3030,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
 	if (!sata_pmp_attached(ap)) {
-		if (likely(devno < ata_link_max_devices(&ap->link)))
+		if (likely(devno >= 0 &&
+			   devno < ata_link_max_devices(&ap->link)))
 			return &ap->link.device[devno];
 	} else {
-		if (likely(devno < ap->nr_pmp_links))
+		if (likely(devno >= 0 &&
+			   devno < ap->nr_pmp_links))
 			return &ap->pmp_link[devno].device[0];
 	}
 
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index ee9844758736..537d11869069 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -858,6 +858,14 @@ static const struct of_device_id sata_rcar_match[] = {
 		.compatible = "renesas,sata-r8a7795",
 		.data = (void *)RCAR_GEN2_SATA
 	},
+	{
+		.compatible = "renesas,rcar-gen2-sata",
+		.data = (void *)RCAR_GEN2_SATA
+	},
+	{
+		.compatible = "renesas,rcar-gen3-sata",
+		.data = (void *)RCAR_GEN2_SATA
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, sata_rcar_match);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 292dec18ffb8..07bdd51b3b9a 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev,
 
 	ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
 	if (ret < 0)
-		goto out_disable;
+		goto out_release;
 
 	zatm_dev->pci_dev = pci_dev;
 	dev->dev_data = zatm_dev;
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 2ae24c28e70c..1c152aed6b82 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de
 {
 	if (dev && dev->dma_mem)
 		return dev->dma_mem;
-	return dma_coherent_default_memory;
+	return NULL;
 }
 
 static inline dma_addr_t dma_get_device_base(struct device *dev,
@@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
-/**
- * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
- *
- * @dev:	device from which we allocate memory
- * @size:	size of requested memory area
- * @dma_handle:	This will be filled with the correct dma handle
- * @ret:	This pointer will be filled with the virtual address
- *		to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
-				       dma_addr_t *dma_handle, void **ret)
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+		ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 	int order = get_order(size);
 	unsigned long flags;
 	int pageno;
 	int dma_memory_map;
+	void *ret;
 
-	if (!mem)
-		return 0;
-
-	*ret = NULL;
 	spin_lock_irqsave(&mem->spinlock, flags);
 
 	if (unlikely(size > (mem->size << PAGE_SHIFT)))
@@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 		goto err;
 
 	/*
-	 * Memory was found in the per-device area.
+	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	if (dma_memory_map)
-		memset(*ret, 0, size);
+		memset(ret, 0, size);
 	else
-		memset_io(*ret, 0, size);
+		memset_io(ret, 0, size);
 
-	return 1;
+	return ret;
 
 err:
 	spin_unlock_irqrestore(&mem->spinlock, flags);
+	return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+		dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	if (!mem)
+		return 0;
+
+	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	if (*ret)
+		return 1;
+
 	/*
 	 * In the case where the allocation can not be satisfied from the
 	 * per-device area, try to fall back to generic memory if the
@@ -225,25 +235,20 @@ err:
 	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
-EXPORT_SYMBOL(dma_alloc_from_coherent);
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
 
-/**
- * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
- * @dev:	device from which the memory was allocated
- * @order:	the order of pages allocated
- * @vaddr:	virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if
- * dma_release_coherent() should proceed with releasing memory from
- * generic pools.
- */
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
 {
-	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+	if (!dma_coherent_default_memory)
+		return NULL;
 
+	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+			dma_handle);
+}
+
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+		int order, void *vaddr)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr <
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
@@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_release_from_coherent);
 
 /**
- * dma_mmap_from_coherent() - try to mmap the memory allocated from
- * per-device coherent memory pool to userspace
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
  * @dev:	device from which the memory was allocated
- * @vma:	vm_area for the userspace memory
- * @vaddr:	cpu address returned by dma_alloc_from_coherent
- * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
- * @ret:	result from remap_pfn_range()
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
+ * coherent memory pool and if so, releases that memory.
 *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
 */
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
-			   void *vaddr, size_t size, int *ret)
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
 {
 	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
 
+	return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_release_from_coherent(dma_coherent_default_memory, order,
+			vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
 		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
@@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
+ * @size:	size of the memory buffer allocated
+ * @ret:	result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+		void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+				   size_t size, int *ret)
+{
+	if (!dma_coherent_default_memory)
+		return 0;
+
+	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+					vaddr, size, ret);
+}
 
 /*
  * Support for reserved memory regions defined in device tree
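Editor's note: the refactor above splits the old dma_*_from_coherent() entry points into per-device (dma_*_from_dev_coherent) and global (dma_*_from_global_coherent) variants that share one static helper. A minimal sketch of how an arch allocator might chain them; arch_dma_alloc() is a hypothetical name, while the two pool calls are the real interfaces added above:

#include <linux/dma-mapping.h>

static void *arch_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *handle)
{
	void *vaddr;

	/* Per-device pool first; a nonzero return means it owned the request,
	 * even if the allocation itself failed (DMA_MEMORY_EXCLUSIVE). */
	if (dma_alloc_from_dev_coherent(dev, size, handle, &vaddr))
		return vaddr;

	/* No per-device pool: try the DT-declared global pool, if any. */
	vaddr = dma_alloc_from_global_coherent(size, handle);
	if (vaddr)
		return vaddr;

	return NULL;	/* fall through to the generic page allocator */
}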
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 5096755d185e..b555ff9dd8fc 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
 	if (off < count && user_count <= (count - off)) {
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b9f907eedbf7..bfbe1e154128 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,7 +30,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/reboot.h>
 #include <linux/security.h>
-#include <linux/swait.h>
 
 #include <generated/utsrelease.h>
 
@@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void)
 * state of the firmware loading.
 */
 struct fw_state {
-	struct swait_queue_head wq;
+	struct completion completion;
 	enum fw_status status;
 };
 
 static void fw_state_init(struct fw_state *fw_st)
 {
-	init_swait_queue_head(&fw_st->wq);
+	init_completion(&fw_st->completion);
 	fw_st->status = FW_STATUS_UNKNOWN;
 }
 
@@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
 {
 	long ret;
 
-	ret = swait_event_interruptible_timeout(fw_st->wq,
-				__fw_state_is_done(READ_ONCE(fw_st->status)),
-				timeout);
+	ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout);
 	if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
 		return -ENOENT;
 	if (!ret)
@@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st,
 	WRITE_ONCE(fw_st->status, status);
 
 	if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
-		swake_up(&fw_st->wq);
+		complete_all(&fw_st->completion);
 }
 
 #define fw_state_start(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_LOADING)
 #define fw_state_done(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_aborted(fw_st)					\
+	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait(fw_st)					\
 	__fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
 
-#ifndef CONFIG_FW_LOADER_USER_HELPER
-
-#define fw_state_is_aborted(fw_st)	false
-
-#else /* CONFIG_FW_LOADER_USER_HELPER */
-
 static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
 {
 	return fw_st->status == status;
 }
 
+#define fw_state_is_aborted(fw_st)				\
+	__fw_state_check(fw_st, FW_STATUS_ABORTED)
+
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+
 #define fw_state_aborted(fw_st)					\
 	__fw_state_set(fw_st, FW_STATUS_ABORTED)
 #define fw_state_is_done(fw_st)					\
 	__fw_state_check(fw_st, FW_STATUS_DONE)
 #define fw_state_is_loading(fw_st)				\
 	__fw_state_check(fw_st, FW_STATUS_LOADING)
-#define fw_state_is_aborted(fw_st)				\
-	__fw_state_check(fw_st, FW_STATUS_ABORTED)
 #define fw_state_wait_timeout(fw_st, timeout)			\
 	__fw_state_wait_common(fw_st, timeout)
 
@@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 	return 1; /* need to load */
 }
 
+/*
+ * Batched requests need only one wake, we need to do this step last due to the
+ * fallback mechanism. The buf is protected with kref_get(), and it won't be
+ * released until the last user calls release_firmware().
+ *
+ * Failed batched requests are possible as well, in such cases we just share
+ * the struct firmware_buf and won't release it until all requests are woken
+ * and have gone through this same path.
+ */
+static void fw_abort_batch_reqs(struct firmware *fw)
+{
+	struct firmware_buf *buf;
+
+	/* Loaded directly? */
+	if (!fw || !fw->priv)
+		return;
+
+	buf = fw->priv;
+	if (!fw_state_is_aborted(&buf->fw_st))
+		fw_state_aborted(&buf->fw_st);
+}
+
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 
  out:
 	if (ret < 0) {
+		fw_abort_batch_reqs(fw);
 		release_firmware(fw);
 		fw = NULL;
 	}
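Editor's note: the firmware_class changes replace the open-coded swait queue with a struct completion, whose done-count semantics survive a wake that happens before a waiter arrives, which is what batched requests need. A stand-alone sketch of the resulting pattern using a hypothetical demo_state type; wait_for_completion_killable_timeout() and complete_all() are the real primitives adopted above:

#include <linux/completion.h>

struct demo_state {
	struct completion done;
};

static void demo_init(struct demo_state *st)
{
	init_completion(&st->done);
}

static long demo_wait(struct demo_state *st, long timeout)
{
	/* >0: completed, 0: timed out, -ERESTARTSYS: fatal signal pending */
	return wait_for_completion_killable_timeout(&st->done, timeout);
}

static void demo_finish(struct demo_state *st)
{
	/* wakes every current waiter and satisfies any later ones too */
	complete_all(&st->done);
}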
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 3b8210ebb50e..60303aa28587 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1222,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
 
 	spin_unlock_irq(&dev->power.lock);
 
-	dev_pm_domain_set(dev, &genpd->domain);
-
 	return gpd_data;
 
  err_free:
@@ -1237,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
 static void genpd_free_dev_data(struct device *dev,
 				struct generic_pm_domain_data *gpd_data)
 {
-	dev_pm_domain_set(dev, NULL);
-
 	spin_lock_irq(&dev->power.lock);
 
 	dev->power.subsys_data->domain_data = NULL;
@@ -1275,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	if (ret)
 		goto out;
 
+	dev_pm_domain_set(dev, &genpd->domain);
+
 	genpd->device_count++;
 	genpd->max_off_time_changed = true;
 
@@ -1336,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
 	if (genpd->detach_dev)
 		genpd->detach_dev(genpd, dev);
 
+	dev_pm_domain_set(dev, NULL);
+
 	list_del_init(&pdd->list_node);
 
 	genpd_unlock(genpd);
diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c
index 5f04e7bf063e..e6c64b0be5b2 100644
--- a/drivers/base/regmap/regmap-w1.c
+++ b/drivers/base/regmap/regmap-w1.c
@@ -1,7 +1,7 @@
 /*
  * Register map access API - W1 (1-Wire) support
  *
- * Copyright (C) 2017 OAO Radioavionica
+ * Copyright (c) 2017 Radioavionica Corporation
  * Author: Alex A. Mihaylov <minimumlaw@rambler.ru>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -11,7 +11,7 @@
 
 #include <linux/regmap.h>
 #include <linux/module.h>
-#include "../../w1/w1.h"
+#include <linux/w1.h>
 
 #include "internal.h"
 
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ef8334949b42..f321b96405f5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
-		 loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
 	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
 	lo->lo_offset = offset;
 	if (lo->lo_sizelimit != sizelimit)
 		lo->lo_sizelimit = sizelimit;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		lo->lo_logical_blocksize = logical_blocksize;
-		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
-		blk_queue_logical_block_size(lo->lo_queue,
-					     lo->lo_logical_blocksize);
-	}
 	set_capacity(lo->lo_disk, x);
 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
 	/* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
-	int lo_bits = 9;
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_alignment = 0;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
-		lo_bits = blksize_bits(lo->lo_logical_blocksize);
 
-	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	lo->use_dio = false;
 	lo->lo_blocksize = lo_blocksize;
-	lo->lo_logical_blocksize = 512;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
-	int lo_flags = lo->lo_flags;
 
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if (err)
 		goto exit;
 
-	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
-			lo->lo_logical_blocksize = 512;
-		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
-		if (LO_INFO_BLOCKSIZE(info) != 512 &&
-		    LO_INFO_BLOCKSIZE(info) != 1024 &&
-		    LO_INFO_BLOCKSIZE(info) != 2048 &&
-		    LO_INFO_BLOCKSIZE(info) != 4096)
-			return -EINVAL;
-		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
-			return -EINVAL;
-	}
-
 	if (lo->lo_offset != info->lo_offset ||
-	    lo->lo_sizelimit != info->lo_sizelimit ||
-	    lo->lo_flags != lo_flags ||
-	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
-	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
-				     LO_INFO_BLOCKSIZE(info))) {
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
 			goto exit;
 		}
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
 	if (unlikely(lo->lo_state != Lo_bound))
 		return -ENXIO;
 
-	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
-				lo->lo_logical_blocksize);
+	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 2c096b9a17b8..fecd3f97ef8c 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,7 +49,6 @@ struct loop_device {
 	struct file *	lo_backing_file;
 	struct block_device *lo_device;
 	unsigned	lo_blocksize;
-	unsigned	lo_logical_blocksize;
 	void		*key_data;
 
 	gfp_t		old_gfp_mask;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index dea7d85134ee..5bdf923294a5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work)
 	struct nbd_device *nbd = args->nbd;
 	struct nbd_config *config = nbd->config;
 	struct nbd_cmd *cmd;
-	int ret = 0;
 
 	while (1) {
 		cmd = nbd_read_stat(nbd, args->index);
@@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work)
 			mutex_lock(&nsock->tx_lock);
 			nbd_mark_nsock_dead(nbd, nsock, 1);
 			mutex_unlock(&nsock->tx_lock);
-			ret = PTR_ERR(cmd);
 			break;
 		}
 
@@ -910,7 +908,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 			continue;
 		}
 		sk_set_memalloc(sock->sk);
-		sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+		if (nbd->tag_set.timeout)
+			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
 		atomic_inc(&config->recv_threads);
 		refcount_inc(&nbd->config_refs);
 		old = nsock->sock;
@@ -924,6 +923,8 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
 		mutex_unlock(&nsock->tx_lock);
 		sockfd_put(old);
 
+		clear_bit(NBD_DISCONNECTED, &config->runtime_flags);
+
 		/* We take the tx_mutex in an error path in the recv_work, so we
 		 * need to queue_work outside of the tx_mutex.
 		 */
@@ -980,11 +981,15 @@ static void send_disconnects(struct nbd_device *nbd)
 	int i, ret;
 
 	for (i = 0; i < config->num_connections; i++) {
+		struct nbd_sock *nsock = config->socks[i];
+
 		iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+		mutex_lock(&nsock->tx_lock);
 		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
 		if (ret <= 0)
 			dev_err(disk_to_dev(nbd->disk),
 				"Send disconnect failed %d\n", ret);
+		mutex_unlock(&nsock->tx_lock);
 	}
 }
 
@@ -993,9 +998,8 @@ static int nbd_disconnect(struct nbd_device *nbd)
 	struct nbd_config *config = nbd->config;
 
 	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
-	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
-			      &config->runtime_flags))
-		send_disconnects(nbd);
+	set_bit(NBD_DISCONNECT_REQUESTED, &config->runtime_flags);
+	send_disconnects(nbd);
 	return 0;
 }
 
@@ -1076,7 +1080,9 @@ static int nbd_start_device(struct nbd_device *nbd)
 			return -ENOMEM;
 		}
 		sk_set_memalloc(config->socks[i]->sock->sk);
-		config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
+		if (nbd->tag_set.timeout)
+			config->socks[i]->sock->sk->sk_sndtimeo =
+				nbd->tag_set.timeout;
 		atomic_inc(&config->recv_threads);
 		refcount_inc(&nbd->config_refs);
 		INIT_WORK(&args->work, recv_work);
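Editor's note: both sk_sndtimeo hunks above only override the socket's send timeout when the user actually configured one. A minimal sketch of the guard, with a hypothetical helper name; sk_sndtimeo defaults to MAX_SCHEDULE_TIMEOUT, and forcing it to 0 would make sends time out immediately:

#include <net/sock.h>

/* demo_set_timeout() is hypothetical: apply a user-supplied send timeout
 * only when it is nonzero, otherwise keep the socket default. */
static void demo_set_timeout(struct sock *sk, unsigned long timeout)
{
	if (timeout)
		sk->sk_sndtimeo = timeout;
}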
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6b16ead1da58..ad9749463d4f 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -875,6 +875,56 @@ static void print_version(void)
 	printk(KERN_INFO "%s", version);
 }
 
+struct vdc_check_port_data {
+	int	dev_no;
+	char	*type;
+};
+
+static int vdc_device_probed(struct device *dev, void *arg)
+{
+	struct vio_dev *vdev = to_vio_dev(dev);
+	struct vdc_check_port_data *port_data;
+
+	port_data = (struct vdc_check_port_data *)arg;
+
+	if ((vdev->dev_no == port_data->dev_no) &&
+	    (!(strcmp((char *)&vdev->type, port_data->type))) &&
+	    dev_get_drvdata(dev)) {
+		/* This device has already been configured
+		 * by vdc_port_probe()
+		 */
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/* Determine whether the VIO device is part of an mpgroup
+ * by locating all the virtual-device-port nodes associated
+ * with the parent virtual-device node for the VIO device
+ * and checking whether any of these nodes are vdc-ports
+ * which have already been configured.
+ *
+ * Returns true if this device is part of an mpgroup and has
+ * already been probed.
+ */
+static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
+{
+	struct vdc_check_port_data port_data;
+	struct device *dev;
+
+	port_data.dev_no = vdev->dev_no;
+	port_data.type = (char *)&vdev->type;
+
+	dev = device_find_child(vdev->dev.parent, &port_data,
+				vdc_device_probed);
+
+	if (dev)
+		return true;
+
+	return false;
+}
+
 static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 {
 	struct mdesc_handle *hp;
@@ -893,6 +943,14 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto err_out_release_mdesc;
 	}
 
+	/* Check if this device is part of an mpgroup */
+	if (vdc_port_mpgroup_check(vdev)) {
+		printk(KERN_WARNING
+			"VIO: Ignoring extra vdisk port %s",
+			dev_name(&vdev->dev));
+		goto err_out_release_mdesc;
+	}
+
 	port = kzalloc(sizeof(*port), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!port) {
@@ -943,6 +1001,9 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	if (err)
 		goto err_out_free_tx_ring;
 
+	/* Note that the device driver_data is used to determine
+	 * whether the port has been probed.
+	 */
 	dev_set_drvdata(&vdev->dev, port);
 
 	mdesc_release(hp);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4e02aa5fdac0..d3d5523862c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	struct request_queue *q = vblk->disk->queue;
 	char cap_str_2[10], cap_str_10[10];
 	char *envp[] = { "RESIZE=1", NULL };
+	unsigned long long nblocks;
 	u64 capacity;
 
 	/* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
 		capacity = (sector_t)-1;
 	}
 
-	string_get_size(capacity, queue_logical_block_size(q),
+	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+	string_get_size(nblocks, queue_logical_block_size(q),
 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-	string_get_size(capacity, queue_logical_block_size(q),
+	string_get_size(nblocks, queue_logical_block_size(q),
 			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
 
 	dev_notice(&vdev->dev,
 		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
-		   (unsigned long long)capacity,
+		   nblocks,
 		   queue_logical_block_size(q),
-		   cap_str_10, cap_str_2);
+		   cap_str_10,
+		   cap_str_2);
 
 	set_capacity(vblk->disk, capacity);
 	revalidate_disk(vblk->disk);
@@ -541,12 +545,9 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
 	int i;
 
 	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
-	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
-		if (sysfs_streq(buf, virtblk_cache_types[i]))
-			break;
-
+	i = sysfs_match_string(virtblk_cache_types, buf);
 	if (i < 0)
-		return -EINVAL;
+		return i;
 
 	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
 	virtblk_update_cache_mode(vdev);
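Editor's note: the cache_type hunk swaps an open-coded reverse scan for sysfs_match_string(), which returns the matching array index or a negative errno that can be handed straight back to sysfs. Generic shape of the idiom, with a hypothetical attribute and mode table:

#include <linux/device.h>
#include <linux/string.h>

static const char *const demo_modes[] = { "off", "on", "auto" };

static ssize_t demo_mode_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int i = sysfs_match_string(demo_modes, buf);

	if (i < 0)
		return i;	/* -EINVAL propagates to the writer */

	/* ... apply demo_modes[i] to the device here ... */
	return count;
}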
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c852ed3c01d5..2468c28d4771 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -111,7 +111,7 @@ struct blk_shadow {
 };
 
 struct blkif_req {
-	int	error;
+	blk_status_t	error;
 };
 
 static inline struct blkif_req *blkif_req(struct request *rq)
@@ -708,6 +708,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	 * existing persistent grants, or if we have to get new grants,
 	 * as there are not sufficiently many free.
 	 */
+	bool new_persistent_gnts = false;
 	struct scatterlist *sg;
 	int num_sg, max_grefs, num_grant;
 
@@ -719,19 +720,21 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	 */
 	max_grefs += INDIRECT_GREFS(max_grefs);
 
-	/*
-	 * We have to reserve 'max_grefs' grants because persistent
-	 * grants are shared by all rings.
-	 */
-	if (max_grefs > 0)
-		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
+	/* Check if we have enough persistent grants to allocate a requests */
+	if (rinfo->persistent_gnts_c < max_grefs) {
+		new_persistent_gnts = true;
+
+		if (gnttab_alloc_grant_references(
+		    max_grefs - rinfo->persistent_gnts_c,
+		    &setup.gref_head) < 0) {
 			gnttab_request_free_callback(
 				&rinfo->callback,
 				blkif_restart_queue_callback,
 				rinfo,
-				max_grefs);
+				max_grefs - rinfo->persistent_gnts_c);
 			return 1;
 		}
+	}
 
 	/* Fill out a communications ring structure. */
 	id = blkif_ring_get_request(rinfo, req, &ring_req);
@@ -832,7 +835,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	if (unlikely(require_extra_req))
 		rinfo->shadow[extra_id].req = *extra_ring_req;
 
-	if (max_grefs > 0)
+	if (new_persistent_gnts)
 		gnttab_free_grant_references(setup.gref_head);
 
 	return 0;
@@ -906,8 +909,8 @@ out_err:
 	return BLK_STS_IOERR;
 
 out_busy:
-	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	blk_mq_stop_hw_queue(hctx);
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
 	return BLK_STS_RESOURCE;
 }
 
@@ -1616,7 +1619,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 		if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
 			printk(KERN_WARNING "blkfront: %s: %s op failed\n",
 			       info->gd->disk_name, op_name(bret->operation));
-			blkif_req(req)->error = -EOPNOTSUPP;
+			blkif_req(req)->error = BLK_STS_NOTSUPP;
 		}
 		if (unlikely(bret->status == BLKIF_RSP_ERROR &&
 			     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
@@ -2072,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
 		/*
 		 * Get the bios in the request so we can re-queue them.
 		 */
-		if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
-		    req_op(shadow[i].request) == REQ_OP_DISCARD ||
-		    req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+		if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+		    req_op(shadow[j].request) == REQ_OP_DISCARD ||
+		    req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
 		    shadow[j].request->cmd_flags & REQ_FUA) {
 			/*
 			 * Flush operations don't contain bios, so
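Editor's note: the blkfront hunks migrate the per-request error field from a raw errno int to the dedicated blk_status_t type, so values such as BLK_STS_NOTSUPP and BLK_STS_RESOURCE flow through the block layer unchanged. A stand-alone sketch of the convention; demo_to_blk_status() and its status codes are hypothetical:

#include <linux/blk_types.h>

static blk_status_t demo_to_blk_status(int backend_status)
{
	switch (backend_status) {
	case 0:
		return BLK_STS_OK;
	case 1:
		return BLK_STS_NOTSUPP;	/* operation not supported */
	default:
		return BLK_STS_IOERR;	/* generic I/O failure */
	}
}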
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 856d5dc02451..3b1b6340ba13 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -308,7 +308,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
 	struct device_attribute *attr, const char *buf, size_t len)
 {
 	struct zram *zram = dev_to_zram(dev);
-	char compressor[CRYPTO_MAX_ALG_NAME];
+	char compressor[ARRAY_SIZE(zram->compressor)];
 	size_t sz;
 
 	strlcpy(compressor, buf, sizeof(compressor));
@@ -327,7 +327,7 @@ static ssize_t comp_algorithm_store(struct device *dev,
 		return -EBUSY;
 	}
 
-	strlcpy(zram->compressor, compressor, sizeof(compressor));
+	strcpy(zram->compressor, compressor);
 	up_write(&zram->init_lock);
 	return len;
 }
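Editor's note: the zram fix sizes the temporary buffer from the destination field rather than from CRYPTO_MAX_ALG_NAME, so the final copy can never overflow zram->compressor. Sketch of the buffer-sizing idiom with hypothetical names:

#include <linux/kernel.h>
#include <linux/string.h>

struct demo { char name[16]; };

static void demo_set_name(struct demo *d, const char *buf)
{
	char tmp[ARRAY_SIZE(d->name)];

	strlcpy(tmp, buf, sizeof(tmp));	/* truncates to fit d->name */
	strcpy(d->name, tmp);		/* safe: tmp fits by construction */
}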
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index 1e6e0269edcc..f76be6bd6eb3 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -256,10 +256,23 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
 
 	uniphier_system_bus_set_reg(priv);
 
+	platform_set_drvdata(pdev, priv);
+
 	/* Now, the bus is configured. Populate platform_devices below it */
 	return of_platform_default_populate(dev->of_node, NULL, dev);
 }
 
+static int __maybe_unused uniphier_system_bus_resume(struct device *dev)
+{
+	uniphier_system_bus_set_reg(dev_get_drvdata(dev));
+
+	return 0;
+}
+
+static const struct dev_pm_ops uniphier_system_bus_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(NULL, uniphier_system_bus_resume)
+};
+
 static const struct of_device_id uniphier_system_bus_match[] = {
 	{ .compatible = "socionext,uniphier-system-bus" },
 	{ /* sentinel */ }
@@ -271,6 +284,7 @@ static struct platform_driver uniphier_system_bus_driver = {
 	.driver = {
 		.name = "uniphier-system-bus",
 		.of_match_table = uniphier_system_bus_match,
+		.pm = &uniphier_system_bus_pm_ops,
 	},
 };
 module_platform_driver(uniphier_system_bus_driver);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index afa3ce7d3e72..8ad92707e45f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1492,7 +1492,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
 	print_once = true;
 #endif
-	pr_notice("random: %s called from %pF with crng_init=%d\n",
+	pr_notice("random: %s called from %pS with crng_init=%d\n",
 		  func_name, caller, crng_init);
 }
 
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index c391a49aaaff..b4cf2f699a21 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -237,6 +237,18 @@ static int gemini_reset(struct reset_controller_dev *rcdev,
 			   BIT(GEMINI_RESET_CPU1) | BIT(id));
 }
 
+static int gemini_reset_assert(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	return 0;
+}
+
+static int gemini_reset_deassert(struct reset_controller_dev *rcdev,
+				 unsigned long id)
+{
+	return 0;
+}
+
 static int gemini_reset_status(struct reset_controller_dev *rcdev,
 			       unsigned long id)
 {
@@ -253,6 +265,8 @@ static int gemini_reset_status(struct reset_controller_dev *rcdev,
 
 static const struct reset_control_ops gemini_reset_ops = {
 	.reset = gemini_reset,
+	.assert = gemini_reset_assert,
+	.deassert = gemini_reset_deassert,
 	.status = gemini_reset_status,
 };
 
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index 43b0f2f08df2..9cdf9d5050ac 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -22,6 +22,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/soc/ti/ti_sci_protocol.h>
+#include <linux/bsearch.h>
 
 #define SCI_CLK_SSC_ENABLE		BIT(0)
 #define SCI_CLK_ALLOW_FREQ_CHANGE	BIT(1)
@@ -44,6 +45,7 @@ struct sci_clk_data {
 * @dev:	Device pointer for the clock provider
 * @clk_data:	Clock data
 * @clocks:	Clocks array for this device
+ * @num_clocks: Total number of clocks for this provider
 */
 struct sci_clk_provider {
 	const struct ti_sci_handle *sci;
@@ -51,6 +53,7 @@ struct sci_clk_provider {
 	struct device *dev;
 	const struct sci_clk_data *clk_data;
 	struct clk_hw **clocks;
+	int num_clocks;
 };
 
 /**
@@ -58,7 +61,6 @@ struct sci_clk_provider {
 * @hw:		Hardware clock cookie for common clock framework
 * @dev_id:	Device index
 * @clk_id:	Clock index
- * @node:	Clocks list link
 * @provider:	Master clock provider
 * @flags:	Flags for the clock
 */
@@ -66,7 +68,6 @@ struct sci_clk {
 	struct clk_hw hw;
 	u16 dev_id;
 	u8 clk_id;
-	struct list_head node;
 	struct sci_clk_provider *provider;
 	u8 flags;
 };
@@ -367,6 +368,19 @@ err:
 	return &sci_clk->hw;
 }
 
+static int _cmp_sci_clk(const void *a, const void *b)
+{
+	const struct sci_clk *ca = a;
+	const struct sci_clk *cb = *(struct sci_clk **)b;
+
+	if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
+		return 0;
+	if (ca->dev_id > cb->dev_id ||
+	    (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
+		return 1;
+	return -1;
+}
+
 /**
 * sci_clk_get - Xlate function for getting clock handles
 * @clkspec:	device tree clock specifier
@@ -380,29 +394,22 @@ err:
 static struct clk_hw *sci_clk_get(struct of_phandle_args *clkspec, void *data)
 {
 	struct sci_clk_provider *provider = data;
-	u16 dev_id;
-	u8 clk_id;
-	const struct sci_clk_data *clks = provider->clk_data;
-	struct clk_hw **clocks = provider->clocks;
+	struct sci_clk **clk;
+	struct sci_clk key;
 
 	if (clkspec->args_count != 2)
 		return ERR_PTR(-EINVAL);
 
-	dev_id = clkspec->args[0];
-	clk_id = clkspec->args[1];
+	key.dev_id = clkspec->args[0];
+	key.clk_id = clkspec->args[1];
 
-	while (clks->num_clks) {
-		if (clks->dev == dev_id) {
-			if (clk_id >= clks->num_clks)
-				return ERR_PTR(-EINVAL);
-
-			return clocks[clk_id];
-		}
+	clk = bsearch(&key, provider->clocks, provider->num_clocks,
		      sizeof(clk), _cmp_sci_clk);
 
-		clks++;
-	}
+	if (!clk)
+		return ERR_PTR(-ENODEV);
 
-	return ERR_PTR(-ENODEV);
+	return &(*clk)->hw;
 }
 
 static int ti_sci_init_clocks(struct sci_clk_provider *p)
@@ -410,18 +417,29 @@ static int ti_sci_init_clocks(struct sci_clk_provider *p)
 	const struct sci_clk_data *data = p->clk_data;
 	struct clk_hw *hw;
 	int i;
+	int num_clks = 0;
 
 	while (data->num_clks) {
-		p->clocks = devm_kcalloc(p->dev, data->num_clks,
-					 sizeof(struct sci_clk),
-					 GFP_KERNEL);
-		if (!p->clocks)
-			return -ENOMEM;
+		num_clks += data->num_clks;
+		data++;
+	}
 
+	p->num_clocks = num_clks;
+
+	p->clocks = devm_kcalloc(p->dev, num_clks, sizeof(struct sci_clk),
+				 GFP_KERNEL);
+	if (!p->clocks)
+		return -ENOMEM;
+
+	num_clks = 0;
+
+	data = p->clk_data;
+
+	while (data->num_clks) {
 		for (i = 0; i < data->num_clks; i++) {
 			hw = _sci_clk_build(p, data->dev, i);
 			if (!IS_ERR(hw)) {
-				p->clocks[i] = hw;
+				p->clocks[num_clks++] = hw;
 				continue;
 			}
 
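Editor's note: sci_clk_get() above now binary-searches a flat, sorted array of struct sci_clk pointers. The asymmetric casts in _cmp_sci_clk() are the subtle part: bsearch() passes the key pointer through as given, but hands each array slot by address, and the slots themselves hold pointers, hence the extra dereference on the second argument. Stand-alone sketch of the same idiom; struct item, cmp_item() and find_item() are hypothetical:

#include <linux/bsearch.h>

struct item { int id; };

static int cmp_item(const void *a, const void *b)
{
	const struct item *key = a;			/* &key passed as-is */
	const struct item *elem = *(struct item **)b;	/* slot holds a pointer */

	return key->id - elem->id;
}

/* items[] must already be sorted by id for bsearch() to be valid. */
static struct item *find_item(struct item **items, size_t n, int id)
{
	struct item key = { .id = id };
	struct item **slot = bsearch(&key, items, n, sizeof(*items), cmp_item);

	return slot ? *slot : NULL;
}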
diff --git a/drivers/clk/meson/clk-mpll.c b/drivers/clk/meson/clk-mpll.c
index 39eab69fe51a..44a5a535ca63 100644
--- a/drivers/clk/meson/clk-mpll.c
+++ b/drivers/clk/meson/clk-mpll.c
@@ -161,6 +161,13 @@ static int mpll_set_rate(struct clk_hw *hw,
 	reg = PARM_SET(p->width, p->shift, reg, 1);
 	writel(reg, mpll->base + p->reg_off);
 
+	p = &mpll->ssen;
+	if (p->width != 0) {
+		reg = readl(mpll->base + p->reg_off);
+		reg = PARM_SET(p->width, p->shift, reg, 1);
+		writel(reg, mpll->base + p->reg_off);
+	}
+
 	p = &mpll->n2;
 	reg = readl(mpll->base + p->reg_off);
 	reg = PARM_SET(p->width, p->shift, reg, n2);
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index d6feafe8bd6c..1629da9b4141 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -118,6 +118,7 @@ struct meson_clk_mpll {
 	struct parm sdm_en;
 	struct parm n2;
 	struct parm en;
+	struct parm ssen;
 	spinlock_t *lock;
 };
 
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index a897ea45327c..a7ea5f3da89d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -528,6 +528,11 @@ static struct meson_clk_mpll gxbb_mpll0 = {
 		.shift   = 14,
 		.width   = 1,
 	},
+	.ssen = {
+		.reg_off = HHI_MPLL_CNTL,
+		.shift   = 25,
+		.width   = 1,
+	},
 	.lock = &clk_lock,
 	.hw.init = &(struct clk_init_data){
 		.name = "mpll0",
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index bb3f1de876b1..6ec512ad2598 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -267,6 +267,11 @@ static struct meson_clk_mpll meson8b_mpll0 = {
 		.shift   = 14,
 		.width   = 1,
 	},
+	.ssen = {
+		.reg_off = HHI_MPLL_CNTL,
+		.shift   = 25,
+		.width   = 1,
+	},
 	.lock = &clk_lock,
 	.hw.init = &(struct clk_init_data){
 		.name = "mpll0",
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 0748a0b333c5..9a6476aa7d81 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1283,16 +1283,16 @@ static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __ini
 static const struct samsung_pll_rate_table exynos5420_epll_24mhz_tbl[] = {
 	PLL_36XX_RATE(600000000U, 100, 2, 1, 0),
 	PLL_36XX_RATE(400000000U, 200, 3, 2, 0),
-	PLL_36XX_RATE(393216000U, 197, 3, 2, 25690),
-	PLL_36XX_RATE(361267200U, 301, 5, 2, 3671),
+	PLL_36XX_RATE(393216003U, 197, 3, 2, -25690),
+	PLL_36XX_RATE(361267218U, 301, 5, 2, 3671),
 	PLL_36XX_RATE(200000000U, 200, 3, 3, 0),
-	PLL_36XX_RATE(196608000U, 197, 3, 3, -25690),
-	PLL_36XX_RATE(180633600U, 301, 5, 3, 3671),
-	PLL_36XX_RATE(131072000U, 131, 3, 3, 4719),
+	PLL_36XX_RATE(196608001U, 197, 3, 3, -25690),
+	PLL_36XX_RATE(180633609U, 301, 5, 3, 3671),
+	PLL_36XX_RATE(131072006U, 131, 3, 3, 4719),
 	PLL_36XX_RATE(100000000U, 200, 3, 4, 0),
-	PLL_36XX_RATE(65536000U, 131, 3, 4, 4719),
-	PLL_36XX_RATE(49152000U, 197, 3, 5, 25690),
-	PLL_36XX_RATE(32768000U, 131, 3, 5, 4719),
+	PLL_36XX_RATE( 65536003U, 131, 3, 4, 4719),
+	PLL_36XX_RATE( 49152000U, 197, 3, 5, -25690),
+	PLL_36XX_RATE( 32768001U, 131, 3, 5, 4719),
 };
 
 static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index 5372bf8be5e6..31d7ffda9aab 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -184,7 +184,7 @@ static struct ccu_mux cpu_clk = {
 		.hw.init	= CLK_HW_INIT_PARENTS("cpu",
 						      cpu_parents,
 						      &ccu_mux_ops,
-						      CLK_IS_CRITICAL),
+						      CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
 	}
 };

diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index f99abc1106f0..08ef69945ffb 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -186,6 +186,13 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
 	pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
 	spin_lock_init(&pclk->lock);

+	/*
+	 * If the clock was already enabled by the firmware mark it as critical
+	 * to avoid it being gated by the clock framework if no driver owns it.
+	 */
+	if (plt_clk_is_enabled(&pclk->hw))
+		init.flags |= CLK_IS_CRITICAL;
+
 	ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
 	if (ret) {
 		pclk = ERR_PTR(ret);
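The clk-pmc-atom hunk shows a general clk-provider pattern: a clock that firmware left running is flagged CLK_IS_CRITICAL before registration, so a later clk_disable_unused() pass cannot gate it. A minimal sketch of the same idea, using the generic clk_hw_is_enabled() helper rather than this driver's plt_clk_is_enabled():

    #include <linux/clk-provider.h>

    /* Sketch: keep a firmware-enabled clock running past late init. */
    static void mark_critical_if_running(struct clk_hw *hw,
                                         struct clk_init_data *init)
    {
            /* CLK_IS_CRITICAL makes the framework hold an enable count */
            if (clk_hw_is_enabled(hw))
                    init->flags |= CLK_IS_CRITICAL;
    }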
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fcae5ca6ac92..54a67f8a28eb 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -262,7 +262,7 @@ config CLKSRC_LPC32XX

 config CLKSRC_PISTACHIO
 	bool "Clocksource for Pistachio SoC" if COMPILE_TEST
-	depends on HAS_IOMEM
+	depends on GENERIC_CLOCKEVENTS && HAS_IOMEM
 	select TIMER_OF
 	help
 	  Enables the clocksource for the Pistachio SoC.
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index aae87c4c546e..72bbfccef113 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1440,7 +1440,7 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count)
 	 * While unlikely, it's theoretically possible that none of the frames
 	 * in a timer expose the combination of feature we want.
 	 */
-	for (i = i; i < timer_count; i++) {
+	for (i = 0; i < timer_count; i++) {
 		timer = &timers[i];

 		frame = arch_timer_mem_find_best_frame(timer);
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index bc48cbf6a795..269db74a0658 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -305,7 +305,7 @@ static int em_sti_probe(struct platform_device *pdev)
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(&pdev->dev, "failed to get irq\n");
-		return -EINVAL;
+		return irq;
 	}

 	/* map memory, let base point to the STI instance */
@@ -314,11 +314,12 @@ static int em_sti_probe(struct platform_device *pdev)
 	if (IS_ERR(p->base))
 		return PTR_ERR(p->base);

-	if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
-			     IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
-			     dev_name(&pdev->dev), p)) {
+	ret = devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
+			       IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+			       dev_name(&pdev->dev), p);
+	if (ret) {
 		dev_err(&pdev->dev, "failed to request low IRQ\n");
-		return -ENOENT;
+		return ret;
 	}

 	/* get hold of clock */
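Both em_sti fixes follow the same rule: never replace a provider's error code with an invented one, because callers key off specific values (-EPROBE_DEFER in particular). A sketch of the rule, with my_handler standing in for any interrupt handler:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    /* Sketch: propagate the real error codes, do not flatten them. */
    static int request_timer_irq(struct platform_device *pdev,
                                 irq_handler_t my_handler, void *data)
    {
            int irq, ret;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* may be -EPROBE_DEFER, not -EINVAL */

            ret = devm_request_irq(&pdev->dev, irq, my_handler, IRQF_TIMER,
                                   dev_name(&pdev->dev), data);
            return ret;             /* keep the real reason on failure */
    }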
diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c
index f6e7491c873c..4d7aef9d9c15 100644
--- a/drivers/clocksource/timer-of.c
+++ b/drivers/clocksource/timer-of.c
@@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np,
 	struct timer_of *to = container_of(of_irq, struct timer_of, of_irq);
 	struct clock_event_device *clkevt = &to->clkevt;

-	of_irq->irq = of_irq->name ? of_irq_get_byname(np, of_irq->name):
-				     irq_of_parse_and_map(np, of_irq->index);
+	if (of_irq->name) {
+		of_irq->irq = ret = of_irq_get_byname(np, of_irq->name);
+		if (ret < 0) {
+			pr_err("Failed to get interrupt %s for %s\n",
+			       of_irq->name, np->full_name);
+			return ret;
+		}
+	} else {
+		of_irq->irq = irq_of_parse_and_map(np, of_irq->index);
+	}
 	if (!of_irq->irq) {
 		pr_err("Failed to map interrupt for %s\n", np->full_name);
 		return -EINVAL;
@@ -120,9 +128,9 @@ static __init int timer_base_init(struct device_node *np,
 	const char *name = of_base->name ? of_base->name : np->full_name;

 	of_base->base = of_io_request_and_map(np, of_base->index, name);
-	if (!of_base->base) {
+	if (IS_ERR(of_base->base)) {
 		pr_err("Failed to iomap (%s)\n", name);
-		return -ENXIO;
+		return PTR_ERR(of_base->base);
 	}

 	return 0;
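The timer_base_init() fix matters because of_io_request_and_map() reports failure through ERR_PTR(), never NULL, so the old !base test could never fire and the hard-coded -ENXIO hid the real cause. A sketch of the correct decode:

    #include <linux/err.h>
    #include <linux/of_address.h>

    /* Sketch: ERR_PTR-returning mappers are checked with IS_ERR(). */
    static int map_timer_base(struct device_node *np, void __iomem **base)
    {
            *base = of_io_request_and_map(np, 0, np->full_name);
            if (IS_ERR(*base))
                    return PTR_ERR(*base);  /* e.g. -EBUSY or -ENOMEM */
            return 0;
    }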
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b7fb8b7c980d..65ee4fcace1f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,6 +225,9 @@ struct global_params {
  * @vid:		Stores VID limits for this CPU
  * @pid:		Stores PID parameters for this CPU
  * @last_sample_time:	Last Sample time
+ * @aperf_mperf_shift:	Number of clock cycles after which aperf/mperf is
+ *			incremented. This shift is a multiplier to the mperf
+ *			delta when calculating CPU busy.
  * @prev_aperf:		Last APERF value read from APERF MSR
  * @prev_mperf:		Last MPERF value read from MPERF MSR
  * @prev_tsc:		Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {

 	u64	last_update;
 	u64	last_sample_time;
+	u64	aperf_mperf_shift;
 	u64	prev_aperf;
 	u64	prev_mperf;
 	u64	prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
+	int (*get_aperf_mperf_shift)(void);
 	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 	void (*update_util)(struct update_util_data *data, u64 time,
@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	return val;
 }

+static int knl_get_aperf_mperf_shift(void)
+{
+	return 10;
+}
+
 static int knl_get_turbo_pstate(void)
 {
 	u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
 	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

+	if (pstate_funcs.get_aperf_mperf_shift)
+		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);

@@ -1600,8 +1613,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)

 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-	return mul_ext_fp(cpu->sample.core_avg_perf,
-			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+	return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
 }

 static inline int32_t get_avg_pstate(struct cpudata *cpu)
@@ -1616,7 +1628,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	int32_t busy_frac, boost;
 	int target, avg_pstate;

-	busy_frac = div_fp(sample->mperf, sample->tsc);
+	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+			   sample->tsc);

 	boost = cpu->iowait_boost;
 	cpu->iowait_boost >>= 1;
@@ -1675,7 +1688,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
 		perf_scaled = mul_fp(perf_scaled, sample_ratio);
 	} else {
-		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+		sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
+				      cpu->sample.tsc);
 		if (sample_ratio < int_tofp(1))
 			perf_scaled = 0;
 	}
@@ -1807,6 +1821,7 @@ static const struct pstate_funcs knl_funcs = {
 	.get_max_physical = core_get_max_pstate_physical,
 	.get_min = core_get_min_pstate,
 	.get_turbo = knl_get_turbo_pstate,
+	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
 	.get_scaling = core_get_scaling,
 	.get_val = core_get_val,
 	.update_util = intel_pstate_update_util_pid,
@@ -1906,13 +1921,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	return 0;
 }

-static unsigned int intel_pstate_get(unsigned int cpu_num)
-{
-	struct cpudata *cpu = all_cpu_data[cpu_num];
-
-	return cpu ? get_avg_frequency(cpu) : 0;
-}
-
 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
 {
 	struct cpudata *cpu = all_cpu_data[cpu_num];
@@ -2153,7 +2161,6 @@ static struct cpufreq_driver intel_pstate = {
 	.setpolicy	= intel_pstate_set_policy,
 	.suspend	= intel_pstate_hwp_save_state,
 	.resume		= intel_pstate_resume,
-	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
@@ -2403,6 +2410,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_val   = funcs->get_val;
 	pstate_funcs.get_vid   = funcs->get_vid;
 	pstate_funcs.update_util = funcs->update_util;
+	pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;

 	intel_pstate_use_acpi_profile();
 }
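The intel_pstate series exists because on Knights Landing MPERF ticks only once per 2^10 TSC cycles, so an unscaled mperf/tsc ratio under-reports utilization by roughly three orders of magnitude. A standalone arithmetic sketch with invented sample deltas:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mperf_delta = 1500;    /* invented sample */
            uint64_t tsc_delta = 2048000;   /* invented sample */
            unsigned int shift = 10;        /* knl_get_aperf_mperf_shift() */

            /* as in div_fp(100 * (mperf << shift), tsc) */
            uint64_t busy_pct = 100 * (mperf_delta << shift) / tsc_delta;

            printf("busy ~= %llu%%\n", (unsigned long long)busy_pct);
            return 0;
    }

With these numbers the scaled ratio reports 75% busy; without the shift it would report 0%.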
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 37b0698b7193..42896a67aeae 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -235,6 +235,7 @@ static inline int validate_dt_prop_sizes(const char *prop1, int prop1_len,
 	return -1;
 }

+extern u32 pnv_get_supported_cpuidle_states(void);
 static int powernv_add_idle_states(void)
 {
 	struct device_node *power_mgt;
@@ -248,6 +249,8 @@ static int powernv_add_idle_states(void)
 	const char *names[CPUIDLE_STATE_MAX];
 	u32 has_stop_states = 0;
 	int i, rc;
+	u32 supported_flags = pnv_get_supported_cpuidle_states();
+

 	/* Currently we have snooze statically defined */

@@ -362,6 +365,13 @@ static int powernv_add_idle_states(void)
 	for (i = 0; i < dt_idle_states; i++) {
 		unsigned int exit_latency, target_residency;
 		bool stops_timebase = false;
+
+		/*
+		 * Skip the platform idle state whose flag isn't in
+		 * the supported_cpuidle_states flag mask.
+		 */
+		if ((flags[i] & supported_flags) != flags[i])
+			continue;
 		/*
 		 * If an idle state has exit latency beyond
 		 * POWERNV_THRESHOLD_LATENCY_NS then don't use it
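The skip test added here is the standard subset check: a state is usable only when every flag it requires is present in the supported mask. Standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: true iff state_flags is a subset of supported_flags. */
    static bool state_supported(uint32_t state_flags, uint32_t supported_flags)
    {
            return (state_flags & supported_flags) == state_flags;
    }

For example, with supported_flags = 0x3, a state needing 0x5 is rejected because bit 2 is not offered by the platform.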
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 193204dfbf3a..4b75084fabad 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -655,7 +655,7 @@ source "drivers/crypto/virtio/Kconfig"
 config CRYPTO_DEV_BCM_SPU
 	tristate "Broadcom symmetric crypto/hash acceleration support"
 	depends on ARCH_BCM_IPROC
-	depends on BCM_PDC_MBOX
+	depends on MAILBOX
 	default m
 	select CRYPTO_DES
 	select CRYPTO_MD5
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index ef04c9748317..bf7ac621c591 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -302,6 +302,7 @@ spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode,
 		break;
 	case HASH_ALG_SHA3_512:
 		*spu2_type = SPU2_HASH_TYPE_SHA3_512;
+		break;
 	case HASH_ALG_LAST:
 	default:
 		err = -EINVAL;
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index ae44a464cd2d..9ccefb9b7232 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -18,8 +18,9 @@
 #define SE_GROUP 0

 #define DRIVER_VERSION "1.0"
+#define FW_DIR "cavium/"
 /* SE microcode */
-#define SE_FW	"cnn55xx_se.fw"
+#define SE_FW	FW_DIR "cnn55xx_se.fw"

 static const char nitrox_driver_name[] = "CNN55XX";

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index e7f87ac12685..1fabd4aee81b 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -773,7 +773,6 @@ static int safexcel_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct resource *res;
 	struct safexcel_crypto_priv *priv;
-	u64 dma_mask;
 	int i, ret;

 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -802,9 +801,7 @@ static int safexcel_probe(struct platform_device *pdev)
 		return -EPROBE_DEFER;
 	}

-	if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
-		dma_mask = DMA_BIT_MASK(64);
-	ret = dma_set_mask_and_coherent(dev, dma_mask);
+	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 	if (ret)
 		goto err_clk;

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 8527a5899a2f..3f819399cd95 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -883,10 +883,7 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 	if (ret)
 		return ret;

-	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
-	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
-
-	for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
 		if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
 		    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
 			ctx->base.needs_inv = true;
@@ -894,6 +891,9 @@ static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
 		}
 	}

+	memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+	memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
 	return 0;
 }

diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 427cbe012729..dadc4a808df5 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1073,7 +1073,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 					    &crypt->icv_rev_aes);
 	if (unlikely(!req_ctx->hmac_virt))
-		goto free_buf_src;
+		goto free_buf_dst;
 	if (!encrypt) {
 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
 					 req->src, cryptlen, authsize, 0);
@@ -1088,10 +1088,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;

-free_buf_src:
-	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dst:
 	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
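The ixp4xx fix is about goto-unwind ordering: labels must release resources newest-first, and a jump taken after the destination chain was built but before the HMAC buffer existed must not free something that was never allocated. A generic sketch with plain malloc()/free() standing in for the DMA pools:

    #include <stdlib.h>

    /* Sketch: each label undoes only what succeeded before the jump. */
    static int setup(void **a, void **b, void **c)
    {
            *a = malloc(16);
            if (!*a)
                    return -1;
            *b = malloc(16);
            if (!*b)
                    goto free_a;
            *c = malloc(16);
            if (!*c)
                    goto free_b;    /* falls through to free_a below */
            return 0;

    free_b:
            free(*b);
    free_a:
            free(*a);
            return -1;
    }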
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
index fdcd9769ffde..688b051750bd 100644
--- a/drivers/dax/device-dax.h
+++ b/drivers/dax/device-dax.h
@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
 		int region_id, struct resource *res, unsigned int align,
 		void *addr, unsigned long flags);
 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
-		struct resource *res, int count);
+		int id, struct resource *res, int count);
 #endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 12943d19bfc4..e9f3b3e4bbf4 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev)
 	struct dax_region *dax_region = dev_dax->region;
 	struct dax_device *dax_dev = dev_dax->dax_dev;

-	ida_simple_remove(&dax_region->ida, dev_dax->id);
+	if (dev_dax->id >= 0)
+		ida_simple_remove(&dax_region->ida, dev_dax->id);
 	dax_region_put(dax_region);
 	put_dax(dax_dev);
 	kfree(dev_dax);
@@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev)
 }

 struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
-		struct resource *res, int count)
+		int id, struct resource *res, int count)
 {
 	struct device *parent = dax_region->dev;
 	struct dax_device *dax_dev;
@@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	struct inode *inode;
 	struct device *dev;
 	struct cdev *cdev;
-	int rc = 0, i;
+	int rc, i;
+
+	if (!count)
+		return ERR_PTR(-EINVAL);

 	dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL);
 	if (!dev_dax)
@@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	if (i < count)
 		goto err_id;

-	dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
-	if (dev_dax->id < 0) {
-		rc = dev_dax->id;
-		goto err_id;
+	if (id < 0) {
+		id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+		dev_dax->id = id;
+		if (id < 0) {
+			rc = id;
+			goto err_id;
+		}
+	} else {
+		/* region provider owns @id lifetime */
+		dev_dax->id = -1;
 	}

 	/*
@@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	 * device outside of mmap of the resulting character device.
 	 */
 	dax_dev = alloc_dax(dev_dax, NULL, NULL);
-	if (!dax_dev)
+	if (!dax_dev) {
+		rc = -ENOMEM;
 		goto err_dax;
+	}

 	/* from here on we're committed to teardown via dax_dev_release() */
 	dev = &dev_dax->dev;
@@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	dev->parent = parent;
 	dev->groups = dax_attribute_groups;
 	dev->release = dev_dax_release;
-	dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
+	dev_set_name(dev, "dax%d.%d", dax_region->id, id);

 	rc = cdev_device_add(cdev, dev);
 	if (rc) {
@@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
 	return dev_dax;

  err_dax:
-	ida_simple_remove(&dax_region->ida, dev_dax->id);
+	if (dev_dax->id >= 0)
+		ida_simple_remove(&dax_region->ida, dev_dax->id);
  err_id:
 	kfree(dev_dax);

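devm_create_dev_dax() now distinguishes ids it allocated (released on teardown) from caller-supplied ids, recording -1 as a "not mine" sentinel. A sketch of the convention; my_assign_id is illustrative, not a kernel API:

    #include <linux/idr.h>

    /* Sketch: id < 0 on entry means "allocate"; stored -1 means "caller owns". */
    static int my_assign_id(struct ida *ida, int requested, int *stored)
    {
            if (requested >= 0) {
                    *stored = -1;           /* provider owns the lifetime */
                    return requested;
            }
            requested = ida_simple_get(ida, 0, 0, GFP_KERNEL);
            if (requested >= 0)
                    *stored = requested;    /* ours: ida_simple_remove() later */
            return requested;
    }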
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 9f2a0b4fd801..8d8c852ba8f2 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)

 static int dax_pmem_probe(struct device *dev)
 {
-	int rc;
 	void *addr;
 	struct resource res;
+	int rc, id, region_id;
 	struct nd_pfn_sb *pfn_sb;
 	struct dev_dax *dev_dax;
 	struct dax_pmem *dax_pmem;
-	struct nd_region *nd_region;
 	struct nd_namespace_io *nsio;
 	struct dax_region *dax_region;
 	struct nd_namespace_common *ndns;
@@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev)
 	/* adjust the dax_region resource to the start of data */
 	res.start += le64_to_cpu(pfn_sb->dataoff);

-	nd_region = to_nd_region(dev->parent);
-	dax_region = alloc_dax_region(dev, nd_region->id, &res,
+	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
+	if (rc != 2)
+		return -EINVAL;
+
+	dax_region = alloc_dax_region(dev, region_id, &res,
 			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
 	if (!dax_region)
 		return -ENOMEM;

 	/* TODO: support for subdividing a dax region... */
-	dev_dax = devm_create_dev_dax(dax_region, &res, 1);
+	dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);

 	/* child dev_dax instances now own the lifetime of the dax_region */
 	dax_region_put(dax_region);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index ce9e563e6e1d..938eb4868f7f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -278,6 +278,12 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc)
 }
 EXPORT_SYMBOL_GPL(dax_write_cache);

+bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+	return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+
 bool dax_alive(struct dax_device *dax_dev)
 {
 	lockdep_assert_held(&dax_srcu);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 57da14c15987..56e0a0e1b600 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 	if (WARN_ON(!fence))
 		return -EINVAL;

-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		ret = -EINVAL;

@@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence)
 		 * we might have raced with the unlocked dma_fence_signal,
 		 * still run through all callbacks
 		 */
-	} else
+	} else {
+		fence->timestamp = ktime_get();
+		set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 		trace_dma_fence_signaled(fence);
+	}

 	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
 		list_del_init(&cur->node);
@@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence)
 	if (!fence)
 		return -EINVAL;

-	if (!ktime_to_ns(fence->timestamp)) {
-		fence->timestamp = ktime_get();
-		smp_mb__before_atomic();
-	}
-
 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return -EINVAL;

+	fence->timestamp = ktime_get();
+	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
 	trace_dma_fence_signaled(fence);

 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
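The dma-fence rework moves timestamp assignment after the SIGNALED bit is won and publishes it with a second TIMESTAMP bit, so readers never consume a half-written value. A simplified userspace model of the protocol using C11 atomics (a sketch only; the kernel uses set_bit()/test_bit() with their own ordering rules):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct fence {
            atomic_bool signaled;
            atomic_bool timestamp_valid;
            long long timestamp_ns;
    };

    static bool fence_signal(struct fence *f, long long now_ns)
    {
            if (atomic_exchange(&f->signaled, true))
                    return false;           /* lost the race: do not touch it */
            f->timestamp_ns = now_ns;       /* only the winner writes */
            atomic_store(&f->timestamp_valid, true); /* readers gate on this */
            return true;
    }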
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 82a6e7f6d37f..59a3b2f8ee91 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s,
 		   show ? "_" : "",
 		   sync_status_str(status));

-	if (status) {
+	if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
 		struct timespec64 ts64 =
 			ktime_to_timespec64(fence->timestamp);

diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index 545e2c5c4815..66fb40d0ebdb 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
 	struct sync_file *sync_file = file->private_data;

-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
 	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)

 	poll_wait(file, &sync_file->wq, wait);

-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
@@ -391,7 +392,13 @@ static void sync_fill_fence_info(struct dma_fence *fence,
 		sizeof(info->driver_name));

 	info->status = dma_fence_get_status(fence);
-	info->timestamp_ns = ktime_to_ns(fence->timestamp);
+	while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+	       !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+		cpu_relax();
+	info->timestamp_ns =
+		test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
+		ktime_to_ns(fence->timestamp) :
+		ktime_set(0, 0);
 }

 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b10cbaa82ff5..b26256f23d67 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
 		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);

 		tdc->irq = of_irq_get(pdev->dev.of_node, i);
-		if (tdc->irq < 0) {
-			ret = tdc->irq;
+		if (tdc->irq <= 0) {
+			ret = tdc->irq ?: -ENXIO;
 			goto irq_dispose;
 		}

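The tegra210-adma fix handles of_irq_get()'s third outcome: besides a positive IRQ number and a negative error it can return 0 for "no mapping", which must be converted to an error. The GNU ?: extension used here keeps a nonzero value and substitutes a fallback otherwise:

    #include <errno.h>

    /* Sketch: GNU "x ?: y" yields x when nonzero, else y. */
    static int irq_or_errno(int irq)
    {
            return irq ?: -ENXIO;   /* 0 ("no mapping") becomes -ENXIO */
    }

Negative error codes and positive IRQ numbers both pass through unchanged; only the ambiguous zero is rewritten.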
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index a485864cb512..06432d84cbf8 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x)
 	return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT;
 }

-static const uint32_t fsi_slave_smode(int id)
+static uint32_t fsi_slave_smode(int id)
 {
 	return FSI_SMODE_WSC | FSI_SMODE_ECRC
 		| fsi_smode_sid(id)
@@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = {
 };
 EXPORT_SYMBOL_GPL(fsi_bus_type);

-static int fsi_init(void)
+static int __init fsi_init(void)
 {
 	return bus_register(&fsi_bus_type);
 }
+postcore_initcall(fsi_init);

 static void fsi_exit(void)
 {
 	bus_unregister(&fsi_bus_type);
 }
-
-module_init(fsi_init);
 module_exit(fsi_exit);
 module_param(discard_errors, int, 0664);
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f235eae04c16..461d6fc3688b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -504,6 +504,7 @@ config GPIO_XGENE_SB
 	depends on ARCH_XGENE && OF_GPIO
 	select GPIO_GENERIC
 	select GPIOLIB_IRQCHIP
+	select IRQ_DOMAIN_HIERARCHY
 	help
 	  This driver supports the GPIO block within the APM X-Gene
 	  Standby Domain. Say yes here to enable the GPIO functionality.
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index fb8d304cfa17..0ecd2369c2ca 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -132,7 +132,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
 	if (!p)
 		return -ENOMEM;

-	ret = device_property_read_u32(&pdev->dev, "linux,first-pin",
+	ret = device_property_read_u32(&pdev->dev, "exar,first-pin",
 				       &first_pin);
 	if (ret)
 		return ret;
diff --git a/drivers/gpio/gpio-lp87565.c b/drivers/gpio/gpio-lp87565.c
index 6313c50bb91b..a121c8f10610 100644
--- a/drivers/gpio/gpio-lp87565.c
+++ b/drivers/gpio/gpio-lp87565.c
@@ -26,6 +26,27 @@ struct lp87565_gpio {
 	struct regmap *map;
 };

+static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+	int ret, val;
+
+	ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
+	if (ret < 0)
+		return ret;
+
+	return !!(val & BIT(offset));
+}
+
+static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
+			     int value)
+{
+	struct lp87565_gpio *gpio = gpiochip_get_data(chip);
+
+	regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
+			   BIT(offset), value ? BIT(offset) : 0);
+}
+
 static int lp87565_gpio_get_direction(struct gpio_chip *chip,
 				      unsigned int offset)
 {
@@ -54,30 +75,11 @@ static int lp87565_gpio_direction_output(struct gpio_chip *chip,
 {
 	struct lp87565_gpio *gpio = gpiochip_get_data(chip);

+	lp87565_gpio_set(chip, offset, value);
+
 	return regmap_update_bits(gpio->map,
 				  LP87565_REG_GPIO_CONFIG,
-				  BIT(offset), !value ? BIT(offset) : 0);
-}
-
-static int lp87565_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
-	struct lp87565_gpio *gpio = gpiochip_get_data(chip);
-	int ret, val;
-
-	ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val);
-	if (ret < 0)
-		return ret;
-
-	return !!(val & BIT(offset));
-}
-
-static void lp87565_gpio_set(struct gpio_chip *chip, unsigned int offset,
-			     int value)
-{
-	struct lp87565_gpio *gpio = gpiochip_get_data(chip);
-
-	regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT,
-			   BIT(offset), value ? BIT(offset) : 0);
+				  BIT(offset), BIT(offset));
 }

 static int lp87565_gpio_request(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index e338c3743562..45c65f805fd6 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
 	edge_cause = mvebu_gpio_read_edge_cause(mvchip);
 	edge_mask  = mvebu_gpio_read_edge_mask(mvchip);

-	cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
+	cause = (data_in & level_mask) | (edge_cause & edge_mask);

 	for (i = 0; i < mvchip->chip.ngpio; i++) {
 		int irq;
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 3abea3f0b307..92692251ade1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -424,6 +424,9 @@ static int mxc_gpio_probe(struct platform_device *pdev)
 		return PTR_ERR(port->base);

 	port->irq_high = platform_get_irq(pdev, 1);
+	if (port->irq_high < 0)
+		port->irq_high = 0;
+
 	port->irq = platform_get_irq(pdev, 0);
 	if (port->irq < 0)
 		return port->irq;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 88529d3c06c9..506c6a67c5fc 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -360,7 +360,7 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
 {
 	int port;
 	int pin;
-	int unmasked = 0;
+	bool unmasked = false;
 	int gpio;
 	u32 lvl;
 	unsigned long sta;
@@ -384,8 +384,8 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
 		 * before executing the handler so that we don't
 		 * miss edges
 		 */
-		if (lvl & (0x100 << pin)) {
-			unmasked = 1;
+		if (!unmasked && lvl & (0x100 << pin)) {
+			unmasked = true;
 			chained_irq_exit(chip, desc);
 		}

diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 16fe9742597b..fc80add5fedb 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -2,6 +2,7 @@
 #include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
+#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
@@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
 };
 ATTRIBUTE_GROUPS(gpiochip);

+static struct gpio_desc *gpio_to_valid_desc(int gpio)
+{
+	return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
+}
+
 /*
  * /sys/class/gpio/export ... write-only
  *	integer N ... number of GPIO to export (full access)
@@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
 	if (status < 0)
 		goto done;

-	desc = gpio_to_desc(gpio);
+	desc = gpio_to_valid_desc(gpio);
 	/* reject invalid GPIOs */
 	if (!desc) {
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
 	if (status < 0)
 		goto done;

-	desc = gpio_to_desc(gpio);
+	desc = gpio_to_valid_desc(gpio);
 	/* reject bogus commands (gpio_unexport ignores them) */
 	if (!desc) {
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 9568708a550b..cd003b74512f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -704,24 +704,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
 {
 	struct lineevent_state *le = p;
 	struct gpioevent_data ge;
-	int ret;
+	int ret, level;

 	ge.timestamp = ktime_get_real_ns();
+	level = gpiod_get_value_cansleep(le->desc);

 	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
 	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
-		int level = gpiod_get_value_cansleep(le->desc);
-
 		if (level)
 			/* Emit low-to-high event */
 			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
 		else
 			/* Emit high-to-low event */
 			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
 		/* Emit low-to-high event */
 		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
 		/* Emit high-to-low event */
 		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
 	} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 5f8ada1d872b..37971d9402e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
 			.compute_vmid_bitmap = 0xFF00,
-			.num_mec = adev->gfx.mec.num_mec,
 			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
 			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe
 		};
@@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)

 		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
 		 * nbits is not compile time constant */
-		last_valid_bit = adev->gfx.mec.num_mec
+		last_valid_bit = 1 /* only first MEC can have compute queues */
 				* adev->gfx.mec.num_pipe_per_mec
 				* adev->gfx.mec.num_queue_per_pipe;
 		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index f621ee115c98..5e771bc11b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -198,12 +198,16 @@ amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
 	result = idr_find(&fpriv->bo_list_handles, id);

 	if (result) {
-		if (kref_get_unless_zero(&result->refcount))
+		if (kref_get_unless_zero(&result->refcount)) {
+			rcu_read_unlock();
 			mutex_lock(&result->lock);
-		else
+		} else {
+			rcu_read_unlock();
 			result = NULL;
+		}
+	} else {
+		rcu_read_unlock();
 	}
-	rcu_read_unlock();

 	return result;
 }
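amdgpu_bo_list_get() now drops the RCU read lock on every path before mutex_lock() runs, because mutex_lock() can sleep and sleeping inside an RCU read-side section is illegal. A sketch of the lookup idiom; struct my_obj is illustrative:

    #include <linux/idr.h>
    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    struct my_obj {
            struct kref ref;
            struct mutex lock;
    };

    /* Sketch: leave the RCU section before any sleeping lock. */
    static struct my_obj *my_lookup(struct idr *idr, int id)
    {
            struct my_obj *obj;

            rcu_read_lock();
            obj = idr_find(idr, id);
            if (obj && !kref_get_unless_zero(&obj->ref))
                    obj = NULL;     /* found, but already on its way out */
            rcu_read_unlock();

            if (obj)
                    mutex_lock(&obj->lock); /* may sleep: legal only here */
            return obj;
    }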
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 38f739fb727b..6558a3ed57a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -359,7 +359,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	head = bo->mn_list.next;

 	bo->mn = NULL;
-	list_del(&bo->mn_list);
+	list_del_init(&bo->mn_list);

 	if (list_empty(head)) {
 		struct amdgpu_mn_node *node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6899180b265..c586f44312f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 		struct dma_fence *f = e->fence;
 		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);

+		if (dma_fence_is_signaled(f)) {
+			hash_del(&e->node);
+			dma_fence_put(f);
+			kmem_cache_free(amdgpu_sync_slab, e);
+			continue;
+		}
 		if (ring && s_fence) {
 			/* For fences from the same ring it is sufficient
 			 * when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 			}
 		}

-		if (dma_fence_is_signaled(f)) {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
-		}
-
 		return f;
 	}

diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 18fd01f3e4b2..003a131bad47 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -1,24 +1,25 @@
-
 /*
-***************************************************************************************************
-*
-*  Trade secret of Advanced Micro Devices, Inc.
-*  Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
-*
-*  All rights reserved. This notice is intended as a precaution against inadvertent publication and
-*  does not imply publication or any waiver of confidentiality. The year included in the foregoing
-*  notice is the year of creation of the work.
-*
-***************************************************************************************************
-*/
-/**
-***************************************************************************************************
-* @brief gfx9 Clearstate Definitions
-***************************************************************************************************
-*
-*   Do not edit! This is a machine-generated file!
-*
-*/
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */

 static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3a0b69b09ed6..c9b9c88231aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1475,21 +1475,23 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)

 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
 {
-	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+	u32 data;

-	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (se_num == 0xffffffff) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+	if (instance == 0xffffffff)
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
+	else
+		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
+
+	if (se_num == 0xffffffff)
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
-	} else if (sh_num == 0xffffffff) {
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+	else
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	} else {
+
+	if (sh_num == 0xffffffff)
+		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
+	else
 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
-		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
-	}
+
 	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 }

diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index f45fb0f022b3..4267fa417997 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1385,6 +1385,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
 	case CHIP_VERDE:
 		amdgpu_program_register_sequence(adev,
 						 verde_golden_registers,
@@ -1409,6 +1410,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 oland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
 	case CHIP_HAINAN:
 		amdgpu_program_register_sequence(adev,
 						 hainan_golden_registers,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 88187bfc5ea3..3f95f7cb4019 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,

 	kfd->shared_resources = *gpu_resources;

-	/* We only use the first MEC */
-	if (kfd->shared_resources.num_mec > 1)
-		kfd->shared_resources.num_mec = 1;
-
 	/* calculate max size of mqds needed for queues */
 	size = max_num_of_queues_per_device *
 			kfd->device_info->mqd_size_aligned;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 955aa304ff48..602769ced3bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
 	return false;
 }

-unsigned int get_mec_num(struct device_queue_manager *dqm)
-{
-	BUG_ON(!dqm || !dqm->dev);
-
-	return dqm->dev->shared_resources.num_mec;
-}
-
 unsigned int get_queues_num(struct device_queue_manager *dqm)
 {
 	BUG_ON(!dqm || !dqm->dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 66b9615bc3c1..faf820a06400 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
 void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
 void program_sh_mem_settings(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd);
-unsigned int get_mec_num(struct device_queue_manager *dqm);
 unsigned int get_queues_num(struct device_queue_manager *dqm);
 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 91ef1484b3bb..36f376677a53 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources {
63 /* Bit n == 1 means VMID n is available for KFD. */ 63 /* Bit n == 1 means VMID n is available for KFD. */
64 unsigned int compute_vmid_bitmap; 64 unsigned int compute_vmid_bitmap;
65 65
66 /* number of mec available from the hardware */
67 uint32_t num_mec;
68
69 /* number of pipes per mec */ 66 /* number of pipes per mec */
70 uint32_t num_pipe_per_mec; 67 uint32_t num_pipe_per_mec;
71 68
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index d6f097f44b6c..197174e562d2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2128,15 +2128,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2128 pp_table->AvfsGbCksOff.m2_shift = 12; 2128 pp_table->AvfsGbCksOff.m2_shift = 12;
2129 pp_table->AvfsGbCksOff.b_shift = 0; 2129 pp_table->AvfsGbCksOff.b_shift = 0;
2130 2130
2131 for (i = 0; i < dep_table->count; i++) { 2131 for (i = 0; i < dep_table->count; i++)
2132 if (dep_table->entries[i].sclk_offset == 0) 2132 pp_table->StaticVoltageOffsetVid[i] =
2133 pp_table->StaticVoltageOffsetVid[i] = 248; 2133 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2134 else
2135 pp_table->StaticVoltageOffsetVid[i] =
2136 (uint8_t)(dep_table->entries[i].sclk_offset *
2137 VOLTAGE_VID_OFFSET_SCALE2 /
2138 VOLTAGE_VID_OFFSET_SCALE1);
2139 }
2140 2134
2141 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != 2135 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2142 data->disp_clk_quad_eqn_a) && 2136 data->disp_clk_quad_eqn_a) &&
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 5c26488e7a2d..0529e500c534 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
1255 1255
1256 /* port@2 is the output port */ 1256 /* port@2 is the output port */
1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL); 1257 ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
1258 if (ret) 1258 if (ret && ret != -ENODEV)
1259 return ret; 1259 return ret;
1260 1260
1261 /* Shut down GPIO is optional */ 1261 /* Shut down GPIO is optional */
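
The tc358767 hunk tolerates -ENODEV from the panel lookup, the usual convention for an optional endpoint: absence is not a probe failure, while any other error still aborts. A hedged standalone sketch of the pattern (helper and failure injection are illustrative):

#include <errno.h>
#include <stdio.h>

/* Hypothetical lookup: 0 on success, -ENODEV when the optional
 * node is simply absent, another negative errno on real failure. */
static int find_panel(int present, int broken)
{
        if (broken)
                return -EINVAL;
        return present ? 0 : -ENODEV;
}

static int probe(int present, int broken)
{
        int ret = find_panel(present, broken);

        /* Optional resource: only a real error aborts the probe. */
        if (ret && ret != -ENODEV)
                return ret;
        puts(ret ? "no panel, continuing" : "panel found");
        return 0;
}

int main(void)
{
        probe(1, 0);    /* panel found */
        probe(0, 0);    /* no panel, continuing */
        return 0;
}
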
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c0f336d23f9c..aed25c4183bb 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1655 if (config->funcs->atomic_check) 1655 if (config->funcs->atomic_check)
1656 ret = config->funcs->atomic_check(state->dev, state); 1656 ret = config->funcs->atomic_check(state->dev, state);
1657 1657
1658 if (ret)
1659 return ret;
1660
1658 if (!state->allow_modeset) { 1661 if (!state->allow_modeset) {
1659 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 1662 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1660 if (drm_atomic_crtc_needs_modeset(crtc_state)) { 1663 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1665 } 1668 }
1666 } 1669 }
1667 1670
1668 return ret; 1671 return 0;
1669} 1672}
1670EXPORT_SYMBOL(drm_atomic_check_only); 1673EXPORT_SYMBOL(drm_atomic_check_only);
1671 1674
@@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
2167 struct drm_atomic_state *state; 2170 struct drm_atomic_state *state;
2168 struct drm_modeset_acquire_ctx ctx; 2171 struct drm_modeset_acquire_ctx ctx;
2169 struct drm_plane *plane; 2172 struct drm_plane *plane;
2170 struct drm_out_fence_state *fence_state = NULL; 2173 struct drm_out_fence_state *fence_state;
2171 unsigned plane_mask; 2174 unsigned plane_mask;
2172 int ret = 0; 2175 int ret = 0;
2173 unsigned int i, j, num_fences = 0; 2176 unsigned int i, j, num_fences;
2174 2177
2175 /* disallow for drivers not supporting atomic: */ 2178 /* disallow for drivers not supporting atomic: */
2176 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 2179 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2214,8 @@ retry:
2211 plane_mask = 0; 2214 plane_mask = 0;
2212 copied_objs = 0; 2215 copied_objs = 0;
2213 copied_props = 0; 2216 copied_props = 0;
2217 fence_state = NULL;
2218 num_fences = 0;
2214 2219
2215 for (i = 0; i < arg->count_objs; i++) { 2220 for (i = 0; i < arg->count_objs; i++) {
2216 uint32_t obj_id, count_props; 2221 uint32_t obj_id, count_props;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 213fb837e1c4..08af8d6b844b 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -544,7 +544,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
544 DP_DETAILED_CAP_INFO_AVAILABLE; 544 DP_DETAILED_CAP_INFO_AVAILABLE;
545 int clk; 545 int clk;
546 int bpc; 546 int bpc;
547 char id[6]; 547 char id[7];
548 int len; 548 int len;
549 uint8_t rev[2]; 549 uint8_t rev[2];
550 int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; 550 int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
@@ -583,6 +583,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
583 seq_puts(m, "\t\tType: N/A\n"); 583 seq_puts(m, "\t\tType: N/A\n");
584 } 584 }
585 585
586 memset(id, 0, sizeof(id));
586 drm_dp_downstream_id(aux, id); 587 drm_dp_downstream_id(aux, id);
587 seq_printf(m, "\t\tID: %s\n", id); 588 seq_printf(m, "\t\tID: %s\n", id);
588 589
@@ -591,7 +592,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
591 seq_printf(m, "\t\tHW: %d.%d\n", 592 seq_printf(m, "\t\tHW: %d.%d\n",
592 (rev[0] & 0xf0) >> 4, rev[0] & 0xf); 593 (rev[0] & 0xf0) >> 4, rev[0] & 0xf);
593 594
594 len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); 595 len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2);
595 if (len > 0) 596 if (len > 0)
596 seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); 597 seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]);
597 598
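
The drm_dp_helper.c fix is about string termination: the branch device ID is six raw bytes with no NUL, so printing it with %s requires a seventh, pre-zeroed byte. (The same hunk also passes rev rather than &rev to drm_dp_dpcd_read; for an array the two pointers coincide, but the bare name matches the expected argument type.) A standalone sketch of the termination rule:

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char raw[6] = { 'D', 'P', '1', '2', '3', '4' };
        char id[7];                     /* 6 data bytes + NUL */

        memset(id, 0, sizeof(id));      /* guarantees termination */
        memcpy(id, raw, sizeof(raw));
        printf("ID: %s\n", id);         /* safe: always NUL-terminated */
        return 0;
}
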
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bfd237c15e76..ae5f06895562 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
330 return false; 330 return false;
331 } 331 }
332 332
333 /*
334 * ignore out-of-order messages or messages that are part of a
335 * failed transaction
336 */
337 if (!recv_hdr.somt && !msg->have_somt)
338 return false;
339
333 /* get length contained in this portion */ 340 /* get length contained in this portion */
334 msg->curchunk_len = recv_hdr.msg_len; 341 msg->curchunk_len = recv_hdr.msg_len;
335 msg->curchunk_hdrlen = hdrlen; 342 msg->curchunk_hdrlen = hdrlen;
@@ -2164,7 +2171,7 @@ out_unlock:
2164} 2171}
2165EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 2172EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
2166 2173
2167static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 2174static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2168{ 2175{
2169 int len; 2176 int len;
2170 u8 replyblock[32]; 2177 u8 replyblock[32];
@@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2179 replyblock, len); 2186 replyblock, len);
2180 if (ret != len) { 2187 if (ret != len) {
2181 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 2188 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2182 return; 2189 return false;
2183 } 2190 }
2184 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 2191 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2185 if (!ret) { 2192 if (!ret) {
2186 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 2193 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2187 return; 2194 return false;
2188 } 2195 }
2189 replylen = msg->curchunk_len + msg->curchunk_hdrlen; 2196 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2190 2197
@@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2196 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 2203 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2197 replyblock, len); 2204 replyblock, len);
2198 if (ret != len) { 2205 if (ret != len) {
2199 DRM_DEBUG_KMS("failed to read a chunk\n"); 2206 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2207 len, ret);
2208 return false;
2200 } 2209 }
2210
2201 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 2211 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2202 if (ret == false) 2212 if (!ret) {
2203 DRM_DEBUG_KMS("failed to build sideband msg\n"); 2213 DRM_DEBUG_KMS("failed to build sideband msg\n");
2214 return false;
2215 }
2216
2204 curreply += len; 2217 curreply += len;
2205 replylen -= len; 2218 replylen -= len;
2206 } 2219 }
2220 return true;
2207} 2221}
2208 2222
2209static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 2223static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2210{ 2224{
2211 int ret = 0; 2225 int ret = 0;
2212 2226
2213 drm_dp_get_one_sb_msg(mgr, false); 2227 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2228 memset(&mgr->down_rep_recv, 0,
2229 sizeof(struct drm_dp_sideband_msg_rx));
2230 return 0;
2231 }
2214 2232
2215 if (mgr->down_rep_recv.have_eomt) { 2233 if (mgr->down_rep_recv.have_eomt) {
2216 struct drm_dp_sideband_msg_tx *txmsg; 2234 struct drm_dp_sideband_msg_tx *txmsg;
@@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2266static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 2284static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2267{ 2285{
2268 int ret = 0; 2286 int ret = 0;
2269 drm_dp_get_one_sb_msg(mgr, true); 2287
2288 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2289 memset(&mgr->up_req_recv, 0,
2290 sizeof(struct drm_dp_sideband_msg_rx));
2291 return 0;
2292 }
2270 2293
2271 if (mgr->up_req_recv.have_eomt) { 2294 if (mgr->up_req_recv.have_eomt) {
2272 struct drm_dp_sideband_msg_req_body msg; 2295 struct drm_dp_sideband_msg_req_body msg;
@@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2318 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); 2341 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2319 } 2342 }
2320 2343
2321 drm_dp_put_mst_branch_device(mstb); 2344 if (mstb)
2345 drm_dp_put_mst_branch_device(mstb);
2346
2322 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2347 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2323 } 2348 }
2324 return ret; 2349 return ret;
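
drm_dp_get_one_sb_msg() now reports success or failure, and both callers clear the partially assembled sideband message on failure instead of parsing garbage. A standalone sketch of that fail-and-reset contract (types illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct msg_rx {
        int len;
        bool have_eomt;
};

static bool get_one_msg(struct msg_rx *msg, bool io_ok)
{
        if (!io_ok)
                return false;   /* caller must not parse msg now */
        msg->len = 4;
        msg->have_eomt = true;
        return true;
}

static void handle(struct msg_rx *msg, bool io_ok)
{
        if (!get_one_msg(msg, io_ok)) {
                /* drop partial state so the next read starts clean */
                memset(msg, 0, sizeof(*msg));
                return;
        }
        if (msg->have_eomt)
                puts("complete message, parsing");
}

int main(void)
{
        struct msg_rx rx = { 0 };

        handle(&rx, false);     /* failed read: state cleared */
        handle(&rx, true);      /* successful read: parsed */
        return 0;
}
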
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8dc11064253d..cdaac37907b1 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
255 struct drm_gem_object *obj = ptr; 255 struct drm_gem_object *obj = ptr;
256 struct drm_device *dev = obj->dev; 256 struct drm_device *dev = obj->dev;
257 257
258 if (dev->driver->gem_close_object)
259 dev->driver->gem_close_object(obj, file_priv);
260
258 if (drm_core_check_feature(dev, DRIVER_PRIME)) 261 if (drm_core_check_feature(dev, DRIVER_PRIME))
259 drm_gem_remove_prime_handles(obj, file_priv); 262 drm_gem_remove_prime_handles(obj, file_priv);
260 drm_vma_node_revoke(&obj->vma_node, file_priv); 263 drm_vma_node_revoke(&obj->vma_node, file_priv);
261 264
262 if (dev->driver->gem_close_object)
263 dev->driver->gem_close_object(obj, file_priv);
264
265 drm_gem_object_handle_put_unlocked(obj); 265 drm_gem_object_handle_put_unlocked(obj);
266 266
267 return 0; 267 return 0;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 5dc8c4350602..e40c12fabbde 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
601 601
602 crtc = drm_crtc_find(dev, plane_req->crtc_id); 602 crtc = drm_crtc_find(dev, plane_req->crtc_id);
603 if (!crtc) { 603 if (!crtc) {
604 drm_framebuffer_put(fb);
604 DRM_DEBUG_KMS("Unknown crtc ID %d\n", 605 DRM_DEBUG_KMS("Unknown crtc ID %d\n",
605 plane_req->crtc_id); 606 plane_req->crtc_id);
606 return -ENOENT; 607 return -ENOENT;
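
The drm_plane.c change plugs a reference leak: the framebuffer lookup above this hunk takes a reference, and the unknown-CRTC early return previously bailed out without dropping it. A minimal standalone sketch of the rule that every error exit must undo earlier gets:

#include <stdio.h>

static int refs;

static void get(void) { refs++; }       /* lookup takes a reference */
static void put(void) { refs--; }

static int setplane(int crtc_ok)
{
        get();
        if (!crtc_ok) {
                put();          /* the fix: release before bailing out */
                return -1;
        }
        put();                  /* normal completion path */
        return 0;
}

int main(void)
{
        setplane(0);
        printf("refs after error path: %d\n", refs);    /* 0: no leak */
        return 0;
}
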
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 5bd93169dac2..6463fc2c736f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
270 if (ret) 270 if (ret)
271 return ret; 271 return ret;
272 272
273 if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) { 273 if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
274 DRM_ERROR("relocation %u outside object", i); 274 DRM_ERROR("relocation %u outside object\n", i);
275 return -EINVAL; 275 return -EINVAL;
276 } 276 }
277 277
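
The etnaviv comparison changes from >= to >: a relocation placed exactly at size - sizeof(*ptr) still fits entirely inside the object, so only strictly larger offsets overrun. A standalone sketch of the boundary (an 8-byte entry is assumed for illustration):

#include <stdint.h>
#include <stdio.h>

static int reloc_ok(size_t obj_size, size_t offset)
{
        /* valid iff [offset, offset + 8) lies inside the object */
        return offset <= obj_size - sizeof(uint64_t);
}

int main(void)
{
        printf("%d\n", reloc_ok(4096, 4096 - 8));  /* 1: last slot fits */
        printf("%d\n", reloc_ok(4096, 4096 - 4));  /* 0: would overrun */
        return 0;
}
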
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 1d185347c64c..305dc3d4ff77 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -75,6 +75,7 @@ config DRM_EXYNOS_DP
75config DRM_EXYNOS_HDMI 75config DRM_EXYNOS_HDMI
76 bool "HDMI" 76 bool "HDMI"
77 depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON 77 depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON
78 select CEC_CORE if CEC_NOTIFIER
78 help 79 help
79 Choose this option if you want to use Exynos HDMI for DRM. 80 Choose this option if you want to use Exynos HDMI for DRM.
80 81
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 35a8dfc93836..242bd50faa26 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -453,7 +453,6 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
453 struct component_match *match; 453 struct component_match *match;
454 454
455 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 455 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
456 exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
457 456
458 match = exynos_drm_match_add(&pdev->dev); 457 match = exynos_drm_match_add(&pdev->dev);
459 if (IS_ERR(match)) 458 if (IS_ERR(match))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a11b79596e2f..b6a46d9a016e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1651,8 +1651,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1651 return ret; 1651 return ret;
1652 1652
1653 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); 1653 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0);
1654 if (!dsi->bridge_node)
1655 return -EINVAL;
1656 1654
1657 return 0; 1655 return 0;
1658} 1656}
@@ -1687,9 +1685,11 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1687 return ret; 1685 return ret;
1688 } 1686 }
1689 1687
1690 bridge = of_drm_find_bridge(dsi->bridge_node); 1688 if (dsi->bridge_node) {
1691 if (bridge) 1689 bridge = of_drm_find_bridge(dsi->bridge_node);
1692 drm_bridge_attach(encoder, bridge, NULL); 1690 if (bridge)
1691 drm_bridge_attach(encoder, bridge, NULL);
1692 }
1693 1693
1694 return mipi_dsi_host_register(&dsi->dsi_host); 1694 return mipi_dsi_host_register(&dsi->dsi_host);
1695} 1695}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index d48fd7c918f8..73217c281c9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
145exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, 145exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
146 const struct drm_mode_fb_cmd2 *mode_cmd) 146 const struct drm_mode_fb_cmd2 *mode_cmd)
147{ 147{
148 const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
148 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; 149 struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
149 struct drm_gem_object *obj; 150 struct drm_gem_object *obj;
150 struct drm_framebuffer *fb; 151 struct drm_framebuffer *fb;
151 int i; 152 int i;
152 int ret; 153 int ret;
153 154
154 for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) { 155 for (i = 0; i < info->num_planes; i++) {
156 unsigned int height = (i == 0) ? mode_cmd->height :
157 DIV_ROUND_UP(mode_cmd->height, info->vsub);
158 unsigned long size = height * mode_cmd->pitches[i] +
159 mode_cmd->offsets[i];
160
155 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); 161 obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
156 if (!obj) { 162 if (!obj) {
157 DRM_ERROR("failed to lookup gem object\n"); 163 DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
160 } 166 }
161 167
162 exynos_gem[i] = to_exynos_gem(obj); 168 exynos_gem[i] = to_exynos_gem(obj);
169
170 if (size > exynos_gem[i]->size) {
171 i++;
172 ret = -EINVAL;
173 goto err;
174 }
163 } 175 }
164 176
165 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); 177 fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
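
The exynos framebuffer hunks add a per-plane size check: each plane must provide height * pitch + offset bytes, where chroma planes use the vertically subsampled height. A standalone sketch of the computation (NV12-style 2x vertical subsampling assumed for illustration):

#include <stdio.h>

struct fb_cmd {
        unsigned int height;
        unsigned int pitches[2];
        unsigned int offsets[2];
};

/* Minimum bytes plane i must provide; vsub is the vertical
 * chroma subsampling factor (2 for NV12-like formats). */
static unsigned long plane_min_size(const struct fb_cmd *c,
                                    int i, unsigned int vsub)
{
        unsigned int height = (i == 0) ? c->height
                                       : (c->height + vsub - 1) / vsub;

        return (unsigned long)height * c->pitches[i] + c->offsets[i];
}

int main(void)
{
        struct fb_cmd c = {
                .height  = 1080,
                .pitches = { 1920, 1920 },      /* interleaved CbCr */
                .offsets = { 0, 0 },
        };

        printf("luma plane needs %lu bytes\n", plane_min_size(&c, 0, 2));
        printf("chroma plane needs %lu bytes\n", plane_min_size(&c, 1, 2));
        return 0;
}
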
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index e45720543a45..16bbee897e0d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,16 +340,10 @@ static int exynos_mic_bind(struct device *dev, struct device *master,
340 void *data) 340 void *data)
341{ 341{
342 struct exynos_mic *mic = dev_get_drvdata(dev); 342 struct exynos_mic *mic = dev_get_drvdata(dev);
343 int ret;
344 343
345 mic->bridge.funcs = &mic_bridge_funcs;
346 mic->bridge.of_node = dev->of_node;
347 mic->bridge.driver_private = mic; 344 mic->bridge.driver_private = mic;
348 ret = drm_bridge_add(&mic->bridge);
349 if (ret)
350 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
351 345
352 return ret; 346 return 0;
353} 347}
354 348
355static void exynos_mic_unbind(struct device *dev, struct device *master, 349static void exynos_mic_unbind(struct device *dev, struct device *master,
@@ -365,8 +359,6 @@ static void exynos_mic_unbind(struct device *dev, struct device *master,
365 359
366already_disabled: 360already_disabled:
367 mutex_unlock(&mic_mutex); 361 mutex_unlock(&mic_mutex);
368
369 drm_bridge_remove(&mic->bridge);
370} 362}
371 363
372static const struct component_ops exynos_mic_component_ops = { 364static const struct component_ops exynos_mic_component_ops = {
@@ -461,6 +453,15 @@ static int exynos_mic_probe(struct platform_device *pdev)
461 453
462 platform_set_drvdata(pdev, mic); 454 platform_set_drvdata(pdev, mic);
463 455
456 mic->bridge.funcs = &mic_bridge_funcs;
457 mic->bridge.of_node = dev->of_node;
458
459 ret = drm_bridge_add(&mic->bridge);
460 if (ret) {
461 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
462 return ret;
463 }
464
464 pm_runtime_enable(dev); 465 pm_runtime_enable(dev);
465 466
466 ret = component_add(dev, &exynos_mic_component_ops); 467 ret = component_add(dev, &exynos_mic_component_ops);
@@ -479,8 +480,13 @@ err:
479 480
480static int exynos_mic_remove(struct platform_device *pdev) 481static int exynos_mic_remove(struct platform_device *pdev)
481{ 482{
483 struct exynos_mic *mic = platform_get_drvdata(pdev);
484
482 component_del(&pdev->dev, &exynos_mic_component_ops); 485 component_del(&pdev->dev, &exynos_mic_component_ops);
483 pm_runtime_disable(&pdev->dev); 486 pm_runtime_disable(&pdev->dev);
487
488 drm_bridge_remove(&mic->bridge);
489
484 return 0; 490 return 0;
485} 491}
486 492
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 06bfbe400cf1..d3b69d66736f 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1501,8 +1501,6 @@ static void hdmi_disable(struct drm_encoder *encoder)
1501 */ 1501 */
1502 cancel_delayed_work(&hdata->hotplug_work); 1502 cancel_delayed_work(&hdata->hotplug_work);
1503 cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); 1503 cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID);
1504
1505 hdmiphy_disable(hdata);
1506} 1504}
1507 1505
1508static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { 1506static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1676,7 +1674,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
1676 return hdmi_bridge_init(hdata); 1674 return hdmi_bridge_init(hdata);
1677} 1675}
1678 1676
1679static struct of_device_id hdmi_match_types[] = { 1677static const struct of_device_id hdmi_match_types[] = {
1680 { 1678 {
1681 .compatible = "samsung,exynos4210-hdmi", 1679 .compatible = "samsung,exynos4210-hdmi",
1682 .data = &exynos4210_hdmi_driver_data, 1680 .data = &exynos4210_hdmi_driver_data,
@@ -1934,8 +1932,7 @@ static int hdmi_remove(struct platform_device *pdev)
1934 return 0; 1932 return 0;
1935} 1933}
1936 1934
1937#ifdef CONFIG_PM 1935static int __maybe_unused exynos_hdmi_suspend(struct device *dev)
1938static int exynos_hdmi_suspend(struct device *dev)
1939{ 1936{
1940 struct hdmi_context *hdata = dev_get_drvdata(dev); 1937 struct hdmi_context *hdata = dev_get_drvdata(dev);
1941 1938
@@ -1944,7 +1941,7 @@ static int exynos_hdmi_suspend(struct device *dev)
1944 return 0; 1941 return 0;
1945} 1942}
1946 1943
1947static int exynos_hdmi_resume(struct device *dev) 1944static int __maybe_unused exynos_hdmi_resume(struct device *dev)
1948{ 1945{
1949 struct hdmi_context *hdata = dev_get_drvdata(dev); 1946 struct hdmi_context *hdata = dev_get_drvdata(dev);
1950 int ret; 1947 int ret;
@@ -1955,7 +1952,6 @@ static int exynos_hdmi_resume(struct device *dev)
1955 1952
1956 return 0; 1953 return 0;
1957} 1954}
1958#endif
1959 1955
1960static const struct dev_pm_ops exynos_hdmi_pm_ops = { 1956static const struct dev_pm_ops exynos_hdmi_pm_ops = {
1961 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL) 1957 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
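
Replacing the #ifdef CONFIG_PM guard with __maybe_unused keeps the suspend/resume callbacks compiling in every configuration and lets the compiler silently discard them when SET_RUNTIME_PM_OPS compiles out, avoiding mismatched ifdef blocks. A standalone sketch of the attribute (the kernel's __maybe_unused expands to roughly this):

#include <stdio.h>

/* Never referenced below; the attribute suppresses the unused
 * warning without needing a matching #ifdef around the body. */
static int __attribute__((unused)) suspend_cb(void)
{
        return 0;
}

int main(void)
{
        puts("builds cleanly whether or not suspend_cb is referenced");
        return 0;
}
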
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 6bed4f3ffcd6..a998a8dd783c 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1094,28 +1094,28 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
1094 .atomic_check = mixer_atomic_check, 1094 .atomic_check = mixer_atomic_check,
1095}; 1095};
1096 1096
1097static struct mixer_drv_data exynos5420_mxr_drv_data = { 1097static const struct mixer_drv_data exynos5420_mxr_drv_data = {
1098 .version = MXR_VER_128_0_0_184, 1098 .version = MXR_VER_128_0_0_184,
1099 .is_vp_enabled = 0, 1099 .is_vp_enabled = 0,
1100}; 1100};
1101 1101
1102static struct mixer_drv_data exynos5250_mxr_drv_data = { 1102static const struct mixer_drv_data exynos5250_mxr_drv_data = {
1103 .version = MXR_VER_16_0_33_0, 1103 .version = MXR_VER_16_0_33_0,
1104 .is_vp_enabled = 0, 1104 .is_vp_enabled = 0,
1105}; 1105};
1106 1106
1107static struct mixer_drv_data exynos4212_mxr_drv_data = { 1107static const struct mixer_drv_data exynos4212_mxr_drv_data = {
1108 .version = MXR_VER_0_0_0_16, 1108 .version = MXR_VER_0_0_0_16,
1109 .is_vp_enabled = 1, 1109 .is_vp_enabled = 1,
1110}; 1110};
1111 1111
1112static struct mixer_drv_data exynos4210_mxr_drv_data = { 1112static const struct mixer_drv_data exynos4210_mxr_drv_data = {
1113 .version = MXR_VER_0_0_0_16, 1113 .version = MXR_VER_0_0_0_16,
1114 .is_vp_enabled = 1, 1114 .is_vp_enabled = 1,
1115 .has_sclk = 1, 1115 .has_sclk = 1,
1116}; 1116};
1117 1117
1118static struct of_device_id mixer_match_types[] = { 1118static const struct of_device_id mixer_match_types[] = {
1119 { 1119 {
1120 .compatible = "samsung,exynos4210-mixer", 1120 .compatible = "samsung,exynos4210-mixer",
1121 .data = &exynos4210_mxr_drv_data, 1121 .data = &exynos4210_mxr_drv_data,
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 713848c36349..e556a46cd4c2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2714unmap_src: 2714unmap_src:
2715 i915_gem_object_unpin_map(obj); 2715 i915_gem_object_unpin_map(obj);
2716put_obj: 2716put_obj:
2717 i915_gem_object_put(wa_ctx->indirect_ctx.obj); 2717 i915_gem_object_put(obj);
2718 return ret; 2718 return ret;
2719} 2719}
2720 2720
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 2deb05f618fb..7cb0818a13de 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -323,27 +323,27 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
323{ 323{
324 struct intel_gvt_irq *irq = &gvt->irq; 324 struct intel_gvt_irq *irq = &gvt->irq;
325 struct intel_vgpu *vgpu; 325 struct intel_vgpu *vgpu;
326 bool have_enabled_pipe = false;
327 int pipe, id; 326 int pipe, id;
328 327
329 if (WARN_ON(!mutex_is_locked(&gvt->lock))) 328 if (WARN_ON(!mutex_is_locked(&gvt->lock)))
330 return; 329 return;
331 330
332 hrtimer_cancel(&irq->vblank_timer.timer);
333
334 for_each_active_vgpu(gvt, vgpu, id) { 331 for_each_active_vgpu(gvt, vgpu, id) {
335 for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { 332 for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
336 have_enabled_pipe = 333 if (pipe_is_enabled(vgpu, pipe))
337 pipe_is_enabled(vgpu, pipe); 334 goto out;
338 if (have_enabled_pipe)
339 break;
340 } 335 }
341 } 336 }
342 337
343 if (have_enabled_pipe) 338 /* all the pipes are disabled */
344 hrtimer_start(&irq->vblank_timer.timer, 339 hrtimer_cancel(&irq->vblank_timer.timer);
345 ktime_add_ns(ktime_get(), irq->vblank_timer.period), 340 return;
346 HRTIMER_MODE_ABS); 341
342out:
343 hrtimer_start(&irq->vblank_timer.timer,
344 ktime_add_ns(ktime_get(), irq->vblank_timer.period),
345 HRTIMER_MODE_ABS);
346
347} 347}
348 348
349static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe) 349static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
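
The intel_gvt_check_vblank_emulation() rewrite scans for the first enabled pipe and jumps straight to restarting the vblank timer; the timer is cancelled only when every pipe of every vGPU is disabled, instead of being cancelled unconditionally up front. A standalone sketch of the scan-with-early-exit shape:

#include <stdio.h>

static const int pipe_enabled[2][3] = {
        { 0, 0, 0 },
        { 0, 1, 0 },    /* one enabled pipe on the second vGPU */
};

int main(void)
{
        int vgpu, pipe;

        for (vgpu = 0; vgpu < 2; vgpu++)
                for (pipe = 0; pipe < 3; pipe++)
                        if (pipe_enabled[vgpu][pipe])
                                goto out;

        puts("all pipes disabled: cancel vblank timer");
        return 0;
out:
        puts("enabled pipe found: (re)start vblank timer");
        return 0;
}
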
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 700050556242..1648887d3f55 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,6 +46,8 @@
46#define same_context(a, b) (((a)->context_id == (b)->context_id) && \ 46#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
47 ((a)->lrca == (b)->lrca)) 47 ((a)->lrca == (b)->lrca))
48 48
49static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
50
49static int context_switch_events[] = { 51static int context_switch_events[] = {
50 [RCS] = RCS_AS_CONTEXT_SWITCH, 52 [RCS] = RCS_AS_CONTEXT_SWITCH,
51 [BCS] = BCS_AS_CONTEXT_SWITCH, 53 [BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
499static int complete_execlist_workload(struct intel_vgpu_workload *workload) 501static int complete_execlist_workload(struct intel_vgpu_workload *workload)
500{ 502{
501 struct intel_vgpu *vgpu = workload->vgpu; 503 struct intel_vgpu *vgpu = workload->vgpu;
502 struct intel_vgpu_execlist *execlist = 504 int ring_id = workload->ring_id;
503 &vgpu->execlist[workload->ring_id]; 505 struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
504 struct intel_vgpu_workload *next_workload; 506 struct intel_vgpu_workload *next_workload;
505 struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next; 507 struct list_head *next = workload_q_head(vgpu, ring_id)->next;
506 bool lite_restore = false; 508 bool lite_restore = false;
507 int ret; 509 int ret;
508 510
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
512 release_shadow_batch_buffer(workload); 514 release_shadow_batch_buffer(workload);
513 release_shadow_wa_ctx(&workload->wa_ctx); 515 release_shadow_wa_ctx(&workload->wa_ctx);
514 516
515 if (workload->status || vgpu->resetting) 517 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 518 /* If workload->status is not successful, the HW GPU has
 519 * hung or something went wrong with i915/GVT, and GVT
 520 * won't inject a context switch interrupt to the guest.
 521 * To the guest, this error is effectively a vGPU hang,
 522 * so we should emulate one. If there are pending
 523 * workloads already submitted from the guest, we should
 524 * clean them up the way the HW GPU would.
 525 *
 526 * If we are in the middle of an engine reset, the pending
 527 * workloads won't be submitted to the HW GPU and will be
 528 * cleaned up later during the reset, so doing the cleanup
 529 * here has no impact.
 530 */
531 clean_workloads(vgpu, ENGINE_MASK(ring_id));
516 goto out; 532 goto out;
533 }
517 534
518 if (!list_empty(workload_q_head(vgpu, workload->ring_id))) { 535 if (!list_empty(workload_q_head(vgpu, ring_id))) {
519 struct execlist_ctx_descriptor_format *this_desc, *next_desc; 536 struct execlist_ctx_descriptor_format *this_desc, *next_desc;
520 537
521 next_workload = container_of(next, 538 next_workload = container_of(next,
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 5dad9298b2d5..a26c1705430e 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
72 struct intel_gvt_device_info *info = &gvt->device_info; 72 struct intel_gvt_device_info *info = &gvt->device_info;
73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev; 73 struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
74 struct intel_gvt_mmio_info *e; 74 struct intel_gvt_mmio_info *e;
75 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
76 int num = gvt->mmio.num_mmio_block;
75 struct gvt_firmware_header *h; 77 struct gvt_firmware_header *h;
76 void *firmware; 78 void *firmware;
77 void *p; 79 void *p;
78 unsigned long size, crc32_start; 80 unsigned long size, crc32_start;
79 int i; 81 int i, j;
80 int ret; 82 int ret;
81 83
82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size; 84 size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
105 hash_for_each(gvt->mmio.mmio_info_table, i, e, node) 107 hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
106 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset)); 108 *(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
107 109
110 for (i = 0; i < num; i++, block++) {
111 for (j = 0; j < block->size; j += 4)
112 *(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
113 I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
114 block->offset) + j));
115 }
116
108 memcpy(gvt->firmware.mmio, p, info->mmio_size); 117 memcpy(gvt->firmware.mmio, p, info->mmio_size);
109 118
110 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; 119 crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 3a74e79eac2f..2964a4d01a66 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -149,7 +149,7 @@ struct intel_vgpu {
149 bool active; 149 bool active;
150 bool pv_notified; 150 bool pv_notified;
151 bool failsafe; 151 bool failsafe;
152 bool resetting; 152 unsigned int resetting_eng;
153 void *sched_data; 153 void *sched_data;
154 struct vgpu_sched_ctl sched_ctl; 154 struct vgpu_sched_ctl sched_ctl;
155 155
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
195 unsigned long vgpu_allocated_fence_num; 195 unsigned long vgpu_allocated_fence_num;
196}; 196};
197 197
198/* Special MMIO blocks. */
199struct gvt_mmio_block {
200 unsigned int device;
201 i915_reg_t offset;
202 unsigned int size;
203 gvt_mmio_func read;
204 gvt_mmio_func write;
205};
206
198#define INTEL_GVT_MMIO_HASH_BITS 11 207#define INTEL_GVT_MMIO_HASH_BITS 11
199 208
200struct intel_gvt_mmio { 209struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
214/* This reg could be accessed by unaligned address */ 223/* This reg could be accessed by unaligned address */
215#define F_UNALIGN (1 << 6) 224#define F_UNALIGN (1 << 6)
216 225
226 struct gvt_mmio_block *mmio_block;
227 unsigned int num_mmio_block;
228
217 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); 229 DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
218 unsigned int num_tracked_mmio; 230 unsigned int num_tracked_mmio;
219}; 231};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 17febe830ff6..feed9921b3b3 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
2857 return 0; 2857 return 0;
2858} 2858}
2859 2859
2860/* Special MMIO blocks. */
2861static struct gvt_mmio_block {
2862 unsigned int device;
2863 i915_reg_t offset;
2864 unsigned int size;
2865 gvt_mmio_func read;
2866 gvt_mmio_func write;
2867} gvt_mmio_blocks[] = {
2868 {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
2869 {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
2870 {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
2871 pvinfo_mmio_read, pvinfo_mmio_write},
2872 {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
2873 {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
2874 {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
2875};
2876
2877static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, 2860static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
2878 unsigned int offset) 2861 unsigned int offset)
2879{ 2862{
2880 unsigned long device = intel_gvt_get_device_type(gvt); 2863 unsigned long device = intel_gvt_get_device_type(gvt);
2881 struct gvt_mmio_block *block = gvt_mmio_blocks; 2864 struct gvt_mmio_block *block = gvt->mmio.mmio_block;
2865 int num = gvt->mmio.num_mmio_block;
2882 int i; 2866 int i;
2883 2867
2884 for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) { 2868 for (i = 0; i < num; i++, block++) {
2885 if (!(device & block->device)) 2869 if (!(device & block->device))
2886 continue; 2870 continue;
2887 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) && 2871 if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
2912 gvt->mmio.mmio_attribute = NULL; 2896 gvt->mmio.mmio_attribute = NULL;
2913} 2897}
2914 2898
2899/* Special MMIO blocks. */
2900static struct gvt_mmio_block mmio_blocks[] = {
2901 {D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
2902 {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
2903 {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
2904 pvinfo_mmio_read, pvinfo_mmio_write},
2905 {D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
2906 {D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
2907 {D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
2908};
2909
2915/** 2910/**
2916 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device 2911 * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
2917 * @gvt: GVT device 2912 * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
2951 goto err; 2946 goto err;
2952 } 2947 }
2953 2948
2949 gvt->mmio.mmio_block = mmio_blocks;
2950 gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
2951
2954 gvt_dbg_mmio("traced %u virtual mmio registers\n", 2952 gvt_dbg_mmio("traced %u virtual mmio registers\n",
2955 gvt->mmio.num_tracked_mmio); 2953 gvt->mmio.num_tracked_mmio);
2956 return 0; 2954 return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
3030 gvt_mmio_func func; 3028 gvt_mmio_func func;
3031 int ret; 3029 int ret;
3032 3030
3033 if (WARN_ON(bytes > 4)) 3031 if (WARN_ON(bytes > 8))
3034 return -EINVAL; 3032 return -EINVAL;
3035 3033
3036 /* 3034 /*
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4f7057d62d88..22e08eb2d0b7 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
432 432
433 i915_gem_request_put(fetch_and_zero(&workload->req)); 433 i915_gem_request_put(fetch_and_zero(&workload->req));
434 434
435 if (!workload->status && !vgpu->resetting) { 435 if (!workload->status && !(vgpu->resetting_eng &
436 ENGINE_MASK(ring_id))) {
436 update_guest_context(workload); 437 update_guest_context(workload);
437 438
438 for_each_set_bit(event, workload->pending_events, 439 for_each_set_bit(event, workload->pending_events,
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 90c14e6e3ea0..3deadcbd5a24 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
480{ 480{
481 struct intel_gvt *gvt = vgpu->gvt; 481 struct intel_gvt *gvt = vgpu->gvt;
482 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 482 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
483 unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
483 484
484 gvt_dbg_core("------------------------------------------\n"); 485 gvt_dbg_core("------------------------------------------\n");
 485 gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n", 486 gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
486 vgpu->id, dmlr, engine_mask); 487 vgpu->id, dmlr, engine_mask);
487 vgpu->resetting = true; 488
489 vgpu->resetting_eng = resetting_eng;
488 490
489 intel_vgpu_stop_schedule(vgpu); 491 intel_vgpu_stop_schedule(vgpu);
490 /* 492 /*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
497 mutex_lock(&gvt->lock); 499 mutex_lock(&gvt->lock);
498 } 500 }
499 501
500 intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); 502 intel_vgpu_reset_execlist(vgpu, resetting_eng);
501 503
502 /* full GPU reset or device model level reset */ 504 /* full GPU reset or device model level reset */
503 if (engine_mask == ALL_ENGINES || dmlr) { 505 if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
520 } 522 }
521 } 523 }
522 524
523 vgpu->resetting = false; 525 vgpu->resetting_eng = 0;
524 gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 526 gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
525 gvt_dbg_core("------------------------------------------\n"); 527 gvt_dbg_core("------------------------------------------\n");
526} 528}
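
Across the gvt hunks, resetting_eng replaces the old resetting boolean with a per-engine bitmask, so a workload is treated as "being reset" only when its own engine is in the mask. A standalone sketch of the mask test (engine numbering illustrative):

#include <stdio.h>

#define ENGINE_MASK(id) (1u << (id))

int main(void)
{
        unsigned int resetting_eng = ENGINE_MASK(0);    /* engine 0 only */

        printf("engine 0 resetting: %d\n",
               !!(resetting_eng & ENGINE_MASK(0)));     /* 1 */
        printf("engine 1 resetting: %d\n",
               !!(resetting_eng & ENGINE_MASK(1)));     /* 0 */

        resetting_eng = 0;                              /* reset finished */
        printf("any engine resetting: %d\n", resetting_eng != 0);
        return 0;
}
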
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 00d8967c8512..d1bd53b73738 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4580,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4580 4580
4581 sseu->slice_mask |= BIT(s); 4581 sseu->slice_mask |= BIT(s);
4582 4582
4583 if (IS_GEN9_BC(dev_priv)) 4583 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
4584 sseu->subslice_mask = 4584 sseu->subslice_mask =
4585 INTEL_INFO(dev_priv)->sseu.subslice_mask; 4585 INTEL_INFO(dev_priv)->sseu.subslice_mask;
4586 4586
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index 152f16c11878..348b29a845c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -114,7 +114,7 @@ i915_clflush_notify(struct i915_sw_fence *fence,
114 return NOTIFY_DONE; 114 return NOTIFY_DONE;
115} 115}
116 116
117void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 117bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
118 unsigned int flags) 118 unsigned int flags)
119{ 119{
120 struct clflush *clflush; 120 struct clflush *clflush;
@@ -128,7 +128,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
128 */ 128 */
129 if (!i915_gem_object_has_struct_page(obj)) { 129 if (!i915_gem_object_has_struct_page(obj)) {
130 obj->cache_dirty = false; 130 obj->cache_dirty = false;
131 return; 131 return false;
132 } 132 }
133 133
134 /* If the GPU is snooping the contents of the CPU cache, 134 /* If the GPU is snooping the contents of the CPU cache,
@@ -140,7 +140,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
140 * tracking. 140 * tracking.
141 */ 141 */
142 if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) 142 if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent)
143 return; 143 return false;
144 144
145 trace_i915_gem_object_clflush(obj); 145 trace_i915_gem_object_clflush(obj);
146 146
@@ -179,4 +179,5 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
179 } 179 }
180 180
181 obj->cache_dirty = false; 181 obj->cache_dirty = false;
182 return true;
182} 183}
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
index 2455a7820937..f390247561b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.h
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -28,7 +28,7 @@
28struct drm_i915_private; 28struct drm_i915_private;
29struct drm_i915_gem_object; 29struct drm_i915_gem_object;
30 30
31void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 31bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
32 unsigned int flags); 32 unsigned int flags);
33#define I915_CLFLUSH_FORCE BIT(0) 33#define I915_CLFLUSH_FORCE BIT(0)
34#define I915_CLFLUSH_SYNC BIT(1) 34#define I915_CLFLUSH_SYNC BIT(1)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 39ed58a21fc1..e1e971ee2ed5 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
688} 688}
689 689
690static bool 690static bool
691needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, 691needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
692 struct intel_engine_cs *engine,
693 struct i915_gem_context *to)
694{ 692{
693 struct i915_gem_context *from = engine->legacy_active_context;
694
695 if (!ppgtt) 695 if (!ppgtt)
696 return false; 696 return false;
697 697
698 /* Always load the ppgtt on first use */ 698 /* Always load the ppgtt on first use */
699 if (!engine->legacy_active_context) 699 if (!from)
700 return true; 700 return true;
701 701
702 /* Same context without new entries, skip */ 702 /* Same context without new entries, skip */
703 if (engine->legacy_active_context == to && 703 if ((!from->ppgtt || from->ppgtt == ppgtt) &&
704 !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings)) 704 !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
705 return false; 705 return false;
706 706
@@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
744 if (skip_rcs_switch(ppgtt, engine, to)) 744 if (skip_rcs_switch(ppgtt, engine, to))
745 return 0; 745 return 0;
746 746
747 if (needs_pd_load_pre(ppgtt, engine, to)) { 747 if (needs_pd_load_pre(ppgtt, engine)) {
748 /* Older GENs and non render rings still want the load first, 748 /* Older GENs and non render rings still want the load first,
749 * "PP_DCLV followed by PP_DIR_BASE register through Load 749 * "PP_DCLV followed by PP_DIR_BASE register through Load
750 * Register Immediate commands in Ring Buffer before submitting 750 * Register Immediate commands in Ring Buffer before submitting
@@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
841 struct i915_hw_ppgtt *ppgtt = 841 struct i915_hw_ppgtt *ppgtt =
842 to->ppgtt ?: req->i915->mm.aliasing_ppgtt; 842 to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
843 843
844 if (needs_pd_load_pre(ppgtt, engine, to)) { 844 if (needs_pd_load_pre(ppgtt, engine)) {
845 int ret; 845 int ret;
846 846
847 trace_switch_mm(engine, to); 847 trace_switch_mm(engine, to);
@@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
852 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); 852 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
853 } 853 }
854 854
855 engine->legacy_active_context = to;
855 return 0; 856 return 0;
856 } 857 }
857 858
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 054b2e54cdaf..e9503f6d1100 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -560,9 +560,6 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
560 eb->args->flags |= __EXEC_HAS_RELOC; 560 eb->args->flags |= __EXEC_HAS_RELOC;
561 } 561 }
562 562
563 entry->flags |= __EXEC_OBJECT_HAS_PIN;
564 GEM_BUG_ON(eb_vma_misplaced(entry, vma));
565
566 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { 563 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) {
567 err = i915_vma_get_fence(vma); 564 err = i915_vma_get_fence(vma);
568 if (unlikely(err)) { 565 if (unlikely(err)) {
@@ -574,6 +571,9 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
574 entry->flags |= __EXEC_OBJECT_HAS_FENCE; 571 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
575 } 572 }
576 573
574 entry->flags |= __EXEC_OBJECT_HAS_PIN;
575 GEM_BUG_ON(eb_vma_misplaced(entry, vma));
576
577 return 0; 577 return 0;
578} 578}
579 579
@@ -1458,7 +1458,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
1458 * to read. However, if the array is not writable the user loses 1458 * to read. However, if the array is not writable the user loses
1459 * the updated relocation values. 1459 * the updated relocation values.
1460 */ 1460 */
1461 if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) 1461 if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
1462 return -EFAULT; 1462 return -EFAULT;
1463 1463
1464 do { 1464 do {
@@ -1775,7 +1775,7 @@ out:
1775 } 1775 }
1776 } 1776 }
1777 1777
1778 return err ?: have_copy; 1778 return err;
1779} 1779}
1780 1780
1781static int eb_relocate(struct i915_execbuffer *eb) 1781static int eb_relocate(struct i915_execbuffer *eb)
@@ -1825,7 +1825,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
1825 int err; 1825 int err;
1826 1826
1827 for (i = 0; i < count; i++) { 1827 for (i = 0; i < count; i++) {
1828 const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 1828 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
1829 struct i915_vma *vma = exec_to_vma(entry); 1829 struct i915_vma *vma = exec_to_vma(entry);
1830 struct drm_i915_gem_object *obj = vma->obj; 1830 struct drm_i915_gem_object *obj = vma->obj;
1831 1831
@@ -1841,12 +1841,14 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
1841 eb->request->capture_list = capture; 1841 eb->request->capture_list = capture;
1842 } 1842 }
1843 1843
1844 if (unlikely(obj->cache_dirty && !obj->cache_coherent)) {
1845 if (i915_gem_clflush_object(obj, 0))
1846 entry->flags &= ~EXEC_OBJECT_ASYNC;
1847 }
1848
1844 if (entry->flags & EXEC_OBJECT_ASYNC) 1849 if (entry->flags & EXEC_OBJECT_ASYNC)
1845 goto skip_flushes; 1850 goto skip_flushes;
1846 1851
1847 if (unlikely(obj->cache_dirty && !obj->cache_coherent))
1848 i915_gem_clflush_object(obj, 0);
1849
1850 err = i915_gem_request_await_object 1852 err = i915_gem_request_await_object
1851 (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); 1853 (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE);
1852 if (err) 1854 if (err)
@@ -2209,7 +2211,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2209 goto err_unlock; 2211 goto err_unlock;
2210 2212
2211 err = eb_relocate(&eb); 2213 err = eb_relocate(&eb);
2212 if (err) 2214 if (err) {
2213 /* 2215 /*
2214 * If the user expects the execobject.offset and 2216 * If the user expects the execobject.offset and
2215 * reloc.presumed_offset to be an exact match, 2217 * reloc.presumed_offset to be an exact match,
@@ -2218,8 +2220,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
2218 * relocation. 2220 * relocation.
2219 */ 2221 */
2220 args->flags &= ~__EXEC_HAS_RELOC; 2222 args->flags &= ~__EXEC_HAS_RELOC;
2221 if (err < 0)
2222 goto err_vma; 2223 goto err_vma;
2224 }
2223 2225
2224 if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { 2226 if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) {
2225 DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); 2227 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
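
Among the execbuffer hunks, the access_ok change is the classic sizeof(ptr) versus sizeof(*ptr) bug: the old expression measured the pointer itself rather than one relocation entry, so the user-range check covered only a fraction of the memory actually read. A standalone sketch of the difference (entry layout illustrative):

#include <stdio.h>

struct reloc_entry {
        unsigned long long target;
        unsigned long long offset;
        unsigned long long delta;
};

int main(void)
{
        struct reloc_entry *urelocs = 0;        /* sizeof never derefs */
        size_t remain = 16;

        /* wrong: size of the pointer (8 bytes on LP64) */
        printf("remain * sizeof(urelocs)  = %zu\n",
               remain * sizeof(urelocs));
        /* right: size of one pointed-to entry (24 bytes here) */
        printf("remain * sizeof(*urelocs) = %zu\n",
               remain * sizeof(*urelocs));
        return 0;
}
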
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 7032c542a9b1..4dd4c2159a92 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
242 goto err_unpin; 242 goto err_unpin;
243 } 243 }
244 244
245 ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
246 if (ret)
247 goto err_unpin;
248
245 ret = req->engine->emit_bb_start(req, 249 ret = req->engine->emit_bb_start(req,
246 so->batch_offset, so->batch_size, 250 so->batch_offset, so->batch_size,
247 I915_DISPATCH_SECURE); 251 I915_DISPATCH_SECURE);
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 1032f98add11..77fb39808131 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
43 return true; 43 return true;
44 44
45 case MUTEX_TRYLOCK_FAILED: 45 case MUTEX_TRYLOCK_FAILED:
46 *unlock = false;
47 preempt_disable();
46 do { 48 do {
47 cpu_relax(); 49 cpu_relax();
48 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 50 if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
49 case MUTEX_TRYLOCK_SUCCESS:
50 *unlock = true; 51 *unlock = true;
51 return true; 52 break;
52 } 53 }
53 } while (!need_resched()); 54 } while (!need_resched());
55 preempt_enable();
56 return *unlock;
54 57
55 return false; 58 case MUTEX_TRYLOCK_SUCCESS:
59 *unlock = true;
60 return true;
56 } 61 }
57 62
58 BUG(); 63 BUG();
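
The reworked shrinker_lock() spins on mutex_trylock with preemption disabled and gives up once a reschedule is pending, replacing the case label that previously sat inside the loop. A hedged userspace sketch of the bounded trylock spin (pthread stand-ins for the kernel primitives, spin budget illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for need_resched(): give up after a fixed budget. */
static bool should_give_up(int spins)
{
        return spins > 1000;
}

static bool shrinker_lock(bool *unlock)
{
        int spins = 0;

        *unlock = false;
        do {
                if (pthread_mutex_trylock(&lock) == 0) {
                        *unlock = true;
                        break;
                }
        } while (!should_give_up(++spins));
        return *unlock;
}

int main(void)
{
        bool unlock;

        if (shrinker_lock(&unlock)) {
                puts("lock acquired");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}
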
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9cd22f83b0cf..f33d90226704 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
1601 u32 *cs; 1601 u32 *cs;
1602 int i; 1602 int i;
1603 1603
1604 cs = intel_ring_begin(req, n_flex_regs * 2 + 4); 1604 cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
1605 if (IS_ERR(cs)) 1605 if (IS_ERR(cs))
1606 return PTR_ERR(cs); 1606 return PTR_ERR(cs);
1607 1607
1608 *cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1); 1608 *cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
1609 1609
1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL); 1610 *cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | 1611 *cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
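
Sizing the i915_perf command stream with ARRAY_SIZE(flex_mmio) ties the ring-space request to the register table itself, so growing the table can never desynchronize a hand-maintained count such as the old n_flex_regs. A standalone sketch (register offsets illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int flex_regs[] = { 0x2710, 0x20c8, 0xe180 };

int main(void)
{
        /* 2 dwords per register write plus 4 dwords of framing,
         * recomputed automatically whenever flex_regs changes. */
        size_t cs_dwords = ARRAY_SIZE(flex_regs) * 2 + 4;

        printf("ring space needed: %zu dwords\n", cs_dwords);
        return 0;
}
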
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 4a673fc1a432..20cf272c97b1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -284,12 +284,12 @@ static inline void __i915_vma_pin(struct i915_vma *vma)
284 284
285static inline void __i915_vma_unpin(struct i915_vma *vma) 285static inline void __i915_vma_unpin(struct i915_vma *vma)
286{ 286{
287 GEM_BUG_ON(!i915_vma_is_pinned(vma));
288 vma->flags--; 287 vma->flags--;
289} 288}
290 289
291static inline void i915_vma_unpin(struct i915_vma *vma) 290static inline void i915_vma_unpin(struct i915_vma *vma)
292{ 291{
292 GEM_BUG_ON(!i915_vma_is_pinned(vma));
293 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 293 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
294 __i915_vma_unpin(vma); 294 __i915_vma_unpin(vma);
295} 295}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 639d45c1dd2e..7ea7fd1e8856 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1120 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; 1120 bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
1121 uint8_t aux_channel, ddc_pin; 1121 uint8_t aux_channel, ddc_pin;
1122 /* Each DDI port can have more than one value on the "DVO Port" field, 1122 /* Each DDI port can have more than one value on the "DVO Port" field,
1123 * so look for all the possible values for each port and abort if more 1123 * so look for all the possible values for each port.
1124 * than one is found. */ 1124 */
1125 int dvo_ports[][3] = { 1125 int dvo_ports[][3] = {
1126 {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, 1126 {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
1127 {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, 1127 {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1130 {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, 1130 {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
1131 }; 1131 };
1132 1132
1133 /* Find the child device to use, abort if more than one found. */ 1133 /*
1134 * Find the first child device to reference the port, report if more
1135 * than one found.
1136 */
1134 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 1137 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
1135 it = dev_priv->vbt.child_dev + i; 1138 it = dev_priv->vbt.child_dev + i;
1136 1139
@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1140 1143
1141 if (it->common.dvo_port == dvo_ports[port][j]) { 1144 if (it->common.dvo_port == dvo_ports[port][j]) {
1142 if (child) { 1145 if (child) {
1143 DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n", 1146 DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
1144 port_name(port)); 1147 port_name(port));
1145 return; 1148 } else {
1149 child = it;
1146 } 1150 }
1147 child = it;
1148 } 1151 }
1149 } 1152 }
1150 } 1153 }
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 306c6b06b330..17c4ae7e4e7c 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
398 } 398 }
399 399
400 /* Program the max register to clamp values > 1.0. */ 400 /* Program the max register to clamp values > 1.0. */
401 i = lut_size - 1;
401 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), 402 I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
402 drm_color_lut_extract(lut[i].red, 16)); 403 drm_color_lut_extract(lut[i].red, 16));
403 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), 404 I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
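
The intel_color.c fix re-seats the index before the clamp write: after the programming loop, i has already walked past the last LUT entry, so reading lut[i] for the max register was out of bounds. A standalone sketch of the off-the-end index:

#include <stdio.h>

int main(void)
{
        const int lut[4] = { 0, 85, 170, 255 };
        const int lut_size = 4;
        int i;

        for (i = 0; i < lut_size; i++)
                ;                       /* program each entry */

        /* here i == lut_size: lut[i] would read past the array */
        i = lut_size - 1;               /* the fix: last valid entry */
        printf("clamp value: %d\n", lut[i]);
        return 0;
}
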
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 80e96f1f49d2..d3b3252a8742 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
1762 if (dev_priv->vbt.edp.low_vswing) { 1762 if (dev_priv->vbt.edp.low_vswing) {
1763 if (voltage == VOLTAGE_INFO_0_85V) { 1763 if (voltage == VOLTAGE_INFO_0_85V) {
1764 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); 1764 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
1765 return cnl_ddi_translations_dp_0_85V; 1765 return cnl_ddi_translations_edp_0_85V;
1766 } else if (voltage == VOLTAGE_INFO_0_95V) { 1766 } else if (voltage == VOLTAGE_INFO_0_95V) {
1767 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); 1767 *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
1768 return cnl_ddi_translations_edp_0_95V; 1768 return cnl_ddi_translations_edp_0_95V;
@@ -1896,8 +1896,8 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level)
1896 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); 1896 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
1897 val &= ~LOADGEN_SELECT; 1897 val &= ~LOADGEN_SELECT;
1898 1898
1899 if (((rate < 600000) && (width == 4) && (ln >= 1)) || 1899 if ((rate <= 600000 && width == 4 && ln >= 1) ||
1900 ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { 1900 (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
1901 val |= LOADGEN_SELECT; 1901 val |= LOADGEN_SELECT;
1902 } 1902 }
1903 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); 1903 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
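
The cnl_get_buf_trans_edp() fix above is a classic copy-paste hazard: *n_entries was sized from the eDP table while the function returned the DP table. A hedged sketch of one way to keep the size and the returned pointer tied to the same array (RETURN_TRANS_TABLE is a hypothetical helper, not an i915 macro):

    #include <stddef.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct buf_trans { int vswing, preemph; };

    /* Name the table exactly once, so ARRAY_SIZE() and the returned
     * pointer can never disagree. */
    #define RETURN_TRANS_TABLE(tbl, n_out)          \
            do {                                    \
                    *(n_out) = ARRAY_SIZE(tbl);     \
                    return (tbl);                   \
            } while (0)

    static const struct buf_trans table_0_85V[] = { { 1, 2 }, { 3, 4 } };

    static const struct buf_trans *get_table(int *n_entries)
    {
            RETURN_TRANS_TABLE(table_0_85V, n_entries);
    }

    int main(void)
    {
            int n;
            const struct buf_trans *t = get_table(&n);
            return (t && n == 2) ? 0 : 1;
    }
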
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dec9e58545a1..cc484b56eeaa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3427,26 +3427,6 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3427 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3427 intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3428} 3428}
3429 3429
3430static void intel_update_primary_planes(struct drm_device *dev)
3431{
3432 struct drm_crtc *crtc;
3433
3434 for_each_crtc(dev, crtc) {
3435 struct intel_plane *plane = to_intel_plane(crtc->primary);
3436 struct intel_plane_state *plane_state =
3437 to_intel_plane_state(plane->base.state);
3438
3439 if (plane_state->base.visible) {
3440 trace_intel_update_plane(&plane->base,
3441 to_intel_crtc(crtc));
3442
3443 plane->update_plane(plane,
3444 to_intel_crtc_state(crtc->state),
3445 plane_state);
3446 }
3447 }
3448}
3449
3450static int 3430static int
3451__intel_display_resume(struct drm_device *dev, 3431__intel_display_resume(struct drm_device *dev,
3452 struct drm_atomic_state *state, 3432 struct drm_atomic_state *state,
@@ -3499,6 +3479,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
3499 struct drm_atomic_state *state; 3479 struct drm_atomic_state *state;
3500 int ret; 3480 int ret;
3501 3481
3482
3483 /* reset doesn't touch the display */
3484 if (!i915.force_reset_modeset_test &&
3485 !gpu_reset_clobbers_display(dev_priv))
3486 return;
3487
3488 /* We have a modeset vs reset deadlock, defensively unbreak it.
3489 *
3490 * FIXME: We can do a _lot_ better, this is just a first iteration.
3491 */
3492 i915_gem_set_wedged(dev_priv);
3493 DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n");
3494
3502 /* 3495 /*
3503 * Need mode_config.mutex so that we don't 3496 * Need mode_config.mutex so that we don't
3504 * trample ongoing ->detect() and whatnot. 3497 * trample ongoing ->detect() and whatnot.
@@ -3512,12 +3505,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
3512 3505
3513 drm_modeset_backoff(ctx); 3506 drm_modeset_backoff(ctx);
3514 } 3507 }
3515
3516 /* reset doesn't touch the display, but flips might get nuked anyway, */
3517 if (!i915.force_reset_modeset_test &&
3518 !gpu_reset_clobbers_display(dev_priv))
3519 return;
3520
3521 /* 3508 /*
3522 * Disabling the crtcs gracefully seems nicer. Also the 3509 * Disabling the crtcs gracefully seems nicer. Also the
3523 * g33 docs say we should at least disable all the planes. 3510 * g33 docs say we should at least disable all the planes.
@@ -3547,6 +3534,14 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3547 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3534 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
3548 int ret; 3535 int ret;
3549 3536
3537 /* reset doesn't touch the display */
3538 if (!i915.force_reset_modeset_test &&
3539 !gpu_reset_clobbers_display(dev_priv))
3540 return;
3541
3542 if (!state)
3543 goto unlock;
3544
3550 /* 3545 /*
3551 * Flips in the rings will be nuked by the reset, 3546 * Flips in the rings will be nuked by the reset,
3552 * so complete all pending flips so that user space 3547 * so complete all pending flips so that user space
@@ -3558,22 +3553,10 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3558 3553
3559 /* reset doesn't touch the display */ 3554 /* reset doesn't touch the display */
3560 if (!gpu_reset_clobbers_display(dev_priv)) { 3555 if (!gpu_reset_clobbers_display(dev_priv)) {
3561 if (!state) { 3556 /* for testing only restore the display */
3562 /* 3557 ret = __intel_display_resume(dev, state, ctx);
3563 * Flips in the rings have been nuked by the reset,
3564 * so update the base address of all primary
3565 * planes to the last fb to make sure we're
3566 * showing the correct fb after a reset.
3567 *
3568 * FIXME: Atomic will make this obsolete since we won't schedule
3569 * CS-based flips (which might get lost in gpu resets) any more.
3570 */
3571 intel_update_primary_planes(dev);
3572 } else {
3573 ret = __intel_display_resume(dev, state, ctx);
3574 if (ret) 3558 if (ret)
3575 DRM_ERROR("Restoring old state failed with %i\n", ret); 3559 DRM_ERROR("Restoring old state failed with %i\n", ret);
3576 }
3577 } else { 3560 } else {
3578 /* 3561 /*
3579 * The display has been reset as well, 3562 * The display has been reset as well,
@@ -3597,8 +3580,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
3597 intel_hpd_init(dev_priv); 3580 intel_hpd_init(dev_priv);
3598 } 3581 }
3599 3582
3600 if (state) 3583 drm_atomic_state_put(state);
3601 drm_atomic_state_put(state); 3584unlock:
3602 drm_modeset_drop_locks(ctx); 3585 drm_modeset_drop_locks(ctx);
3603 drm_modeset_acquire_fini(ctx); 3586 drm_modeset_acquire_fini(ctx);
3604 mutex_unlock(&dev->mode_config.mutex); 3587 mutex_unlock(&dev->mode_config.mutex);
@@ -9117,6 +9100,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9117 u64 power_domain_mask; 9100 u64 power_domain_mask;
9118 bool active; 9101 bool active;
9119 9102
9103 if (INTEL_GEN(dev_priv) >= 9) {
9104 intel_crtc_init_scalers(crtc, pipe_config);
9105
9106 pipe_config->scaler_state.scaler_id = -1;
9107 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9108 }
9109
9120 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9110 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9121 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9111 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9122 return false; 9112 return false;
@@ -9145,13 +9135,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9145 pipe_config->gamma_mode = 9135 pipe_config->gamma_mode =
9146 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9136 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9147 9137
9148 if (INTEL_GEN(dev_priv) >= 9) {
9149 intel_crtc_init_scalers(crtc, pipe_config);
9150
9151 pipe_config->scaler_state.scaler_id = -1;
9152 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9153 }
9154
9155 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9138 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9156 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9139 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9157 power_domain_mask |= BIT_ULL(power_domain); 9140 power_domain_mask |= BIT_ULL(power_domain);
@@ -9540,7 +9523,16 @@ static void i9xx_update_cursor(struct intel_plane *plane,
9540 * On some platforms writing CURCNTR first will also 9523 * On some platforms writing CURCNTR first will also
9541 * cause CURPOS to be armed by the CURBASE write. 9524 * cause CURPOS to be armed by the CURBASE write.
9542 * Without the CURCNTR write the CURPOS write would 9525 * Without the CURCNTR write the CURPOS write would
9543 * arm itself. 9526 * arm itself. Thus we always start the full update
9527 * with a CURCNTR write.
9528 *
9529 * On other platforms CURPOS always requires the
9530 * CURBASE write to arm the update. Additionally,
9531 * a write to any of the cursor registers will cancel
9532 * an already armed cursor update. Thus leaving out
9533 * the CURBASE write after CURPOS could lead to a
9534 * cursor that doesn't appear to move, or even change
9535 * shape. Thus we always write CURBASE.
9544 * 9536 *
9545 * CURCNTR and CUR_FBC_CTL are always 9537 * CURCNTR and CUR_FBC_CTL are always
9546 * armed by the CURBASE write only. 9538 * armed by the CURBASE write only.
@@ -9559,6 +9551,7 @@ static void i9xx_update_cursor(struct intel_plane *plane,
9559 plane->cursor.cntl = cntl; 9551 plane->cursor.cntl = cntl;
9560 } else { 9552 } else {
9561 I915_WRITE_FW(CURPOS(pipe), pos); 9553 I915_WRITE_FW(CURPOS(pipe), pos);
9554 I915_WRITE_FW(CURBASE(pipe), base);
9562 } 9555 }
9563 9556
9564 POSTING_READ_FW(CURBASE(pipe)); 9557 POSTING_READ_FW(CURBASE(pipe));
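
The expanded comment in i9xx_update_cursor() is worth restating: on some platforms a CURPOS write only takes effect once CURBASE is written, so a partial update must still end with CURBASE. A minimal sketch of the resulting rule; the register offsets and mmio_write() stub below are invented for illustration, not the real i915 MMIO layout:

    #include <stdint.h>

    /* Stand-in for I915_WRITE_FW(); offsets are examples only. */
    static void mmio_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

    #define CURPOS  0x70088
    #define CURBASE 0x70084

    /* Partial cursor update: position changed, framebuffer unchanged.
     * CURBASE is written anyway, because on some hardware that write
     * is what arms (latches) the pending CURPOS value. */
    static void cursor_move(uint32_t pos, uint32_t base)
    {
            mmio_write(CURPOS, pos);
            mmio_write(CURBASE, base);      /* always arm the update */
    }

    int main(void) { cursor_move(0x00100010, 0x1000); return 0; }
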
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
index 6e09ceb71500..150a156f3b1e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
46 struct intel_encoder *encoder = connector->encoder; 46 struct intel_encoder *encoder = connector->encoder;
47 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 47 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
48 struct mipi_dsi_device *dsi_device; 48 struct mipi_dsi_device *dsi_device;
49 u8 data; 49 u8 data = 0;
50 enum port port; 50 enum port port;
51 51
52 /* FIXME: Need to take care of 16 bit brightness level */ 52 /* FIXME: Need to take care of 16 bit brightness level */
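
The one-character dcs_get_backlight() change matters because the DCS read below it can fail without touching `data`, and the function would then return stack garbage as a brightness level. A small sketch of the initialize-the-out-parameter pattern, with a stubbed bus read standing in for the DSI transfer:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a bus read that may fail without writing *out. */
    static int bus_read(uint8_t *out) { (void)out; return -1; }

    static uint32_t get_backlight(void)
    {
            uint8_t data = 0;       /* deterministic fallback if the read fails */

            bus_read(&data);        /* error deliberately ignored, as in the driver */
            return data;
    }

    int main(void) { printf("%u\n", get_backlight()); return 0; }
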
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 7158c7ce9c09..91c07b0c8db9 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
306 306
307 if (!gpio_desc) { 307 if (!gpio_desc) {
308 gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, 308 gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
309 "panel", gpio_index, 309 NULL, gpio_index,
310 value ? GPIOD_OUT_LOW : 310 value ? GPIOD_OUT_LOW :
311 GPIOD_OUT_HIGH); 311 GPIOD_OUT_HIGH);
312 312
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 52d5b82790d9..c17ed0e62b67 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,7 +45,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
45 return true; 45 return true;
46 if (IS_SKYLAKE(dev_priv)) 46 if (IS_SKYLAKE(dev_priv))
47 return true; 47 return true;
48 if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) 48 if (IS_KABYLAKE(dev_priv))
49 return true; 49 return true;
50 return false; 50 return false;
51} 51}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7404cf2aac28..2afa4daa88e8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1221 return ret; 1221 return ret;
1222} 1222}
1223 1223
1224static u8 gtiir[] = {
1225 [RCS] = 0,
1226 [BCS] = 0,
1227 [VCS] = 1,
1228 [VCS2] = 1,
1229 [VECS] = 3,
1230};
1231
1224static int gen8_init_common_ring(struct intel_engine_cs *engine) 1232static int gen8_init_common_ring(struct intel_engine_cs *engine)
1225{ 1233{
1226 struct drm_i915_private *dev_priv = engine->i915; 1234 struct drm_i915_private *dev_priv = engine->i915;
@@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1245 1253
1246 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); 1254 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
1247 1255
1248 /* After a GPU reset, we may have requests to replay */ 1256 GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
1257
1258 /*
1259 * Clear any pending interrupt state.
1260 *
1261 * We do it twice out of paranoia that some of the IIR are double
1262 * buffered, and if we only reset it once there may still be
1263 * an interrupt pending.
1264 */
1265 I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
1266 GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
1267 I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
1268 GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
1249 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1269 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1250 1270
1271 /* After a GPU reset, we may have requests to replay */
1251 submit = false; 1272 submit = false;
1252 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { 1273 for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
1253 if (!port_isset(&port[n])) 1274 if (!port_isset(&port[n]))
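
gen8_init_common_ring() now clears the engine's GT IIR twice because, as the comment says, the IIR may be double buffered: one write-to-clear can leave a second latched copy pending. A toy model of why the second write is needed (the two-deep register here is a simulation, not how the real hardware is documented):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy double-buffered IIR: clearing the visible copy promotes the
     * shadowed copy, so a single write is not enough. */
    static uint32_t iir[2] = { 0x4, 0x4 };

    static void iir_w1c(uint32_t mask)      /* write-1-to-clear */
    {
            iir[0] &= ~mask;
            iir[0] |= iir[1];               /* shadow copy pops up */
            iir[1] = 0;
    }

    int main(void)
    {
            iir_w1c(0x4);
            iir_w1c(0x4);                   /* second write catches the shadow */
            printf("pending: %#x\n", iir[0]);   /* 0 */
            return 0;
    }
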
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 52b3a1fd4059..57ef5833c427 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ enum {
63}; 63};
64 64
65/* Logical Rings */ 65/* Logical Rings */
66void intel_logical_ring_stop(struct intel_engine_cs *engine);
67void intel_logical_ring_cleanup(struct intel_engine_cs *engine); 66void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
68int logical_render_ring_init(struct intel_engine_cs *engine); 67int logical_render_ring_init(struct intel_engine_cs *engine);
69int logical_xcs_ring_init(struct intel_engine_cs *engine); 68int logical_xcs_ring_init(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 5abef482eacf..beb9baaf2f2e 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
210 struct drm_device *dev = intel_dig_port->base.base.dev; 210 struct drm_device *dev = intel_dig_port->base.base.dev;
211 struct drm_i915_private *dev_priv = to_i915(dev); 211 struct drm_i915_private *dev_priv = to_i915(dev);
212 212
213 if (!IS_GEN9(dev_priv)) { 213 if (!HAS_LSPCON(dev_priv)) {
214 DRM_ERROR("LSPCON is supported on GEN9 only\n"); 214 DRM_ERROR("LSPCON is not supported on this platform\n");
215 return false; 215 return false;
216 } 216 }
217 217
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 96c2cbd81869..593349be8b9d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
469 469
470 if (i915.invert_brightness > 0 || 470 if (i915.invert_brightness > 0 ||
471 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { 471 dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
472 return panel->backlight.max - val; 472 return panel->backlight.max - val + panel->backlight.min;
473 } 473 }
474 474
475 return val; 475 return val;
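
The intel_panel.c fix keeps inverted brightness values inside the panel's valid range: when the minimum is above zero, `max - val` can land below it, whereas `max - val + min` maps [min, max] back onto [min, max]. A worked check of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t invert(uint32_t val, uint32_t min, uint32_t max)
    {
            return max - val + min;         /* maps min->max and max->min */
    }

    int main(void)
    {
            /* example range: a panel that misbehaves below level 10 */
            assert(invert(10, 10, 100) == 100);
            assert(invert(100, 10, 100) == 10);
            assert(invert(55, 10, 100) == 55);  /* midpoint is a fixed point */
            return 0;
    }
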
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48ea0fca1f72..40b224b44d1b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4463,8 +4463,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
4463 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && 4463 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
4464 (plane_bytes_per_line / 512 < 1)) 4464 (plane_bytes_per_line / 512 < 1))
4465 selected_result = method2; 4465 selected_result = method2;
4466 else if ((ddb_allocation && ddb_allocation / 4466 else if (ddb_allocation >=
4467 fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) 4467 fixed_16_16_to_u32_round_up(plane_blocks_per_line))
4468 selected_result = min_fixed_16_16(method1, method2); 4468 selected_result = min_fixed_16_16(method1, method2);
4469 else if (latency >= linetime_us) 4469 else if (latency >= linetime_us)
4470 selected_result = min_fixed_16_16(method1, method2); 4470 selected_result = min_fixed_16_16(method1, method2);
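
The skl_compute_plane_wm() fix replaces a malformed test (an integer divided by a raw fixed-point value) with a comparison against the 16.16 quantity rounded up to an integer. A sketch of the 16.16 helpers under stated assumptions (the type and helper names mirror, but are not copied from, the i915 fixed-point code):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint32_t val; } fixed_16_16_t;  /* 16 int, 16 frac bits */

    static fixed_16_16_t fixed_from_frac(uint32_t num, uint32_t den)
    {
            fixed_16_16_t f = { (uint32_t)(((uint64_t)num << 16) / den) };
            return f;
    }

    static uint32_t fixed_to_u32_round_up(fixed_16_16_t f)
    {
            return (f.val + 0xffff) >> 16;
    }

    int main(void)
    {
            fixed_16_16_t blocks = fixed_from_frac(5, 2);   /* 2.5 blocks/line */
            /* an allocation of 3 blocks covers ceil(2.5) = 3 */
            printf("%d\n", 3 >= fixed_to_u32_round_up(blocks));
            return 0;
    }
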
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 627e2aa09766..8cdec455cf7d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -206,7 +206,7 @@ struct drm_i915_private *mock_gem_device(void)
206 mkwrite_device_info(i915)->ring_mask = BIT(0); 206 mkwrite_device_info(i915)->ring_mask = BIT(0);
207 i915->engine[RCS] = mock_engine(i915, "mock"); 207 i915->engine[RCS] = mock_engine(i915, "mock");
208 if (!i915->engine[RCS]) 208 if (!i915->engine[RCS])
209 goto err_dependencies; 209 goto err_priorities;
210 210
211 i915->kernel_context = mock_context(i915, NULL); 211 i915->kernel_context = mock_context(i915, NULL);
212 if (!i915->kernel_context) 212 if (!i915->kernel_context)
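
The mock_gem_device() one-liner retargets an error `goto` so the unwind starts at the right rung of the cleanup ladder: only what was successfully allocated before the failure gets freed. A generic sketch of that ladder:

    #include <stdlib.h>

    struct ctx { void *a, *b; };

    static int setup(struct ctx *c)
    {
            c->a = malloc(16);
            if (!c->a)
                    goto err_none;          /* nothing to undo yet */

            c->b = malloc(16);
            if (!c->b)
                    goto err_a;             /* undo exactly what succeeded */

            return 0;

    err_a:
            free(c->a);
    err_none:
            return -1;
    }

    int main(void)
    {
            struct ctx c = { 0 };
            if (setup(&c) == 0) {
                    free(c.b);
                    free(c.a);
            }
            return 0;
    }
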
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 49546222c6d3..d3845989a29d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = {
54 DRM_FORMAT_RGBA8888, 54 DRM_FORMAT_RGBA8888,
55 DRM_FORMAT_RGBX8888, 55 DRM_FORMAT_RGBX8888,
56 DRM_FORMAT_BGRA8888, 56 DRM_FORMAT_BGRA8888,
57 DRM_FORMAT_BGRA8888, 57 DRM_FORMAT_BGRX8888,
58 DRM_FORMAT_UYVY, 58 DRM_FORMAT_UYVY,
59 DRM_FORMAT_VYUY, 59 DRM_FORMAT_VYUY,
60 DRM_FORMAT_YUYV, 60 DRM_FORMAT_YUYV,
@@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
545 return; 545 return;
546 } 546 }
547 547
548 ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
548 switch (ipu_plane->dp_flow) { 549 switch (ipu_plane->dp_flow) {
549 case IPU_DP_FLOW_SYNC_BG: 550 case IPU_DP_FLOW_SYNC_BG:
550 ipu_dp_setup_channel(ipu_plane->dp, 551 ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
551 IPUV3_COLORSPACE_RGB,
552 IPUV3_COLORSPACE_RGB);
553 ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); 552 ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
554 break; 553 break;
555 case IPU_DP_FLOW_SYNC_FG: 554 case IPU_DP_FLOW_SYNC_FG:
556 ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
557 ipu_dp_setup_channel(ipu_plane->dp, ics, 555 ipu_dp_setup_channel(ipu_plane->dp, ics,
558 IPUV3_COLORSPACE_UNKNOWN); 556 IPUV3_COLORSPACE_UNKNOWN);
559 /* Enable local alpha on partial plane */ 557 /* Enable local alpha on partial plane */
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 636031a30e17..8aca20209cb8 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
237 237
238 /* port@1 is the output port */ 238 /* port@1 is the output port */
239 ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge); 239 ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
240 if (ret) 240 if (ret && ret != -ENODEV)
241 return ret; 241 return ret;
242 242
243 imxpd->dev = dev; 243 imxpd->dev = dev;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index b638d192ce5e..99d39b2aefa6 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -5,7 +5,7 @@ config DRM_MSM
5 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST)
6 depends on OF && COMMON_CLK 6 depends on OF && COMMON_CLK
7 depends on MMU 7 depends on MMU
8 select QCOM_MDT_LOADER 8 select QCOM_MDT_LOADER if ARCH_QCOM
9 select REGULATOR 9 select REGULATOR
10 select DRM_KMS_HELPER 10 select DRM_KMS_HELPER
11 select DRM_PANEL 11 select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index b4b54f1c24bc..f9eae03aa1dc 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,7 @@
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16#include <linux/qcom_scm.h> 16#include <linux/qcom_scm.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/of_reserved_mem.h> 18#include <linux/of_address.h>
19#include <linux/soc/qcom/mdt_loader.h> 19#include <linux/soc/qcom/mdt_loader.h>
20#include "msm_gem.h" 20#include "msm_gem.h"
21#include "msm_mmu.h" 21#include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
26 26
27#define GPU_PAS_ID 13 27#define GPU_PAS_ID 13
28 28
29#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
30
31static int zap_shader_load_mdt(struct device *dev, const char *fwname) 29static int zap_shader_load_mdt(struct device *dev, const char *fwname)
32{ 30{
33 const struct firmware *fw; 31 const struct firmware *fw;
32 struct device_node *np;
33 struct resource r;
34 phys_addr_t mem_phys; 34 phys_addr_t mem_phys;
35 ssize_t mem_size; 35 ssize_t mem_size;
36 void *mem_region = NULL; 36 void *mem_region = NULL;
37 int ret; 37 int ret;
38 38
39 if (!IS_ENABLED(CONFIG_ARCH_QCOM))
40 return -EINVAL;
41
42 np = of_get_child_by_name(dev->of_node, "zap-shader");
43 if (!np)
44 return -ENODEV;
45
46 np = of_parse_phandle(np, "memory-region", 0);
47 if (!np)
48 return -EINVAL;
49
50 ret = of_address_to_resource(np, 0, &r);
51 if (ret)
52 return ret;
53
54 mem_phys = r.start;
55 mem_size = resource_size(&r);
56
39 /* Request the MDT file for the firmware */ 57 /* Request the MDT file for the firmware */
40 ret = request_firmware(&fw, fwname, dev); 58 ret = request_firmware(&fw, fwname, dev);
41 if (ret) { 59 if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
51 } 69 }
52 70
53 /* Allocate memory for the firmware image */ 71 /* Allocate memory for the firmware image */
54 mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL); 72 mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
55 if (!mem_region) { 73 if (!mem_region) {
56 ret = -ENOMEM; 74 ret = -ENOMEM;
57 goto out; 75 goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
69 DRM_DEV_ERROR(dev, "Unable to authorize the image\n"); 87 DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
70 88
71out: 89out:
90 if (mem_region)
91 memunmap(mem_region);
92
72 release_firmware(fw); 93 release_firmware(fw);
73 94
74 return ret; 95 return ret;
75} 96}
76#else
77static int zap_shader_load_mdt(struct device *dev, const char *fwname)
78{
79 return -ENODEV;
80}
81#endif
82 97
83static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, 98static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
84 struct msm_file_private *ctx) 99 struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
117 gpu->funcs->flush(gpu); 132 gpu->funcs->flush(gpu);
118} 133}
119 134
120struct a5xx_hwcg { 135static const struct {
121 u32 offset; 136 u32 offset;
122 u32 value; 137 u32 value;
123}; 138} a5xx_hwcg[] = {
124
125static const struct a5xx_hwcg a530_hwcg[] = {
126 {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, 139 {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
127 {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, 140 {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
128 {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, 141 {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
217 {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} 230 {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
218}; 231};
219 232
220static const struct { 233void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
221 int (*test)(struct adreno_gpu *gpu);
222 const struct a5xx_hwcg *regs;
223 unsigned int count;
224} a5xx_hwcg_regs[] = {
225 { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
226};
227
228static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
229 const struct a5xx_hwcg *regs, unsigned int count)
230{ 234{
231 unsigned int i; 235 unsigned int i;
232 236
233 for (i = 0; i < count; i++) 237 for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
234 gpu_write(gpu, regs[i].offset, regs[i].value); 238 gpu_write(gpu, a5xx_hwcg[i].offset,
239 state ? a5xx_hwcg[i].value : 0);
235 240
236 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); 241 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
237 gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); 242 gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
238}
239
240static void a5xx_enable_hwcg(struct msm_gpu *gpu)
241{
242 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
243 unsigned int i;
244
245 for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
246 if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
247 _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
248 a5xx_hwcg_regs[i].count);
249 return;
250 }
251 }
252} 243}
253 244
254static int a5xx_me_init(struct msm_gpu *gpu) 245static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
377 return ret; 368 return ret;
378} 369}
379 370
380/* Set up a child device to "own" the zap shader */
381static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
382{
383 struct device_node *node;
384 int ret;
385
386 if (dev->parent)
387 return 0;
388
389 /* Find the sub-node for the zap shader */
390 node = of_get_child_by_name(parent->of_node, "zap-shader");
391 if (!node) {
392 DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
393 return -ENODEV;
394 }
395
396 dev->parent = parent;
397 dev->of_node = node;
398 dev_set_name(dev, "adreno_zap_shader");
399
400 ret = device_register(dev);
401 if (ret) {
402 DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
403 goto out;
404 }
405
406 ret = of_reserved_mem_device_init(dev);
407 if (ret) {
408 DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
409 device_unregister(dev);
410 }
411
412out:
413 if (ret)
414 dev->parent = NULL;
415
416 return ret;
417}
418
419static int a5xx_zap_shader_init(struct msm_gpu *gpu) 371static int a5xx_zap_shader_init(struct msm_gpu *gpu)
420{ 372{
421 static bool loaded; 373 static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
444 return -ENODEV; 396 return -ENODEV;
445 } 397 }
446 398
447 ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev); 399 ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
448
449 if (!ret)
450 ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
451 adreno_gpu->info->zapfw);
452 400
453 loaded = !ret; 401 loaded = !ret;
454 402
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
545 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF); 493 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
546 494
547 /* Enable HWCG */ 495 /* Enable HWCG */
548 a5xx_enable_hwcg(gpu); 496 a5xx_set_hwcg(gpu, true);
549 497
550 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); 498 gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
551 499
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
691 639
692 DBG("%s", gpu->name); 640 DBG("%s", gpu->name);
693 641
694 if (a5xx_gpu->zap_dev.parent)
695 device_unregister(&a5xx_gpu->zap_dev);
696
697 if (a5xx_gpu->pm4_bo) { 642 if (a5xx_gpu->pm4_bo) {
698 if (a5xx_gpu->pm4_iova) 643 if (a5xx_gpu->pm4_iova)
699 msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace); 644 msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
920 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 865 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
921 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 866 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
922 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, 867 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
923 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, 868 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
924 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, 869 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
925 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 870 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
926 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, 871 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
927 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 872 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
928 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 873 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
929 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 874 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
930 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, 875 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
931 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, 876 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
932 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, 877 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
933 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 878 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
934 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, 879 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
935 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 880 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
936 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, 881 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
937 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 882 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
938 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, 883 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
939 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, 884 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
940 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, 885 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
941 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 886 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
942 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, 887 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
943 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, 888 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
944 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, 889 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
945 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 890 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
946 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, 891 0xB9A0, 0xB9BF, ~0
947 ~0
948}; 892};
949 893
950static void a5xx_dump(struct msm_gpu *gpu) 894static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
1020{ 964{
1021 seq_printf(m, "status: %08x\n", 965 seq_printf(m, "status: %08x\n",
1022 gpu_read(gpu, REG_A5XX_RBBM_STATUS)); 966 gpu_read(gpu, REG_A5XX_RBBM_STATUS));
967
968 /*
969 * Temporarily disable hardware clock gating before going into
970 * adreno_show to avoid issues while reading the registers
971 */
972 a5xx_set_hwcg(gpu, false);
1023 adreno_show(gpu, m); 973 adreno_show(gpu, m);
974 a5xx_set_hwcg(gpu, true);
1024} 975}
1025#endif 976#endif
1026 977
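
The a5xx rework above collapses the per-chip clock-gating tables into a single array walked by a5xx_set_hwcg(), so enabling and disabling use the same list. A minimal sketch of that table-driven toggle, with a stubbed register write and made-up offsets:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static void reg_write(uint32_t offset, uint32_t value)
    {
            (void)offset; (void)value;      /* stand-in for gpu_write() */
    }

    static const struct { uint32_t offset, value; } hwcg[] = {
            { 0x100, 0x02222222 },
            { 0x104, 0x02222222 },
    };

    /* One walker for both directions: the table holds the "enabled"
     * values; disabling writes zeroes to the same registers. */
    static void set_hwcg(bool state)
    {
            for (size_t i = 0; i < sizeof(hwcg) / sizeof(hwcg[0]); i++)
                    reg_write(hwcg[i].offset, state ? hwcg[i].value : 0);
    }

    int main(void) { set_hwcg(true); set_hwcg(false); return 0; }
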
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6638bc85645d..1137092241d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -36,8 +36,6 @@ struct a5xx_gpu {
36 uint32_t gpmu_dwords; 36 uint32_t gpmu_dwords;
37 37
38 uint32_t lm_leakage; 38 uint32_t lm_leakage;
39
40 struct device zap_dev;
41}; 39};
42 40
43#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base) 41#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
59} 57}
60 58
61bool a5xx_idle(struct msm_gpu *gpu); 59bool a5xx_idle(struct msm_gpu *gpu);
60void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
62 61
63#endif /* __A5XX_GPU_H__ */ 62#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1ab2703674a..7414c6bbd582 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
48 *value = adreno_gpu->base.fast_rate; 48 *value = adreno_gpu->base.fast_rate;
49 return 0; 49 return 0;
50 case MSM_PARAM_TIMESTAMP: 50 case MSM_PARAM_TIMESTAMP:
51 if (adreno_gpu->funcs->get_timestamp) 51 if (adreno_gpu->funcs->get_timestamp) {
52 return adreno_gpu->funcs->get_timestamp(gpu, value); 52 int ret;
53
54 pm_runtime_get_sync(&gpu->pdev->dev);
55 ret = adreno_gpu->funcs->get_timestamp(gpu, value);
56 pm_runtime_put_autosuspend(&gpu->pdev->dev);
57
58 return ret;
59 }
53 return -EINVAL; 60 return -EINVAL;
54 default: 61 default:
55 DBG("%s: invalid param: %u", gpu->name, param); 62 DBG("%s: invalid param: %u", gpu->name, param);
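
adreno_get_param() now wakes the device before the timestamp read: touching MMIO on a runtime-suspended GPU can fault or return junk. A generic sketch of the bracket pattern; pm_runtime_get_sync()/pm_runtime_put_autosuspend() are the real kernel calls, the stubs below merely stand in for them:

    #include <stdio.h>

    static int  pm_get(void) { puts("powered up");  return 0; }
    static void pm_put(void) { puts("may suspend"); }

    static int read_timestamp(unsigned long long *ts)
    {
            *ts = 12345;                    /* pretend MMIO read */
            return 0;
    }

    static int get_timestamp(unsigned long long *ts)
    {
            int ret;

            pm_get();                       /* hold the device awake... */
            ret = read_timestamp(ts);
            pm_put();                       /* ...only for the register access */
            return ret;
    }

    int main(void) { unsigned long long t; return get_timestamp(&t); }
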
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9e9c5696bc03..c7b612c3d771 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
2137 struct msm_dsi_phy_clk_request *clk_req) 2137 struct msm_dsi_phy_clk_request *clk_req)
2138{ 2138{
2139 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2139 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2140 int ret;
2141
2142 ret = dsi_calc_clk_rate(msm_host);
2143 if (ret) {
2144 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2145 return;
2146 }
2140 2147
2141 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; 2148 clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
2142 clk_req->escclk_rate = msm_host->esc_clk_rate; 2149 clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2280 struct drm_display_mode *mode) 2287 struct drm_display_mode *mode)
2281{ 2288{
2282 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2289 struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
2283 int ret;
2284 2290
2285 if (msm_host->mode) { 2291 if (msm_host->mode) {
2286 drm_mode_destroy(msm_host->dev, msm_host->mode); 2292 drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
2293 return -ENOMEM; 2299 return -ENOMEM;
2294 } 2300 }
2295 2301
2296 ret = dsi_calc_clk_rate(msm_host);
2297 if (ret) {
2298 pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
2299 return ret;
2300 }
2301
2302 return 0; 2302 return 0;
2303} 2303}
2304 2304
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index cb5415d6c04b..735a87a699fa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
221 struct mdp5_ctl *ctl = mdp5_cstate->ctl; 221 struct mdp5_ctl *ctl = mdp5_cstate->ctl;
222 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; 222 uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
223 unsigned long flags; 223 unsigned long flags;
224 enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; 224 enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
225 enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE }; 225 enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
226 int i, plane_cnt = 0; 226 int i, plane_cnt = 0;
227 bool bg_alpha_enabled = false; 227 bool bg_alpha_enabled = false;
228 u32 mixer_op_mode = 0; 228 u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
753 if (!handle) { 753 if (!handle) {
754 DBG("Cursor off"); 754 DBG("Cursor off");
755 cursor_enable = false; 755 cursor_enable = false;
756 mdp5_enable(mdp5_kms);
756 goto set_cursor; 757 goto set_cursor;
757 } 758 }
758 759
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
776 777
777 get_roi(crtc, &roi_w, &roi_h); 778 get_roi(crtc, &roi_w, &roi_h);
778 779
780 mdp5_enable(mdp5_kms);
781
779 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride); 782 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
780 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm), 783 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
781 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888)); 784 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ set_cursor:
804 crtc_flush(crtc, flush_mask); 807 crtc_flush(crtc, flush_mask);
805 808
806end: 809end:
810 mdp5_disable(mdp5_kms);
807 if (old_bo) { 811 if (old_bo) {
808 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo); 812 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
809 /* enable vblank to complete cursor work: */ 813 /* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
836 840
837 get_roi(crtc, &roi_w, &roi_h); 841 get_roi(crtc, &roi_w, &roi_h);
838 842
843 mdp5_enable(mdp5_kms);
844
839 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 845 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
840 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm), 846 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
841 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) | 847 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
847 853
848 crtc_flush(crtc, flush_mask); 854 crtc_flush(crtc, flush_mask);
849 855
856 mdp5_disable(mdp5_kms);
857
850 return 0; 858 return 0;
851} 859}
852 860
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 97f3294fbfc6..70bef51245af 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
299 struct mdp5_interface *intf = mdp5_encoder->intf; 299 struct mdp5_interface *intf = mdp5_encoder->intf;
300 300
301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) 301 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
302 mdp5_cmd_encoder_disable(encoder); 302 mdp5_cmd_encoder_enable(encoder);
303 else 303 else
304 mdp5_vid_encoder_enable(encoder); 304 mdp5_vid_encoder_enable(encoder);
305} 305}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5d13fa5381ee..1c603aef3c59 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
502 const char *name, bool mandatory) 502 const char *name, bool mandatory)
503{ 503{
504 struct device *dev = &pdev->dev; 504 struct device *dev = &pdev->dev;
505 struct clk *clk = devm_clk_get(dev, name); 505 struct clk *clk = msm_clk_get(pdev, name);
506 if (IS_ERR(clk) && mandatory) { 506 if (IS_ERR(clk) && mandatory) {
507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk)); 507 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
508 return PTR_ERR(clk); 508 return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
887 } 887 }
888 888
889 /* mandatory clocks: */ 889 /* mandatory clocks: */
890 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true); 890 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
891 if (ret) 891 if (ret)
892 goto fail; 892 goto fail;
893 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); 893 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
894 if (ret) 894 if (ret)
895 goto fail; 895 goto fail;
896 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); 896 ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
897 if (ret) 897 if (ret)
898 goto fail; 898 goto fail;
899 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true); 899 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
900 if (ret) 900 if (ret)
901 goto fail; 901 goto fail;
902 902
903 /* optional clocks: */ 903 /* optional clocks: */
904 get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false); 904 get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
905 905
906 /* we need to set a default rate before enabling. Set a safe 906 /* we need to set a default rate before enabling. Set a safe
907 * rate first, then figure out hw revision, and then set a 907 * rate first, then figure out hw revision, and then set a
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index fe3a4de1a433..61f39c86dd09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
890 struct mdp5_hw_pipe *right_hwpipe; 890 struct mdp5_hw_pipe *right_hwpipe;
891 const struct mdp_format *format; 891 const struct mdp_format *format;
892 uint32_t nplanes, config = 0; 892 uint32_t nplanes, config = 0;
893 struct phase_step step = { 0 }; 893 struct phase_step step = { { 0 } };
894 struct pixel_ext pe = { 0 }; 894 struct pixel_ext pe = { { 0 } };
895 uint32_t hdecm = 0, vdecm = 0; 895 uint32_t hdecm = 0, vdecm = 0;
896 uint32_t pix_format; 896 uint32_t pix_format;
897 unsigned int rotation; 897 unsigned int rotation;
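
The `{ 0 }` to `{ { 0 } }` changes in mdp5 silence -Wmissing-braces: when a struct's first member is itself an aggregate, the initializer wants one brace level per nesting. Both spellings zero-fill the whole object; only the diagnostic differs:

    #include <stdio.h>

    struct phase_step {
            int p[2];       /* first member is an aggregate... */
            int extra;
    };

    int main(void)
    {
            struct phase_step a = { { 0 } };  /* ...so the braces nest */
            struct phase_step b = { 0 };      /* same zero-init, but warns
                                                 under -Wmissing-braces */

            printf("%d %d\n", a.extra, b.extra);  /* both fully zeroed */
            return 0;
    }
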
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 65f35544c1ec..a0c60e738db8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
383 struct page **pages; 383 struct page **pages;
384 384
385 vma = add_vma(obj, aspace); 385 vma = add_vma(obj, aspace);
386 if (IS_ERR(vma)) 386 if (IS_ERR(vma)) {
387 return PTR_ERR(vma); 387 ret = PTR_ERR(vma);
388 goto unlock;
389 }
388 390
389 pages = get_pages(obj); 391 pages = get_pages(obj);
390 if (IS_ERR(pages)) { 392 if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
405 407
406fail: 408fail:
407 del_vma(vma); 409 del_vma(vma);
408 410unlock:
409 mutex_unlock(&msm_obj->lock); 411 mutex_unlock(&msm_obj->lock);
410 return ret; 412 return ret;
411} 413}
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
928 if (use_vram) { 930 if (use_vram) {
929 struct msm_gem_vma *vma; 931 struct msm_gem_vma *vma;
930 struct page **pages; 932 struct page **pages;
933 struct msm_gem_object *msm_obj = to_msm_bo(obj);
934
935 mutex_lock(&msm_obj->lock);
931 936
932 vma = add_vma(obj, NULL); 937 vma = add_vma(obj, NULL);
938 mutex_unlock(&msm_obj->lock);
933 if (IS_ERR(vma)) { 939 if (IS_ERR(vma)) {
934 ret = PTR_ERR(vma); 940 ret = PTR_ERR(vma);
935 goto fail; 941 goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 6bfca7470141..8a75c0bd8a78 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) 34 struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
35{ 35{
36 struct msm_gem_submit *submit; 36 struct msm_gem_submit *submit;
37 uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + 37 uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
38 (nr_cmds * sizeof(submit->cmd[0])); 38 ((u64)nr_cmds * sizeof(submit->cmd[0]));
39 39
40 if (sz > SIZE_MAX) 40 if (sz > SIZE_MAX)
41 return NULL; 41 return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
451 if (ret) 451 if (ret)
452 goto out; 452 goto out;
453 453
454 if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { 454 if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
455 ret = submit_fence_sync(submit); 455 ret = submit_fence_sync(submit);
456 if (ret) 456 if (ret)
457 goto out; 457 goto out;
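
The submit_create() fix casts to u64 before multiplying: on 32-bit, `nr_bos * sizeof(...)` wraps before the `sz > SIZE_MAX` guard ever runs, so a huge nr_bos could yield a small, attacker-friendly allocation. A sketch of the overflow-safe sizing, with an invented struct as the payload:

    #include <stdint.h>
    #include <stdio.h>

    struct bo { char pad[32]; };

    /* Compute an allocation size that cannot wrap before it is checked. */
    static int alloc_size(uint32_t nr_bos, uint64_t *out)
    {
            uint64_t sz = 64 + (uint64_t)nr_bos * sizeof(struct bo);

            if (sz > SIZE_MAX)      /* meaningful because sz is 64-bit */
                    return -1;
            *out = sz;
            return 0;
    }

    int main(void)
    {
            uint64_t sz;
            /* rejected on 32-bit targets, where SIZE_MAX is 2^32 - 1 */
            printf("%d\n", alloc_size(0xffffffffu, &sz));
            return 0;
    }
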
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index c36321bc8714..d34e331554f3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -42,7 +42,7 @@ void
42msm_gem_unmap_vma(struct msm_gem_address_space *aspace, 42msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
43 struct msm_gem_vma *vma, struct sg_table *sgt) 43 struct msm_gem_vma *vma, struct sg_table *sgt)
44{ 44{
45 if (!vma->iova) 45 if (!aspace || !vma->iova)
46 return; 46 return;
47 47
48 if (aspace->mmu) { 48 if (aspace->mmu) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 147b22163f9f..dab78c660dd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1158,8 +1158,6 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
1158 return -ENODEV; 1158 return -ENODEV;
1159 if (WARN_ON(msg->size > 16)) 1159 if (WARN_ON(msg->size > 16))
1160 return -E2BIG; 1160 return -E2BIG;
1161 if (msg->size == 0)
1162 return msg->size;
1163 1161
1164 ret = nvkm_i2c_aux_acquire(aux); 1162 ret = nvkm_i2c_aux_acquire(aux);
1165 if (ret) 1163 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 8d1df5678eaa..f362c9fa8b3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -409,7 +409,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
409 struct nouveau_display *disp = nouveau_display(dev); 409 struct nouveau_display *disp = nouveau_display(dev);
410 struct nouveau_drm *drm = nouveau_drm(dev); 410 struct nouveau_drm *drm = nouveau_drm(dev);
411 struct drm_connector *connector; 411 struct drm_connector *connector;
412 struct drm_crtc *crtc;
413 412
414 if (!suspend) { 413 if (!suspend) {
415 if (drm_drv_uses_atomic_modeset(dev)) 414 if (drm_drv_uses_atomic_modeset(dev))
@@ -418,10 +417,6 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
418 drm_crtc_force_disable_all(dev); 417 drm_crtc_force_disable_all(dev);
419 } 418 }
420 419
421 /* Make sure that drm and hw vblank irqs get properly disabled. */
422 drm_for_each_crtc(crtc, dev)
423 drm_crtc_vblank_off(crtc);
424
425 /* disable flip completion events */ 420 /* disable flip completion events */
426 nvif_notify_put(&drm->flip); 421 nvif_notify_put(&drm->flip);
427 422
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index e3132a2ce34d..2bc0dc985214 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3674,15 +3674,24 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
3674 drm_mode_connector_attach_encoder(connector, encoder); 3674 drm_mode_connector_attach_encoder(connector, encoder);
3675 3675
3676 if (dcbe->type == DCB_OUTPUT_DP) { 3676 if (dcbe->type == DCB_OUTPUT_DP) {
3677 struct nv50_disp *disp = nv50_disp(encoder->dev);
3677 struct nvkm_i2c_aux *aux = 3678 struct nvkm_i2c_aux *aux =
3678 nvkm_i2c_aux_find(i2c, dcbe->i2c_index); 3679 nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
3679 if (aux) { 3680 if (aux) {
3680 nv_encoder->i2c = &nv_connector->aux.ddc; 3681 if (disp->disp->oclass < GF110_DISP) {
3682 /* HW has no support for address-only
3683 * transactions, so we're required to
3684 * use custom I2C-over-AUX code.
3685 */
3686 nv_encoder->i2c = &aux->i2c;
3687 } else {
3688 nv_encoder->i2c = &nv_connector->aux.ddc;
3689 }
3681 nv_encoder->aux = aux; 3690 nv_encoder->aux = aux;
3682 } 3691 }
3683 3692
3684 /*TODO: Use DP Info Table to check for support. */ 3693 /*TODO: Use DP Info Table to check for support. */
3685 if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { 3694 if (disp->disp->oclass >= GF110_DISP) {
3686 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, 3695 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
3687 nv_connector->base.base.id, 3696 nv_connector->base.base.id,
3688 &nv_encoder->dp.mstm); 3697 &nv_encoder->dp.mstm);
@@ -3931,6 +3940,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
3931 3940
3932 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, 3941 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
3933 asyh->clr.mask, asyh->set.mask); 3942 asyh->clr.mask, asyh->set.mask);
3943 if (crtc_state->active && !asyh->state.active)
3944 drm_crtc_vblank_off(crtc);
3934 3945
3935 if (asyh->clr.mask) { 3946 if (asyh->clr.mask) {
3936 nv50_head_flush_clr(head, asyh, atom->flush_disable); 3947 nv50_head_flush_clr(head, asyh, atom->flush_disable);
@@ -4016,11 +4027,13 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4016 nv50_head_flush_set(head, asyh); 4027 nv50_head_flush_set(head, asyh);
4017 interlock_core = 1; 4028 interlock_core = 1;
4018 } 4029 }
4019 }
4020 4030
4021 for_each_crtc_in_state(state, crtc, crtc_state, i) { 4031 if (asyh->state.active) {
4022 if (crtc->state->event) 4032 if (!crtc_state->active)
4023 drm_crtc_vblank_get(crtc); 4033 drm_crtc_vblank_on(crtc);
4034 if (asyh->state.event)
4035 drm_crtc_vblank_get(crtc);
4036 }
4024 } 4037 }
4025 4038
4026 /* Update plane(s). */ 4039 /* Update plane(s). */
@@ -4067,12 +4080,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
4067 if (crtc->state->event) { 4080 if (crtc->state->event) {
4068 unsigned long flags; 4081 unsigned long flags;
4069 /* Get correct count/ts if racing with vblank irq */ 4082 /* Get correct count/ts if racing with vblank irq */
4070 drm_accurate_vblank_count(crtc); 4083 if (crtc->state->active)
4084 drm_accurate_vblank_count(crtc);
4071 spin_lock_irqsave(&crtc->dev->event_lock, flags); 4085 spin_lock_irqsave(&crtc->dev->event_lock, flags);
4072 drm_crtc_send_vblank_event(crtc, crtc->state->event); 4086 drm_crtc_send_vblank_event(crtc, crtc->state->event);
4073 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4087 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4074 crtc->state->event = NULL; 4088 crtc->state->event = NULL;
4075 drm_crtc_vblank_put(crtc); 4089 if (crtc->state->active)
4090 drm_crtc_vblank_put(crtc);
4076 } 4091 }
4077 } 4092 }
4078 4093
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index c7c84d34d97e..88582af8bd89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
267 /* Create output path objects for each VBIOS display path. */ 267 /* Create output path objects for each VBIOS display path. */
268 i = -1; 268 i = -1;
269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) { 269 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
270 if (ver < 0x40) /* No support for chipsets prior to NV50. */
271 break;
270 if (dcbE.type == DCB_OUTPUT_UNUSED) 272 if (dcbE.type == DCB_OUTPUT_UNUSED)
271 continue; 273 continue;
272 if (dcbE.type == DCB_OUTPUT_EOL) 274 if (dcbE.type == DCB_OUTPUT_EOL)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index a24312fb0228..a1e8bf48b778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -22,6 +22,7 @@ struct nvkm_ior {
22 unsigned proto_evo:4; 22 unsigned proto_evo:4;
23 enum nvkm_ior_proto { 23 enum nvkm_ior_proto {
24 CRT, 24 CRT,
25 TV,
25 TMDS, 26 TMDS,
26 LVDS, 27 LVDS,
27 DP, 28 DP,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 19c635663399..6ea19466f436 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -22,7 +22,7 @@ struct nv50_disp {
22 u8 type[3]; 22 u8 type[3];
23 } pior; 23 } pior;
24 24
25 struct nv50_disp_chan *chan[17]; 25 struct nv50_disp_chan *chan[21];
26}; 26};
27 27
28void nv50_disp_super_1(struct nv50_disp *); 28void nv50_disp_super_1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 85aff85394ac..be9e7f8c3b23 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -62,6 +62,7 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
62 case 0: 62 case 0:
63 switch (outp->info.type) { 63 switch (outp->info.type) {
64 case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; 64 case DCB_OUTPUT_ANALOG: *type = DAC; return CRT;
65 case DCB_OUTPUT_TV : *type = DAC; return TV;
65 case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; 66 case DCB_OUTPUT_TMDS : *type = SOR; return TMDS;
66 case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; 67 case DCB_OUTPUT_LVDS : *type = SOR; return LVDS;
67 case DCB_OUTPUT_DP : *type = SOR; return DP; 68 case DCB_OUTPUT_DP : *type = SOR; return DP;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c2d21e..6d8f21290aa2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
129 129
130 if (bar->bar[0].mem) { 130 if (bar->bar[0].mem) {
131 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; 131 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
132 nvkm_wr32(device, 0x001714, 0xc0000000 | addr); 132 nvkm_wr32(device, 0x001714, 0x80000000 | addr);
133 } 133 }
134 134
135 return 0; 135 return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
index 48f01e40b8fc..b768e66a472b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
@@ -25,6 +25,7 @@ nvkm-y += nvkm/subdev/i2c/bit.o
25 25
26nvkm-y += nvkm/subdev/i2c/aux.o 26nvkm-y += nvkm/subdev/i2c/aux.o
27nvkm-y += nvkm/subdev/i2c/auxg94.o 27nvkm-y += nvkm/subdev/i2c/auxg94.o
28nvkm-y += nvkm/subdev/i2c/auxgf119.o
28nvkm-y += nvkm/subdev/i2c/auxgm200.o 29nvkm-y += nvkm/subdev/i2c/auxgm200.o
29 30
30nvkm-y += nvkm/subdev/i2c/anx9805.o 31nvkm-y += nvkm/subdev/i2c/anx9805.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index d172e42dd228..4c1f547da463 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -117,6 +117,10 @@ int
117nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, 117nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
118 u32 addr, u8 *data, u8 *size) 118 u32 addr, u8 *data, u8 *size)
119{ 119{
120 if (!*size && !aux->func->address_only) {
121 AUX_ERR(aux, "address-only transaction dropped");
122 return -ENOSYS;
123 }
120 return aux->func->xfer(aux, retry, type, addr, data, size); 124 return aux->func->xfer(aux, retry, type, addr, data, size);
121} 125}
122 126
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 27a4a39c87f0..9587ab456d9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -3,6 +3,7 @@
3#include "pad.h" 3#include "pad.h"
4 4
5struct nvkm_i2c_aux_func { 5struct nvkm_i2c_aux_func {
6 bool address_only;
6 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, 7 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
7 u32 addr, u8 *data, u8 *size); 8 u32 addr, u8 *data, u8 *size);
8 int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, 9 int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
@@ -17,7 +18,12 @@ void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
17int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, 18int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
18 u32 addr, u8 *data, u8 *size); 19 u32 addr, u8 *data, u8 *size);
19 20
21int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
22 int, u8, struct nvkm_i2c_aux **);
23
20int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 24int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
25int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *);
26int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
21int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 27int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
22 28
23#define AUX_MSG(b,l,f,a...) do { \ 29#define AUX_MSG(b,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index ab8cb196c34e..c8ab1b5741a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -72,7 +72,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
         return 0;
 }
 
-static int
+int
 g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                  u8 type, u32 addr, u8 *data, u8 *size)
 {
@@ -105,9 +105,9 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
         }
 
         ctrl = nvkm_rd32(device, 0x00e4e4 + base);
-        ctrl &= ~0x0001f0ff;
+        ctrl &= ~0x0001f1ff;
         ctrl |= type << 12;
-        ctrl |= *size - 1;
+        ctrl |= (*size ? (*size - 1) : 0x00000100);
         nvkm_wr32(device, 0x00e4e0 + base, addr);
 
         /* (maybe) retry transaction a number of times on failure... */
@@ -160,14 +160,10 @@ out:
         return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
 }
 
-static const struct nvkm_i2c_aux_func
-g94_i2c_aux_func = {
-        .xfer = g94_i2c_aux_xfer,
-};
-
 int
-g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
-                struct nvkm_i2c_aux **paux)
+g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func,
+                 struct nvkm_i2c_pad *pad, int index, u8 drive,
+                 struct nvkm_i2c_aux **paux)
 {
         struct g94_i2c_aux *aux;
 
@@ -175,8 +171,20 @@ g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
                 return -ENOMEM;
         *paux = &aux->base;
 
-        nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base);
+        nvkm_i2c_aux_ctor(func, pad, index, &aux->base);
         aux->ch = drive;
         aux->base.intr = 1 << aux->ch;
         return 0;
 }
+
+static const struct nvkm_i2c_aux_func
+g94_i2c_aux = {
+        .xfer = g94_i2c_aux_xfer,
+};
+
+int
+g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+                struct nvkm_i2c_aux **paux)
+{
+        return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
new file mode 100644
index 000000000000..dab40cd8fe3a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "aux.h"
+
+static const struct nvkm_i2c_aux_func
+gf119_i2c_aux = {
+        .address_only = true,
+        .xfer = g94_i2c_aux_xfer,
+};
+
+int
+gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive,
+                  struct nvkm_i2c_aux **paux)
+{
+        return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux);
+}
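
A brief aside on the construction above: g94_i2c_aux_new_() now takes the function table as a parameter, so a newer generation such as gf119 reuses the shared g94 transfer routine while advertising an extra capability (address_only). Below is a standalone C sketch of that constructor-injection shape; every name in it is invented for illustration and none of it is nouveau code.

/* sketch.c - illustrative only; builds with any C99 compiler */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct aux;

struct aux_func {
        bool address_only;                /* capability flag */
        int (*xfer)(struct aux *, int size);
};

struct aux {
        const struct aux_func *func;
};

/* shared transfer routine, reused by newer generations */
static int common_xfer(struct aux *aux, int size)
{
        (void)aux;
        printf("xfer, %d byte(s)\n", size);
        return 0;
}

/* base constructor, parameterized by the per-generation function table */
static struct aux *aux_new_(const struct aux_func *func)
{
        struct aux *aux = calloc(1, sizeof(*aux));

        if (aux)
                aux->func = func;
        return aux;
}

static const struct aux_func newer_gen = {
        .address_only = true,             /* extra capability... */
        .xfer = common_xfer,              /* ...on top of shared code */
};

int main(void)
{
        struct aux *aux = aux_new_(&newer_gen);
        int size = 0;

        if (!aux)
                return 1;
        /* core-level guard, mirroring nvkm_i2c_aux_xfer() above */
        if (size == 0 && !aux->func->address_only)
                return 1;                 /* -ENOSYS in the kernel */
        return aux->func->xfer(aux, size);
}
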
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ee091fa79628..7ef60895f43a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -105,9 +105,9 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
         }
 
         ctrl = nvkm_rd32(device, 0x00d954 + base);
-        ctrl &= ~0x0001f0ff;
+        ctrl &= ~0x0001f1ff;
         ctrl |= type << 12;
-        ctrl |= *size - 1;
+        ctrl |= (*size ? (*size - 1) : 0x00000100);
         nvkm_wr32(device, 0x00d950 + base, addr);
 
         /* (maybe) retry transaction a number of times on failure... */
@@ -162,6 +162,7 @@ out:
 
 static const struct nvkm_i2c_aux_func
 gm200_i2c_aux_func = {
+        .address_only = true,
         .xfer = gm200_i2c_aux_xfer,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
index d53212f1aa52..3bc4d0310076 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
@@ -28,7 +28,7 @@
 static const struct nvkm_i2c_pad_func
 gf119_i2c_pad_s_func = {
         .bus_new_4 = gf119_i2c_bus_new,
-        .aux_new_6 = g94_i2c_aux_new,
+        .aux_new_6 = gf119_i2c_aux_new,
         .mode = g94_i2c_pad_mode,
 };
 
@@ -41,7 +41,7 @@ gf119_i2c_pad_s_new(struct nvkm_i2c *i2c, int id, struct nvkm_i2c_pad **ppad)
 static const struct nvkm_i2c_pad_func
 gf119_i2c_pad_x_func = {
         .bus_new_4 = gf119_i2c_bus_new,
-        .aux_new_6 = g94_i2c_aux_new,
+        .aux_new_6 = gf119_i2c_aux_new,
 };
 
 int
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 699fe7f9b8bf..a2ab6dcdf4a2 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
         if (rdev->kfd) {
                 struct kgd2kfd_shared_resources gpu_resources = {
                         .compute_vmid_bitmap = 0xFF00,
-                        .num_mec = 1,
                         .num_pipe_per_mec = 4,
                         .num_queue_per_pipe = 8
                 };
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 50c41c0a50ef..dcc539ba85d6 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -5,6 +5,10 @@ config DRM_ROCKCHIP
         select DRM_KMS_HELPER
         select DRM_PANEL
         select VIDEOMODE_HELPERS
+        select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
+        select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
+        select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+        select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
         help
           Choose this option if you have a Rockchip soc chipset.
           This driver provides kernel mode setting and buffer
@@ -12,10 +16,10 @@ config DRM_ROCKCHIP
           2D or 3D acceleration; acceleration is performed by other
           IP found on the SoC.
 
+if DRM_ROCKCHIP
+
 config ROCKCHIP_ANALOGIX_DP
         bool "Rockchip specific extensions for Analogix DP driver"
-        depends on DRM_ROCKCHIP
-        select DRM_ANALOGIX_DP
         help
           This selects support for Rockchip SoC specific extensions
           for the Analogix Core DP driver. If you want to enable DP
@@ -23,9 +27,7 @@ config ROCKCHIP_ANALOGIX_DP
 
 config ROCKCHIP_CDN_DP
         bool "Rockchip cdn DP"
-        depends on DRM_ROCKCHIP
-        depends on EXTCON
-        select SND_SOC_HDMI_CODEC if SND_SOC
+        depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m)
         help
           This selects support for Rockchip SoC specific extensions
           for the cdn DP driver. If you want to enable Dp on
@@ -34,8 +36,6 @@ config ROCKCHIP_CDN_DP
 
 config ROCKCHIP_DW_HDMI
         bool "Rockchip specific extensions for Synopsys DW HDMI"
-        depends on DRM_ROCKCHIP
-        select DRM_DW_HDMI
         help
           This selects support for Rockchip SoC specific extensions
           for the Synopsys DesignWare HDMI driver. If you want to
@@ -44,8 +44,6 @@ config ROCKCHIP_DW_HDMI
 
 config ROCKCHIP_DW_MIPI_DSI
         bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
-        depends on DRM_ROCKCHIP
-        select DRM_MIPI_DSI
         help
           This selects support for Rockchip SoC specific extensions
           for the Synopsys DesignWare HDMI driver. If you want to
@@ -54,8 +52,9 @@ config ROCKCHIP_DW_MIPI_DSI
 
 config ROCKCHIP_INNO_HDMI
         bool "Rockchip specific extensions for Innosilicon HDMI"
-        depends on DRM_ROCKCHIP
         help
           This selects support for Rockchip SoC specific extensions
           for the Innosilicon HDMI driver. If you want to enable
           HDMI on RK3036 based SoC, you should select this option.
+
+endif
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index c6b1b7f3a2a3..c16bc0a7115b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
 static int rockchip_drm_sys_suspend(struct device *dev)
 {
         struct drm_device *drm = dev_get_drvdata(dev);
-        struct rockchip_drm_private *priv = drm->dev_private;
+        struct rockchip_drm_private *priv;
+
+        if (!drm)
+                return 0;
 
         drm_kms_helper_poll_disable(drm);
         rockchip_drm_fb_suspend(drm);
 
+        priv = drm->dev_private;
         priv->state = drm_atomic_helper_suspend(drm);
         if (IS_ERR(priv->state)) {
                 rockchip_drm_fb_resume(drm);
@@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
         struct drm_device *drm = dev_get_drvdata(dev);
-        struct rockchip_drm_private *priv = drm->dev_private;
+        struct rockchip_drm_private *priv;
+
+        if (!drm)
+                return 0;
 
+        priv = drm->dev_private;
         drm_atomic_helper_resume(drm, priv->state);
         rockchip_drm_fb_resume(drm);
         drm_kms_helper_poll_enable(drm);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 5d450332c2fd..2900f1410d95 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
         struct vop *vop = to_vop(crtc);
-        int ret;
+        int ret, i;
 
         ret = pm_runtime_get_sync(vop->dev);
         if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
         }
 
         memcpy(vop->regs, vop->regsbak, vop->len);
+        /*
+         * We need to make sure that all windows are disabled before we
+         * enable the crtc. Otherwise we might try to scan from a destroyed
+         * buffer later.
+         */
+        for (i = 0; i < vop->data->win_size; i++) {
+                struct vop_win *vop_win = &vop->win[i];
+                const struct vop_win_data *win = vop_win->data;
+
+                spin_lock(&vop->reg_lock);
+                VOP_WIN_SET(vop, win, enable, 0);
+                spin_unlock(&vop->reg_lock);
+        }
+
         vop_cfg_done(vop);
 
         /*
@@ -566,28 +580,11 @@ err_put_pm_runtime:
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
         struct vop *vop = to_vop(crtc);
-        int i;
 
         WARN_ON(vop->event);
 
         rockchip_drm_psr_deactivate(&vop->crtc);
 
-        /*
-         * We need to make sure that all windows are disabled before we
-         * disable that crtc. Otherwise we might try to scan from a destroyed
-         * buffer later.
-         */
-        for (i = 0; i < vop->data->win_size; i++) {
-                struct vop_win *vop_win = &vop->win[i];
-                const struct vop_win_data *win = vop_win->data;
-
-                spin_lock(&vop->reg_lock);
-                VOP_WIN_SET(vop, win, enable, 0);
-                spin_unlock(&vop->reg_lock);
-        }
-
-        vop_cfg_done(vop);
-
         drm_crtc_vblank_off(crtc);
 
         /*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
          * Src.x1 can be odd when do clip, but yuv plane start point
          * need align with 2 pixel.
          */
-        if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+        if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+                DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
                 return -EINVAL;
+        }
 
         return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
         spin_lock(&vop->reg_lock);
 
         VOP_WIN_SET(vop, win, format, format);
-        VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+        VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
         VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
         if (is_yuv_support(fb->format->format)) {
                 int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
                 offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
                 dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-                VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+                VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
                 VOP_WIN_SET(vop, win, uv_mst, dma_addr);
         }
 
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 9979fd0c2282..27eefbfcf3d0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
         act_height = (src_h + vskiplines - 1) / vskiplines;
 
+        if (act_height == dst_h)
+                return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
         return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
 
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 2c4817fb0890..8fe5b184b4e8 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -7,7 +7,6 @@ config DRM_STM
         select DRM_PANEL
         select VIDEOMODE_HELPERS
         select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-        default y
 
         help
           Enable support for the on-chip display controller on
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index abc7d8fe06b4..a45a627283a1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -25,12 +25,20 @@
25#include "sun4i_framebuffer.h" 25#include "sun4i_framebuffer.h"
26#include "sun4i_tcon.h" 26#include "sun4i_tcon.h"
27 27
28static void sun4i_drv_lastclose(struct drm_device *dev)
29{
30 struct sun4i_drv *drv = dev->dev_private;
31
32 drm_fbdev_cma_restore_mode(drv->fbdev);
33}
34
28DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops); 35DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
29 36
30static struct drm_driver sun4i_drv_driver = { 37static struct drm_driver sun4i_drv_driver = {
31 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, 38 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
32 39
33 /* Generic Operations */ 40 /* Generic Operations */
41 .lastclose = sun4i_drv_lastclose,
34 .fops = &sun4i_drv_fops, 42 .fops = &sun4i_drv_fops,
35 .name = "sun4i-drm", 43 .name = "sun4i-drm",
36 .desc = "Allwinner sun4i Display Engine", 44 .desc = "Allwinner sun4i Display Engine",
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 403bbd5f99a9..a12cc7ea99b6 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc)
                            SCALER_DISPSTATX_EMPTY);
 }
 
+static void vc4_crtc_update_dlist(struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+        struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+
+        if (crtc->state->event) {
+                unsigned long flags;
+
+                crtc->state->event->pipe = drm_crtc_index(crtc);
+
+                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+                spin_lock_irqsave(&dev->event_lock, flags);
+                vc4_crtc->event = crtc->state->event;
+                crtc->state->event = NULL;
+
+                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                          vc4_state->mm.start);
+
+                spin_unlock_irqrestore(&dev->event_lock, flags);
+        } else {
+                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+                          vc4_state->mm.start);
+        }
+}
+
 static void vc4_crtc_enable(struct drm_crtc *crtc)
 {
         struct drm_device *dev = crtc->dev;
@@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
 
         require_hvs_enabled(dev);
 
+        /* Enable vblank irq handling before crtc is started otherwise
+         * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist().
+         */
+        drm_crtc_vblank_on(crtc);
+        vc4_crtc_update_dlist(crtc);
+
         /* Turn on the scaler, which will wait for vstart to start
          * compositing.
          */
@@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc)
         /* Turn on the pixel valve, which will emit the vstart signal. */
         CRTC_WRITE(PV_V_CONTROL,
                    CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN);
-
-        /* Enable vblank irq handling after crtc is started. */
-        drm_crtc_vblank_on(crtc);
 }
 
 static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
 {
         struct drm_device *dev = crtc->dev;
         struct vc4_dev *vc4 = to_vc4_dev(dev);
-        struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
         struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
         struct drm_plane *plane;
         bool debug_dump_regs = false;
@@ -620,25 +650,15 @@
 
         WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
 
-        if (crtc->state->event) {
-                unsigned long flags;
-
-                crtc->state->event->pipe = drm_crtc_index(crtc);
-
-                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-
-                spin_lock_irqsave(&dev->event_lock, flags);
-                vc4_crtc->event = crtc->state->event;
-                crtc->state->event = NULL;
-
-                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                          vc4_state->mm.start);
-
-                spin_unlock_irqrestore(&dev->event_lock, flags);
-        } else {
-                HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-                          vc4_state->mm.start);
-        }
+        /* Only update DISPLIST if the CRTC was already running and is not
+         * being disabled.
+         * vc4_crtc_enable() takes care of updating the dlist just after
+         * re-enabling VBLANK interrupts and before enabling the engine.
+         * If the CRTC is being disabled, there's no point in updating this
+         * information.
+         */
+        if (crtc->state->active && old_state->active)
+                vc4_crtc_update_dlist(crtc);
 
         if (debug_dump_regs) {
                 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 35bf781e418e..c7056322211c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -30,49 +30,49 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>
 
-static struct ttm_place vram_placement_flags = {
+static const struct ttm_place vram_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
 };
 
-static struct ttm_place vram_ne_placement_flags = {
+static const struct ttm_place vram_ne_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
-static struct ttm_place sys_placement_flags = {
+static const struct ttm_place sys_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
 };
 
-static struct ttm_place sys_ne_placement_flags = {
+static const struct ttm_place sys_ne_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
-static struct ttm_place gmr_placement_flags = {
+static const struct ttm_place gmr_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 };
 
-static struct ttm_place gmr_ne_placement_flags = {
+static const struct ttm_place gmr_ne_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
 };
 
-static struct ttm_place mob_placement_flags = {
+static const struct ttm_place mob_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
-static struct ttm_place mob_ne_placement_flags = {
+static const struct ttm_place mob_ne_placement_flags = {
         .fpfn = 0,
         .lpfn = 0,
         .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
@@ -85,7 +85,7 @@ struct ttm_placement vmw_vram_placement = {
         .busy_placement = &vram_placement_flags
 };
 
-static struct ttm_place vram_gmr_placement_flags[] = {
+static const struct ttm_place vram_gmr_placement_flags[] = {
         {
                 .fpfn = 0,
                 .lpfn = 0,
@@ -97,7 +97,7 @@ static struct ttm_place vram_gmr_placement_flags[] = {
         }
 };
 
-static struct ttm_place gmr_vram_placement_flags[] = {
+static const struct ttm_place gmr_vram_placement_flags[] = {
         {
                 .fpfn = 0,
                 .lpfn = 0,
@@ -116,7 +116,7 @@ struct ttm_placement vmw_vram_gmr_placement = {
         .busy_placement = &gmr_placement_flags
 };
 
-static struct ttm_place vram_gmr_ne_placement_flags[] = {
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
         {
                 .fpfn = 0,
                 .lpfn = 0,
@@ -165,7 +165,7 @@ struct ttm_placement vmw_sys_ne_placement = {
         .busy_placement = &sys_ne_placement_flags
 };
 
-static struct ttm_place evictable_placement_flags[] = {
+static const struct ttm_place evictable_placement_flags[] = {
         {
                 .fpfn = 0,
                 .lpfn = 0,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 99a7f4ab7d97..86178796de6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -779,8 +779,8 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
         if (ret)
                 return ret;
 
-        header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
-                                           &header->handle);
+        header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
+                                            &header->handle);
         if (!header->cb_header) {
                 ret = -ENOMEM;
                 goto out_no_cb_header;
@@ -790,7 +790,6 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
         cb_hdr = header->cb_header;
         offset = header->node.start << PAGE_SHIFT;
         header->cmd = man->map + offset;
-        memset(cb_hdr, 0, sizeof(*cb_hdr));
         if (man->using_mob) {
                 cb_hdr->flags = SVGA_CB_FLAG_MOB;
                 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
@@ -827,8 +826,8 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
         if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
                 return -ENOMEM;
 
-        dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
-                                 &header->handle);
+        dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
+                                  &header->handle);
         if (!dheader)
                 return -ENOMEM;
 
@@ -837,7 +836,6 @@ static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
         cb_hdr = &dheader->cb_header;
         header->cb_header = cb_hdr;
         header->cmd = dheader->cmd;
-        memset(dheader, 0, sizeof(*dheader));
         cb_hdr->status = SVGA_CB_STATUS_NONE;
         cb_hdr->flags = SVGA_CB_FLAG_NONE;
         cb_hdr->ptr.pa = (u64)header->handle +
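
For context on the two hunks above: dma_pool_zalloc() hands back memory that is already zeroed, so the separate memset() that used to follow dma_pool_alloc() becomes redundant and is dropped. A userspace sketch of the same shape, with calloc() standing in for the zeroing allocator and an invented struct for illustration:

#include <stdlib.h>
#include <string.h>

struct cb_header {                /* invented stand-in */
        unsigned int flags;
        unsigned long ptr;
};

/* before: allocate, then remember to zero */
static struct cb_header *alloc_then_memset(void)
{
        struct cb_header *h = malloc(sizeof(*h));

        if (h)
                memset(h, 0, sizeof(*h));
        return h;
}

/* after: one call that returns zeroed memory */
static struct cb_header *alloc_zeroed(void)
{
        return calloc(1, sizeof(struct cb_header));
}

int main(void)
{
        struct cb_header *a = alloc_then_memset();
        struct cb_header *b = alloc_zeroed();

        free(a);
        free(b);
        return 0;
}
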
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 1f013d45c9e9..36c7b6c839c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
         int ret;
 
         cres = kzalloc(sizeof(*cres), GFP_KERNEL);
-        if (unlikely(cres == NULL))
+        if (unlikely(!cres))
                 return -ENOMEM;
 
         cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
         int ret;
 
         man = kzalloc(sizeof(*man), GFP_KERNEL);
-        if (man == NULL)
+        if (!man)
                 return ERR_PTR(-ENOMEM);
 
         man->dev_priv = dev_priv;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index bcc6d4136c87..4212b3e673bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -210,8 +210,8 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
         for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
                 uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
                                                       &uctx->res, i);
-                if (unlikely(uctx->cotables[i] == NULL)) {
-                        ret = -ENOMEM;
+                if (unlikely(IS_ERR(uctx->cotables[i]))) {
+                        ret = PTR_ERR(uctx->cotables[i]);
                         goto out_cotables;
                 }
         }
@@ -777,7 +777,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
         }
 
         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-        if (unlikely(ctx == NULL)) {
+        if (unlikely(!ctx)) {
                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                     vmw_user_context_size);
                 ret = -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 6c026d75c180..d87861bbe971 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -584,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
                 return ERR_PTR(ret);
 
         vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
-        if (unlikely(vcotbl == NULL)) {
+        if (unlikely(!vcotbl)) {
                 ret = -ENOMEM;
                 goto out_no_alloc;
         }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4a641555b960..4436d53ae16c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -227,7 +227,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
                       DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
-static struct pci_device_id vmw_pci_id_list[] = {
+static const struct pci_device_id vmw_pci_id_list[] = {
         {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
         {0, 0, 0}
 };
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         char host_log[100] = {0};
 
         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-        if (unlikely(dev_priv == NULL)) {
+        if (unlikely(!dev_priv)) {
                 DRM_ERROR("Failed allocating a device private struct.\n");
                 return -ENOMEM;
         }
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
         int ret = -ENOMEM;
 
         vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
-        if (unlikely(vmw_fp == NULL))
+        if (unlikely(!vmw_fp))
                 return ret;
 
         vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
         struct vmw_master *vmaster;
 
         vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-        if (unlikely(vmaster == NULL))
+        if (unlikely(!vmaster))
                 return -ENOMEM;
 
         vmw_master_init(vmaster);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c7b53d987f06..2cfb3c93f42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
         }
 
         node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (unlikely(node == NULL)) {
+        if (unlikely(!node)) {
                 DRM_ERROR("Failed to allocate a resource validation "
                           "entry.\n");
                 return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
         struct vmw_resource_relocation *rel;
 
         rel = kmalloc(sizeof(*rel), GFP_KERNEL);
-        if (unlikely(rel == NULL)) {
+        if (unlikely(!rel)) {
                 DRM_ERROR("Failed to allocate a resource relocation.\n");
                 return -ENOMEM;
         }
@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                            struct vmw_sw_context *sw_context,
                            SVGA3dCmdHeader *header)
 {
-        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+        return -EINVAL;
 }
 
 static int vmw_cmd_ok(struct vmw_private *dev_priv,
@@ -2584,7 +2584,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
 
 /**
  * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
- * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command.
+ * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
  *
  * @dev_priv: Pointer to a device private struct.
  * @sw_context: The software context being used for this batch.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6b2708b4eafe..b8bc5bc7de7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 {
         struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
 
-        if (unlikely(fman == NULL))
+        if (unlikely(!fman))
                 return NULL;
 
         fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
         int ret;
 
         fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-        if (unlikely(fence == NULL))
+        if (unlikely(!fence))
                 return -ENOMEM;
 
         ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
                 return ret;
 
         ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
-        if (unlikely(ufence == NULL)) {
+        if (unlikely(!ufence)) {
                 ret = -ENOMEM;
                 goto out_no_object;
         }
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
         struct vmw_fence_manager *fman = fman_from_fence(fence);
 
         eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
-        if (unlikely(eaction == NULL))
+        if (unlikely(!eaction))
                 return -ENOMEM;
 
         eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
         int ret;
 
         event = kzalloc(sizeof(*event), GFP_KERNEL);
-        if (unlikely(event == NULL)) {
+        if (unlikely(!event)) {
                 DRM_ERROR("Failed to allocate an event.\n");
                 ret = -ENOMEM;
                 goto out_no_space;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index c1900f4390a4..d2b03d4a3c86 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
         struct vmwgfx_gmrid_man *gman =
                 kzalloc(sizeof(*gman), GFP_KERNEL);
 
-        if (unlikely(gman == NULL))
+        if (unlikely(!gman))
                 return -ENOMEM;
 
         spin_lock_init(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3d94ea67a825..61e06f0e8cd3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -384,6 +384,12 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 
         hotspot_x = du->hotspot_x;
         hotspot_y = du->hotspot_y;
+
+        if (plane->fb) {
+                hotspot_x += plane->fb->hot_x;
+                hotspot_y += plane->fb->hot_y;
+        }
+
         du->cursor_surface = vps->surf;
         du->cursor_dmabuf = vps->dmabuf;
 
@@ -411,6 +417,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                 vmw_cursor_update_position(dev_priv, true,
                                            du->cursor_x + hotspot_x,
                                            du->cursor_y + hotspot_y);
+
+                du->core_hotspot_x = hotspot_x - du->hotspot_x;
+                du->core_hotspot_y = hotspot_y - du->hotspot_y;
         } else {
                 DRM_ERROR("Failed to update cursor image\n");
         }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 941bcfd131ff..b17f08fc50d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 
         if (dev_priv->has_dx) {
                 *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
-                if (*otables == NULL)
+                if (!(*otables))
                         return -ENOMEM;
 
                 dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
         } else {
                 *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
                                    GFP_KERNEL);
-                if (*otables == NULL)
+                if (!(*otables))
                         return -ENOMEM;
 
                 dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
 {
         struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
 
-        if (unlikely(mob == NULL))
+        if (unlikely(!mob))
                 return NULL;
 
         mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 6063c9636d4a..97000996b8dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
         reply_len = ebx;
         reply = kzalloc(reply_len + 1, GFP_KERNEL);
-        if (reply == NULL) {
+        if (!reply) {
                 DRM_ERROR("Cannot allocate memory for reply\n");
                 return -ENOMEM;
         }
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
 
         msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
         msg = kzalloc(msg_len, GFP_KERNEL);
-        if (msg == NULL) {
+        if (!msg) {
                 DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
                 return -ENOMEM;
         }
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
 
         msg_len = strlen(log) + strlen("log ") + 1;
         msg = kzalloc(msg_len, GFP_KERNEL);
-        if (msg == NULL) {
+        if (!msg) {
                 DRM_ERROR("Cannot allocate memory for log message\n");
                 return -ENOMEM;
         }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7d591f653dfa..a96f90f017d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
         int ret;
 
         user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-        if (unlikely(user_bo == NULL)) {
+        if (unlikely(!user_bo)) {
                 DRM_ERROR("Failed to allocate a buffer.\n");
                 return -ENOMEM;
         }
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
         }
 
         backup = kzalloc(sizeof(*backup), GFP_KERNEL);
-        if (unlikely(backup == NULL))
+        if (unlikely(!backup))
                 return -ENOMEM;
 
         ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 68f135c5b0d8..9b832f136813 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -751,7 +751,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
         }
 
         ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-        if (unlikely(ushader == NULL)) {
+        if (unlikely(!ushader)) {
                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                     vmw_user_shader_size);
                 ret = -ENOMEM;
@@ -821,7 +821,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
         }
 
         shader = kzalloc(sizeof(*shader), GFP_KERNEL);
-        if (unlikely(shader == NULL)) {
+        if (unlikely(!shader)) {
                 ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                     vmw_shader_size);
                 ret = -ENOMEM;
@@ -981,7 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 
         /* Allocate and pin a DMA buffer */
         buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-        if (unlikely(buf == NULL))
+        if (unlikely(!buf))
                 return -ENOMEM;
 
         ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 50be1f034f9e..5284e8d2f7ba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1640,8 +1640,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
                  * something arbitrarily large and we will reject any layout
                  * that doesn't fit prim_bb_mem later
                  */
-                dev->mode_config.max_width = 16384;
-                dev->mode_config.max_height = 16384;
+                dev->mode_config.max_width = 8192;
+                dev->mode_config.max_height = 8192;
         }
 
         vmw_kms_create_implicit_placement_property(dev_priv, false);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 2c58a390123a..778272514164 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -186,8 +186,13 @@ static int host1x_probe(struct platform_device *pdev)
                 return -ENOMEM;
 
         err = iommu_attach_device(host->domain, &pdev->dev);
-        if (err)
+        if (err == -ENODEV) {
+                iommu_domain_free(host->domain);
+                host->domain = NULL;
+                goto skip_iommu;
+        } else if (err) {
                 goto fail_free_domain;
+        }
 
         geometry = &host->domain->geometry;
 
@@ -198,6 +203,7 @@ static int host1x_probe(struct platform_device *pdev)
                 host->iova_end = geometry->aperture_end;
         }
 
+skip_iommu:
         err = host1x_channel_list_init(&host->channel_list,
                                        host->info->nb_channels);
         if (err) {
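
The host1x change above is an error-triage pattern: -ENODEV from iommu_attach_device() means "no IOMMU behind this device, continue without one", while any other error stays fatal. A standalone sketch of that shape, with the attach call faked for illustration:

#include <errno.h>
#include <stdio.h>

/* invented stand-in: pretend this platform has no IOMMU */
static int fake_attach(void)
{
        return -ENODEV;
}

static int probe(void)
{
        int err = fake_attach();

        if (err == -ENODEV) {
                /* optional facility missing: clean up and fall back */
                printf("no IOMMU, continuing without one\n");
                goto skip_iommu;
        } else if (err) {
                return err;          /* real failure: propagate it */
        }

        printf("IOMMU attached\n");
skip_iommu:
        printf("common init continues\n");
        return 0;
}

int main(void)
{
        return probe();
}
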
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 08766c6e7856..87a20b3dcf7a 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,6 +1,7 @@
 config IMX_IPUV3_CORE
         tristate "IPUv3 core support"
         depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+        depends on DRM || !DRM # if DRM=m, this can't be 'y'
         select GENERIC_IRQ_CHIP
         help
           Choose this if you have a i.MX5/6 system and want to use the Image
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6fd01a692197..9017dcc14502 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2216,6 +2216,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #if IS_ENABLED(CONFIG_HID_ORTEK)
         { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
         { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
         { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
 #endif
 #if IS_ENABLED(CONFIG_HID_PANTHERLORD)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3d911bfd91cf..c9ba4c6db74c 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -824,6 +824,7 @@
 #define USB_VENDOR_ID_ORTEK             0x05a4
 #define USB_DEVICE_ID_ORTEK_PKB1700     0x1700
 #define USB_DEVICE_ID_ORTEK_WKB2000     0x2000
+#define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S    0x8003
 
 #define USB_VENDOR_ID_PLANTRONICS       0x047f
 
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 41b39464ded8..501e16a9227d 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2732,6 +2732,9 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp)
                                                hidpp_battery_props,
                                                sizeof(hidpp_battery_props),
                                                GFP_KERNEL);
+        if (!battery_props)
+                return -ENOMEM;
+
         num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2;
 
         if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f3e35e7a189d..aff20f4b6d97 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -620,16 +620,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
         return 0;
 }
 
-static int mt_touch_input_mapped(struct hid_device *hdev, struct hid_input *hi,
-                struct hid_field *field, struct hid_usage *usage,
-                unsigned long **bit, int *max)
-{
-        if (usage->type == EV_KEY || usage->type == EV_ABS)
-                set_bit(usage->type, hi->input->evbit);
-
-        return -1;
-}
-
 static int mt_compute_slot(struct mt_device *td, struct input_dev *input)
 {
         __s32 quirks = td->mtclass.quirks;
@@ -969,8 +959,10 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
                 return 0;
 
         if (field->application == HID_DG_TOUCHSCREEN ||
-            field->application == HID_DG_TOUCHPAD)
-                return mt_touch_input_mapped(hdev, hi, field, usage, bit, max);
+            field->application == HID_DG_TOUCHPAD) {
+                /* We own these mappings, tell hid-input to ignore them */
+                return -1;
+        }
 
         /* let hid-core decide for the others */
         return 0;
diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c
index 6620f15fec22..8783a064cdcf 100644
--- a/drivers/hid/hid-ortek.c
+++ b/drivers/hid/hid-ortek.c
@@ -5,6 +5,7 @@
  *
  *  Ortek PKB-1700
  *  Ortek WKB-2000
+ *  iHome IMAC-A210S
  *  Skycable wireless presenter
  *
  *  Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
@@ -28,10 +29,10 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                                 unsigned int *rsize)
 {
         if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
-                hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
+                hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
                 rdesc[55] = 0x92;
         } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
-                hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
+                hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
                 rdesc[53] = 0x65;
         }
         return rdesc;
@@ -40,6 +41,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 static const struct hid_device_id ortek_devices[] = {
         { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
         { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
+        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
         { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
         { }
 };
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 76013eb5cb7f..c008847e0b20 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -680,18 +680,21 @@ static int usbhid_open(struct hid_device *hid)
 	struct usbhid_device *usbhid = hid->driver_data;
 	int res;
 
+	set_bit(HID_OPENED, &usbhid->iofl);
+
 	if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
 		return 0;
 
 	res = usb_autopm_get_interface(usbhid->intf);
 	/* the device must be awake to reliably request remote wakeup */
-	if (res < 0)
+	if (res < 0) {
+		clear_bit(HID_OPENED, &usbhid->iofl);
 		return -EIO;
+	}
 
 	usbhid->intf->needs_remote_wakeup = 1;
 
 	set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
-	set_bit(HID_OPENED, &usbhid->iofl);
 	set_bit(HID_IN_POLLING, &usbhid->iofl);
 
 	res = hid_start_in(hid);
@@ -727,19 +730,20 @@ static void usbhid_close(struct hid_device *hid)
 {
 	struct usbhid_device *usbhid = hid->driver_data;
 
-	if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-		return;
-
 	/*
 	 * Make sure we don't restart data acquisition due to
 	 * a resumption we no longer care about by avoiding racing
 	 * with hid_start_in().
 	 */
 	spin_lock_irq(&usbhid->lock);
-	clear_bit(HID_IN_POLLING, &usbhid->iofl);
 	clear_bit(HID_OPENED, &usbhid->iofl);
+	if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+		clear_bit(HID_IN_POLLING, &usbhid->iofl);
 	spin_unlock_irq(&usbhid->lock);
 
+	if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
+		return;
+
 	hid_cancel_delayed_stuff(usbhid);
 	usb_kill_urb(usbhid->urbin);
 	usbhid->intf->needs_remote_wakeup = 0;
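The usbhid change moves the HID_OPENED flag so it is set before any early return and rolled back on failure, keeping open/close state consistent for every quirk path. A minimal userspace sketch of that "mark first, undo on error" ordering, using C11 atomics in place of the kernel's bitops; the device model and power_up() are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OPENED_BIT 0x1u

static atomic_uint iofl;

static bool power_up(void) { return false; /* pretend resume failed */ }

static int dev_open(void)
{
	atomic_fetch_or(&iofl, OPENED_BIT);	/* visible before I/O starts */
	if (!power_up()) {
		atomic_fetch_and(&iofl, ~OPENED_BIT);	/* roll back on error */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("open: %d, opened flag: %u\n", dev_open(),
	       atomic_load(&iofl) & OPENED_BIT);
	return 0;
}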
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e9bf0bb87ac4..e57cc40cb768 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -606,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 			     get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
 
 out:
+	/* re-enable tasklet for use on re-open */
+	tasklet_enable(&channel->callback_event);
 	return ret;
 }
 
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 0af7fd311979..76c34f4fde13 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -566,6 +566,8 @@ static int applesmc_init_smcreg_try(void)
 	if (ret)
 		return ret;
 	s->fan_count = tmp[0];
+	if (s->fan_count > 10)
+		s->fan_count = 10;
 
 	ret = applesmc_get_lower_bound(&s->temp_begin, "T");
 	if (ret)
@@ -811,7 +813,8 @@ static ssize_t applesmc_show_fan_speed(struct device *dev,
 	char newkey[5];
 	u8 buffer[2];
 
-	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+	scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+		  to_index(attr));
 
 	ret = applesmc_read_key(newkey, buffer, 2);
 	speed = ((buffer[0] << 8 | buffer[1]) >> 2);
@@ -834,7 +837,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
 	if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
 		return -EINVAL;		/* Bigger than a 14-bit value */
 
-	sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
+	scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)],
+		  to_index(attr));
 
 	buffer[0] = (speed >> 6) & 0xff;
 	buffer[1] = (speed << 2) & 0xff;
@@ -903,7 +907,7 @@ static ssize_t applesmc_show_fan_position(struct device *dev,
 	char newkey[5];
 	u8 buffer[17];
 
-	sprintf(newkey, FAN_ID_FMT, to_index(attr));
+	scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr));
 
 	ret = applesmc_read_key(newkey, buffer, 16);
 	buffer[16] = 0;
@@ -1116,7 +1120,8 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
 	}
 	for (i = 0; i < num; i++) {
 		node = &grp->nodes[i];
-		sprintf(node->name, grp->format, i + 1);
+		scnprintf(node->name, sizeof(node->name), grp->format,
+			  i + 1);
 		node->sda.index = (grp->option << 16) | (i & 0xffff);
 		node->sda.dev_attr.show = grp->show;
 		node->sda.dev_attr.store = grp->store;
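Every sprintf into the tiny newkey[5] buffer above becomes a bounded scnprintf. The same discipline is easy to show in userspace with snprintf; the kernel's scnprintf differs only in returning the number of characters actually stored rather than the would-be length. The key format below is made up for the demo.

#include <stdio.h>

int main(void)
{
	char newkey[5];

	/* snprintf never writes past sizeof(newkey) and always
	 * NUL-terminates; an unbounded sprintf here would overflow
	 * as soon as the index needs two digits. */
	snprintf(newkey, sizeof(newkey), "F%dID", 12);
	printf("%s\n", newkey);	/* "F12I": truncated, but in bounds */
	return 0;
}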
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 1006b230b236..65fa29591d21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -983,7 +983,7 @@ config I2C_UNIPHIER_F
 
 config I2C_VERSATILE
 	tristate "ARM Versatile/Realview I2C bus support"
-	depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
+	depends on ARCH_MPS2 || ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || COMPILE_TEST
 	select I2C_ALGOBIT
 	help
 	  Say yes if you want to support the I2C serial bus on ARMs Versatile
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index f19348328a71..6fdf9231c23c 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
 	}
 
 	/* We are in an invalid state; reset bus to a known state. */
-	if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+	if (!bus->msgs) {
 		dev_err(bus->dev, "bus in unknown state");
 		bus->cmd_err = -EIO;
-		aspeed_i2c_do_stop(bus);
+		if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+			aspeed_i2c_do_stop(bus);
 		goto out_no_complete;
 	}
 	msg = &bus->msgs[bus->msgs_index];
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2ea6d0d25a01..57248bccadbc 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
 	dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
 
 	dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
-			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
-			 DW_IC_CON_SPEED_FAST;
+			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
 
 	dev->mode = DW_IC_SLAVE;
 
@@ -298,6 +297,9 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	}
 
 	acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+	/* Some broken DSTDs use 1MiHz instead of 1MHz */
+	if (acpi_speed == 1048576)
+		acpi_speed = 1000000;
 	/*
 	 * Find bus speed from the "clock-frequency" device property, ACPI
 	 * or by using fast mode if neither is set.
@@ -319,7 +321,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 	if (dev->clk_freq != 100000 && dev->clk_freq != 400000
 	    && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
 		dev_err(&pdev->dev,
-			"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+			"%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n",
+			dev->clk_freq);
 		ret = -EINVAL;
 		goto exit_reset;
 	}
@@ -426,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev)
 #endif
 
 #ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -448,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+	pm_runtime_resume(dev);
+	return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
 static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
 	.prepare = dw_i2c_plat_prepare,
 	.complete = dw_i2c_plat_complete,
 	SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
-	SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+	SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+			   dw_i2c_plat_resume,
+			   NULL)
 };
 
 #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
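The probe change above normalizes a firmware quirk: some DSDTs report the Fast-mode-plus speed as 2^20 Hz ("1 MiHz") instead of 1000000 Hz, which would then fail the whitelist check. A trivial sketch of that early normalization; normalize_bus_speed is a made-up name:

#include <stdio.h>

/* Map the binary-power misencoding back to the decimal rate before
 * validating it against the supported-speed list. */
static unsigned int normalize_bus_speed(unsigned int hz)
{
	return (hz == 1048576) ? 1000000 : hz;
}

int main(void)
{
	printf("%u -> %u\n", 1048576u, normalize_bus_speed(1048576));
	return 0;
}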
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 0548c7ea578c..78d8fb73927d 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
 		return -EBUSY;
 	if (slave->flags & I2C_CLIENT_TEN)
 		return -EAFNOSUPPORT;
+	pm_runtime_get_sync(dev->dev);
+
 	/*
 	 * Set slave address in the IC_SAR register,
 	 * the address to which the DW_apb_i2c responds.
@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 	dev->disable_int(dev);
 	dev->disable(dev);
 	dev->slave = NULL;
+	pm_runtime_put(dev->dev);
 
 	return 0;
 }
@@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
 	slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
 		DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
 
-	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
+	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
 		return 0;
 
 	dev_dbg(dev->dev,
@@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret)
 		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
-	pm_runtime_put_noidle(dev->dev);
 
 	return ret;
 }
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index b4685bb9b5d7..adca51a99487 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
 	iounmap(pd->reg);
 
 err_res:
-	release_resource(pd->ioarea);
-	kfree(pd->ioarea);
+	release_mem_region(pd->ioarea->start, size);
 
 err:
 	kfree(pd);
@@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
 	i2c_del_adapter(&pd->adap);
 
 	iounmap(pd->reg);
-	release_resource(pd->ioarea);
-	kfree(pd->ioarea);
+	release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
 	kfree(pd);
 
 	return 0;
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4842ec3a5451..a9126b3cda61 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -230,6 +230,16 @@ void i2c_acpi_register_devices(struct i2c_adapter *adap)
 		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
 }
 
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client)
+{
+	if (!(client && matches))
+		return NULL;
+
+	return acpi_match_device(matches, &client->dev);
+}
+
 static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
 					 void *data, void **return_value)
 {
@@ -289,7 +299,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
 
-static int i2c_acpi_match_adapter(struct device *dev, void *data)
+static int i2c_acpi_find_match_adapter(struct device *dev, void *data)
 {
 	struct i2c_adapter *adapter = i2c_verify_adapter(dev);
 
@@ -299,7 +309,7 @@ static int i2c_acpi_match_adapter(struct device *dev, void *data)
 	return ACPI_HANDLE(dev) == (acpi_handle)data;
 }
 
-static int i2c_acpi_match_device(struct device *dev, void *data)
+static int i2c_acpi_find_match_device(struct device *dev, void *data)
 {
 	return ACPI_COMPANION(dev) == data;
 }
@@ -309,7 +319,7 @@ static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
 	struct device *dev;
 
 	dev = bus_find_device(&i2c_bus_type, NULL, handle,
-			      i2c_acpi_match_adapter);
+			      i2c_acpi_find_match_adapter);
 	return dev ? i2c_verify_adapter(dev) : NULL;
 }
 
@@ -317,7 +327,8 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
 	struct device *dev;
 
-	dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+	dev = bus_find_device(&i2c_bus_type, NULL, adev,
+			      i2c_acpi_find_match_device);
 	return dev ? i2c_verify_client(dev) : NULL;
 }
 
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index c89dac7fd2e7..56e46581b84b 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -353,10 +353,11 @@ static int i2c_device_probe(struct device *dev)
 	}
 
 	/*
-	 * An I2C ID table is not mandatory, if and only if, a suitable Device
-	 * Tree match table entry is supplied for the probing device.
+	 * An I2C ID table is not mandatory, if and only if, a suitable OF
+	 * or ACPI ID table is supplied for the probing device.
 	 */
 	if (!driver->id_table &&
+	    !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
 	    !i2c_of_match_device(dev->driver->of_match_table, client))
 		return -ENODEV;
 
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 3b63f5e5b89c..3d3d9bf02101 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -31,9 +31,18 @@ int i2c_check_addr_validity(unsigned addr, unsigned short flags);
 int i2c_check_7bit_addr_validity_strict(unsigned short addr);
 
 #ifdef CONFIG_ACPI
+const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client);
 void i2c_acpi_register_devices(struct i2c_adapter *adap);
 #else /* CONFIG_ACPI */
 static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+static inline const struct acpi_device_id *
+i2c_acpi_match_device(const struct acpi_device_id *matches,
+		      struct i2c_client *client)
+{
+	return NULL;
+}
 #endif /* CONFIG_ACPI */
 extern struct notifier_block i2c_acpi_notifier;
 
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 2c64d0e0740f..17121329bb79 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -83,7 +83,7 @@ config I2C_MUX_PINCTRL
 	  different sets of pins at run-time.
 
 	  This driver can also be built as a module. If so, the module will be
-	  called pinctrl-i2cmux.
+	  called i2c-mux-pinctrl.
 
 config I2C_MUX_REG
 	tristate "Register-based I2C multiplexer"
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 0e05f75934c9..1858e3ce3993 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio)
 EXPORT_SYMBOL_GPL(ide_pio_cycle_time);
 
 #define ENOUGH(v, unit)		(((v) - 1) / (unit) + 1)
-#define EZ(v, unit)		((v) ? ENOUGH(v, unit) : 0)
+#define EZ(v, unit)		((v) ? ENOUGH((v) * 1000, unit) : 0)
 
 static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q,
 				int T, int UT)
 {
-	q->setup   = EZ(t->setup   * 1000,  T);
-	q->act8b   = EZ(t->act8b   * 1000,  T);
-	q->rec8b   = EZ(t->rec8b   * 1000,  T);
-	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
-	q->active  = EZ(t->active  * 1000,  T);
-	q->recover = EZ(t->recover * 1000,  T);
-	q->cycle   = EZ(t->cycle   * 1000,  T);
-	q->udma    = EZ(t->udma    * 1000, UT);
+	q->setup   = EZ(t->setup,   T);
+	q->act8b   = EZ(t->act8b,   T);
+	q->rec8b   = EZ(t->rec8b,   T);
+	q->cyc8b   = EZ(t->cyc8b,   T);
+	q->active  = EZ(t->active,  T);
+	q->recover = EZ(t->recover, T);
+	q->cycle   = EZ(t->cycle,   T);
+	q->udma    = EZ(t->udma,    UT);
 }
 
 void ide_timing_merge(struct ide_timing *a, struct ide_timing *b,
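ENOUGH(v, unit) above is integer ceil(v / unit) for v > 0, and EZ() guards the v == 0 case; after this patch the nanosecond-to-picosecond scaling happens once inside EZ() instead of at every call site. A worked example, with the clock period value chosen only for illustration:

#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EZ(v, unit)	((v) ? ENOUGH((v) * 1000, unit) : 0)

int main(void)
{
	/* a 30 ns setup time quantized to 25000 ps clock periods:
	 * ceil(30000 / 25000) = 2 cycles */
	printf("%d\n", EZ(30, 25000));
	/* a zero timing field must stay zero, not become 1 */
	printf("%d\n", EZ(0, 25000));
	return 0;
}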
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 6b5d3be283c4..807299dd45eb 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -193,7 +193,6 @@ struct bmc150_accel_data {
 	struct regmap *regmap;
 	int irq;
 	struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
-	atomic_t active_intr;
 	struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
 	struct mutex mutex;
 	u8 fifo_mode, watermark;
@@ -493,11 +492,6 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
 		goto out_fix_power_state;
 	}
 
-	if (state)
-		atomic_inc(&data->active_intr);
-	else
-		atomic_dec(&data->active_intr);
-
 	return 0;
 
 out_fix_power_state:
@@ -1710,8 +1704,7 @@ static int bmc150_accel_resume(struct device *dev)
 	struct bmc150_accel_data *data = iio_priv(indio_dev);
 
 	mutex_lock(&data->mutex);
-	if (atomic_read(&data->active_intr))
-		bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+	bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
 	bmc150_accel_fifo_set_mode(data);
 	mutex_unlock(&data->mutex);
 
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 07d1489cd457..e44f62bf9caa 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -166,6 +166,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_ihl = 0x02,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -234,6 +238,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -316,6 +324,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 				.en_mask = 0x08,
 			},
 		},
+		.sim = {
+			.addr = 0x24,
+			.value = BIT(0),
+		},
 		.multi_read_bit = false,
 		.bootime = 2,
 	},
@@ -379,6 +391,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_int1 = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x21,
+			.value = BIT(1),
+		},
 		.multi_read_bit = true,
 		.bootime = 2, /* guess */
 	},
@@ -437,6 +453,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x21,
+			.value = BIT(7),
+		},
 		.multi_read_bit = false,
 		.bootime = 2, /* guess */
 	},
@@ -499,6 +519,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.addr_ihl = 0x22,
 			.mask_ihl = 0x80,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
@@ -547,6 +571,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_int1 = 0x04,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x21,
+			.value = BIT(1),
+		},
 		.multi_read_bit = false,
 		.bootime = 2,
 	},
@@ -614,6 +642,10 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
 			.mask_ihl = 0x02,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
+		.sim = {
+			.addr = 0x23,
+			.value = BIT(0),
+		},
 		.multi_read_bit = true,
 		.bootime = 2,
 	},
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index e0ea411a0b2d..c02b23d675cb 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -22,6 +22,7 @@
 
 #include <linux/iio/iio.h>
 #include <linux/iio/driver.h>
+#include <linux/iopoll.h>
 
 #define ASPEED_RESOLUTION_BITS		10
 #define ASPEED_CLOCKS_PER_SAMPLE	12
@@ -38,11 +39,17 @@
 
 #define ASPEED_ENGINE_ENABLE		BIT(0)
 
+#define ASPEED_ADC_CTRL_INIT_RDY	BIT(8)
+
+#define ASPEED_ADC_INIT_POLLING_TIME	500
+#define ASPEED_ADC_INIT_TIMEOUT		500000
+
 struct aspeed_adc_model_data {
 	const char *model_name;
 	unsigned int min_sampling_rate;	// Hz
 	unsigned int max_sampling_rate;	// Hz
 	unsigned int vref_voltage;	// mV
+	bool wait_init_sequence;
 };
 
 struct aspeed_adc_data {
@@ -211,6 +218,24 @@ static int aspeed_adc_probe(struct platform_device *pdev)
 		goto scaler_error;
 	}
 
+	model_data = of_device_get_match_data(&pdev->dev);
+
+	if (model_data->wait_init_sequence) {
+		/* Enable engine in normal mode. */
+		writel(ASPEED_OPERATION_MODE_NORMAL | ASPEED_ENGINE_ENABLE,
+		       data->base + ASPEED_REG_ENGINE_CONTROL);
+
+		/* Wait for initial sequence complete. */
+		ret = readl_poll_timeout(data->base + ASPEED_REG_ENGINE_CONTROL,
+					 adc_engine_control_reg_val,
+					 adc_engine_control_reg_val &
+					 ASPEED_ADC_CTRL_INIT_RDY,
+					 ASPEED_ADC_INIT_POLLING_TIME,
+					 ASPEED_ADC_INIT_TIMEOUT);
+		if (ret)
+			goto scaler_error;
+	}
+
 	/* Start all channels in normal mode. */
 	ret = clk_prepare_enable(data->clk_scaler->clk);
 	if (ret)
@@ -274,6 +299,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
 	.vref_voltage = 1800, // mV
 	.min_sampling_rate = 1,
 	.max_sampling_rate = 1000000,
+	.wait_init_sequence = true,
};

static const struct of_device_id aspeed_adc_matches[] = {
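The probe change uses readl_poll_timeout() to spin on a ready bit with a deadline. A userspace sketch of the same idiom follows; reg and INIT_RDY stand in for the memory-mapped register and bit, and the "hardware" sets the bit immediately so the demo terminates:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define INIT_RDY (1u << 8)

static volatile unsigned int reg;	/* pretend hardware register */

/* Re-read a status word until the ready bit is set or timeout_us
 * elapses, sleeping poll_us between reads. */
static int poll_ready(unsigned int poll_us, unsigned int timeout_us)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		reg |= INIT_RDY;	/* real hardware would set this */
		if (reg & INIT_RDY)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L >= timeout_us)
			return -1;	/* -ETIMEDOUT in the kernel */
		usleep(poll_us);
	}
}

int main(void)
{
	printf("poll_ready: %d\n", poll_ready(500, 500000));
	return 0;
}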
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 64799ad7ebad..462a99c13e7a 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,6 +28,8 @@
 #include <linux/iio/driver.h>
 
 #define AXP288_ADC_EN_MASK		0xF1
+#define AXP288_ADC_TS_PIN_GPADC		0xF2
+#define AXP288_ADC_TS_PIN_ON		0xF3
 
 enum axp288_adc_id {
 	AXP288_ADC_TS,
@@ -121,6 +123,26 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
 	return IIO_VAL_INT;
 }
 
+static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+			     unsigned long address)
+{
+	int ret;
+
+	/* channels other than GPADC do not need to switch TS pin */
+	if (address != AXP288_GP_ADC_H)
+		return 0;
+
+	ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+	if (ret)
+		return ret;
+
+	/* When switching to the GPADC pin give things some time to settle */
+	if (mode == AXP288_ADC_TS_PIN_GPADC)
+		usleep_range(6000, 10000);
+
+	return 0;
+}
+
 static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 			       struct iio_chan_spec const *chan,
 			       int *val, int *val2, long mask)
@@ -131,7 +153,16 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 	mutex_lock(&indio_dev->mlock);
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
+		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+				      chan->address)) {
+			dev_err(&indio_dev->dev, "GPADC mode\n");
+			ret = -EINVAL;
+			break;
+		}
 		ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+				      chan->address))
+			dev_err(&indio_dev->dev, "TS pin restore\n");
 		break;
 	default:
 		ret = -EINVAL;
@@ -141,6 +172,15 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
 	return ret;
 }
 
+static int axp288_adc_set_state(struct regmap *regmap)
+{
+	/* ADC should be always enabled for internal FG to function */
+	if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+		return -EIO;
+
+	return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+}
+
 static const struct iio_info axp288_adc_iio_info = {
 	.read_raw = &axp288_adc_read_raw,
 	.driver_module = THIS_MODULE,
@@ -169,7 +209,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
 	 * Set ADC to enabled state at all time, including system suspend.
 	 * otherwise internal fuel gauge functionality may be affected.
 	 */
-	ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+	ret = axp288_adc_set_state(axp20x->regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to enable ADC device\n");
 		return ret;
	}
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 232c0b80d658..c3f86138cb55 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data)
 {
 	struct iio_dev *indio_dev = data;
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
-	unsigned int sampling_us = SAMPLING_PERIOD(chip);
+	int sampling_us = SAMPLING_PERIOD(chip);
 	int buffer_us;
 
 	/*
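Changing sampling_us from unsigned int to int keeps later arithmetic against the signed buffer_us well-behaved; this sketch only illustrates the general class of bug such a type change avoids (the driver's exact arithmetic is not shown here). When a signed and an unsigned operand meet, the signed side is converted, so a negative value silently becomes huge:

#include <stdio.h>

int main(void)
{
	unsigned int sampling_us = 2000;
	int buffer_us = -100;	/* e.g. a computed deficit */

	if (buffer_us < (int)sampling_us)
		printf("signed compare: -100 is smaller, as expected\n");
	if ((unsigned int)buffer_us < sampling_us)
		printf("unreachable\n");
	else
		printf("unsigned compare: -100 wrapped to %u\n",
		       (unsigned int)buffer_us);
	return 0;
}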
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index e09233b03c05..609676384f5e 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -64,7 +64,7 @@
 #define STM32H7_CKMODE_MASK		GENMASK(17, 16)
 
 /* STM32 H7 maximum analog clock rate (from datasheet) */
-#define STM32H7_ADC_MAX_CLK_RATE	72000000
+#define STM32H7_ADC_MAX_CLK_RATE	36000000
 
 /**
  * stm32_adc_common_regs - stm32 common registers, compatible dependent data
@@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
 		return -EINVAL;
 	}
 
-	priv->common.rate = rate;
+	priv->common.rate = rate / stm32f4_pclk_div[i];
 	val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
 	val &= ~STM32F4_ADC_ADCPRE_MASK;
 	val |= i << STM32F4_ADC_ADCPRE_SHIFT;
 	writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
 
 	dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
-		rate / (stm32f4_pclk_div[i] * 1000));
+		priv->common.rate / 1000);
 
 	return 0;
 }
@@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
 
 out:
 	/* rate used later by each ADC instance to control BOOST mode */
-	priv->common.rate = rate;
+	priv->common.rate = rate / div;
 
 	/* Set common clock mode and prescaler */
 	val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);
@@ -260,7 +260,7 @@ out:
 	writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
 
 	dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
		ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
 
 	return 0;
 }
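The fix above stores the post-prescaler rate rather than the raw input clock, so later consumers (such as the BOOST-mode decision) see the frequency the ADC actually runs at. A worked example with an input clock and divider chosen only for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long rate = 144000000;	/* input clock, Hz */
	unsigned long div = 4;		/* selected prescaler */
	unsigned long effective = rate / div;

	printf("effective ADC clock: %lu kHz\n", effective / 1000);
	printf("within the 36 MHz limit: %s\n",
	       effective <= 36000000 ? "yes" : "no");
	return 0;
}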
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 81d4c39e414a..137f577d9432 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -256,6 +256,7 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
 
 err:
 	pm_runtime_put_autosuspend(indio_dev->dev.parent);
+	disable_irq(irq);
 	mutex_unlock(&info->mutex);
 
 	return ret;
@@ -365,7 +366,6 @@ static irqreturn_t sun4i_gpadc_temp_data_irq_handler(int irq, void *dev_id)
 	complete(&info->completion);
 
 out:
-	disable_irq_nosync(info->temp_data_irq);
 	return IRQ_HANDLED;
 }
 
@@ -380,7 +380,6 @@ static irqreturn_t sun4i_gpadc_fifo_data_irq_handler(int irq, void *dev_id)
 	complete(&info->completion);
 
 out:
-	disable_irq_nosync(info->fifo_data_irq);
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 01fc76f7d660..c168e0db329a 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
 #define VF610_ADC_ADSTS_MASK		0x300
 #define VF610_ADC_ADLPC_EN		0x80
 #define VF610_ADC_ADHSC_EN		0x400
-#define VF610_ADC_REFSEL_VALT		0x100
+#define VF610_ADC_REFSEL_VALT		0x800
 #define VF610_ADC_REFSEL_VBG		0x1000
 #define VF610_ADC_ADTRG_HARD		0x2000
 #define VF610_ADC_AVGS_8		0x4000
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 16ade0a0327b..0e4b379ada45 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 	s32 poll_value = 0;
 
 	if (state) {
-		if (!atomic_read(&st->user_requested_state))
-			return 0;
 		if (sensor_hub_device_open(st->hsdev))
 			return -EIO;
 
@@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 							  &report_val);
 	}
 
+	pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+		 st->pdev->name, state_val, report_val);
+
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);
@@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 		ret = pm_runtime_get_sync(&st->pdev->dev);
 	else {
 		pm_runtime_mark_last_busy(&st->pdev->dev);
+		pm_runtime_use_autosuspend(&st->pdev->dev);
 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
 	}
 	if (ret < 0) {
@@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
 		/* Default to 3 seconds, but can be changed from sysfs */
 		pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
 						 3000);
-		pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
 	return ret;
 error_unreg_trigger:
 	iio_trigger_unregister(trig);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 79c8c7cd70d5..6e6a1ecc99dd 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -550,6 +550,31 @@ out:
 }
 EXPORT_SYMBOL(st_sensors_read_info_raw);
 
+static int st_sensors_init_interface_mode(struct iio_dev *indio_dev,
+			const struct st_sensor_settings *sensor_settings)
+{
+	struct st_sensor_data *sdata = iio_priv(indio_dev);
+	struct device_node *np = sdata->dev->of_node;
+	struct st_sensors_platform_data *pdata;
+
+	pdata = (struct st_sensors_platform_data *)sdata->dev->platform_data;
+	if (((np && of_property_read_bool(np, "spi-3wire")) ||
+	     (pdata && pdata->spi_3wire)) && sensor_settings->sim.addr) {
+		int err;
+
+		err = sdata->tf->write_byte(&sdata->tb, sdata->dev,
+					    sensor_settings->sim.addr,
+					    sensor_settings->sim.value);
+		if (err < 0) {
+			dev_err(&indio_dev->dev,
+				"failed to init interface mode\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 int st_sensors_check_device_support(struct iio_dev *indio_dev,
 			int num_sensors_list,
 			const struct st_sensor_settings *sensor_settings)
@@ -574,6 +599,10 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
 		return -ENODEV;
 	}
 
+	err = st_sensors_init_interface_mode(indio_dev, &sensor_settings[i]);
+	if (err < 0)
+		return err;
+
 	if (sensor_settings[i].wai_addr) {
 		err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
 					   sensor_settings[i].wai_addr, &wai);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8cf84d3488b2..12898424d838 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
 		.gyro_max_scale = 450,
 		.accel_max_val = IIO_M_S_2_TO_G(12500),
-		.accel_max_scale = 5,
+		.accel_max_scale = 10,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index e7d4ea75e007..7599693f7fe9 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
 	struct tsl2563_chip *chip = iio_priv(dev_info);
 
 	iio_push_event(dev_info,
-		       IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+		       IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
 					    0,
 					    IIO_EV_TYPE_THRESH,
 					    IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 8e1b0861fbe4..c38563699984 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -356,9 +356,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
 		.drdy_irq = {
 			.addr = 0x62,
 			.mask_int1 = 0x01,
-			.addr_ihl = 0x63,
-			.mask_ihl = 0x04,
-			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+			.addr_stat_drdy = 0x67,
 		},
 		.multi_read_bit = false,
 		.bootime = 2,
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index d82b788374b6..0d2ea3ee371b 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -282,6 +282,11 @@ static int bmp280_read_temp(struct bmp280_data *data,
 	}
 
 	adc_temp = be32_to_cpu(tmp) >> 12;
+	if (adc_temp == BMP280_TEMP_SKIPPED) {
+		/* reading was skipped */
+		dev_err(data->dev, "reading temperature skipped\n");
+		return -EIO;
+	}
 	comp_temp = bmp280_compensate_temp(data, adc_temp);
 
 	/*
@@ -317,6 +322,11 @@ static int bmp280_read_press(struct bmp280_data *data,
 	}
 
 	adc_press = be32_to_cpu(tmp) >> 12;
+	if (adc_press == BMP280_PRESS_SKIPPED) {
+		/* reading was skipped */
+		dev_err(data->dev, "reading pressure skipped\n");
+		return -EIO;
+	}
 	comp_press = bmp280_compensate_press(data, adc_press);
 
 	*val = comp_press;
@@ -345,6 +355,11 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
 	}
 
 	adc_humidity = be16_to_cpu(tmp);
+	if (adc_humidity == BMP280_HUMIDITY_SKIPPED) {
+		/* reading was skipped */
+		dev_err(data->dev, "reading humidity skipped\n");
+		return -EIO;
+	}
 	comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
 
 	*val = comp_humidity;
@@ -597,14 +612,20 @@ static const struct bmp280_chip_info bmp280_chip_info = {
 
 static int bme280_chip_config(struct bmp280_data *data)
 {
-	int ret = bmp280_chip_config(data);
+	int ret;
 	u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
 
+	/*
+	 * Oversampling of humidity must be set before oversampling of
+	 * temperature/pressure is set to become effective.
+	 */
+	ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+				 BMP280_OSRS_HUMIDITY_MASK, osrs);
+
 	if (ret < 0)
 		return ret;
 
-	return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
-				  BMP280_OSRS_HUMIDITY_MASK, osrs);
+	return bmp280_chip_config(data);
 }
 
 static const struct bmp280_chip_info bme280_chip_info = {
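The new checks reject a fixed "skipped" register pattern (defined in bmp280.h below) before the raw value ever reaches the compensation math. A minimal sketch of that sentinel test; read_temp and the second test value are invented for the demo, and the real driver returns -EIO:

#include <stdio.h>

#define TEMP_SKIPPED 0x80000

/* Reject the power-on/skipped pattern before treating the word as a
 * real sample; garbage here would corrupt the compensation output. */
static int read_temp(unsigned int raw, int *out)
{
	if (raw == TEMP_SKIPPED)
		return -1;
	*out = (int)raw;	/* compensation would go here */
	return 0;
}

int main(void)
{
	int v;

	printf("skipped sample: %d\n", read_temp(0x80000, &v));
	printf("valid sample:   %d\n", read_temp(0x7feed, &v));
	return 0;
}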
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
index 2c770e13be0e..61347438b779 100644
--- a/drivers/iio/pressure/bmp280.h
+++ b/drivers/iio/pressure/bmp280.h
@@ -96,6 +96,11 @@
 #define BME280_CHIP_ID			0x60
 #define BMP280_SOFT_RESET_VAL		0xB6
 
+/* BMP280 register skipped special values */
+#define BMP280_TEMP_SKIPPED		0x80000
+#define BMP280_PRESS_SKIPPED		0x80000
+#define BMP280_HUMIDITY_SKIPPED		0x8000
+
 /* Regmap configurations */
 extern const struct regmap_config bmp180_regmap_config;
 extern const struct regmap_config bmp280_regmap_config;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index aa61ec15c139..f1bce05ffa13 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -456,7 +456,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
 			.mask_od = 0x40,
 			.addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
 		},
-		.multi_read_bit = true,
+		.multi_read_bit = false,
 		.bootime = 2,
 	},
 };
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index d22bc56dd9fc..25ad6abfee22 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -366,34 +366,32 @@ static int stm32_counter_read_raw(struct iio_dev *indio_dev,
 				  int *val, int *val2, long mask)
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 dat;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-	{
-		u32 cnt;
-
-		regmap_read(priv->regmap, TIM_CNT, &cnt);
-		*val = cnt;
+		regmap_read(priv->regmap, TIM_CNT, &dat);
+		*val = dat;
+		return IIO_VAL_INT;
 
+	case IIO_CHAN_INFO_ENABLE:
+		regmap_read(priv->regmap, TIM_CR1, &dat);
+		*val = (dat & TIM_CR1_CEN) ? 1 : 0;
 		return IIO_VAL_INT;
-	}
-	case IIO_CHAN_INFO_SCALE:
-	{
-		u32 smcr;
 
-		regmap_read(priv->regmap, TIM_SMCR, &smcr);
-		smcr &= TIM_SMCR_SMS;
+	case IIO_CHAN_INFO_SCALE:
+		regmap_read(priv->regmap, TIM_SMCR, &dat);
+		dat &= TIM_SMCR_SMS;
 
 		*val = 1;
 		*val2 = 0;
 
 		/* in quadrature case scale = 0.25 */
-		if (smcr == 3)
+		if (dat == 3)
 			*val2 = 2;
 
 		return IIO_VAL_FRACTIONAL_LOG2;
 	}
-	}
 
 	return -EINVAL;
 }
@@ -403,15 +401,31 @@ static int stm32_counter_write_raw(struct iio_dev *indio_dev,
 				   int val, int val2, long mask)
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 dat;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		regmap_write(priv->regmap, TIM_CNT, val);
+		return regmap_write(priv->regmap, TIM_CNT, val);
 
-		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		/* fixed scale */
 		return -EINVAL;
+
+	case IIO_CHAN_INFO_ENABLE:
+		if (val) {
+			regmap_read(priv->regmap, TIM_CR1, &dat);
+			if (!(dat & TIM_CR1_CEN))
+				clk_enable(priv->clk);
+			regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+					   TIM_CR1_CEN);
+		} else {
+			regmap_read(priv->regmap, TIM_CR1, &dat);
+			regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN,
+					   0);
+			if (dat & TIM_CR1_CEN)
+				clk_disable(priv->clk);
+		}
+		return 0;
 	}
 
 	return -EINVAL;
@@ -471,7 +485,7 @@ static int stm32_get_trigger_mode(struct iio_dev *indio_dev,
 
 	regmap_read(priv->regmap, TIM_SMCR, &smcr);
 
-	return smcr == TIM_SMCR_SMS ? 0 : -EINVAL;
+	return (smcr & TIM_SMCR_SMS) == TIM_SMCR_SMS ? 0 : -EINVAL;
 }
 
 static const struct iio_enum stm32_trigger_mode_enum = {
@@ -507,9 +521,19 @@ static int stm32_set_enable_mode(struct iio_dev *indio_dev,
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
 	int sms = stm32_enable_mode2sms(mode);
+	u32 val;
 
 	if (sms < 0)
 		return sms;
+	/*
+	 * Triggered mode sets CEN bit automatically by hardware. So, first
+	 * enable counter clock, so it can use it. Keeps it in sync with CEN.
+	 */
+	if (sms == 6) {
+		regmap_read(priv->regmap, TIM_CR1, &val);
+		if (!(val & TIM_CR1_CEN))
+			clk_enable(priv->clk);
+	}
 
 	regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
 
@@ -571,11 +595,14 @@ static int stm32_get_quadrature_mode(struct iio_dev *indio_dev,
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
 	u32 smcr;
+	int mode;
 
 	regmap_read(priv->regmap, TIM_SMCR, &smcr);
-	smcr &= TIM_SMCR_SMS;
+	mode = (smcr & TIM_SMCR_SMS) - 1;
+	if ((mode < 0) || (mode > ARRAY_SIZE(stm32_quadrature_modes)))
+		return -EINVAL;
 
-	return smcr - 1;
+	return mode;
 }
 
 static const struct iio_enum stm32_quadrature_mode_enum = {
@@ -592,13 +619,20 @@ static const char *const stm32_count_direction_states[] = {
 
 static int stm32_set_count_direction(struct iio_dev *indio_dev,
 				     const struct iio_chan_spec *chan,
-				     unsigned int mode)
+				     unsigned int dir)
 {
 	struct stm32_timer_trigger *priv = iio_priv(indio_dev);
+	u32 val;
+	int mode;
 
-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR, mode);
+	/* In encoder mode, direction is RO (given by TI1/TI2 signals) */
+	regmap_read(priv->regmap, TIM_SMCR, &val);
+	mode = (val & TIM_SMCR_SMS) - 1;
+	if ((mode >= 0) && (mode < ARRAY_SIZE(stm32_quadrature_modes)))
+		return -EBUSY;
 
-	return 0;
+	return regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_DIR,
+				  dir ? TIM_CR1_DIR : 0);
 }
 
 static int stm32_get_count_direction(struct iio_dev *indio_dev,
@@ -609,7 +643,7 @@ static int stm32_get_count_direction(struct iio_dev *indio_dev,
 
 	regmap_read(priv->regmap, TIM_CR1, &cr1);
 
-	return (cr1 & TIM_CR1_DIR);
+	return ((cr1 & TIM_CR1_DIR) ? 1 : 0);
 }
 
 static const struct iio_enum stm32_count_direction_enum = {
@@ -672,7 +706,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
 static const struct iio_chan_spec stm32_trigger_channel = {
 	.type = IIO_COUNT,
 	.channel = 0,
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			      BIT(IIO_CHAN_INFO_ENABLE) |
+			      BIT(IIO_CHAN_INFO_SCALE),
 	.ext_info = stm32_trigger_count_info,
 	.indexed = 1
 };
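The IIO_CHAN_INFO_ENABLE handler above only calls clk_enable()/clk_disable() when the TIM_CR1_CEN bit actually changes, so the clock refcount stays paired with the hardware state. A minimal userspace sketch of that balancing rule; cen and clk_refcount model the enable bit and the clock framework's counter:

#include <stdbool.h>
#include <stdio.h>

static bool cen;		/* models TIM_CR1_CEN */
static int clk_refcount;	/* models the clk framework count */

static void counter_set_enabled(bool enable)
{
	if (enable) {
		if (!cen)
			clk_refcount++;	/* clk_enable() */
		cen = true;
	} else {
		if (cen)
			clk_refcount--;	/* clk_disable() */
		cen = false;
	}
}

int main(void)
{
	counter_set_enabled(true);
	counter_set_enabled(true);	/* no double enable */
	counter_set_enabled(false);
	printf("refcount: %d\n", clk_refcount);	/* 0: balanced */
	return 0;
}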
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a6cb379a4ebc..437522ca97b4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -61,6 +61,7 @@ struct addr_req {
 	void (*callback)(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *addr, void *context);
 	unsigned long timeout;
+	struct delayed_work work;
 	int status;
 	u32 seq;
 };
@@ -268,6 +269,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 			return ret;
 
 		ret = rdma_copy_addr(dev_addr, dev, NULL);
+		dev_addr->bound_dev_if = dev->ifindex;
 		if (vlan_id)
 			*vlan_id = rdma_vlan_dev_vlan_id(dev);
 		dev_put(dev);
@@ -280,6 +282,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 				  &((const struct sockaddr_in6 *)addr)->sin6_addr,
 				  dev, 1)) {
 				ret = rdma_copy_addr(dev_addr, dev, NULL);
+				dev_addr->bound_dev_if = dev->ifindex;
 				if (vlan_id)
 					*vlan_id = rdma_vlan_dev_vlan_id(dev);
 				break;
@@ -293,7 +296,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 {
 	unsigned long delay;
 
@@ -301,7 +304,7 @@ static void set_timeout(unsigned long time)
 	if ((long)delay < 0)
 		delay = 0;
 
-	mod_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, delayed_work, delay);
 }
 
 static void queue_req(struct addr_req *req)
@@ -316,8 +319,7 @@ static void queue_req(struct addr_req *req)
 
 	list_add(&req->list, &temp_req->list);
 
-	if (req_list.next == &req->list)
-		set_timeout(req->timeout);
+	set_timeout(&req->work, req->timeout);
 	mutex_unlock(&lock);
 }
 
@@ -405,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	fl4.saddr = src_ip;
 	fl4.flowi4_oif = addr->bound_dev_if;
 	rt = ip_route_output_key(addr->net, &fl4);
-	if (IS_ERR(rt)) {
-		ret = PTR_ERR(rt);
-		goto out;
-	}
+	ret = PTR_ERR_OR_ZERO(rt);
+	if (ret)
+		return ret;
+
 	src_in->sin_family = AF_INET;
 	src_in->sin_addr.s_addr = fl4.saddr;
 
@@ -423,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 
 	*prt = rt;
 	return 0;
-out:
-	return ret;
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -509,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in,
 	struct dst_entry *dst;
 	int ret;
 
+	if (!addr->net) {
+		pr_warn_ratelimited("%s: missing namespace\n", __func__);
+		return -EINVAL;
+	}
+
 	if (src_in->sa_family == AF_INET) {
 		struct rtable *rt = NULL;
 		const struct sockaddr_in *dst_in4 =
@@ -522,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in,
 		if (resolve_neigh)
 			ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
 
-		ndev = rt->dst.dev;
-		dev_hold(ndev);
+		if (addr->bound_dev_if) {
+			ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+		} else {
+			ndev = rt->dst.dev;
+			dev_hold(ndev);
+		}
 
 		ip_rt_put(rt);
 	} else {
@@ -539,19 +548,63 @@ static int addr_resolve(struct sockaddr *src_in,
 		if (resolve_neigh)
 			ret = addr_resolve_neigh(dst, dst_in, addr, seq);
 
-		ndev = dst->dev;
-		dev_hold(ndev);
+		if (addr->bound_dev_if) {
+			ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+		} else {
+			ndev = dst->dev;
+			dev_hold(ndev);
+		}
 
 		dst_release(dst);
 	}
 
-	addr->bound_dev_if = ndev->ifindex;
-	addr->net = dev_net(ndev);
+	if (ndev->flags & IFF_LOOPBACK) {
+		ret = rdma_translate_ip(dst_in, addr, NULL);
+		/*
+		 * Put the loopback device and get the translated
+		 * device instead.
+		 */
+		dev_put(ndev);
+		ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
+	} else {
+		addr->bound_dev_if = ndev->ifindex;
+	}
 	dev_put(ndev);
 
 	return ret;
 }
 
+static void process_one_req(struct work_struct *_work)
+{
+	struct addr_req *req;
+	struct sockaddr *src_in, *dst_in;
+
+	mutex_lock(&lock);
+	req = container_of(_work, struct addr_req, work.work);
+
+	if (req->status == -ENODATA) {
+		src_in = (struct sockaddr *)&req->src_addr;
+		dst_in = (struct sockaddr *)&req->dst_addr;
+		req->status = addr_resolve(src_in, dst_in, req->addr,
+					   true, req->seq);
+		if (req->status && time_after_eq(jiffies, req->timeout)) {
+			req->status = -ETIMEDOUT;
+		} else if (req->status == -ENODATA) {
+			/* requeue the work for retrying again */
+			set_timeout(&req->work, req->timeout);
+			mutex_unlock(&lock);
+			return;
+		}
+	}
+	list_del(&req->list);
+	mutex_unlock(&lock);
+
+	req->callback(req->status, (struct sockaddr *)&req->src_addr,
+		req->addr, req->context);
+	put_client(req->client);
+	kfree(req);
+}
+
 static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
@@ -569,20 +622,23 @@ static void process_req(struct work_struct *work)
 					   true, req->seq);
 			if (req->status && time_after_eq(jiffies, req->timeout))
 				req->status = -ETIMEDOUT;
-			else if (req->status == -ENODATA)
+			else if (req->status == -ENODATA) {
+				set_timeout(&req->work, req->timeout);
 				continue;
+			}
 		}
 		list_move_tail(&req->list, &done_list);
 	}
 
-	if (!list_empty(&req_list)) {
-		req = list_entry(req_list.next, struct addr_req, list);
-		set_timeout(req->timeout);
-	}
 	mutex_unlock(&lock);
 
 	list_for_each_entry_safe(req, temp_req, &done_list, list) {
 		list_del(&req->list);
+		/* It is safe to cancel other work items from this work item
+		 * because at a time there can be only one work item running
+		 * with this single threaded work queue.
+		 */
+		cancel_delayed_work(&req->work);
 		req->callback(req->status, (struct sockaddr *) &req->src_addr,
 			req->addr, req->context);
 		put_client(req->client);
@@ -625,6 +681,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
 	req->context = context;
 	req->client = client;
 	atomic_inc(&client->refcount);
+	INIT_DELAYED_WORK(&req->work, process_one_req);
 	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
 
 	req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -679,7 +736,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
 			list_move(&req->list, &req_list);
-			set_timeout(req->timeout);
+			set_timeout(&req->work, req->timeout);
 			break;
 		}
 	}
@@ -785,9 +842,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;
 
-		if (neigh->nud_state & NUD_VALID) {
-			set_timeout(jiffies);
-		}
+		if (neigh->nud_state & NUD_VALID)
+			set_timeout(&work, jiffies);
 	}
 	return 0;
 }
@@ -798,7 +854,7 @@ static struct notifier_block nb = {
 
 int addr_init(void)
 {
-	addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+	addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
 	if (!addr_wq)
 		return -ENOMEM;
 
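The addr4_resolve() hunk above collapses the IS_ERR()/PTR_ERR()/goto error path into a single PTR_ERR_OR_ZERO() check and early return. A minimal standalone sketch of that pattern, with userspace mocks of the kernel's error-pointer helpers (the real macros live in include/linux/err.h); lookup() and resolve() are hypothetical names for the demo:

#include <stdio.h>

/* mocks of the kernel's error-pointer helpers, sized for userspace */
#define MAX_ERRNO	4095
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR_OR_ZERO(ptr) (IS_ERR(ptr) ? PTR_ERR(ptr) : 0)

static void *lookup(int fail)
{
	static int obj;

	return fail ? ERR_PTR(-12) : &obj;	/* -12 stands in for -ENOMEM */
}

static long resolve(int fail)
{
	void *rt = lookup(fail);
	long ret = PTR_ERR_OR_ZERO(rt);

	if (ret)	/* a single early return replaces the goto label */
		return ret;
	return 0;
}

int main(void)
{
	printf("ok path: %ld, error path: %ld\n", resolve(0), resolve(1));
	return 0;
}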
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 31bb82d8ecd7..0eb393237ba2 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
 	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
 		return ret;
 
-	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
+	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
 		ndev = dev_get_by_index(&init_net, bound_if_index);
-		if (ndev && ndev->flags & IFF_LOOPBACK) {
-			pr_info("detected loopback device\n");
-			dev_put(ndev);
-
-			if (!device->get_netdev)
-				return -EOPNOTSUPP;
-
-			ndev = device->get_netdev(device, port);
-			if (!ndev)
-				return -ENODEV;
-		}
-	} else {
+	else
 		gid_type = IB_GID_TYPE_IB;
-	}
 
 	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
 					 ndev, NULL);
@@ -1044,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		} else
 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
 						 qp_attr_mask);
+		qp_attr->port_num = id_priv->id.port_num;
+		*qp_attr_mask |= IB_QP_PORT;
 	} else
 		ret = -ENOSYS;
 
@@ -2569,21 +2560,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	if (ndev->flags & IFF_LOOPBACK) {
-		dev_put(ndev);
-		if (!id_priv->id.device->get_netdev) {
-			ret = -EOPNOTSUPP;
-			goto err2;
-		}
-
-		ndev = id_priv->id.device->get_netdev(id_priv->id.device,
-						      id_priv->id.port_num);
-		if (!ndev) {
-			ret = -ENODEV;
-			goto err2;
-		}
-	}
-
 	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
 						    id_priv->id.port_num);
 	gid_type = cma_route_gid_type(addr->dev_addr.network,
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index a5dfab6adf49..221468f77128 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -537,10 +537,11 @@ void ib_unregister_device(struct ib_device *device)
 	}
 	up_read(&lists_rwsem);
 
-	mutex_unlock(&device_mutex);
-
 	ib_device_unregister_rdmacg(device);
 	ib_device_unregister_sysfs(device);
+
+	mutex_unlock(&device_mutex);
+
 	ib_cache_cleanup_one(device);
 
 	ib_security_destroy_port_pkey_list(device);
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index db958d3207ef..94a9eefb3cfc 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -42,6 +42,8 @@
 #include <rdma/ib_cache.h>
 #include <rdma/ib_addr.h>
 
+static struct workqueue_struct *gid_cache_wq;
+
 enum gid_op_type {
 	GID_DEL = 0,
 	GID_ADD
@@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
 	}
 	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);
 
-	queue_work(ib_wq, &ndev_work->work);
+	queue_work(gid_cache_wq, &ndev_work->work);
 
 	return NOTIFY_DONE;
 }
@@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event,
 	dev_hold(ndev);
 	work->gid_attr.ndev = ndev;
 
-	queue_work(ib_wq, &work->work);
+	queue_work(gid_cache_wq, &work->work);
 
 	return NOTIFY_DONE;
 }
@@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = {
 
 int __init roce_gid_mgmt_init(void)
 {
+	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
+	if (!gid_cache_wq)
+		return -ENOMEM;
+
 	register_inetaddr_notifier(&nb_inetaddr);
 	if (IS_ENABLED(CONFIG_IPV6))
 		register_inet6addr_notifier(&nb_inet6addr);
@@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void)
 	 * ib-core is removed, all physical devices have been removed,
 	 * so no issue with remaining hardware contexts.
 	 */
+	destroy_workqueue(gid_cache_wq);
 }
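Both this file and addr.c above move work onto an alloc_ordered_workqueue(), whose property is that at most one item executes at a time, in submission order; that is what makes it safe for one work item to cancel another, as the comment in process_req() notes. A rough userspace analogue of that single-consumer ordering guarantee, using one worker thread over a small FIFO (all names here are invented for the sketch; build with -pthread):

#include <pthread.h>
#include <stdio.h>

#define QLEN 8

static int fifo[QLEN];
static unsigned int head, tail;
static int done;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void queue_item(int v)
{
	pthread_mutex_lock(&lock);
	fifo[tail++ % QLEN] = v;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (head == tail && !done)
			pthread_cond_wait(&cond, &lock);
		if (head == tail)
			break;
		/* only this thread runs items, so they execute in order */
		printf("work item %d\n", fifo[head++ % QLEN]);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	pthread_create(&t, NULL, worker, NULL);
	for (i = 0; i < 5; i++)
		queue_item(i);
	pthread_mutex_lock(&lock);
	done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}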
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 8ba9bfb073d1..739bd69ef1d4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1015,7 +1015,7 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
 	cq->uobject       = &obj->uobject;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
-	cq->cq_context    = &ev_file->ev_queue;
+	cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
 	atomic_set(&cq->usecnt, 0);
 
 	obj->uobject.object = cq;
@@ -1153,7 +1153,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 			    int out_len)
 {
 	struct ib_uverbs_resize_cq	cmd;
-	struct ib_uverbs_resize_cq_resp	resp;
+	struct ib_uverbs_resize_cq_resp	resp = {};
 	struct ib_udata                 udata;
 	struct ib_cq			*cq;
 	int				ret = -EINVAL;
@@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	struct ib_uobject		*uobj;
 	struct ib_cq			*cq;
 	struct ib_ucq_object		*obj;
-	struct ib_uverbs_event_queue	*ev_queue;
 	int				 ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 	 */
 	uverbs_uobject_get(uobj);
 	cq      = uobj->object;
-	ev_queue = cq->cq_context;
 	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	memset(&resp, 0, sizeof(resp));
@@ -1524,6 +1522,7 @@ static int create_qp(struct ib_uverbs_file *file,
 	qp->qp_type	  = attr.qp_type;
 	atomic_set(&qp->usecnt, 0);
 	atomic_inc(&pd->usecnt);
+	qp->port = 0;
 	if (attr.send_cq)
 		atomic_inc(&attr.send_cq->usecnt);
 	if (attr.recv_cq)
@@ -1935,7 +1934,8 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto out;
 	}
 
-	if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+	if ((cmd->base.attr_mask & IB_QP_PORT) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
 		ret = -EINVAL;
 		goto release_qp;
 	}
@@ -1963,8 +1963,9 @@ static int modify_qp(struct ib_uverbs_file *file,
 	attr->alt_timeout	  = cmd->base.alt_timeout;
 	attr->rate_limit	  = cmd->rate_limit;
 
-	attr->ah_attr.type = rdma_ah_find_type(qp->device,
-					       cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_AV)
+		attr->ah_attr.type = rdma_ah_find_type(qp->device,
+						       cmd->base.dest.port_num);
 	if (cmd->base.dest.is_global) {
 		rdma_ah_set_grh(&attr->ah_attr, NULL,
 				cmd->base.dest.flow_label,
@@ -1982,8 +1983,9 @@ static int modify_qp(struct ib_uverbs_file *file,
 	rdma_ah_set_port_num(&attr->ah_attr,
 			     cmd->base.dest.port_num);
 
-	attr->alt_ah_attr.type = rdma_ah_find_type(qp->device,
-						   cmd->base.dest.port_num);
+	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
+		attr->alt_ah_attr.type =
+			rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
 	if (cmd->base.alt_dest.is_global) {
 		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
 				cmd->base.alt_dest.flow_label,
@@ -2005,28 +2007,13 @@ static int modify_qp(struct ib_uverbs_file *file,
 	rdma_ah_set_port_num(&attr->alt_ah_attr,
 			     cmd->base.alt_dest.port_num);
 
-	if (qp->real_qp == qp) {
-		if (cmd->base.attr_mask & IB_QP_AV) {
-			ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
-			if (ret)
-				goto release_qp;
-		}
-		ret = ib_security_modify_qp(qp,
-					    attr,
-					    modify_qp_mask(qp->qp_type,
-							   cmd->base.attr_mask),
-					    udata);
-	} else {
-		ret = ib_security_modify_qp(qp,
-					    attr,
-					    modify_qp_mask(qp->qp_type,
-							   cmd->base.attr_mask),
-					    NULL);
-	}
+	ret = ib_modify_qp_with_udata(qp, attr,
+				      modify_qp_mask(qp->qp_type,
+						     cmd->base.attr_mask),
+				      udata);
 
 release_qp:
 	uobj_put_obj_read(qp);
-
 out:
 	kfree(attr);
 
@@ -2103,7 +2090,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_qp      cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_uobject		*uobj;
-	struct ib_qp			*qp;
 	struct ib_uqp_object		*obj;
 	int				 ret = -EINVAL;
 
@@ -2117,7 +2103,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	qp  = uobj->object;
 	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
@@ -3019,7 +3004,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 {
 	struct ib_uverbs_ex_destroy_wq	cmd = {};
 	struct ib_uverbs_ex_destroy_wq_resp	resp = {};
-	struct ib_wq			*wq;
 	struct ib_uobject		*uobj;
 	struct ib_uwq_object		*obj;
 	size_t required_cmd_sz;
@@ -3053,7 +3037,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	wq = uobj->object;
 	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
@@ -3743,10 +3726,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	struct ib_uverbs_destroy_srq      cmd;
 	struct ib_uverbs_destroy_srq_resp resp;
 	struct ib_uobject		 *uobj;
-	struct ib_srq			 *srq;
 	struct ib_uevent_object		 *obj;
 	int				  ret = -EINVAL;
-	enum ib_srq_type		  srq_type;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
@@ -3756,9 +3737,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	srq = uobj->object;
 	obj = container_of(uobj, struct ib_uevent_object, uobject);
-	srq_type = srq->srq_type;
 	/*
 	 * Make sure we don't free the memory in remove_commit as we still
 	 * needs the uobject memory to create the response.
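The modify_qp() hunks above share one theme: a field from the user command may only be consulted when the matching attr_mask bit is set, otherwise uninitialized values (port_num, dest.port_num) leak into validation or attribute setup. A small standalone sketch of mask-gated attribute handling; the flag names, struct layout, and apply_attrs() are invented for illustration:

#include <stdio.h>

#define ATTR_PORT	(1 << 0)
#define ATTR_AV		(1 << 1)

struct cmd {
	int attr_mask;
	int port_num;	/* only meaningful when ATTR_PORT is set */
};

static int port_valid(int port)
{
	return port >= 1 && port <= 2;
}

static int apply_attrs(const struct cmd *c)
{
	/* validate port_num only when the caller actually set it */
	if ((c->attr_mask & ATTR_PORT) && !port_valid(c->port_num))
		return -22;	/* stands in for -EINVAL */
	if (c->attr_mask & ATTR_AV)
		printf("resolving address vector\n");
	if (c->attr_mask & ATTR_PORT)
		printf("moving QP to port %d\n", c->port_num);
	return 0;
}

int main(void)
{
	struct cmd garbage_port = { .attr_mask = ATTR_AV, .port_num = 99 };

	/* port 99 is garbage, but harmless: its mask bit is clear */
	return apply_attrs(&garbage_port);
}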
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3d2609608f58..5e530d2bee44 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (atomic_dec_and_test(&file->device->refcount))
 		ib_uverbs_comp_dev(file->device);
 
+	kobject_put(&file->device->kobj);
 	kfree(file);
 }
 
@@ -917,7 +918,6 @@ err:
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_file *file = filp->private_data;
-	struct ib_uverbs_device *dev = file->device;
 
 	mutex_lock(&file->cleanup_mutex);
 	if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 			       ib_uverbs_release_async_event_file);
 
 	kref_put(&file->ref, ib_uverbs_release_file);
-	kobject_put(&dev->kobj);
 
 	return 0;
 }
@@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 		kref_get(&file->ref);
 		mutex_unlock(&uverbs_dev->lists_mutex);
 
-		ib_uverbs_event_handler(&file->event_handler, &event);
 
 		mutex_lock(&file->cleanup_mutex);
 		ucontext = file->ucontext;
@@ -1171,6 +1169,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 			 * for example due to freeing the resources
			 * (e.g mmput).
			 */
+			ib_uverbs_event_handler(&file->event_handler, &event);
 			ib_dev->disassociate_ucontext(ucontext);
 			mutex_lock(&file->cleanup_mutex);
 			ib_uverbs_cleanup_ucontext(file, ucontext, true);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c973a83c898b..b456e3ca1876 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -452,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
 }
 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
 
+/*
+ * This function creates ah from the incoming packet.
+ * Incoming packet has dgid of the receiver node on which this code is
+ * getting executed and, sgid contains the GID of the sender.
+ *
+ * When resolving mac address of destination, the arrived dgid is used
+ * as sgid and, sgid is used as dgid because sgid contains destinations
+ * GID whom to respond to.
+ *
+ * This is why when calling rdma_addr_find_l2_eth_by_grh() function, the
+ * position of arguments dgid and sgid do not match the order of the
+ * parameters.
+ */
 int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		       const struct ib_wc *wc, const struct ib_grh *grh,
 		       struct rdma_ah_attr *ah_attr)
@@ -507,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
 		}
 
 		resolved_dev = dev_get_by_index(&init_net, if_index);
-		if (resolved_dev->flags & IFF_LOOPBACK) {
-			dev_put(resolved_dev);
-			resolved_dev = idev;
-			dev_hold(resolved_dev);
-		}
 		rcu_read_lock();
 		if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev,
 								   resolved_dev))
@@ -830,6 +838,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	spin_lock_init(&qp->mr_lock);
 	INIT_LIST_HEAD(&qp->rdma_mrs);
 	INIT_LIST_HEAD(&qp->sig_mrs);
+	qp->port = 0;
 
 	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
 		return ib_create_xrc_qp(qp, qp_init_attr);
@@ -1268,20 +1277,40 @@ out:
 }
 EXPORT_SYMBOL(ib_resolve_eth_dmac);
 
-int ib_modify_qp(struct ib_qp *qp,
-		 struct ib_qp_attr *qp_attr,
-		 int qp_attr_mask)
+/**
+ * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
+ * @qp: The QP to modify.
+ * @attr: On input, specifies the QP attributes to modify.  On output,
+ *   the current values of selected QP attributes are returned.
+ * @attr_mask: A bit-mask used to specify which attributes of the QP
+ *   are being modified.
+ * @udata: pointer to user's input output buffer information
+ *   are being modified.
+ * It returns 0 on success and returns appropriate error code on error.
+ */
+int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr,
+			    int attr_mask, struct ib_udata *udata)
 {
+	int ret;
 
-	if (qp_attr_mask & IB_QP_AV) {
-		int ret;
-
-		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
+	if (attr_mask & IB_QP_AV) {
+		ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr);
 		if (ret)
 			return ret;
 	}
+	ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
+	if (!ret && (attr_mask & IB_QP_PORT))
+		qp->port = attr->port_num;
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_modify_qp_with_udata);
 
-	return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+int ib_modify_qp(struct ib_qp *qp,
+		 struct ib_qp_attr *qp_attr,
+		 int qp_attr_mask)
+{
+	return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
 
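The verbs.c hunk keeps ib_modify_qp() as a one-line wrapper over the new ib_modify_qp_with_udata(), passing NULL udata, so existing kernel callers keep their signature while the uverbs path gains the extra argument. A generic standalone sketch of that thin-wrapper technique; modify() and modify_with_ctx() are illustrative names, not kernel APIs:

#include <stdio.h>
#include <stddef.h>

struct ctx { const char *who; };

/* new, more general entry point */
static int modify_with_ctx(int qp, int mask, struct ctx *uctx)
{
	printf("qp %d mask %#x from %s\n", qp, mask,
	       uctx ? uctx->who : "kernel");
	return 0;
}

/* legacy entry point preserved as a thin wrapper */
static int modify(int qp, int mask)
{
	return modify_with_ctx(qp, mask, NULL);
}

int main(void)
{
	struct ctx user = { .who = "userspace" };

	modify(1, 0x3);			/* old callers stay unchanged */
	return modify_with_ctx(2, 0x1, &user);
}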
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 08772836fded..85527532c49d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -51,6 +51,8 @@
 #define BNXT_RE_PAGE_SIZE_8M		BIT(23)
 #define BNXT_RE_PAGE_SIZE_1G		BIT(30)
 
+#define BNXT_RE_MAX_MR_SIZE		BIT(30)
+
 #define BNXT_RE_MAX_QPC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_MRW_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
@@ -60,6 +62,13 @@
 
 #define BNXT_RE_RQ_WQE_THRESHOLD	32
 
+/*
+ * Setting the default ack delay value to 16, which means
+ * the default timeout is approx. 260ms(4 usec * 2 ^(timeout))
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY	16
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
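The new BNXT_RE_DEFAULT_ACK_DELAY comment can be checked directly: with the comment's 4 µs unit, 4 µs * 2^16 = 262 ms, which it rounds to "approx. 260ms"; the IBTA spec encodes the local ACK timeout as 4.096 µs * 2^value, giving about 268 ms. A trivial standalone computation, assuming only those two encodings:

#include <stdio.h>

int main(void)
{
	int delay = 16;	/* BNXT_RE_DEFAULT_ACK_DELAY */
	double approx_ms = 4.0 * (1ULL << delay) / 1000.0;	/* comment's 4 us unit */
	double ibta_ms = 4.096 * (1ULL << delay) / 1000.0;	/* IBTA 4.096 us unit */

	printf("approx: %.0f ms, IBTA: %.0f ms\n", approx_ms, ibta_ms);
	return 0;
}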
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index c7bd68311d0c..f0e01b3ac711 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver);
 	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
 			    (u8 *)&ib_attr->sys_image_guid);
-	ib_attr->max_mr_size = ~0ull;
-	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K |
-				 BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M |
-				 BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G;
+	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K;
 
 	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
 	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_mr = dev_attr->max_mr;
 	ib_attr->max_pd = dev_attr->max_pd;
 	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom;
-	ib_attr->atomic_cap = IB_ATOMIC_HCA;
-	ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+	if (dev_attr->is_atomic) {
+		ib_attr->atomic_cap = IB_ATOMIC_HCA;
+		ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
+	}
 
 	ib_attr->max_ee_rd_atom = 0;
 	ib_attr->max_res_rd_atom = 0;
@@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
 
 	ib_attr->max_pkeys = 1;
-	ib_attr->local_ca_ack_delay = 0;
+	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
 	return 0;
 }
 
@@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
 		return -EINVAL;
 	ctx->refcnt--;
 	if (!ctx->refcnt) {
-		rc = bnxt_qplib_del_sgid
-			(sgid_tbl,
-			 &sgid_tbl->tbl[ctx->idx], true);
-		if (rc)
+		rc = bnxt_qplib_del_sgid(sgid_tbl,
+					 &sgid_tbl->tbl[ctx->idx],
+					 true);
+		if (rc) {
 			dev_err(rdev_to_dev(rdev),
 				"Failed to remove GID: %#x", rc);
-		ctx_tbl = sgid_tbl->ctx;
-		ctx_tbl[ctx->idx] = NULL;
-		kfree(ctx);
+		} else {
+			ctx_tbl = sgid_tbl->ctx;
+			ctx_tbl[ctx->idx] = NULL;
+			kfree(ctx);
+		}
 	}
 	} else {
 		return -EINVAL;
@@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
 
 	/* Create a fence MW only for kernel consumers */
 	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
-	if (!mw) {
+	if (IS_ERR(mw)) {
 		dev_err(rdev_to_dev(rdev),
 			"Failed to create fence-MW for PD: %p\n", pd);
-		rc = -EINVAL;
+		rc = PTR_ERR(mw);
 		goto fail;
 	}
 	fence->mw = mw;
@@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
 	int rc;
 
 	bnxt_re_destroy_fence_mr(pd);
-	if (ib_pd->uobject && pd->dpi.dbr) {
-		struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
-		struct bnxt_re_ucontext *ucntx;
 
-		/* Free DPI only if this is the first PD allocated by the
-		 * application and mark the context dpi as NULL
-		 */
-		ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
-
-		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
-					    &rdev->qplib_res.dpi_tbl,
-					    &pd->dpi);
+	if (pd->qplib_pd.id) {
+		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+					   &rdev->qplib_res.pd_tbl,
+					   &pd->qplib_pd);
 		if (rc)
-			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI");
-		/* Don't fail, continue*/
-		ucntx->dpi = NULL;
-	}
-
-	rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
-				   &rdev->qplib_res.pd_tbl,
-				   &pd->qplib_pd);
-	if (rc) {
-		dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
-		return rc;
+			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
 	}
 
 	kfree(pd);
@@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
 	if (udata) {
 		struct bnxt_re_pd_resp resp;
 
-		if (!ucntx->dpi) {
+		if (!ucntx->dpi.dbr) {
 			/* Allocate DPI in alloc_pd to avoid failing of
 			 * ibv_devinfo and family of application when DPIs
 			 * are depleted.
 			 */
 			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
-						 &pd->dpi, ucntx)) {
+						 &ucntx->dpi, ucntx)) {
 				rc = -ENOMEM;
 				goto dbfail;
 			}
-			ucntx->dpi = &pd->dpi;
 		}
 
 		resp.pdid = pd->qplib_pd.id;
 		/* Still allow mapping this DBR to the new user PD. */
-		resp.dpi = ucntx->dpi->dpi;
-		resp.dbr = (u64)ucntx->dpi->umdbr;
+		resp.dpi = ucntx->dpi.dpi;
+		resp.dbr = (u64)ucntx->dpi.umdbr;
 
 		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 		if (rc) {
@@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 		qplib_qp->rq.nmap = umem->nmap;
 	}
 
-	qplib_qp->dpi = cntx->dpi;
+	qplib_qp->dpi = &cntx->dpi;
 	return 0;
 rqfail:
 	ib_umem_release(qp->sumem);
@@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
-		qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic;
+		/* Cap the max_rd_atomic to device max */
+		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+						   dev_attr->max_qp_rd_atom);
 	}
 	if (qp_attr_mask & IB_QP_SQ_PSN) {
 		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
 		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
 	}
 	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		if (qp_attr->max_dest_rd_atomic >
+		    dev_attr->max_qp_init_rd_atom) {
+			dev_err(rdev_to_dev(rdev),
+				"max_dest_rd_atomic requested%d is > dev_max%d",
+				qp_attr->max_dest_rd_atomic,
+				dev_attr->max_qp_init_rd_atom);
+			return -EINVAL;
+		}
+
 		qp->qplib_qp.modify_flags |=
 				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
 		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
@@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
 		}
 		cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
 		cq->qplib_cq.nmap = cq->umem->nmap;
-		cq->qplib_cq.dpi = uctx->dpi;
+		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
 		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
@@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
 
 	spin_lock_irqsave(&cq->cq_lock, flags);
 	budget = min_t(u32, num_entries, cq->max_cql);
+	num_entries = budget;
 	if (!cq->cql) {
 		dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use");
 		goto exit;
@@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
 	else if (ib_cqn_flags & IB_CQ_SOLICITED)
 		type = DBR_DBR_TYPE_CQ_ARMSE;
 
+	/* Poll to see if there are missed events */
+	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+		return 1;
+
 	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
 
 	return 0;
@@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
 	struct scatterlist *sg;
 	int entry;
 
+	if (length > BNXT_RE_MAX_MR_SIZE) {
+		dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n",
+			length, BNXT_RE_MAX_MR_SIZE);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
@@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
 						     struct bnxt_re_ucontext,
 						     ib_uctx);
+
+	struct bnxt_re_dev *rdev = uctx->rdev;
+	int rc = 0;
+
 	if (uctx->shpg)
 		free_page((unsigned long)uctx->shpg);
+
+	if (uctx->dpi.dbr) {
+		/* Free DPI only if this is the first PD allocated by the
+		 * application and mark the context dpi as NULL
+		 */
+		rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+					    &rdev->qplib_res.dpi_tbl,
+					    &uctx->dpi);
+		if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!");
+		/* Don't fail, continue*/
+		uctx->dpi.dbr = NULL;
+	}
+
 	kfree(uctx);
 	return 0;
 }
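The two rd_atomic hunks above show the two standard responses to an out-of-range user request: silently clamp to the device limit (min_t for max_rd_atomic, where using less is still correct for the initiator) or reject outright (-EINVAL for max_dest_rd_atomic, where honoring less than requested could starve the remote side). A standalone sketch contrasting the two policies; the limits and function names are invented:

#include <stdio.h>

#define DEV_MAX_RD_ATOM		126
#define DEV_MAX_INIT_RD_ATOM	126
#define EINVAL_NEG		(-22)

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

static int set_initiator_depth(unsigned int requested, unsigned int *out)
{
	/* clamping is safe: the initiator may always use fewer resources */
	*out = min_t(unsigned int, requested, DEV_MAX_RD_ATOM);
	return 0;
}

static int set_responder_depth(unsigned int requested, unsigned int *out)
{
	/* the remote peer sized its pipeline to 'requested': reject instead */
	if (requested > DEV_MAX_INIT_RD_ATOM)
		return EINVAL_NEG;
	*out = requested;
	return 0;
}

int main(void)
{
	unsigned int v;

	set_initiator_depth(255, &v);
	printf("initiator clamped to %u\n", v);
	printf("responder rc: %d\n", set_responder_depth(255, &v));
	return 0;
}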
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -59,7 +59,6 @@ struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct ib_pd		ib_pd;
 	struct bnxt_qplib_pd	qplib_pd;
-	struct bnxt_qplib_dpi	dpi;
 	struct bnxt_re_fence_data fence;
 };
 
@@ -127,7 +126,7 @@ struct bnxt_re_mw {
 struct bnxt_re_ucontext {
 	struct bnxt_re_dev	*rdev;
 	struct ib_ucontext	ib_uctx;
-	struct bnxt_qplib_dpi	*dpi;
+	struct bnxt_qplib_dpi	dpi;
 	void			*shpg;
 	spinlock_t		sh_lock;	/* protect shpg */
 };
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 1fce5e73216b..ceae2d92fb08 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 	req.update_period_ms = cpu_to_le32(1000);
 	req.stats_dma_addr = cpu_to_le64(dma_map);
+	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index f05500bcdcf1..9af1514e5944 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 		}
 		/* Each SGE entry = 1 WQE size16 */
 		wqe_size16 = wqe->num_sge;
+		/* HW requires wqe size has room for atleast one SGE even if
+		 * none was supplied by ULP
+		 */
+		if (!wqe->num_sge)
+			wqe_size16++;
 	}
 
 	/* Specifics */
@@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 	rqe->flags = wqe->flags;
 	rqe->wqe_size = wqe->num_sge +
 			((offsetof(typeof(*rqe), data) + 15) >> 4);
+	/* HW requires wqe size has room for atleast one SGE even if none
+	 * was supplied by ULP
+	 */
+	if (!wqe->num_sge)
+		rqe->wqe_size++;
 
 	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
 	rqe->wr_id[0] = cpu_to_le32(sw_prod);
@@ -1885,6 +1895,25 @@ flush_rq:
 	return rc;
 }
 
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+	struct cq_base *hw_cqe, **hw_cqe_ptr;
+	unsigned long flags;
+	u32 sw_cons, raw_cons;
+	bool rc = true;
+
+	spin_lock_irqsave(&cq->hwq.lock, flags);
+	raw_cons = cq->hwq.cons;
+	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
+	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
+	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
+
+	/* Check for Valid bit. If the CQE is valid, return false */
+	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
+	spin_unlock_irqrestore(&cq->hwq.lock, flags);
+	return rc;
+}
+
 static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
 						struct cq_res_raweth_qp1 *hwcqe,
 						struct bnxt_qplib_cqe **pcqe,
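bnxt_qplib_is_cq_empty() peeks at the next CQE slot and tests its valid bit without advancing the consumer index; req_notify uses it to report a missed event instead of arming an interrupt that would never fire. A toy standalone ring with an explicit valid flag showing the same non-consuming peek; the real hardware compares a phase bit against the consumer wrap parity, and this layout is invented for the sketch:

#include <stdio.h>
#include <stdbool.h>

#define RING 4

struct cqe { int data; bool valid; };

static struct cqe ring[RING];
static unsigned int cons;	/* consumer index */

static void produce(unsigned int prod, int data)
{
	ring[prod % RING].data = data;
	ring[prod % RING].valid = true;	/* set last, after the payload */
}

/* peek without consuming: mirrors the is-empty check */
static bool ring_empty(void)
{
	return !ring[cons % RING].valid;
}

static int consume(void)
{
	struct cqe *e = &ring[cons % RING];

	e->valid = false;
	cons++;
	return e->data;
}

int main(void)
{
	printf("empty at start: %d\n", ring_empty());
	produce(0, 42);
	printf("empty after produce: %d\n", ring_empty());
	printf("consumed %d, empty again: %d\n", consume(), ring_empty());
	return 0;
}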
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 36b7b7db0e3f..19176e06c98a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
 		       int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index fde18cf0e406..ef91ab786dd4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
 				     0, 0, 0, 0, 0, 0, 0, 0 } };
 
 /* Device */
+
+static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+	int rc;
+	u16 pcie_ctl2;
+
+	rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
+				       &pcie_ctl2);
+	if (rc)
+		return false;
+	return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 			    struct bnxt_qplib_dev_attr *attr)
 {
@@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 
 	/* Extract the context from the side buffer */
 	attr->max_qp = le32_to_cpu(sb->max_qp);
+	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
+	attr->max_qp += 1;
 	attr->max_qp_rd_atom =
 		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
 		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
 	}
 
+	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
 bail:
 	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 	return rc;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a543f959098b..2ce7e2a32cf0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -42,6 +42,8 @@
 
 #define BNXT_QPLIB_RESERVED_QP_WRS	128
 
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040
+
 struct bnxt_qplib_dev_attr {
 	char				fw_ver[32];
 	u16				max_sgid;
@@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr {
 	u32				max_inline_data;
 	u32				l2_db_size;
 	u8				tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+	bool				is_atomic;
 };
 
 struct bnxt_qplib_pd {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 29d30744d6c9..0cd0c1fa27d4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	struct iwch_mr *mhp;
 	u32 mmid;
 	u32 stag = 0;
-	int ret = 0;
+	int ret = -ENOMEM;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
 	    max_num_sg > T3_MAX_FASTREG_DEPTH)
@@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 		goto err;
 
 	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
-	if (!mhp->pages) {
-		ret = -ENOMEM;
+	if (!mhp->pages)
 		goto pl_err;
-	}
 
 	mhp->rhp = rhp;
 	ret = iwch_alloc_pbl(mhp, max_num_sg);
@@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
 	mhp->attr.state = 1;
 	mmid = (stag) >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (ret)
 		goto err3;
 
 	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
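The iwch_alloc_mr() hunk switches from ret = 0 plus per-site error assignments to initializing ret = -ENOMEM once, so every early goto to the unwind path carries a sane error code and call sites only overwrite ret when they have a more specific one (the same bug class as the missing ret = -ENOMEM in c4iw_create_cq() below). A compact standalone sketch of that convention; alloc_a/alloc_b and setup() are placeholders:

#include <stdio.h>
#include <stdlib.h>

#define ENOMEM_NEG (-12)

static int alloc_a(void **p) { *p = malloc(16); return *p ? 0 : ENOMEM_NEG; }
static int alloc_b(void **p) { *p = NULL; return ENOMEM_NEG; } /* forced failure */

static int setup(void)
{
	void *a = NULL, *b = NULL;
	int ret = ENOMEM_NEG;	/* default for every early exit below */

	if (alloc_a(&a))
		goto out;
	if (alloc_b(&b))
		goto free_a;	/* ret already says -ENOMEM */

	free(b);
	ret = 0;
free_a:
	free(a);
out:
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup());
	return 0;
}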
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index e16fcaf6b5a3..be07da1997e6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		goto err3;
 
 	if (ucontext) {
+		ret = -ENOMEM;
 		mm = kmalloc(sizeof *mm, GFP_KERNEL);
 		if (!mm)
 			goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 5332f06b99ba..c2fba76becd4 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	rhp = php->rhp;
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
-	    max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
 					 use_dsgl))
 		return ERR_PTR(-EINVAL);
 
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bfc77596acbe..cb7fc0d35d1d 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 {
 	if (wr->num_sge > 1)
 		return -EINVAL;
-	if (wr->num_sge) {
+	if (wr->num_sge && wr->sg_list[0].length) {
 		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
 		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
 							>> 32));
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
 	/* clear from the handled mask of the general interrupt */
 	m = isrc / 64;
 	n = isrc % 64;
-	dd->gi_mask[m] &= ~((u64)1 << n);
+	if (likely(m < CCE_NUM_INT_CSRS)) {
+		dd->gi_mask[m] &= ~((u64)1 << n);
+	} else {
+		dd_dev_err(dd, "remap interrupt err\n");
+		return;
+	}
 
 	/* direct the chip source to the given MSI-X interrupt */
 	m = isrc / 8;
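The remap_intr() fix is a plain bounds check: isrc / 64 indexes gi_mask[], and an out-of-range source must be reported rather than scribbling past the array. A minimal standalone sketch of the guard; the array length and names are stand-ins:

#include <stdio.h>

#define MASK_LEN 4

static unsigned long long gi_mask[MASK_LEN];

static int clear_source(int isrc)
{
	int m = isrc / 64;
	int n = isrc % 64;

	if (m >= MASK_LEN) {	/* guard before the array write */
		fprintf(stderr, "remap interrupt err: isrc %d\n", isrc);
		return -1;
	}
	gi_mask[m] &= ~(1ULL << n);
	return 0;
}

int main(void)
{
	printf("in range: %d\n", clear_source(100));
	printf("out of range: %d\n", clear_source(1000));
	return 0;
}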
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
 		   qp->pid);
 }
 
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct hfi1_qp_priv *priv;
 
-	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 
 	priv->owner = qp;
 
-	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
 				   rdi->dparms.node);
 	if (!priv->s_ahg) {
 		kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
 /*
  * Functions provided by hfi1 driver for rdmavt to use
  */
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 unsigned free_all_qps(struct rvt_dev_info *rdi);
 void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index f78a733a63ec..d545302b8ef8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 	} else {
 		u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
 
-		if (!dmac)
+		if (!dmac) {
+			kfree(ah);
 			return ERR_PTR(-EINVAL);
+		}
 		memcpy(ah->av.mac, dmac, ETH_ALEN);
 	}
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..2540b65e242c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
228 switch (wr->opcode) { 228 switch (wr->opcode) {
229 case IB_WR_RDMA_READ: 229 case IB_WR_RDMA_READ:
230 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; 230 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
231 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, 231 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
232 atomic_wr(wr)->rkey); 232 rdma_wr(wr)->rkey);
233 break; 233 break;
234 case IB_WR_RDMA_WRITE: 234 case IB_WR_RDMA_WRITE:
235 case IB_WR_RDMA_WRITE_WITH_IMM: 235 case IB_WR_RDMA_WRITE_WITH_IMM:
236 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; 236 ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
237 set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, 237 set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
238 atomic_wr(wr)->rkey); 238 rdma_wr(wr)->rkey);
239 break; 239 break;
240 case IB_WR_SEND: 240 case IB_WR_SEND:
241 case IB_WR_SEND_WITH_INV: 241 case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
661 union ib_gid dgid; 661 union ib_gid dgid;
662 u64 subnet_prefix; 662 u64 subnet_prefix;
663 int attr_mask = 0; 663 int attr_mask = 0;
664 int i; 664 int i, j;
665 int ret; 665 int ret;
666 u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
666 u8 phy_port; 667 u8 phy_port;
668 u8 port = 0;
667 u8 sl; 669 u8 sl;
668 670
669 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; 671 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,27 +711,35 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	attr.rnr_retry = 7;
 	attr.timeout = 0x12;
 	attr.path_mtu = IB_MTU_256;
+	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
 	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
 	rdma_ah_set_static_rate(&attr.ah_attr, 3);
 
 	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+				(i % HNS_ROCE_MAX_PORTS);
+		sl = i / HNS_ROCE_MAX_PORTS;
+
+		for (j = 0; j < caps->num_ports; j++) {
+			if (hr_dev->iboe.phy_port[j] == phy_port) {
+				queue_en[i] = 1;
+				port = j;
+				break;
+			}
+		}
+
+		if (!queue_en[i])
+			continue;
+
 		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
-		if (IS_ERR(free_mr->mr_free_qp[i])) {
+		if (!free_mr->mr_free_qp[i]) {
 			dev_err(dev, "Create loop qp failed!\n");
 			goto create_lp_qp_failed;
 		}
 		hr_qp = free_mr->mr_free_qp[i];
 
-		sl = i / caps->num_ports;
-
-		if (caps->num_ports == HNS_ROCE_MAX_PORTS)
-			phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
-				   (i % caps->num_ports);
-		else
-			phy_port = i % caps->num_ports;
-
-		hr_qp->port = phy_port + 1;
+		hr_qp->port = port;
 		hr_qp->phy_port = phy_port;
 		hr_qp->ibqp.qp_type = IB_QPT_RC;
 		hr_qp->ibqp.device = &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		hr_qp->ibqp.recv_cq = cq;
 		hr_qp->ibqp.send_cq = cq;
 
-		rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
-		rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
-		attr.port_num = phy_port + 1;
+		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+		rdma_ah_set_sl(&attr.ah_attr, sl);
+		attr.port_num = port + 1;
 
 		attr.dest_qp_num = hr_qp->qpn;
 		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
-		       hr_dev->dev_addr[phy_port],
+		       hr_dev->dev_addr[port],
 		       MAC_ADDR_OCTET_NUM);
 
 		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
-		memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
-		memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
 		dgid.raw[11] = 0xff;
 		dgid.raw[12] = 0xfe;
 		dgid.raw[8] ^= 2;
 		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-		attr_mask |= IB_QP_PORT;
 
 		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 					    IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+
 		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
 		if (ret)
 			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
 	int i;
 	int ret;
-	int ne;
+	int ne = 0;
 
 	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
 	hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+		ne++;
+
 		ret = hns_roce_v1_send_lp_wqe(hr_qp);
 		if (ret) {
 			dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		}
 	}
 
-	ne = HNS_ROCE_V1_RESV_QP;
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
 		if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 			goto free_work;
 		}
 		ne -= ret;
-		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
 	} while (ne && time_before_eq(jiffies, end));
 
 	if (ne != 0)
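The msleep() to usleep_range() conversion above follows the kernel's timer guidance: for waits of a few milliseconds, msleep() rounds up to jiffies and can oversleep by a large factor, while usleep_range() is hrtimer-based and gives the scheduler an explicit window in which to coalesce wakeups. A minimal sketch of the conversion, assuming a millisecond wait constant:

	#include <linux/delay.h>

	#define DEMO_WAIT_MS 10	/* hypothetical stand-in for the driver's wait value */

	static void demo_short_wait(void)
	{
		/* msleep(DEMO_WAIT_MS) may sleep much longer than asked for
		 * small values; give an explicit min/max window instead. */
		usleep_range(DEMO_WAIT_MS * 1000, (DEMO_WAIT_MS + 1) * 1000);
	}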
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
 	} else {
 		/* RQ conrespond to CQE */
 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 		opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 		old_cnt = roce_get_field(old_send,
 					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
-		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
+		if (cur_cnt - old_cnt >
+		    SDB_ST_CMP_VAL) {
 			success_flags = 1;
-		else {
-			send_ptr = roce_get_field(old_send,
+		} else {
+			send_ptr =
+				roce_get_field(old_send,
 					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					    ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
 					    roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	struct hns_roce_dev *hr_dev;
 	struct hns_roce_qp *hr_qp;
 	struct device *dev;
+	unsigned long qpn;
 	int ret;
 
 	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	dev = &hr_dev->pdev->dev;
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 	hr_qp = qp_work_entry->qp;
+	qpn = hr_qp->qpn;
 
-	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
 
 	qp_work_entry->sche_cnt++;
 
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 				     &qp_work_entry->db_wait_stage);
 	if (ret) {
 		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
-			hr_qp->qpn);
+			qpn);
 		return;
 	}
 
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
 				    IB_QPS_RESET);
 	if (ret) {
-		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
 		return;
 	}
 
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
 		/* RC QP, release QPN */
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+		hns_roce_release_range_qp(hr_dev, qpn, 1);
 		kfree(hr_qp);
 	} else
 		kfree(hr_to_hr_sqp(hr_qp));
 
 	kfree(qp_work_entry);
 
-	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
 }
 
 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		return -ENODEV;
 	}
 
-	spin_lock_bh(&hr_dev->iboe.lock);
-
 	switch (event) {
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		break;
 	}
 
-	spin_unlock_bh(&hr_dev->iboe.lock);
 	return 0;
 }
 
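Dropping iboe.lock around the netdev event switch above removes a sleep-in-atomic hazard: handling NETDEV_UP can reach operations that may sleep, and sleeping is illegal while a spin_lock_bh() section has bottom halves disabled. A hedged sketch of the rule; if serialization were still required here, one common alternative (not what this patch does, which is to drop the lock entirely) is a mutex, which is allowed to sleep:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_event_lock);	/* hypothetical replacement lock */

	static void demo_handle_event(void)
	{
		mutex_lock(&demo_event_lock);	/* mutexes may sleep; spinlock holders must not */
		/* ... work that can sleep, e.g. a GFP_KERNEL allocation ... */
		mutex_unlock(&demo_event_lock);
	}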
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index da2eb5a281fa..9b1566468744 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
 int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq);
 
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev);
 void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
 void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
 void i40iw_rem_devusecount(struct i40iw_device *iwdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 6ae98aa7f74e..5a2fa743676c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
 	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
 	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
 	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
-	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
+	     iwdev->reset)) {
 		issue_close = 1;
 		iwqp->cm_id = NULL;
 		if (!iwqp->flush_issued) {
@@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
 		cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
 		attr.qp_state = IB_QPS_ERR;
 		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+		if (iwdev->reset)
+			i40iw_cm_disconn(cm_node->iwqp);
 		i40iw_rem_ref_cm_node(cm_node);
 	}
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index a027e2072477..a49ff2eb6fb3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
 	u64 base = 0;
 	u32 i, j;
 	u32 k = 0;
-	u32 low;
 
 	/* copy base values in obj_info */
-	for (i = I40IW_HMC_IW_QP, j = 0;
-	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+		if ((i == I40IW_HMC_IW_SRQ) ||
+		    (i == I40IW_HMC_IW_FSIMC) ||
+		    (i == I40IW_HMC_IW_FSIAV)) {
+			info[i].base = 0;
+			info[i].cnt = 0;
+			continue;
+		}
 		get_64bit_val(buf, j, &temp);
 		info[i].base = RS_64_1(temp, 32) * 512;
 		if (info[i].base > base) {
 			base = info[i].base;
 			k = i;
 		}
-		low = (u32)(temp);
-		if (low)
-			info[i].cnt = low;
+		if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+			info[i].cnt = 1;
+			continue;
+		}
+		if (i == I40IW_HMC_IW_QP)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+		else if (i == I40IW_HMC_IW_CQ)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+		else
+			info[i].cnt = (u32)(temp);
 	}
 	size = info[k].cnt * info[k].size + info[k].base;
 	if (size & 0x1FFFFF)
@@ -155,6 +167,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
 }
 
 /**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+				     u32 buf_idx,
+				     struct i40iw_hmc_obj_info *obj_info,
+				     u32 rsrc_idx)
+{
+	u64 temp;
+	u32 size;
+
+	get_64bit_val(buf, buf_idx, &temp);
+	obj_info[rsrc_idx].max_cnt = (u32)temp;
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[rsrc_idx].size = LS_64_1(1, size);
+
+	return temp;
+}
+
+/**
  * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
  * @buf: ptr to fpm query buffer
  * @info: ptr to i40iw_hmc_obj_info struct
@@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 					struct i40iw_hmc_info *hmc_info,
 					struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
 {
-	u64 temp;
 	struct i40iw_hmc_obj_info *obj_info;
-	u32 i, j, size;
+	u64 temp;
+	u32 size;
 	u16 max_pe_sds;
 
 	obj_info = hmc_info->hmc_obj;
@@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 	hmc_fpm_misc->max_sds = max_pe_sds;
 	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
 
-	for (i = I40IW_HMC_IW_QP, j = 8;
-	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		if (i == I40IW_HMC_IW_QP)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
-		else if (i == I40IW_HMC_IW_CQ)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
-		else
-			obj_info[i].max_cnt = (u32)temp;
+	get_64bit_val(buf, 8, &temp);
+	obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);
 
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = ((u64)1 << size);
-	}
-	for (i = I40IW_HMC_IW_MR, j = 48;
-	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		obj_info[i].max_cnt = (u32)temp;
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = LS_64_1(1, size);
-	}
+	get_64bit_val(buf, 16, &temp);
+	obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+	i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+	i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+	i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+	i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);
 
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
 	get_64bit_val(buf, 64, &temp);
+	obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_XFFL].size = 4;
 	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
 	if (!hmc_fpm_misc->xf_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
 	get_64bit_val(buf, 80, &temp);
+	obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_Q1FL].size = 4;
 	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
 	if (!hmc_fpm_misc->q1_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+	get_64bit_val(buf, 112, &temp);
+	obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+	get_64bit_val(buf, 120, &temp);
+	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+
 	return 0;
 }
 
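The rewrite above drops the index-driven loops because the query buffer's objects are no longer decoded uniformly; a small helper splits each 64-bit word into a max count (low 32 bits) and an object size stored as a power-of-two exponent (high 32 bits). That decode in isolation, sketched with plain C types rather than the driver's RS_64_1/LS_64_1 macros:

	#include <stdint.h>

	struct demo_obj_info {
		uint32_t max_cnt;
		uint64_t size;
	};

	/* One 64-bit FPM word: low half is the count, high half is log2(size). */
	static void demo_decode_fpm_word(uint64_t word, struct demo_obj_info *info)
	{
		info->max_cnt = (uint32_t)word;
		info->size = 1ULL << (uint32_t)(word >> 32);
	}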
@@ -1970,6 +2018,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
 		ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
 	}
 
+	cqp->process_cqp_sds = i40iw_update_sds_noccq;
+
 	return ret_code;
 }
 
@@ -3390,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
 		hmc_info->sd_table.sd_entry = virt_mem.va;
 	}
 
-	/* fill size of objects which are fixed */
-	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
 	return ret_code;
 }
 
@@ -4838,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
 {
 	u8 fcn_id = vsi->fcn_id;
 
-	if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
 		vsi->dev->fcn_id_array[fcn_id] = false;
 	i40iw_hw_stats_stop_timer(vsi);
 }
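The vsi stats hunk above also hardens the guard: comparing fcn_id against the array length rejects every out-of-range value before fcn_id_array is indexed, where the old test only rejected the single invalid sentinel. In miniature, with assumed sizes:

	#define DEMO_MAX_STATS 16	/* stand-in for I40IW_MAX_STATS_COUNT */

	static bool demo_fcn_id_array[DEMO_MAX_STATS];

	static void demo_stats_free(bool allocated, unsigned int fcn_id)
	{
		/* A bounds check subsumes the old "!= INVALID_ID" test and
		 * prevents an out-of-bounds write for any bogus id. */
		if (allocated && fcn_id < DEMO_MAX_STATS)
			demo_fcn_id_array[fcn_id] = false;
	}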
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index a39ac12b6a7e..2ebaadbed379 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1507,8 +1507,8 @@ enum {
 	I40IW_CQ0_ALIGNMENT_MASK = (256 - 1),
 	I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1),
 	I40IW_SHADOWAREA_MASK = (128 - 1),
-	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0,
-	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0
+	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1),
+	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1)
 };
 
 enum i40iw_alignment {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index e0f47cc2effc..ae8463ff59a7 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
 	if (free_hwcqp)
 		dev->cqp_ops->cqp_destroy(dev->cqp);
 
+	i40iw_cleanup_pending_cqp_op(iwdev);
+
 	i40iw_free_dma_mem(dev->hw, &cqp->sq);
 	kfree(cqp->scratch_array);
 	iwdev->cqp.scratch_array = NULL;
@@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
 /**
  * i40iw_destroy_aeq - destroy aeq
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Issue a destroy aeq request and
  * free the resources associated with the aeq
  * The function is called during driver unload
  */
-static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
 {
 	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
 
 	if (!iwdev->msix_shared)
 		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
@@ -304,19 +305,17 @@ exit:
  * i40iw_destroy_ceq - destroy ceq
  * @iwdev: iwarp device
  * @iwceq: ceq to be destroyed
- * @reset: true if called before reset
  *
  * Issue a destroy ceq request and
  * free the resources associated with the ceq
  */
 static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
-			      struct i40iw_ceq *iwceq,
-			      bool reset)
+			      struct i40iw_ceq *iwceq)
 {
 	enum i40iw_status_code status;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 
-	if (reset)
+	if (iwdev->reset)
 		goto exit;
 
 	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
@@ -335,12 +334,11 @@ exit:
 /**
  * i40iw_dele_ceqs - destroy all ceq's
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Go through all of the device ceq's and for each ceq
  * disable the ceq interrupt and destroy the ceq
  */
-static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
 {
 	u32 i = 0;
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
@@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
 
 	if (iwdev->msix_shared) {
 		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
-		i40iw_destroy_ceq(iwdev, iwceq, reset);
+		i40iw_destroy_ceq(iwdev, iwceq);
 		iwceq++;
 		i++;
 	}
 
 	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
 		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
-		i40iw_destroy_ceq(iwdev, iwceq, reset);
+		i40iw_destroy_ceq(iwdev, iwceq);
 	}
 }
 
 /**
  * i40iw_destroy_ccq - destroy control cq
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Issue destroy ccq request and
  * free the resources associated with the ccq
  */
-static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
 {
 	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
 	struct i40iw_ccq *ccq = &iwdev->ccq;
 	enum i40iw_status_code status = 0;
 
-	if (!reset)
+	if (!iwdev->reset)
 		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
 	if (status)
 		i40iw_pr_err("ccq destroy failed %d\n", status);
@@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
 		iwceq->msix_idx = msix_vec->idx;
 		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
 		if (status) {
-			i40iw_destroy_ceq(iwdev, iwceq, false);
+			i40iw_destroy_ceq(iwdev, iwceq);
 			break;
 		}
 		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
@@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
 
 	status = i40iw_configure_aeq_vector(iwdev);
 	if (status) {
-		i40iw_destroy_aeq(iwdev, false);
+		i40iw_destroy_aeq(iwdev);
 		return status;
 	}
 
@@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
 /**
  * i40iw_deinit_device - clean up the device resources
  * @iwdev: iwarp device
- * @reset: true if called before reset
  *
  * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
  * destroy the device queues and free the pble and the hmc objects
  */
-static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
+static void i40iw_deinit_device(struct i40iw_device *iwdev)
 {
 	struct i40e_info *ldev = iwdev->ldev;
 
@@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 		i40iw_destroy_rdma_device(iwdev->iwibdev);
 		/* fallthrough */
 	case IP_ADDR_REGISTERED:
-		if (!reset)
+		if (!iwdev->reset)
 			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
 		/* fallthrough */
 	case INET_NOTIFIER:
@@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset)
 			unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
 		}
 		/* fallthrough */
+	case PBLE_CHUNK_MEM:
+		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+		/* fallthrough */
 	case CEQ_CREATED:
-		i40iw_dele_ceqs(iwdev, reset);
+		i40iw_dele_ceqs(iwdev);
 		/* fallthrough */
 	case AEQ_CREATED:
-		i40iw_destroy_aeq(iwdev, reset);
+		i40iw_destroy_aeq(iwdev);
 		/* fallthrough */
 	case IEQ_CREATED:
-		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
 		/* fallthrough */
 	case ILQ_CREATED:
-		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
 		/* fallthrough */
 	case CCQ_CREATED:
-		i40iw_destroy_ccq(iwdev, reset);
-		/* fallthrough */
-	case PBLE_CHUNK_MEM:
-		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+		i40iw_destroy_ccq(iwdev);
 		/* fallthrough */
 	case HMC_OBJS_CREATED:
-		i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
 		/* fallthrough */
 	case CQP_CREATED:
 		i40iw_destroy_cqp(iwdev, true);
@@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
 		if (status)
 			break;
+		iwdev->init_state = PBLE_CHUNK_MEM;
 		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
 		i40iw_register_notifiers();
 		iwdev->init_state = INET_NOTIFIER;
@@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
 	} while (0);
 
 	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
-	i40iw_deinit_device(iwdev, false);
+	i40iw_deinit_device(iwdev);
 	return -ERESTART;
 }
 
@@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
 	iwdev = &hdl->device;
 	iwdev->closing = true;
 
+	if (reset)
+		iwdev->reset = true;
+
 	i40iw_cm_disconnect_all(iwdev);
 	destroy_workqueue(iwdev->virtchnl_wq);
-	i40iw_deinit_device(iwdev, reset);
+	i40iw_deinit_device(iwdev);
 }
 
 /**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index db41ab40da9c..7f5583d83622 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 	set_64bit_val(wqe, 0, info->paddr);
 	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 	set_64bit_val(wqe, 16, header[0]);
+
+	/* Ensure all data is written before writing valid bit */
+	wmb();
 	set_64bit_val(wqe, 24, header[1]);
 
 	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
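The wmb() added above is the classic descriptor-handoff barrier: header[1] carries the WQE's valid bit, so every other field must be globally visible before that word is written, or the device can fetch a half-built descriptor. The canonical shape, sketched with hypothetical word offsets:

	#include <linux/types.h>
	#include <asm/barrier.h>	/* wmb() */

	static void demo_post_wqe(u64 *wqe, u64 addr, u64 len, u64 hdr0, u64 hdr1_with_valid)
	{
		wqe[0] = addr;
		wqe[1] = len;
		wqe[2] = hdr0;

		wmb();	/* order all prior stores before the valid bit */
		wqe[3] = hdr1_with_valid;	/* the word the hardware polls */
	}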
@@ -682,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
-				     I40IW_CQ0_ALIGNMENT_MASK);
+				     I40IW_CQ0_ALIGNMENT);
 	if (ret)
 		return ret;
 
@@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
 
 	if (!list_empty(rxlist)) {
 		tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
-		plist = &tmpbuf->list;
 		while ((struct list_head *)tmpbuf != rxlist) {
 			if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
 				break;
+			plist = &tmpbuf->list;
 			tmpbuf = (struct i40iw_puda_buf *)plist->next;
 		}
 		/* Insert buf before tmpbuf */
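The two-line change above moves the update of plist inside the scan, so it tracks the last entry whose sequence number is still below the new buffer's; the insert then lands before the first larger entry instead of always just after the head. The in-order insert on its own, using list_for_each_entry() rather than the driver's open-coded walk (demo_* names are illustrative):

	#include <linux/list.h>
	#include <linux/types.h>

	struct demo_buf {
		struct list_head list;
		u32 seqnum;
	};

	/* Keep rxlist sorted by seqnum. */
	static void demo_insert_sorted(struct list_head *rxlist, struct demo_buf *nbuf)
	{
		struct list_head *prev = rxlist;
		struct demo_buf *cur;

		list_for_each_entry(cur, rxlist, list) {
			if ((int)(nbuf->seqnum - cur->seqnum) < 0)
				break;
			prev = &cur->list;	/* advance only past smaller entries */
		}
		list_add(&nbuf->list, prev);	/* lands before the first larger entry */
	}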
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index 91c421762f06..f7013f11d808 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -62,7 +62,7 @@ enum i40iw_status_code {
 	I40IW_ERR_INVALID_ALIGNMENT = -23,
 	I40IW_ERR_FLUSHED_QUEUE = -24,
 	I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
-	I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+	I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
 	I40IW_ERR_TIMEOUT = -27,
 	I40IW_ERR_OPCODE_MISMATCH = -28,
 	I40IW_ERR_CQP_COMPL_ERROR = -29,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index b0d3a0e8a9b5..1060725d18bc 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 
 	op_info = &info->op.inline_rdma_write;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 
 	op_info = &info->op.inline_send;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
 	get_64bit_val(cqe, 0, &qword0);
 	get_64bit_val(cqe, 16, &qword2);
 
-	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+	info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);
 
 	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
 
@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
 						  u8 *wqe_size)
 {
 	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;
 
 	if (data_size <= 16)
 		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 56d986924a4c..e311ec559f4e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait
  */
 void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
 {
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
 	unsigned long flags;
 
 	if (cqp_request->dynamic) {
@@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp
 		list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
 		spin_unlock_irqrestore(&cqp->req_lock, flags);
 	}
+	wake_up(&iwdev->close_wq);
 }
 
 /**
@@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
 }
 
 /**
+ * i40iw_free_pending_cqp_request -free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
+					   struct i40iw_cqp_request *cqp_request)
+{
+	struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
+
+	if (cqp_request->waiting) {
+		cqp_request->compl_info.error = true;
+		cqp_request->request_done = true;
+		wake_up(&cqp_request->waitq);
+	}
+	i40iw_put_cqp_request(cqp, cqp_request);
+	wait_event_timeout(iwdev->close_wq,
+			   !atomic_read(&cqp_request->refcount),
+			   1000);
+}
+
+/**
+ * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
+ * @iwdev: iwarp device
+ */
+void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
+{
+	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+	struct i40iw_cqp *cqp = &iwdev->cqp;
+	struct i40iw_cqp_request *cqp_request = NULL;
+	struct cqp_commands_info *pcmdinfo = NULL;
+	u32 i, pending_work, wqe_idx;
+
+	pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
+	wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
+	for (i = 0; i < pending_work; i++) {
+		cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+		wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
+	}
+
+	while (!list_empty(&dev->cqp_cmd_head)) {
+		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+		cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
+		if (cqp_request)
+			i40iw_free_pending_cqp_request(cqp, cqp_request);
+	}
+}
+
+/**
  * i40iw_free_qp - callback after destroy cqp completes
  * @cqp_request: cqp request for destroy qp
  * @num: not used
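The new cleanup path above drains two kinds of stranded work before teardown: in-flight ring slots, recovered from scratch_array by walking from the tail, and queued-but-unposted commands, recovered from the backlog list via container_of(). Each waiter is marked failed, woken, and its reference count allowed to drop before the memory goes away. The container_of() step in isolation, with hypothetical structures:

	#include <linux/kernel.h>	/* container_of() */
	#include <linux/list.h>

	struct demo_cmd_info {
		struct list_head head;
	};

	struct demo_request {
		struct demo_cmd_info info;	/* embedded member */
		bool waiting;
	};

	/* Recover the outer request from a pointer to its embedded info,
	 * the same shape used when draining the backlog list above. */
	static struct demo_request *demo_to_request(struct demo_cmd_info *info)
	{
		return container_of(info, struct demo_request, info);
	}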
@@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
 	cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
 	cqp_info->in.u.qp_destroy.remove_hash_idx = true;
 	status = i40iw_handle_cqp_op(iwdev, cqp_request);
-	if (status)
-		i40iw_pr_err("CQP-OP Destroy QP fail");
+	if (!status)
+		return;
+
+	i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+	i40iw_rem_devusecount(iwdev);
 }
 
 /**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4dbe61ec7a77..02d871db7ca5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
 			   struct i40iw_qp *iwqp,
 			   u32 qp_num)
 {
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
+
 	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
 	if (qp_num)
 		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+	if (iwpbl->pbl_allocated)
+		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
 	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
 	kfree(iwqp->kqp.wrid_mem);
@@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
 				  struct i40iw_qp *iwqp,
 				  struct i40iw_qp_init_info *init_info)
 {
-	struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
 	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
 
 	iwqp->page = qpmr->sq_page;
@@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
 		ucontext = to_ucontext(ibpd->uobject->context);
 
 		if (req.user_wqe_buffers) {
+			struct i40iw_pbl *iwpbl;
+
 			spin_lock_irqsave(
 				&ucontext->qp_reg_mem_list_lock, flags);
-			iwqp->iwpbl = i40iw_get_pbl(
+			iwpbl = i40iw_get_pbl(
 				(unsigned long)req.user_wqe_buffers,
 				&ucontext->qp_reg_mem_list);
 			spin_unlock_irqrestore(
 				&ucontext->qp_reg_mem_list_lock, flags);
 
-			if (!iwqp->iwpbl) {
+			if (!iwpbl) {
 				err_code = -ENODATA;
 				i40iw_pr_err("no pbl info\n");
 				goto error;
 			}
+			memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
 		}
 	}
 	err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
@@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
 		memset(&req, 0, sizeof(req));
 		iwcq->user_mode = true;
 		ucontext = to_ucontext(context);
-		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req)))
+		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+			err_code = -EFAULT;
 			goto cq_free_resources;
+		}
 
 		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
 		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
@@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
 		ucontext = to_ucontext(ibpd->uobject->context);
 		i40iw_del_memlist(iwmr, ucontext);
 	}
-	if (iwpbl->pbl_allocated)
+	if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
 		i40iw_free_pble(iwdev->pble_rsrc, palloc);
 	kfree(iwmr);
 	return 0;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 07c3fec77de6..9067443cd311 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -170,7 +170,7 @@ struct i40iw_qp {
 	struct i40iw_qp_kmode kqp;
 	struct i40iw_dma_mem host_ctx;
 	struct timer_list terminate_timer;
-	struct i40iw_pbl *iwpbl;
+	struct i40iw_pbl iwpbl;
 	struct i40iw_dma_mem q2_ctx_mem;
 	struct i40iw_dma_mem ietf_mem;
 	struct completion sq_drained;
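Turning iwpbl from a pointer into an embedded structure (paired with the memcpy in create_qp and the IW_MEMREG_TYPE_QP check in dereg_mr above) ties the PBL bookkeeping's lifetime to the QP itself, so a user deregistering memory can no longer leave the QP holding a dangling pointer. The ownership difference in miniature, with hypothetical demo_* types:

	struct demo_pbl { int pbl_allocated; };

	struct demo_qp_by_ptr {
		struct demo_pbl *iwpbl;	/* dangles if the pbl is freed elsewhere */
	};

	struct demo_qp_embedded {
		struct demo_pbl iwpbl;	/* owned: copied in, freed with the qp */
	};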
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 1e6c526450d9..fedaf8260105 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 	    mad->mad_hdr.attr_id == CM_REP_ATTR_ID ||
 	    mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) {
 		sl_cm_id = get_local_comm_id(mad);
+		id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id);
+		if (id)
+			goto cont;
 		id = id_map_alloc(ibdev, slave_id, sl_cm_id);
 		if (IS_ERR(id)) {
 			mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n",
@@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id
 		return -EINVAL;
 	}
 
+cont:
 	set_local_comm_id(mad, id->pv_cm_id);
 
 	if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID)
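The mlx4 CM hunks above make the slave-to-pv comm-id mapping idempotent: retransmitted REQ/REP/SIDR_REQ MADs now reuse the entry created on the first attempt, jumping past the allocation via the new cont: label, instead of allocating a duplicate each time. The lookup-or-create shape, sketched generically with assumed helpers:

	#include <linux/types.h>

	/* demo_lookup()/demo_alloc() are hypothetical stand-ins. */
	struct demo_id;
	struct demo_id *demo_lookup(int slave, u32 cm_id);
	struct demo_id *demo_alloc(int slave, u32 cm_id);

	static struct demo_id *demo_get_or_alloc(int slave, u32 cm_id)
	{
		struct demo_id *id = demo_lookup(slave, cm_id);

		if (id)
			return id;	/* duplicate MAD: reuse the mapping */
		return demo_alloc(slave, cm_id);	/* first sighting: create */
	}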
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	int err;
 
 	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
-			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+			     PAGE_SIZE * 2, &buf->buf);
 
 	if (err)
 		goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
 	if (err)
 		goto err_mtt;
 
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 		uar = &to_mucontext(context)->uar;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
 		if (err)
 			goto err_cq;
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 			 * call to mlx4_ib_vma_close.
 			 */
 			put_task_struct(owning_process);
-			msleep(1);
+			usleep_range(1000, 2000);
 			owning_process = get_pid_task(ibcontext->tgid,
 						      PIDTYPE_PID);
 			if (!owning_process ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 		if (!count)
 			break;
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	} while (time_after(end, jiffies));
 
 	flush_workqueue(ctx->mcg_wq);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
 	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
 	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
 	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
-	MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
 
 	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
 	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
 
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
-			    gfp_t gfp)
+			    struct ib_udata *udata, int sqpn,
+			    struct mlx4_ib_qp **caller_qp)
 {
 	int qpn;
 	int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
 	    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
 			MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-		sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+		sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
 		if (!sqp)
 			return -ENOMEM;
 		qp = &sqp->qp;
 		qp->pri.vid = 0xFFFF;
 		qp->alt.vid = 0xFFFF;
 	} else {
-		qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+		qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
 		if (!qp)
 			return -ENOMEM;
 		qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 
 		if (qp_has_rq(init_attr)) {
-			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
 			if (err)
 				goto err;
 
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
-				   &qp->buf, gfp)) {
+				   &qp->buf)) {
 			memcpy(&init_attr->cap, &backup_cap,
 			       sizeof(backup_cap));
 			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 				goto err_db;
 
 			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
-					   PAGE_SIZE * 2, &qp->buf, gfp)) {
+					   PAGE_SIZE * 2, &qp->buf)) {
 				err = -ENOMEM;
 				goto err_db;
 			}
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
 		if (err)
 			goto err_mtt;
 
 		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
-					gfp | __GFP_NOWARN);
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->sq.wrid)
 			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
-					gfp | __GFP_NOWARN);
+					GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->rq.wrid)
 			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
859 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) 859 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
860 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; 860 qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
861 861
862 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); 862 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
863 if (err) 863 if (err)
864 goto err_qpn; 864 goto err_qpn;
865 865
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1127 int err; 1127 int err;
1128 int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; 1128 int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
1129 u16 xrcdn = 0; 1129 u16 xrcdn = 0;
1130 gfp_t gfp;
1131 1130
1132 gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
1133 GFP_NOIO : GFP_KERNEL;
1134 /* 1131 /*
1135 * We only support LSO, vendor flag1, and multicast loopback blocking, 1132 * We only support LSO, vendor flag1, and multicast loopback blocking,
1136 * and only for kernel UD QPs. 1133 * and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1140 MLX4_IB_SRIOV_TUNNEL_QP | 1137 MLX4_IB_SRIOV_TUNNEL_QP |
1141 MLX4_IB_SRIOV_SQP | 1138 MLX4_IB_SRIOV_SQP |
1142 MLX4_IB_QP_NETIF | 1139 MLX4_IB_QP_NETIF |
1143 MLX4_IB_QP_CREATE_ROCE_V2_GSI | 1140 MLX4_IB_QP_CREATE_ROCE_V2_GSI))
1144 MLX4_IB_QP_CREATE_USE_GFP_NOIO))
1145 return ERR_PTR(-EINVAL); 1141 return ERR_PTR(-EINVAL);
1146 1142
1147 if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { 1143 if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1154 return ERR_PTR(-EINVAL); 1150 return ERR_PTR(-EINVAL);
1155 1151
1156 if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | 1152 if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
1157 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
1158 MLX4_IB_QP_CREATE_ROCE_V2_GSI | 1153 MLX4_IB_QP_CREATE_ROCE_V2_GSI |
1159 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) && 1154 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
1160 init_attr->qp_type != IB_QPT_UD) || 1155 init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1179 case IB_QPT_RC: 1174 case IB_QPT_RC:
1180 case IB_QPT_UC: 1175 case IB_QPT_UC:
1181 case IB_QPT_RAW_PACKET: 1176 case IB_QPT_RAW_PACKET:
1182 qp = kzalloc(sizeof *qp, gfp); 1177 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1183 if (!qp) 1178 if (!qp)
1184 return ERR_PTR(-ENOMEM); 1179 return ERR_PTR(-ENOMEM);
1185 qp->pri.vid = 0xFFFF; 1180 qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1188 case IB_QPT_UD: 1183 case IB_QPT_UD:
1189 { 1184 {
1190 err = create_qp_common(to_mdev(pd->device), pd, init_attr, 1185 err = create_qp_common(to_mdev(pd->device), pd, init_attr,
1191 udata, 0, &qp, gfp); 1186 udata, 0, &qp);
1192 if (err) { 1187 if (err) {
1193 kfree(qp); 1188 kfree(qp);
1194 return ERR_PTR(err); 1189 return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
1217 } 1212 }
1218 1213
1219 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, 1214 err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
1220 sqpn, 1215 sqpn, &qp);
1221 &qp, gfp);
1222 if (err) 1216 if (err)
1223 return ERR_PTR(err); 1217 return ERR_PTR(err);
1224 1218
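
[Annotation] The create_qp_common()/mlx4_db_alloc()/mlx4_buf_alloc() hunks above all drop a gfp_t parameter that existed only so IPoIB connected mode could force GFP_NOIO allocations through the whole call chain. The replacement is the scoped-allocation API: a caller that must not recurse into I/O brackets its call site once, and every allocator underneath can use plain GFP_KERNEL. A minimal sketch of the shape (the wrapper call site is illustrative; the real consumer is the ipoib_cm.c hunk further down):

	/* before: every helper in the allocation path threads a gfp_t */
	int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
			  gfp_t gfp);

	/* after: helpers allocate with GFP_KERNEL unconditionally ... */
	int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);

	/* ... and a caller needing NOIO semantics wraps the whole sequence */
	unsigned int noio_flag = memalloc_noio_save();
	err = mlx4_db_alloc(dev, &db, 0);
	memalloc_noio_restore(noio_flag);
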
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
135 if (err) 135 if (err)
136 goto err_mtt; 136 goto err_mtt;
137 } else { 137 } else {
138 err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL); 138 err = mlx4_db_alloc(dev->dev, &srq->db, 0);
139 if (err) 139 if (err)
140 goto err_srq; 140 goto err_srq;
141 141
142 *srq->db.db = 0; 142 *srq->db.db = 0;
143 143
144 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf, 144 if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
145 GFP_KERNEL)) { 145 &srq->buf)) {
146 err = -ENOMEM; 146 err = -ENOMEM;
147 goto err_db; 147 goto err_db;
148 } 148 }
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
167 if (err) 167 if (err)
168 goto err_buf; 168 goto err_buf;
169 169
170 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL); 170 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
171 if (err) 171 if (err)
172 goto err_mtt; 172 goto err_mtt;
173 173
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a7f2e60085c4..f7fcde1ff0aa 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1085,6 +1085,12 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1085 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == 1085 bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1086 IB_LINK_LAYER_INFINIBAND); 1086 IB_LINK_LAYER_INFINIBAND);
1087 1087
1088 /* CM layer calls ib_modify_port() regardless of the link layer. For
1089 * Ethernet ports, qkey violation and Port capabilities are meaningless.
1090 */
1091 if (!is_ib)
1092 return 0;
1093
1088 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { 1094 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1089 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; 1095 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1090 value = ~props->clr_port_cap_mask | props->set_port_cap_mask; 1096 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..2c40a2e989d2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
582 } 582 }
583} 583}
584 584
585static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
586{
587 if (!mlx5_debugfs_root)
588 return;
589
590 debugfs_remove_recursive(dev->cache.root);
591 dev->cache.root = NULL;
592}
593
585static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) 594static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
586{ 595{
587 struct mlx5_mr_cache *cache = &dev->cache; 596 struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
600 sprintf(ent->name, "%d", ent->order); 609 sprintf(ent->name, "%d", ent->order);
601 ent->dir = debugfs_create_dir(ent->name, cache->root); 610 ent->dir = debugfs_create_dir(ent->name, cache->root);
602 if (!ent->dir) 611 if (!ent->dir)
603 return -ENOMEM; 612 goto err;
604 613
605 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, 614 ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
606 &size_fops); 615 &size_fops);
607 if (!ent->fsize) 616 if (!ent->fsize)
608 return -ENOMEM; 617 goto err;
609 618
610 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, 619 ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
611 &limit_fops); 620 &limit_fops);
612 if (!ent->flimit) 621 if (!ent->flimit)
613 return -ENOMEM; 622 goto err;
614 623
615 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, 624 ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
616 &ent->cur); 625 &ent->cur);
617 if (!ent->fcur) 626 if (!ent->fcur)
618 return -ENOMEM; 627 goto err;
619 628
620 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, 629 ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
621 &ent->miss); 630 &ent->miss);
622 if (!ent->fmiss) 631 if (!ent->fmiss)
623 return -ENOMEM; 632 goto err;
624 } 633 }
625 634
626 return 0; 635 return 0;
627} 636err:
628 637 mlx5_mr_cache_debugfs_cleanup(dev);
629static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
630{
631 if (!mlx5_debugfs_root)
632 return;
633 638
634 debugfs_remove_recursive(dev->cache.root); 639 return -ENOMEM;
635} 640}
636 641
637static void delay_time_func(unsigned long ctx) 642static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
692 if (err) 697 if (err)
693 mlx5_ib_warn(dev, "cache debugfs failure\n"); 698 mlx5_ib_warn(dev, "cache debugfs failure\n");
694 699
700 /*
 701 * We don't want to fail the driver if debugfs fails to initialize,
 702 * so we don't forward the error to the user.
703 */
704
695 return 0; 705 return 0;
696} 706}
697 707
@@ -825,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
825 access_flags, 0); 835 access_flags, 0);
826 err = PTR_ERR_OR_ZERO(*umem); 836 err = PTR_ERR_OR_ZERO(*umem);
827 if (err < 0) { 837 if (err < 0) {
828 mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); 838 mlx5_ib_err(dev, "umem get failed (%d)\n", err);
829 return err; 839 return err;
830 } 840 }
831 841
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
1779 mr->ndescs = sg_nents; 1789 mr->ndescs = sg_nents;
1780 1790
1781 for_each_sg(sgl, sg, sg_nents, i) { 1791 for_each_sg(sgl, sg, sg_nents, i) {
1782 if (unlikely(i > mr->max_descs)) 1792 if (unlikely(i >= mr->max_descs))
1783 break; 1793 break;
1784 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); 1794 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
1785 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); 1795 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
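
[Annotation] Two separate fixes in mr.c deserve a note. The debugfs init no longer returns -ENOMEM from the middle of the loop, which leaked whatever entries had already been created; it now unwinds through a single error label, and since debugfs_remove_recursive() accepts a NULL dentry the cleanup helper is safe to call at any stage (it is also moved above its new caller, hence the relocation). Separately, the mlx5_ib_sg_to_klms() bound changes from i > mr->max_descs to i >= mr->max_descs: max_descs is a count, so valid indexes are 0..max_descs-1 and the old test allowed one write past the end of klms[]. The init/cleanup pattern, sketched with a hypothetical struct foo:

	static void foo_debugfs_cleanup(struct foo *f)
	{
		debugfs_remove_recursive(f->root);	/* NULL-safe */
		f->root = NULL;
	}

	static int foo_debugfs_init(struct foo *f)
	{
		f->root = debugfs_create_dir("foo", NULL);
		if (!f->root)
			goto err;
		if (!debugfs_create_u32("cur", 0400, f->root, &f->cur))
			goto err;
		return 0;
	err:
		foo_debugfs_cleanup(f);		/* undoes whatever was created */
		return -ENOMEM;
	}
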
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index ae0746754008..3d701c7a4c91 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -939,7 +939,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
939 939
940 if (qp->ibqp.qp_type != IB_QPT_RC) { 940 if (qp->ibqp.qp_type != IB_QPT_RC) {
941 av = *wqe; 941 av = *wqe;
942 if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT)) 942 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
943 *wqe += sizeof(struct mlx5_av); 943 *wqe += sizeof(struct mlx5_av);
944 else 944 else
945 *wqe += sizeof(struct mlx5_base_av); 945 *wqe += sizeof(struct mlx5_base_av);
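
[Annotation] The odp.c change is an endianness fix. av->dqp_dct holds a big-endian value, so the flag test must convert the CPU-order constant into wire order with cpu_to_be32() and mask in wire order; be32_to_cpu() applied to the constant byte-swaps the mask instead of the value, testing the wrong bits on little-endian hosts (the patch also swaps in the right constant, MLX5_EXTENDED_UD_AV). The general rule, as a small sketch:

	static bool av_is_extended(const struct mlx5_av *av)
	{
		/* correct: convert the constant once, compare in wire order */
		return av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV);
	}

	/* (av->dqp_dct & be32_to_cpu(CONST)) swaps the mask, not the value:
	 * wrong on little-endian, and sparse flags the __be32/u32 mix too. */
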
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0889ff367c86..f58f8f5f3ebe 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1238,6 +1238,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1238 goto err_destroy_tis; 1238 goto err_destroy_tis;
1239 1239
1240 sq->base.container_mibqp = qp; 1240 sq->base.container_mibqp = qp;
1241 sq->base.mqp.event = mlx5_ib_qp_event;
1241 } 1242 }
1242 1243
1243 if (qp->rq.wqe_cnt) { 1244 if (qp->rq.wqe_cnt) {
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
551 if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) 551 if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
552 || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) 552 || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
553 int_cnt++; 553 int_cnt++;
554 msleep(1); 554 usleep_range(1000, 2000);
555 } 555 }
556 if (int_cnt > 1) { 556 if (int_cnt > 1) {
557 spin_lock_irqsave(&nesadapter->phy_lock, flags); 557 spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
592 break; 592 break;
593 } 593 }
594 } 594 }
595 msleep(1); 595 usleep_range(1000, 2000);
596 } 596 }
597 } 597 }
598 } 598 }
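
[Annotation] The msleep(1) -> usleep_range(1000, 2000) conversions here (and in the IPoIB hunks below) follow the kernel's timers-howto guidance: msleep() is jiffies-based, so a 1 ms request can oversleep to 10-20 ms on HZ=100 systems, while usleep_range() is hrtimer-backed and the explicit min/max lets the scheduler coalesce nearby wakeups:

	msleep(1);			/* jiffies granularity: may sleep 10-20 ms */
	usleep_range(1000, 2000);	/* hrtimers: wakes within 1-2 ms */
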
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2f30bda8457a..27d5e8d9f08d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -744,7 +744,8 @@ err:
744 if (is_uctx_pd) { 744 if (is_uctx_pd) {
745 ocrdma_release_ucontext_pd(uctx); 745 ocrdma_release_ucontext_pd(uctx);
746 } else { 746 } else {
747 status = _ocrdma_dealloc_pd(dev, pd); 747 if (_ocrdma_dealloc_pd(dev, pd))
748 pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__);
748 } 749 }
749exit: 750exit:
750 return ERR_PTR(status); 751 return ERR_PTR(status);
@@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1901 goto err; 1902 goto err;
1902 1903
1903 if (udata == NULL) { 1904 if (udata == NULL) {
1905 status = -ENOMEM;
1904 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt, 1906 srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * srq->rq.max_cnt,
1905 GFP_KERNEL); 1907 GFP_KERNEL);
1906 if (srq->rqe_wr_id_tbl == NULL) 1908 if (srq->rqe_wr_id_tbl == NULL)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 548e4d1e998f..2ae71b8f1ba8 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -53,6 +53,14 @@
53 53
54#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) 54#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
55 55
56static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
57 size_t len)
58{
59 size_t min_len = min_t(size_t, len, udata->outlen);
60
61 return ib_copy_to_udata(udata, src, min_len);
62}
63
56int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 64int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
57{ 65{
58 if (index > QEDR_ROCE_PKEY_TABLE_LEN) 66 if (index > QEDR_ROCE_PKEY_TABLE_LEN)
@@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
378 uresp.sges_per_srq_wr = dev->attr.max_srq_sge; 386 uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
379 uresp.max_cqes = QEDR_MAX_CQES; 387 uresp.max_cqes = QEDR_MAX_CQES;
380 388
381 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 389 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
382 if (rc) 390 if (rc)
383 goto err; 391 goto err;
384 392
@@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
499 507
500 uresp.pd_id = pd_id; 508 uresp.pd_id = pd_id;
501 509
502 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 510 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
503 if (rc) { 511 if (rc) {
504 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); 512 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
505 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); 513 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
@@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev,
729 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); 737 uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
730 uresp.icid = cq->icid; 738 uresp.icid = cq->icid;
731 739
732 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 740 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
733 if (rc) 741 if (rc)
734 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid); 742 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
735 743
@@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1238 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; 1246 uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1239 uresp.qp_id = qp->qp_id; 1247 uresp.qp_id = qp->qp_id;
1240 1248
1241 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1249 rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1242 if (rc) 1250 if (rc)
1243 DP_ERR(dev, 1251 DP_ERR(dev,
1244 "create qp: failed a copy to user space with qp icid=0x%x.\n", 1252 "create qp: failed a copy to user space with qp icid=0x%x.\n",
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
104 104
105}; 105};
106 106
107static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, 107static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
108 gfp_t gfp)
109{ 108{
110 unsigned long page = get_zeroed_page(gfp); 109 unsigned long page = get_zeroed_page(GFP_KERNEL);
111 110
112 /* 111 /*
113 * Free the page if someone raced with us installing it. 112 * Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
126 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. 125 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
127 */ 126 */
128int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 127int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
129 enum ib_qp_type type, u8 port, gfp_t gfp) 128 enum ib_qp_type type, u8 port)
130{ 129{
131 u32 i, offset, max_scan, qpn; 130 u32 i, offset, max_scan, qpn;
132 struct rvt_qpn_map *map; 131 struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
160 max_scan = qpt->nmaps - !offset; 159 max_scan = qpt->nmaps - !offset;
161 for (i = 0;;) { 160 for (i = 0;;) {
162 if (unlikely(!map->page)) { 161 if (unlikely(!map->page)) {
163 get_map_page(qpt, map, gfp); 162 get_map_page(qpt, map);
164 if (unlikely(!map->page)) 163 if (unlikely(!map->page))
165 break; 164 break;
166 } 165 }
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
317 return ib_mtu_enum_to_int(pmtu); 316 return ib_mtu_enum_to_int(pmtu);
318} 317}
319 318
320void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp) 319void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
321{ 320{
322 struct qib_qp_priv *priv; 321 struct qib_qp_priv *priv;
323 322
324 priv = kzalloc(sizeof(*priv), gfp); 323 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
325 if (!priv) 324 if (!priv)
326 return ERR_PTR(-ENOMEM); 325 return ERR_PTR(-ENOMEM);
327 priv->owner = qp; 326 priv->owner = qp;
328 327
329 priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp); 328 priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
330 if (!priv->s_hdr) { 329 if (!priv->s_hdr) {
331 kfree(priv); 330 kfree(priv);
332 return ERR_PTR(-ENOMEM); 331 return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
274 * Functions provided by qib driver for rdmavt to use 274 * Functions provided by qib driver for rdmavt to use
275 */ 275 */
276unsigned qib_free_all_qps(struct rvt_dev_info *rdi); 276unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
277void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp); 277void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
278void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); 278void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
279void qib_notify_qp_reset(struct rvt_qp *qp); 279void qib_notify_qp_reset(struct rvt_qp *qp);
280int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 280int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
281 enum ib_qp_type type, u8 port, gfp_t gfp); 281 enum ib_qp_type type, u8 port);
282void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait); 282void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
283#ifdef CONFIG_DEBUG_FS 283#ifdef CONFIG_DEBUG_FS
284 284
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 69bda611d313..90aa326fd7c0 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
65 struct pvrdma_dev *dev = to_vdev(ibcq->device); 65 struct pvrdma_dev *dev = to_vdev(ibcq->device);
66 struct pvrdma_cq *cq = to_vcq(ibcq); 66 struct pvrdma_cq *cq = to_vcq(ibcq);
67 u32 val = cq->cq_handle; 67 u32 val = cq->cq_handle;
68 unsigned long flags;
69 int has_data = 0;
68 70
69 val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 71 val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
70 PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM; 72 PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;
71 73
74 spin_lock_irqsave(&cq->cq_lock, flags);
75
72 pvrdma_write_uar_cq(dev, val); 76 pvrdma_write_uar_cq(dev, val);
73 77
74 return 0; 78 if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
79 unsigned int head;
80
81 has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
82 cq->ibcq.cqe, &head);
83 if (unlikely(has_data == PVRDMA_INVALID_IDX))
84 dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
85 }
86
87 spin_unlock_irqrestore(&cq->cq_lock, flags);
88
89 return has_data;
75} 90}
76 91
77/** 92/**
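
[Annotation] pvrdma_req_notify_cq() now honors IB_CQ_REPORT_MISSED_EVENTS: when the flag is set and completions were already queued at arming time, the verb returns a positive value so the consumer knows to poll again instead of waiting for an event that will never fire (the ring check runs under cq_lock to avoid racing the interrupt path). The canonical consumer loop looks roughly like this (handle_wc() is a stand-in):

	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle_wc(&wc);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
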
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 727e81cc2c8f..8876ee7bc326 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
118EXPORT_SYMBOL(ib_rvt_state_ops); 118EXPORT_SYMBOL(ib_rvt_state_ops);
119 119
120static void get_map_page(struct rvt_qpn_table *qpt, 120static void get_map_page(struct rvt_qpn_table *qpt,
121 struct rvt_qpn_map *map, 121 struct rvt_qpn_map *map)
122 gfp_t gfp)
123{ 122{
124 unsigned long page = get_zeroed_page(gfp); 123 unsigned long page = get_zeroed_page(GFP_KERNEL);
125 124
126 /* 125 /*
127 * Free the page if someone raced with us installing it. 126 * Free the page if someone raced with us installing it.
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
173 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end); 172 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
174 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) { 173 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
175 if (!map->page) { 174 if (!map->page) {
176 get_map_page(qpt, map, GFP_KERNEL); 175 get_map_page(qpt, map);
177 if (!map->page) { 176 if (!map->page) {
178 ret = -ENOMEM; 177 ret = -ENOMEM;
179 break; 178 break;
@@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
342 * Return: The queue pair number 341 * Return: The queue pair number
343 */ 342 */
344static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, 343static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
345 enum ib_qp_type type, u8 port_num, gfp_t gfp) 344 enum ib_qp_type type, u8 port_num)
346{ 345{
347 u32 i, offset, max_scan, qpn; 346 u32 i, offset, max_scan, qpn;
348 struct rvt_qpn_map *map; 347 struct rvt_qpn_map *map;
349 u32 ret; 348 u32 ret;
350 349
351 if (rdi->driver_f.alloc_qpn) 350 if (rdi->driver_f.alloc_qpn)
352 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp); 351 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
353 352
354 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 353 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
355 unsigned n; 354 unsigned n;
@@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
374 max_scan = qpt->nmaps - !offset; 373 max_scan = qpt->nmaps - !offset;
375 for (i = 0;;) { 374 for (i = 0;;) {
376 if (unlikely(!map->page)) { 375 if (unlikely(!map->page)) {
377 get_map_page(qpt, map, gfp); 376 get_map_page(qpt, map);
378 if (unlikely(!map->page)) 377 if (unlikely(!map->page))
379 break; 378 break;
380 } 379 }
@@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
672 struct ib_qp *ret = ERR_PTR(-ENOMEM); 671 struct ib_qp *ret = ERR_PTR(-ENOMEM);
673 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); 672 struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
674 void *priv = NULL; 673 void *priv = NULL;
675 gfp_t gfp;
676 size_t sqsize; 674 size_t sqsize;
677 675
678 if (!rdi) 676 if (!rdi)
@@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
680 678
681 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge || 679 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
682 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr || 680 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
683 init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO)) 681 init_attr->create_flags)
684 return ERR_PTR(-EINVAL); 682 return ERR_PTR(-EINVAL);
685 683
686 /* GFP_NOIO is applicable to RC QP's only */
687
688 if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
689 init_attr->qp_type != IB_QPT_RC)
690 return ERR_PTR(-EINVAL);
691
692 gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
693 GFP_NOIO : GFP_KERNEL;
694
695 /* Check receive queue parameters if no SRQ is specified. */ 684 /* Check receive queue parameters if no SRQ is specified. */
696 if (!init_attr->srq) { 685 if (!init_attr->srq) {
697 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge || 686 if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
@@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
719 sz = sizeof(struct rvt_sge) * 708 sz = sizeof(struct rvt_sge) *
720 init_attr->cap.max_send_sge + 709 init_attr->cap.max_send_sge +
721 sizeof(struct rvt_swqe); 710 sizeof(struct rvt_swqe);
722 if (gfp == GFP_NOIO) 711 swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
723 swq = __vmalloc(
724 sqsize * sz,
725 gfp | __GFP_ZERO, PAGE_KERNEL);
726 else
727 swq = vzalloc_node(
728 sqsize * sz,
729 rdi->dparms.node);
730 if (!swq) 712 if (!swq)
731 return ERR_PTR(-ENOMEM); 713 return ERR_PTR(-ENOMEM);
732 714
@@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
741 } else if (init_attr->cap.max_recv_sge > 1) 723 } else if (init_attr->cap.max_recv_sge > 1)
742 sg_list_sz = sizeof(*qp->r_sg_list) * 724 sg_list_sz = sizeof(*qp->r_sg_list) *
743 (init_attr->cap.max_recv_sge - 1); 725 (init_attr->cap.max_recv_sge - 1);
744 qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node); 726 qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
727 rdi->dparms.node);
745 if (!qp) 728 if (!qp)
746 goto bail_swq; 729 goto bail_swq;
747 730
@@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
751 kzalloc_node( 734 kzalloc_node(
752 sizeof(*qp->s_ack_queue) * 735 sizeof(*qp->s_ack_queue) *
753 rvt_max_atomic(rdi), 736 rvt_max_atomic(rdi),
754 gfp, 737 GFP_KERNEL,
755 rdi->dparms.node); 738 rdi->dparms.node);
756 if (!qp->s_ack_queue) 739 if (!qp->s_ack_queue)
757 goto bail_qp; 740 goto bail_qp;
@@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 766 * Driver needs to set up its private QP structure and do any 749
767 * initialization that is needed. 750 * initialization that is needed.
768 */ 751 */
769 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); 752 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
770 if (IS_ERR(priv)) { 753 if (IS_ERR(priv)) {
771 ret = priv; 754 ret = priv;
772 goto bail_qp; 755 goto bail_qp;
@@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
786 qp->r_rq.wq = vmalloc_user( 769 qp->r_rq.wq = vmalloc_user(
787 sizeof(struct rvt_rwq) + 770 sizeof(struct rvt_rwq) +
788 qp->r_rq.size * sz); 771 qp->r_rq.size * sz);
789 else if (gfp == GFP_NOIO)
790 qp->r_rq.wq = __vmalloc(
791 sizeof(struct rvt_rwq) +
792 qp->r_rq.size * sz,
793 gfp | __GFP_ZERO, PAGE_KERNEL);
794 else 772 else
795 qp->r_rq.wq = vzalloc_node( 773 qp->r_rq.wq = vzalloc_node(
796 sizeof(struct rvt_rwq) + 774 sizeof(struct rvt_rwq) +
@@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
824 802
825 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, 803 err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
826 init_attr->qp_type, 804 init_attr->qp_type,
827 init_attr->port_num, gfp); 805 init_attr->port_num);
828 if (err < 0) { 806 if (err < 0) {
829 ret = ERR_PTR(err); 807 ret = ERR_PTR(err);
830 goto bail_rq_wq; 808 goto bail_rq_wq;
@@ -1280,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1280 1258
1281 if (attr_mask & IB_QP_TIMEOUT) { 1259 if (attr_mask & IB_QP_TIMEOUT) {
1282 qp->timeout = attr->timeout; 1260 qp->timeout = attr->timeout;
1283 qp->timeout_jiffies = 1261 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1284 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1285 1000UL);
1286 } 1262 }
1287 1263
1288 if (attr_mask & IB_QP_QKEY) 1264 if (attr_mask & IB_QP_QKEY)
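
[Annotation] The expression removed from rvt_modify_qp() encodes the InfiniBand local-ACK-timeout rule: a timeout value t selects a delay of 4.096 us * 2^t, which the old code computed as (4096UL * (1UL << t)) / 1000UL microseconds before converting to jiffies. Presumably rvt_timeout_to_jiffies() computes the same thing; a minimal equivalent of the replaced expression:

	static inline unsigned long timeout_to_jiffies(u8 timeout)	/* 0..31 */
	{
		/* 4.096 us << timeout, expressed in whole microseconds;
		 * a production helper would clamp timeout and mind 32-bit
		 * overflow of the shift for large values. */
		return usecs_to_jiffies((4096UL << timeout) / 1000UL);
	}
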
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c3a140ed4df2..08f3f90d2912 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)
441 if (unlikely(qp->need_req_skb && 441 if (unlikely(qp->need_req_skb &&
442 skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) 442 skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
443 rxe_run_task(&qp->req.task, 1); 443 rxe_run_task(&qp->req.task, 1);
444
445 rxe_drop_ref(qp);
444} 446}
445 447
446int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb) 448int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
@@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
473 return -EAGAIN; 475 return -EAGAIN;
474 } 476 }
475 477
478 rxe_add_ref(pkt->qp);
476 atomic_inc(&pkt->qp->skb_out); 479 atomic_inc(&pkt->qp->skb_out);
477 kfree_skb(skb); 480 kfree_skb(skb);
478 481
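
[Annotation] The rxe_net.c pair closes a use-after-free window: a transmitted skb can outlive the QP it points back to, so the QP must stay pinned for the skb's whole lifetime. This is the standard reference rule for asynchronous callbacks:

	/* transmit path: pin before handing the skb to the stack */
	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	/* skb destructor, possibly long after rxe_send() returned */
	rxe_drop_ref(qp);	/* unpin; the QP may be freed only now */
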
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index be944d5aa9af..a958ee918a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1219,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
1219 kfree_skb(skb); 1219 kfree_skb(skb);
1220 } 1220 }
1221 1221
1222 if (notify)
1223 return;
1224
1222 while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue)) 1225 while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
1223 advance_consumer(qp->rq.queue); 1226 advance_consumer(qp->rq.queue);
1224} 1227}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 073e66783f1d..af90a7d42b96 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
914 914
915 spin_unlock_irqrestore(&rq->producer_lock, flags); 915 spin_unlock_irqrestore(&rq->producer_lock, flags);
916 916
917 if (qp->resp.state == QP_STATE_ERROR)
918 rxe_run_task(&qp->resp.task, 1);
919
917err1: 920err1:
918 return err; 921 return err;
919} 922}
@@ -1240,6 +1243,8 @@ int rxe_register_device(struct rxe_dev *rxe)
1240 addrconf_addr_eui48((unsigned char *)&dev->node_guid, 1243 addrconf_addr_eui48((unsigned char *)&dev->node_guid,
1241 rxe->ndev->dev_addr); 1244 rxe->ndev->dev_addr);
1242 dev->dev.dma_ops = &dma_virt_ops; 1245 dev->dev.dma_ops = &dma_virt_ops;
1246 dma_coerce_mask_and_coherent(&dev->dev,
1247 dma_get_required_mask(dev->dev.parent));
1243 1248
1244 dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; 1249 dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
1245 dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) 1250 dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ff50a7bd66d8..7ac25059c40f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
336 unsigned long flags; 336 unsigned long flags;
337 337
338 struct rw_semaphore vlan_rwsem; 338 struct rw_semaphore vlan_rwsem;
339 struct mutex mcast_mutex;
339 340
340 struct rb_root path_tree; 341 struct rb_root path_tree;
341 struct list_head path_list; 342 struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 7cbcfdac6529..d69410c2ed97 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -39,6 +39,7 @@
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/sched/signal.h> 41#include <linux/sched/signal.h>
42#include <linux/sched/mm.h>
42 43
43#include "ipoib.h" 44#include "ipoib.h"
44 45
@@ -510,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
510 case IB_CM_REQ_RECEIVED: 511 case IB_CM_REQ_RECEIVED:
511 return ipoib_cm_req_handler(cm_id, event); 512 return ipoib_cm_req_handler(cm_id, event);
512 case IB_CM_DREQ_RECEIVED: 513 case IB_CM_DREQ_RECEIVED:
513 p = cm_id->context;
514 ib_send_cm_drep(cm_id, NULL, 0); 514 ib_send_cm_drep(cm_id, NULL, 0);
515 /* Fall through */ 515 /* Fall through */
516 case IB_CM_REJ_RECEIVED: 516 case IB_CM_REJ_RECEIVED:
@@ -954,7 +954,7 @@ void ipoib_cm_dev_stop(struct net_device *dev)
954 break; 954 break;
955 } 955 }
956 spin_unlock_irq(&priv->lock); 956 spin_unlock_irq(&priv->lock);
957 msleep(1); 957 usleep_range(1000, 2000);
958 ipoib_drain_cq(dev); 958 ipoib_drain_cq(dev);
959 spin_lock_irq(&priv->lock); 959 spin_lock_irq(&priv->lock);
960 } 960 }
@@ -1047,9 +1047,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1047 .sq_sig_type = IB_SIGNAL_ALL_WR, 1047 .sq_sig_type = IB_SIGNAL_ALL_WR,
1048 .qp_type = IB_QPT_RC, 1048 .qp_type = IB_QPT_RC,
1049 .qp_context = tx, 1049 .qp_context = tx,
1050 .create_flags = IB_QP_CREATE_USE_GFP_NOIO 1050 .create_flags = 0
1051 }; 1051 };
1052
1053 struct ib_qp *tx_qp; 1052 struct ib_qp *tx_qp;
1054 1053
1055 if (dev->features & NETIF_F_SG) 1054 if (dev->features & NETIF_F_SG)
@@ -1057,10 +1056,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
1057 min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1); 1056 min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
1058 1057
1059 tx_qp = ib_create_qp(priv->pd, &attr); 1058 tx_qp = ib_create_qp(priv->pd, &attr);
1060 if (PTR_ERR(tx_qp) == -EINVAL) {
1061 attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
1062 tx_qp = ib_create_qp(priv->pd, &attr);
1063 }
1064 tx->max_send_sge = attr.cap.max_send_sge; 1059 tx->max_send_sge = attr.cap.max_send_sge;
1065 return tx_qp; 1060 return tx_qp;
1066} 1061}
@@ -1131,10 +1126,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1131 struct sa_path_rec *pathrec) 1126 struct sa_path_rec *pathrec)
1132{ 1127{
1133 struct ipoib_dev_priv *priv = ipoib_priv(p->dev); 1128 struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
1129 unsigned int noio_flag;
1134 int ret; 1130 int ret;
1135 1131
1136 p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring, 1132 noio_flag = memalloc_noio_save();
1137 GFP_NOIO, PAGE_KERNEL); 1133 p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
1138 if (!p->tx_ring) { 1134 if (!p->tx_ring) {
1139 ret = -ENOMEM; 1135 ret = -ENOMEM;
1140 goto err_tx; 1136 goto err_tx;
@@ -1142,9 +1138,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1142 memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring); 1138 memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1143 1139
1144 p->qp = ipoib_cm_create_tx_qp(p->dev, p); 1140 p->qp = ipoib_cm_create_tx_qp(p->dev, p);
1141 memalloc_noio_restore(noio_flag);
1145 if (IS_ERR(p->qp)) { 1142 if (IS_ERR(p->qp)) {
1146 ret = PTR_ERR(p->qp); 1143 ret = PTR_ERR(p->qp);
1147 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); 1144 ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
1148 goto err_qp; 1145 goto err_qp;
1149 } 1146 }
1150 1147
@@ -1206,7 +1203,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1206 goto timeout; 1203 goto timeout;
1207 } 1204 }
1208 1205
1209 msleep(1); 1206 usleep_range(1000, 2000);
1210 } 1207 }
1211 } 1208 }
1212 1209
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 7871379342f4..184a22f48027 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
52 IPOIB_NETDEV_STAT(tx_bytes), 52 IPOIB_NETDEV_STAT(tx_bytes),
53 IPOIB_NETDEV_STAT(tx_errors), 53 IPOIB_NETDEV_STAT(tx_errors),
54 IPOIB_NETDEV_STAT(rx_dropped), 54 IPOIB_NETDEV_STAT(rx_dropped),
55 IPOIB_NETDEV_STAT(tx_dropped) 55 IPOIB_NETDEV_STAT(tx_dropped),
56 IPOIB_NETDEV_STAT(multicast),
56}; 57};
57 58
58#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats) 59#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index efe7402f4885..2e075377242e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
256 256
257 ++dev->stats.rx_packets; 257 ++dev->stats.rx_packets;
258 dev->stats.rx_bytes += skb->len; 258 dev->stats.rx_bytes += skb->len;
259 if (skb->pkt_type == PACKET_MULTICAST)
260 dev->stats.multicast++;
259 261
260 skb->dev = dev; 262 skb->dev = dev;
261 if ((dev->features & NETIF_F_RXCSUM) && 263 if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
709 return pending; 711 return pending;
710} 712}
711 713
714static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
715 struct ib_qp *qp,
716 enum ib_qp_state new_state)
717{
718 struct ib_qp_attr qp_attr;
719 struct ib_qp_init_attr query_init_attr;
720 int ret;
721
722 ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
723 if (ret) {
724 ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
725 return;
726 }
 727 /* Print according to the new state and the previous state. */
728 if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
729 ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
730 else
731 ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
732 new_state, qp_attr.qp_state);
733}
734
712int ipoib_ib_dev_stop_default(struct net_device *dev) 735int ipoib_ib_dev_stop_default(struct net_device *dev)
713{ 736{
714 struct ipoib_dev_priv *priv = ipoib_priv(dev); 737 struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
728 */ 751 */
729 qp_attr.qp_state = IB_QPS_ERR; 752 qp_attr.qp_state = IB_QPS_ERR;
730 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) 753 if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
731 ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); 754 check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
732 755
733 /* Wait for all sends and receives to complete */ 756 /* Wait for all sends and receives to complete */
734 begin = jiffies; 757 begin = jiffies;
@@ -770,7 +793,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
770 793
771 ipoib_drain_cq(dev); 794 ipoib_drain_cq(dev);
772 795
773 msleep(1); 796 usleep_range(1000, 2000);
774 } 797 }
775 798
776 ipoib_dbg(priv, "All sends and receives done.\n"); 799 ipoib_dbg(priv, "All sends and receives done.\n");
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6e86eeee370e..6c77df34869d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
233static int ipoib_change_mtu(struct net_device *dev, int new_mtu) 233static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
234{ 234{
235 struct ipoib_dev_priv *priv = ipoib_priv(dev); 235 struct ipoib_dev_priv *priv = ipoib_priv(dev);
236 int ret = 0;
236 237
237 /* dev->mtu > 2K ==> connected mode */ 238 /* dev->mtu > 2K ==> connected mode */
238 if (ipoib_cm_admin_enabled(dev)) { 239 if (ipoib_cm_admin_enabled(dev)) {
@@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
256 ipoib_dbg(priv, "MTU must be smaller than the underlying " 257 ipoib_dbg(priv, "MTU must be smaller than the underlying "
257 "link layer MTU - 4 (%u)\n", priv->mcast_mtu); 258 "link layer MTU - 4 (%u)\n", priv->mcast_mtu);
258 259
259 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); 260 new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
260 261
261 return 0; 262 if (priv->rn_ops->ndo_change_mtu) {
263 bool carrier_status = netif_carrier_ok(dev);
264
265 netif_carrier_off(dev);
266
267 /* notify lower level on the real mtu */
268 ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
269
270 if (carrier_status)
271 netif_carrier_on(dev);
272 } else {
273 dev->mtu = new_mtu;
274 }
275
276 return ret;
277}
278
279static void ipoib_get_stats(struct net_device *dev,
280 struct rtnl_link_stats64 *stats)
281{
282 struct ipoib_dev_priv *priv = ipoib_priv(dev);
283
284 if (priv->rn_ops->ndo_get_stats64)
285 priv->rn_ops->ndo_get_stats64(dev, stats);
286 else
287 netdev_stats_to_stats64(stats, &dev->stats);
262} 288}
263 289
264/* Called with an RCU read lock taken */ 290/* Called with an RCU read lock taken */
@@ -1534,6 +1560,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
1534 int i, wait_flushed = 0; 1560 int i, wait_flushed = 0;
1535 1561
1536 init_completion(&priv->ntbl.flushed); 1562 init_completion(&priv->ntbl.flushed);
1563 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1537 1564
1538 spin_lock_irqsave(&priv->lock, flags); 1565 spin_lock_irqsave(&priv->lock, flags);
1539 1566
@@ -1578,7 +1605,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
1578 1605
1579 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); 1606 ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
1580 init_completion(&priv->ntbl.deleted); 1607 init_completion(&priv->ntbl.deleted);
1581 set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
1582 1608
1583 /* Stop GC if called at init fail need to cancel work */ 1609 /* Stop GC if called at init fail need to cancel work */
1584 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags); 1610 stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1808,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
1808 .ndo_get_vf_stats = ipoib_get_vf_stats, 1834 .ndo_get_vf_stats = ipoib_get_vf_stats,
1809 .ndo_set_vf_guid = ipoib_set_vf_guid, 1835 .ndo_set_vf_guid = ipoib_set_vf_guid,
1810 .ndo_set_mac_address = ipoib_set_mac, 1836 .ndo_set_mac_address = ipoib_set_mac,
1837 .ndo_get_stats64 = ipoib_get_stats,
1811}; 1838};
1812 1839
1813static const struct net_device_ops ipoib_netdev_ops_vf = { 1840static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1820,6 +1847,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
1820 .ndo_tx_timeout = ipoib_timeout, 1847 .ndo_tx_timeout = ipoib_timeout,
1821 .ndo_set_rx_mode = ipoib_set_mcast_list, 1848 .ndo_set_rx_mode = ipoib_set_mcast_list,
1822 .ndo_get_iflink = ipoib_get_iflink, 1849 .ndo_get_iflink = ipoib_get_iflink,
1850 .ndo_get_stats64 = ipoib_get_stats,
1823}; 1851};
1824 1852
1825void ipoib_setup_common(struct net_device *dev) 1853void ipoib_setup_common(struct net_device *dev)
@@ -1850,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
1850 priv->dev = dev; 1878 priv->dev = dev;
1851 spin_lock_init(&priv->lock); 1879 spin_lock_init(&priv->lock);
1852 init_rwsem(&priv->vlan_rwsem); 1880 init_rwsem(&priv->vlan_rwsem);
1881 mutex_init(&priv->mcast_mutex);
1853 1882
1854 INIT_LIST_HEAD(&priv->path_list); 1883 INIT_LIST_HEAD(&priv->path_list);
1855 INIT_LIST_HEAD(&priv->child_intfs); 1884 INIT_LIST_HEAD(&priv->child_intfs);
@@ -2146,14 +2175,14 @@ static struct net_device *ipoib_add_port(const char *format,
2146 priv->dev->dev_id = port - 1; 2175 priv->dev->dev_id = port - 1;
2147 2176
2148 result = ib_query_port(hca, port, &attr); 2177 result = ib_query_port(hca, port, &attr);
2149 if (!result) 2178 if (result) {
2150 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
2151 else {
2152 printk(KERN_WARNING "%s: ib_query_port %d failed\n", 2179 printk(KERN_WARNING "%s: ib_query_port %d failed\n",
2153 hca->name, port); 2180 hca->name, port);
2154 goto device_init_failed; 2181 goto device_init_failed;
2155 } 2182 }
2156 2183
2184 priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
2185
2157 /* MTU will be reset when mcast join happens */ 2186 /* MTU will be reset when mcast join happens */
2158 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 2187 priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
2159 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 2188 priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
@@ -2184,12 +2213,14 @@ static struct net_device *ipoib_add_port(const char *format,
2184 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n", 2213 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
2185 hca->name, port, result); 2214 hca->name, port, result);
2186 goto device_init_failed; 2215 goto device_init_failed;
2187 } else 2216 }
2188 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid)); 2217
2218 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
2219 sizeof(union ib_gid));
2189 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); 2220 set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
2190 2221
2191 result = ipoib_dev_init(priv->dev, hca, port); 2222 result = ipoib_dev_init(priv->dev, hca, port);
2192 if (result < 0) { 2223 if (result) {
2193 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n", 2224 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
2194 hca->name, port, result); 2225 hca->name, port, result);
2195 goto device_init_failed; 2226 goto device_init_failed;
@@ -2212,6 +2243,7 @@ static struct net_device *ipoib_add_port(const char *format,
2212 goto register_failed; 2243 goto register_failed;
2213 } 2244 }
2214 2245
2246 result = -ENOMEM;
2215 if (ipoib_cm_add_mode_attr(priv->dev)) 2247 if (ipoib_cm_add_mode_attr(priv->dev))
2216 goto sysfs_failed; 2248 goto sysfs_failed;
2217 if (ipoib_add_pkey_attr(priv->dev)) 2249 if (ipoib_add_pkey_attr(priv->dev))
@@ -2337,6 +2369,7 @@ static int __init ipoib_init_module(void)
2337 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE); 2369 ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
2338#ifdef CONFIG_INFINIBAND_IPOIB_CM 2370#ifdef CONFIG_INFINIBAND_IPOIB_CM
2339 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP); 2371 ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
2372 ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
2340#endif 2373#endif
2341 2374
2342 /* 2375 /*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 057f58e6afca..93e149efc1f5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
684int ipoib_mcast_stop_thread(struct net_device *dev) 684int ipoib_mcast_stop_thread(struct net_device *dev)
685{ 685{
686 struct ipoib_dev_priv *priv = ipoib_priv(dev); 686 struct ipoib_dev_priv *priv = ipoib_priv(dev);
687 unsigned long flags;
688 687
689 ipoib_dbg_mcast(priv, "stopping multicast thread\n"); 688 ipoib_dbg_mcast(priv, "stopping multicast thread\n");
690 689
691 spin_lock_irqsave(&priv->lock, flags); 690 cancel_delayed_work_sync(&priv->mcast_task);
692 cancel_delayed_work(&priv->mcast_task);
693 spin_unlock_irqrestore(&priv->lock, flags);
694
695 flush_workqueue(priv->wq);
696 691
697 return 0; 692 return 0;
698} 693}
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
748{ 743{
749 struct ipoib_mcast *mcast, *tmcast; 744 struct ipoib_mcast *mcast, *tmcast;
750 745
746 /*
747 * make sure the in-flight joins have finished before we attempt
748 * to leave
749 */
750 list_for_each_entry_safe(mcast, tmcast, remove_list, list)
751 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
752 wait_for_completion(&mcast->done);
753
751 list_for_each_entry_safe(mcast, tmcast, remove_list, list) { 754 list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
752 ipoib_mcast_leave(mcast->dev, mcast); 755 ipoib_mcast_leave(mcast->dev, mcast);
753 ipoib_mcast_free(mcast); 756 ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
838 struct ipoib_mcast *mcast, *tmcast; 841 struct ipoib_mcast *mcast, *tmcast;
839 unsigned long flags; 842 unsigned long flags;
840 843
844 mutex_lock(&priv->mcast_mutex);
841 ipoib_dbg_mcast(priv, "flushing multicast list\n"); 845 ipoib_dbg_mcast(priv, "flushing multicast list\n");
842 846
843 spin_lock_irqsave(&priv->lock, flags); 847 spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
856 860
857 spin_unlock_irqrestore(&priv->lock, flags); 861 spin_unlock_irqrestore(&priv->lock, flags);
858 862
859 /*
860 * make sure the in-flight joins have finished before we attempt
861 * to leave
862 */
863 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
864 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
865 wait_for_completion(&mcast->done);
866
867 ipoib_mcast_remove_list(&remove_list); 863 ipoib_mcast_remove_list(&remove_list);
864 mutex_unlock(&priv->mcast_mutex);
868} 865}
869 866
870static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) 867static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
982 netif_addr_unlock(dev); 979 netif_addr_unlock(dev);
983 local_irq_restore(flags); 980 local_irq_restore(flags);
984 981
985 /*
986 * make sure the in-flight joins have finished before we attempt
987 * to leave
988 */
989 list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
990 if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
991 wait_for_completion(&mcast->done);
992
993 ipoib_mcast_remove_list(&remove_list); 982 ipoib_mcast_remove_list(&remove_list);
994 983
995 /* 984 /*
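
[Annotation] Two related simplifications in ipoib_multicast.c: ipoib_mcast_stop_thread() replaces cancel_delayed_work() plus flush_workqueue() under a spinlock with a bare cancel_delayed_work_sync(), which waits for a running instance and copes with self-requeueing work; and the wait for in-flight joins moves into ipoib_mcast_remove_list() so every caller gets it, with flushes serialized by the new priv->mcast_mutex. The cancellation idiom in general form:

	/* racy: the work item may requeue itself between the two calls */
	cancel_delayed_work(&priv->mcast_task);
	flush_workqueue(priv->wq);

	/* safe: cancels, waits for execution, handles self-requeueing */
	cancel_delayed_work_sync(&priv->mcast_task);
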
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5a887efb4bdf..37b33d708c2d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht;
83static struct iscsi_transport iscsi_iser_transport; 83static struct iscsi_transport iscsi_iser_transport;
84static struct scsi_transport_template *iscsi_iser_scsi_transport; 84static struct scsi_transport_template *iscsi_iser_scsi_transport;
85static struct workqueue_struct *release_wq; 85static struct workqueue_struct *release_wq;
86static DEFINE_MUTEX(unbind_iser_conn_mutex);
86struct iser_global ig; 87struct iser_global ig;
87 88
88int iser_debug_level = 0; 89int iser_debug_level = 0;
@@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
550 */ 551 */
551 if (iser_conn) { 552 if (iser_conn) {
552 mutex_lock(&iser_conn->state_mutex); 553 mutex_lock(&iser_conn->state_mutex);
554 mutex_lock(&unbind_iser_conn_mutex);
553 iser_conn_terminate(iser_conn); 555 iser_conn_terminate(iser_conn);
554 iscsi_conn_stop(cls_conn, flag); 556 iscsi_conn_stop(cls_conn, flag);
555 557
556 /* unbind */ 558 /* unbind */
557 iser_conn->iscsi_conn = NULL; 559 iser_conn->iscsi_conn = NULL;
558 conn->dd_data = NULL; 560 conn->dd_data = NULL;
561 mutex_unlock(&unbind_iser_conn_mutex);
559 562
560 complete(&iser_conn->stop_completion); 563 complete(&iser_conn->stop_completion);
561 mutex_unlock(&iser_conn->state_mutex); 564 mutex_unlock(&iser_conn->state_mutex);
@@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
977 struct iser_conn *iser_conn; 980 struct iser_conn *iser_conn;
978 struct ib_device *ib_dev; 981 struct ib_device *ib_dev;
979 982
983 mutex_lock(&unbind_iser_conn_mutex);
984
980 session = starget_to_session(scsi_target(sdev))->dd_data; 985 session = starget_to_session(scsi_target(sdev))->dd_data;
981 iser_conn = session->leadconn->dd_data; 986 iser_conn = session->leadconn->dd_data;
987 if (!iser_conn) {
988 mutex_unlock(&unbind_iser_conn_mutex);
989 return -ENOTCONN;
990 }
982 ib_dev = iser_conn->ib_conn.device->ib_device; 991 ib_dev = iser_conn->ib_conn.device->ib_device;
983 992
984 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) 993 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
985 blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); 994 blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
986 995
996 mutex_unlock(&unbind_iser_conn_mutex);
997
987 return 0; 998 return 0;
988} 999}
989 1000
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 12ed62ce9ff7..2a07692007bd 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task,
137 137
138 if (unsol_sz < edtl) { 138 if (unsol_sz < edtl) {
139 hdr->flags |= ISER_WSV; 139 hdr->flags |= ISER_WSV;
140 hdr->write_stag = cpu_to_be32(mem_reg->rkey); 140 if (buf_out->data_len > imm_sz) {
141 hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz); 141 hdr->write_stag = cpu_to_be32(mem_reg->rkey);
142 hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
143 }
142 144
143 iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X " 145 iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X "
144 "VA:%#llX + unsol:%d\n", 146 "VA:%#llX + unsol:%d\n",
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index c538a38c91ce..26a004e97ae0 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
708 unsigned short sg_tablesize, sup_sg_tablesize; 708 unsigned short sg_tablesize, sup_sg_tablesize;
709 709
710 sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); 710 sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
711 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 711 if (device->ib_device->attrs.device_cap_flags &
712 device->ib_device->attrs.max_fast_reg_page_list_len); 712 IB_DEVICE_MEM_MGT_EXTENSIONS)
713 sup_sg_tablesize =
714 min_t(
715 uint, ISCSI_ISER_MAX_SG_TABLESIZE,
716 device->ib_device->attrs.max_fast_reg_page_list_len);
717 else
718 sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE;
713 719
714 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); 720 iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
715} 721}
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index f600f3a7a3c6..23520df7650f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -331,7 +331,7 @@ static int soc_button_probe(struct platform_device *pdev)
331 error = gpiod_count(dev, NULL); 331 error = gpiod_count(dev, NULL);
332 if (error < 0) { 332 if (error < 0) {
333 dev_dbg(dev, "no GPIO attached, ignoring...\n"); 333 dev_dbg(dev, "no GPIO attached, ignoring...\n");
334 return error; 334 return -ENODEV;
335 } 335 }
336 336
337 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 337 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 262d1057c1da..850b00e3ad8e 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1215,14 +1215,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
 
 	case SS4_PACKET_ID_TWO:
 		if (priv->flags & ALPS_BUTTONPAD) {
-			f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+			}
 			f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
-			f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
 			f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
 		} else {
-			f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+			}
 			f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
-			f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
 			f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
 		}
 		f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@@ -1239,16 +1249,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,
 
 	case SS4_PACKET_ID_MULTI:
 		if (priv->flags & ALPS_BUTTONPAD) {
-			f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+			} else {
+				f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+				f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+			}
+
 			f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
-			f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
 			f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
 			no_data_x = SS4_MFPACKET_NO_AX_BL;
 			no_data_y = SS4_MFPACKET_NO_AY_BL;
 		} else {
-			f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+			if (IS_SS4PLUS_DEV(priv->dev_id)) {
+				f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+			} else {
+				f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+				f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+			}
 			f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
-			f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
 			f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
 			no_data_x = SS4_MFPACKET_NO_AX;
 			no_data_y = SS4_MFPACKET_NO_AY;
@@ -2541,8 +2562,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,
 
 	memset(otp, 0, sizeof(otp));
 
-	if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
-	    alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+	if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
+	    alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
 		return -1;
 
 	alps_update_device_area_ss4_v2(otp, priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index ed2d6879fa52..c80a7c76cb76 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -100,6 +100,10 @@ enum SS4_PACKET_ID {
 				 ((_b[1 + _i * 3] << 5) & 0x1F00)	\
 				)
 
+#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
+				 ((_b[1 + (_i) * 3] << 4) & 0x0F80)	\
+				)
+
 #define SS4_STD_MF_Y_V2(_b, _i)	(((_b[1 + (_i) * 3] << 3) & 0x0010) |	\
 				 ((_b[2 + (_i) * 3] << 5) & 0x01E0) |	\
 				 ((_b[2 + (_i) * 3] << 4) & 0x0E00)	\
@@ -109,6 +113,10 @@ enum SS4_PACKET_ID {
 				 ((_b[0 + (_i) * 3] >> 3) & 0x0010)	\
 				)
 
+#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) |	\
+				 ((_b[0 + (_i) * 3] >> 4) & 0x0008)	\
+				)
+
 #define SS4_BTL_MF_Y_V2(_b, _i)	(SS4_STD_MF_Y_V2(_b, _i) |		\
 				 ((_b[0 + (_i) * 3] >> 3) & 0x0008)	\
 				)
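
[Editor's note] The SS4_PLUS_* helpers above extract the X coordinate from a
3-byte-per-finger slice of the report. A hedged, standalone sketch of how they
unpack bits (illustration only; the sample bytes are invented, and the macros
are copied here so the snippet compiles on its own):

    #include <stdint.h>
    #include <stdio.h>

    #define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
                                          ((_b[1 + (_i) * 3] << 4) & 0x0F80))
    #define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
                                          ((_b[0 + (_i) * 3] >> 4) & 0x0008))

    int main(void)
    {
        /* Invented sample: two 3-byte finger slots as the decoder sees them. */
        uint8_t p[6] = { 0x9a, 0x5c, 0x00, 0x27, 0xb1, 0x00 };

        printf("finger0 std x = %#x\n", SS4_PLUS_STD_MF_X_V2(p, 0));
        printf("finger0 btl x = %#x\n", SS4_PLUS_BTL_MF_X_V2(p, 0));
        printf("finger1 std x = %#x\n", SS4_PLUS_STD_MF_X_V2(p, 1));
        return 0;
    }

The BTL ("buttonpad") variant simply folds one extra low-order bit, taken from
the top nibble of byte 0, into the STD value.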
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 3b616cb7c67f..cfbc8ba4c96c 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1247,7 +1247,12 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
 	{ "ELAN0100", 0 },
 	{ "ELAN0600", 0 },
+	{ "ELAN0602", 0 },
 	{ "ELAN0605", 0 },
+	{ "ELAN0608", 0 },
+	{ "ELAN0605", 0 },
+	{ "ELAN0609", 0 },
+	{ "ELAN060B", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 922ea02edcc3..0871010f18d5 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
 	if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
 		return -1;
 
-	if (param[0] != TP_MAGIC_IDENT)
+	/* add new TP ID. */
+	if (!(param[0] & TP_MAGIC_IDENT))
 		return -1;
 
 	if (firmware_id)
@@ -380,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 		return 0;
 
 	if (trackpoint_read(ps2dev, TP_EXT_BTN, &button_info)) {
-		psmouse_warn(psmouse, "failed to get extended button data\n");
-		button_info = 0;
+		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+		button_info = 0x33;
 	}
 
 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3a7d7a..88055755f82e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
 #define TP_COMMAND		0xE2	/* Commands start with this */
 
 #define TP_READ_ID		0xE1	/* Sent for device identification */
-#define TP_MAGIC_IDENT		0x01	/* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT		0x03	/* Sent after a TP_READ_ID followed */
 					/* by the firmware ID */
+					/* Firmware ID includes 0x1, 0x2, 0x3 */
 
 
 /*
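
[Editor's note] The trackpoint check above changes from an equality test
against 0x01 to a mask test against 0x03, so firmware IDs 0x1, 0x2 and 0x3 are
all accepted. A hedged, standalone sketch of what the relaxed test accepts and
rejects (illustration only):

    #include <stdio.h>

    #define TP_MAGIC_IDENT 0x03	/* mask: firmware IDs 0x1, 0x2, 0x3 pass */

    int main(void)
    {
        for (int id = 0; id <= 4; id++)
            printf("param[0]=0x%02x -> %s\n", id,
                   !(id & TP_MAGIC_IDENT) ? "rejected" : "accepted");
        return 0;
    }

Note that 0x4 is still rejected: only IDs overlapping the low two bits match.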
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 688e77576e5a..354cbd6392cd 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4452,6 +4452,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 		/* Setting */
 		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
 		irte->hi.fields.vector = vcpu_pi_info->vector;
+		irte->lo.fields_vapic.ga_log_intr = 1;
 		irte->lo.fields_vapic.guest_mode = 1;
 		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
 
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 5cc597b383c7..372303700566 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2440,11 +2440,11 @@ static int __init state_next(void)
 		break;
 	case IOMMU_ACPI_FINISHED:
 		early_enable_iommus();
-		register_syscore_ops(&amd_iommu_syscore_ops);
 		x86_platform.iommu_shutdown = disable_iommus;
 		init_state = IOMMU_ENABLED;
 		break;
 	case IOMMU_ENABLED:
+		register_syscore_ops(&amd_iommu_syscore_ops);
 		ret = amd_iommu_init_pci();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
 		enable_iommus_v2();
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 294a409e283b..d6b873b57054 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -574,7 +574,9 @@ struct amd_iommu {
 
 static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
 {
-	return container_of(dev, struct amd_iommu, iommu.dev);
+	struct iommu_device *iommu = dev_to_iommu_device(dev);
+
+	return container_of(iommu, struct amd_iommu, iommu);
 }
 
 #define ACPIHID_UID_LEN			256
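
[Editor's note] This conversion (the same one appears in intel-iommu.c below)
stops doing container_of() on a struct device that is no longer embedded, and
instead resolves the embedded struct iommu_device first. A hedged, standalone
sketch of the container_of round trip with simplified stand-in structs, not
the kernel's real definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified stand-ins for the kernel structures. */
    struct iommu_device { int id; };
    struct amd_iommu {
            int mmio;
            struct iommu_device iommu;  /* embedded core handle */
    };

    int main(void)
    {
            struct amd_iommu amd = { .mmio = 42, .iommu = { .id = 7 } };
            struct iommu_device *core = &amd.iommu;

            /* Recover the wrapper from the embedded member. */
            struct amd_iommu *back = container_of(core, struct amd_iommu, iommu);

            printf("round trip ok: %d\n", back->mmio);  /* prints 42 */
            return 0;
    }

The helper only works for members that are embedded by value, which is why the
sysfs rework below had to add dev_to_iommu_device() once the device became a
separately allocated object.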
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index bc89b4d6c043..2d80fa8a0634 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -400,6 +400,8 @@ struct arm_smmu_device {
 
 	u32				cavium_id_base; /* Specific to Cavium */
 
+	spinlock_t			global_sync_lock;
+
 	/* IOMMU core code handle */
 	struct iommu_device		iommu;
 };
@@ -436,7 +438,7 @@ struct arm_smmu_domain {
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
-	spinlock_t			cb_lock; /* Serialises ATS1* ops */
+	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
 };
 
@@ -602,9 +604,12 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
 {
 	void __iomem *base = ARM_SMMU_GR0(smmu);
+	unsigned long flags;
 
+	spin_lock_irqsave(&smmu->global_sync_lock, flags);
 	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
 			    base + ARM_SMMU_GR0_sTLBGSTATUS);
+	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
 }
 
 static void arm_smmu_tlb_sync_context(void *cookie)
@@ -612,9 +617,12 @@ static void arm_smmu_tlb_sync_context(void *cookie)
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+	unsigned long flags;
 
+	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
 	__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
 			    base + ARM_SMMU_CB_TLBSTATUS);
+	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
 }
 
 static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1511,6 +1519,12 @@ static int arm_smmu_add_device(struct device *dev)
 
 	if (using_legacy_binding) {
 		ret = arm_smmu_register_legacy_master(dev, &smmu);
+
+		/*
+		 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
+		 * will allocate/initialise a new one. Thus we need to update fwspec for
+		 * later use.
+		 */
 		fwspec = dev->iommu_fwspec;
 		if (ret)
 			goto out_free;
@@ -1550,15 +1564,15 @@ static int arm_smmu_add_device(struct device *dev)
 
 	ret = arm_smmu_master_alloc_smes(dev);
 	if (ret)
-		goto out_free;
+		goto out_cfg_free;
 
 	iommu_device_link(&smmu->iommu, dev);
 
 	return 0;
 
+out_cfg_free:
+	kfree(cfg);
 out_free:
-	if (fwspec)
-		kfree(fwspec->iommu_priv);
 	iommu_fwspec_free(dev);
 	return ret;
 }
@@ -1925,6 +1939,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 
 	smmu->num_mapping_groups = size;
 	mutex_init(&smmu->stream_map_mutex);
+	spin_lock_init(&smmu->global_sync_lock);
 
 	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
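
[Editor's note] The arm-smmu hunks above reintroduce a lock around the
"write sync register, then poll status register" sequence so that concurrent
callers cannot interleave their writes and polls. A hedged, standalone sketch
of the pattern (illustration only; a pthread mutex stands in for the kernel's
spin_lock_irqsave(), and a plain int stands in for the MMIO register pair):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t global_sync_lock = PTHREAD_MUTEX_INITIALIZER;
    static int tlbgsync;  /* stand-in for the sTLBGSYNC/sTLBGSTATUS pair */

    static void tlb_sync_global(void)
    {
            pthread_mutex_lock(&global_sync_lock);
            tlbgsync = 1;         /* "write the sync register" */
            while (tlbgsync)      /* "poll the status register" */
                    tlbgsync = 0; /* real hardware clears this when done */
            pthread_mutex_unlock(&global_sync_lock);
    }

    static void *worker(void *unused)
    {
            for (int i = 0; i < 1000; i++)
                    tlb_sync_global();
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;
            pthread_create(&a, NULL, worker, NULL);
            pthread_create(&b, NULL, worker, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            puts("2000 serialised syncs completed");
            return 0;
    }

Without the lock, thread B's sync write could land between thread A's write
and poll, so A could return before its own invalidation had drained.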
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 687f18f65cea..3e8636f1220e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4736,7 +4736,9 @@ static void intel_disable_iommus(void)
 
 static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
 {
-	return container_of(dev, struct intel_iommu, iommu.dev);
+	struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
+
+	return container_of(iommu_dev, struct intel_iommu, iommu);
 }
 
 static ssize_t intel_iommu_show_version(struct device *dev,
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index af330f513653..d665d0dc16e8 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -479,6 +479,9 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
 	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		return 0;
 
+	if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))
+		return -ERANGE;
+
 	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
 	/*
 	 * Synchronise all PTE updates for the new mapping before there's
@@ -659,6 +662,9 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	size_t unmapped;
 
+	if (WARN_ON(upper_32_bits(iova)))
+		return 0;
+
 	unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
 	if (unmapped)
 		io_pgtable_tlb_sync(&data->iop);
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index b182039862c5..e8018a308868 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -452,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
 		return 0;
 
+	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
+		    paddr >= (1ULL << data->iop.cfg.oas)))
+		return -ERANGE;
+
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
 	/*
@@ -610,6 +614,9 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 	arm_lpae_iopte *ptep = data->pgd;
 	int lvl = ARM_LPAE_START_LVL(data);
 
+	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
+		return 0;
+
 	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
 	if (unmapped)
 		io_pgtable_tlb_sync(&data->iop);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 524263a7ae6f..a3e667077b14 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -158,14 +158,12 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops);
  * @fmt:    The page table format.
  * @cookie: An opaque token provided by the IOMMU driver and passed back to
  *          any callback routines.
- * @tlb_sync_pending: Private flag for optimising out redundant syncs.
  * @cfg:    A copy of the page table configuration.
  * @ops:    The page table operations in use for this set of page tables.
  */
 struct io_pgtable {
 	enum io_pgtable_fmt	fmt;
 	void			*cookie;
-	bool			tlb_sync_pending;
 	struct io_pgtable_cfg	cfg;
 	struct io_pgtable_ops	ops;
 };
@@ -175,22 +173,17 @@ struct io_pgtable {
 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 {
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
-	iop->tlb_sync_pending = true;
 }
 
 static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
 		unsigned long iova, size_t size, size_t granule, bool leaf)
 {
 	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
-	iop->tlb_sync_pending = true;
 }
 
 static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
 {
-	if (iop->tlb_sync_pending) {
-		iop->cfg.tlb->tlb_sync(iop->cookie);
-		iop->tlb_sync_pending = false;
-	}
+	iop->cfg.tlb->tlb_sync(iop->cookie);
 }
 
 /**
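
[Editor's note] The dropped tlb_sync_pending flag was a plain, unsynchronised
bool shared between CPUs, so the check-then-clear in io_pgtable_tlb_sync()
could let one caller skip a sync that another caller still needed. A hedged,
standalone sketch of that hazard (illustration only; the helper names here are
invented and the outcome is timing-dependent):

    #include <pthread.h>
    #include <stdio.h>

    /* Plain flag, as tlb_sync_pending was: no lock, no atomics. */
    static int sync_pending;
    static int syncs_issued;

    static void *unmap_path(void *unused)
    {
            sync_pending = 1;      /* add_flush(): mark work pending */
            if (sync_pending) {    /* sync(): check... */
                    /* ...another CPU can clear the flag right here... */
                    sync_pending = 0;
                    syncs_issued++; /* ...so one caller may skip its sync */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;
            pthread_create(&a, NULL, unmap_path, NULL);
            pthread_create(&b, NULL, unmap_path, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            printf("syncs issued: %d (can be 1 under a race)\n", syncs_issued);
            return 0;
    }

After the change, every io_pgtable_tlb_sync() call unconditionally invokes the
driver's tlb_sync callback, and any redundant-sync filtering is left to the
drivers, which can do it under their own locks.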
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index c58351ed61c1..36d1a7ce7fc4 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -62,32 +62,40 @@ int iommu_device_sysfs_add(struct iommu_device *iommu,
 	va_list vargs;
 	int ret;
 
-	device_initialize(&iommu->dev);
+	iommu->dev = kzalloc(sizeof(*iommu->dev), GFP_KERNEL);
+	if (!iommu->dev)
+		return -ENOMEM;
 
-	iommu->dev.class = &iommu_class;
-	iommu->dev.parent = parent;
-	iommu->dev.groups = groups;
+	device_initialize(iommu->dev);
+
+	iommu->dev->class = &iommu_class;
+	iommu->dev->parent = parent;
+	iommu->dev->groups = groups;
 
 	va_start(vargs, fmt);
-	ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
+	ret = kobject_set_name_vargs(&iommu->dev->kobj, fmt, vargs);
 	va_end(vargs);
 	if (ret)
 		goto error;
 
-	ret = device_add(&iommu->dev);
+	ret = device_add(iommu->dev);
 	if (ret)
 		goto error;
 
+	dev_set_drvdata(iommu->dev, iommu);
+
 	return 0;
 
 error:
-	put_device(&iommu->dev);
+	put_device(iommu->dev);
 	return ret;
 }
 
 void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
-	device_unregister(&iommu->dev);
+	dev_set_drvdata(iommu->dev, NULL);
+	device_unregister(iommu->dev);
+	iommu->dev = NULL;
 }
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
@@ -102,14 +110,14 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
 	if (!iommu || IS_ERR(iommu))
 		return -ENODEV;
 
-	ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
+	ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
 				      &link->kobj, dev_name(link));
 	if (ret)
 		return ret;
 
-	ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
+	ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev->kobj, "iommu");
 	if (ret)
-		sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
+		sysfs_remove_link_from_group(&iommu->dev->kobj, "devices",
 					     dev_name(link));
 
 	return ret;
@@ -121,5 +129,5 @@ void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 		return;
 
 	sysfs_remove_link(&link->kobj, "iommu");
-	sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
+	sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
 }
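
[Editor's note] The iommu-sysfs rework above switches from an embedded struct
device (whose refcounted lifetime fought with the embedding driver's) to a
separately allocated one, with a drvdata back-pointer so lookups still work.
A hedged, standalone sketch of the back-pointer side with simplified stand-in
types (illustration only; not the kernel's real struct device):

    #include <stdio.h>

    struct device { void *driver_data; };

    static void dev_set_drvdata(struct device *dev, void *data)
    {
            dev->driver_data = data;
    }

    static void *dev_get_drvdata(const struct device *dev)
    {
            return dev->driver_data;
    }

    struct iommu_device { struct device *dev; int id; };

    int main(void)
    {
            struct device sysfs_dev = { 0 };
            struct iommu_device iommu = { .dev = &sysfs_dev, .id = 3 };

            /* As iommu_device_sysfs_add() now does after device_add(). */
            dev_set_drvdata(iommu.dev, &iommu);

            struct iommu_device *found = dev_get_drvdata(&sysfs_dev);
            printf("resolved iommu id: %d\n", found->id);
            return 0;
    }

This is what lets the new dev_to_iommu_device() helper replace the old
container_of() in the AMD and Intel dev_to_*_iommu() functions above.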
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 5d14cd15198d..91c6d367ab35 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -129,6 +129,7 @@ static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
 	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
 	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
 	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
+	data->tlb_flush_active = true;
 }
 
 static void mtk_iommu_tlb_sync(void *cookie)
@@ -137,6 +138,10 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	int ret;
 	u32 tmp;
 
+	/* Avoid timing out if there's nothing to wait for */
+	if (!data->tlb_flush_active)
+		return;
+
 	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
 					tmp != 0, 10, 100000);
 	if (ret) {
@@ -146,6 +151,7 @@ static void mtk_iommu_tlb_sync(void *cookie)
 	}
 	/* Clear the CPE status */
 	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+	data->tlb_flush_active = false;
 }
 
 static const struct iommu_gather_ops mtk_iommu_gather_ops = {
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 2a28eadeea0e..c06cc91b5d9a 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,7 @@ struct mtk_iommu_data {
 	struct iommu_group		*m4u_group;
 	struct mtk_smi_iommu		smi_imu;      /* SMI larb iommu info */
 	bool				enable_4GB;
+	bool				tlb_flush_active;
 
 	struct iommu_device		iommu;
 };
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 28b26c80f4cf..072bd227b6c6 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -137,14 +137,14 @@ static void __init aic_common_ext_irq_of_init(struct irq_domain *domain)
 #define AT91_RTC_IMR		0x28
 #define AT91_RTC_IRQ_MASK	0x1f
 
-void __init aic_common_rtc_irq_fixup(struct device_node *root)
+void __init aic_common_rtc_irq_fixup(void)
 {
 	struct device_node *np;
 	void __iomem *regs;
 
-	np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+	np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
 	if (!np)
-		np = of_find_compatible_node(root, NULL,
+		np = of_find_compatible_node(NULL, NULL,
 					     "atmel,at91sam9x5-rtc");
 
 	if (!np)
@@ -165,7 +165,7 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
 #define AT91_RTT_ALMIEN		(1 << 16) /* Alarm Interrupt Enable */
 #define AT91_RTT_RTTINCIEN	(1 << 17) /* Real Time Timer Increment Interrupt Enable */
 
-void __init aic_common_rtt_irq_fixup(struct device_node *root)
+void __init aic_common_rtt_irq_fixup(void)
 {
 	struct device_node *np;
 	void __iomem *regs;
@@ -196,11 +196,10 @@ static void __init aic_common_irq_fixup(const struct of_device_id *matches)
 		return;
 
 	match = of_match_node(matches, root);
-	of_node_put(root);
 
 	if (match) {
-		void (*fixup)(struct device_node *) = match->data;
-		fixup(root);
+		void (*fixup)(void) = match->data;
+		fixup();
 	}
 
 	of_node_put(root);
diff --git a/drivers/irqchip/irq-atmel-aic-common.h b/drivers/irqchip/irq-atmel-aic-common.h
index af60376d50de..242e62c1851e 100644
--- a/drivers/irqchip/irq-atmel-aic-common.h
+++ b/drivers/irqchip/irq-atmel-aic-common.h
@@ -33,8 +33,8 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
 					     const char *name, int nirqs,
 					     const struct of_device_id *matches);
 
-void __init aic_common_rtc_irq_fixup(struct device_node *root);
+void __init aic_common_rtc_irq_fixup(void);
 
-void __init aic_common_rtt_irq_fixup(struct device_node *root);
+void __init aic_common_rtt_irq_fixup(void);
 
 #endif /* __IRQ_ATMEL_AIC_COMMON_H */
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 37f952dd9fc9..bb1ad451392f 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -209,20 +209,20 @@ static const struct irq_domain_ops aic_irq_ops = {
 	.xlate	= aic_irq_domain_xlate,
 };
 
-static void __init at91rm9200_aic_irq_fixup(struct device_node *root)
+static void __init at91rm9200_aic_irq_fixup(void)
 {
-	aic_common_rtc_irq_fixup(root);
+	aic_common_rtc_irq_fixup();
 }
 
-static void __init at91sam9260_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9260_aic_irq_fixup(void)
 {
-	aic_common_rtt_irq_fixup(root);
+	aic_common_rtt_irq_fixup();
 }
 
-static void __init at91sam9g45_aic_irq_fixup(struct device_node *root)
+static void __init at91sam9g45_aic_irq_fixup(void)
 {
-	aic_common_rtc_irq_fixup(root);
-	aic_common_rtt_irq_fixup(root);
+	aic_common_rtc_irq_fixup();
+	aic_common_rtt_irq_fixup();
 }
 
 static const struct of_device_id aic_irq_fixups[] __initconst = {
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index c04ee9a23d09..6acad2ea0fb3 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -305,9 +305,9 @@ static const struct irq_domain_ops aic5_irq_ops = {
 	.xlate	= aic5_irq_domain_xlate,
 };
 
-static void __init sama5d3_aic_irq_fixup(struct device_node *root)
+static void __init sama5d3_aic_irq_fixup(void)
 {
-	aic_common_rtc_irq_fixup(root);
+	aic_common_rtc_irq_fixup();
 }
 
 static const struct of_device_id aic5_irq_fixups[] __initconst = {
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index bddf169c4b37..b009b916a292 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -189,6 +189,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
 
 	ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
 	ct->chip.irq_resume = brcmstb_l2_intc_resume;
+	ct->chip.irq_pm_shutdown = brcmstb_l2_intc_suspend;
 
 	if (data->can_wake) {
 		/* This IRQ chip can wake the system, set all child interrupts
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index dad85e74c37c..3aae015469a5 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -71,7 +71,7 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base,
 static int __init digicolor_of_init(struct device_node *node,
 				    struct device_node *parent)
 {
-	static void __iomem *reg_base;
+	void __iomem *reg_base;
 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	struct regmap *ucregs;
 	int ret;
diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c
index 54c296401525..18d58d2b4ffe 100644
--- a/drivers/irqchip/irq-gic-realview.c
+++ b/drivers/irqchip/irq-gic-realview.c
@@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = {
 static int __init
 realview_gic_of_init(struct device_node *node, struct device_node *parent)
 {
-	static struct regmap *map;
+	struct regmap *map;
 	struct device_node *np;
 	const struct of_device_id *gic_id;
 	u32 pld1_ctrl;
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 249240d9a425..833a90fe33ae 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -43,6 +43,7 @@ static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
 			*dev_id = args.args[0];
 			break;
 		}
+		index++;
 	} while (!ret);
 
 	return ret;
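
[Editor's note] The one-line fix above adds the missing index increment to a
do/while that walks a property list; without it the loop re-parses entry 0
forever. A hedged, standalone sketch of the corrected iteration shape
(illustration only; parse_entry() is an invented stand-in for the OF parsing
helper):

    #include <stdio.h>

    /* Stand-in: returns 0 while there is an entry at 'index', -1 past the end. */
    static int parse_entry(int index, int *out)
    {
            static const int table[] = { 10, 20, 30 };

            if (index >= 3)
                    return -1;
            *out = table[index];
            return 0;
    }

    int main(void)
    {
            int index = 0, ret, val;

            do {
                    ret = parse_entry(index, &val);
                    if (!ret)
                            printf("entry %d -> %d\n", index, val);
                    index++; /* without this, entry 0 is re-read forever */
            } while (!ret);
            return 0;
    }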
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 68932873eebc..284738add89b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1835,7 +1835,7 @@ static int __init its_of_probe(struct device_node *node)
 
 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
 
-#if defined(CONFIG_ACPI_NUMA) && (ACPI_CA_VERSION >= 0x20170531)
+#ifdef CONFIG_ACPI_NUMA
 struct its_srat_map {
 	/* numa node id */
 	u32	numa_node;
@@ -1843,7 +1843,7 @@ struct its_srat_map {
 	u32	its_id;
 };
 
-static struct its_srat_map its_srat_maps[MAX_NUMNODES] __initdata;
+static struct its_srat_map *its_srat_maps __initdata;
 static int its_in_srat __initdata;
 
 static int __init acpi_get_its_numa_node(u32 its_id)
@@ -1857,6 +1857,12 @@ static int __init acpi_get_its_numa_node(u32 its_id)
 	return NUMA_NO_NODE;
 }
 
+static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
+					  const unsigned long end)
+{
+	return 0;
+}
+
 static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
 			 const unsigned long end)
 {
@@ -1873,12 +1879,6 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
 		return -EINVAL;
 	}
 
-	if (its_in_srat >= MAX_NUMNODES) {
-		pr_err("SRAT: ITS affinity exceeding max count[%d]\n",
-				MAX_NUMNODES);
-		return -EINVAL;
-	}
-
 	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
 
 	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
@@ -1897,14 +1897,37 @@ static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
 
 static void __init acpi_table_parse_srat_its(void)
 {
+	int count;
+
+	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
+			sizeof(struct acpi_table_srat),
+			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
+			gic_acpi_match_srat_its, 0);
+	if (count <= 0)
+		return;
+
+	its_srat_maps = kmalloc(count * sizeof(struct its_srat_map),
+				GFP_KERNEL);
+	if (!its_srat_maps) {
+		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
+		return;
+	}
+
 	acpi_table_parse_entries(ACPI_SIG_SRAT,
 			sizeof(struct acpi_table_srat),
 			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
 			gic_acpi_parse_srat_its, 0);
 }
+
+/* free the its_srat_maps after ITS probing */
+static void __init acpi_its_srat_maps_free(void)
+{
+	kfree(its_srat_maps);
+}
 #else
 static void __init acpi_table_parse_srat_its(void)	{ }
 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
+static void __init acpi_its_srat_maps_free(void) { }
 #endif
 
 static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
@@ -1951,6 +1974,7 @@ static void __init its_acpi_probe(void)
 	acpi_table_parse_srat_its();
 	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
 			      gic_acpi_parse_madt_its, 0);
+	acpi_its_srat_maps_free();
 }
 #else
 static void __init its_acpi_probe(void) { }
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index dbffb7ab6203..984c3ecfd22c 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -353,6 +353,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
 
 		if (static_key_true(&supports_deactivate))
 			gic_write_eoir(irqnr);
+		else
+			isb();
 
 		err = handle_domain_irq(gic_data.domain, irqnr, regs);
 		if (err) {
@@ -640,11 +642,16 @@ static void gic_smp_init(void)
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
-	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	unsigned int cpu;
 	void __iomem *reg;
 	int enabled;
 	u64 val;
 
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
@@ -831,8 +838,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	if (ret)
 		return ret;
 
-	for (i = 0; i < nr_irqs; i++)
-		gic_irq_domain_map(domain, virq + i, hwirq + i);
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1b1df4f770bd..d3e7c43718b8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -361,6 +361,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 		if (likely(irqnr > 15 && irqnr < 1020)) {
 			if (static_key_true(&supports_deactivate))
 				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+			isb();
 			handle_domain_irq(gic->domain, irqnr, regs);
 			continue;
 		}
@@ -401,10 +402,12 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
 		goto out;
 
 	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
-	if (unlikely(gic_irq < 32 || gic_irq > 1020))
+	if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
 		handle_bad_irq(desc);
-	else
+	} else {
+		isb();
 		generic_handle_irq(cascade_irq);
+	}
 
  out:
 	chained_irq_exit(chip, desc);
@@ -1027,8 +1030,11 @@ static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	if (ret)
 		return ret;
 
-	for (i = 0; i < nr_irqs; i++)
-		gic_irq_domain_map(domain, virq + i, hwirq + i);
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index 0a8ed1c05518..14461cbfab2f 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void)
 static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
 			     irq_hw_number_t hw)
 {
-	static struct irq_chip *chip;
+	struct irq_chip *chip;
 
 	if (hw < 2 && cpu_has_mipsmt) {
 		/* Software interrupts are used for MT/CMT IPI */
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 832ebf4062f7..6ab1d3afec02 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -950,7 +950,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
 					       &gic_irq_domain_ops, NULL);
 	if (!gic_irq_domain)
 		panic("Failed to add GIC IRQ domain");
-	gic_irq_domain->name = "mips-gic-irq";
 
 	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
 						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
@@ -959,7 +958,6 @@ static void __init __gic_init(unsigned long gic_base_addr,
 	if (!gic_ipi_domain)
 		panic("Failed to add GIC IPI domain");
 
-	gic_ipi_domain->name = "mips-gic-ipi";
 	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);
 
 	if (node &&
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index 060d357f107f..6f423bc49d0d 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic)
 				cs->deflect_dest[0] = '\0';
 				retval = 4; /* only proceed */
 			}
-			sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
-				cs->akt_state,
-				cs->divert_id,
-				divert_if.drv_to_name(cs->ics.driver),
-				(ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
-				cs->ics.parm.setup.phone,
-				cs->ics.parm.setup.eazmsn,
-				cs->ics.parm.setup.si1,
-				cs->ics.parm.setup.si2,
-				cs->ics.parm.setup.screen,
-				dv->rule.waittime,
-				cs->deflect_dest);
+			snprintf(cs->info, sizeof(cs->info),
+				 "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n",
+				 cs->akt_state,
+				 cs->divert_id,
+				 divert_if.drv_to_name(cs->ics.driver),
+				 (ic->command == ISDN_STAT_ICALLW) ? "1" : "0",
+				 cs->ics.parm.setup.phone,
+				 cs->ics.parm.setup.eazmsn,
+				 cs->ics.parm.setup.si1,
+				 cs->ics.parm.setup.si2,
+				 cs->ics.parm.setup.screen,
+				 dv->rule.waittime,
+				 cs->deflect_dest);
 			if ((dv->rule.action == DEFLECT_REPORT) ||
 			    (dv->rule.action == DEFLECT_REJECT)) {
 				put_info_buffer(cs->info);
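
[Editor's note] The sprintf-to-snprintf conversion above bounds the write to
sizeof(cs->info), since several of the formatted fields are caller-influenced
strings. A hedged, standalone sketch of the truncation behaviour (illustration
only; the buffer size and strings are invented):

    #include <stdio.h>

    int main(void)
    {
            char info[16];

            /* snprintf() never writes past sizeof(info) and NUL-terminates. */
            int n = snprintf(info, sizeof(info), "%d %s", 42,
                             "a-deflect-destination-too-long-for-the-buffer");

            printf("stored: \"%s\"\n", info);
            printf("would have needed %d bytes\n", n);
            return 0;
    }

The return value reports the length the full string would have had, so callers
can detect truncation when they care.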
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 40c7e2cf423b..034cabac699d 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
 
 static bool suppress_pollack;
 
-static struct pci_device_id c4_pci_tbl[] = {
+static const struct pci_device_id c4_pci_tbl[] = {
 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
 	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 },
 	{ }			/* Terminating entry */
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 8b7ad4f1ab01..b2023e08dcd2 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc {
 /*
   This table should be sorted by PCI device ID
 */
-static struct pci_device_id divas_pci_tbl[] = {
+static const struct pci_device_id divas_pci_tbl[] = {
 	/* Diva Server BRI-2M PCI 0xE010 */
 	{ PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
 	  CARDTYPE_MAESTRA_PCI },
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index e3fa1cd64470..dce6632daae1 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev)
 		pr_info("%s: drvdata already removed\n", __func__);
 }
 
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
 	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID,
 	  0, 0, (unsigned long) "Fritz!Card PCI"},
 	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index aea0c9616ea5..3cf07b8ced1c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = {
 
 #undef H
 #define H(x)	((unsigned long)&hfcm_map[x])
-static struct pci_device_id hfmultipci_ids[] = {
+static const struct pci_device_id hfmultipci_ids[] = {
 
 	/* Cards with HFC-4S Chip */
 	{ PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 5dc246d71c16..d2e401a8090e 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] =
 	{},
 };
 
-static struct pci_device_id hfc_ids[] =
+static const struct pci_device_id hfc_ids[] =
 {
 	{ PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0),
 	  (unsigned long) &hfc_map[0] },
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index afde4edef9ae..6a6d848bd18e 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev)
 /* We cannot select cards with PCI_SUB... IDs, since here are cards with
  * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
  * known other cards which not work with this driver - see probe function */
-static struct pci_device_id nj_pci_ids[] = {
+static const struct pci_device_id nj_pci_ids[] = {
 	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ }
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 3052c836b89f..d80072fef434 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev)
 		pr_notice("%s: drvdata already removed\n", __func__);
 }
 
-static struct pci_device_id w6692_ids[] = {
+static const struct pci_device_id w6692_ids[] = {
 	{ PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]},
 	{ PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692,
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index c7d68675b028..7108bdb8742e 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
 
-static struct pci_device_id hisax_pci_tbl[] __used = {
+static const struct pci_device_id hisax_pci_tbl[] __used = {
 #ifdef CONFIG_HISAX_FRITZPCI
 	{PCI_VDEVICE(AVM,      PCI_DEVICE_ID_AVM_A1)         },
 #endif
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 90f051ce0259..9090cc1e1f29 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -86,7 +86,7 @@ typedef struct {
 	char *device_name;
 } hfc4s8s_param;
 
-static struct pci_device_id hfc4s8s_ids[] = {
+static const struct pci_device_id hfc4s8s_ids[] = {
 	{.vendor = PCI_VENDOR_ID_CCD,
 	 .device = PCI_DEVICE_ID_4S,
 	 .subvendor = 0x1397,
diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c
index 5a9f39ed1d5d..e4f7573ba9bf 100644
--- a/drivers/isdn/hisax/hisax_fcpcipnp.c
+++ b/drivers/isdn/hisax/hisax_fcpcipnp.c
@@ -52,7 +52,7 @@ module_param(debug, int, 0);
 MODULE_AUTHOR("Kai Germaschewski <kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>");
 MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver");
 
-static struct pci_device_id fcpci_ids[] = {
+static const struct pci_device_id fcpci_ids[] = {
 	{ .vendor      = PCI_VENDOR_ID_AVM,
 	  .device      = PCI_DEVICE_ID_AVM_A1,
 	  .subvendor   = PCI_ANY_ID,
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 7b5fd8fb1761..aaca0b3d662e 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -44,7 +44,6 @@ struct procdata {
 	char log_name[15];	/* log filename */
 	struct log_data *log_head, *log_tail;	/* head and tail for queue */
 	int if_used;		/* open count for interface */
-	int volatile del_lock;	/* lock for delete operations */
 	unsigned char logtmp[LOG_MAX_LINELEN];
 	wait_queue_head_t rd_queue;
 };
@@ -102,7 +101,6 @@ put_log_buffer(hysdn_card *card, char *cp)
 {
 	struct log_data *ib;
 	struct procdata *pd = card->proclog;
-	int i;
 	unsigned long flags;
 
 	if (!pd)
@@ -126,21 +124,21 @@ put_log_buffer(hysdn_card *card, char *cp)
 	else
 		pd->log_tail->next = ib;	/* follows existing messages */
 	pd->log_tail = ib;	/* new tail */
-	i = pd->del_lock++;	/* get lock state */
-	spin_unlock_irqrestore(&card->hysdn_lock, flags);
 
 	/* delete old entrys */
-	if (!i)
-		while (pd->log_head->next) {
-			if ((pd->log_head->usage_cnt <= 0) &&
-			    (pd->log_head->next->usage_cnt <= 0)) {
-				ib = pd->log_head;
-				pd->log_head = pd->log_head->next;
-				kfree(ib);
-			} else
-				break;
+	while (pd->log_head->next) {
+		if ((pd->log_head->usage_cnt <= 0) &&
+		    (pd->log_head->next->usage_cnt <= 0)) {
+			ib = pd->log_head;
+			pd->log_head = pd->log_head->next;
+			kfree(ib);
+		} else {
+			break;
+		}
 	}		/* pd->log_head->next */
-	pd->del_lock--;		/* release lock level */
+
+	spin_unlock_irqrestore(&card->hysdn_lock, flags);
+
 	wake_up_interruptible(&(pd->rd_queue));		/* announce new entry */
 }				/* put_log_buffer */
 
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 89b09c51ab7c..38a5bb764c7b 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1376,6 +1376,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
 			if (arg) {
 				if (copy_from_user(bname, argp, sizeof(bname) - 1))
 					return -EFAULT;
+				bname[sizeof(bname)-1] = 0;
 			} else
 				return -EINVAL;
 			ret = mutex_lock_interruptible(&dev->mtx);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index c151c6daa67e..f63a110b7bcb 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
 	char newname[10];
 
 	if (p) {
-		/* Slave-Name MUST not be empty */
-		if (!strlen(p + 1))
+		/* Slave-Name MUST not be empty or overflow 'newname' */
+		if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
 			return NULL;
-		strcpy(newname, p + 1);
 		*p = 0;
 		/* Master must already exist */
 		if (!(n = isdn_net_findif(parm)))
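
[Editor's note] The strscpy() conversion above folds two checks into one:
the kernel helper returns the number of bytes copied (so 0 flags an empty
name) or -E2BIG when the source does not fit, which catches the overflow the
old strcpy() allowed. A hedged userspace model of that return contract
(illustration only; strscpy_model() is an invented stand-in, since strscpy()
itself is kernel-only):

    #include <stdio.h>
    #include <string.h>

    #define E2BIG 7

    /* Minimal model of strscpy(): bytes copied (excluding NUL), or -E2BIG. */
    static long strscpy_model(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (len >= size) {
                    if (size) {
                            memcpy(dst, src, size - 1);
                            dst[size - 1] = '\0';
                    }
                    return -E2BIG;
            }
            memcpy(dst, src, len + 1);
            return (long)len;
    }

    int main(void)
    {
            char newname[10];

            printf("%ld\n", strscpy_model(newname, "", sizeof(newname)));        /* 0: empty */
            printf("%ld\n", strscpy_model(newname, "slave0", sizeof(newname)));  /* 6: ok */
            printf("%ld\n", strscpy_model(newname, "waytoolongname",
                                          sizeof(newname)));                     /* -7: too big */
            return 0;
    }

Both failure modes land in the `<= 0` branch, so a single test rejects them.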
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
index 78fc5d5e9051..92e6570b1143 100644
--- a/drivers/isdn/mISDN/fsm.c
+++ b/drivers/isdn/mISDN/fsm.c
@@ -26,7 +26,7 @@
 
 #define FSM_TIMER_DEBUG 0
 
-void
+int
 mISDN_FsmNew(struct Fsm *fsm,
 	     struct FsmNode *fnlist, int fncount)
 {
@@ -34,6 +34,8 @@ mISDN_FsmNew(struct Fsm *fsm,
 
 	fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
 				  fsm->event_count, GFP_KERNEL);
+	if (fsm->jumpmatrix == NULL)
+		return -ENOMEM;
 
 	for (i = 0; i < fncount; i++)
 		if ((fnlist[i].state >= fsm->state_count) ||
@@ -45,6 +47,7 @@ mISDN_FsmNew(struct Fsm *fsm,
 		} else
 			fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
 					fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
+	return 0;
 }
 EXPORT_SYMBOL(mISDN_FsmNew);
 
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h
index 928f5be192c1..e1def8490221 100644
--- a/drivers/isdn/mISDN/fsm.h
+++ b/drivers/isdn/mISDN/fsm.h
@@ -55,7 +55,7 @@ struct FsmTimer {
 	void *arg;
 };
 
-extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
+extern int mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
 extern void mISDN_FsmFree(struct Fsm *);
 extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
 extern void mISDN_FsmChangeState(struct FsmInst *, int);
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index bebc57b72138..3192b0eb3944 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -414,8 +414,7 @@ l1_init(u_int *deb)
414 l1fsm_s.event_count = L1_EVENT_COUNT; 414 l1fsm_s.event_count = L1_EVENT_COUNT;
415 l1fsm_s.strEvent = strL1Event; 415 l1fsm_s.strEvent = strL1Event;
416 l1fsm_s.strState = strL1SState; 416 l1fsm_s.strState = strL1SState;
417 mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList)); 417 return mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
418 return 0;
419} 418}
420 419
421void 420void
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 7243a6746f8b..9ff0903a0e89 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -2247,15 +2247,26 @@ static struct Bprotocol X75SLP = {
2247int 2247int
2248Isdnl2_Init(u_int *deb) 2248Isdnl2_Init(u_int *deb)
2249{ 2249{
2250 int res;
2250 debug = deb; 2251 debug = deb;
2251 mISDN_register_Bprotocol(&X75SLP); 2252 mISDN_register_Bprotocol(&X75SLP);
2252 l2fsm.state_count = L2_STATE_COUNT; 2253 l2fsm.state_count = L2_STATE_COUNT;
2253 l2fsm.event_count = L2_EVENT_COUNT; 2254 l2fsm.event_count = L2_EVENT_COUNT;
2254 l2fsm.strEvent = strL2Event; 2255 l2fsm.strEvent = strL2Event;
2255 l2fsm.strState = strL2State; 2256 l2fsm.strState = strL2State;
2256 mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList)); 2257 res = mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
2257 TEIInit(deb); 2258 if (res)
2259 goto error;
2260 res = TEIInit(deb);
2261 if (res)
2262 goto error_fsm;
2258 return 0; 2263 return 0;
2264
2265error_fsm:
2266 mISDN_FsmFree(&l2fsm);
2267error:
2268 mISDN_unregister_Bprotocol(&X75SLP);
2269 return res;
2259} 2270}
2260 2271
2261void 2272void
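
Isdnl2_Init() now follows the usual kernel unwind idiom: each failure label undoes exactly the steps that already succeeded, in reverse order. A compilable toy version of the shape; step1()/step2() and undo_step1() are placeholders for mISDN_FsmNew(), TEIInit() and mISDN_FsmFree():

    #include <stdio.h>

    static int step1(void) { return 0; }
    static int step2(void) { return -1; }   /* simulate TEIInit() failing */
    static void undo_step1(void) { puts("undo step1"); }

    static int init_chain(void)
    {
        int res;

        res = step1();
        if (res)
            goto error;
        res = step2();
        if (res)
            goto error_step1;
        return 0;

    error_step1:
        undo_step1();   /* undo only what succeeded, in reverse */
    error:
        return res;
    }

    int main(void)
    {
        printf("init_chain() = %d\n", init_chain());
        return 0;
    }
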
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 908127efccf8..12d9e5f4beb1 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1387,23 +1387,37 @@ create_teimanager(struct mISDNdevice *dev)
1387 1387
1388int TEIInit(u_int *deb) 1388int TEIInit(u_int *deb)
1389{ 1389{
1390 int res;
1390 debug = deb; 1391 debug = deb;
1391 teifsmu.state_count = TEI_STATE_COUNT; 1392 teifsmu.state_count = TEI_STATE_COUNT;
1392 teifsmu.event_count = TEI_EVENT_COUNT; 1393 teifsmu.event_count = TEI_EVENT_COUNT;
1393 teifsmu.strEvent = strTeiEvent; 1394 teifsmu.strEvent = strTeiEvent;
1394 teifsmu.strState = strTeiState; 1395 teifsmu.strState = strTeiState;
1395 mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser)); 1396 res = mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
1397 if (res)
1398 goto error;
1396 teifsmn.state_count = TEI_STATE_COUNT; 1399 teifsmn.state_count = TEI_STATE_COUNT;
1397 teifsmn.event_count = TEI_EVENT_COUNT; 1400 teifsmn.event_count = TEI_EVENT_COUNT;
1398 teifsmn.strEvent = strTeiEvent; 1401 teifsmn.strEvent = strTeiEvent;
1399 teifsmn.strState = strTeiState; 1402 teifsmn.strState = strTeiState;
1400 mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet)); 1403 res = mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
1404 if (res)
1405 goto error_smn;
1401 deactfsm.state_count = DEACT_STATE_COUNT; 1406 deactfsm.state_count = DEACT_STATE_COUNT;
1402 deactfsm.event_count = DEACT_EVENT_COUNT; 1407 deactfsm.event_count = DEACT_EVENT_COUNT;
1403 deactfsm.strEvent = strDeactEvent; 1408 deactfsm.strEvent = strDeactEvent;
1404 deactfsm.strState = strDeactState; 1409 deactfsm.strState = strDeactState;
1405 mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList)); 1410 res = mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
1411 if (res)
1412 goto error_deact;
1406 return 0; 1413 return 0;
1414
1415error_deact:
1416 mISDN_FsmFree(&teifsmn);
1417error_smn:
1418 mISDN_FsmFree(&teifsmu);
1419error:
1420 return res;
1407} 1421}
1408 1422
1409void TEIFree(void) 1423void TEIFree(void)
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index 5ecc154f6831..9bc32578a766 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -657,7 +657,7 @@ try:
657 * be directed to disk. 657 * be directed to disk.
658 */ 658 */
659int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, 659int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
660 struct ppa_addr ppa, int bio_iter) 660 struct ppa_addr ppa, int bio_iter, bool advanced_bio)
661{ 661{
662 struct pblk *pblk = container_of(rb, struct pblk, rwb); 662 struct pblk *pblk = container_of(rb, struct pblk, rwb);
663 struct pblk_rb_entry *entry; 663 struct pblk_rb_entry *entry;
@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
694 * filled with data from the cache). If part of the data resides on the 694 * filled with data from the cache). If part of the data resides on the
695 * media, we will read later on 695 * media, we will read later on
696 */ 696 */
697 if (unlikely(!bio->bi_iter.bi_idx)) 697 if (unlikely(!advanced_bio))
698 bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); 698 bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
699 699
700 data = bio_data(bio); 700 data = bio_data(bio);
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 4e5c48f3de62..d682e89e6493 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -26,7 +26,7 @@
26 */ 26 */
27static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, 27static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
28 sector_t lba, struct ppa_addr ppa, 28 sector_t lba, struct ppa_addr ppa,
29 int bio_iter) 29 int bio_iter, bool advanced_bio)
30{ 30{
31#ifdef CONFIG_NVM_DEBUG 31#ifdef CONFIG_NVM_DEBUG
32 /* Callers must ensure that the ppa points to a cache address */ 32 /* Callers must ensure that the ppa points to a cache address */
@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
34 BUG_ON(!pblk_addr_in_cache(ppa)); 34 BUG_ON(!pblk_addr_in_cache(ppa));
35#endif 35#endif
36 36
37 return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter); 37 return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
38 bio_iter, advanced_bio);
38} 39}
39 40
40static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, 41static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
44 struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; 45 struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
45 sector_t blba = pblk_get_lba(bio); 46 sector_t blba = pblk_get_lba(bio);
46 int nr_secs = rqd->nr_ppas; 47 int nr_secs = rqd->nr_ppas;
47 int advanced_bio = 0; 48 bool advanced_bio = false;
48 int i, j = 0; 49 int i, j = 0;
49 50
50 /* logic error: lba out-of-bounds. Ignore read request */ 51 /* logic error: lba out-of-bounds. Ignore read request */
@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
62retry: 63retry:
63 if (pblk_ppa_empty(p)) { 64 if (pblk_ppa_empty(p)) {
64 WARN_ON(test_and_set_bit(i, read_bitmap)); 65 WARN_ON(test_and_set_bit(i, read_bitmap));
65 continue; 66
67 if (unlikely(!advanced_bio)) {
68 bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
69 advanced_bio = true;
70 }
71
72 goto next;
66 } 73 }
67 74
68 /* Try to read from write buffer. The address is later checked 75 /* Try to read from write buffer. The address is later checked
69 * on the write buffer to prevent retrieving overwritten data. 76 * on the write buffer to prevent retrieving overwritten data.
70 */ 77 */
71 if (pblk_addr_in_cache(p)) { 78 if (pblk_addr_in_cache(p)) {
72 if (!pblk_read_from_cache(pblk, bio, lba, p, i)) { 79 if (!pblk_read_from_cache(pblk, bio, lba, p, i,
80 advanced_bio)) {
73 pblk_lookup_l2p_seq(pblk, &p, lba, 1); 81 pblk_lookup_l2p_seq(pblk, &p, lba, 1);
74 goto retry; 82 goto retry;
75 } 83 }
76 WARN_ON(test_and_set_bit(i, read_bitmap)); 84 WARN_ON(test_and_set_bit(i, read_bitmap));
77 advanced_bio = 1; 85 advanced_bio = true;
78#ifdef CONFIG_NVM_DEBUG 86#ifdef CONFIG_NVM_DEBUG
79 atomic_long_inc(&pblk->cache_reads); 87 atomic_long_inc(&pblk->cache_reads);
80#endif 88#endif
@@ -83,6 +91,7 @@ retry:
83 rqd->ppa_list[j++] = p; 91 rqd->ppa_list[j++] = p;
84 } 92 }
85 93
94next:
86 if (advanced_bio) 95 if (advanced_bio)
87 bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); 96 bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
88 } 97 }
@@ -282,7 +291,7 @@ retry:
282 * write buffer to prevent retrieving overwritten data. 291 * write buffer to prevent retrieving overwritten data.
283 */ 292 */
284 if (pblk_addr_in_cache(ppa)) { 293 if (pblk_addr_in_cache(ppa)) {
285 if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) { 294 if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
286 pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); 295 pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
287 goto retry; 296 goto retry;
288 } 297 }
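
The explicit advanced_bio flag replaces the old !bio->bi_iter.bi_idx test, presumably because bi_idx is not a reliable "bio not yet advanced" signal for every bio reaching this path. A toy model of the one-time catch-up the flag gates; the loop is simplified and all values are invented:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SZ 4096

    int main(void)
    {
        bool advanced_bio = false;
        long pos = 0;
        int from_cache[6] = { 0, 0, 1, 1, 0, 1 };

        for (int i = 0; i < 6; i++) {
            if (from_cache[i] && !advanced_bio) {
                pos += (long)i * PAGE_SZ;   /* one-time catch-up jump */
                advanced_bio = true;
            }
            if (advanced_bio)
                pos += PAGE_SZ;             /* per-sector advance */
            printf("sector %d -> pos %ld\n", i, pos);
        }
        return 0;
    }
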
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 0c5692cc2f60..67e623bd5c2d 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
670 struct list_head *list, 670 struct list_head *list,
671 unsigned int max); 671 unsigned int max);
672int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, 672int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
673 struct ppa_addr ppa, int bio_iter); 673 struct ppa_addr ppa, int bio_iter, bool advanced_bio);
674unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); 674unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
675 675
676unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); 676unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index ac91fd0d62c6..cbca5e51b975 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -92,7 +92,7 @@ static struct mbox_controller pcc_mbox_ctrl = {};
92 */ 92 */
93static struct mbox_chan *get_pcc_channel(int id) 93static struct mbox_chan *get_pcc_channel(int id)
94{ 94{
95 if (id < 0 || id > pcc_mbox_ctrl.num_chans) 95 if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
96 return ERR_PTR(-ENOENT); 96 return ERR_PTR(-ENOENT);
97 97
98 return &pcc_mbox_channels[id]; 98 return &pcc_mbox_channels[id];
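
Valid channel indices run from 0 to num_chans - 1, so the old id > num_chans test let id == num_chans through and indexed one slot past the array. A self-contained illustration of the corrected bound:

    #include <stdio.h>

    #define NUM_CHANS 4
    static int chans[NUM_CHANS];

    /* '>' let id == NUM_CHANS through, one element past the array;
     * '>=' closes the off-by-one. */
    static int *get_chan(int id)
    {
        if (id < 0 || id >= NUM_CHANS)
            return NULL;
        return &chans[id];
    }

    int main(void)
    {
        printf("id=3 -> %p\n", (void *)get_chan(3));   /* last valid slot */
        printf("id=4 -> %p\n", (void *)get_chan(4));   /* now rejected */
        return 0;
    }
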
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index f4eace5ea184..40f3cd7eab0f 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
156 156
157 rdev_for_each(rdev, mddev) { 157 rdev_for_each(rdev, mddev) {
158 if (! test_bit(In_sync, &rdev->flags) 158 if (! test_bit(In_sync, &rdev->flags)
159 || test_bit(Faulty, &rdev->flags)) 159 || test_bit(Faulty, &rdev->flags)
160 || test_bit(Bitmap_sync, &rdev->flags))
160 continue; 161 continue;
161 162
162 target = offset + index * (PAGE_SIZE/512); 163 target = offset + index * (PAGE_SIZE/512);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 850ff6c67994..44f4a8ac95bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1258,8 +1258,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1258 */ 1258 */
1259int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c) 1259int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1260{ 1260{
1261 blk_status_t a; 1261 int a, f;
1262 int f;
1263 unsigned long buffers_processed = 0; 1262 unsigned long buffers_processed = 0;
1264 struct dm_buffer *b, *tmp; 1263 struct dm_buffer *b, *tmp;
1265 1264
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1b224aa9cf15..3acce09bba35 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1587,16 +1587,18 @@ retry:
1587 if (likely(ic->mode == 'J')) { 1587 if (likely(ic->mode == 'J')) {
1588 if (dio->write) { 1588 if (dio->write) {
1589 unsigned next_entry, i, pos; 1589 unsigned next_entry, i, pos;
1590 unsigned ws, we; 1590 unsigned ws, we, range_sectors;
1591 1591
1592 dio->range.n_sectors = min(dio->range.n_sectors, ic->free_sectors); 1592 dio->range.n_sectors = min(dio->range.n_sectors,
1593 ic->free_sectors << ic->sb->log2_sectors_per_block);
1593 if (unlikely(!dio->range.n_sectors)) 1594 if (unlikely(!dio->range.n_sectors))
1594 goto sleep; 1595 goto sleep;
1595 ic->free_sectors -= dio->range.n_sectors; 1596 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
1597 ic->free_sectors -= range_sectors;
1596 journal_section = ic->free_section; 1598 journal_section = ic->free_section;
1597 journal_entry = ic->free_section_entry; 1599 journal_entry = ic->free_section_entry;
1598 1600
1599 next_entry = ic->free_section_entry + dio->range.n_sectors; 1601 next_entry = ic->free_section_entry + range_sectors;
1600 ic->free_section_entry = next_entry % ic->journal_section_entries; 1602 ic->free_section_entry = next_entry % ic->journal_section_entries;
1601 ic->free_section += next_entry / ic->journal_section_entries; 1603 ic->free_section += next_entry / ic->journal_section_entries;
1602 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries; 1604 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
@@ -1727,6 +1729,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
1727 wraparound_section(ic, &ic->free_section); 1729 wraparound_section(ic, &ic->free_section);
1728 ic->n_uncommitted_sections++; 1730 ic->n_uncommitted_sections++;
1729 } 1731 }
1732 WARN_ON(ic->journal_sections * ic->journal_section_entries !=
1733 (ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
1730} 1734}
1731 1735
1732static void integrity_commit(struct work_struct *w) 1736static void integrity_commit(struct work_struct *w)
@@ -1821,6 +1825,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
1821{ 1825{
1822 unsigned i, j, n; 1826 unsigned i, j, n;
1823 struct journal_completion comp; 1827 struct journal_completion comp;
1828 struct blk_plug plug;
1829
1830 blk_start_plug(&plug);
1824 1831
1825 comp.ic = ic; 1832 comp.ic = ic;
1826 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 1833 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
@@ -1945,6 +1952,8 @@ skip_io:
1945 1952
1946 dm_bufio_write_dirty_buffers_async(ic->bufio); 1953 dm_bufio_write_dirty_buffers_async(ic->bufio);
1947 1954
1955 blk_finish_plug(&plug);
1956
1948 complete_journal_op(&comp); 1957 complete_journal_op(&comp);
1949 wait_for_completion_io(&comp.comp); 1958 wait_for_completion_io(&comp.comp);
1950 1959
@@ -3019,6 +3028,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3019 ti->error = "Block size doesn't match the information in superblock"; 3028 ti->error = "Block size doesn't match the information in superblock";
3020 goto bad; 3029 goto bad;
3021 } 3030 }
3031 if (!le32_to_cpu(ic->sb->journal_sections)) {
3032 r = -EINVAL;
3033 ti->error = "Corrupted superblock, journal_sections is 0";
3034 goto bad;
3035 }
3022 /* make sure that ti->max_io_len doesn't overflow */ 3036 /* make sure that ti->max_io_len doesn't overflow */
3023 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || 3037 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
3024 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { 3038 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
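
The dm-integrity hunk fixes a unit mismatch: ic->free_sectors counts journal blocks while dio->range.n_sectors counts 512-byte sectors, and the log2_sectors_per_block shifts convert between the two. A worked example with invented values:

    #include <stdio.h>

    int main(void)
    {
        unsigned log2_sectors_per_block = 3;   /* 4 KiB blocks = 8 sectors */
        unsigned free_blocks = 10;             /* ic->free_sectors in the code */
        unsigned n_sectors = 100;              /* requested size in sectors */

        unsigned cap = free_blocks << log2_sectors_per_block;   /* 80 sectors */
        if (n_sectors > cap)
            n_sectors = cap;

        unsigned range_blocks = n_sectors >> log2_sectors_per_block;  /* 10 */
        free_blocks -= range_blocks;

        printf("capped to %u sectors, consumed %u blocks, %u left\n",
               n_sectors, range_blocks, free_blocks);
        return 0;
    }
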
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2e10c2f13a34..5bfe285ea9d1 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -208,6 +208,7 @@ struct raid_dev {
208#define RT_FLAG_RS_BITMAP_LOADED 2 208#define RT_FLAG_RS_BITMAP_LOADED 2
209#define RT_FLAG_UPDATE_SBS 3 209#define RT_FLAG_UPDATE_SBS 3
210#define RT_FLAG_RESHAPE_RS 4 210#define RT_FLAG_RESHAPE_RS 4
211#define RT_FLAG_RS_SUSPENDED 5
211 212
212/* Array elements of 64 bit needed for rebuild/failed disk bits */ 213/* Array elements of 64 bit needed for rebuild/failed disk bits */
213#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) 214#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -564,9 +565,10 @@ static const char *raid10_md_layout_to_format(int layout)
564 if (__raid10_near_copies(layout) > 1) 565 if (__raid10_near_copies(layout) > 1)
565 return "near"; 566 return "near";
566 567
567 WARN_ON(__raid10_far_copies(layout) < 2); 568 if (__raid10_far_copies(layout) > 1)
569 return "far";
568 570
569 return "far"; 571 return "unknown";
570} 572}
571 573
572/* Return md raid10 algorithm for @name */ 574/* Return md raid10 algorithm for @name */
@@ -2540,11 +2542,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2540 if (!freshest) 2542 if (!freshest)
2541 return 0; 2543 return 0;
2542 2544
2543 if (validate_raid_redundancy(rs)) {
2544 rs->ti->error = "Insufficient redundancy to activate array";
2545 return -EINVAL;
2546 }
2547
2548 /* 2545 /*
2549 * Validation of the freshest device provides the source of 2546 * Validation of the freshest device provides the source of
2550 * validation for the remaining devices. 2547 * validation for the remaining devices.
@@ -2553,6 +2550,11 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
2553 if (super_validate(rs, freshest)) 2550 if (super_validate(rs, freshest))
2554 return -EINVAL; 2551 return -EINVAL;
2555 2552
2553 if (validate_raid_redundancy(rs)) {
2554 rs->ti->error = "Insufficient redundancy to activate array";
2555 return -EINVAL;
2556 }
2557
2556 rdev_for_each(rdev, mddev) 2558 rdev_for_each(rdev, mddev)
2557 if (!test_bit(Journal, &rdev->flags) && 2559 if (!test_bit(Journal, &rdev->flags) &&
2558 rdev != freshest && 2560 rdev != freshest &&
@@ -3168,6 +3170,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3168 } 3170 }
3169 3171
3170 mddev_suspend(&rs->md); 3172 mddev_suspend(&rs->md);
3173 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3171 3174
3172 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ 3175 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
3173 if (rs_is_raid456(rs)) { 3176 if (rs_is_raid456(rs)) {
@@ -3625,7 +3628,7 @@ static void raid_postsuspend(struct dm_target *ti)
3625{ 3628{
3626 struct raid_set *rs = ti->private; 3629 struct raid_set *rs = ti->private;
3627 3630
3628 if (!rs->md.suspended) 3631 if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3629 mddev_suspend(&rs->md); 3632 mddev_suspend(&rs->md);
3630 3633
3631 rs->md.ro = 1; 3634 rs->md.ro = 1;
@@ -3759,7 +3762,7 @@ static int rs_start_reshape(struct raid_set *rs)
3759 return r; 3762 return r;
3760 3763
3761 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */ 3764 /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
3762 if (mddev->suspended) 3765 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3763 mddev_resume(mddev); 3766 mddev_resume(mddev);
3764 3767
3765 /* 3768 /*
@@ -3786,8 +3789,8 @@ static int rs_start_reshape(struct raid_set *rs)
3786 } 3789 }
3787 3790
3788 /* Suspend because a resume will happen in raid_resume() */ 3791 /* Suspend because a resume will happen in raid_resume() */
3789 if (!mddev->suspended) 3792 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
3790 mddev_suspend(mddev); 3793 mddev_suspend(mddev);
3791 3794
3792 /* 3795 /*
3793 * Now reshape got set up, update superblocks to 3796 * Now reshape got set up, update superblocks to
@@ -3883,13 +3886,13 @@ static void raid_resume(struct dm_target *ti)
3883 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS)) 3886 if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
3884 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3887 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3885 3888
3886 if (mddev->suspended) 3889 if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
3887 mddev_resume(mddev); 3890 mddev_resume(mddev);
3888} 3891}
3889 3892
3890static struct target_type raid_target = { 3893static struct target_type raid_target = {
3891 .name = "raid", 3894 .name = "raid",
3892 .version = {1, 11, 1}, 3895 .version = {1, 12, 1},
3893 .module = THIS_MODULE, 3896 .module = THIS_MODULE,
3894 .ctr = raid_ctr, 3897 .ctr = raid_ctr,
3895 .dtr = raid_dtr, 3898 .dtr = raid_dtr,
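
Rather than peeking at mddev->suspended, which the md core owns, dm-raid now tracks its own RT_FLAG_RS_SUSPENDED bit and flips it with test_and_set_bit()/test_and_clear_bit(), making suspend and resume idempotent. A single-threaded model of the bookkeeping; a plain bool stands in for the atomic bit ops:

    #include <stdbool.h>
    #include <stdio.h>

    static bool suspended;

    static void rs_suspend(void)
    {
        bool was = suspended;

        suspended = true;
        if (!was)
            puts("mddev_suspend()");    /* only on the first call */
    }

    static void rs_resume(void)
    {
        bool was = suspended;

        suspended = false;
        if (was)
            puts("mddev_resume()");     /* only if actually suspended */
    }

    int main(void)
    {
        rs_suspend();
        rs_suspend();   /* idempotent: no double suspend */
        rs_resume();
        rs_resume();    /* no spurious resume */
        return 0;
    }
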
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a39bcd9b982a..28a4071cdf85 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -20,6 +20,7 @@
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21#include <linux/blk-mq.h> 21#include <linux/blk-mq.h>
22#include <linux/mount.h> 22#include <linux/mount.h>
23#include <linux/dax.h>
23 24
24#define DM_MSG_PREFIX "table" 25#define DM_MSG_PREFIX "table"
25 26
@@ -1630,6 +1631,37 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
1630 return false; 1631 return false;
1631} 1632}
1632 1633
1634static int device_dax_write_cache_enabled(struct dm_target *ti,
1635 struct dm_dev *dev, sector_t start,
1636 sector_t len, void *data)
1637{
1638 struct dax_device *dax_dev = dev->dax_dev;
1639
1640 if (!dax_dev)
1641 return false;
1642
1643 if (dax_write_cache_enabled(dax_dev))
1644 return true;
1645 return false;
1646}
1647
1648static int dm_table_supports_dax_write_cache(struct dm_table *t)
1649{
1650 struct dm_target *ti;
1651 unsigned i;
1652
1653 for (i = 0; i < dm_table_get_num_targets(t); i++) {
1654 ti = dm_table_get_target(t, i);
1655
1656 if (ti->type->iterate_devices &&
1657 ti->type->iterate_devices(ti,
1658 device_dax_write_cache_enabled, NULL))
1659 return true;
1660 }
1661
1662 return false;
1663}
1664
1633static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, 1665static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1634 sector_t start, sector_t len, void *data) 1666 sector_t start, sector_t len, void *data)
1635{ 1667{
@@ -1785,6 +1817,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1785 } 1817 }
1786 blk_queue_write_cache(q, wc, fua); 1818 blk_queue_write_cache(q, wc, fua);
1787 1819
1820 if (dm_table_supports_dax_write_cache(t))
1821 dax_write_cache(t->md->dax_dev, true);
1822
1788 /* Ensure that all underlying devices are non-rotational. */ 1823 /* Ensure that all underlying devices are non-rotational. */
1789 if (dm_table_all_devices_attribute(t, device_is_nonrot)) 1824 if (dm_table_all_devices_attribute(t, device_is_nonrot))
1790 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); 1825 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 504ba3fa328b..e13f90832b6b 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -308,19 +308,14 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
308{ 308{
309 unsigned n; 309 unsigned n;
310 310
311 if (!fio->rs) { 311 if (!fio->rs)
312 fio->rs = mempool_alloc(v->fec->rs_pool, 0); 312 fio->rs = mempool_alloc(v->fec->rs_pool, GFP_NOIO);
313 if (unlikely(!fio->rs)) {
314 DMERR("failed to allocate RS");
315 return -ENOMEM;
316 }
317 }
318 313
319 fec_for_each_prealloc_buffer(n) { 314 fec_for_each_prealloc_buffer(n) {
320 if (fio->bufs[n]) 315 if (fio->bufs[n])
321 continue; 316 continue;
322 317
323 fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOIO); 318 fio->bufs[n] = mempool_alloc(v->fec->prealloc_pool, GFP_NOWAIT);
324 if (unlikely(!fio->bufs[n])) { 319 if (unlikely(!fio->bufs[n])) {
325 DMERR("failed to allocate FEC buffer"); 320 DMERR("failed to allocate FEC buffer");
326 return -ENOMEM; 321 return -ENOMEM;
@@ -332,22 +327,16 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
332 if (fio->bufs[n]) 327 if (fio->bufs[n])
333 continue; 328 continue;
334 329
335 fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOIO); 330 fio->bufs[n] = mempool_alloc(v->fec->extra_pool, GFP_NOWAIT);
336 /* we can manage with even one buffer if necessary */ 331 /* we can manage with even one buffer if necessary */
337 if (unlikely(!fio->bufs[n])) 332 if (unlikely(!fio->bufs[n]))
338 break; 333 break;
339 } 334 }
340 fio->nbufs = n; 335 fio->nbufs = n;
341 336
342 if (!fio->output) { 337 if (!fio->output)
343 fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO); 338 fio->output = mempool_alloc(v->fec->output_pool, GFP_NOIO);
344 339
345 if (!fio->output) {
346 DMERR("failed to allocate FEC page");
347 return -ENOMEM;
348 }
349 }
350
351 return 0; 340 return 0;
352} 341}
353 342
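
mempool_alloc() with GFP_NOIO waits for an element to be returned to the pool rather than failing, so the NULL checks on the rs and output allocations were dead code; the per-buffer allocations become GFP_NOWAIT because the decoder can proceed with fewer buffers. A sketch of that best-effort degradation; try_alloc() simulates the allocator refusing the later requests:

    #include <stdio.h>
    #include <stdlib.h>

    #define NBUFS 8

    static void *try_alloc(int i)
    {
        return i < 6 ? malloc(4096) : NULL;
    }

    int main(void)
    {
        void *bufs[NBUFS] = { 0 };
        int n;

        for (n = 0; n < NBUFS; n++) {
            bufs[n] = try_alloc(n);
            if (!bufs[n])
                break;          /* "we can manage with even one buffer" */
        }
        printf("decoding with %d buffers\n", n);
        while (n--)
            free(bufs[n]);
        return 0;
    }
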
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 884ff7c170a0..a4fa2ada6883 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -624,7 +624,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
624 624
625 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page); 625 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
626 if (ret == 0) 626 if (ret == 0)
627 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); 627 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
628 628
629 return ret; 629 return ret;
630} 630}
@@ -658,7 +658,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
658 658
659 /* Flush drive cache (this will also sync data) */ 659 /* Flush drive cache (this will also sync data) */
660 if (ret == 0) 660 if (ret == 0)
661 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); 661 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
662 662
663 return ret; 663 return ret;
664} 664}
@@ -722,7 +722,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
722 722
723 /* If there are no dirty metadata blocks, just flush the device cache */ 723 /* If there are no dirty metadata blocks, just flush the device cache */
724 if (list_empty(&write_list)) { 724 if (list_empty(&write_list)) {
725 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_KERNEL, NULL); 725 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
726 goto out; 726 goto out;
727 } 727 }
728 728
@@ -927,7 +927,7 @@ static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
927 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift); 927 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
928 } 928 }
929 929
930 page = alloc_page(GFP_KERNEL); 930 page = alloc_page(GFP_NOIO);
931 if (!page) 931 if (!page)
932 return -ENOMEM; 932 return -ENOMEM;
933 933
@@ -1183,7 +1183,7 @@ static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1183 1183
1184 /* Get zone information from disk */ 1184 /* Get zone information from disk */
1185 ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone), 1185 ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
1186 &blkz, &nr_blkz, GFP_KERNEL); 1186 &blkz, &nr_blkz, GFP_NOIO);
1187 if (ret) { 1187 if (ret) {
1188 dmz_dev_err(zmd->dev, "Get zone %u report failed", 1188 dmz_dev_err(zmd->dev, "Get zone %u report failed",
1189 dmz_id(zmd, zone)); 1189 dmz_id(zmd, zone));
@@ -1257,7 +1257,7 @@ static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1257 1257
1258 ret = blkdev_reset_zones(dev->bdev, 1258 ret = blkdev_reset_zones(dev->bdev,
1259 dmz_start_sect(zmd, zone), 1259 dmz_start_sect(zmd, zone),
1260 dev->zone_nr_sectors, GFP_KERNEL); 1260 dev->zone_nr_sectors, GFP_NOIO);
1261 if (ret) { 1261 if (ret) {
1262 dmz_dev_err(dev, "Reset zone %u failed %d", 1262 dmz_dev_err(dev, "Reset zone %u failed %d",
1263 dmz_id(zmd, zone), ret); 1263 dmz_id(zmd, zone), ret);
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 05c0a126f5c8..44a119e12f1a 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -75,7 +75,7 @@ static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
75 nr_blocks = block - wp_block; 75 nr_blocks = block - wp_block;
76 ret = blkdev_issue_zeroout(zrc->dev->bdev, 76 ret = blkdev_issue_zeroout(zrc->dev->bdev,
77 dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block), 77 dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
78 dmz_blk2sect(nr_blocks), GFP_NOFS, false); 78 dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
79 if (ret) { 79 if (ret) {
80 dmz_dev_err(zrc->dev, 80 dmz_dev_err(zrc->dev,
81 "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d", 81 "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 2b538fa817f4..b08bbbd4d902 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -541,7 +541,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
541 int ret; 541 int ret;
542 542
543 /* Create a new chunk work */ 543 /* Create a new chunk work */
544 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOFS); 544 cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
545 if (!cw) 545 if (!cw)
546 goto out; 546 goto out;
547 547
@@ -588,7 +588,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
588 588
589 bio->bi_bdev = dev->bdev; 589 bio->bi_bdev = dev->bdev;
590 590
591 if (!nr_sectors && (bio_op(bio) != REQ_OP_FLUSH) && (bio_op(bio) != REQ_OP_WRITE)) 591 if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
592 return DM_MAPIO_REMAPPED; 592 return DM_MAPIO_REMAPPED;
593 593
594 /* The BIO should be block aligned */ 594 /* The BIO should be block aligned */
@@ -603,7 +603,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
603 bioctx->status = BLK_STS_OK; 603 bioctx->status = BLK_STS_OK;
604 604
605 /* Set the BIO pending in the flush list */ 605 /* Set the BIO pending in the flush list */
606 if (bio_op(bio) == REQ_OP_FLUSH || (!nr_sectors && bio_op(bio) == REQ_OP_WRITE)) { 606 if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
607 spin_lock(&dmz->flush_lock); 607 spin_lock(&dmz->flush_lock);
608 bio_list_add(&dmz->flush_list, bio); 608 bio_list_add(&dmz->flush_list, bio);
609 spin_unlock(&dmz->flush_lock); 609 spin_unlock(&dmz->flush_lock);
@@ -785,7 +785,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
785 785
786 /* Chunk BIO work */ 786 /* Chunk BIO work */
787 mutex_init(&dmz->chunk_lock); 787 mutex_init(&dmz->chunk_lock);
788 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOFS); 788 INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
789 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 789 dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
790 0, dev->name); 790 0, dev->name);
791 if (!dmz->chunk_wq) { 791 if (!dmz->chunk_wq) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..b01e458d31e9 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
2287 2287
2288static bool set_in_sync(struct mddev *mddev) 2288static bool set_in_sync(struct mddev *mddev)
2289{ 2289{
2290 WARN_ON_ONCE(!spin_is_locked(&mddev->lock)); 2290 WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
2291 if (!mddev->in_sync) { 2291 if (!mddev->in_sync) {
2292 mddev->sync_checkers++; 2292 mddev->sync_checkers++;
2293 spin_unlock(&mddev->lock); 2293 spin_unlock(&mddev->lock);
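
On uniprocessor builds spinlocks compile away and spin_is_locked() is always false, so the unguarded WARN_ON_ONCE(!spin_is_locked(...)) fired spuriously; short-circuiting on NR_CPUS != 1 keeps the assertion meaningful only where it can hold. A trivial illustration:

    #include <stdio.h>

    #define NR_CPUS 1   /* pretend this is a UP build */

    static int spin_is_locked_model(void)
    {
        return 0;   /* what a no-op UP spinlock reports */
    }

    int main(void)
    {
        int warn = NR_CPUS != 1 && !spin_is_locked_model();

        printf("warning fires: %d\n", warn);   /* 0: suppressed on UP */
        return 0;
    }
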
@@ -7996,7 +7996,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
7996 if (mddev->safemode == 1) 7996 if (mddev->safemode == 1)
7997 mddev->safemode = 0; 7997 mddev->safemode = 0;
7998 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */ 7998 /* sync_checkers is always 0 when writes_pending is in per-cpu mode */
7999 if (mddev->in_sync || !mddev->sync_checkers) { 7999 if (mddev->in_sync || mddev->sync_checkers) {
8000 spin_lock(&mddev->lock); 8000 spin_lock(&mddev->lock);
8001 if (mddev->in_sync) { 8001 if (mddev->in_sync) {
8002 mddev->in_sync = 0; 8002 mddev->in_sync = 0;
@@ -8656,6 +8656,9 @@ void md_check_recovery(struct mddev *mddev)
8656 if (mddev_trylock(mddev)) { 8656 if (mddev_trylock(mddev)) {
8657 int spares = 0; 8657 int spares = 0;
8658 8658
8659 if (!mddev->external && mddev->safemode == 1)
8660 mddev->safemode = 0;
8661
8659 if (mddev->ro) { 8662 if (mddev->ro) {
8660 struct md_rdev *rdev; 8663 struct md_rdev *rdev;
8661 if (!mddev->external && mddev->in_sync) 8664 if (!mddev->external && mddev->in_sync)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 991f0fe2dcc6..09db03455801 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,7 +134,9 @@ enum flag_bits {
134 Faulty, /* device is known to have a fault */ 134 Faulty, /* device is known to have a fault */
135 In_sync, /* device is in_sync with rest of array */ 135 In_sync, /* device is in_sync with rest of array */
136 Bitmap_sync, /* ..actually, not quite In_sync. Need a 136 Bitmap_sync, /* ..actually, not quite In_sync. Need a
137 * bitmap-based recovery to get fully in sync 137 * bitmap-based recovery to get fully in sync.
138 * The bit is only meaningful before device
139 * has been passed to pers->hot_add_disk.
138 */ 140 */
139 WriteMostly, /* Avoid reading if at all possible */ 141 WriteMostly, /* Avoid reading if at all possible */
140 AutoDetected, /* added by auto-detect */ 142 AutoDetected, /* added by auto-detect */
@@ -729,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
729 !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) 731 !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
730 mddev->queue->limits.max_write_zeroes_sectors = 0; 732 mddev->queue->limits.max_write_zeroes_sectors = 0;
731} 733}
732
733/* Maximum size of each resync request */
734#define RESYNC_BLOCK_SIZE (64*1024)
735#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
736
737/* for managing resync I/O pages */
738struct resync_pages {
739 unsigned idx; /* for get/put page from the pool */
740 void *raid_bio;
741 struct page *pages[RESYNC_PAGES];
742};
743
744static inline int resync_alloc_pages(struct resync_pages *rp,
745 gfp_t gfp_flags)
746{
747 int i;
748
749 for (i = 0; i < RESYNC_PAGES; i++) {
750 rp->pages[i] = alloc_page(gfp_flags);
751 if (!rp->pages[i])
752 goto out_free;
753 }
754
755 return 0;
756
757out_free:
758 while (--i >= 0)
759 put_page(rp->pages[i]);
760 return -ENOMEM;
761}
762
763static inline void resync_free_pages(struct resync_pages *rp)
764{
765 int i;
766
767 for (i = 0; i < RESYNC_PAGES; i++)
768 put_page(rp->pages[i]);
769}
770
771static inline void resync_get_all_pages(struct resync_pages *rp)
772{
773 int i;
774
775 for (i = 0; i < RESYNC_PAGES; i++)
776 get_page(rp->pages[i]);
777}
778
779static inline struct page *resync_fetch_page(struct resync_pages *rp,
780 unsigned idx)
781{
782 if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
783 return NULL;
784 return rp->pages[idx];
785}
786#endif /* _MD_MD_H */ 734#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
1/* Maximum size of each resync request */
2#define RESYNC_BLOCK_SIZE (64*1024)
3#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
4
5/* for managing resync I/O pages */
6struct resync_pages {
7 void *raid_bio;
8 struct page *pages[RESYNC_PAGES];
9};
10
11static inline int resync_alloc_pages(struct resync_pages *rp,
12 gfp_t gfp_flags)
13{
14 int i;
15
16 for (i = 0; i < RESYNC_PAGES; i++) {
17 rp->pages[i] = alloc_page(gfp_flags);
18 if (!rp->pages[i])
19 goto out_free;
20 }
21
22 return 0;
23
24out_free:
25 while (--i >= 0)
26 put_page(rp->pages[i]);
27 return -ENOMEM;
28}
29
30static inline void resync_free_pages(struct resync_pages *rp)
31{
32 int i;
33
34 for (i = 0; i < RESYNC_PAGES; i++)
35 put_page(rp->pages[i]);
36}
37
38static inline void resync_get_all_pages(struct resync_pages *rp)
39{
40 int i;
41
42 for (i = 0; i < RESYNC_PAGES; i++)
43 get_page(rp->pages[i]);
44}
45
46static inline struct page *resync_fetch_page(struct resync_pages *rp,
47 unsigned idx)
48{
49 if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
50 return NULL;
51 return rp->pages[idx];
52}
53
54/*
 55 * 'struct resync_pages' stores actual pages used for doing the resync
 56 * IO, and it is per-bio, so make .bi_private point to it.
57 */
58static inline struct resync_pages *get_resync_pages(struct bio *bio)
59{
60 return bio->bi_private;
61}
62
 63/* generally called after bio_reset() for resetting bvec */
64static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
65 int size)
66{
67 int idx = 0;
68
69 /* initialize bvec table again */
70 do {
71 struct page *page = resync_fetch_page(rp, idx);
72 int len = min_t(int, size, PAGE_SIZE);
73
74 /*
75 * won't fail because the vec table is big
76 * enough to hold all these pages
77 */
78 bio_add_page(bio, page, len, 0);
79 size -= len;
80 } while (idx++ < RESYNC_PAGES && size > 0);
81}
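
For reference, a hedged userspace approximation of the lifecycle the shared helpers imply; malloc() stands in for alloc_page()/put_page(), and the loop shows the local page_idx that replaces the removed rp->idx field:

    #include <stdio.h>
    #include <stdlib.h>

    #define RESYNC_PAGES 16

    struct resync_pages_model {
        void *pages[RESYNC_PAGES];
    };

    static int model_alloc(struct resync_pages_model *rp)
    {
        for (int i = 0; i < RESYNC_PAGES; i++) {
            rp->pages[i] = malloc(4096);
            if (!rp->pages[i]) {
                while (--i >= 0)    /* unwind the partial allocation */
                    free(rp->pages[i]);
                return -1;          /* -ENOMEM in the kernel version */
            }
        }
        return 0;
    }

    static void model_free(struct resync_pages_model *rp)
    {
        for (int i = 0; i < RESYNC_PAGES; i++)
            free(rp->pages[i]);
    }

    int main(void)
    {
        struct resync_pages_model rp;

        if (model_alloc(&rp))
            return 1;
        /* callers keep a local page_idx instead of a field in rp */
        for (int page_idx = 0; page_idx < RESYNC_PAGES; page_idx++)
            printf("page %d at %p\n", page_idx, rp.pages[page_idx]);
        model_free(&rp);
        return 0;
    }
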
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f50958ded9f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
81#define raid1_log(md, fmt, args...) \ 81#define raid1_log(md, fmt, args...) \
82 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0) 82 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
83 83
84/* 84#include "raid1-10.c"
85 * 'strct resync_pages' stores actual pages used for doing the resync
86 * IO, and it is per-bio, so make .bi_private points to it.
87 */
88static inline struct resync_pages *get_resync_pages(struct bio *bio)
89{
90 return bio->bi_private;
91}
92 85
93/* 86/*
94 * for resync bio, r1bio pointer can be retrieved from the per-bio 87 * for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
170 resync_get_all_pages(rp); 163 resync_get_all_pages(rp);
171 } 164 }
172 165
173 rp->idx = 0;
174 rp->raid_bio = r1_bio; 166 rp->raid_bio = r1_bio;
175 bio->bi_private = rp; 167 bio->bi_private = rp;
176 } 168 }
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
492 } 484 }
493 485
494 if (behind) { 486 if (behind) {
495 /* we release behind master bio when all write are done */
496 if (r1_bio->behind_master_bio == bio)
497 to_put = NULL;
498
499 if (test_bit(WriteMostly, &rdev->flags)) 487 if (test_bit(WriteMostly, &rdev->flags))
500 atomic_dec(&r1_bio->behind_remaining); 488 atomic_dec(&r1_bio->behind_remaining);
501 489
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
802 bio->bi_next = NULL; 790 bio->bi_next = NULL;
803 bio->bi_bdev = rdev->bdev; 791 bio->bi_bdev = rdev->bdev;
804 if (test_bit(Faulty, &rdev->flags)) { 792 if (test_bit(Faulty, &rdev->flags)) {
805 bio->bi_status = BLK_STS_IOERR; 793 bio_io_error(bio);
806 bio_endio(bio);
807 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 794 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
808 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 795 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
809 /* Just ignore it */ 796 /* Just ignore it */
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
1088 wake_up(&conf->wait_barrier); 1075 wake_up(&conf->wait_barrier);
1089} 1076}
1090 1077
1091static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio, 1078static void alloc_behind_master_bio(struct r1bio *r1_bio,
1092 struct bio *bio) 1079 struct bio *bio)
1093{ 1080{
1094 int size = bio->bi_iter.bi_size; 1081 int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
1098 1085
1099 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); 1086 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1100 if (!behind_bio) 1087 if (!behind_bio)
1101 goto fail; 1088 return;
1102 1089
1103 /* discard op, we don't support writezero/writesame yet */ 1090 /* discard op, we don't support writezero/writesame yet */
1104 if (!bio_has_data(bio)) 1091 if (!bio_has_data(bio)) {
1092 behind_bio->bi_iter.bi_size = size;
1105 goto skip_copy; 1093 goto skip_copy;
1094 }
1106 1095
1107 while (i < vcnt && size) { 1096 while (i < vcnt && size) {
1108 struct page *page; 1097 struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
1123 r1_bio->behind_master_bio = behind_bio;; 1112 r1_bio->behind_master_bio = behind_bio;;
1124 set_bit(R1BIO_BehindIO, &r1_bio->state); 1113 set_bit(R1BIO_BehindIO, &r1_bio->state);
1125 1114
1126 return behind_bio; 1115 return;
1127 1116
1128free_pages: 1117free_pages:
1129 pr_debug("%dB behind alloc failed, doing sync I/O\n", 1118 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1130 bio->bi_iter.bi_size); 1119 bio->bi_iter.bi_size);
1131 bio_free_pages(behind_bio); 1120 bio_free_pages(behind_bio);
1132fail: 1121 bio_put(behind_bio);
1133 return behind_bio;
1134} 1122}
1135 1123
1136struct raid1_plug_cb { 1124struct raid1_plug_cb {
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1483 (atomic_read(&bitmap->behind_writes) 1471 (atomic_read(&bitmap->behind_writes)
1484 < mddev->bitmap_info.max_write_behind) && 1472 < mddev->bitmap_info.max_write_behind) &&
1485 !waitqueue_active(&bitmap->behind_wait)) { 1473 !waitqueue_active(&bitmap->behind_wait)) {
1486 mbio = alloc_behind_master_bio(r1_bio, bio); 1474 alloc_behind_master_bio(r1_bio, bio);
1487 } 1475 }
1488 1476
1489 bitmap_startwrite(bitmap, r1_bio->sector, 1477 bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1493 first_clone = 0; 1481 first_clone = 0;
1494 } 1482 }
1495 1483
1496 if (!mbio) { 1484 if (r1_bio->behind_master_bio)
1497 if (r1_bio->behind_master_bio) 1485 mbio = bio_clone_fast(r1_bio->behind_master_bio,
1498 mbio = bio_clone_fast(r1_bio->behind_master_bio, 1486 GFP_NOIO, mddev->bio_set);
1499 GFP_NOIO, 1487 else
1500 mddev->bio_set); 1488 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
1501 else
1502 mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
1503 }
1504 1489
1505 if (r1_bio->behind_master_bio) { 1490 if (r1_bio->behind_master_bio) {
1506 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 1491 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
2086 /* Fix variable parts of all bios */ 2071 /* Fix variable parts of all bios */
2087 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9); 2072 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2088 for (i = 0; i < conf->raid_disks * 2; i++) { 2073 for (i = 0; i < conf->raid_disks * 2; i++) {
2089 int j;
2090 int size;
2091 blk_status_t status; 2074 blk_status_t status;
2092 struct bio_vec *bi;
2093 struct bio *b = r1_bio->bios[i]; 2075 struct bio *b = r1_bio->bios[i];
2094 struct resync_pages *rp = get_resync_pages(b); 2076 struct resync_pages *rp = get_resync_pages(b);
2095 if (b->bi_end_io != end_sync_read) 2077 if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
2098 status = b->bi_status; 2080 status = b->bi_status;
2099 bio_reset(b); 2081 bio_reset(b);
2100 b->bi_status = status; 2082 b->bi_status = status;
2101 b->bi_vcnt = vcnt;
2102 b->bi_iter.bi_size = r1_bio->sectors << 9;
2103 b->bi_iter.bi_sector = r1_bio->sector + 2083 b->bi_iter.bi_sector = r1_bio->sector +
2104 conf->mirrors[i].rdev->data_offset; 2084 conf->mirrors[i].rdev->data_offset;
2105 b->bi_bdev = conf->mirrors[i].rdev->bdev; 2085 b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
2107 rp->raid_bio = r1_bio; 2087 rp->raid_bio = r1_bio;
2108 b->bi_private = rp; 2088 b->bi_private = rp;
2109 2089
2110 size = b->bi_iter.bi_size; 2090 /* initialize bvec table again */
2111 bio_for_each_segment_all(bi, b, j) { 2091 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2112 bi->bv_offset = 0;
2113 if (size > PAGE_SIZE)
2114 bi->bv_len = PAGE_SIZE;
2115 else
2116 bi->bv_len = size;
2117 size -= PAGE_SIZE;
2118 }
2119 } 2092 }
2120 for (primary = 0; primary < conf->raid_disks * 2; primary++) 2093 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2121 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 2094 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
2366 wbio = bio_clone_fast(r1_bio->behind_master_bio, 2339 wbio = bio_clone_fast(r1_bio->behind_master_bio,
2367 GFP_NOIO, 2340 GFP_NOIO,
2368 mddev->bio_set); 2341 mddev->bio_set);
2369 /* We really need a _all clone */
2370 wbio->bi_iter = (struct bvec_iter){ 0 };
2371 } else { 2342 } else {
2372 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO, 2343 wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2373 mddev->bio_set); 2344 mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2619 int good_sectors = RESYNC_SECTORS; 2590 int good_sectors = RESYNC_SECTORS;
2620 int min_bad = 0; /* number of sectors that are bad in all devices */ 2591 int min_bad = 0; /* number of sectors that are bad in all devices */
2621 int idx = sector_to_idx(sector_nr); 2592 int idx = sector_to_idx(sector_nr);
2593 int page_idx = 0;
2622 2594
2623 if (!conf->r1buf_pool) 2595 if (!conf->r1buf_pool)
2624 if (init_resync(conf)) 2596 if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2846 bio = r1_bio->bios[i]; 2818 bio = r1_bio->bios[i];
2847 rp = get_resync_pages(bio); 2819 rp = get_resync_pages(bio);
2848 if (bio->bi_end_io) { 2820 if (bio->bi_end_io) {
2849 page = resync_fetch_page(rp, rp->idx++); 2821 page = resync_fetch_page(rp, page_idx);
2850 2822
2851 /* 2823 /*
2852 * won't fail because the vec table is big 2824 * won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2858 nr_sectors += len>>9; 2830 nr_sectors += len>>9;
2859 sector_nr += len>>9; 2831 sector_nr += len>>9;
2860 sync_blocks -= (len>>9); 2832 sync_blocks -= (len>>9);
2861 } while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES); 2833 } while (++page_idx < RESYNC_PAGES);
2862 2834
2863 r1_bio->sectors = nr_sectors; 2835 r1_bio->sectors = nr_sectors;
2864 2836
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
110#define raid10_log(md, fmt, args...) \ 110#define raid10_log(md, fmt, args...) \
111 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) 111 do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
112 112
113/* 113#include "raid1-10.c"
114 * 'strct resync_pages' stores actual pages used for doing the resync
115 * IO, and it is per-bio, so make .bi_private points to it.
116 */
117static inline struct resync_pages *get_resync_pages(struct bio *bio)
118{
119 return bio->bi_private;
120}
121 114
122/* 115/*
123 * for resync bio, r10bio pointer can be retrieved from the per-bio 116 * for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
221 resync_get_all_pages(rp); 214 resync_get_all_pages(rp);
222 } 215 }
223 216
224 rp->idx = 0;
225 rp->raid_bio = r10_bio; 217 rp->raid_bio = r10_bio;
226 bio->bi_private = rp; 218 bio->bi_private = rp;
227 if (rbio) { 219 if (rbio) {
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf)
913 bio->bi_next = NULL; 905 bio->bi_next = NULL;
914 bio->bi_bdev = rdev->bdev; 906 bio->bi_bdev = rdev->bdev;
915 if (test_bit(Faulty, &rdev->flags)) { 907 if (test_bit(Faulty, &rdev->flags)) {
916 bio->bi_status = BLK_STS_IOERR; 908 bio_io_error(bio);
917 bio_endio(bio);
918 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 909 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
919 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 910 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
920 /* Just ignore it */ 911 /* Just ignore it */
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1098 bio->bi_next = NULL; 1089 bio->bi_next = NULL;
1099 bio->bi_bdev = rdev->bdev; 1090 bio->bi_bdev = rdev->bdev;
1100 if (test_bit(Faulty, &rdev->flags)) { 1091 if (test_bit(Faulty, &rdev->flags)) {
1101 bio->bi_status = BLK_STS_IOERR; 1092 bio_io_error(bio);
1102 bio_endio(bio);
1103 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1093 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1104 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1094 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1105 /* Just ignore it */ 1095 /* Just ignore it */
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2087 rp = get_resync_pages(tbio); 2077 rp = get_resync_pages(tbio);
2088 bio_reset(tbio); 2078 bio_reset(tbio);
2089 2079
2090 tbio->bi_vcnt = vcnt; 2080 md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
2091 tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; 2081
2092 rp->raid_bio = r10_bio; 2082 rp->raid_bio = r10_bio;
2093 tbio->bi_private = rp; 2083 tbio->bi_private = rp;
2094 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; 2084 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2853 sector_t sectors_skipped = 0; 2843 sector_t sectors_skipped = 0;
2854 int chunks_skipped = 0; 2844 int chunks_skipped = 0;
2855 sector_t chunk_mask = conf->geo.chunk_mask; 2845 sector_t chunk_mask = conf->geo.chunk_mask;
2846 int page_idx = 0;
2856 2847
2857 if (!conf->r10buf_pool) 2848 if (!conf->r10buf_pool)
2858 if (init_resync(conf)) 2849 if (init_resync(conf))
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3355 break; 3346 break;
3356 for (bio= biolist ; bio ; bio=bio->bi_next) { 3347 for (bio= biolist ; bio ; bio=bio->bi_next) {
3357 struct resync_pages *rp = get_resync_pages(bio); 3348 struct resync_pages *rp = get_resync_pages(bio);
3358 page = resync_fetch_page(rp, rp->idx++); 3349 page = resync_fetch_page(rp, page_idx);
3359 /* 3350 /*
3360 * won't fail because the vec table is big enough 3351 * won't fail because the vec table is big enough
3361 * to hold all these pages 3352 * to hold all these pages
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3364 } 3355 }
3365 nr_sectors += len>>9; 3356 nr_sectors += len>>9;
3366 sector_nr += len>>9; 3357 sector_nr += len>>9;
3367 } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); 3358 } while (++page_idx < RESYNC_PAGES);
3368 r10_bio->sectors = nr_sectors; 3359 r10_bio->sectors = nr_sectors;
3369 3360
3370 while (biolist) { 3361 while (biolist) {
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index bfa1e907c472..2dcbafa8e66c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -236,9 +236,10 @@ struct r5l_io_unit {
236 bool need_split_bio; 236 bool need_split_bio;
237 struct bio *split_bio; 237 struct bio *split_bio;
238 238
239 unsigned int has_flush:1; /* include flush request */ 239 unsigned int has_flush:1; /* include flush request */
240 unsigned int has_fua:1; /* include fua request */ 240 unsigned int has_fua:1; /* include fua request */
241 unsigned int has_null_flush:1; /* include empty flush request */ 241 unsigned int has_null_flush:1; /* include null flush request */
242 unsigned int has_flush_payload:1; /* include flush payload */
242 /* 243 /*
243 * io isn't sent yet, flush/fua request can only be submitted till it's 244 * io isn't sent yet, flush/fua request can only be submitted till it's
244 * the first IO in running_ios list 245 * the first IO in running_ios list
@@ -571,6 +572,8 @@ static void r5l_log_endio(struct bio *bio)
571 struct r5l_io_unit *io_deferred; 572 struct r5l_io_unit *io_deferred;
572 struct r5l_log *log = io->log; 573 struct r5l_log *log = io->log;
573 unsigned long flags; 574 unsigned long flags;
575 bool has_null_flush;
576 bool has_flush_payload;
574 577
575 if (bio->bi_status) 578 if (bio->bi_status)
576 md_error(log->rdev->mddev, log->rdev); 579 md_error(log->rdev->mddev, log->rdev);
@@ -580,6 +583,16 @@ static void r5l_log_endio(struct bio *bio)
580 583
581 spin_lock_irqsave(&log->io_list_lock, flags); 584 spin_lock_irqsave(&log->io_list_lock, flags);
582 __r5l_set_io_unit_state(io, IO_UNIT_IO_END); 585 __r5l_set_io_unit_state(io, IO_UNIT_IO_END);
586
587 /*
 588 * if the io has null_flush or flush payload, the io can be
 589 * freed once io_list_lock is released, so it is not safe to
 590 * access it afterwards. Therefore, snapshot both flags while
 591 * holding the lock.
592 */
593 has_null_flush = io->has_null_flush;
594 has_flush_payload = io->has_flush_payload;
595
583 if (log->need_cache_flush && !list_empty(&io->stripe_list)) 596 if (log->need_cache_flush && !list_empty(&io->stripe_list))
584 r5l_move_to_end_ios(log); 597 r5l_move_to_end_ios(log);
585 else 598 else
@@ -600,19 +613,23 @@ static void r5l_log_endio(struct bio *bio)
600 if (log->need_cache_flush) 613 if (log->need_cache_flush)
601 md_wakeup_thread(log->rdev->mddev->thread); 614 md_wakeup_thread(log->rdev->mddev->thread);
602 615
603 if (io->has_null_flush) { 616 /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
617 if (has_null_flush) {
604 struct bio *bi; 618 struct bio *bi;
605 619
606 WARN_ON(bio_list_empty(&io->flush_barriers)); 620 WARN_ON(bio_list_empty(&io->flush_barriers));
607 while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) { 621 while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
608 bio_endio(bi); 622 bio_endio(bi);
609 atomic_dec(&io->pending_stripe); 623 if (atomic_dec_and_test(&io->pending_stripe)) {
624 __r5l_stripe_write_finished(io);
625 return;
626 }
610 } 627 }
611 } 628 }
612 629 /* decrease pending_stripe for flush payload */
613 /* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */ 630 if (has_flush_payload)
614 if (atomic_read(&io->pending_stripe) == 0) 631 if (atomic_dec_and_test(&io->pending_stripe))
615 __r5l_stripe_write_finished(io); 632 __r5l_stripe_write_finished(io);
616} 633}
617 634
618static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io) 635static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
@@ -881,6 +898,11 @@ static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
881 payload->size = cpu_to_le32(sizeof(__le64)); 898 payload->size = cpu_to_le32(sizeof(__le64));
882 payload->flush_stripes[0] = cpu_to_le64(sect); 899 payload->flush_stripes[0] = cpu_to_le64(sect);
883 io->meta_offset += meta_size; 900 io->meta_offset += meta_size;
901 /* multiple flush payloads count as one pending_stripe */
902 if (!io->has_flush_payload) {
903 io->has_flush_payload = 1;
904 atomic_inc(&io->pending_stripe);
905 }
884 mutex_unlock(&log->io_mutex); 906 mutex_unlock(&log->io_mutex);
885} 907}
886 908
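
pending_stripe acts as a reference count on the io_unit: all flush payloads in a unit share a single reference, taken when the first one is appended, and atomic_dec_and_test() finishes the unit when the last reference drops. A plain-int model of the accounting; the starting reference for the log I/O itself is an assumption of this sketch:

    #include <stdio.h>

    struct io_unit {
        int pending_stripe;
        int has_flush_payload;
    };

    static void append_flush_payload(struct io_unit *io)
    {
        if (!io->has_flush_payload) {   /* many payloads, one reference */
            io->has_flush_payload = 1;
            io->pending_stripe++;
        }
    }

    static void put_ref(struct io_unit *io)
    {
        /* atomic_dec_and_test() in the kernel version */
        if (--io->pending_stripe == 0)
            puts("stripe write finished, io_unit can be freed");
    }

    int main(void)
    {
        struct io_unit io = { .pending_stripe = 1, .has_flush_payload = 0 };

        append_flush_payload(&io);
        append_flush_payload(&io);   /* no extra reference taken */
        put_ref(&io);                /* log bio completes */
        put_ref(&io);                /* flush-payload reference drops last */
        return 0;
    }
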
@@ -2540,23 +2562,32 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
2540 */ 2562 */
2541int r5c_journal_mode_set(struct mddev *mddev, int mode) 2563int r5c_journal_mode_set(struct mddev *mddev, int mode)
2542{ 2564{
2543 struct r5conf *conf = mddev->private; 2565 struct r5conf *conf;
2544 struct r5l_log *log = conf->log; 2566 int err;
2545
2546 if (!log)
2547 return -ENODEV;
2548 2567
2549 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || 2568 if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
2550 mode > R5C_JOURNAL_MODE_WRITE_BACK) 2569 mode > R5C_JOURNAL_MODE_WRITE_BACK)
2551 return -EINVAL; 2570 return -EINVAL;
2552 2571
2572 err = mddev_lock(mddev);
2573 if (err)
2574 return err;
2575 conf = mddev->private;
2576 if (!conf || !conf->log) {
2577 mddev_unlock(mddev);
2578 return -ENODEV;
2579 }
2580
2553 if (raid5_calc_degraded(conf) > 0 && 2581 if (raid5_calc_degraded(conf) > 0 &&
2554 mode == R5C_JOURNAL_MODE_WRITE_BACK) 2582 mode == R5C_JOURNAL_MODE_WRITE_BACK) {
2583 mddev_unlock(mddev);
2555 return -EINVAL; 2584 return -EINVAL;
2585 }
2556 2586
2557 mddev_suspend(mddev); 2587 mddev_suspend(mddev);
2558 conf->log->r5c_journal_mode = mode; 2588 conf->log->r5c_journal_mode = mode;
2559 mddev_resume(mddev); 2589 mddev_resume(mddev);
2590 mddev_unlock(mddev);
2560 2591
2561 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", 2592 pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
2562 mdname(mddev), mode, r5c_journal_mode_str[mode]); 2593 mdname(mddev), mode, r5c_journal_mode_str[mode]);
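
r5c_journal_mode_set() now validates the mode before locking, takes mddev_lock() before dereferencing mddev->private, and re-checks conf->log under the lock, since the log can be torn down concurrently; every early exit must drop the lock. A pthread sketch of the same shape; mode_set() and log_ptr are invented:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int *log_ptr;   /* stands in for conf->log; may be NULL */

    static int mode_set(int mode)
    {
        if (mode < 0 || mode > 1)
            return -EINVAL;     /* cheap check, no lock needed */

        pthread_mutex_lock(&lock);
        if (!log_ptr) {         /* state can change, so check under the lock */
            pthread_mutex_unlock(&lock);
            return -ENODEV;
        }
        *log_ptr = mode;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        static int journal_mode;

        printf("%d\n", mode_set(1));   /* -ENODEV: no log attached */
        log_ptr = &journal_mode;
        printf("%d\n", mode_set(1));   /* 0 */
        return 0;
    }
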
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 77cce3573aa8..44ad5baf3206 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
1150 goto err; 1150 goto err;
1151 } 1151 }
1152 1152
1153 ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0); 1153 ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
1154 if (!ppl_conf->bs) { 1154 if (!ppl_conf->bs) {
1155 ret = -ENOMEM; 1155 ret = -ENOMEM;
1156 goto err; 1156 goto err;
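Note: bioset_create() takes an explicit flags argument in this series; without BIOSET_NEED_BVECS the set has no bvec mempool, and bios allocated from it cannot carry data pages, which is exactly what the PPL writeout needs. A hedged sketch (the pool size of 64 is an arbitrary example):

	#include <linux/bio.h>

	static struct bio_set *my_bioset_create(void)
	{
		/* 64 entries, no front padding, with a bvec pool */
		return bioset_create(64, 0, BIOSET_NEED_BVECS);
	}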
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ceb338b094b..0fc2748aaf95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3381 sh->dev[i].sector + STRIPE_SECTORS) { 3381 sh->dev[i].sector + STRIPE_SECTORS) {
3382 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3382 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
3383 3383
3384 bi->bi_status = BLK_STS_IOERR;
3385 md_write_end(conf->mddev); 3384 md_write_end(conf->mddev);
3386 bio_endio(bi); 3385 bio_io_error(bi);
3387 bi = nextbi; 3386 bi = nextbi;
3388 } 3387 }
3389 if (bitmap_end) 3388 if (bitmap_end)
@@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3403 sh->dev[i].sector + STRIPE_SECTORS) { 3402 sh->dev[i].sector + STRIPE_SECTORS) {
3404 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3403 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
3405 3404
3406 bi->bi_status = BLK_STS_IOERR;
3407 md_write_end(conf->mddev); 3405 md_write_end(conf->mddev);
3408 bio_endio(bi); 3406 bio_io_error(bi);
3409 bi = bi2; 3407 bi = bi2;
3410 } 3408 }
3411 3409
@@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3429 struct bio *nextbi = 3427 struct bio *nextbi =
3430 r5_next_bio(bi, sh->dev[i].sector); 3428 r5_next_bio(bi, sh->dev[i].sector);
3431 3429
3432 bi->bi_status = BLK_STS_IOERR; 3430 bio_io_error(bi);
3433 bio_endio(bi);
3434 bi = nextbi; 3431 bi = nextbi;
3435 } 3432 }
3436 } 3433 }
@@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work)
6237 pr_debug("%d stripes handled\n", handled); 6234 pr_debug("%d stripes handled\n", handled);
6238 6235
6239 spin_unlock_irq(&conf->device_lock); 6236 spin_unlock_irq(&conf->device_lock);
6237
6238 async_tx_issue_pending_all();
6240 blk_finish_plug(&plug); 6239 blk_finish_plug(&plug);
6241 6240
6242 pr_debug("--- raid5worker inactive\n"); 6241 pr_debug("--- raid5worker inactive\n");
@@ -7951,12 +7950,10 @@ static void end_reshape(struct r5conf *conf)
7951{ 7950{
7952 7951
7953 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 7952 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
7954 struct md_rdev *rdev;
7955 7953
7956 spin_lock_irq(&conf->device_lock); 7954 spin_lock_irq(&conf->device_lock);
7957 conf->previous_raid_disks = conf->raid_disks; 7955 conf->previous_raid_disks = conf->raid_disks;
7958 rdev_for_each(rdev, conf->mddev) 7956 md_finish_reshape(conf->mddev);
7959 rdev->data_offset = rdev->new_data_offset;
7960 smp_wmb(); 7957 smp_wmb();
7961 conf->reshape_progress = MaxSector; 7958 conf->reshape_progress = MaxSector;
7962 conf->mddev->reshape_position = MaxSector; 7959 conf->mddev->reshape_position = MaxSector;
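Note: the three handle_failed_stripe() hunks fold the repeated set-status-then-complete pair into bio_io_error(), which at this point in the tree is simply the following inline from <linux/bio.h>, shown for reference:

	static inline void bio_io_error(struct bio *bio)
	{
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}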
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index bf45977b2823..d596b601ff42 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(cec_transmit_done);
559 559
560void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status) 560void cec_transmit_attempt_done(struct cec_adapter *adap, u8 status)
561{ 561{
562 switch (status) { 562 switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
563 case CEC_TX_STATUS_OK: 563 case CEC_TX_STATUS_OK:
564 cec_transmit_done(adap, status, 0, 0, 0, 0); 564 cec_transmit_done(adap, status, 0, 0, 0, 0);
565 return; 565 return;
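Note: drivers may report a composite status such as CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES, which matches no single case label; masking the retries bit off restores the match while cec_transmit_done() still receives the full value. A small illustration of the masking idiom:

	static bool tx_was_nacked(u8 status)
	{
		/* strip the flag that may be OR'ed onto any result */
		switch (status & ~CEC_TX_STATUS_MAX_RETRIES) {
		case CEC_TX_STATUS_NACK:
			return true;
		default:
			return false;
		}
	}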
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 74dc1c32080e..08b619d0ea1e 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -87,6 +87,9 @@ EXPORT_SYMBOL_GPL(cec_notifier_put);
87 87
88void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa) 88void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
89{ 89{
90 if (n == NULL)
91 return;
92
90 mutex_lock(&n->lock); 93 mutex_lock(&n->lock);
91 n->phys_addr = pa; 94 n->phys_addr = pa;
92 if (n->callback) 95 if (n->callback)
@@ -100,6 +103,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
100{ 103{
101 u16 pa = CEC_PHYS_ADDR_INVALID; 104 u16 pa = CEC_PHYS_ADDR_INVALID;
102 105
106 if (n == NULL)
107 return;
108
103 if (edid && edid->extensions) 109 if (edid && edid->extensions)
104 pa = cec_get_edid_phys_addr((const u8 *)edid, 110 pa = cec_get_edid_phys_addr((const u8 *)edid,
105 EDID_LENGTH * (edid->extensions + 1), NULL); 111 EDID_LENGTH * (edid->extensions + 1), NULL);
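Note: with both setters tolerating a NULL notifier, an HDMI driver whose CEC notifier is optional can call them unconditionally instead of guarding every call site. Caller-side sketch with a hypothetical driver struct:

	struct my_hdmi {
		struct cec_notifier *notifier;	/* may legitimately be NULL */
	};

	static void my_hdmi_hotplug(struct my_hdmi *hdmi, u16 pa)
	{
		/* no per-call "if (hdmi->notifier)" needed any more */
		cec_notifier_set_phys_addr(hdmi->notifier, pa);
	}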
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index af694f2066a2..17970cdd55fa 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -349,7 +349,8 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
349 /* read the buffer size from the CAM */ 349 /* read the buffer size from the CAM */
350 if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0) 350 if ((ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN | CMDREG_SR)) != 0)
351 return ret; 351 return ret;
352 if ((ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ / 10)) != 0) 352 ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ);
353 if (ret != 0)
353 return ret; 354 return ret;
354 if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2) 355 if ((ret = dvb_ca_en50221_read_data(ca, slot, buf, 2)) != 2)
355 return -EIO; 356 return -EIO;
@@ -644,72 +645,101 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
644 } 645 }
645 buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer); 646 buf_free = dvb_ringbuffer_free(&ca->slot_info[slot].rx_buffer);
646 647
647 if (buf_free < (ca->slot_info[slot].link_buf_size + DVB_RINGBUFFER_PKTHDRSIZE)) { 648 if (buf_free < (ca->slot_info[slot].link_buf_size +
649 DVB_RINGBUFFER_PKTHDRSIZE)) {
648 status = -EAGAIN; 650 status = -EAGAIN;
649 goto exit; 651 goto exit;
650 } 652 }
651 } 653 }
652 654
653 /* check if there is data available */ 655 if (ca->pub->read_data &&
654 if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) 656 (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT)) {
655 goto exit; 657 if (ebuf == NULL)
656 if (!(status & STATUSREG_DA)) { 658 status = ca->pub->read_data(ca->pub, slot, buf,
657 /* no data */ 659 sizeof(buf));
658 status = 0; 660 else
659 goto exit; 661 status = ca->pub->read_data(ca->pub, slot, buf, ecount);
660 } 662 if (status < 0)
661 663 return status;
662 /* read the amount of data */ 664 bytes_read = status;
663 if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH)) < 0) 665 if (status == 0)
664 goto exit; 666 goto exit;
665 bytes_read = status << 8; 667 } else {
666 if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW)) < 0)
667 goto exit;
668 bytes_read |= status;
669 668
670 /* check it will fit */ 669 /* check if there is data available */
671 if (ebuf == NULL) { 670 status = ca->pub->read_cam_control(ca->pub, slot,
672 if (bytes_read > ca->slot_info[slot].link_buf_size) { 671 CTRLIF_STATUS);
673 pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n", 672 if (status < 0)
674 ca->dvbdev->adapter->num, bytes_read,
675 ca->slot_info[slot].link_buf_size);
676 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
677 status = -EIO;
678 goto exit; 673 goto exit;
679 } 674 if (!(status & STATUSREG_DA)) {
680 if (bytes_read < 2) { 675 /* no data */
681 pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n", 676 status = 0;
682 ca->dvbdev->adapter->num);
683 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT;
684 status = -EIO;
685 goto exit; 677 goto exit;
686 } 678 }
687 } else { 679
688 if (bytes_read > ecount) { 680 /* read the amount of data */
689 pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n", 681 status = ca->pub->read_cam_control(ca->pub, slot,
690 ca->dvbdev->adapter->num); 682 CTRLIF_SIZE_HIGH);
691 status = -EIO; 683 if (status < 0)
684 goto exit;
685 bytes_read = status << 8;
686 status = ca->pub->read_cam_control(ca->pub, slot,
687 CTRLIF_SIZE_LOW);
688 if (status < 0)
692 goto exit; 689 goto exit;
690 bytes_read |= status;
691
692 /* check it will fit */
693 if (ebuf == NULL) {
694 if (bytes_read > ca->slot_info[slot].link_buf_size) {
695 pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the link buffer size (%i > %i)!\n",
696 ca->dvbdev->adapter->num, bytes_read,
697 ca->slot_info[slot].link_buf_size);
698 ca->slot_info[slot].slot_state =
699 DVB_CA_SLOTSTATE_LINKINIT;
700 status = -EIO;
701 goto exit;
702 }
703 if (bytes_read < 2) {
704 pr_err("dvb_ca adapter %d: CAM sent a buffer that was less than 2 bytes!\n",
705 ca->dvbdev->adapter->num);
706 ca->slot_info[slot].slot_state =
707 DVB_CA_SLOTSTATE_LINKINIT;
708 status = -EIO;
709 goto exit;
710 }
711 } else {
712 if (bytes_read > ecount) {
713 pr_err("dvb_ca adapter %d: CAM tried to send a buffer larger than the ecount size!\n",
714 ca->dvbdev->adapter->num);
715 status = -EIO;
716 goto exit;
717 }
693 } 718 }
694 }
695 719
696 /* fill the buffer */ 720 /* fill the buffer */
697 for (i = 0; i < bytes_read; i++) { 721 for (i = 0; i < bytes_read; i++) {
698 /* read byte and check */ 722 /* read byte and check */
699 if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_DATA)) < 0) 723 status = ca->pub->read_cam_control(ca->pub, slot,
700 goto exit; 724 CTRLIF_DATA);
725 if (status < 0)
726 goto exit;
701 727
702 /* OK, store it in the buffer */ 728 /* OK, store it in the buffer */
703 buf[i] = status; 729 buf[i] = status;
704 } 730 }
705 731
706 /* check for read error (RE should now be 0) */ 732 /* check for read error (RE should now be 0) */
707 if ((status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS)) < 0) 733 status = ca->pub->read_cam_control(ca->pub, slot,
708 goto exit; 734 CTRLIF_STATUS);
709 if (status & STATUSREG_RE) { 735 if (status < 0)
710 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_LINKINIT; 736 goto exit;
711 status = -EIO; 737 if (status & STATUSREG_RE) {
712 goto exit; 738 ca->slot_info[slot].slot_state =
739 DVB_CA_SLOTSTATE_LINKINIT;
740 status = -EIO;
741 goto exit;
742 }
713 } 743 }
714 744
715 /* OK, add it to the receive buffer, or copy into external buffer if supplied */ 745 /* OK, add it to the receive buffer, or copy into external buffer if supplied */
@@ -762,6 +792,10 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
762 if (bytes_write > ca->slot_info[slot].link_buf_size) 792 if (bytes_write > ca->slot_info[slot].link_buf_size)
763 return -EINVAL; 793 return -EINVAL;
764 794
795 if (ca->pub->write_data &&
796 (ca->slot_info[slot].slot_state != DVB_CA_SLOTSTATE_LINKINIT))
797 return ca->pub->write_data(ca->pub, slot, buf, bytes_write);
798
765 /* it is possible we are dealing with a single buffer implementation, 799 /* it is possible we are dealing with a single buffer implementation,
766 thus if there is data available for read or if there is even a read 800 thus if there is data available for read or if there is even a read
767 already in progress, we do nothing but awake the kernel thread to 801 already in progress, we do nothing but awake the kernel thread to
@@ -1176,7 +1210,8 @@ static int dvb_ca_en50221_thread(void *data)
1176 1210
1177 pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n", 1211 pr_err("dvb_ca adapter %d: DVB CAM link initialisation failed :(\n",
1178 ca->dvbdev->adapter->num); 1212 ca->dvbdev->adapter->num);
1179 ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_INVALID; 1213 ca->slot_info[slot].slot_state =
1214 DVB_CA_SLOTSTATE_UNINITIALISED;
1180 dvb_ca_en50221_thread_update_delay(ca); 1215 dvb_ca_en50221_thread_update_delay(ca);
1181 break; 1216 break;
1182 } 1217 }
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.h b/drivers/media/dvb-core/dvb_ca_en50221.h
index 1e4bbbd34d91..82617bac0875 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.h
+++ b/drivers/media/dvb-core/dvb_ca_en50221.h
@@ -41,6 +41,8 @@
41 * @write_attribute_mem: function for writing attribute memory on the CAM 41 * @write_attribute_mem: function for writing attribute memory on the CAM
42 * @read_cam_control: function for reading the control interface on the CAM 42 * @read_cam_control: function for reading the control interface on the CAM
43 * @write_cam_control: function for reading the control interface on the CAM 43 * @write_cam_control: function for reading the control interface on the CAM
44 * @read_data: function for reading data (block mode)
45 * @write_data: function for writing data (block mode)
44 * @slot_reset: function to reset the CAM slot 46 * @slot_reset: function to reset the CAM slot
45 * @slot_shutdown: function to shutdown a CAM slot 47 * @slot_shutdown: function to shutdown a CAM slot
46 * @slot_ts_enable: function to enable the Transport Stream on a CAM slot 48 * @slot_ts_enable: function to enable the Transport Stream on a CAM slot
@@ -66,6 +68,11 @@ struct dvb_ca_en50221 {
66 int (*write_cam_control)(struct dvb_ca_en50221 *ca, 68 int (*write_cam_control)(struct dvb_ca_en50221 *ca,
67 int slot, u8 address, u8 value); 69 int slot, u8 address, u8 value);
68 70
71 int (*read_data)(struct dvb_ca_en50221 *ca,
72 int slot, u8 *ebuf, int ecount);
73 int (*write_data)(struct dvb_ca_en50221 *ca,
74 int slot, u8 *ebuf, int ecount);
75
69 int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot); 76 int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
70 int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot); 77 int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
71 int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot); 78 int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
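Note: the new hooks let hardware with block-transfer support bypass the byte-at-a-time CTRLIF register protocol once the slot has left LINKINIT; when they are NULL the core keeps using register access, so existing bridges are unaffected. A hypothetical bridge might wire them up like this (the mybridge_* helpers are assumed, not a real API):

	static int mybridge_read_data(struct dvb_ca_en50221 *ca, int slot,
				      u8 *ebuf, int ecount)
	{
		/* one hardware burst instead of ecount register reads */
		return mybridge_burst_read(ca->data, slot, ebuf, ecount);
	}

	static int mybridge_write_data(struct dvb_ca_en50221 *ca, int slot,
				       u8 *ebuf, int ecount)
	{
		return mybridge_burst_write(ca->data, slot, ebuf, ecount);
	}

	/* at probe time:
	 *	en50221->read_data  = mybridge_read_data;
	 *	en50221->write_data = mybridge_write_data;
	 */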
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 08f67d60a7d9..12bff778c97f 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -3279,7 +3279,10 @@ static int cxd2841er_get_frontend(struct dvb_frontend *fe,
3279 else if (priv->state == STATE_ACTIVE_TC) 3279 else if (priv->state == STATE_ACTIVE_TC)
3280 cxd2841er_read_status_tc(fe, &status); 3280 cxd2841er_read_status_tc(fe, &status);
3281 3281
3282 cxd2841er_read_signal_strength(fe); 3282 if (priv->state == STATE_ACTIVE_TC || priv->state == STATE_ACTIVE_S)
3283 cxd2841er_read_signal_strength(fe);
3284 else
3285 p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3283 3286
3284 if (status & FE_HAS_LOCK) { 3287 if (status & FE_HAS_LOCK) {
3285 cxd2841er_read_snr(fe); 3288 cxd2841er_read_snr(fe);
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 4442e478db72..cd69e187ba7a 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -307,7 +307,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
307* \def DRX_UNKNOWN 307* \def DRX_UNKNOWN
308* \brief Generic UNKNOWN value for DRX enumerated types. 308* \brief Generic UNKNOWN value for DRX enumerated types.
309* 309*
310* Used to indicate that the parameter value is unknown or not yet initalized. 310* Used to indicate that the parameter value is unknown or not yet initialized.
311*/ 311*/
312#ifndef DRX_UNKNOWN 312#ifndef DRX_UNKNOWN
313#define DRX_UNKNOWN (254) 313#define DRX_UNKNOWN (254)
@@ -450,19 +450,6 @@ MACROS
450 ((u8)((((u16)x)>>8)&0xFF)) 450 ((u8)((((u16)x)>>8)&0xFF))
451 451
452/** 452/**
453* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
454*/
455#define DRX_S9TOS16(x) ((((u16)x)&0x100) ? ((s16)((u16)(x)|0xFF00)) : (x))
456
457/**
458* \brief Macro to sign extend signed 9 bit value to signed 16 bit value
459*/
460#define DRX_S24TODRXFREQ(x) ((((u32) x) & 0x00800000UL) ? \
461 ((s32) \
462 (((u32) x) | 0xFF000000)) : \
463 ((s32) x))
464
465/**
466* \brief Macro to convert 16 bit register value to a s32 453* \brief Macro to convert 16 bit register value to a s32
467*/ 454*/
468#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \ 455#define DRX_U16TODRXFREQ(x) ((x & 0x8000) ? \
diff --git a/drivers/media/dvb-frontends/lnbh25.c b/drivers/media/dvb-frontends/lnbh25.c
index ef3021e964be..cb486e879fdd 100644
--- a/drivers/media/dvb-frontends/lnbh25.c
+++ b/drivers/media/dvb-frontends/lnbh25.c
@@ -76,8 +76,8 @@ static int lnbh25_read_vmon(struct lnbh25_priv *priv)
76 return ret; 76 return ret;
77 } 77 }
78 } 78 }
79 print_hex_dump_bytes("lnbh25_read_vmon: ", 79 dev_dbg(&priv->i2c->dev, "%s(): %*ph\n",
80 DUMP_PREFIX_OFFSET, status, sizeof(status)); 80 __func__, (int) sizeof(status), status);
81 if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) { 81 if ((status[0] & (LNBH25_STATUS_OFL | LNBH25_STATUS_VMON)) != 0) {
82 dev_err(&priv->i2c->dev, 82 dev_err(&priv->i2c->dev,
83 "%s(): voltage in failure state, status reg 0x%x\n", 83 "%s(): voltage in failure state, status reg 0x%x\n",
@@ -178,7 +178,7 @@ struct dvb_frontend *lnbh25_attach(struct dvb_frontend *fe,
178 fe->ops.release_sec = lnbh25_release; 178 fe->ops.release_sec = lnbh25_release;
179 fe->ops.set_voltage = lnbh25_set_voltage; 179 fe->ops.set_voltage = lnbh25_set_voltage;
180 180
181 dev_err(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n", 181 dev_info(&i2c->dev, "%s(): attached at I2C addr 0x%02x\n",
182 __func__, priv->i2c_address); 182 __func__, priv->i2c_address);
183 return fe; 183 return fe;
184} 184}
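Note: the lnbh25 change swaps an unconditional print_hex_dump_bytes() for dev_dbg() with the %*ph printk extension, so the register dump only appears when debugging is enabled. For reference, %*ph consumes an int length plus a buffer pointer and prints small buffers (up to 64 bytes) as space-separated hex:

	static void dump_vmon(struct device *dev)
	{
		u8 status[3] = { 0x12, 0x34, 0x56 };

		dev_dbg(dev, "vmon: %*ph\n", (int)sizeof(status), status);
		/* logs: "vmon: 12 34 56" (when dynamic debug enables it) */
	}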
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index e726c2e00460..8ac0f598978d 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -25,6 +25,8 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/i2c.h> 26#include <linux/i2c.h>
27 27
28#include "dvb_math.h"
29
28#include "stv0367.h" 30#include "stv0367.h"
29#include "stv0367_defs.h" 31#include "stv0367_defs.h"
30#include "stv0367_regs.h" 32#include "stv0367_regs.h"
@@ -1437,7 +1439,7 @@ static int stv0367ter_get_frontend(struct dvb_frontend *fe,
1437 return 0; 1439 return 0;
1438} 1440}
1439 1441
1440static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr) 1442static u32 stv0367ter_snr_readreg(struct dvb_frontend *fe)
1441{ 1443{
1442 struct stv0367_state *state = fe->demodulator_priv; 1444 struct stv0367_state *state = fe->demodulator_priv;
1443 u32 snru32 = 0; 1445 u32 snru32 = 0;
@@ -1453,10 +1455,16 @@ static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
1453 1455
1454 cpt++; 1456 cpt++;
1455 } 1457 }
1456
1457 snru32 /= 10;/*average on 10 values*/ 1458 snru32 /= 10;/*average on 10 values*/
1458 1459
1459 *snr = snru32 / 1000; 1460 return snru32;
1461}
1462
1463static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
1464{
1465 u32 snrval = stv0367ter_snr_readreg(fe);
1466
1467 *snr = snrval / 1000;
1460 1468
1461 return 0; 1469 return 0;
1462} 1470}
@@ -1501,7 +1509,8 @@ static int stv0367ter_read_status(struct dvb_frontend *fe,
1501 *status = 0; 1509 *status = 0;
1502 1510
1503 if (stv0367_readbits(state, F367TER_LK)) { 1511 if (stv0367_readbits(state, F367TER_LK)) {
1504 *status |= FE_HAS_LOCK; 1512 *status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI
1513 | FE_HAS_SYNC | FE_HAS_LOCK;
1505 dprintk("%s: stv0367 has locked\n", __func__); 1514 dprintk("%s: stv0367 has locked\n", __func__);
1506 } 1515 }
1507 1516
@@ -2149,6 +2158,18 @@ static int stv0367cab_read_status(struct dvb_frontend *fe,
2149 2158
2150 *status = 0; 2159 *status = 0;
2151 2160
2161 if (state->cab_state->state > FE_CAB_NOSIGNAL)
2162 *status |= FE_HAS_SIGNAL;
2163
2164 if (state->cab_state->state > FE_CAB_NOCARRIER)
2165 *status |= FE_HAS_CARRIER;
2166
2167 if (state->cab_state->state >= FE_CAB_DEMODOK)
2168 *status |= FE_HAS_VITERBI;
2169
2170 if (state->cab_state->state >= FE_CAB_DATAOK)
2171 *status |= FE_HAS_SYNC;
2172
2152 if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ? 2173 if (stv0367_readbits(state, (state->cab_state->qamfec_status_reg ?
2153 state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) { 2174 state->cab_state->qamfec_status_reg : F367CAB_QAMFEC_LOCK))) {
2154 *status |= FE_HAS_LOCK; 2175 *status |= FE_HAS_LOCK;
@@ -2702,51 +2723,61 @@ static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength)
2702 return 0; 2723 return 0;
2703} 2724}
2704 2725
2705static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr) 2726static int stv0367cab_snr_power(struct dvb_frontend *fe)
2706{ 2727{
2707 struct stv0367_state *state = fe->demodulator_priv; 2728 struct stv0367_state *state = fe->demodulator_priv;
2708 u32 noisepercentage;
2709 enum stv0367cab_mod QAMSize; 2729 enum stv0367cab_mod QAMSize;
2710 u32 regval = 0, temp = 0;
2711 int power, i;
2712 2730
2713 QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE); 2731 QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
2714 switch (QAMSize) { 2732 switch (QAMSize) {
2715 case FE_CAB_MOD_QAM4: 2733 case FE_CAB_MOD_QAM4:
2716 power = 21904; 2734 return 21904;
2717 break;
2718 case FE_CAB_MOD_QAM16: 2735 case FE_CAB_MOD_QAM16:
2719 power = 20480; 2736 return 20480;
2720 break;
2721 case FE_CAB_MOD_QAM32: 2737 case FE_CAB_MOD_QAM32:
2722 power = 23040; 2738 return 23040;
2723 break;
2724 case FE_CAB_MOD_QAM64: 2739 case FE_CAB_MOD_QAM64:
2725 power = 21504; 2740 return 21504;
2726 break;
2727 case FE_CAB_MOD_QAM128: 2741 case FE_CAB_MOD_QAM128:
2728 power = 23616; 2742 return 23616;
2729 break;
2730 case FE_CAB_MOD_QAM256: 2743 case FE_CAB_MOD_QAM256:
2731 power = 21760; 2744 return 21760;
2732 break;
2733 case FE_CAB_MOD_QAM512:
2734 power = 1;
2735 break;
2736 case FE_CAB_MOD_QAM1024: 2745 case FE_CAB_MOD_QAM1024:
2737 power = 21280; 2746 return 21280;
2738 break;
2739 default: 2747 default:
2740 power = 1;
2741 break; 2748 break;
2742 } 2749 }
2743 2750
2751 return 1;
2752}
2753
2754static int stv0367cab_snr_readreg(struct dvb_frontend *fe, int avgdiv)
2755{
2756 struct stv0367_state *state = fe->demodulator_priv;
2757 u32 regval = 0;
2758 int i;
2759
2744 for (i = 0; i < 10; i++) { 2760 for (i = 0; i < 10; i++) {
2745 regval += (stv0367_readbits(state, F367CAB_SNR_LO) 2761 regval += (stv0367_readbits(state, F367CAB_SNR_LO)
2746 + 256 * stv0367_readbits(state, F367CAB_SNR_HI)); 2762 + 256 * stv0367_readbits(state, F367CAB_SNR_HI));
2747 } 2763 }
2748 2764
2749 regval /= 10; /*for average over 10 times in for loop above*/ 2765 if (avgdiv)
2766 regval /= 10;
2767
2768 return regval;
2769}
2770
2771static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr)
2772{
2773 struct stv0367_state *state = fe->demodulator_priv;
2774 u32 noisepercentage;
2775 u32 regval = 0, temp = 0;
2776 int power;
2777
2778 power = stv0367cab_snr_power(fe);
2779 regval = stv0367cab_snr_readreg(fe, 1);
2780
2750 if (regval != 0) { 2781 if (regval != 0) {
2751 temp = power 2782 temp = power
2752 * (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER))); 2783 * (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER)));
@@ -2980,21 +3011,117 @@ static int stv0367ddb_set_frontend(struct dvb_frontend *fe)
2980 return -EINVAL; 3011 return -EINVAL;
2981} 3012}
2982 3013
3014static void stv0367ddb_read_signal_strength(struct dvb_frontend *fe)
3015{
3016 struct stv0367_state *state = fe->demodulator_priv;
3017 struct dtv_frontend_properties *p = &fe->dtv_property_cache;
3018 s32 signalstrength;
3019
3020 switch (state->activedemod) {
3021 case demod_cab:
3022 signalstrength = stv0367cab_get_rf_lvl(state) * 1000;
3023 break;
3024 default:
3025 p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3026 return;
3027 }
3028
3029 p->strength.stat[0].scale = FE_SCALE_DECIBEL;
3030 p->strength.stat[0].uvalue = signalstrength;
3031}
3032
3033static void stv0367ddb_read_snr(struct dvb_frontend *fe)
3034{
3035 struct stv0367_state *state = fe->demodulator_priv;
3036 struct dtv_frontend_properties *p = &fe->dtv_property_cache;
3037 int cab_pwr;
3038 u32 regval, tmpval, snrval = 0;
3039
3040 switch (state->activedemod) {
3041 case demod_ter:
3042 snrval = stv0367ter_snr_readreg(fe);
3043 break;
3044 case demod_cab:
3045 cab_pwr = stv0367cab_snr_power(fe);
3046 regval = stv0367cab_snr_readreg(fe, 0);
3047
3048 /* prevent division by zero */
3049 if (!regval) {
3050 snrval = 0;
3051 break;
3052 }
3053
3054 tmpval = (cab_pwr * 320) / regval;
3055 snrval = ((tmpval != 0) ? (intlog2(tmpval) / 5581) : 0);
3056 break;
3057 default:
3058 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3059 return;
3060 }
3061
3062 p->cnr.stat[0].scale = FE_SCALE_DECIBEL;
3063 p->cnr.stat[0].uvalue = snrval;
3064}
3065
3066static void stv0367ddb_read_ucblocks(struct dvb_frontend *fe)
3067{
3068 struct stv0367_state *state = fe->demodulator_priv;
3069 struct dtv_frontend_properties *p = &fe->dtv_property_cache;
3070 u32 ucblocks = 0;
3071
3072 switch (state->activedemod) {
3073 case demod_ter:
3074 stv0367ter_read_ucblocks(fe, &ucblocks);
3075 break;
3076 case demod_cab:
3077 stv0367cab_read_ucblcks(fe, &ucblocks);
3078 break;
3079 default:
3080 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3081 return;
3082 }
3083
3084 p->block_error.stat[0].scale = FE_SCALE_COUNTER;
3085 p->block_error.stat[0].uvalue = ucblocks;
3086}
3087
2983static int stv0367ddb_read_status(struct dvb_frontend *fe, 3088static int stv0367ddb_read_status(struct dvb_frontend *fe,
2984 enum fe_status *status) 3089 enum fe_status *status)
2985{ 3090{
2986 struct stv0367_state *state = fe->demodulator_priv; 3091 struct stv0367_state *state = fe->demodulator_priv;
3092 struct dtv_frontend_properties *p = &fe->dtv_property_cache;
3093 int ret;
2987 3094
2988 switch (state->activedemod) { 3095 switch (state->activedemod) {
2989 case demod_ter: 3096 case demod_ter:
2990 return stv0367ter_read_status(fe, status); 3097 ret = stv0367ter_read_status(fe, status);
3098 break;
2991 case demod_cab: 3099 case demod_cab:
2992 return stv0367cab_read_status(fe, status); 3100 ret = stv0367cab_read_status(fe, status);
2993 default:
2994 break; 3101 break;
3102 default:
3103 return 0;
2995 } 3104 }
2996 3105
2997 return -EINVAL; 3106 /* stop and report on *_read_status failure */
3107 if (ret)
3108 return ret;
3109
3110 stv0367ddb_read_signal_strength(fe);
3111
3112 /* read carrier/noise when a carrier is detected */
3113 if (*status & FE_HAS_CARRIER)
3114 stv0367ddb_read_snr(fe);
3115 else
3116 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3117
3118 /* read uncorrected blocks on FE_HAS_LOCK */
3119 if (*status & FE_HAS_LOCK)
3120 stv0367ddb_read_ucblocks(fe);
3121 else
3122 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3123
3124 return 0;
2998} 3125}
2999 3126
3000static int stv0367ddb_get_frontend(struct dvb_frontend *fe, 3127static int stv0367ddb_get_frontend(struct dvb_frontend *fe,
@@ -3035,6 +3162,7 @@ static int stv0367ddb_sleep(struct dvb_frontend *fe)
3035static int stv0367ddb_init(struct stv0367_state *state) 3162static int stv0367ddb_init(struct stv0367_state *state)
3036{ 3163{
3037 struct stv0367ter_state *ter_state = state->ter_state; 3164 struct stv0367ter_state *ter_state = state->ter_state;
3165 struct dtv_frontend_properties *p = &state->fe.dtv_property_cache;
3038 3166
3039 stv0367_writereg(state, R367TER_TOPCTRL, 0x10); 3167 stv0367_writereg(state, R367TER_TOPCTRL, 0x10);
3040 3168
@@ -3109,6 +3237,13 @@ static int stv0367ddb_init(struct stv0367_state *state)
3109 ter_state->first_lock = 0; 3237 ter_state->first_lock = 0;
3110 ter_state->unlock_counter = 2; 3238 ter_state->unlock_counter = 2;
3111 3239
3240 p->strength.len = 1;
3241 p->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3242 p->cnr.len = 1;
3243 p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3244 p->block_error.len = 1;
3245 p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
3246
3112 return 0; 3247 return 0;
3113} 3248}
3114 3249
@@ -3126,15 +3261,12 @@ static const struct dvb_frontend_ops stv0367ddb_ops = {
3126 0x400 |/* FE_CAN_QAM_4 */ 3261 0x400 |/* FE_CAN_QAM_4 */
3127 FE_CAN_QAM_16 | FE_CAN_QAM_32 | 3262 FE_CAN_QAM_16 | FE_CAN_QAM_32 |
3128 FE_CAN_QAM_64 | FE_CAN_QAM_128 | 3263 FE_CAN_QAM_64 | FE_CAN_QAM_128 |
3129 FE_CAN_QAM_256 | FE_CAN_FEC_AUTO | 3264 FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
3130 /* DVB-T */ 3265 /* DVB-T */
3131 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | 3266 FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
3132 FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | 3267 FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
3133 FE_CAN_FEC_AUTO | 3268 FE_CAN_QPSK | FE_CAN_TRANSMISSION_MODE_AUTO |
3134 FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | 3269 FE_CAN_RECOVER | FE_CAN_INVERSION_AUTO |
3135 FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
3136 FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER |
3137 FE_CAN_INVERSION_AUTO |
3138 FE_CAN_MUTE_TS 3270 FE_CAN_MUTE_TS
3139 }, 3271 },
3140 .release = stv0367_release, 3272 .release = stv0367_release,
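Note: stv0367ddb_read_snr() reports C/N on the DVBv5 FE_SCALE_DECIBEL scale, which is expressed in 0.001 dB units. intlog2() from dvb_math.h returns log2(x) in 2^24 fixed point, so dividing by roughly 2^24 / (10000 * log10(2)) ≈ 5573 converts to millidecibels; the driver's 5581 is the same scale to within about 0.2%. A sketch of the conversion on its own, assuming only dvb_math.h:

	#include "dvb_math.h"

	/* linear power ratio -> 0.001 dB (FE_SCALE_DECIBEL units) */
	static u32 ratio_to_mdb(u32 ratio)
	{
		return ratio ? intlog2(ratio) / 5573 : 0;
	}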
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 6e313d5243a0..f39f5179dd95 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1496,7 +1496,6 @@ MODULE_DEVICE_TABLE(i2c, et8ek8_id_table);
1496static const struct dev_pm_ops et8ek8_pm_ops = { 1496static const struct dev_pm_ops et8ek8_pm_ops = {
1497 SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume) 1497 SET_SYSTEM_SLEEP_PM_OPS(et8ek8_suspend, et8ek8_resume)
1498}; 1498};
1499MODULE_DEVICE_TABLE(of, et8ek8_of_table);
1500 1499
1501static struct i2c_driver et8ek8_i2c_driver = { 1500static struct i2c_driver et8ek8_i2c_driver = {
1502 .driver = { 1501 .driver = {
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 9da4bf4f2c7a..7b79a7498751 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -659,7 +659,7 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
659 struct tvp5150 *decoder = to_tvp5150(sd); 659 struct tvp5150 *decoder = to_tvp5150(sd);
660 v4l2_std_id std = decoder->norm; 660 v4l2_std_id std = decoder->norm;
661 u8 reg; 661 u8 reg;
662 int pos=0; 662 int pos = 0;
663 663
664 if (std == V4L2_STD_ALL) { 664 if (std == V4L2_STD_ALL) {
665 dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n"); 665 dev_err(sd->dev, "VBI can't be configured without knowing number of lines\n");
@@ -669,33 +669,30 @@ static int tvp5150_set_vbi(struct v4l2_subdev *sd,
669 line += 3; 669 line += 3;
670 } 670 }
671 671
672 if (line<6||line>27) 672 if (line < 6 || line > 27)
673 return 0; 673 return 0;
674 674
675 while (regs->reg != (u16)-1 ) { 675 while (regs->reg != (u16)-1) {
676 if ((type & regs->type.vbi_type) && 676 if ((type & regs->type.vbi_type) &&
677 (line>=regs->type.ini_line) && 677 (line >= regs->type.ini_line) &&
678 (line<=regs->type.end_line)) { 678 (line <= regs->type.end_line))
679 type=regs->type.vbi_type;
680 break; 679 break;
681 }
682 680
683 regs++; 681 regs++;
684 pos++; 682 pos++;
685 } 683 }
684
686 if (regs->reg == (u16)-1) 685 if (regs->reg == (u16)-1)
687 return 0; 686 return 0;
688 687
689 type=pos | (flags & 0xf0); 688 type = pos | (flags & 0xf0);
690 reg=((line-6)<<1)+TVP5150_LINE_MODE_INI; 689 reg = ((line - 6) << 1) + TVP5150_LINE_MODE_INI;
691 690
692 if (fields&1) { 691 if (fields & 1)
693 tvp5150_write(sd, reg, type); 692 tvp5150_write(sd, reg, type);
694 }
695 693
696 if (fields&2) { 694 if (fields & 2)
697 tvp5150_write(sd, reg+1, type); 695 tvp5150_write(sd, reg + 1, type);
698 }
699 696
700 return type; 697 return type;
701} 698}
diff --git a/drivers/media/pci/ddbridge/ddbridge-core.c b/drivers/media/pci/ddbridge/ddbridge-core.c
index 9420479bee9a..cd1723e79a07 100644
--- a/drivers/media/pci/ddbridge/ddbridge-core.c
+++ b/drivers/media/pci/ddbridge/ddbridge-core.c
@@ -17,6 +17,8 @@
17 * http://www.gnu.org/copyleft/gpl.html 17 * http://www.gnu.org/copyleft/gpl.html
18 */ 18 */
19 19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/init.h> 23#include <linux/init.h>
22#include <linux/interrupt.h> 24#include <linux/interrupt.h>
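Note: defining pr_fmt() before the first include makes every later pr_info()/pr_err() in the file prefix its output with the module name; <linux/printk.h> (pulled in via <linux/module.h>) only provides a default when the macro is not already defined, so the ordering matters. Minimal illustration:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/module.h>

	static int __init demo_init(void)
	{
		pr_info("loaded\n");	/* logs "ddbridge: loaded" in this module */
		return 0;
	}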
@@ -114,6 +116,19 @@ static int i2c_write_reg(struct i2c_adapter *adap, u8 adr,
114 return i2c_write(adap, adr, msg, 2); 116 return i2c_write(adap, adr, msg, 2);
115} 117}
116 118
119static inline u32 safe_ddbreadl(struct ddb *dev, u32 adr)
120{
121 u32 val = ddbreadl(adr);
122
123 /* (ddb)readl returns (uint)-1 (all bits set) on failure, catch that */
124 if (val == ~0) {
125 dev_err(&dev->pdev->dev, "ddbreadl failure, adr=%08x\n", adr);
126 return 0;
127 }
128
129 return val;
130}
131
117static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd) 132static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
118{ 133{
119 struct ddb *dev = i2c->dev; 134 struct ddb *dev = i2c->dev;
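Note: safe_ddbreadl() exists for the busy-wait loops later in this file: a PCI device that falls off the bus returns all-ones for MMIO reads, so a poll like while (ddbreadl(SPI_CONTROL) & 0x0004) could spin forever. Returning 0 on ~0 guarantees such polls terminate. The generic shape of the guard, as a sketch:

	#include <linux/device.h>
	#include <linux/io.h>

	static inline u32 guarded_readl(struct device *dev, void __iomem *addr)
	{
		u32 val = readl(addr);

		if (val == ~0u) {	/* all bits set: device likely gone */
			dev_err(dev, "MMIO read failure\n");
			return 0;	/* lets "while (reg & BIT)" polls exit */
		}
		return val;
	}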
@@ -124,10 +139,10 @@ static int ddb_i2c_cmd(struct ddb_i2c *i2c, u32 adr, u32 cmd)
124 ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND); 139 ddbwritel((adr << 9) | cmd, i2c->regs + I2C_COMMAND);
125 stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); 140 stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ);
126 if (stat == 0) { 141 if (stat == 0) {
127 printk(KERN_ERR "I2C timeout\n"); 142 dev_err(&dev->pdev->dev, "I2C timeout\n");
128 { /* MSI debugging*/ 143 { /* MSI debugging*/
129 u32 istat = ddbreadl(INTERRUPT_STATUS); 144 u32 istat = ddbreadl(INTERRUPT_STATUS);
130 printk(KERN_ERR "IRS %08x\n", istat); 145 dev_err(&dev->pdev->dev, "IRS %08x\n", istat);
131 ddbwritel(istat, INTERRUPT_ACK); 146 ddbwritel(istat, INTERRUPT_ACK);
132 } 147 }
133 return -EIO; 148 return -EIO;
@@ -533,7 +548,7 @@ static u32 ddb_input_avail(struct ddb_input *input)
533 off = (stat & 0x7ff) << 7; 548 off = (stat & 0x7ff) << 7;
534 549
535 if (ctrl & 4) { 550 if (ctrl & 4) {
536 printk(KERN_ERR "IA %d %d %08x\n", idx, off, ctrl); 551 dev_err(&dev->pdev->dev, "IA %d %d %08x\n", idx, off, ctrl);
537 ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr)); 552 ddbwritel(input->stat, DMA_BUFFER_ACK(input->nr));
538 return 0; 553 return 0;
539 } 554 }
@@ -611,6 +626,7 @@ static int demod_attach_drxk(struct ddb_input *input)
611 struct i2c_adapter *i2c = &input->port->i2c->adap; 626 struct i2c_adapter *i2c = &input->port->i2c->adap;
612 struct dvb_frontend *fe; 627 struct dvb_frontend *fe;
613 struct drxk_config config; 628 struct drxk_config config;
629 struct device *dev = &input->port->dev->pdev->dev;
614 630
615 memset(&config, 0, sizeof(config)); 631 memset(&config, 0, sizeof(config));
616 config.microcode_name = "drxk_a3.mc"; 632 config.microcode_name = "drxk_a3.mc";
@@ -619,7 +635,7 @@ static int demod_attach_drxk(struct ddb_input *input)
619 635
620 fe = input->fe = dvb_attach(drxk_attach, &config, i2c); 636 fe = input->fe = dvb_attach(drxk_attach, &config, i2c);
621 if (!input->fe) { 637 if (!input->fe) {
622 printk(KERN_ERR "No DRXK found!\n"); 638 dev_err(dev, "No DRXK found!\n");
623 return -ENODEV; 639 return -ENODEV;
624 } 640 }
625 fe->sec_priv = input; 641 fe->sec_priv = input;
@@ -632,12 +648,13 @@ static int tuner_attach_tda18271(struct ddb_input *input)
632{ 648{
633 struct i2c_adapter *i2c = &input->port->i2c->adap; 649 struct i2c_adapter *i2c = &input->port->i2c->adap;
634 struct dvb_frontend *fe; 650 struct dvb_frontend *fe;
651 struct device *dev = &input->port->dev->pdev->dev;
635 652
636 if (input->fe->ops.i2c_gate_ctrl) 653 if (input->fe->ops.i2c_gate_ctrl)
637 input->fe->ops.i2c_gate_ctrl(input->fe, 1); 654 input->fe->ops.i2c_gate_ctrl(input->fe, 1);
638 fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60); 655 fe = dvb_attach(tda18271c2dd_attach, input->fe, i2c, 0x60);
639 if (!fe) { 656 if (!fe) {
640 printk(KERN_ERR "No TDA18271 found!\n"); 657 dev_err(dev, "No TDA18271 found!\n");
641 return -ENODEV; 658 return -ENODEV;
642 } 659 }
643 if (input->fe->ops.i2c_gate_ctrl) 660 if (input->fe->ops.i2c_gate_ctrl)
@@ -670,13 +687,14 @@ static struct stv0367_config ddb_stv0367_config[] = {
670static int demod_attach_stv0367(struct ddb_input *input) 687static int demod_attach_stv0367(struct ddb_input *input)
671{ 688{
672 struct i2c_adapter *i2c = &input->port->i2c->adap; 689 struct i2c_adapter *i2c = &input->port->i2c->adap;
690 struct device *dev = &input->port->dev->pdev->dev;
673 691
674 /* attach frontend */ 692 /* attach frontend */
675 input->fe = dvb_attach(stv0367ddb_attach, 693 input->fe = dvb_attach(stv0367ddb_attach,
676 &ddb_stv0367_config[(input->nr & 1)], i2c); 694 &ddb_stv0367_config[(input->nr & 1)], i2c);
677 695
678 if (!input->fe) { 696 if (!input->fe) {
679 printk(KERN_ERR "stv0367ddb_attach failed (not found?)\n"); 697 dev_err(dev, "stv0367ddb_attach failed (not found?)\n");
680 return -ENODEV; 698 return -ENODEV;
681 } 699 }
682 700
@@ -690,17 +708,19 @@ static int demod_attach_stv0367(struct ddb_input *input)
690static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr) 708static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
691{ 709{
692 struct i2c_adapter *adapter = &input->port->i2c->adap; 710 struct i2c_adapter *adapter = &input->port->i2c->adap;
711 struct device *dev = &input->port->dev->pdev->dev;
712
693 u8 tda_id[2]; 713 u8 tda_id[2];
694 u8 subaddr = 0x00; 714 u8 subaddr = 0x00;
695 715
696 printk(KERN_DEBUG "stv0367-tda18212 tuner ping\n"); 716 dev_dbg(dev, "stv0367-tda18212 tuner ping\n");
697 if (input->fe->ops.i2c_gate_ctrl) 717 if (input->fe->ops.i2c_gate_ctrl)
698 input->fe->ops.i2c_gate_ctrl(input->fe, 1); 718 input->fe->ops.i2c_gate_ctrl(input->fe, 1);
699 719
700 if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) 720 if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
701 printk(KERN_DEBUG "tda18212 ping 1 fail\n"); 721 dev_dbg(dev, "tda18212 ping 1 fail\n");
702 if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0) 722 if (i2c_read_regs(adapter, adr, subaddr, tda_id, sizeof(tda_id)) < 0)
703 printk(KERN_DEBUG "tda18212 ping 2 fail\n"); 723 dev_warn(dev, "tda18212 ping failed, expect problems\n");
704 724
705 if (input->fe->ops.i2c_gate_ctrl) 725 if (input->fe->ops.i2c_gate_ctrl)
706 input->fe->ops.i2c_gate_ctrl(input->fe, 0); 726 input->fe->ops.i2c_gate_ctrl(input->fe, 0);
@@ -711,6 +731,7 @@ static int tuner_tda18212_ping(struct ddb_input *input, unsigned short adr)
711static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24) 731static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
712{ 732{
713 struct i2c_adapter *i2c = &input->port->i2c->adap; 733 struct i2c_adapter *i2c = &input->port->i2c->adap;
734 struct device *dev = &input->port->dev->pdev->dev;
714 struct cxd2841er_config cfg; 735 struct cxd2841er_config cfg;
715 736
716 /* the cxd2841er driver expects 8bit/shifted I2C addresses */ 737 /* the cxd2841er driver expects 8bit/shifted I2C addresses */
@@ -728,7 +749,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
728 input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c); 749 input->fe = dvb_attach(cxd2841er_attach_t_c, &cfg, i2c);
729 750
730 if (!input->fe) { 751 if (!input->fe) {
731 printk(KERN_ERR "No Sony CXD28xx found!\n"); 752 dev_err(dev, "No Sony CXD28xx found!\n");
732 return -ENODEV; 753 return -ENODEV;
733 } 754 }
734 755
@@ -742,6 +763,7 @@ static int demod_attach_cxd28xx(struct ddb_input *input, int par, int osc24)
742static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype) 763static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
743{ 764{
744 struct i2c_adapter *adapter = &input->port->i2c->adap; 765 struct i2c_adapter *adapter = &input->port->i2c->adap;
766 struct device *dev = &input->port->dev->pdev->dev;
745 struct i2c_client *client; 767 struct i2c_client *client;
746 struct tda18212_config config = { 768 struct tda18212_config config = {
747 .fe = input->fe, 769 .fe = input->fe,
@@ -786,7 +808,7 @@ static int tuner_attach_tda18212(struct ddb_input *input, u32 porttype)
786 808
787 return 0; 809 return 0;
788err: 810err:
789 printk(KERN_INFO "TDA18212 tuner not found. Device is not fully operational.\n"); 811 dev_warn(dev, "TDA18212 tuner not found. Device is not fully operational.\n");
790 return -ENODEV; 812 return -ENODEV;
791} 813}
792 814
@@ -847,19 +869,20 @@ static struct stv6110x_config stv6110b = {
847static int demod_attach_stv0900(struct ddb_input *input, int type) 869static int demod_attach_stv0900(struct ddb_input *input, int type)
848{ 870{
849 struct i2c_adapter *i2c = &input->port->i2c->adap; 871 struct i2c_adapter *i2c = &input->port->i2c->adap;
872 struct device *dev = &input->port->dev->pdev->dev;
850 struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; 873 struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
851 874
852 input->fe = dvb_attach(stv090x_attach, feconf, i2c, 875 input->fe = dvb_attach(stv090x_attach, feconf, i2c,
853 (input->nr & 1) ? STV090x_DEMODULATOR_1 876 (input->nr & 1) ? STV090x_DEMODULATOR_1
854 : STV090x_DEMODULATOR_0); 877 : STV090x_DEMODULATOR_0);
855 if (!input->fe) { 878 if (!input->fe) {
856 printk(KERN_ERR "No STV0900 found!\n"); 879 dev_err(dev, "No STV0900 found!\n");
857 return -ENODEV; 880 return -ENODEV;
858 } 881 }
859 if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0, 882 if (!dvb_attach(lnbh24_attach, input->fe, i2c, 0,
860 0, (input->nr & 1) ? 883 0, (input->nr & 1) ?
861 (0x09 - type) : (0x0b - type))) { 884 (0x09 - type) : (0x0b - type))) {
862 printk(KERN_ERR "No LNBH24 found!\n"); 885 dev_err(dev, "No LNBH24 found!\n");
863 return -ENODEV; 886 return -ENODEV;
864 } 887 }
865 return 0; 888 return 0;
@@ -868,6 +891,7 @@ static int demod_attach_stv0900(struct ddb_input *input, int type)
868static int tuner_attach_stv6110(struct ddb_input *input, int type) 891static int tuner_attach_stv6110(struct ddb_input *input, int type)
869{ 892{
870 struct i2c_adapter *i2c = &input->port->i2c->adap; 893 struct i2c_adapter *i2c = &input->port->i2c->adap;
894 struct device *dev = &input->port->dev->pdev->dev;
871 struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900; 895 struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
872 struct stv6110x_config *tunerconf = (input->nr & 1) ? 896 struct stv6110x_config *tunerconf = (input->nr & 1) ?
873 &stv6110b : &stv6110a; 897 &stv6110b : &stv6110a;
@@ -875,10 +899,10 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type)
875 899
876 ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c); 900 ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
877 if (!ctl) { 901 if (!ctl) {
878 printk(KERN_ERR "No STV6110X found!\n"); 902 dev_err(dev, "No STV6110X found!\n");
879 return -ENODEV; 903 return -ENODEV;
880 } 904 }
881 printk(KERN_INFO "attach tuner input %d adr %02x\n", 905 dev_info(dev, "attach tuner input %d adr %02x\n",
882 input->nr, tunerconf->addr); 906 input->nr, tunerconf->addr);
883 907
884 feconf->tuner_init = ctl->tuner_init; 908 feconf->tuner_init = ctl->tuner_init;
@@ -1009,13 +1033,14 @@ static int dvb_input_attach(struct ddb_input *input)
1009 struct ddb_port *port = input->port; 1033 struct ddb_port *port = input->port;
1010 struct dvb_adapter *adap = &input->adap; 1034 struct dvb_adapter *adap = &input->adap;
1011 struct dvb_demux *dvbdemux = &input->demux; 1035 struct dvb_demux *dvbdemux = &input->demux;
1036 struct device *dev = &input->port->dev->pdev->dev;
1012 int sony_osc24 = 0, sony_tspar = 0; 1037 int sony_osc24 = 0, sony_tspar = 0;
1013 1038
1014 ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE, 1039 ret = dvb_register_adapter(adap, "DDBridge", THIS_MODULE,
1015 &input->port->dev->pdev->dev, 1040 &input->port->dev->pdev->dev,
1016 adapter_nr); 1041 adapter_nr);
1017 if (ret < 0) { 1042 if (ret < 0) {
1018 printk(KERN_ERR "ddbridge: Could not register adapter.Check if you enabled enough adapters in dvb-core!\n"); 1043 dev_err(dev, "Could not register adapter. Check if you enabled enough adapters in dvb-core!\n");
1019 return ret; 1044 return ret;
1020 } 1045 }
1021 input->attached = 1; 1046 input->attached = 1;
@@ -1241,9 +1266,9 @@ static void input_tasklet(unsigned long data)
1241 1266
1242 if (input->port->class == DDB_PORT_TUNER) { 1267 if (input->port->class == DDB_PORT_TUNER) {
1243 if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr))) 1268 if (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))
1244 printk(KERN_ERR "Overflow input %d\n", input->nr); 1269 dev_err(&dev->pdev->dev, "Overflow input %d\n", input->nr);
1245 while (input->cbuf != ((input->stat >> 11) & 0x1f) 1270 while (input->cbuf != ((input->stat >> 11) & 0x1f)
1246 || (4&ddbreadl(DMA_BUFFER_CONTROL(input->nr)))) { 1271 || (4 & safe_ddbreadl(dev, DMA_BUFFER_CONTROL(input->nr)))) {
1247 dvb_dmx_swfilter_packets(&input->demux, 1272 dvb_dmx_swfilter_packets(&input->demux,
1248 input->vbuf[input->cbuf], 1273 input->vbuf[input->cbuf],
1249 input->dma_buf_size / 188); 1274 input->dma_buf_size / 188);
@@ -1280,6 +1305,7 @@ static struct cxd2099_cfg cxd_cfg = {
1280 .adr = 0x40, 1305 .adr = 0x40,
1281 .polarity = 1, 1306 .polarity = 1,
1282 .clock_mode = 1, 1307 .clock_mode = 1,
1308 .max_i2c = 512,
1283}; 1309};
1284 1310
1285static int ddb_ci_attach(struct ddb_port *port) 1311static int ddb_ci_attach(struct ddb_port *port)
@@ -1310,6 +1336,7 @@ static int ddb_ci_attach(struct ddb_port *port)
1310 1336
1311static int ddb_port_attach(struct ddb_port *port) 1337static int ddb_port_attach(struct ddb_port *port)
1312{ 1338{
1339 struct device *dev = &port->dev->pdev->dev;
1313 int ret = 0; 1340 int ret = 0;
1314 1341
1315 switch (port->class) { 1342 switch (port->class) {
@@ -1326,7 +1353,7 @@ static int ddb_port_attach(struct ddb_port *port)
1326 break; 1353 break;
1327 } 1354 }
1328 if (ret < 0) 1355 if (ret < 0)
1329 printk(KERN_ERR "port_attach on port %d failed\n", port->nr); 1356 dev_err(dev, "port_attach on port %d failed\n", port->nr);
1330 return ret; 1357 return ret;
1331} 1358}
1332 1359
@@ -1377,6 +1404,7 @@ static void ddb_ports_detach(struct ddb *dev)
1377static int init_xo2(struct ddb_port *port) 1404static int init_xo2(struct ddb_port *port)
1378{ 1405{
1379 struct i2c_adapter *i2c = &port->i2c->adap; 1406 struct i2c_adapter *i2c = &port->i2c->adap;
1407 struct device *dev = &port->dev->pdev->dev;
1380 u8 val, data[2]; 1408 u8 val, data[2];
1381 int res; 1409 int res;
1382 1410
@@ -1385,7 +1413,7 @@ static int init_xo2(struct ddb_port *port)
1385 return res; 1413 return res;
1386 1414
1387 if (data[0] != 0x01) { 1415 if (data[0] != 0x01) {
1388 pr_info("Port %d: invalid XO2\n", port->nr); 1416 dev_info(dev, "Port %d: invalid XO2\n", port->nr);
1389 return -1; 1417 return -1;
1390 } 1418 }
1391 1419
@@ -1511,7 +1539,7 @@ static void ddb_port_probe(struct ddb_port *port)
1511 port->class = DDB_PORT_CI; 1539 port->class = DDB_PORT_CI;
1512 ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); 1540 ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
1513 } else if (port_has_xo2(port, &xo2_type, &xo2_id)) { 1541 } else if (port_has_xo2(port, &xo2_type, &xo2_id)) {
1514 printk(KERN_INFO "Port %d (TAB %d): XO2 type: %d, id: %d\n", 1542 dev_dbg(&dev->pdev->dev, "Port %d (TAB %d): XO2 type: %d, id: %d\n",
1515 port->nr, port->nr+1, xo2_type, xo2_id); 1543 port->nr, port->nr+1, xo2_type, xo2_id);
1516 1544
1517 ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING); 1545 ddbwritel(I2C_SPEED_400, port->i2c->regs + I2C_TIMING);
@@ -1556,10 +1584,10 @@ static void ddb_port_probe(struct ddb_port *port)
1556 } 1584 }
1557 break; 1585 break;
1558 case DDB_XO2_TYPE_CI: 1586 case DDB_XO2_TYPE_CI:
1559 printk(KERN_INFO "DuoFlex CI modules not supported\n"); 1587 dev_info(&dev->pdev->dev, "DuoFlex CI modules not supported\n");
1560 break; 1588 break;
1561 default: 1589 default:
1562 printk(KERN_INFO "Unknown XO2 DuoFlex module\n"); 1590 dev_info(&dev->pdev->dev, "Unknown XO2 DuoFlex module\n");
1563 break; 1591 break;
1564 } 1592 }
1565 } else if (port_has_cxd28xx(port, &cxd_id)) { 1593 } else if (port_has_cxd28xx(port, &cxd_id)) {
@@ -1611,7 +1639,7 @@ static void ddb_port_probe(struct ddb_port *port)
1611 ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING); 1639 ddbwritel(I2C_SPEED_100, port->i2c->regs + I2C_TIMING);
1612 } 1640 }
1613 1641
1614 printk(KERN_INFO "Port %d (TAB %d): %s\n", 1642 dev_info(&dev->pdev->dev, "Port %d (TAB %d): %s\n",
1615 port->nr, port->nr+1, modname); 1643 port->nr, port->nr+1, modname);
1616} 1644}
1617 1645
@@ -1765,7 +1793,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
1765 wbuf += 4; 1793 wbuf += 4;
1766 wlen -= 4; 1794 wlen -= 4;
1767 ddbwritel(data, SPI_DATA); 1795 ddbwritel(data, SPI_DATA);
1768 while (ddbreadl(SPI_CONTROL) & 0x0004) 1796 while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
1769 ; 1797 ;
1770 } 1798 }
1771 1799
@@ -1785,7 +1813,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
1785 if (shift) 1813 if (shift)
1786 data <<= shift; 1814 data <<= shift;
1787 ddbwritel(data, SPI_DATA); 1815 ddbwritel(data, SPI_DATA);
1788 while (ddbreadl(SPI_CONTROL) & 0x0004) 1816 while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
1789 ; 1817 ;
1790 1818
1791 if (!rlen) { 1819 if (!rlen) {
@@ -1797,7 +1825,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
1797 1825
1798 while (rlen > 4) { 1826 while (rlen > 4) {
1799 ddbwritel(0xffffffff, SPI_DATA); 1827 ddbwritel(0xffffffff, SPI_DATA);
1800 while (ddbreadl(SPI_CONTROL) & 0x0004) 1828 while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
1801 ; 1829 ;
1802 data = ddbreadl(SPI_DATA); 1830 data = ddbreadl(SPI_DATA);
1803 *(u32 *) rbuf = swab32(data); 1831 *(u32 *) rbuf = swab32(data);
@@ -1806,7 +1834,7 @@ static int flashio(struct ddb *dev, u8 *wbuf, u32 wlen, u8 *rbuf, u32 rlen)
1806 } 1834 }
1807 ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL); 1835 ddbwritel(0x0003 | ((rlen << (8 + 3)) & 0x1F00), SPI_CONTROL);
1808 ddbwritel(0xffffffff, SPI_DATA); 1836 ddbwritel(0xffffffff, SPI_DATA);
1809 while (ddbreadl(SPI_CONTROL) & 0x0004) 1837 while (safe_ddbreadl(dev, SPI_CONTROL) & 0x0004)
1810 ; 1838 ;
1811 1839
1812 data = ddbreadl(SPI_DATA); 1840 data = ddbreadl(SPI_DATA);
@@ -1993,7 +2021,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1993 dev->pdev = pdev; 2021 dev->pdev = pdev;
1994 pci_set_drvdata(pdev, dev); 2022 pci_set_drvdata(pdev, dev);
1995 dev->info = (struct ddb_info *) id->driver_data; 2023 dev->info = (struct ddb_info *) id->driver_data;
1996 printk(KERN_INFO "DDBridge driver detected: %s\n", dev->info->name); 2024 dev_info(&pdev->dev, "Detected %s\n", dev->info->name);
1997 2025
1998 dev->regs = ioremap(pci_resource_start(dev->pdev, 0), 2026 dev->regs = ioremap(pci_resource_start(dev->pdev, 0),
1999 pci_resource_len(dev->pdev, 0)); 2027 pci_resource_len(dev->pdev, 0));
@@ -2001,13 +2029,13 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2001 stat = -ENOMEM; 2029 stat = -ENOMEM;
2002 goto fail; 2030 goto fail;
2003 } 2031 }
2004 printk(KERN_INFO "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4)); 2032 dev_info(&pdev->dev, "HW %08x FW %08x\n", ddbreadl(0), ddbreadl(4));
2005 2033
2006#ifdef CONFIG_PCI_MSI 2034#ifdef CONFIG_PCI_MSI
2007 if (pci_msi_enabled()) 2035 if (pci_msi_enabled())
2008 stat = pci_enable_msi(dev->pdev); 2036 stat = pci_enable_msi(dev->pdev);
2009 if (stat) { 2037 if (stat) {
2010 printk(KERN_INFO ": MSI not available.\n"); 2038 dev_info(&pdev->dev, "MSI not available.\n");
2011 } else { 2039 } else {
2012 irq_flag = 0; 2040 irq_flag = 0;
2013 dev->msi = 1; 2041 dev->msi = 1;
@@ -2040,7 +2068,7 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2040 goto fail1; 2068 goto fail1;
2041 ddb_ports_init(dev); 2069 ddb_ports_init(dev);
2042 if (ddb_buffers_alloc(dev) < 0) { 2070 if (ddb_buffers_alloc(dev) < 0) {
2043 printk(KERN_INFO ": Could not allocate buffer memory\n"); 2071 dev_err(&pdev->dev, "Could not allocate buffer memory\n");
2044 goto fail2; 2072 goto fail2;
2045 } 2073 }
2046 if (ddb_ports_attach(dev) < 0) 2074 if (ddb_ports_attach(dev) < 0)
@@ -2050,19 +2078,19 @@ static int ddb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2050 2078
2051fail3: 2079fail3:
2052 ddb_ports_detach(dev); 2080 ddb_ports_detach(dev);
2053 printk(KERN_ERR "fail3\n"); 2081 dev_err(&pdev->dev, "fail3\n");
2054 ddb_ports_release(dev); 2082 ddb_ports_release(dev);
2055fail2: 2083fail2:
2056 printk(KERN_ERR "fail2\n"); 2084 dev_err(&pdev->dev, "fail2\n");
2057 ddb_buffers_free(dev); 2085 ddb_buffers_free(dev);
2058fail1: 2086fail1:
2059 printk(KERN_ERR "fail1\n"); 2087 dev_err(&pdev->dev, "fail1\n");
2060 if (dev->msi) 2088 if (dev->msi)
2061 pci_disable_msi(dev->pdev); 2089 pci_disable_msi(dev->pdev);
2062 if (stat == 0) 2090 if (stat == 0)
2063 free_irq(dev->pdev->irq, dev); 2091 free_irq(dev->pdev->irq, dev);
2064fail: 2092fail:
2065 printk(KERN_ERR "fail\n"); 2093 dev_err(&pdev->dev, "fail\n");
2066 ddb_unmap(dev); 2094 ddb_unmap(dev);
2067 pci_set_drvdata(pdev, NULL); 2095 pci_set_drvdata(pdev, NULL);
2068 pci_disable_device(pdev); 2096 pci_disable_device(pdev);
@@ -2242,7 +2270,7 @@ static __init int module_init_ddbridge(void)
2242{ 2270{
2243 int ret; 2271 int ret;
2244 2272
2245 printk(KERN_INFO "Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n"); 2273 pr_info("Digital Devices PCIE bridge driver, Copyright (C) 2010-11 Digital Devices GmbH\n");
2246 2274
2247 ret = ddb_class_create(); 2275 ret = ddb_class_create();
2248 if (ret < 0) 2276 if (ret < 0)
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index ce69e648b663..8c92cb7f7e72 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -336,9 +336,9 @@ int ngene_command(struct ngene *dev, struct ngene_command *com)
336{ 336{
337 int result; 337 int result;
338 338
339 down(&dev->cmd_mutex); 339 mutex_lock(&dev->cmd_mutex);
340 result = ngene_command_mutex(dev, com); 340 result = ngene_command_mutex(dev, com);
341 up(&dev->cmd_mutex); 341 mutex_unlock(&dev->cmd_mutex);
342 return result; 342 return result;
343} 343}
344 344
@@ -560,7 +560,6 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
560 u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700); 560 u16 BsSPI = ((stream & 1) ? 0x9800 : 0x9700);
561 u16 BsSDO = 0x9B00; 561 u16 BsSDO = 0x9B00;
562 562
563 down(&dev->stream_mutex);
564 memset(&com, 0, sizeof(com)); 563 memset(&com, 0, sizeof(com));
565 com.cmd.hdr.Opcode = CMD_CONTROL; 564 com.cmd.hdr.Opcode = CMD_CONTROL;
566 com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2; 565 com.cmd.hdr.Length = sizeof(struct FW_STREAM_CONTROL) - 2;
@@ -586,17 +585,13 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
586 chan->State = KSSTATE_ACQUIRE; 585 chan->State = KSSTATE_ACQUIRE;
587 chan->HWState = HWSTATE_STOP; 586 chan->HWState = HWSTATE_STOP;
588 spin_unlock_irq(&chan->state_lock); 587 spin_unlock_irq(&chan->state_lock);
589 if (ngene_command(dev, &com) < 0) { 588 if (ngene_command(dev, &com) < 0)
590 up(&dev->stream_mutex);
591 return -1; 589 return -1;
592 }
593 /* clear_buffers(chan); */ 590 /* clear_buffers(chan); */
594 flush_buffers(chan); 591 flush_buffers(chan);
595 up(&dev->stream_mutex);
596 return 0; 592 return 0;
597 } 593 }
598 spin_unlock_irq(&chan->state_lock); 594 spin_unlock_irq(&chan->state_lock);
599 up(&dev->stream_mutex);
600 return 0; 595 return 0;
601 } 596 }
602 597
@@ -692,11 +687,9 @@ static int ngene_command_stream_control(struct ngene *dev, u8 stream,
692 chan->HWState = HWSTATE_STARTUP; 687 chan->HWState = HWSTATE_STARTUP;
693 spin_unlock_irq(&chan->state_lock); 688 spin_unlock_irq(&chan->state_lock);
694 689
695 if (ngene_command(dev, &com) < 0) { 690 if (ngene_command(dev, &com) < 0)
696 up(&dev->stream_mutex);
697 return -1; 691 return -1;
698 } 692
699 up(&dev->stream_mutex);
700 return 0; 693 return 0;
701} 694}
702 695
@@ -750,8 +743,11 @@ void set_transfer(struct ngene_channel *chan, int state)
750 /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n", 743 /* else printk(KERN_INFO DEVICE_NAME ": lock=%08x\n",
751 ngreadl(0x9310)); */ 744 ngreadl(0x9310)); */
752 745
746 mutex_lock(&dev->stream_mutex);
753 ret = ngene_command_stream_control(dev, chan->number, 747 ret = ngene_command_stream_control(dev, chan->number,
754 control, mode, flags); 748 control, mode, flags);
749 mutex_unlock(&dev->stream_mutex);
750
755 if (!ret) 751 if (!ret)
756 chan->running = state; 752 chan->running = state;
757 else 753 else
@@ -1283,7 +1279,7 @@ static int ngene_load_firm(struct ngene *dev)
1283 1279
1284static void ngene_stop(struct ngene *dev) 1280static void ngene_stop(struct ngene *dev)
1285{ 1281{
1286 down(&dev->cmd_mutex); 1282 mutex_destroy(&dev->cmd_mutex);
1287 i2c_del_adapter(&(dev->channel[0].i2c_adapter)); 1283 i2c_del_adapter(&(dev->channel[0].i2c_adapter));
1288 i2c_del_adapter(&(dev->channel[1].i2c_adapter)); 1284 i2c_del_adapter(&(dev->channel[1].i2c_adapter));
1289 ngwritel(0, NGENE_INT_ENABLE); 1285 ngwritel(0, NGENE_INT_ENABLE);
@@ -1346,10 +1342,10 @@ static int ngene_start(struct ngene *dev)
1346 init_waitqueue_head(&dev->cmd_wq); 1342 init_waitqueue_head(&dev->cmd_wq);
1347 init_waitqueue_head(&dev->tx_wq); 1343 init_waitqueue_head(&dev->tx_wq);
1348 init_waitqueue_head(&dev->rx_wq); 1344 init_waitqueue_head(&dev->rx_wq);
1349 sema_init(&dev->cmd_mutex, 1); 1345 mutex_init(&dev->cmd_mutex);
1350 sema_init(&dev->stream_mutex, 1); 1346 mutex_init(&dev->stream_mutex);
1351 sema_init(&dev->pll_mutex, 1); 1347 sema_init(&dev->pll_mutex, 1);
1352 sema_init(&dev->i2c_switch_mutex, 1); 1348 mutex_init(&dev->i2c_switch_mutex);
1353 spin_lock_init(&dev->cmd_lock); 1349 spin_lock_init(&dev->cmd_lock);
1354 for (i = 0; i < MAX_STREAM; i++) 1350 for (i = 0; i < MAX_STREAM; i++)
1355 spin_lock_init(&dev->channel[i].state_lock); 1351 spin_lock_init(&dev->channel[i].state_lock);
@@ -1606,10 +1602,10 @@ static void ngene_unlink(struct ngene *dev)
1606 com.in_len = 3; 1602 com.in_len = 3;
1607 com.out_len = 1; 1603 com.out_len = 1;
1608 1604
1609 down(&dev->cmd_mutex); 1605 mutex_lock(&dev->cmd_mutex);
1610 ngwritel(0, NGENE_INT_ENABLE); 1606 ngwritel(0, NGENE_INT_ENABLE);
1611 ngene_command_mutex(dev, &com); 1607 ngene_command_mutex(dev, &com);
1612 up(&dev->cmd_mutex); 1608 mutex_unlock(&dev->cmd_mutex);
1613} 1609}
1614 1610
1615void ngene_shutdown(struct pci_dev *pdev) 1611void ngene_shutdown(struct pci_dev *pdev)
diff --git a/drivers/media/pci/ngene/ngene-i2c.c b/drivers/media/pci/ngene/ngene-i2c.c
index cf39fcf54adf..fbf36353c701 100644
--- a/drivers/media/pci/ngene/ngene-i2c.c
+++ b/drivers/media/pci/ngene/ngene-i2c.c
@@ -118,7 +118,7 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
118 (struct ngene_channel *)i2c_get_adapdata(adapter); 118 (struct ngene_channel *)i2c_get_adapdata(adapter);
119 struct ngene *dev = chan->dev; 119 struct ngene *dev = chan->dev;
120 120
121 down(&dev->i2c_switch_mutex); 121 mutex_lock(&dev->i2c_switch_mutex);
122 ngene_i2c_set_bus(dev, chan->number); 122 ngene_i2c_set_bus(dev, chan->number);
123 123
124 if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD)) 124 if (num == 2 && msg[1].flags & I2C_M_RD && !(msg[0].flags & I2C_M_RD))
@@ -136,11 +136,11 @@ static int ngene_i2c_master_xfer(struct i2c_adapter *adapter,
136 msg[0].buf, msg[0].len, 0)) 136 msg[0].buf, msg[0].len, 0))
137 goto done; 137 goto done;
138 138
139 up(&dev->i2c_switch_mutex); 139 mutex_unlock(&dev->i2c_switch_mutex);
140 return -EIO; 140 return -EIO;
141 141
142done: 142done:
143 up(&dev->i2c_switch_mutex); 143 mutex_unlock(&dev->i2c_switch_mutex);
144 return num; 144 return num;
145} 145}
146 146
diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h
index 10d8f74c4f0a..7c7cd217333d 100644
--- a/drivers/media/pci/ngene/ngene.h
+++ b/drivers/media/pci/ngene/ngene.h
@@ -762,10 +762,10 @@ struct ngene {
762 762
763 wait_queue_head_t cmd_wq; 763 wait_queue_head_t cmd_wq;
764 int cmd_done; 764 int cmd_done;
765 struct semaphore cmd_mutex; 765 struct mutex cmd_mutex;
766 struct semaphore stream_mutex; 766 struct mutex stream_mutex;
767 struct semaphore pll_mutex; 767 struct semaphore pll_mutex;
768 struct semaphore i2c_switch_mutex; 768 struct mutex i2c_switch_mutex;
769 int i2c_current_channel; 769 int i2c_current_channel;
770 int i2c_current_bus; 770 int i2c_current_bus;
771 spinlock_t cmd_lock; 771 spinlock_t cmd_lock;
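The ngene hunks above convert binary semaphores used as plain locks into proper mutexes, which are cheaper and get lockdep coverage; pll_mutex stays a semaphore. Two accompanying changes are worth noting: stream_mutex locking moves out of ngene_command_stream_control() into its caller set_transfer(), and ngene_stop() now calls mutex_destroy() where it previously took the semaphore. A minimal before/after sketch (names hypothetical):

```c
#include <linux/mutex.h>

struct demo_dev {
	struct mutex cmd_mutex;		/* was: struct semaphore cmd_mutex */
};

static void demo_init(struct demo_dev *dev)
{
	mutex_init(&dev->cmd_mutex);	/* was: sema_init(&dev->cmd_mutex, 1) */
}

static int demo_command(struct demo_dev *dev)
{
	int result = 0;

	mutex_lock(&dev->cmd_mutex);	/* was: down(&dev->cmd_mutex) */
	/* ... issue the command ... */
	mutex_unlock(&dev->cmd_mutex);	/* was: up(&dev->cmd_mutex) */
	return result;
}
```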
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 2a044be729da..e7bd2b8484e3 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -545,6 +545,7 @@ static int tw5864_fmt_vid_cap(struct file *file, void *priv,
545 switch (input->std) { 545 switch (input->std) {
546 default: 546 default:
547 WARN_ON_ONCE(1); 547 WARN_ON_ONCE(1);
548 return -EINVAL;
548 case STD_NTSC: 549 case STD_NTSC:
549 f->fmt.pix.height = 480; 550 f->fmt.pix.height = 480;
550 break; 551 break;
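This hunk, like the fc0011 change further down, adds an explicit error return to a default: label that previously only warned and then fell through into the first real case. A minimal sketch of the pattern (values hypothetical):

```c
#include <linux/bug.h>
#include <linux/errno.h>

static int demo_std_height(unsigned int std)
{
	switch (std) {
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;	/* no longer falls through into NTSC */
	case 0:			/* NTSC */
		return 480;
	case 1:			/* PAL */
		return 576;
	}
}
```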
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 1313cd533436..fb1fa0b82077 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -475,8 +475,8 @@ config VIDEO_QCOM_VENUS
475 tristate "Qualcomm Venus V4L2 encoder/decoder driver" 475 tristate "Qualcomm Venus V4L2 encoder/decoder driver"
476 depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA 476 depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
477 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST 477 depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
478 select QCOM_MDT_LOADER if (ARM || ARM64) 478 select QCOM_MDT_LOADER if ARCH_QCOM
479 select QCOM_SCM if (ARM || ARM64) 479 select QCOM_SCM if ARCH_QCOM
480 select VIDEOBUF2_DMA_SG 480 select VIDEOBUF2_DMA_SG
481 select V4L2_MEM2MEM_DEV 481 select V4L2_MEM2MEM_DEV
482 ---help--- 482 ---help---
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index 25cbf9e5ac5a..bba1eb43b5d8 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -393,8 +393,8 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx,
393 int ret; 393 int ret;
394 int i; 394 int i;
395 395
396 if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || 396 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
397 ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) { 397 ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264) {
398 width = round_up(q_data->width, 16); 398 width = round_up(q_data->width, 16);
399 height = round_up(q_data->height, 16); 399 height = round_up(q_data->height, 16);
400 } else { 400 } else {
@@ -2198,7 +2198,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
2198 ctx->display_idx = display_idx; 2198 ctx->display_idx = display_idx;
2199} 2199}
2200 2200
2201static void coda_error_decode(struct coda_ctx *ctx) 2201static void coda_decode_timeout(struct coda_ctx *ctx)
2202{ 2202{
2203 struct vb2_v4l2_buffer *dst_buf; 2203 struct vb2_v4l2_buffer *dst_buf;
2204 2204
@@ -2223,7 +2223,7 @@ const struct coda_context_ops coda_bit_decode_ops = {
2223 .start_streaming = coda_start_decoding, 2223 .start_streaming = coda_start_decoding,
2224 .prepare_run = coda_prepare_decode, 2224 .prepare_run = coda_prepare_decode,
2225 .finish_run = coda_finish_decode, 2225 .finish_run = coda_finish_decode,
2226 .error_run = coda_error_decode, 2226 .run_timeout = coda_decode_timeout,
2227 .seq_end_work = coda_seq_end_work, 2227 .seq_end_work = coda_seq_end_work,
2228 .release = coda_bit_release, 2228 .release = coda_bit_release,
2229}; 2229};
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index f92cc7df58fb..829c7895a98a 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1164,8 +1164,8 @@ static void coda_pic_run_work(struct work_struct *work)
1164 1164
1165 coda_hw_reset(ctx); 1165 coda_hw_reset(ctx);
1166 1166
1167 if (ctx->ops->error_run) 1167 if (ctx->ops->run_timeout)
1168 ctx->ops->error_run(ctx); 1168 ctx->ops->run_timeout(ctx);
1169 } else if (!ctx->aborting) { 1169 } else if (!ctx->aborting) {
1170 ctx->ops->finish_run(ctx); 1170 ctx->ops->finish_run(ctx);
1171 } 1171 }
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 40fe22f0d757..c5f504d8cf67 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -183,7 +183,7 @@ struct coda_context_ops {
183 int (*start_streaming)(struct coda_ctx *ctx); 183 int (*start_streaming)(struct coda_ctx *ctx);
184 int (*prepare_run)(struct coda_ctx *ctx); 184 int (*prepare_run)(struct coda_ctx *ctx);
185 void (*finish_run)(struct coda_ctx *ctx); 185 void (*finish_run)(struct coda_ctx *ctx);
186 void (*error_run)(struct coda_ctx *ctx); 186 void (*run_timeout)(struct coda_ctx *ctx);
187 void (*seq_end_work)(struct work_struct *work); 187 void (*seq_end_work)(struct work_struct *work);
188 void (*release)(struct coda_ctx *ctx); 188 void (*release)(struct coda_ctx *ctx);
189}; 189};
diff --git a/drivers/media/platform/davinci/ccdc_hw_device.h b/drivers/media/platform/davinci/ccdc_hw_device.h
index 8f6688a7a111..f1b521045d64 100644
--- a/drivers/media/platform/davinci/ccdc_hw_device.h
+++ b/drivers/media/platform/davinci/ccdc_hw_device.h
@@ -42,16 +42,6 @@ struct ccdc_hw_ops {
42 int (*set_hw_if_params) (struct vpfe_hw_if_param *param); 42 int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
43 /* get interface parameters */ 43 /* get interface parameters */
44 int (*get_hw_if_params) (struct vpfe_hw_if_param *param); 44 int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
45 /*
46 * Pointer to function to set parameters. Used
47 * for implementing VPFE_S_CCDC_PARAMS
48 */
49 int (*set_params) (void *params);
50 /*
51 * Pointer to function to get parameter. Used
52 * for implementing VPFE_G_CCDC_PARAMS
53 */
54 int (*get_params) (void *params);
55 /* Pointer to function to configure ccdc */ 45 /* Pointer to function to configure ccdc */
56 int (*configure) (void); 46 int (*configure) (void);
57 47
diff --git a/drivers/media/platform/davinci/dm355_ccdc.c b/drivers/media/platform/davinci/dm355_ccdc.c
index 73db166dc338..6d492dc4c3a9 100644
--- a/drivers/media/platform/davinci/dm355_ccdc.c
+++ b/drivers/media/platform/davinci/dm355_ccdc.c
@@ -17,12 +17,7 @@
17 * This module is for configuring DM355 CCD controller of VPFE to capture 17 * This module is for configuring DM355 CCD controller of VPFE to capture
18 * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules 18 * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
19 * such as Defect Pixel Correction, Color Space Conversion etc to 19 * such as Defect Pixel Correction, Color Space Conversion etc to
20 * pre-process the Bayer RGB data, before writing it to SDRAM. This 20 * pre-process the Bayer RGB data, before writing it to SDRAM.
21 * module also allows application to configure individual
22 * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL.
23 * To do so, application include dm355_ccdc.h and vpfe_capture.h header
24 * files. The setparams() API is called by vpfe_capture driver
25 * to configure module parameters
26 * 21 *
27 * TODO: 1) Raw bayer parameter settings and bayer capture 22 * TODO: 1) Raw bayer parameter settings and bayer capture
28 * 2) Split module parameter structure to module specific ioctl structs 23 * 2) Split module parameter structure to module specific ioctl structs
@@ -260,90 +255,6 @@ static void ccdc_setwin(struct v4l2_rect *image_win,
260 dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin..."); 255 dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
261} 256}
262 257
263static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
264{
265 if (ccdcparam->datasft < CCDC_DATA_NO_SHIFT ||
266 ccdcparam->datasft > CCDC_DATA_SHIFT_6BIT) {
267 dev_dbg(ccdc_cfg.dev, "Invalid value of data shift\n");
268 return -EINVAL;
269 }
270
271 if (ccdcparam->mfilt1 < CCDC_NO_MEDIAN_FILTER1 ||
272 ccdcparam->mfilt1 > CCDC_MEDIAN_FILTER1) {
273 dev_dbg(ccdc_cfg.dev, "Invalid value of median filter1\n");
274 return -EINVAL;
275 }
276
277 if (ccdcparam->mfilt2 < CCDC_NO_MEDIAN_FILTER2 ||
278 ccdcparam->mfilt2 > CCDC_MEDIAN_FILTER2) {
279 dev_dbg(ccdc_cfg.dev, "Invalid value of median filter2\n");
280 return -EINVAL;
281 }
282
283 if ((ccdcparam->med_filt_thres < 0) ||
284 (ccdcparam->med_filt_thres > CCDC_MED_FILT_THRESH)) {
285 dev_dbg(ccdc_cfg.dev,
286 "Invalid value of median filter threshold\n");
287 return -EINVAL;
288 }
289
290 if (ccdcparam->data_sz < CCDC_DATA_16BITS ||
291 ccdcparam->data_sz > CCDC_DATA_8BITS) {
292 dev_dbg(ccdc_cfg.dev, "Invalid value of data size\n");
293 return -EINVAL;
294 }
295
296 if (ccdcparam->alaw.enable) {
297 if (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_13_4 ||
298 ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) {
299 dev_dbg(ccdc_cfg.dev, "Invalid value of ALAW\n");
300 return -EINVAL;
301 }
302 }
303
304 if (ccdcparam->blk_clamp.b_clamp_enable) {
305 if (ccdcparam->blk_clamp.sample_pixel < CCDC_SAMPLE_1PIXELS ||
306 ccdcparam->blk_clamp.sample_pixel > CCDC_SAMPLE_16PIXELS) {
307 dev_dbg(ccdc_cfg.dev,
308 "Invalid value of sample pixel\n");
309 return -EINVAL;
310 }
311 if (ccdcparam->blk_clamp.sample_ln < CCDC_SAMPLE_1LINES ||
312 ccdcparam->blk_clamp.sample_ln > CCDC_SAMPLE_16LINES) {
313 dev_dbg(ccdc_cfg.dev,
314 "Invalid value of sample lines\n");
315 return -EINVAL;
316 }
317 }
318 return 0;
319}
320
321/* Parameter operations */
322static int ccdc_set_params(void __user *params)
323{
324 struct ccdc_config_params_raw ccdc_raw_params;
325 int x;
326
327 /* only raw module parameters can be set through the IOCTL */
328 if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
329 return -EINVAL;
330
331 x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
332 if (x) {
333 dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copying ccdcparams, %d\n",
334 x);
335 return -EFAULT;
336 }
337
338 if (!validate_ccdc_param(&ccdc_raw_params)) {
339 memcpy(&ccdc_cfg.bayer.config_params,
340 &ccdc_raw_params,
341 sizeof(ccdc_raw_params));
342 return 0;
343 }
344 return -EINVAL;
345}
346
347/* This function will configure CCDC for YCbCr video capture */ 258/* This function will configure CCDC for YCbCr video capture */
348static void ccdc_config_ycbcr(void) 259static void ccdc_config_ycbcr(void)
349{ 260{
@@ -939,7 +850,6 @@ static struct ccdc_hw_device ccdc_hw_dev = {
939 .enable = ccdc_enable, 850 .enable = ccdc_enable,
940 .enable_out_to_sdram = ccdc_enable_output_to_sdram, 851 .enable_out_to_sdram = ccdc_enable_output_to_sdram,
941 .set_hw_if_params = ccdc_set_hw_if_params, 852 .set_hw_if_params = ccdc_set_hw_if_params,
942 .set_params = ccdc_set_params,
943 .configure = ccdc_configure, 853 .configure = ccdc_configure,
944 .set_buftype = ccdc_set_buftype, 854 .set_buftype = ccdc_set_buftype,
945 .get_buftype = ccdc_get_buftype, 855 .get_buftype = ccdc_get_buftype,
diff --git a/drivers/media/platform/davinci/dm644x_ccdc.c b/drivers/media/platform/davinci/dm644x_ccdc.c
index 740fbc7a8c14..3b2d8a9317b8 100644
--- a/drivers/media/platform/davinci/dm644x_ccdc.c
+++ b/drivers/media/platform/davinci/dm644x_ccdc.c
@@ -17,13 +17,9 @@
17 * This module is for configuring CCD controller of DM6446 VPFE to capture 17 * This module is for configuring CCD controller of DM6446 VPFE to capture
18 * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules 18 * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
19 * such as Defect Pixel Correction, Color Space Conversion etc to 19 * such as Defect Pixel Correction, Color Space Conversion etc to
20 * pre-process the Raw Bayer RGB data, before writing it to SDRAM. This 20 * pre-process the Raw Bayer RGB data, before writing it to SDRAM.
21 * module also allows application to configure individual 21 * This file is named DM644x so that other variants such DM6443
22 * module parameters through VPFE_CMD_S_CCDC_RAW_PARAMS IOCTL. 22 * may be supported using the same module.
23 * To do so, application includes dm644x_ccdc.h and vpfe_capture.h header
24 * files. The setparams() API is called by vpfe_capture driver
25 * to configure module parameters. This file is named DM644x so that other
26 * variants such DM6443 may be supported using the same module.
27 * 23 *
28 * TODO: Test Raw bayer parameter settings and bayer capture 24 * TODO: Test Raw bayer parameter settings and bayer capture
29 * Split module parameter structure to module specific ioctl structs 25 * Split module parameter structure to module specific ioctl structs
@@ -216,96 +212,8 @@ static void ccdc_readregs(void)
216 dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val); 212 dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
217} 213}
218 214
219static int validate_ccdc_param(struct ccdc_config_params_raw *ccdcparam)
220{
221 if (ccdcparam->alaw.enable) {
222 u8 max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
223 u8 max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
224
225 if ((ccdcparam->alaw.gamma_wd > CCDC_GAMMA_BITS_09_0) ||
226 (ccdcparam->alaw.gamma_wd < CCDC_GAMMA_BITS_15_6) ||
227 (max_gamma > max_data)) {
228 dev_dbg(ccdc_cfg.dev, "\nInvalid data line select");
229 return -1;
230 }
231 }
232 return 0;
233}
234
235static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
236{
237 struct ccdc_config_params_raw *config_params =
238 &ccdc_cfg.bayer.config_params;
239 unsigned int *fpc_virtaddr = NULL;
240 unsigned int *fpc_physaddr = NULL;
241
242 memcpy(config_params, raw_params, sizeof(*raw_params));
243 /*
244 * allocate memory for fault pixel table and copy the user
245 * values to the table
246 */
247 if (!config_params->fault_pxl.enable)
248 return 0;
249
250 fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
251 fpc_virtaddr = (unsigned int *)phys_to_virt(
252 (unsigned long)fpc_physaddr);
253 /*
254 * Allocate memory for FPC table if current
255 * FPC table buffer is not big enough to
256 * accommodate FPC Number requested
257 */
258 if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
259 if (fpc_physaddr != NULL) {
260 free_pages((unsigned long)fpc_virtaddr,
261 get_order
262 (config_params->fault_pxl.fp_num *
263 FP_NUM_BYTES));
264 }
265
266 /* Allocate memory for FPC table */
267 fpc_virtaddr =
268 (unsigned int *)__get_free_pages(GFP_KERNEL | GFP_DMA,
269 get_order(raw_params->
270 fault_pxl.fp_num *
271 FP_NUM_BYTES));
272
273 if (fpc_virtaddr == NULL) {
274 dev_dbg(ccdc_cfg.dev,
275 "\nUnable to allocate memory for FPC");
276 return -EFAULT;
277 }
278 fpc_physaddr =
279 (unsigned int *)virt_to_phys((void *)fpc_virtaddr);
280 }
281
282 /* Copy number of fault pixels and FPC table */
283 config_params->fault_pxl.fp_num = raw_params->fault_pxl.fp_num;
284 if (copy_from_user(fpc_virtaddr,
285 (void __user *)raw_params->fault_pxl.fpc_table_addr,
286 config_params->fault_pxl.fp_num * FP_NUM_BYTES)) {
287 dev_dbg(ccdc_cfg.dev, "\n copy_from_user failed");
288 return -EFAULT;
289 }
290 config_params->fault_pxl.fpc_table_addr = (unsigned long)fpc_physaddr;
291 return 0;
292}
293
294static int ccdc_close(struct device *dev) 215static int ccdc_close(struct device *dev)
295{ 216{
296 struct ccdc_config_params_raw *config_params =
297 &ccdc_cfg.bayer.config_params;
298 unsigned int *fpc_physaddr = NULL, *fpc_virtaddr = NULL;
299
300 fpc_physaddr = (unsigned int *)config_params->fault_pxl.fpc_table_addr;
301
302 if (fpc_physaddr != NULL) {
303 fpc_virtaddr = (unsigned int *)
304 phys_to_virt((unsigned long)fpc_physaddr);
305 free_pages((unsigned long)fpc_virtaddr,
306 get_order(config_params->fault_pxl.fp_num *
307 FP_NUM_BYTES));
308 }
309 return 0; 217 return 0;
310} 218}
311 219
@@ -339,29 +247,6 @@ static void ccdc_sbl_reset(void)
339 vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O); 247 vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
340} 248}
341 249
342/* Parameter operations */
343static int ccdc_set_params(void __user *params)
344{
345 struct ccdc_config_params_raw ccdc_raw_params;
346 int x;
347
348 if (ccdc_cfg.if_type != VPFE_RAW_BAYER)
349 return -EINVAL;
350
351 x = copy_from_user(&ccdc_raw_params, params, sizeof(ccdc_raw_params));
352 if (x) {
353 dev_dbg(ccdc_cfg.dev, "ccdc_set_params: error in copyingccdc params, %d\n",
354 x);
355 return -EFAULT;
356 }
357
358 if (!validate_ccdc_param(&ccdc_raw_params)) {
359 if (!ccdc_update_raw_params(&ccdc_raw_params))
360 return 0;
361 }
362 return -EINVAL;
363}
364
365/* 250/*
366 * ccdc_config_ycbcr() 251 * ccdc_config_ycbcr()
367 * This function will configure CCDC for YCbCr video capture 252 * This function will configure CCDC for YCbCr video capture
@@ -489,32 +374,6 @@ static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
489 regw(val, CCDC_BLKCMP); 374 regw(val, CCDC_BLKCMP);
490} 375}
491 376
492static void ccdc_config_fpc(struct ccdc_fault_pixel *fpc)
493{
494 u32 val;
495
496 /* Initially disable FPC */
497 val = CCDC_FPC_DISABLE;
498 regw(val, CCDC_FPC);
499
500 if (!fpc->enable)
501 return;
502
503 /* Configure Fault pixel if needed */
504 regw(fpc->fpc_table_addr, CCDC_FPC_ADDR);
505 dev_dbg(ccdc_cfg.dev, "\nWriting 0x%lx to FPC_ADDR...\n",
506 (fpc->fpc_table_addr));
507 /* Write the FPC params with FPC disable */
508 val = fpc->fp_num & CCDC_FPC_FPC_NUM_MASK;
509 regw(val, CCDC_FPC);
510
511 dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
512 /* read the FPC register */
513 val = regr(CCDC_FPC) | CCDC_FPC_ENABLE;
514 regw(val, CCDC_FPC);
515 dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FPC...\n", val);
516}
517
518/* 377/*
519 * ccdc_config_raw() 378 * ccdc_config_raw()
520 * This function will configure CCDC for Raw capture mode 379 * This function will configure CCDC for Raw capture mode
@@ -569,9 +428,6 @@ static void ccdc_config_raw(void)
569 /* Configure Black level compensation */ 428 /* Configure Black level compensation */
570 ccdc_config_black_compense(&config_params->blk_comp); 429 ccdc_config_black_compense(&config_params->blk_comp);
571 430
572 /* Configure Fault Pixel Correction */
573 ccdc_config_fpc(&config_params->fault_pxl);
574
575 /* If data size is 8 bit then pack the data */ 431 /* If data size is 8 bit then pack the data */
576 if ((config_params->data_sz == CCDC_DATA_8BITS) || 432 if ((config_params->data_sz == CCDC_DATA_8BITS) ||
577 config_params->alaw.enable) 433 config_params->alaw.enable)
@@ -929,7 +785,6 @@ static struct ccdc_hw_device ccdc_hw_dev = {
929 .reset = ccdc_sbl_reset, 785 .reset = ccdc_sbl_reset,
930 .enable = ccdc_enable, 786 .enable = ccdc_enable,
931 .set_hw_if_params = ccdc_set_hw_if_params, 787 .set_hw_if_params = ccdc_set_hw_if_params,
932 .set_params = ccdc_set_params,
933 .configure = ccdc_configure, 788 .configure = ccdc_configure,
934 .set_buftype = ccdc_set_buftype, 789 .set_buftype = ccdc_set_buftype,
935 .get_buftype = ccdc_get_buftype, 790 .get_buftype = ccdc_get_buftype,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index e3fe3e0635aa..b1bf4a7e8eb7 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -281,45 +281,6 @@ void vpfe_unregister_ccdc_device(struct ccdc_hw_device *dev)
281EXPORT_SYMBOL(vpfe_unregister_ccdc_device); 281EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
282 282
283/* 283/*
284 * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
285 */
286static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe_dev,
287 struct v4l2_format *f)
288{
289 struct v4l2_rect image_win;
290 enum ccdc_buftype buf_type;
291 enum ccdc_frmfmt frm_fmt;
292
293 memset(f, 0, sizeof(*f));
294 f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
295 ccdc_dev->hw_ops.get_image_window(&image_win);
296 f->fmt.pix.width = image_win.width;
297 f->fmt.pix.height = image_win.height;
298 f->fmt.pix.bytesperline = ccdc_dev->hw_ops.get_line_length();
299 f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
300 f->fmt.pix.height;
301 buf_type = ccdc_dev->hw_ops.get_buftype();
302 f->fmt.pix.pixelformat = ccdc_dev->hw_ops.get_pixel_format();
303 frm_fmt = ccdc_dev->hw_ops.get_frame_format();
304 if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
305 f->fmt.pix.field = V4L2_FIELD_NONE;
306 else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
307 if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
308 f->fmt.pix.field = V4L2_FIELD_INTERLACED;
309 else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED)
310 f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
311 else {
312 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf_type\n");
313 return -EINVAL;
314 }
315 } else {
316 v4l2_err(&vpfe_dev->v4l2_dev, "Invalid frm_fmt\n");
317 return -EINVAL;
318 }
319 return 0;
320}
321
322/*
323 * vpfe_config_ccdc_image_format() 284 * vpfe_config_ccdc_image_format()
324 * For a pix format, configure ccdc to setup the capture 285 * For a pix format, configure ccdc to setup the capture
325 */ 286 */
@@ -1697,59 +1658,6 @@ unlock_out:
1697 return ret; 1658 return ret;
1698} 1659}
1699 1660
1700
1701static long vpfe_param_handler(struct file *file, void *priv,
1702 bool valid_prio, unsigned int cmd, void *param)
1703{
1704 struct vpfe_device *vpfe_dev = video_drvdata(file);
1705 int ret;
1706
1707 v4l2_dbg(2, debug, &vpfe_dev->v4l2_dev, "vpfe_param_handler\n");
1708
1709 if (vpfe_dev->started) {
1710 /* only allowed if streaming is not started */
1711 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1712 "device already started\n");
1713 return -EBUSY;
1714 }
1715
1716 ret = mutex_lock_interruptible(&vpfe_dev->lock);
1717 if (ret)
1718 return ret;
1719
1720 switch (cmd) {
1721 case VPFE_CMD_S_CCDC_RAW_PARAMS:
1722 v4l2_warn(&vpfe_dev->v4l2_dev,
1723 "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
1724 if (ccdc_dev->hw_ops.set_params) {
1725 ret = ccdc_dev->hw_ops.set_params(param);
1726 if (ret) {
1727 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1728 "Error setting parameters in CCDC\n");
1729 goto unlock_out;
1730 }
1731 ret = vpfe_get_ccdc_image_format(vpfe_dev,
1732 &vpfe_dev->fmt);
1733 if (ret < 0) {
1734 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1735 "Invalid image format at CCDC\n");
1736 goto unlock_out;
1737 }
1738 } else {
1739 ret = -EINVAL;
1740 v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
1741 "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
1742 }
1743 break;
1744 default:
1745 ret = -ENOTTY;
1746 }
1747unlock_out:
1748 mutex_unlock(&vpfe_dev->lock);
1749 return ret;
1750}
1751
1752
1753/* vpfe capture ioctl operations */ 1661/* vpfe capture ioctl operations */
1754static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { 1662static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
1755 .vidioc_querycap = vpfe_querycap, 1663 .vidioc_querycap = vpfe_querycap,
@@ -1772,7 +1680,6 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
1772 .vidioc_cropcap = vpfe_cropcap, 1680 .vidioc_cropcap = vpfe_cropcap,
1773 .vidioc_g_selection = vpfe_g_selection, 1681 .vidioc_g_selection = vpfe_g_selection,
1774 .vidioc_s_selection = vpfe_s_selection, 1682 .vidioc_s_selection = vpfe_s_selection,
1775 .vidioc_default = vpfe_param_handler,
1776}; 1683};
1777 1684
1778static struct vpfe_device *vpfe_initialize(void) 1685static struct vpfe_device *vpfe_initialize(void)
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index d78580f9e431..4be6554c56c5 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1719,7 +1719,6 @@ vpif_unregister:
1719 */ 1719 */
1720static int vpif_remove(struct platform_device *device) 1720static int vpif_remove(struct platform_device *device)
1721{ 1721{
1722 struct common_obj *common;
1723 struct channel_obj *ch; 1722 struct channel_obj *ch;
1724 int i; 1723 int i;
1725 1724
@@ -1730,7 +1729,6 @@ static int vpif_remove(struct platform_device *device)
1730 for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { 1729 for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
1731 /* Get the pointer to the channel object */ 1730 /* Get the pointer to the channel object */
1732 ch = vpif_obj.dev[i]; 1731 ch = vpif_obj.dev[i];
1733 common = &ch->common[VPIF_VIDEO_INDEX];
1734 /* Unregister video device */ 1732 /* Unregister video device */
1735 video_unregister_device(&ch->video_dev); 1733 video_unregister_device(&ch->video_dev);
1736 kfree(vpif_obj.dev[i]); 1734 kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index b5ac6ce626b3..bf982bf86542 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1339,7 +1339,6 @@ vpif_unregister:
1339 */ 1339 */
1340static int vpif_remove(struct platform_device *device) 1340static int vpif_remove(struct platform_device *device)
1341{ 1341{
1342 struct common_obj *common;
1343 struct channel_obj *ch; 1342 struct channel_obj *ch;
1344 int i; 1343 int i;
1345 1344
@@ -1350,7 +1349,6 @@ static int vpif_remove(struct platform_device *device)
1350 for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { 1349 for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
1351 /* Get the pointer to the channel object */ 1350 /* Get the pointer to the channel object */
1352 ch = vpif_obj.dev[i]; 1351 ch = vpif_obj.dev[i];
1353 common = &ch->common[VPIF_VIDEO_INDEX];
1354 /* Unregister video device */ 1352 /* Unregister video device */
1355 video_unregister_device(&ch->video_dev); 1353 video_unregister_device(&ch->video_dev);
1356 kfree(vpif_obj.dev[i]); 1354 kfree(vpif_obj.dev[i]);
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 92c4e1826356..45a553d4f5b2 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -16,7 +16,6 @@
16#include <media/videobuf-dma-contig.h> 16#include <media/videobuf-dma-contig.h>
17#include <media/v4l2-device.h> 17#include <media/v4l2-device.h>
18 18
19#include <linux/omap-dma.h>
20#include <video/omapvrfb.h> 19#include <video/omapvrfb.h>
21 20
22#include "omap_voutdef.h" 21#include "omap_voutdef.h"
@@ -63,7 +62,7 @@ static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
63/* 62/*
64 * Wakes up the application once the DMA transfer to VRFB space is completed. 63 * Wakes up the application once the DMA transfer to VRFB space is completed.
65 */ 64 */
66static void omap_vout_vrfb_dma_tx_callback(int lch, u16 ch_status, void *data) 65static void omap_vout_vrfb_dma_tx_callback(void *data)
67{ 66{
68 struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data; 67 struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;
69 68
@@ -94,6 +93,7 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
94 int ret = 0, i, j; 93 int ret = 0, i, j;
95 struct omap_vout_device *vout; 94 struct omap_vout_device *vout;
96 struct video_device *vfd; 95 struct video_device *vfd;
96 dma_cap_mask_t mask;
97 int image_width, image_height; 97 int image_width, image_height;
98 int vrfb_num_bufs = VRFB_NUM_BUFS; 98 int vrfb_num_bufs = VRFB_NUM_BUFS;
99 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); 99 struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
@@ -131,18 +131,27 @@ int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
131 /* 131 /*
132 * Request and Initialize DMA, for DMA based VRFB transfer 132 * Request and Initialize DMA, for DMA based VRFB transfer
133 */ 133 */
134 vout->vrfb_dma_tx.dev_id = OMAP_DMA_NO_DEVICE; 134 dma_cap_zero(mask);
135 vout->vrfb_dma_tx.dma_ch = -1; 135 dma_cap_set(DMA_INTERLEAVE, mask);
136 vout->vrfb_dma_tx.req_status = DMA_CHAN_ALLOTED; 136 vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask);
137 ret = omap_request_dma(vout->vrfb_dma_tx.dev_id, "VRFB DMA TX", 137 if (IS_ERR(vout->vrfb_dma_tx.chan)) {
138 omap_vout_vrfb_dma_tx_callback,
139 (void *) &vout->vrfb_dma_tx, &vout->vrfb_dma_tx.dma_ch);
140 if (ret < 0) {
141 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; 138 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
139 } else {
140 size_t xt_size = sizeof(struct dma_interleaved_template) +
141 sizeof(struct data_chunk);
142
143 vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL);
144 if (!vout->vrfb_dma_tx.xt) {
145 dma_release_channel(vout->vrfb_dma_tx.chan);
146 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
147 }
148 }
149
150 if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED)
142 dev_info(&pdev->dev, 151 dev_info(&pdev->dev,
143 ": failed to allocate DMA Channel for video%d\n", 152 ": failed to allocate DMA Channel for video%d\n",
144 vfd->minor); 153 vfd->minor);
145 } 154
146 init_waitqueue_head(&vout->vrfb_dma_tx.wait); 155 init_waitqueue_head(&vout->vrfb_dma_tx.wait);
147 156
148 /* statically allocated the VRFB buffer is done through 157 /* statically allocated the VRFB buffer is done through
@@ -177,7 +186,9 @@ void omap_vout_release_vrfb(struct omap_vout_device *vout)
177 186
178 if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) { 187 if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
179 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED; 188 vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
180 omap_free_dma(vout->vrfb_dma_tx.dma_ch); 189 kfree(vout->vrfb_dma_tx.xt);
190 dmaengine_terminate_sync(vout->vrfb_dma_tx.chan);
191 dma_release_channel(vout->vrfb_dma_tx.chan);
181 } 192 }
182} 193}
183 194
@@ -219,70 +230,84 @@ int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
219} 230}
220 231
221int omap_vout_prepare_vrfb(struct omap_vout_device *vout, 232int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
222 struct videobuf_buffer *vb) 233 struct videobuf_buffer *vb)
223{ 234{
224 dma_addr_t dmabuf; 235 struct dma_async_tx_descriptor *tx;
225 struct vid_vrfb_dma *tx; 236 enum dma_ctrl_flags flags;
237 struct dma_chan *chan = vout->vrfb_dma_tx.chan;
238 struct dma_device *dmadev = chan->device;
239 struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt;
240 dma_cookie_t cookie;
241 enum dma_status status;
226 enum dss_rotation rotation; 242 enum dss_rotation rotation;
227 u32 dest_frame_index = 0, src_element_index = 0; 243 size_t dst_icg;
228 u32 dest_element_index = 0, src_frame_index = 0; 244 u32 pixsize;
229 u32 elem_count = 0, frame_count = 0, pixsize = 2;
230 245
231 if (!is_rotation_enabled(vout)) 246 if (!is_rotation_enabled(vout))
232 return 0; 247 return 0;
233 248
234 dmabuf = vout->buf_phy_addr[vb->i];
235 /* If rotation is enabled, copy input buffer into VRFB 249 /* If rotation is enabled, copy input buffer into VRFB
236 * memory space using DMA. We are copying input buffer 250 * memory space using DMA. We are copying input buffer
237 * into VRFB memory space of desired angle and DSS will 251 * into VRFB memory space of desired angle and DSS will
238 * read image VRFB memory for 0 degree angle 252 * read image VRFB memory for 0 degree angle
239 */ 253 */
254
240 pixsize = vout->bpp * vout->vrfb_bpp; 255 pixsize = vout->bpp * vout->vrfb_bpp;
241 /* 256 dst_icg = ((MAX_PIXELS_PER_LINE * pixsize) -
242 * DMA transfer in double index mode 257 (vout->pix.width * vout->bpp)) + 1;
243 */ 258
259 xt->src_start = vout->buf_phy_addr[vb->i];
260 xt->dst_start = vout->vrfb_context[vb->i].paddr[0];
261
262 xt->numf = vout->pix.height;
263 xt->frame_size = 1;
264 xt->sgl[0].size = vout->pix.width * vout->bpp;
265 xt->sgl[0].icg = dst_icg;
266
267 xt->dir = DMA_MEM_TO_MEM;
268 xt->src_sgl = false;
269 xt->src_inc = true;
270 xt->dst_sgl = true;
271 xt->dst_inc = true;
272
273 tx = dmadev->device_prep_interleaved_dma(chan, xt, flags);
274 if (tx == NULL) {
275 pr_err("%s: DMA interleaved prep error\n", __func__);
276 return -EINVAL;
277 }
244 278
245 /* Frame index */ 279 tx->callback = omap_vout_vrfb_dma_tx_callback;
246 dest_frame_index = ((MAX_PIXELS_PER_LINE * pixsize) - 280 tx->callback_param = &vout->vrfb_dma_tx;
247 (vout->pix.width * vout->bpp)) + 1; 281
248 282 cookie = dmaengine_submit(tx);
249 /* Source and destination parameters */ 283 if (dma_submit_error(cookie)) {
250 src_element_index = 0; 284 pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie);
251 src_frame_index = 0; 285 return -EINVAL;
252 dest_element_index = 1; 286 }
253 /* Number of elements per frame */
254 elem_count = vout->pix.width * vout->bpp;
255 frame_count = vout->pix.height;
256 tx = &vout->vrfb_dma_tx;
257 tx->tx_status = 0;
258 omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32,
259 (elem_count / 4), frame_count, OMAP_DMA_SYNC_ELEMENT,
260 tx->dev_id, 0x0);
261 /* src_port required only for OMAP1 */
262 omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
263 dmabuf, src_element_index, src_frame_index);
264 /*set dma source burst mode for VRFB */
265 omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
266 rotation = calc_rotation(vout);
267 287
268 /* dest_port required only for OMAP1 */ 288 vout->vrfb_dma_tx.tx_status = 0;
269 omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, 289 dma_async_issue_pending(chan);
270 vout->vrfb_context[vb->i].paddr[0], dest_element_index,
271 dest_frame_index);
272 /*set dma dest burst mode for VRFB */
273 omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16);
274 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 0x20, 0);
275 290
276 omap_start_dma(tx->dma_ch); 291 wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait,
277 wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1, 292 vout->vrfb_dma_tx.tx_status == 1,
278 VRFB_TX_TIMEOUT); 293 VRFB_TX_TIMEOUT);
279 294
280 if (tx->tx_status == 0) { 295 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
281 omap_stop_dma(tx->dma_ch); 296
297 if (vout->vrfb_dma_tx.tx_status == 0) {
298 pr_err("%s: Timeout while waiting for DMA\n", __func__);
299 dmaengine_terminate_sync(chan);
300 return -EINVAL;
301 } else if (status != DMA_COMPLETE) {
302 pr_err("%s: DMA completion %s status\n", __func__,
303 status == DMA_ERROR ? "error" : "busy");
304 dmaengine_terminate_sync(chan);
282 return -EINVAL; 305 return -EINVAL;
283 } 306 }
307
284 /* Store buffers physical address into an array. Addresses 308 /* Store buffers physical address into an array. Addresses
285 * from this array will be used to configure DSS */ 309 * from this array will be used to configure DSS */
310 rotation = calc_rotation(vout);
286 vout->queued_buf_addr[vb->i] = (u8 *) 311 vout->queued_buf_addr[vb->i] = (u8 *)
287 vout->vrfb_context[vb->i].paddr[rotation]; 312 vout->vrfb_context[vb->i].paddr[rotation];
288 return 0; 313 return 0;
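The omap_vout conversion above replaces the OMAP-specific DMA API (omap_request_dma() and the double-index addressing mode) with the generic dmaengine interleaved-transfer API: each video line becomes one frame of a dma_interleaved_template, and the inter-chunk gap (icg) on the destination recreates the old destination frame index. A condensed sketch of that shape, assuming the caller allocated xt as kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL) as the setup hunk does; note the prepare hunk above passes an uninitialized flags value, so this sketch passes DMA_PREP_INTERRUPT explicitly:

```c
#include <linux/dmaengine.h>

static int demo_vrfb_copy(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  dma_addr_t src, dma_addr_t dst,
			  size_t line_bytes, size_t dst_icg,
			  unsigned int lines)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = lines;		/* one "frame" per video line */
	xt->frame_size = 1;		/* a single chunk per frame */
	xt->sgl[0].size = line_bytes;	/* payload bytes per line */
	xt->sgl[0].icg = dst_icg;	/* gap inserted after each line */
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* icg does not apply to src */
	xt->dst_sgl = true;		/* icg applies to dst only */

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EINVAL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);	/* actually start the transfer */
	return 0;
}
```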
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 80c79fabdf95..56b630b1c8b4 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -14,6 +14,7 @@
14#include <media/v4l2-ctrls.h> 14#include <media/v4l2-ctrls.h>
15#include <video/omapfb_dss.h> 15#include <video/omapfb_dss.h>
16#include <video/omapvrfb.h> 16#include <video/omapvrfb.h>
17#include <linux/dmaengine.h>
17 18
18#define YUYV_BPP 2 19#define YUYV_BPP 2
19#define RGB565_BPP 2 20#define RGB565_BPP 2
@@ -81,8 +82,9 @@ enum vout_rotaion_type {
81 * for VRFB hidden buffer 82 * for VRFB hidden buffer
82 */ 83 */
83struct vid_vrfb_dma { 84struct vid_vrfb_dma {
84 int dev_id; 85 struct dma_chan *chan;
85 int dma_ch; 86 struct dma_interleaved_template *xt;
87
86 int req_status; 88 int req_status;
87 int tx_status; 89 int tx_status;
88 wait_queue_head_t wait; 90 wait_queue_head_t wait;
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 776d2bae6979..41eef376eb2d 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -76,7 +76,7 @@ static void venus_sys_error_handler(struct work_struct *work)
76 hfi_core_deinit(core, true); 76 hfi_core_deinit(core, true);
77 hfi_destroy(core); 77 hfi_destroy(core);
78 mutex_lock(&core->lock); 78 mutex_lock(&core->lock);
79 venus_shutdown(&core->dev_fw); 79 venus_shutdown(core->dev);
80 80
81 pm_runtime_put_sync(core->dev); 81 pm_runtime_put_sync(core->dev);
82 82
@@ -84,7 +84,7 @@ static void venus_sys_error_handler(struct work_struct *work)
84 84
85 pm_runtime_get_sync(core->dev); 85 pm_runtime_get_sync(core->dev);
86 86
87 ret |= venus_boot(core->dev, &core->dev_fw, core->res->fwname); 87 ret |= venus_boot(core->dev, core->res->fwname);
88 88
89 ret |= hfi_core_resume(core, true); 89 ret |= hfi_core_resume(core, true);
90 90
@@ -137,7 +137,7 @@ static int venus_clks_enable(struct venus_core *core)
137 137
138 return 0; 138 return 0;
139err: 139err:
140 while (--i) 140 while (i--)
141 clk_disable_unprepare(core->clks[i]); 141 clk_disable_unprepare(core->clks[i]);
142 142
143 return ret; 143 return ret;
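The one-character venus_clks_enable() fix above matters: with while (--i), the unwind path never disabled clks[0], and a failure at index 1 disabled nothing at all. while (i--) walks from i-1 down to 0. The standard error-path unwind idiom:

```c
#include <linux/clk.h>

static int demo_clks_enable(struct clk **clks, unsigned int num)
{
	unsigned int i;
	int ret;

	for (i = 0; i < num; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret)
			goto err;
	}
	return 0;
err:
	while (i--)	/* unwinds clks[i-1] down to clks[0] */
		clk_disable_unprepare(clks[i]);
	return ret;
}
```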
@@ -207,7 +207,7 @@ static int venus_probe(struct platform_device *pdev)
207 if (ret < 0) 207 if (ret < 0)
208 goto err_runtime_disable; 208 goto err_runtime_disable;
209 209
210 ret = venus_boot(dev, &core->dev_fw, core->res->fwname); 210 ret = venus_boot(dev, core->res->fwname);
211 if (ret) 211 if (ret)
212 goto err_runtime_disable; 212 goto err_runtime_disable;
213 213
@@ -238,7 +238,7 @@ err_dev_unregister:
238err_core_deinit: 238err_core_deinit:
239 hfi_core_deinit(core, false); 239 hfi_core_deinit(core, false);
240err_venus_shutdown: 240err_venus_shutdown:
241 venus_shutdown(&core->dev_fw); 241 venus_shutdown(dev);
242err_runtime_disable: 242err_runtime_disable:
243 pm_runtime_set_suspended(dev); 243 pm_runtime_set_suspended(dev);
244 pm_runtime_disable(dev); 244 pm_runtime_disable(dev);
@@ -259,7 +259,7 @@ static int venus_remove(struct platform_device *pdev)
259 WARN_ON(ret); 259 WARN_ON(ret);
260 260
261 hfi_destroy(core); 261 hfi_destroy(core);
262 venus_shutdown(&core->dev_fw); 262 venus_shutdown(dev);
263 of_platform_depopulate(dev); 263 of_platform_depopulate(dev);
264 264
265 pm_runtime_put_sync(dev); 265 pm_runtime_put_sync(dev);
@@ -270,8 +270,7 @@ static int venus_remove(struct platform_device *pdev)
270 return ret; 270 return ret;
271} 271}
272 272
273#ifdef CONFIG_PM 273static __maybe_unused int venus_runtime_suspend(struct device *dev)
274static int venus_runtime_suspend(struct device *dev)
275{ 274{
276 struct venus_core *core = dev_get_drvdata(dev); 275 struct venus_core *core = dev_get_drvdata(dev);
277 int ret; 276 int ret;
@@ -283,7 +282,7 @@ static int venus_runtime_suspend(struct device *dev)
283 return ret; 282 return ret;
284} 283}
285 284
286static int venus_runtime_resume(struct device *dev) 285static __maybe_unused int venus_runtime_resume(struct device *dev)
287{ 286{
288 struct venus_core *core = dev_get_drvdata(dev); 287 struct venus_core *core = dev_get_drvdata(dev);
289 int ret; 288 int ret;
@@ -302,7 +301,6 @@ err_clks_disable:
302 venus_clks_disable(core); 301 venus_clks_disable(core);
303 return ret; 302 return ret;
304} 303}
305#endif
306 304
307static const struct dev_pm_ops venus_pm_ops = { 305static const struct dev_pm_ops venus_pm_ops = {
308 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 306 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
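Replacing the #ifdef CONFIG_PM guard with __maybe_unused is the preferred style: the SET_*_PM_OPS macros expand to nothing when the corresponding config option is off, and __maybe_unused silences the defined-but-unused warnings that would otherwise result, while keeping the callbacks compile-tested in every configuration. Sketch:

```c
#include <linux/device.h>
#include <linux/pm_runtime.h>

static __maybe_unused int demo_runtime_suspend(struct device *dev)
{
	return 0;	/* ... gate clocks ... */
}

static __maybe_unused int demo_runtime_resume(struct device *dev)
{
	return 0;	/* ... ungate clocks ... */
}

static const struct dev_pm_ops demo_pm_ops = {
	/* expands to empty initializers when CONFIG_PM is disabled */
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};
```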
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index e542700eee32..cba092bcb76d 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -101,7 +101,6 @@ struct venus_core {
101 struct device *dev; 101 struct device *dev;
102 struct device *dev_dec; 102 struct device *dev_dec;
103 struct device *dev_enc; 103 struct device *dev_enc;
104 struct device dev_fw;
105 struct mutex lock; 104 struct mutex lock;
106 struct list_head instances; 105 struct list_head instances;
107 atomic_t insts_count; 106 atomic_t insts_count;
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 1b1a4f355918..521d4b36c090 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -12,97 +12,87 @@
12 * 12 *
13 */ 13 */
14 14
15#include <linux/dma-mapping.h> 15#include <linux/device.h>
16#include <linux/firmware.h> 16#include <linux/firmware.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/io.h>
18#include <linux/of.h> 19#include <linux/of.h>
19#include <linux/of_reserved_mem.h> 20#include <linux/of_address.h>
20#include <linux/slab.h>
21#include <linux/qcom_scm.h> 21#include <linux/qcom_scm.h>
22#include <linux/sizes.h>
22#include <linux/soc/qcom/mdt_loader.h> 23#include <linux/soc/qcom/mdt_loader.h>
23 24
24#include "firmware.h" 25#include "firmware.h"
25 26
26#define VENUS_PAS_ID 9 27#define VENUS_PAS_ID 9
27#define VENUS_FW_MEM_SIZE SZ_8M 28#define VENUS_FW_MEM_SIZE (6 * SZ_1M)
28 29
29static void device_release_dummy(struct device *dev) 30int venus_boot(struct device *dev, const char *fwname)
30{
31 of_reserved_mem_device_release(dev);
32}
33
34int venus_boot(struct device *parent, struct device *fw_dev, const char *fwname)
35{ 31{
36 const struct firmware *mdt; 32 const struct firmware *mdt;
33 struct device_node *node;
37 phys_addr_t mem_phys; 34 phys_addr_t mem_phys;
35 struct resource r;
38 ssize_t fw_size; 36 ssize_t fw_size;
39 size_t mem_size; 37 size_t mem_size;
40 void *mem_va; 38 void *mem_va;
41 int ret; 39 int ret;
42 40
43 if (!qcom_scm_is_available()) 41 if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || !qcom_scm_is_available())
44 return -EPROBE_DEFER; 42 return -EPROBE_DEFER;
45 43
46 fw_dev->parent = parent; 44 node = of_parse_phandle(dev->of_node, "memory-region", 0);
47 fw_dev->release = device_release_dummy; 45 if (!node) {
46 dev_err(dev, "no memory-region specified\n");
47 return -EINVAL;
48 }
48 49
49 ret = dev_set_name(fw_dev, "%s:%s", dev_name(parent), "firmware"); 50 ret = of_address_to_resource(node, 0, &r);
50 if (ret) 51 if (ret)
51 return ret; 52 return ret;
52 53
53 ret = device_register(fw_dev); 54 mem_phys = r.start;
54 if (ret < 0) 55 mem_size = resource_size(&r);
55 return ret;
56 56
57 ret = of_reserved_mem_device_init_by_idx(fw_dev, parent->of_node, 0); 57 if (mem_size < VENUS_FW_MEM_SIZE)
58 if (ret) 58 return -EINVAL;
59 goto err_unreg_device;
60 59
61 mem_size = VENUS_FW_MEM_SIZE; 60 mem_va = memremap(r.start, mem_size, MEMREMAP_WC);
62
63 mem_va = dmam_alloc_coherent(fw_dev, mem_size, &mem_phys, GFP_KERNEL);
64 if (!mem_va) { 61 if (!mem_va) {
65 ret = -ENOMEM; 62 dev_err(dev, "unable to map memory region: %pa+%zx\n",
66 goto err_unreg_device; 63 &r.start, mem_size);
64 return -ENOMEM;
67 } 65 }
68 66
69 ret = request_firmware(&mdt, fwname, fw_dev); 67 ret = request_firmware(&mdt, fwname, dev);
70 if (ret < 0) 68 if (ret < 0)
71 goto err_unreg_device; 69 goto err_unmap;
72 70
73 fw_size = qcom_mdt_get_size(mdt); 71 fw_size = qcom_mdt_get_size(mdt);
74 if (fw_size < 0) { 72 if (fw_size < 0) {
75 ret = fw_size; 73 ret = fw_size;
76 release_firmware(mdt); 74 release_firmware(mdt);
77 goto err_unreg_device; 75 goto err_unmap;
78 } 76 }
79 77
80 ret = qcom_mdt_load(fw_dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys, 78 ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys,
81 mem_size); 79 mem_size);
82 80
83 release_firmware(mdt); 81 release_firmware(mdt);
84 82
85 if (ret) 83 if (ret)
86 goto err_unreg_device; 84 goto err_unmap;
87 85
88 ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID); 86 ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
89 if (ret) 87 if (ret)
90 goto err_unreg_device; 88 goto err_unmap;
91
92 return 0;
93 89
94err_unreg_device: 90err_unmap:
95 device_unregister(fw_dev); 91 memunmap(mem_va);
96 return ret; 92 return ret;
97} 93}
98 94
99int venus_shutdown(struct device *fw_dev) 95int venus_shutdown(struct device *dev)
100{ 96{
101 int ret; 97 return qcom_scm_pas_shutdown(VENUS_PAS_ID);
102
103 ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
104 device_unregister(fw_dev);
105 memset(fw_dev, 0, sizeof(*fw_dev));
106
107 return ret;
108} 98}
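The firmware loader rewrite above drops the dummy child device and its dmam_alloc_coherent() buffer in favour of the static reserved-memory carveout named by the device tree's memory-region phandle, mapped write-combined with memremap(). A minimal sketch of that lookup (the hunk above does not of_node_put() the node; this sketch does):

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void *demo_map_fw_region(struct device *dev, phys_addr_t *phys,
				size_t *size)
{
	struct device_node *node;
	struct resource r;
	void *va;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return ERR_PTR(-EINVAL);

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);		/* done with the node either way */
	if (ret)
		return ERR_PTR(ret);

	*phys = r.start;
	*size = resource_size(&r);

	/* write-combined mapping of the reserved carveout */
	va = memremap(r.start, *size, MEMREMAP_WC);
	return va ?: ERR_PTR(-ENOMEM);
}
```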
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
index f81a98979798..428efb56d339 100644
--- a/drivers/media/platform/qcom/venus/firmware.h
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -16,8 +16,7 @@
16 16
17struct device; 17struct device;
18 18
19int venus_boot(struct device *parent, struct device *fw_dev, 19int venus_boot(struct device *dev, const char *fwname);
20 const char *fwname); 20int venus_shutdown(struct device *dev);
21int venus_shutdown(struct device *fw_dev);
22 21
23#endif 22#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index f8841713e417..a681ae5381d6 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -239,11 +239,12 @@ static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
239 break; 239 break;
240 } 240 }
241 241
242 if (!error) { 242 if (error)
243 rem_bytes -= read_bytes; 243 break;
244 data += read_bytes; 244
245 num_properties--; 245 rem_bytes -= read_bytes;
246 } 246 data += read_bytes;
247 num_properties--;
247 } 248 }
248 249
249err_no_prop: 250err_no_prop:
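The reshaped hfi_sys_init_done() loop now breaks out as soon as a property fails to parse; in the old shape the counters advanced only on success, so a persistent parse error could leave the while loop spinning. A sketch of the new control flow (parse_one_property() is a hypothetical stand-in for the hfi parser):

```c
#include <linux/errno.h>
#include <linux/types.h>

static int parse_one_property(const u8 *data, u32 *read_bytes)
{
	*read_bytes = sizeof(u32);	/* stub: consume one word */
	return data ? 0 : -EINVAL;
}

static void demo_parse(const u8 *data, u32 rem_bytes, u32 num_properties)
{
	u32 read_bytes;

	while (num_properties && rem_bytes >= sizeof(u32)) {
		if (parse_one_property(data, &read_bytes))
			break;		/* bail out on the first error */

		rem_bytes -= read_bytes;
		data += read_bytes;
		num_properties--;
	}
}
```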
diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c
index 7af66860d624..2cc289e4dea1 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-debug.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c
@@ -104,7 +104,7 @@ static void bdisp_dbg_dump_ins(struct seq_file *s, u32 val)
104 if (val & BLT_INS_IRQ) 104 if (val & BLT_INS_IRQ)
105 seq_puts(s, "IRQ - "); 105 seq_puts(s, "IRQ - ");
106 106
107 seq_puts(s, "\n"); 107 seq_putc(s, '\n');
108} 108}
109 109
110static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val) 110static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
@@ -153,7 +153,7 @@ static void bdisp_dbg_dump_tty(struct seq_file *s, u32 val)
153 if (val & BLT_TTY_BIG_END) 153 if (val & BLT_TTY_BIG_END)
154 seq_puts(s, "BigEndian - "); 154 seq_puts(s, "BigEndian - ");
155 155
156 seq_puts(s, "\n"); 156 seq_putc(s, '\n');
157} 157}
158 158
159static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name) 159static void bdisp_dbg_dump_xy(struct seq_file *s, u32 val, char *name)
@@ -230,7 +230,7 @@ static void bdisp_dbg_dump_sty(struct seq_file *s,
230 seq_puts(s, "BigEndian - "); 230 seq_puts(s, "BigEndian - ");
231 231
232done: 232done:
233 seq_puts(s, "\n"); 233 seq_putc(s, '\n');
234} 234}
235 235
236static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val) 236static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val)
@@ -247,7 +247,7 @@ static void bdisp_dbg_dump_fctl(struct seq_file *s, u32 val)
247 else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE) 247 else if ((val & BLT_FCTL_HV_SCALE) == BLT_FCTL_HV_SAMPLE)
248 seq_puts(s, "Sample Chroma"); 248 seq_puts(s, "Sample Chroma");
249 249
250 seq_puts(s, "\n"); 250 seq_putc(s, '\n');
251} 251}
252 252
253static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name) 253static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name)
@@ -266,7 +266,7 @@ static void bdisp_dbg_dump_rsf(struct seq_file *s, u32 val, char *name)
266 seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc); 266 seq_printf(s, "V: %d(6.10) / scale~%dx0.1", inc, 1024 * 10 / inc);
267 267
268done: 268done:
269 seq_puts(s, "\n"); 269 seq_putc(s, '\n');
270} 270}
271 271
272static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name) 272static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name)
@@ -281,7 +281,7 @@ static void bdisp_dbg_dump_rzi(struct seq_file *s, u32 val, char *name)
281 seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7); 281 seq_printf(s, "V: init=%d repeat=%d", val & 0x3FF, (val >> 12) & 7);
282 282
283done: 283done:
284 seq_puts(s, "\n"); 284 seq_putc(s, '\n');
285} 285}
286 286
287static void bdisp_dbg_dump_ivmx(struct seq_file *s, 287static void bdisp_dbg_dump_ivmx(struct seq_file *s,
@@ -293,7 +293,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s,
293 seq_printf(s, "IVMX3\t0x%08X\t", c3); 293 seq_printf(s, "IVMX3\t0x%08X\t", c3);
294 294
295 if (!c0 && !c1 && !c2 && !c3) { 295 if (!c0 && !c1 && !c2 && !c3) {
296 seq_puts(s, "\n"); 296 seq_putc(s, '\n');
297 return; 297 return;
298 } 298 }
299 299
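All the bdisp-debug hunks make the same substitution: seq_putc() emits a single character directly instead of going through seq_puts()'s string path. Sketch:

```c
#include <linux/seq_file.h>
#include <linux/types.h>

static void demo_dump_flags(struct seq_file *s, u32 val)
{
	if (val & 1)
		seq_puts(s, "IRQ - ");	/* strings still use seq_puts() */
	seq_putc(s, '\n');		/* was: seq_puts(s, "\n") */
}
```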
diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
index 14cb32e21130..88a1e5670c72 100644
--- a/drivers/media/platform/vimc/vimc-capture.c
+++ b/drivers/media/platform/vimc/vimc-capture.c
@@ -517,21 +517,22 @@ static int vimc_cap_remove(struct platform_device *pdev)
517 return 0; 517 return 0;
518} 518}
519 519
520static const struct platform_device_id vimc_cap_driver_ids[] = {
521 {
522 .name = VIMC_CAP_DRV_NAME,
523 },
524 { }
525};
526
520static struct platform_driver vimc_cap_pdrv = { 527static struct platform_driver vimc_cap_pdrv = {
521 .probe = vimc_cap_probe, 528 .probe = vimc_cap_probe,
522 .remove = vimc_cap_remove, 529 .remove = vimc_cap_remove,
530 .id_table = vimc_cap_driver_ids,
523 .driver = { 531 .driver = {
524 .name = VIMC_CAP_DRV_NAME, 532 .name = VIMC_CAP_DRV_NAME,
525 }, 533 },
526}; 534};
527 535
528static const struct platform_device_id vimc_cap_driver_ids[] = {
529 {
530 .name = VIMC_CAP_DRV_NAME,
531 },
532 { }
533};
534
535module_platform_driver(vimc_cap_pdrv); 536module_platform_driver(vimc_cap_pdrv);
536 537
537MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids); 538MODULE_DEVICE_TABLE(platform, vimc_cap_driver_ids);
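All four vimc hunks fix the same problem: the platform_device_id table was defined after the platform_driver and never wired up via .id_table, so MODULE_DEVICE_TABLE() exported a module alias but the platform bus could not actually match devices against the table. The corrected ordering (driver name hypothetical):

```c
#include <linux/module.h>
#include <linux/platform_device.h>

#define DEMO_DRV_NAME "demo-drv"

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

/* must be defined before the driver struct that references it */
static const struct platform_device_id demo_driver_ids[] = {
	{ .name = DEMO_DRV_NAME },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, demo_driver_ids);

static struct platform_driver demo_pdrv = {
	.probe = demo_probe,
	.remove = demo_remove,
	.id_table = demo_driver_ids,	/* enables id-table matching */
	.driver = {
		.name = DEMO_DRV_NAME,
	},
};
module_platform_driver(demo_pdrv);

MODULE_LICENSE("GPL");
```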
diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c
index 35b15bd4d61d..033a131f67af 100644
--- a/drivers/media/platform/vimc/vimc-debayer.c
+++ b/drivers/media/platform/vimc/vimc-debayer.c
@@ -577,21 +577,22 @@ static int vimc_deb_remove(struct platform_device *pdev)
577 return 0; 577 return 0;
578} 578}
579 579
580static const struct platform_device_id vimc_deb_driver_ids[] = {
581 {
582 .name = VIMC_DEB_DRV_NAME,
583 },
584 { }
585};
586
580static struct platform_driver vimc_deb_pdrv = { 587static struct platform_driver vimc_deb_pdrv = {
581 .probe = vimc_deb_probe, 588 .probe = vimc_deb_probe,
582 .remove = vimc_deb_remove, 589 .remove = vimc_deb_remove,
590 .id_table = vimc_deb_driver_ids,
583 .driver = { 591 .driver = {
584 .name = VIMC_DEB_DRV_NAME, 592 .name = VIMC_DEB_DRV_NAME,
585 }, 593 },
586}; 594};
587 595
588static const struct platform_device_id vimc_deb_driver_ids[] = {
589 {
590 .name = VIMC_DEB_DRV_NAME,
591 },
592 { }
593};
594
595module_platform_driver(vimc_deb_pdrv); 596module_platform_driver(vimc_deb_pdrv);
596 597
597MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids); 598MODULE_DEVICE_TABLE(platform, vimc_deb_driver_ids);
diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c
index fe77505d2679..0a3e086e12f3 100644
--- a/drivers/media/platform/vimc/vimc-scaler.c
+++ b/drivers/media/platform/vimc/vimc-scaler.c
@@ -431,21 +431,22 @@ static int vimc_sca_remove(struct platform_device *pdev)
431 return 0; 431 return 0;
432} 432}
433 433
434static const struct platform_device_id vimc_sca_driver_ids[] = {
435 {
436 .name = VIMC_SCA_DRV_NAME,
437 },
438 { }
439};
440
434static struct platform_driver vimc_sca_pdrv = { 441static struct platform_driver vimc_sca_pdrv = {
435 .probe = vimc_sca_probe, 442 .probe = vimc_sca_probe,
436 .remove = vimc_sca_remove, 443 .remove = vimc_sca_remove,
444 .id_table = vimc_sca_driver_ids,
437 .driver = { 445 .driver = {
438 .name = VIMC_SCA_DRV_NAME, 446 .name = VIMC_SCA_DRV_NAME,
439 }, 447 },
440}; 448};
441 449
442static const struct platform_device_id vimc_sca_driver_ids[] = {
443 {
444 .name = VIMC_SCA_DRV_NAME,
445 },
446 { }
447};
448
449module_platform_driver(vimc_sca_pdrv); 450module_platform_driver(vimc_sca_pdrv);
450 451
451MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids); 452MODULE_DEVICE_TABLE(platform, vimc_sca_driver_ids);
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index ebdbbe8c05ed..615c2b18dcfc 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -365,21 +365,22 @@ static int vimc_sen_remove(struct platform_device *pdev)
365 return 0; 365 return 0;
366} 366}
367 367
368static const struct platform_device_id vimc_sen_driver_ids[] = {
369 {
370 .name = VIMC_SEN_DRV_NAME,
371 },
372 { }
373};
374
368static struct platform_driver vimc_sen_pdrv = { 375static struct platform_driver vimc_sen_pdrv = {
369 .probe = vimc_sen_probe, 376 .probe = vimc_sen_probe,
370 .remove = vimc_sen_remove, 377 .remove = vimc_sen_remove,
378 .id_table = vimc_sen_driver_ids,
371 .driver = { 379 .driver = {
372 .name = VIMC_SEN_DRV_NAME, 380 .name = VIMC_SEN_DRV_NAME,
373 }, 381 },
374}; 382};
375 383
376static const struct platform_device_id vimc_sen_driver_ids[] = {
377 {
378 .name = VIMC_SEN_DRV_NAME,
379 },
380 { }
381};
382
383module_platform_driver(vimc_sen_pdrv); 384module_platform_driver(vimc_sen_pdrv);
384 385
385MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids); 386MODULE_DEVICE_TABLE(platform, vimc_sen_driver_ids);
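
The three vimc patches above share one shape: declare the platform_device_id table before the platform_driver so it can be referenced from .id_table, and export it with MODULE_DEVICE_TABLE() so the module autoloads. A minimal sketch of that idiom with made-up names (foo_probe/foo_remove are stubs, not vimc code):

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)  { return 0; }
    static int foo_remove(struct platform_device *pdev) { return 0; }

    /* the id table names every platform device this driver binds to */
    static const struct platform_device_id foo_driver_ids[] = {
        { .name = "foo-drv" },
        { } /* zeroed sentinel terminates the table */
    };
    /* emits the alias that modprobe uses to autoload the module */
    MODULE_DEVICE_TABLE(platform, foo_driver_ids);

    static struct platform_driver foo_pdrv = {
        .probe = foo_probe,
        .remove = foo_remove,
        .id_table = foo_driver_ids, /* lets the core match devices by name */
        .driver = {
            .name = "foo-drv",
        },
    };
    module_platform_driver(foo_pdrv);
    MODULE_LICENSE("GPL");

Moving the table above the driver struct, as these patches do, avoids a forward declaration for the new .id_table member.
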
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 7240223dc15a..17e82a9a0109 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -610,10 +610,21 @@ static int wl1273_fm_start(struct wl1273_device *radio, int new_mode)
             }
         }
 
-        if (radio->rds_on)
+        if (radio->rds_on) {
             r = core->write(core, WL1273_RDS_DATA_ENB, 1);
-        else
+            if (r) {
+                dev_err(dev, "%s: RDS_DATA_ENB ON fails\n",
+                    __func__);
+                goto fail;
+            }
+        } else {
             r = core->write(core, WL1273_RDS_DATA_ENB, 0);
+            if (r) {
+                dev_err(dev, "%s: RDS_DATA_ENB OFF fails\n",
+                    __func__);
+                goto fail;
+            }
+        }
     } else {
         dev_warn(dev, "%s: Illegal mode.\n", __func__);
     }
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index a30af91710fe..d2223c04e9ad 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -266,7 +266,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
         if (!dev->rx_resolution)
             return -ENOTTY;
 
-        val = dev->rx_resolution;
+        val = dev->rx_resolution / 1000;
         break;
 
     case LIRC_SET_WIDEBAND_RECEIVER:
diff --git a/drivers/media/tuners/fc0011.c b/drivers/media/tuners/fc0011.c
index 192b1c7740df..145407dee3db 100644
--- a/drivers/media/tuners/fc0011.c
+++ b/drivers/media/tuners/fc0011.c
@@ -342,6 +342,7 @@ static int fc0011_set_params(struct dvb_frontend *fe)
     switch (vco_sel) {
     default:
         WARN_ON(1);
+        return -EINVAL;
     case 0:
         if (vco_cal < 8) {
             regs[FC11_REG_VCOSEL] &= ~(FC11_VCOSEL_1 | FC11_VCOSEL_2);
diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c
index 353744fee053..dd59c2c0e4a5 100644
--- a/drivers/media/tuners/mxl5005s.c
+++ b/drivers/media/tuners/mxl5005s.c
@@ -2737,8 +2737,6 @@ static u16 MXL_TuneRF(struct dvb_frontend *fe, u32 RF_Freq)
         status += MXL_ControlWrite(fe, TG_LO_DIVVAL, 0x0);
         status += MXL_ControlWrite(fe, TG_LO_SELVAL, 0x7);
         divider_val = 2 ;
-        Fmax = FmaxBin ;
-        Fmin = FminBin ;
     }
 
     /* TG_DIV_VAL */
diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
index 9ec919c68482..9d82ec0a4b64 100644
--- a/drivers/media/usb/au0828/au0828-input.c
+++ b/drivers/media/usb/au0828/au0828-input.c
@@ -351,7 +351,7 @@ int au0828_rc_register(struct au0828_dev *dev)
     if (err)
         goto error;
 
-    pr_info("Remote controller %s initalized\n", ir->name);
+    pr_info("Remote controller %s initialized\n", ir->name);
 
     return 0;
 
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index 594360a63c18..a91fdad8f8d4 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -207,15 +207,13 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
     struct lme2510_state *st = d->priv;
     u8 all_pids[] = LME_ALL_PIDS;
     u8 stream_on[] = LME_ST_ON_W;
-    int ret;
     u8 rbuff[1];
     if (st->pid_off)
-        ret = lme2510_usb_talk(d, all_pids, sizeof(all_pids),
+        lme2510_usb_talk(d, all_pids, sizeof(all_pids),
                 rbuff, sizeof(rbuff));
     /*Restart Stream Command*/
-    ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on),
+    return lme2510_usb_talk(d, stream_on, sizeof(stream_on),
             rbuff, sizeof(rbuff));
-    return ret;
 }
 
 static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index 08acdd32e412..bea1b4764a66 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -215,13 +215,14 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
                         USB_CTRL_GET_TIMEOUT);
             if (result < 0) {
                 deb_info("i2c read error (status = %d)\n", result);
-                break;
+                goto unlock;
             }
 
             if (msg[i].len > sizeof(st->buf)) {
                 deb_info("buffer too small to fit %d bytes\n",
                     msg[i].len);
-                return -EIO;
+                result = -EIO;
+                goto unlock;
             }
 
             memcpy(msg[i].buf, st->buf, msg[i].len);
@@ -233,8 +234,8 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
             /* Write request */
             if (mutex_lock_interruptible(&d->usb_mutex) < 0) {
                 err("could not acquire lock");
-                mutex_unlock(&d->i2c_mutex);
-                return -EINTR;
+                result = -EINTR;
+                goto unlock;
             }
             st->buf[0] = REQUEST_NEW_I2C_WRITE;
             st->buf[1] = msg[i].addr << 1;
@@ -247,7 +248,9 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
             if (msg[i].len > sizeof(st->buf) - 4) {
                 deb_info("i2c message to big: %d\n",
                     msg[i].len);
-                return -EIO;
+                mutex_unlock(&d->usb_mutex);
+                result = -EIO;
+                goto unlock;
             }
 
             /* The Actual i2c payload */
@@ -269,8 +272,11 @@ static int dib0700_i2c_xfer_new(struct i2c_adapter *adap, struct i2c_msg *msg,
             }
         }
     }
+    result = i;
+
+unlock:
     mutex_unlock(&d->i2c_mutex);
-    return i;
+    return result;
 }
 
 /*
@@ -281,7 +287,7 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
 {
     struct dvb_usb_device *d = i2c_get_adapdata(adap);
     struct dib0700_state *st = d->priv;
-    int i,len;
+    int i, len, result;
 
     if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
         return -EINTR;
@@ -298,7 +304,8 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
         if (msg[i].len > sizeof(st->buf) - 2) {
             deb_info("i2c xfer to big: %d\n",
                 msg[i].len);
-            return -EIO;
+            result = -EIO;
+            goto unlock;
         }
         memcpy(&st->buf[2], msg[i].buf, msg[i].len);
 
@@ -313,13 +320,15 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
             if (len <= 0) {
                 deb_info("I2C read failed on address 0x%02x\n",
                     msg[i].addr);
-                break;
+                result = -EIO;
+                goto unlock;
             }
 
             if (msg[i + 1].len > sizeof(st->buf)) {
                 deb_info("i2c xfer buffer to small for %d\n",
                     msg[i].len);
-                return -EIO;
+                result = -EIO;
+                goto unlock;
             }
             memcpy(msg[i + 1].buf, st->buf, msg[i + 1].len);
 
@@ -328,14 +337,17 @@ static int dib0700_i2c_xfer_legacy(struct i2c_adapter *adap,
             i++;
         } else {
             st->buf[0] = REQUEST_I2C_WRITE;
-            if (dib0700_ctrl_wr(d, st->buf, msg[i].len + 2) < 0)
-                break;
+            result = dib0700_ctrl_wr(d, st->buf, msg[i].len + 2);
+            if (result < 0)
+                goto unlock;
         }
     }
+    result = i;
+unlock:
     mutex_unlock(&d->usb_mutex);
     mutex_unlock(&d->i2c_mutex);
 
-    return i;
+    return result;
 }
 
 static int dib0700_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
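
The dib0700 changes above replace scattered return/break statements with a single unlock label so that every exit path drops the mutexes it took: the old early `return -EIO;` statements leaked d->i2c_mutex (and, in one path, d->usb_mutex as well). A generic sketch of the idiom, with made-up names (device_state, do_transfer() and count_done() are stand-ins, not driver code):

    /* sketch of the single-unlock exit idiom the patch adopts */
    static int xfer_locked(struct device_state *st)
    {
        int result;

        if (mutex_lock_interruptible(&st->lock) < 0)
            return -EINTR; /* nothing locked yet, early return is safe */

        result = do_transfer(st);
        if (result < 0)
            goto unlock; /* error and success paths share the unlock */

        result = count_done(st);
    unlock:
        mutex_unlock(&st->lock);
        return result;
    }
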
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 146341aeb782..4c57fd7929cb 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1193,6 +1193,22 @@ struct em28xx_board em28xx_boards[] = {
         .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
                 EM28XX_I2C_FREQ_400_KHZ,
     },
+    [EM2884_BOARD_TERRATEC_H6] = {
+        .name = "Terratec Cinergy H6 rev. 2",
+        .has_dvb = 1,
+        .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
+#if 0
+        .tuner_type = TUNER_PHILIPS_TDA8290,
+        .tuner_addr = 0x41,
+        .dvb_gpio = terratec_h5_digital, /* FIXME: probably wrong */
+        .tuner_gpio = terratec_h5_gpio,
+#else
+        .tuner_type = TUNER_ABSENT,
+#endif
+        .def_i2c_bus = 1,
+        .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+                EM28XX_I2C_FREQ_400_KHZ,
+    },
     [EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C] = {
         .name = "Hauppauge WinTV HVR 930C",
         .has_dvb = 1,
@@ -2496,6 +2512,8 @@ struct usb_device_id em28xx_id_table[] = {
         .driver_info = EM2884_BOARD_TERRATEC_H5 },
     { USB_DEVICE(0x0ccd, 0x10b6), /* H5 Rev. 3 */
         .driver_info = EM2884_BOARD_TERRATEC_H5 },
+    { USB_DEVICE(0x0ccd, 0x10b2), /* H6 */
+        .driver_info = EM2884_BOARD_TERRATEC_H6 },
     { USB_DEVICE(0x0ccd, 0x0084),
         .driver_info = EM2860_BOARD_TERRATEC_AV350 },
     { USB_DEVICE(0x0ccd, 0x0096),
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 82edd37f0d73..4a7db623fe29 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -1522,6 +1522,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
         break;
     case EM2884_BOARD_ELGATO_EYETV_HYBRID_2008:
     case EM2884_BOARD_CINERGY_HTC_STICK:
+    case EM2884_BOARD_TERRATEC_H6:
         terratec_htc_stick_init(dev);
 
         /* attach demodulator */
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 8c472d5adb50..60b195c157b8 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -982,8 +982,6 @@ int em28xx_i2c_register(struct em28xx *dev, unsigned bus,
             dev_err(&dev->intf->dev,
                 "%s: em28xx_i2_eeprom failed! retval [%d]\n",
                 __func__, retval);
-
-            return retval;
         }
     }
 
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index eba75736e654..ca9673917ad5 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -821,7 +821,7 @@ static int em28xx_ir_init(struct em28xx *dev)
     if (err)
         goto error;
 
-    dev_info(&dev->intf->dev, "Input extension successfully initalized\n");
+    dev_info(&dev->intf->dev, "Input extension successfully initialized\n");
 
     return 0;
 
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index e8d97d5ec161..88084f24f033 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -148,6 +148,7 @@
 #define EM28178_BOARD_PLEX_PX_BCUD 98
 #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99
 #define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_01595 100
+#define EM2884_BOARD_TERRATEC_H6 101
 
 /* Limits minimum and default number of buffers */
 #define EM28XX_MIN_BUF 4
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index c843070f24c1..f9ed9c950247 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
 MODULE_LICENSE("GPL");
 
 static int debug;
-static int persistent_config = 1;
+static int persistent_config;
 module_param(debug, int, 0644);
 module_param(persistent_config, int, 0644);
 MODULE_PARM_DESC(debug, "debug level (0-1)");
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
index f203699e9c1b..65692576690f 100644
--- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
+++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c
@@ -116,21 +116,19 @@ static void rain_irq_work_handler(struct work_struct *work)
 
     while (true) {
         unsigned long flags;
-        bool exit_loop = false;
         char data;
 
         spin_lock_irqsave(&rain->buf_lock, flags);
-        if (rain->buf_len) {
-            data = rain->buf[rain->buf_rd_idx];
-            rain->buf_len--;
-            rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
-        } else {
-            exit_loop = true;
+        if (!rain->buf_len) {
+            spin_unlock_irqrestore(&rain->buf_lock, flags);
+            break;
         }
-        spin_unlock_irqrestore(&rain->buf_lock, flags);
 
-        if (exit_loop)
-            break;
+        data = rain->buf[rain->buf_rd_idx];
+        rain->buf_len--;
+        rain->buf_rd_idx = (rain->buf_rd_idx + 1) & 0xff;
+
+        spin_unlock_irqrestore(&rain->buf_lock, flags);
 
         if (!rain->cmd_started && data != '?')
             continue;
diff --git a/drivers/media/usb/stkwebcam/stk-sensor.c b/drivers/media/usb/stkwebcam/stk-sensor.c
index 985af9933c7e..c1d4505f84ea 100644
--- a/drivers/media/usb/stkwebcam/stk-sensor.c
+++ b/drivers/media/usb/stkwebcam/stk-sensor.c
@@ -41,6 +41,8 @@
 
 /* It seems the i2c bus is controlled with these registers */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "stk-webcam.h"
 
 #define STK_IIC_BASE (0x0200)
@@ -239,8 +241,8 @@ static int stk_sensor_outb(struct stk_camera *dev, u8 reg, u8 val)
     } while (tmpval == 0 && i < MAX_RETRIES);
     if (tmpval != STK_IIC_STAT_TX_OK) {
         if (tmpval)
-            STK_ERROR("stk_sensor_outb failed, status=0x%02x\n",
+            pr_err("stk_sensor_outb failed, status=0x%02x\n",
                 tmpval);
         return 1;
     } else
         return 0;
@@ -262,8 +264,8 @@ static int stk_sensor_inb(struct stk_camera *dev, u8 reg, u8 *val)
     } while (tmpval == 0 && i < MAX_RETRIES);
     if (tmpval != STK_IIC_STAT_RX_OK) {
         if (tmpval)
-            STK_ERROR("stk_sensor_inb failed, status=0x%02x\n",
+            pr_err("stk_sensor_inb failed, status=0x%02x\n",
                 tmpval);
         return 1;
     }
 
@@ -366,29 +368,29 @@ int stk_sensor_init(struct stk_camera *dev)
     if (stk_camera_write_reg(dev, STK_IIC_ENABLE, STK_IIC_ENABLE_YES)
         || stk_camera_write_reg(dev, STK_IIC_ADDR, SENSOR_ADDRESS)
         || stk_sensor_outb(dev, REG_COM7, COM7_RESET)) {
-        STK_ERROR("Sensor resetting failed\n");
+        pr_err("Sensor resetting failed\n");
         return -ENODEV;
     }
     msleep(10);
     /* Read the manufacturer ID: ov = 0x7FA2 */
     if (stk_sensor_inb(dev, REG_MIDH, &idh)
         || stk_sensor_inb(dev, REG_MIDL, &idl)) {
-        STK_ERROR("Strange error reading sensor ID\n");
+        pr_err("Strange error reading sensor ID\n");
         return -ENODEV;
     }
     if (idh != 0x7f || idl != 0xa2) {
-        STK_ERROR("Huh? you don't have a sensor from ovt\n");
+        pr_err("Huh? you don't have a sensor from ovt\n");
         return -ENODEV;
     }
     if (stk_sensor_inb(dev, REG_PID, &idh)
         || stk_sensor_inb(dev, REG_VER, &idl)) {
-        STK_ERROR("Could not read sensor model\n");
+        pr_err("Could not read sensor model\n");
         return -ENODEV;
     }
     stk_sensor_write_regvals(dev, ov_initvals);
     msleep(10);
-    STK_INFO("OmniVision sensor detected, id %02X%02X at address %x\n",
+    pr_info("OmniVision sensor detected, id %02X%02X at address %x\n",
         idh, idl, SENSOR_ADDRESS);
     return 0;
 }
 
@@ -520,7 +522,8 @@ int stk_sensor_configure(struct stk_camera *dev)
     case MODE_SXGA: com7 = COM7_FMT_SXGA;
         dummylines = 0;
         break;
-    default: STK_ERROR("Unsupported mode %d\n", dev->vsettings.mode);
+    default:
+        pr_err("Unsupported mode %d\n", dev->vsettings.mode);
         return -EFAULT;
     }
     switch (dev->vsettings.palette) {
@@ -544,7 +547,8 @@ int stk_sensor_configure(struct stk_camera *dev)
         com7 |= COM7_PBAYER;
         rv = ov_fmt_bayer;
         break;
-    default: STK_ERROR("Unsupported colorspace\n");
+    default:
+        pr_err("Unsupported colorspace\n");
         return -EFAULT;
     }
     /*FIXME sometimes the sensor go to a bad state
@@ -564,7 +568,7 @@ int stk_sensor_configure(struct stk_camera *dev)
     switch (dev->vsettings.mode) {
     case MODE_VGA:
         if (stk_sensor_set_hw(dev, 302, 1582, 6, 486))
-            STK_ERROR("stk_sensor_set_hw failed (VGA)\n");
+            pr_err("stk_sensor_set_hw failed (VGA)\n");
         break;
     case MODE_SXGA:
     case MODE_CIF:
@@ -572,7 +576,7 @@ int stk_sensor_configure(struct stk_camera *dev)
     case MODE_QCIF:
         /*FIXME These settings seem ignored by the sensor
         if (stk_sensor_set_hw(dev, 220, 1500, 10, 1034))
-            STK_ERROR("stk_sensor_set_hw failed (SXGA)\n");
+            pr_err("stk_sensor_set_hw failed (SXGA)\n");
         */
         break;
     }
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 6e7fc36b658f..90d4a08cda31 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -18,6 +18,8 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -175,15 +177,15 @@ static int stk_start_stream(struct stk_camera *dev)
     if (!is_present(dev))
         return -ENODEV;
     if (!is_memallocd(dev) || !is_initialised(dev)) {
-        STK_ERROR("FIXME: Buffers are not allocated\n");
+        pr_err("FIXME: Buffers are not allocated\n");
         return -EFAULT;
     }
     ret = usb_set_interface(dev->udev, 0, 5);
 
     if (ret < 0)
-        STK_ERROR("usb_set_interface failed !\n");
+        pr_err("usb_set_interface failed !\n");
     if (stk_sensor_wakeup(dev))
-        STK_ERROR("error awaking the sensor\n");
+        pr_err("error awaking the sensor\n");
 
     stk_camera_read_reg(dev, 0x0116, &value_116);
     stk_camera_read_reg(dev, 0x0117, &value_117);
@@ -224,9 +226,9 @@ static int stk_stop_stream(struct stk_camera *dev)
         unset_streaming(dev);
 
         if (usb_set_interface(dev->udev, 0, 0))
-            STK_ERROR("usb_set_interface failed !\n");
+            pr_err("usb_set_interface failed !\n");
         if (stk_sensor_sleep(dev))
-            STK_ERROR("error suspending the sensor\n");
+            pr_err("error suspending the sensor\n");
     }
     return 0;
 }
@@ -313,7 +315,7 @@ static void stk_isoc_handler(struct urb *urb)
     dev = (struct stk_camera *) urb->context;
 
     if (dev == NULL) {
-        STK_ERROR("isoc_handler called with NULL device !\n");
+        pr_err("isoc_handler called with NULL device !\n");
         return;
     }
 
@@ -326,14 +328,13 @@ static void stk_isoc_handler(struct urb *urb)
     spin_lock_irqsave(&dev->spinlock, flags);
 
     if (urb->status != -EINPROGRESS && urb->status != 0) {
-        STK_ERROR("isoc_handler: urb->status == %d\n", urb->status);
+        pr_err("isoc_handler: urb->status == %d\n", urb->status);
         goto resubmit;
     }
 
     if (list_empty(&dev->sio_avail)) {
         /*FIXME Stop streaming after a while */
-        (void) (printk_ratelimit() &&
-        STK_ERROR("isoc_handler without available buffer!\n"));
+        pr_err_ratelimited("isoc_handler without available buffer!\n");
         goto resubmit;
     }
     fb = list_first_entry(&dev->sio_avail,
@@ -343,8 +344,8 @@ static void stk_isoc_handler(struct urb *urb)
     for (i = 0; i < urb->number_of_packets; i++) {
         if (urb->iso_frame_desc[i].status != 0) {
             if (urb->iso_frame_desc[i].status != -EXDEV)
-                STK_ERROR("Frame %d has error %d\n", i,
-                    urb->iso_frame_desc[i].status);
+                pr_err("Frame %d has error %d\n",
+                    i, urb->iso_frame_desc[i].status);
             continue;
         }
         framelen = urb->iso_frame_desc[i].actual_length;
@@ -368,9 +369,8 @@ static void stk_isoc_handler(struct urb *urb)
         /* This marks a new frame */
         if (fb->v4lbuf.bytesused != 0
             && fb->v4lbuf.bytesused != dev->frame_size) {
-            (void) (printk_ratelimit() &&
-            STK_ERROR("frame %d, bytesused=%d, skipping\n",
-                i, fb->v4lbuf.bytesused));
+            pr_err_ratelimited("frame %d, bytesused=%d, skipping\n",
+                    i, fb->v4lbuf.bytesused);
             fb->v4lbuf.bytesused = 0;
             fill = fb->buffer;
         } else if (fb->v4lbuf.bytesused == dev->frame_size) {
@@ -395,8 +395,7 @@ static void stk_isoc_handler(struct urb *urb)
 
         /* Our buffer is full !!! */
         if (framelen + fb->v4lbuf.bytesused > dev->frame_size) {
-            (void) (printk_ratelimit() &&
-            STK_ERROR("Frame buffer overflow, lost sync\n"));
+            pr_err_ratelimited("Frame buffer overflow, lost sync\n");
             /*FIXME Do something here? */
             continue;
         }
@@ -414,8 +413,8 @@ resubmit:
     urb->dev = dev->udev;
     ret = usb_submit_urb(urb, GFP_ATOMIC);
     if (ret != 0) {
-        STK_ERROR("Error (%d) re-submitting urb in stk_isoc_handler.\n",
-            ret);
+        pr_err("Error (%d) re-submitting urb in stk_isoc_handler\n",
+            ret);
     }
 }
 
@@ -433,32 +432,31 @@ static int stk_prepare_iso(struct stk_camera *dev)
     udev = dev->udev;
 
     if (dev->isobufs)
-        STK_ERROR("isobufs already allocated. Bad\n");
+        pr_err("isobufs already allocated. Bad\n");
     else
         dev->isobufs = kcalloc(MAX_ISO_BUFS, sizeof(*dev->isobufs),
                 GFP_KERNEL);
     if (dev->isobufs == NULL) {
-        STK_ERROR("Unable to allocate iso buffers\n");
+        pr_err("Unable to allocate iso buffers\n");
         return -ENOMEM;
     }
     for (i = 0; i < MAX_ISO_BUFS; i++) {
         if (dev->isobufs[i].data == NULL) {
             kbuf = kzalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
             if (kbuf == NULL) {
-                STK_ERROR("Failed to allocate iso buffer %d\n",
-                    i);
+                pr_err("Failed to allocate iso buffer %d\n", i);
                 goto isobufs_out;
             }
             dev->isobufs[i].data = kbuf;
         } else
-            STK_ERROR("isobuf data already allocated\n");
+            pr_err("isobuf data already allocated\n");
         if (dev->isobufs[i].urb == NULL) {
             urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
             if (urb == NULL)
                 goto isobufs_out;
             dev->isobufs[i].urb = urb;
         } else {
-            STK_ERROR("Killing URB\n");
+            pr_err("Killing URB\n");
             usb_kill_urb(dev->isobufs[i].urb);
             urb = dev->isobufs[i].urb;
         }
@@ -567,7 +565,7 @@ static int stk_prepare_sio_buffers(struct stk_camera *dev, unsigned n_sbufs)
 {
     int i;
     if (dev->sio_bufs != NULL)
-        STK_ERROR("sio_bufs already allocated\n");
+        pr_err("sio_bufs already allocated\n");
     else {
         dev->sio_bufs = kzalloc(n_sbufs * sizeof(struct stk_sio_buffer),
                 GFP_KERNEL);
@@ -690,7 +688,7 @@ static ssize_t stk_read(struct file *fp, char __user *buf,
     spin_lock_irqsave(&dev->spinlock, flags);
     if (list_empty(&dev->sio_full)) {
         spin_unlock_irqrestore(&dev->spinlock, flags);
-        STK_ERROR("BUG: No siobufs ready\n");
+        pr_err("BUG: No siobufs ready\n");
         return 0;
     }
     sbuf = list_first_entry(&dev->sio_full, struct stk_sio_buffer, list);
@@ -907,7 +905,7 @@ static int stk_vidioc_g_fmt_vid_cap(struct file *filp,
         stk_sizes[i].m != dev->vsettings.mode; i++)
         ;
     if (i == ARRAY_SIZE(stk_sizes)) {
-        STK_ERROR("ERROR: mode invalid\n");
+        pr_err("ERROR: mode invalid\n");
         return -EINVAL;
     }
     pix_format->width = stk_sizes[i].w;
@@ -985,7 +983,7 @@ static int stk_setup_format(struct stk_camera *dev)
         stk_sizes[i].m != dev->vsettings.mode)
         i++;
     if (i == ARRAY_SIZE(stk_sizes)) {
-        STK_ERROR("Something is broken in %s\n", __func__);
+        pr_err("Something is broken in %s\n", __func__);
         return -EFAULT;
     }
     /* This registers controls some timings, not sure of what. */
@@ -1241,7 +1239,7 @@ static void stk_v4l_dev_release(struct video_device *vd)
     struct stk_camera *dev = vdev_to_camera(vd);
 
     if (dev->sio_bufs != NULL || dev->isobufs != NULL)
-        STK_ERROR("We are leaking memory\n");
+        pr_err("We are leaking memory\n");
     usb_put_intf(dev->interface);
     kfree(dev);
 }
@@ -1264,10 +1262,10 @@ static int stk_register_video_device(struct stk_camera *dev)
     video_set_drvdata(&dev->vdev, dev);
     err = video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
     if (err)
-        STK_ERROR("v4l registration failed\n");
+        pr_err("v4l registration failed\n");
     else
-        STK_INFO("Syntek USB2.0 Camera is now controlling device %s\n",
+        pr_info("Syntek USB2.0 Camera is now controlling device %s\n",
             video_device_node_name(&dev->vdev));
     return err;
 }
 
@@ -1288,7 +1286,7 @@ static int stk_camera_probe(struct usb_interface *interface,
 
     dev = kzalloc(sizeof(struct stk_camera), GFP_KERNEL);
     if (dev == NULL) {
-        STK_ERROR("Out of memory !\n");
+        pr_err("Out of memory !\n");
         return -ENOMEM;
     }
     err = v4l2_device_register(&interface->dev, &dev->v4l2_dev);
@@ -1352,7 +1350,7 @@ static int stk_camera_probe(struct usb_interface *interface,
         }
     }
     if (!dev->isoc_ep) {
-        STK_ERROR("Could not find isoc-in endpoint");
+        pr_err("Could not find isoc-in endpoint\n");
         err = -ENODEV;
         goto error;
     }
@@ -1387,8 +1385,8 @@ static void stk_camera_disconnect(struct usb_interface *interface)
 
     wake_up_interruptible(&dev->wait_frame);
 
-    STK_INFO("Syntek USB2.0 Camera release resources device %s\n",
+    pr_info("Syntek USB2.0 Camera release resources device %s\n",
         video_device_node_name(&dev->vdev));
 
     video_unregister_device(&dev->vdev);
     v4l2_ctrl_handler_free(&dev->hdl);
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.h b/drivers/media/usb/stkwebcam/stk-webcam.h
index 0284120ce246..5cecbdc97573 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.h
+++ b/drivers/media/usb/stkwebcam/stk-webcam.h
@@ -31,12 +31,6 @@
 #define ISO_MAX_FRAME_SIZE 3 * 1024
 #define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
 
-
-#define PREFIX "stkwebcam: "
-#define STK_INFO(str, args...) printk(KERN_INFO PREFIX str, ##args)
-#define STK_ERROR(str, args...) printk(KERN_ERR PREFIX str, ##args)
-#define STK_WARNING(str, args...) printk(KERN_WARNING PREFIX str, ##args)
-
 struct stk_iso_buf {
     void *data;
     int length;
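
The stkwebcam patches above retire the driver's private STK_ERROR/STK_INFO printk wrappers in favour of the kernel's pr_* helpers, with pr_fmt() supplying the "stkwebcam: " prefix once per file. A compressed sketch of the convention (illustrative only; report() and its argument are made-up):

    /* pr_fmt must be defined before the printk declarations are pulled in */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #include <linux/printk.h>

    static void report(int status)
    {
        /* expands to: printk(KERN_ERR KBUILD_MODNAME ": " "...") */
        pr_err("transfer failed, status=0x%02x\n", status);
        /* rate-limited variant for paths that can fire per-URB or per-IRQ */
        pr_err_ratelimited("dropping frame\n");
    }

This is also why the `(void) (printk_ratelimit() && STK_ERROR(...))` chains in stk_isoc_handler() collapse to a single pr_err_ratelimited() call: the old construct was the open-coded form of the same thing.
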
diff --git a/drivers/media/v4l2-core/tuner-core.c b/drivers/media/v4l2-core/tuner-core.c
index e48b7c032c95..8db45dfc271b 100644
--- a/drivers/media/v4l2-core/tuner-core.c
+++ b/drivers/media/v4l2-core/tuner-core.c
@@ -43,8 +43,6 @@
 
 #define UNSET (-1U)
 
-#define PREFIX (t->i2c->dev.driver->name)
-
 /*
  * Driver modprobe parameters
  */
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index 99e644cda4d1..ebf69ff48ae2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -72,7 +72,7 @@ struct atmel_smc_timing_xlate {
     { .name = nm, .converter = atmel_smc_cs_conf_set_pulse, .shift = pos}
 
 #define ATMEL_SMC_CYCLE_XLATE(nm, pos) \
-    { .name = nm, .converter = atmel_smc_cs_conf_set_setup, .shift = pos}
+    { .name = nm, .converter = atmel_smc_cs_conf_set_cycle, .shift = pos}
 
 static void at91sam9_ebi_get_config(struct atmel_ebi_dev *ebid,
                     struct atmel_ebi_dev_config *conf)
@@ -120,12 +120,14 @@ static int atmel_ebi_xslate_smc_timings(struct atmel_ebi_dev *ebid,
     if (!ret) {
         required = true;
         ncycles = DIV_ROUND_UP(val, clk_period_ns);
-        if (ncycles > ATMEL_SMC_MODE_TDF_MAX ||
-            ncycles < ATMEL_SMC_MODE_TDF_MIN) {
+        if (ncycles > ATMEL_SMC_MODE_TDF_MAX) {
             ret = -EINVAL;
             goto out;
         }
 
+        if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
+            ncycles = ATMEL_SMC_MODE_TDF_MIN;
+
         smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles);
     }
 
@@ -263,7 +265,7 @@ static int atmel_ebi_xslate_smc_config(struct atmel_ebi_dev *ebid,
     }
 
     ret = atmel_ebi_xslate_smc_timings(ebid, np, &conf->smcconf);
-    if (ret)
+    if (ret < 0)
         return -EINVAL;
 
     if ((ret > 0 && !required) || (!ret && required)) {
diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c
index 954cf0f66a31..20cc0ea470fa 100644
--- a/drivers/mfd/atmel-smc.c
+++ b/drivers/mfd/atmel-smc.c
@@ -206,7 +206,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_pulse);
  * parameter
  *
  * This function encodes the @ncycles value as described in the datasheet
- * (section "SMC Pulse Register"), and then stores the result in the
+ * (section "SMC Cycle Register"), and then stores the result in the
  * @conf->setup field at @shift position.
  *
  * Returns -EINVAL if @shift is invalid, -ERANGE if @ncycles does not fit in
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index fbe0f245ce8e..fe1811523e4a 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -645,6 +645,9 @@ static const struct regmap_range da9062_aa_readable_ranges[] = {
         .range_min = DA9062AA_VLDO1_B,
         .range_max = DA9062AA_VLDO4_B,
     }, {
+        .range_min = DA9062AA_BBAT_CONT,
+        .range_max = DA9062AA_BBAT_CONT,
+    }, {
         .range_min = DA9062AA_INTERFACE,
         .range_max = DA9062AA_CONFIG_E,
     }, {
@@ -721,6 +724,9 @@ static const struct regmap_range da9062_aa_writeable_ranges[] = {
         .range_min = DA9062AA_VLDO1_B,
         .range_max = DA9062AA_VLDO4_B,
     }, {
+        .range_min = DA9062AA_BBAT_CONT,
+        .range_max = DA9062AA_BBAT_CONT,
+    }, {
         .range_min = DA9062AA_GP_ID_0,
         .range_max = DA9062AA_GP_ID_19,
     },
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8621a198a2ce..bac33311f55a 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -216,6 +216,12 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     pci_set_drvdata(pdev, dev);
 
     /*
+     * MEI requires to resume from runtime suspend mode
+     * in order to perform link reset flow upon system suspend.
+     */
+    pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+    /*
      * For not wake-able HW runtime pm framework
      * can't be used on pci device level.
      * Use domain runtime pm callbacks instead.
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index f811cd524468..e38a5f144373 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -138,6 +138,12 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     pci_set_drvdata(pdev, dev);
 
     /*
+     * MEI requires to resume from runtime suspend mode
+     * in order to perform link reset flow upon system suspend.
+     */
+    pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+
+    /*
      * For not wake-able HW runtime pm framework
      * can't be used on pci device level.
      * Use domain runtime pm callbacks instead.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8ac59dc80f23..80d1ec693d2d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1371,12 +1371,46 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
          R1_CC_ERROR |  /* Card controller error */ \
          R1_ERROR)      /* General/unknown error */
 
-static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
+static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
 {
-    if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
-        cmd->error = -EIO;
+    u32 val;
 
-    return cmd->error;
+    /*
+     * Per the SD specification(physical layer version 4.10)[1],
+     * section 4.3.3, it explicitly states that "When the last
+     * block of user area is read using CMD18, the host should
+     * ignore OUT_OF_RANGE error that may occur even the sequence
+     * is correct". And JESD84-B51 for eMMC also has a similar
+     * statement on section 6.8.3.
+     *
+     * Multiple block read/write could be done by either predefined
+     * method, namely CMD23, or open-ending mode. For open-ending mode,
+     * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
+     *
+     * However the spec[1] doesn't tell us whether we should also
+     * ignore that for predefined method. But per the spec[1], section
+     * 4.15 Set Block Count Command, it says"If illegal block count
+     * is set, out of range error will be indicated during read/write
+     * operation (For example, data transfer is stopped at user area
+     * boundary)." In another word, we could expect a out of range error
+     * in the response for the following CMD18/25. And if argument of
+     * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
+     * we could also expect to get a -ETIMEDOUT or any error number from
+     * the host drivers due to missing data response(for write)/data(for
+     * read), as the cards will stop the data transfer by itself per the
+     * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
+     */
+
+    if (!brq->stop.error) {
+        bool oor_with_open_end;
+        /* If there is no error yet, check R1 response */
+
+        val = brq->stop.resp[0] & CMD_ERRORS;
+        oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;
+
+        if (val && !oor_with_open_end)
+            brq->stop.error = -EIO;
+    }
 }
 
 static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
@@ -1400,8 +1434,8 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
      * stop.error indicates a problem with the stop command.  Data
      * may have been transferred, or may still be transferring.
      */
-    if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
-        brq->data.error) {
+
+    mmc_blk_eval_resp_error(brq);
+
+    if (brq->sbc.error || brq->cmd.error ||
+        brq->stop.error || brq->data.error) {
         switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
         case ERR_RETRY:
             return MMC_BLK_RETRY;
@@ -2170,6 +2207,9 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
      * from being accepted.
      */
     card = md->queue.card;
+    spin_lock_irq(md->queue.queue->queue_lock);
+    queue_flag_set(QUEUE_FLAG_BYPASS, md->queue.queue);
+    spin_unlock_irq(md->queue.queue->queue_lock);
     blk_set_queue_dying(md->queue.queue);
     mmc_cleanup_queue(&md->queue);
     if (md->disk->flags & GENHD_FL_UP) {
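
To restate the rule the new mmc_blk_eval_resp_error() helper implements (a condensed paraphrase of the patch, not extra driver logic):

    /* the two multi-block command sequences at issue:
     * predefined:  CMD23(n) -> CMD18/CMD25, card knows the length up front
     * open-ended:  CMD18/CMD25 -> CMD12, the host stops the transfer itself
     */
    u32 status = brq->stop.resp[0] & CMD_ERRORS;
    bool open_ended = !brq->mrq.sbc; /* no CMD23 was queued for this request */

    /* OUT_OF_RANGE on the last user-area block is legal only when open-ended */
    if (status && !((status & R1_OUT_OF_RANGE) && open_ended))
        brq->stop.error = -EIO;

Only the open-ended case can legitimately trip OUT_OF_RANGE when a read touches the last user-area block, which is why the check masks R1_OUT_OF_RANGE only when the CMD23 request (brq->mrq.sbc) is absent.
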
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 4ffea14b7eb6..2bae69e39544 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1289,7 +1289,7 @@ out_err:
 static int mmc_select_hs400es(struct mmc_card *card)
 {
     struct mmc_host *host = card->host;
-    int err = 0;
+    int err = -EINVAL;
     u8 val;
 
     if (!(host->caps & MMC_CAP_8_BIT_DATA)) {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index a9dfb26972f2..250dc6ec4c82 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2957,7 +2957,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
     }
 
     /* find out number of slots supported */
-    if (device_property_read_u32(dev, "num-slots", &pdata->num_slots))
+    if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
         dev_info(dev, "'num-slots' was deprecated.\n");
 
     if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 7c12f3715676..2ab4788d021f 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -356,9 +356,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
     struct mmc_host *mmc = host->mmc;
     int ret = 0;
 
-    if (mmc_pdata(host)->set_power)
-        return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
-
     /*
      * If we don't see a Vcc regulator, assume it's a fixed
      * voltage always-on regulator.
@@ -366,9 +363,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
     if (IS_ERR(mmc->supply.vmmc))
         return 0;
 
-    if (mmc_pdata(host)->before_set_reg)
-        mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
-
     ret = omap_hsmmc_set_pbias(host, false, 0);
     if (ret)
         return ret;
@@ -400,9 +394,6 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
         return ret;
     }
 
-    if (mmc_pdata(host)->after_set_reg)
-        mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
-
     return 0;
 
 err_set_voltage:
@@ -469,8 +460,6 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
     int ret;
     struct mmc_host *mmc = host->mmc;
 
-    if (mmc_pdata(host)->set_power)
-        return 0;
 
     ret = mmc_regulator_get_supply(mmc);
     if (ret == -EPROBE_DEFER)
@@ -2097,7 +2086,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
     mmc->max_seg_size = mmc->max_req_size;
 
     mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
-             MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
+             MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
 
     mmc->caps |= mmc_pdata(host)->caps;
     if (mmc->caps & MMC_CAP_8_BIT_DATA)
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7611fd679f1a..1485530c3592 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -31,6 +31,7 @@
 
 #define SDMMC_MC1R 0x204
 #define SDMMC_MC1R_DDR BIT(3)
+#define SDMMC_MC1R_FCD BIT(7)
 #define SDMMC_CACR 0x230
 #define SDMMC_CACR_CAPWREN BIT(0)
 #define SDMMC_CACR_KEY (0x46 << 8)
@@ -43,6 +44,15 @@ struct sdhci_at91_priv {
     struct clk *mainck;
 };
 
+static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
+{
+    u8 mc1r;
+
+    mc1r = readb(host->ioaddr + SDMMC_MC1R);
+    mc1r |= SDMMC_MC1R_FCD;
+    writeb(mc1r, host->ioaddr + SDMMC_MC1R);
+}
+
 static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
 {
     u16 clk;
@@ -110,10 +120,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
     sdhci_set_uhs_signaling(host, timing);
 }
 
+static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
+{
+    sdhci_reset(host, mask);
+
+    if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+        sdhci_at91_set_force_card_detect(host);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
     .set_clock = sdhci_at91_set_clock,
     .set_bus_width = sdhci_set_bus_width,
-    .reset = sdhci_reset,
+    .reset = sdhci_at91_reset,
     .set_uhs_signaling = sdhci_at91_set_uhs_signaling,
     .set_power = sdhci_at91_set_power,
 };
@@ -324,6 +342,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
         host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
     }
 
+    /*
+     * If the device attached to the MMC bus is not removable, it is safer
+     * to set the Force Card Detect bit. People often don't connect the
+     * card detect signal and use this pin for another purpose. If the card
+     * detect pin is not muxed to SDHCI controller, a default value is
+     * used. This value can be different from a SoC revision to another
+     * one. Problems come when this default value is not card present. To
+     * avoid this case, if the device is non removable then the card
+     * detection procedure using the SDMCC_CD signal is bypassed.
+     * This bit is reset when a software reset for all command is performed
+     * so we need to implement our own reset function to set back this bit.
+     */
+    if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+        sdhci_at91_set_force_card_detect(host);
+
     pm_runtime_put_autosuspend(&pdev->dev);
 
     return 0;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index d6fa2214aaae..0fb4e4c119e1 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -793,8 +793,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
     }
     mmc_writel(host, REG_CLKCR, rval);
 
-    if (host->cfg->needs_new_timings)
-        mmc_writel(host, REG_SD_NTSR, SDXC_2X_TIMING_MODE);
+    if (host->cfg->needs_new_timings) {
+        /* Don't touch the delay bits */
+        rval = mmc_readl(host, REG_SD_NTSR);
+        rval |= SDXC_2X_TIMING_MODE;
+        mmc_writel(host, REG_SD_NTSR, rval);
+    }
 
     ret = sunxi_mmc_clk_set_phase(host, ios, rate);
     if (ret)
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f336a9b85576..9ec8f033ac5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -113,6 +113,7 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
         for (; nsect > 0; nsect--, block++, buf += tr->blksize)
             if (tr->writesect(dev, block, buf))
                 return BLK_STS_IOERR;
+        return BLK_STS_OK;
     default:
         return BLK_STS_IOERR;
     }
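
The mtd_blkdevs fix is a classic switch fall-through bug: the write branch looped over every sector successfully and then fell straight into `default:`, reporting an I/O error for writes that had worked. A minimal standalone illustration (not the driver code):

    #include <stdio.h>

    static int handle(int op)
    {
        switch (op) {
        case 1:
            /* work happens here... */
            /* without this return, control falls into default below */
            return 0;
        default:
            return -1;
        }
    }

    int main(void)
    {
        /* prints 0; delete the 'return 0;' above and it prints -1 */
        printf("%d\n", handle(1));
        return 0;
    }
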
diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c
index d922a88e407f..ceec21bd30c4 100644
--- a/drivers/mtd/nand/atmel/nand-controller.c
+++ b/drivers/mtd/nand/atmel/nand-controller.c
@@ -1201,7 +1201,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
      * tRC < 30ns implies EDO mode. This controller does not support this
      * mode.
      */
-    if (conf->timings.sdr.tRC_min < 30)
+    if (conf->timings.sdr.tRC_min < 30000)
         return -ENOTSUPP;
 
     atmel_smc_cs_conf_init(smcconf);
@@ -1364,7 +1364,18 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
     ret = atmel_smc_cs_conf_set_timing(smcconf,
                     ATMEL_HSMC_TIMINGS_TADL_SHIFT,
                     ncycles);
-    if (ret)
+    /*
+     * Version 4 of the ONFI spec mandates that tADL be at least 400
+     * nanoseconds, but, depending on the master clock rate, 400 ns may not
+     * fit in the tADL field of the SMC reg. We need to relax the check and
+     * accept the -ERANGE return code.
+     *
+     * Note that previous versions of the ONFI spec had a lower tADL_min
+     * (100 or 200 ns). It's not clear why this timing constraint got
+     * increased but it seems most NANDs are fine with values lower than
+     * 400ns, so we should be safe.
+     */
+    if (ret && ret != -ERANGE)
         return ret;
 
     ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
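
The tRC_min change is a units bug: the SDR timing fields are stored in picoseconds, so the 30 ns EDO-mode threshold must be written as 30000 ps. A quick standalone sanity check of the conversion (illustrative values):

    #include <stdio.h>

    int main(void)
    {
        unsigned long tRC_min_ps = 25000; /* a 25 ns NAND, in picoseconds */

        /* wrong: compares picoseconds against a nanosecond literal */
        printf("ps < 30    -> %d\n", tRC_min_ps < 30);     /* 0: never true */
        /* right: both sides in picoseconds */
        printf("ps < 30000 -> %d\n", tRC_min_ps < 30000);  /* 1 */
        return 0;
    }

With the old `< 30` test the EDO-mode rejection could essentially never fire, since any realistic picosecond timing is far above 30.
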
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 55a8ee5306ea..8c210a5776bc 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -945,6 +945,7 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
945 */ 945 */
946 struct platform_device *pdev = to_platform_device(userdev); 946 struct platform_device *pdev = to_platform_device(userdev);
947 const struct atmel_pmecc_caps *caps; 947 const struct atmel_pmecc_caps *caps;
948 const struct of_device_id *match;
948 949
949 /* No PMECC engine available. */ 950 /* No PMECC engine available. */
950 if (!of_property_read_bool(userdev->of_node, 951 if (!of_property_read_bool(userdev->of_node,
@@ -953,21 +954,11 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *userdev)
953 954
954 caps = &at91sam9g45_caps; 955 caps = &at91sam9g45_caps;
955 956
956 /* 957 /* Find the caps associated to the NAND dev node. */
957 * Try to find the NFC subnode and extract the associated caps 958 match = of_match_node(atmel_pmecc_legacy_match,
958 * from there. 959 userdev->of_node);
959 */ 960 if (match && match->data)
960 np = of_find_compatible_node(userdev->of_node, NULL, 961 caps = match->data;
961 "atmel,sama5d3-nfc");
962 if (np) {
963 const struct of_device_id *match;
964
965 match = of_match_node(atmel_pmecc_legacy_match, np);
966 if (match && match->data)
967 caps = match->data;
968
969 of_node_put(np);
970 }
971 962
972 pmecc = atmel_pmecc_create(pdev, caps, 1, 2); 963 pmecc = atmel_pmecc_create(pdev, caps, 1, 2);
973 } 964 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 5fa5ddc94834..c6c18b82f8f4 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -65,8 +65,14 @@ static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
65 65
66 if (!section) { 66 if (!section) {
67 oobregion->offset = 0; 67 oobregion->offset = 0;
68 oobregion->length = 4; 68 if (mtd->oobsize == 16)
69 oobregion->length = 4;
70 else
71 oobregion->length = 3;
69 } else { 72 } else {
73 if (mtd->oobsize == 8)
74 return -ERANGE;
75
70 oobregion->offset = 6; 76 oobregion->offset = 6;
71 oobregion->length = ecc->total - 4; 77 oobregion->length = ecc->total - 4;
72 } 78 }
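Worked out: with a 16-byte OOB, section 0 covers the 4 ECC bytes at offsets 0-3 and section 1 holds the remaining ECC bytes starting at offset 6. With an 8-byte OOB only 3 ECC bytes fit in section 0 and there is no second ECC region at all, hence the -ERANGE for any section beyond the first.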
@@ -1125,7 +1131,9 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
1125 * Ensure the timing mode has been changed on the chip side 1131 * Ensure the timing mode has been changed on the chip side
1126 * before changing timings on the controller side. 1132 * before changing timings on the controller side.
1127 */ 1133 */
1128 if (chip->onfi_version) { 1134 if (chip->onfi_version &&
1135 (le16_to_cpu(chip->onfi_params.opt_cmd) &
1136 ONFI_OPT_CMD_SET_GET_FEATURES)) {
1129 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { 1137 u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
1130 chip->onfi_timing_mode_default, 1138 chip->onfi_timing_mode_default,
1131 }; 1139 };
@@ -2741,7 +2749,6 @@ static int nand_write_page_syndrome(struct mtd_info *mtd,
2741 * @buf: the data to write 2749 * @buf: the data to write
2742 * @oob_required: must write chip->oob_poi to OOB 2750 * @oob_required: must write chip->oob_poi to OOB
2743 * @page: page number to write 2751 * @page: page number to write
2744 * @cached: cached programming
2745 * @raw: use _raw version of write_page 2752 * @raw: use _raw version of write_page
2746 */ 2753 */
2747static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2754static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index f06312df3669..7e36d7d13c26 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -311,9 +311,9 @@ int onfi_init_data_interface(struct nand_chip *chip,
311 struct nand_sdr_timings *timings = &iface->timings.sdr; 311 struct nand_sdr_timings *timings = &iface->timings.sdr;
312 312
313 /* microseconds -> picoseconds */ 313 /* microseconds -> picoseconds */
314 timings->tPROG_max = 1000000UL * le16_to_cpu(params->t_prog); 314 timings->tPROG_max = 1000000ULL * le16_to_cpu(params->t_prog);
315 timings->tBERS_max = 1000000UL * le16_to_cpu(params->t_bers); 315 timings->tBERS_max = 1000000ULL * le16_to_cpu(params->t_bers);
316 timings->tR_max = 1000000UL * le16_to_cpu(params->t_r); 316 timings->tR_max = 1000000ULL * le16_to_cpu(params->t_r);
317 317
318 /* nanoseconds -> picoseconds */ 318 /* nanoseconds -> picoseconds */
319 timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs); 319 timings->tCCS_min = 1000UL * le16_to_cpu(params->t_ccs);
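The UL to ULL change matters on 32-bit builds, where unsigned long is 32 bits wide: the worst-case ONFI field value of 65535 us scales to 65,535,000,000 ps, well past UINT32_MAX (4,294,967,295), so the 32-bit multiply silently wraps. A standalone demonstration, using an explicit cast to mimic a 32-bit unsigned long:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t t_prog = 65535;            /* worst-case ONFI field, in us */

        /* What 1000000UL * t_prog yields when unsigned long is 32 bits: */
        uint32_t truncated = (uint32_t)(1000000ULL * t_prog);
        /* What 1000000ULL * t_prog always yields: */
        uint64_t correct = 1000000ULL * t_prog;

        printf("32-bit result: %u ps (wrapped)\n", truncated);
        printf("64-bit result: %llu ps\n", (unsigned long long)correct);
        return 0;
    }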
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 03a0d057bf2f..e4211c3cc49b 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2373,6 +2373,7 @@ static int __init ns_init_module(void)
2373 return 0; 2373 return 0;
2374 2374
2375err_exit: 2375err_exit:
2376 nandsim_debugfs_remove(nand);
2376 free_nandsim(nand); 2377 free_nandsim(nand);
2377 nand_release(nsmtd); 2378 nand_release(nsmtd);
2378 for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i) 2379 for (i = 0;i < ARRAY_SIZE(nand->partitions); ++i)
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index d0b6f8f9f297..6abd142b1324 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -1728,6 +1728,10 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
1728 */ 1728 */
1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period; 1729 chip->clk_rate = NSEC_PER_SEC / min_clk_period;
1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); 1730 real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
1731 if (real_clk_rate <= 0) {
1732 dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate);
1733 return -EINVAL;
1734 }
1731 1735
1732 /* 1736 /*
1733 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data 1737 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 7c754a0f14bb..19e4e904c9bf 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -2,20 +2,11 @@
2# Multiplexer devices 2# Multiplexer devices
3# 3#
4 4
5menuconfig MULTIPLEXER 5config MULTIPLEXER
6 tristate "Multiplexer subsystem" 6 tristate
7 help
8 Multiplexer controller subsystem. Multiplexers are used in a
9 variety of settings, and this subsystem abstracts their use
10 so that the rest of the kernel sees a common interface. When
11 multiple parallel multiplexers are controlled by one single
12 multiplexer controller, this subsystem also coordinates the
13 multiplexer accesses.
14
15 To compile the subsystem as a module, choose M here: the module will
16 be called mux-core.
17 7
18if MULTIPLEXER 8menu "Multiplexer drivers"
9 depends on MULTIPLEXER
19 10
20config MUX_ADG792A 11config MUX_ADG792A
21 tristate "Analog Devices ADG792A/ADG792G Multiplexers" 12 tristate "Analog Devices ADG792A/ADG792G Multiplexers"
@@ -56,4 +47,4 @@ config MUX_MMIO
56 To compile the driver as a module, choose M here: the module will 47 To compile the driver as a module, choose M here: the module will
57 be called mux-mmio. 48 be called mux-mmio.
58 49
59endif 50endmenu
diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c
index 90b8995f07cb..2fe96c470112 100644
--- a/drivers/mux/mux-core.c
+++ b/drivers/mux/mux-core.c
@@ -46,7 +46,7 @@ static int __init mux_init(void)
46 46
47static void __exit mux_exit(void) 47static void __exit mux_exit(void)
48{ 48{
49 class_register(&mux_class); 49 class_unregister(&mux_class);
50 ida_destroy(&mux_ida); 50 ida_destroy(&mux_ida);
51} 51}
52 52
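The one-line mux-core fix above repairs a classic copy-paste bug: the exit path called class_register() a second time instead of unregistering, so unloading the module left the class behind. A minimal sketch of the intended init/exit symmetry, using hypothetical demo_* names modelled on mux-core:

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/idr.h>

    static struct class demo_class = { .name = "demo" };
    static DEFINE_IDA(demo_ida);

    static int __init demo_init(void)
    {
        return class_register(&demo_class);
    }

    static void __exit demo_exit(void)
    {
        /* Mirror init exactly: unregister the class, then drain the IDA. */
        class_unregister(&demo_class);
        ida_destroy(&demo_ida);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");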
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 14ff622190a5..fc63992ab0e0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1569,7 +1569,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1569 new_slave->delay = 0; 1569 new_slave->delay = 0;
1570 new_slave->link_failure_count = 0; 1570 new_slave->link_failure_count = 0;
1571 1571
1572 if (bond_update_speed_duplex(new_slave)) 1572 if (bond_update_speed_duplex(new_slave) &&
1573 bond_needs_speed_duplex(bond))
1573 new_slave->link = BOND_LINK_DOWN; 1574 new_slave->link = BOND_LINK_DOWN;
1574 1575
1575 new_slave->last_rx = jiffies - 1576 new_slave->last_rx = jiffies -
@@ -2050,6 +2051,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2050 continue; 2051 continue;
2051 2052
2052 bond_propose_link_state(slave, BOND_LINK_FAIL); 2053 bond_propose_link_state(slave, BOND_LINK_FAIL);
2054 commit++;
2053 slave->delay = bond->params.downdelay; 2055 slave->delay = bond->params.downdelay;
2054 if (slave->delay) { 2056 if (slave->delay) {
2055 netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", 2057 netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -2088,6 +2090,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2088 continue; 2090 continue;
2089 2091
2090 bond_propose_link_state(slave, BOND_LINK_BACK); 2092 bond_propose_link_state(slave, BOND_LINK_BACK);
2093 commit++;
2091 slave->delay = bond->params.updelay; 2094 slave->delay = bond->params.updelay;
2092 2095
2093 if (slave->delay) { 2096 if (slave->delay) {
@@ -2138,11 +2141,13 @@ static void bond_miimon_commit(struct bonding *bond)
2138 continue; 2141 continue;
2139 2142
2140 case BOND_LINK_UP: 2143 case BOND_LINK_UP:
2141 if (bond_update_speed_duplex(slave)) { 2144 if (bond_update_speed_duplex(slave) &&
2145 bond_needs_speed_duplex(bond)) {
2142 slave->link = BOND_LINK_DOWN; 2146 slave->link = BOND_LINK_DOWN;
2143 netdev_warn(bond->dev, 2147 if (net_ratelimit())
2144 "failed to get link speed/duplex for %s\n", 2148 netdev_warn(bond->dev,
2145 slave->dev->name); 2149 "failed to get link speed/duplex for %s\n",
2150 slave->dev->name);
2146 continue; 2151 continue;
2147 } 2152 }
2148 bond_set_slave_link_state(slave, BOND_LINK_UP, 2153 bond_set_slave_link_state(slave, BOND_LINK_UP,
@@ -4596,7 +4601,7 @@ static int bond_check_params(struct bond_params *params)
4596 } 4601 }
4597 ad_user_port_key = valptr->value; 4602 ad_user_port_key = valptr->value;
4598 4603
4599 if (bond_mode == BOND_MODE_TLB) { 4604 if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) {
4600 bond_opt_initstr(&newval, "default"); 4605 bond_opt_initstr(&newval, "default");
4601 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), 4606 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB),
4602 &newval); 4607 &newval);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e68d368e20ac..7f36d3e3c98b 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1665,6 +1665,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
1665 .dev_name = "BCM53125", 1665 .dev_name = "BCM53125",
1666 .vlans = 4096, 1666 .vlans = 4096,
1667 .enabled_ports = 0xff, 1667 .enabled_ports = 0xff,
1668 .arl_entries = 4,
1668 .cpu_port = B53_CPU_PORT, 1669 .cpu_port = B53_CPU_PORT,
1669 .vta_regs = B53_VTA_REGS, 1670 .vta_regs = B53_VTA_REGS,
1670 .duplex_reg = B53_DUPLEX_STAT_GE, 1671 .duplex_reg = B53_DUPLEX_STAT_GE,
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1e46418a3b74..264b281eb86b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -625,6 +625,44 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
625 * all finished. 625 * all finished.
626 */ 626 */
627 mt7623_pad_clk_setup(ds); 627 mt7623_pad_clk_setup(ds);
628 } else {
629 u16 lcl_adv = 0, rmt_adv = 0;
630 u8 flowctrl;
631 u32 mcr = PMCR_USERP_LINK | PMCR_FORCE_MODE;
632
633 switch (phydev->speed) {
634 case SPEED_1000:
635 mcr |= PMCR_FORCE_SPEED_1000;
636 break;
637 case SPEED_100:
638 mcr |= PMCR_FORCE_SPEED_100;
639 break;
 640 }
641
642 if (phydev->link)
643 mcr |= PMCR_FORCE_LNK;
644
645 if (phydev->duplex) {
646 mcr |= PMCR_FORCE_FDX;
647
648 if (phydev->pause)
649 rmt_adv = LPA_PAUSE_CAP;
650 if (phydev->asym_pause)
651 rmt_adv |= LPA_PAUSE_ASYM;
652
653 if (phydev->advertising & ADVERTISED_Pause)
654 lcl_adv |= ADVERTISE_PAUSE_CAP;
655 if (phydev->advertising & ADVERTISED_Asym_Pause)
656 lcl_adv |= ADVERTISE_PAUSE_ASYM;
657
658 flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
659
660 if (flowctrl & FLOW_CTRL_TX)
661 mcr |= PMCR_TX_FC_EN;
662 if (flowctrl & FLOW_CTRL_RX)
663 mcr |= PMCR_RX_FC_EN;
664 }
665 mt7530_write(priv, MT7530_PMCR_P(port), mcr);
628 } 666 }
629} 667}
630 668
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index b83d76b99802..74db9822eb40 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -151,6 +151,7 @@ enum mt7530_stp_state {
151#define PMCR_TX_FC_EN BIT(5) 151#define PMCR_TX_FC_EN BIT(5)
152#define PMCR_RX_FC_EN BIT(4) 152#define PMCR_RX_FC_EN BIT(4)
153#define PMCR_FORCE_SPEED_1000 BIT(3) 153#define PMCR_FORCE_SPEED_1000 BIT(3)
154#define PMCR_FORCE_SPEED_100 BIT(2)
154#define PMCR_FORCE_FDX BIT(1) 155#define PMCR_FORCE_FDX BIT(1)
155#define PMCR_FORCE_LNK BIT(0) 156#define PMCR_FORCE_LNK BIT(0)
156#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \ 157#define PMCR_COMMON_LINK (PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 53b088166c28..5bcdd33101b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3178,6 +3178,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
3178 .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, 3178 .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
3179 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3179 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
3180 .port_pause_limit = mv88e6390_port_pause_limit, 3180 .port_pause_limit = mv88e6390_port_pause_limit,
3181 .port_set_cmode = mv88e6390x_port_set_cmode,
3181 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, 3182 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
3182 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, 3183 .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
3183 .stats_snapshot = mv88e6390_g1_stats_snapshot, 3184 .stats_snapshot = mv88e6390_g1_stats_snapshot,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index d3906f6b01bd..1d307f2def2d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1787,14 +1787,16 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
1787 1787
1788 pdata->clk = devm_clk_get(&pdev->dev, NULL); 1788 pdata->clk = devm_clk_get(&pdev->dev, NULL);
1789 if (IS_ERR(pdata->clk)) { 1789 if (IS_ERR(pdata->clk)) {
1790 /* Abort if the clock is defined but couldn't be retrieved. 1790 if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
1791 * Always abort if the clock is missing on DT system as 1791 /* Abort if the clock is defined but couldn't be
1792 * the driver can't cope with this case. 1792 * retrieved. Always abort if the clock is missing on a
1793 */ 1793 * DT system as the driver can't cope with this case.
1794 if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) 1794 */
1795 return PTR_ERR(pdata->clk); 1795 if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
1796 /* Firmware may have set up the clock already. */ 1796 return PTR_ERR(pdata->clk);
1797 dev_info(dev, "clocks have been setup already\n"); 1797 /* Firmware may have set up the clock already. */
1798 dev_info(dev, "clocks have been setup already\n");
1799 }
1798 } 1800 }
1799 1801
1800 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) 1802 if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 041cfb7952f8..e94159507847 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
609 mac_mode |= HALF_DUPLEX; 609 mac_mode |= HALF_DUPLEX;
610 610
611 if (gigabit) { 611 if (gigabit) {
612 if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) 612 if (phy_interface_is_rgmii(dev->phydev))
613 mac_mode |= RGMII_MODE; 613 mac_mode |= RGMII_MODE;
614 614
615 mac_mode |= GMAC_MODE; 615 mac_mode |= GMAC_MODE;
@@ -1268,11 +1268,10 @@ static int nb8800_tangox_init(struct net_device *dev)
1268 break; 1268 break;
1269 1269
1270 case PHY_INTERFACE_MODE_RGMII: 1270 case PHY_INTERFACE_MODE_RGMII:
1271 pad_mode = PAD_MODE_RGMII; 1271 case PHY_INTERFACE_MODE_RGMII_ID:
1272 break; 1272 case PHY_INTERFACE_MODE_RGMII_RXID:
1273
1274 case PHY_INTERFACE_MODE_RGMII_TXID: 1273 case PHY_INTERFACE_MODE_RGMII_TXID:
1275 pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; 1274 pad_mode = PAD_MODE_RGMII;
1276 break; 1275 break;
1277 1276
1278 default: 1277 default:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index f411936b744c..a1125d10c825 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2368,6 +2368,7 @@ static int b44_init_one(struct ssb_device *sdev,
2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE); 2368 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2369 2369
2370 spin_lock_init(&bp->lock); 2370 spin_lock_init(&bp->lock);
2371 u64_stats_init(&bp->hw_stats.syncp);
2371 2372
2372 bp->rx_pending = B44_DEF_RX_RING_PENDING; 2373 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2373 bp->tx_pending = B44_DEF_TX_RING_PENDING; 2374 bp->tx_pending = B44_DEF_TX_RING_PENDING;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5333601f855f..dc3052751bc1 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -449,6 +449,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
449 p = (char *)&dev->stats; 449 p = (char *)&dev->stats;
450 else 450 else
451 p = (char *)priv; 451 p = (char *)priv;
452
453 if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
454 continue;
455
452 p += s->stat_offset; 456 p += s->stat_offset;
453 data[j] = *(unsigned long *)p; 457 data[j] = *(unsigned long *)p;
454 j++; 458 j++;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 73aca97a96bc..d937083db9a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -50,11 +50,14 @@ static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
50 50
51static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) 51static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
52{ 52{
53 return writel(value, bgmac->plat.idm_base + offset); 53 writel(value, bgmac->plat.idm_base + offset);
54} 54}
55 55
56static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) 56static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
57{ 57{
58 if (!bgmac->plat.idm_base)
59 return true;
60
58 if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) 61 if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN)
59 return false; 62 return false;
60 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) 63 if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
@@ -66,6 +69,9 @@ static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
66{ 69{
67 u32 val; 70 u32 val;
68 71
72 if (!bgmac->plat.idm_base)
73 return;
74
69 /* The Reset Control register only contains a single bit to show if the 75 /* The Reset Control register only contains a single bit to show if the
70 * controller is currently in reset. Do a sanity check here, just in 76 * controller is currently in reset. Do a sanity check here, just in
71 * case the bootloader happened to leave the device in reset. 77 * case the bootloader happened to leave the device in reset.
@@ -180,6 +186,7 @@ static int bgmac_probe(struct platform_device *pdev)
180 bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; 186 bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
181 bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; 187 bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
182 bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; 188 bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
189 bgmac->feature_flags |= BGMAC_FEAT_IDM_MASK;
183 190
184 bgmac->dev = &pdev->dev; 191 bgmac->dev = &pdev->dev;
185 bgmac->dma_dev = &pdev->dev; 192 bgmac->dma_dev = &pdev->dev;
@@ -207,15 +214,13 @@ static int bgmac_probe(struct platform_device *pdev)
207 return PTR_ERR(bgmac->plat.base); 214 return PTR_ERR(bgmac->plat.base);
208 215
209 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); 216 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
210 if (!regs) { 217 if (regs) {
211 dev_err(&pdev->dev, "Unable to obtain idm resource\n"); 218 bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
212 return -EINVAL; 219 if (IS_ERR(bgmac->plat.idm_base))
220 return PTR_ERR(bgmac->plat.idm_base);
221 bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
213 } 222 }
214 223
215 bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
216 if (IS_ERR(bgmac->plat.idm_base))
217 return PTR_ERR(bgmac->plat.idm_base);
218
219 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); 224 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
220 if (regs) { 225 if (regs) {
221 bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, 226 bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index ba4d2e145bb9..48d672b204a4 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -622,9 +622,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
622 BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); 622 BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
623 BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); 623 BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
624 624
625 if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { 625 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
626 dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); 626 if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
627 return -ENOTSUPP; 627 dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
628 return -ENOTSUPP;
629 }
628 } 630 }
629 631
630 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { 632 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
@@ -855,9 +857,11 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
855static void bgmac_miiconfig(struct bgmac *bgmac) 857static void bgmac_miiconfig(struct bgmac *bgmac)
856{ 858{
857 if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { 859 if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
858 bgmac_idm_write(bgmac, BCMA_IOCTL, 860 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
859 bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 | 861 bgmac_idm_write(bgmac, BCMA_IOCTL,
860 BGMAC_BCMA_IOCTL_SW_CLKEN); 862 bgmac_idm_read(bgmac, BCMA_IOCTL) |
863 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN);
864 }
861 bgmac->mac_speed = SPEED_2500; 865 bgmac->mac_speed = SPEED_2500;
862 bgmac->mac_duplex = DUPLEX_FULL; 866 bgmac->mac_duplex = DUPLEX_FULL;
863 bgmac_mac_speed(bgmac); 867 bgmac_mac_speed(bgmac);
@@ -874,11 +878,36 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
874 } 878 }
875} 879}
876 880
881static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
882{
883 u32 iost;
884
885 iost = bgmac_idm_read(bgmac, BCMA_IOST);
886 if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
887 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
888
889 /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
890 if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
891 u32 flags = 0;
892
893 if (iost & BGMAC_BCMA_IOST_ATTACHED) {
894 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
895 if (!bgmac->has_robosw)
896 flags |= BGMAC_BCMA_IOCTL_SW_RESET;
897 }
898 bgmac_clk_enable(bgmac, flags);
899 }
900
901 if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
902 bgmac_idm_write(bgmac, BCMA_IOCTL,
903 bgmac_idm_read(bgmac, BCMA_IOCTL) &
904 ~BGMAC_BCMA_IOCTL_SW_RESET);
905}
906
877/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ 907/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
878static void bgmac_chip_reset(struct bgmac *bgmac) 908static void bgmac_chip_reset(struct bgmac *bgmac)
879{ 909{
880 u32 cmdcfg_sr; 910 u32 cmdcfg_sr;
881 u32 iost;
882 int i; 911 int i;
883 912
884 if (bgmac_clk_enabled(bgmac)) { 913 if (bgmac_clk_enabled(bgmac)) {
@@ -899,20 +928,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
899 /* TODO: Clear software multicast filter list */ 928 /* TODO: Clear software multicast filter list */
900 } 929 }
901 930
902 iost = bgmac_idm_read(bgmac, BCMA_IOST); 931 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK))
903 if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) 932 bgmac_chip_reset_idm_config(bgmac);
904 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
905
906 /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
907 if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
908 u32 flags = 0;
909 if (iost & BGMAC_BCMA_IOST_ATTACHED) {
910 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
911 if (!bgmac->has_robosw)
912 flags |= BGMAC_BCMA_IOCTL_SW_RESET;
913 }
914 bgmac_clk_enable(bgmac, flags);
915 }
916 933
917 /* Request Misc PLL for corerev > 2 */ 934 /* Request Misc PLL for corerev > 2 */
918 if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { 935 if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
@@ -970,11 +987,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
970 BGMAC_CHIPCTL_7_IF_TYPE_RGMII); 987 BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
971 } 988 }
972 989
973 if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
974 bgmac_idm_write(bgmac, BCMA_IOCTL,
975 bgmac_idm_read(bgmac, BCMA_IOCTL) &
976 ~BGMAC_BCMA_IOCTL_SW_RESET);
977
978 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset 990 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
979 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine 991 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
980 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to 992 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
@@ -1497,8 +1509,10 @@ int bgmac_enet_probe(struct bgmac *bgmac)
1497 bgmac_clk_enable(bgmac, 0); 1509 bgmac_clk_enable(bgmac, 0);
1498 1510
1499 /* This seems to be fixing IRQ by assigning OOB #6 to the core */ 1511 /* This seems to be fixing IRQ by assigning OOB #6 to the core */
1500 if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) 1512 if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) {
1501 bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); 1513 if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
1514 bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);
1515 }
1502 1516
1503 bgmac_chip_reset(bgmac); 1517 bgmac_chip_reset(bgmac);
1504 1518
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index c1818766c501..443d57b10264 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -425,6 +425,7 @@
425#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17) 425#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17)
426#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18) 426#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
427#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19) 427#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
428#define BGMAC_FEAT_IDM_MASK BIT(20)
428 429
429struct bgmac_slot_info { 430struct bgmac_slot_info {
430 union { 431 union {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 43423744fdfa..1e33abde4a3e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2886,7 +2886,7 @@ static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
2886 2886
2887static int bnx2x_test_nvram(struct bnx2x *bp) 2887static int bnx2x_test_nvram(struct bnx2x *bp)
2888{ 2888{
2889 const struct crc_pair nvram_tbl[] = { 2889 static const struct crc_pair nvram_tbl[] = {
2890 { 0, 0x14 }, /* bootstrap */ 2890 { 0, 0x14 }, /* bootstrap */
2891 { 0x14, 0xec }, /* dir */ 2891 { 0x14, 0xec }, /* dir */
2892 { 0x100, 0x350 }, /* manuf_info */ 2892 { 0x100, 0x350 }, /* manuf_info */
@@ -2895,7 +2895,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
2895 { 0x708, 0x70 }, /* manuf_key_info */ 2895 { 0x708, 0x70 }, /* manuf_key_info */
2896 { 0, 0 } 2896 { 0, 0 }
2897 }; 2897 };
2898 const struct crc_pair nvram_tbl2[] = { 2898 static const struct crc_pair nvram_tbl2[] = {
2899 { 0x7e8, 0x350 }, /* manuf_info2 */ 2899 { 0x7e8, 0x350 }, /* manuf_info2 */
2900 { 0xb38, 0xf0 }, /* feature_info */ 2900 { 0xb38, 0xf0 }, /* feature_info */
2901 { 0, 0 } 2901 { 0, 0 }
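Marking the nvram_tbl arrays static const is not cosmetic: a plain const local array is re-initialized on the stack at every call, while a static const one is emitted once into .rodata, with no per-call copy (ignoring what the optimizer may do to trivial cases). A minimal illustration of the difference:

    /* Rebuilt on the stack at each call: */
    int sum_stack(void)
    {
        const int tbl[] = { 1, 2, 3 };
        return tbl[0] + tbl[1] + tbl[2];
    }

    /* Emitted once, read-only, shared by all calls: */
    int sum_rodata(void)
    {
        static const int tbl[] = { 1, 2, 3 };
        return tbl[0] + tbl[1] + tbl[2];
    }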
@@ -3162,7 +3162,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3162 if (is_multi(bp)) { 3162 if (is_multi(bp)) {
3163 for_each_eth_queue(bp, i) { 3163 for_each_eth_queue(bp, i) {
3164 memset(queue_name, 0, sizeof(queue_name)); 3164 memset(queue_name, 0, sizeof(queue_name));
3165 sprintf(queue_name, "%d", i); 3165 snprintf(queue_name, sizeof(queue_name),
3166 "%d", i);
3166 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 3167 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
3167 snprintf(buf + (k + j)*ETH_GSTRING_LEN, 3168 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
3168 ETH_GSTRING_LEN, 3169 ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index daca1c9d254b..a981c4ee9d72 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1202 return tx_cb_ptr; 1202 return tx_cb_ptr;
1203} 1203}
1204 1204
1205/* Simple helper to free a control block's resources */ 1205static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
1206static void bcmgenet_free_cb(struct enet_cb *cb) 1206 struct bcmgenet_tx_ring *ring)
1207{ 1207{
1208 dev_kfree_skb_any(cb->skb); 1208 struct enet_cb *tx_cb_ptr;
1209 cb->skb = NULL; 1209
1210 dma_unmap_addr_set(cb, dma_addr, 0); 1210 tx_cb_ptr = ring->cbs;
1211 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1212
1213 /* Rewinding local write pointer */
1214 if (ring->write_ptr == ring->cb_ptr)
1215 ring->write_ptr = ring->end_ptr;
1216 else
1217 ring->write_ptr--;
1218
1219 return tx_cb_ptr;
1211} 1220}
1212 1221
1213static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) 1222static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1260 INTRL2_CPU_MASK_SET); 1269 INTRL2_CPU_MASK_SET);
1261} 1270}
1262 1271
1272/* Simple helper to free a transmit control block's resources
1273 * Returns an skb when the last transmit control block associated with the
1274 * skb is freed. The skb should be freed by the caller if necessary.
1275 */
1276static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
1277 struct enet_cb *cb)
1278{
1279 struct sk_buff *skb;
1280
1281 skb = cb->skb;
1282
1283 if (skb) {
1284 cb->skb = NULL;
1285 if (cb == GENET_CB(skb)->first_cb)
1286 dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1287 dma_unmap_len(cb, dma_len),
1288 DMA_TO_DEVICE);
1289 else
1290 dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
1291 dma_unmap_len(cb, dma_len),
1292 DMA_TO_DEVICE);
1293 dma_unmap_addr_set(cb, dma_addr, 0);
1294
1295 if (cb == GENET_CB(skb)->last_cb)
1296 return skb;
1297
1298 } else if (dma_unmap_addr(cb, dma_addr)) {
1299 dma_unmap_page(dev,
1300 dma_unmap_addr(cb, dma_addr),
1301 dma_unmap_len(cb, dma_len),
1302 DMA_TO_DEVICE);
1303 dma_unmap_addr_set(cb, dma_addr, 0);
1304 }
1305
1306 return NULL;
1307}
1308
1309/* Simple helper to free a receive control block's resources */
1310static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
1311 struct enet_cb *cb)
1312{
1313 struct sk_buff *skb;
1314
1315 skb = cb->skb;
1316 cb->skb = NULL;
1317
1318 if (dma_unmap_addr(cb, dma_addr)) {
1319 dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1320 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
1321 dma_unmap_addr_set(cb, dma_addr, 0);
1322 }
1323
1324 return skb;
1325}
1326
1263/* Unlocked version of the reclaim routine */ 1327/* Unlocked version of the reclaim routine */
1264static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, 1328static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1265 struct bcmgenet_tx_ring *ring) 1329 struct bcmgenet_tx_ring *ring)
1266{ 1330{
1267 struct bcmgenet_priv *priv = netdev_priv(dev); 1331 struct bcmgenet_priv *priv = netdev_priv(dev);
1268 struct device *kdev = &priv->pdev->dev; 1332 unsigned int txbds_processed = 0;
1269 struct enet_cb *tx_cb_ptr;
1270 unsigned int pkts_compl = 0;
1271 unsigned int bytes_compl = 0; 1333 unsigned int bytes_compl = 0;
1272 unsigned int c_index; 1334 unsigned int pkts_compl = 0;
1273 unsigned int txbds_ready; 1335 unsigned int txbds_ready;
1274 unsigned int txbds_processed = 0; 1336 unsigned int c_index;
1337 struct sk_buff *skb;
1275 1338
1276 /* Clear status before servicing to reduce spurious interrupts */ 1339 /* Clear status before servicing to reduce spurious interrupts */
1277 if (ring->index == DESC_INDEX) 1340 if (ring->index == DESC_INDEX)
@@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1292 1355
1293 /* Reclaim transmitted buffers */ 1356 /* Reclaim transmitted buffers */
1294 while (txbds_processed < txbds_ready) { 1357 while (txbds_processed < txbds_ready) {
1295 tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; 1358 skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
1296 if (tx_cb_ptr->skb) { 1359 &priv->tx_cbs[ring->clean_ptr]);
1360 if (skb) {
1297 pkts_compl++; 1361 pkts_compl++;
1298 bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; 1362 bytes_compl += GENET_CB(skb)->bytes_sent;
1299 dma_unmap_single(kdev, 1363 dev_kfree_skb_any(skb);
1300 dma_unmap_addr(tx_cb_ptr, dma_addr),
1301 dma_unmap_len(tx_cb_ptr, dma_len),
1302 DMA_TO_DEVICE);
1303 bcmgenet_free_cb(tx_cb_ptr);
1304 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
1305 dma_unmap_page(kdev,
1306 dma_unmap_addr(tx_cb_ptr, dma_addr),
1307 dma_unmap_len(tx_cb_ptr, dma_len),
1308 DMA_TO_DEVICE);
1309 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
1310 } 1364 }
1311 1365
1312 txbds_processed++; 1366 txbds_processed++;
@@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1380 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); 1434 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1381} 1435}
1382 1436
1383/* Transmits a single SKB (either head of a fragment or a single SKB)
1384 * caller must hold priv->lock
1385 */
1386static int bcmgenet_xmit_single(struct net_device *dev,
1387 struct sk_buff *skb,
1388 u16 dma_desc_flags,
1389 struct bcmgenet_tx_ring *ring)
1390{
1391 struct bcmgenet_priv *priv = netdev_priv(dev);
1392 struct device *kdev = &priv->pdev->dev;
1393 struct enet_cb *tx_cb_ptr;
1394 unsigned int skb_len;
1395 dma_addr_t mapping;
1396 u32 length_status;
1397 int ret;
1398
1399 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1400
1401 if (unlikely(!tx_cb_ptr))
1402 BUG();
1403
1404 tx_cb_ptr->skb = skb;
1405
1406 skb_len = skb_headlen(skb);
1407
1408 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
1409 ret = dma_mapping_error(kdev, mapping);
1410 if (ret) {
1411 priv->mib.tx_dma_failed++;
1412 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1413 dev_kfree_skb(skb);
1414 return ret;
1415 }
1416
1417 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1418 dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
1419 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1420 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
1421 DMA_TX_APPEND_CRC;
1422
1423 if (skb->ip_summed == CHECKSUM_PARTIAL)
1424 length_status |= DMA_TX_DO_CSUM;
1425
1426 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
1427
1428 return 0;
1429}
1430
1431/* Transmit a SKB fragment */
1432static int bcmgenet_xmit_frag(struct net_device *dev,
1433 skb_frag_t *frag,
1434 u16 dma_desc_flags,
1435 struct bcmgenet_tx_ring *ring)
1436{
1437 struct bcmgenet_priv *priv = netdev_priv(dev);
1438 struct device *kdev = &priv->pdev->dev;
1439 struct enet_cb *tx_cb_ptr;
1440 unsigned int frag_size;
1441 dma_addr_t mapping;
1442 int ret;
1443
1444 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1445
1446 if (unlikely(!tx_cb_ptr))
1447 BUG();
1448
1449 tx_cb_ptr->skb = NULL;
1450
1451 frag_size = skb_frag_size(frag);
1452
1453 mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
1454 ret = dma_mapping_error(kdev, mapping);
1455 if (ret) {
1456 priv->mib.tx_dma_failed++;
1457 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
1458 __func__);
1459 return ret;
1460 }
1461
1462 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1463 dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
1464
1465 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1466 (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1467 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1468
1469 return 0;
1470}
1471
1472/* Reallocate the SKB to put enough headroom in front of it and insert 1437/* Reallocate the SKB to put enough headroom in front of it and insert
1473 * the transmit checksum offsets in the descriptors 1438 * the transmit checksum offsets in the descriptors
1474 */ 1439 */
@@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1535static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) 1500static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1536{ 1501{
1537 struct bcmgenet_priv *priv = netdev_priv(dev); 1502 struct bcmgenet_priv *priv = netdev_priv(dev);
1503 struct device *kdev = &priv->pdev->dev;
1538 struct bcmgenet_tx_ring *ring = NULL; 1504 struct bcmgenet_tx_ring *ring = NULL;
1505 struct enet_cb *tx_cb_ptr;
1539 struct netdev_queue *txq; 1506 struct netdev_queue *txq;
1540 unsigned long flags = 0; 1507 unsigned long flags = 0;
1541 int nr_frags, index; 1508 int nr_frags, index;
1542 u16 dma_desc_flags; 1509 dma_addr_t mapping;
1510 unsigned int size;
1511 skb_frag_t *frag;
1512 u32 len_stat;
1543 int ret; 1513 int ret;
1544 int i; 1514 int i;
1545 1515
@@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1592 } 1562 }
1593 } 1563 }
1594 1564
1595 dma_desc_flags = DMA_SOP; 1565 for (i = 0; i <= nr_frags; i++) {
1596 if (nr_frags == 0) 1566 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1597 dma_desc_flags |= DMA_EOP;
1598 1567
1599 /* Transmit single SKB or head of fragment list */ 1568 if (unlikely(!tx_cb_ptr))
1600 ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); 1569 BUG();
1601 if (ret) { 1570
1602 ret = NETDEV_TX_OK; 1571 if (!i) {
1603 goto out; 1572 /* Transmit single SKB or head of fragment list */
1604 } 1573 GENET_CB(skb)->first_cb = tx_cb_ptr;
1574 size = skb_headlen(skb);
1575 mapping = dma_map_single(kdev, skb->data, size,
1576 DMA_TO_DEVICE);
1577 } else {
1578 /* xmit fragment */
1579 frag = &skb_shinfo(skb)->frags[i - 1];
1580 size = skb_frag_size(frag);
1581 mapping = skb_frag_dma_map(kdev, frag, 0, size,
1582 DMA_TO_DEVICE);
1583 }
1605 1584
1606 /* xmit fragment */ 1585 ret = dma_mapping_error(kdev, mapping);
1607 for (i = 0; i < nr_frags; i++) {
1608 ret = bcmgenet_xmit_frag(dev,
1609 &skb_shinfo(skb)->frags[i],
1610 (i == nr_frags - 1) ? DMA_EOP : 0,
1611 ring);
1612 if (ret) { 1586 if (ret) {
1587 priv->mib.tx_dma_failed++;
1588 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1613 ret = NETDEV_TX_OK; 1589 ret = NETDEV_TX_OK;
1614 goto out; 1590 goto out_unmap_frags;
1591 }
1592 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1593 dma_unmap_len_set(tx_cb_ptr, dma_len, size);
1594
1595 tx_cb_ptr->skb = skb;
1596
1597 len_stat = (size << DMA_BUFLENGTH_SHIFT) |
1598 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
1599
1600 if (!i) {
1601 len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
1602 if (skb->ip_summed == CHECKSUM_PARTIAL)
1603 len_stat |= DMA_TX_DO_CSUM;
1615 } 1604 }
1605 if (i == nr_frags)
1606 len_stat |= DMA_EOP;
1607
1608 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
1616 } 1609 }
1617 1610
1611 GENET_CB(skb)->last_cb = tx_cb_ptr;
1618 skb_tx_timestamp(skb); 1612 skb_tx_timestamp(skb);
1619 1613
1620 /* Decrement total BD count and advance our write pointer */ 1614 /* Decrement total BD count and advance our write pointer */
@@ -1635,6 +1629,19 @@ out:
1635 spin_unlock_irqrestore(&ring->lock, flags); 1629 spin_unlock_irqrestore(&ring->lock, flags);
1636 1630
1637 return ret; 1631 return ret;
1632
1633out_unmap_frags:
1634 /* Back up for failed control block mapping */
1635 bcmgenet_put_txcb(priv, ring);
1636
1637 /* Unmap successfully mapped control blocks */
1638 while (i-- > 0) {
1639 tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
1640 bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
1641 }
1642
1643 dev_kfree_skb(skb);
1644 goto out;
1638} 1645}
1639 1646
1640static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, 1647static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
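The new out_unmap_frags path undoes a partially mapped transmit: drop the control block whose mapping just failed, then rewind over the i blocks that did map and release each one in reverse order. A toy standalone model of that unwind, with get_cb/put_cb/map_cb standing in for bcmgenet_get_txcb(), bcmgenet_put_txcb() and the DMA mapping:

    #include <stdio.h>

    #define NFRAGS 3

    static int idx;                         /* toy ring write pointer */

    static int get_cb(void) { return idx++; }
    static int put_cb(void) { return --idx; }
    static int map_cb(int cb) { return cb == 2 ? -1 : 0; } /* 3rd map fails */

    int main(void)
    {
        int i, cb;

        for (i = 0; i <= NFRAGS; i++) {
            cb = get_cb();
            if (map_cb(cb)) {
                put_cb();               /* back up over the failed block */
                while (i-- > 0) {
                    cb = put_cb();      /* rewind to each mapped block */
                    printf("unmap cb %d\n", cb);
                }
                return 1;               /* caller then frees the skb */
            }
        }
        return 0;
    }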
@@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
1666 } 1673 }
1667 1674
1668 /* Grab the current Rx skb from the ring and DMA-unmap it */ 1675 /* Grab the current Rx skb from the ring and DMA-unmap it */
1669 rx_skb = cb->skb; 1676 rx_skb = bcmgenet_free_rx_cb(kdev, cb);
1670 if (likely(rx_skb))
1671 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
1672 priv->rx_buf_len, DMA_FROM_DEVICE);
1673 1677
1674 /* Put the new Rx skb on the ring */ 1678 /* Put the new Rx skb on the ring */
1675 cb->skb = skb; 1679 cb->skb = skb;
1676 dma_unmap_addr_set(cb, dma_addr, mapping); 1680 dma_unmap_addr_set(cb, dma_addr, mapping);
1681 dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
1677 dmadesc_set_addr(priv, cb->bd_addr, mapping); 1682 dmadesc_set_addr(priv, cb->bd_addr, mapping);
1678 1683
1679 /* Return the current Rx skb to caller */ 1684 /* Return the current Rx skb to caller */
@@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1880 1885
1881static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) 1886static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1882{ 1887{
1883 struct device *kdev = &priv->pdev->dev; 1888 struct sk_buff *skb;
1884 struct enet_cb *cb; 1889 struct enet_cb *cb;
1885 int i; 1890 int i;
1886 1891
1887 for (i = 0; i < priv->num_rx_bds; i++) { 1892 for (i = 0; i < priv->num_rx_bds; i++) {
1888 cb = &priv->rx_cbs[i]; 1893 cb = &priv->rx_cbs[i];
1889 1894
1890 if (dma_unmap_addr(cb, dma_addr)) { 1895 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
1891 dma_unmap_single(kdev, 1896 if (skb)
1892 dma_unmap_addr(cb, dma_addr), 1897 dev_kfree_skb_any(skb);
1893 priv->rx_buf_len, DMA_FROM_DEVICE);
1894 dma_unmap_addr_set(cb, dma_addr, 0);
1895 }
1896
1897 if (cb->skb)
1898 bcmgenet_free_cb(cb);
1899 } 1898 }
1900} 1899}
1901 1900
@@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2479 2478
2480static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 2479static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2481{ 2480{
2482 int i;
2483 struct netdev_queue *txq; 2481 struct netdev_queue *txq;
2482 struct sk_buff *skb;
2483 struct enet_cb *cb;
2484 int i;
2484 2485
2485 bcmgenet_fini_rx_napi(priv); 2486 bcmgenet_fini_rx_napi(priv);
2486 bcmgenet_fini_tx_napi(priv); 2487 bcmgenet_fini_tx_napi(priv);
@@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
2489 bcmgenet_dma_teardown(priv); 2490 bcmgenet_dma_teardown(priv);
2490 2491
2491 for (i = 0; i < priv->num_tx_bds; i++) { 2492 for (i = 0; i < priv->num_tx_bds; i++) {
2492 if (priv->tx_cbs[i].skb != NULL) { 2493 cb = priv->tx_cbs + i;
2493 dev_kfree_skb(priv->tx_cbs[i].skb); 2494 skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
2494 priv->tx_cbs[i].skb = NULL; 2495 if (skb)
2495 } 2496 dev_kfree_skb(skb);
2496 } 2497 }
2497 2498
2498 for (i = 0; i < priv->hw_params->tx_queues; i++) { 2499 for (i = 0; i < priv->hw_params->tx_queues; i++) {
@@ -3668,7 +3669,7 @@ static int bcmgenet_resume(struct device *d)
3668 3669
3669 phy_init_hw(priv->phydev); 3670 phy_init_hw(priv->phydev);
3670 /* Speed settings must be restored */ 3671 /* Speed settings must be restored */
3671 bcmgenet_mii_config(priv->dev); 3672 bcmgenet_mii_config(priv->dev, false);
3672 3673
3673 /* disable ethernet MAC while updating its registers */ 3674 /* disable ethernet MAC while updating its registers */
3674 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); 3675 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index efd07020b89f..3a34fdba5301 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
544}; 544};
545 545
546struct bcmgenet_skb_cb { 546struct bcmgenet_skb_cb {
547 struct enet_cb *first_cb; /* First control block of SKB */
548 struct enet_cb *last_cb; /* Last control block of SKB */
547 unsigned int bytes_sent; /* bytes on the wire (no TSB) */ 549 unsigned int bytes_sent; /* bytes on the wire (no TSB) */
548}; 550};
549 551
@@ -696,7 +698,7 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
696 698
697/* MDIO routines */ 699/* MDIO routines */
698int bcmgenet_mii_init(struct net_device *dev); 700int bcmgenet_mii_init(struct net_device *dev);
699int bcmgenet_mii_config(struct net_device *dev); 701int bcmgenet_mii_config(struct net_device *dev, bool init);
700int bcmgenet_mii_probe(struct net_device *dev); 702int bcmgenet_mii_probe(struct net_device *dev);
701void bcmgenet_mii_exit(struct net_device *dev); 703void bcmgenet_mii_exit(struct net_device *dev);
702void bcmgenet_mii_reset(struct net_device *dev); 704void bcmgenet_mii_reset(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 071fcbd14e6a..30cb97b4a1d7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -238,7 +238,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
238 bcmgenet_fixed_phy_link_update); 238 bcmgenet_fixed_phy_link_update);
239} 239}
240 240
241int bcmgenet_mii_config(struct net_device *dev) 241int bcmgenet_mii_config(struct net_device *dev, bool init)
242{ 242{
243 struct bcmgenet_priv *priv = netdev_priv(dev); 243 struct bcmgenet_priv *priv = netdev_priv(dev);
244 struct phy_device *phydev = priv->phydev; 244 struct phy_device *phydev = priv->phydev;
@@ -327,7 +327,8 @@ int bcmgenet_mii_config(struct net_device *dev)
327 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); 327 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
328 } 328 }
329 329
330 dev_info_once(kdev, "configuring instance for %s\n", phy_name); 330 if (init)
331 dev_info(kdev, "configuring instance for %s\n", phy_name);
331 332
332 return 0; 333 return 0;
333} 334}
@@ -375,7 +376,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
375 * PHY speed which is needed for bcmgenet_mii_config() to configure 376 * PHY speed which is needed for bcmgenet_mii_config() to configure
376 * things appropriately. 377 * things appropriately.
377 */ 378 */
378 ret = bcmgenet_mii_config(dev); 379 ret = bcmgenet_mii_config(dev, true);
379 if (ret) { 380 if (ret) {
380 phy_disconnect(priv->phydev); 381 phy_disconnect(priv->phydev);
381 return ret; 382 return ret;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 28ecda3d3404..ebd353bc78ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -335,7 +335,7 @@ lio_ethtool_get_channels(struct net_device *dev,
335 335
336static int lio_get_eeprom_len(struct net_device *netdev) 336static int lio_get_eeprom_len(struct net_device *netdev)
337{ 337{
338 u8 buf[128]; 338 u8 buf[192];
339 struct lio *lio = GET_LIO(netdev); 339 struct lio *lio = GET_LIO(netdev);
340 struct octeon_device *oct_dev = lio->oct_dev; 340 struct octeon_device *oct_dev = lio->oct_dev;
341 struct octeon_board_info *board_info; 341 struct octeon_board_info *board_info;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a0ca68ce3fbb..5e5c4d7796b8 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -292,11 +292,30 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
292 u64 cmr_cfg; 292 u64 cmr_cfg;
293 u64 port_cfg = 0; 293 u64 port_cfg = 0;
294 u64 misc_ctl = 0; 294 u64 misc_ctl = 0;
295 bool tx_en, rx_en;
295 296
296 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG); 297 cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
297 cmr_cfg &= ~CMR_EN; 298 tx_en = cmr_cfg & CMR_PKT_TX_EN;
299 rx_en = cmr_cfg & CMR_PKT_RX_EN;
300 cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
298 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 301 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
299 302
303 /* Wait for BGX RX to be idle */
304 if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
305 GMI_PORT_CFG_RX_IDLE, false)) {
306 dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
307 bgx->bgx_id, lmac->lmacid);
308 return;
309 }
310
311 /* Wait for BGX TX to be idle */
312 if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
313 GMI_PORT_CFG_TX_IDLE, false)) {
314 dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
315 bgx->bgx_id, lmac->lmacid);
316 return;
317 }
318
300 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 319 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
301 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL); 320 misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
302 321
@@ -347,10 +366,8 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
347 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl); 366 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
348 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg); 367 bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
349 368
350 port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG); 369 /* Restore CMR config settings */
351 370 cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
352 /* Re-enable lmac */
353 cmr_cfg |= CMR_EN;
354 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg); 371 bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
355 372
356 if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN))) 373 if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
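The thunder BGX change swaps a blunt disable/re-enable of the whole LMAC for a quiesce/reconfigure/restore sequence: mask packet TX/RX, wait for the GMI datapath to report idle, reprogram speed and duplex, then put back only the enables that were set on entry. A condensed, compilable sketch of that pattern (the bit positions and helpers are placeholders, not the real BGX definitions):

    #include <stdint.h>

    #define CMR_PKT_TX_EN (1ull << 13)      /* placeholder bit positions */
    #define CMR_PKT_RX_EN (1ull << 14)

    static uint64_t cmr_cfg_reg;            /* toy CMR config register */

    static uint64_t reg_read(void)          { return cmr_cfg_reg; }
    static void reg_write(uint64_t v)       { cmr_cfg_reg = v; }
    static int wait_gmi_idle(void)          { return 0; } /* pretend it drains */
    static void program_speed_duplex(void)  { }

    void change_link_state(void)
    {
        uint64_t cfg = reg_read();

        /* Stop new packets, then let in-flight ones drain. */
        reg_write(cfg & ~(CMR_PKT_TX_EN | CMR_PKT_RX_EN));
        if (wait_gmi_idle())
            return;                      /* never drained: stay stopped */

        program_speed_duplex();          /* safe: the MAC is quiescent */

        reg_write(cfg);                  /* restore the enables saved on entry */
    }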
@@ -1008,7 +1025,7 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
1008{ 1025{
1009 struct device *dev = &bgx->pdev->dev; 1026 struct device *dev = &bgx->pdev->dev;
1010 struct lmac *lmac; 1027 struct lmac *lmac;
1011 char str[20]; 1028 char str[27];
1012 1029
1013 if (!bgx->is_dlm && lmacid) 1030 if (!bgx->is_dlm && lmacid)
1014 return; 1031 return;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 6b7fe6fdd13b..23acdc5ab896 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -170,6 +170,8 @@
170#define GMI_PORT_CFG_DUPLEX BIT_ULL(2) 170#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
171#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3) 171#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
172#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8) 172#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
173#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
174#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
173#define BGX_GMP_GMI_RXX_JABBER 0x38038 175#define BGX_GMP_GMI_RXX_JABBER 0x38038
174#define BGX_GMP_GMI_TXX_THRESH 0x38210 176#define BGX_GMP_GMI_TXX_THRESH 0x38210
175#define BGX_GMP_GMI_TXX_APPEND 0x38218 177#define BGX_GMP_GMI_TXX_APPEND 0x38218
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ef4be781fd05..09ea62ee96d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -529,6 +529,7 @@ enum { /* adapter flags */
529 USING_SOFT_PARAMS = (1 << 6), 529 USING_SOFT_PARAMS = (1 << 6),
530 MASTER_PF = (1 << 7), 530 MASTER_PF = (1 << 7),
531 FW_OFLD_CONN = (1 << 9), 531 FW_OFLD_CONN = (1 << 9),
532 ROOT_NO_RELAXED_ORDERING = (1 << 10),
532}; 533};
533 534
534enum { 535enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e403fa18f1b1..33bb8678833a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4654,11 +4654,6 @@ static void print_port_info(const struct net_device *dev)
4654 dev->name, adap->params.vpd.id, adap->name, buf); 4654 dev->name, adap->params.vpd.id, adap->name, buf);
4655} 4655}
4656 4656
4657static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4658{
4659 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4660}
4661
4662/* 4657/*
4663 * Free the following resources: 4658 * Free the following resources:
4664 * - memory used for tables 4659 * - memory used for tables
@@ -4908,7 +4903,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4908 } 4903 }
4909 4904
4910 pci_enable_pcie_error_reporting(pdev); 4905 pci_enable_pcie_error_reporting(pdev);
4911 enable_pcie_relaxed_ordering(pdev);
4912 pci_set_master(pdev); 4906 pci_set_master(pdev);
4913 pci_save_state(pdev); 4907 pci_save_state(pdev);
4914 4908
@@ -4947,6 +4941,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4947 adapter->msg_enable = DFLT_MSG_ENABLE; 4941 adapter->msg_enable = DFLT_MSG_ENABLE;
4948 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); 4942 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4949 4943
4944 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
4945 * Ingress Packet Data to Free List Buffers in order to allow for
4946 * chipset performance optimizations between the Root Complex and
4947 * Memory Controllers. (Messages to the associated Ingress Queue
4948 * notifying new Packet Placement in the Free Lists Buffers will be
4949 * send without the Relaxed Ordering Attribute thus guaranteeing that
4950 * all preceding PCIe Transaction Layer Packets will be processed
4951 * first.) But some Root Complexes have various issues with Upstream
4952 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
 4953 * PCIe devices under such Root Complexes will have the Relaxed
 4954 * Ordering bit cleared in their configuration space, so we check our
4955 * PCIe configuration space to see if it's flagged with advice against
4956 * using Relaxed Ordering.
4957 */
4958 if (!pcie_relaxed_ordering_enabled(pdev))
4959 adapter->flags |= ROOT_NO_RELAXED_ORDERING;
4960
4950 spin_lock_init(&adapter->stats_lock); 4961 spin_lock_init(&adapter->stats_lock);
4951 spin_lock_init(&adapter->tid_release_lock); 4962 spin_lock_init(&adapter->tid_release_lock);
4952 spin_lock_init(&adapter->win0_lock); 4963 spin_lock_init(&adapter->win0_lock);
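pcie_relaxed_ordering_enabled() boils down to asking whether the Relaxed Ordering enable bit is still set in the device's own Device Control register (the PCI core clears it under Root Complexes known to mishandle relaxed-ordered TLPs, as the comment above describes). A sketch of an equivalent check; the in-tree helper may differ in detail:

    #include <linux/pci.h>

    static bool demo_relaxed_ordering_enabled(struct pci_dev *pdev)
    {
        u16 devctl;

        /* Read PCIe Device Control and test the Relaxed Ordering enable. */
        pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
        return devctl & PCI_EXP_DEVCTL_RELAX_EN;
    }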
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 50517cfd9671..9f9d6cae39d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -441,7 +441,8 @@ void cxgb4_ptp_init(struct adapter *adapter)
441 441
442 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, 442 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
443 &adapter->pdev->dev); 443 &adapter->pdev->dev);
444 if (!adapter->ptp_clock) { 444 if (IS_ERR_OR_NULL(adapter->ptp_clock)) {
445 adapter->ptp_clock = NULL;
445 dev_err(adapter->pdev_dev, 446 dev_err(adapter->pdev_dev,
446 "PTP %s Clock registration has failed\n", __func__); 447 "PTP %s Clock registration has failed\n", __func__);
447 return; 448 return;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ede12209f20b..4ef68f69b58c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2719,6 +2719,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2719 struct fw_iq_cmd c; 2719 struct fw_iq_cmd c;
2720 struct sge *s = &adap->sge; 2720 struct sge *s = &adap->sge;
2721 struct port_info *pi = netdev_priv(dev); 2721 struct port_info *pi = netdev_priv(dev);
2722 int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
2722 2723
2723 /* Size needs to be multiple of 16, including status entry. */ 2724 /* Size needs to be multiple of 16, including status entry. */
2724 iq->size = roundup(iq->size, 16); 2725 iq->size = roundup(iq->size, 16);
@@ -2772,8 +2773,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2772 2773
2773 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); 2774 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2774 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F | 2775 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
2775 FW_IQ_CMD_FL0FETCHRO_F | 2776 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2776 FW_IQ_CMD_FL0DATARO_F | 2777 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2777 FW_IQ_CMD_FL0PADEN_F); 2778 FW_IQ_CMD_FL0PADEN_F);
2778 if (cong >= 0) 2779 if (cong >= 0)
2779 c.iqns_to_fl0congen |= 2780 c.iqns_to_fl0congen |=
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 99987d8e437e..aa28299aef5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -174,6 +174,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
174 CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ 174 CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
175 CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */ 175 CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
176 CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */ 176 CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */
177 CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
178 CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
177 179
178 /* T6 adapters: 180 /* T6 adapters:
179 */ 181 */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 109bc630408b..08c6ddb84a04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -408,6 +408,7 @@ enum { /* adapter flags */
408 USING_MSI = (1UL << 1), 408 USING_MSI = (1UL << 1),
409 USING_MSIX = (1UL << 2), 409 USING_MSIX = (1UL << 2),
410 QUEUES_BOUND = (1UL << 3), 410 QUEUES_BOUND = (1UL << 3),
411 ROOT_NO_RELAXED_ORDERING = (1UL << 4),
411}; 412};
412 413
413/* 414/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ac7a150c54e9..2b85b874fd0d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2888,6 +2888,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2888 */ 2888 */
2889 adapter->name = pci_name(pdev); 2889 adapter->name = pci_name(pdev);
2890 adapter->msg_enable = DFLT_MSG_ENABLE; 2890 adapter->msg_enable = DFLT_MSG_ENABLE;
2891
2892 /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
2893 * Ingress Packet Data to Free List Buffers in order to allow for
2894 * chipset performance optimizations between the Root Complex and
2895 * Memory Controllers. (Messages to the associated Ingress Queue
2896 * notifying new Packet Placement in the Free List Buffers will be
2897 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
2898 * all preceding PCIe Transaction Layer Packets will be processed
2899 * first.) But some Root Complexes have various issues with Upstream
2900 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
2901 * PCIe devices under such Root Complexes will have the Relaxed
2902 * Ordering bit cleared in their configuration space, so we check our
2903 * PCIe configuration space to see if it's flagged with advice against
2904 * using Relaxed Ordering.
2905 */
2906 if (!pcie_relaxed_ordering_enabled(pdev))
2907 adapter->flags |= ROOT_NO_RELAXED_ORDERING;
2908
2891 err = adap_init0(adapter); 2909 err = adap_init0(adapter);
2892 if (err) 2910 if (err)
2893 goto err_unmap_bar; 2911 goto err_unmap_bar;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index e37dde2ba97f..05498e7f2840 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2205,6 +2205,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2205 struct port_info *pi = netdev_priv(dev); 2205 struct port_info *pi = netdev_priv(dev);
2206 struct fw_iq_cmd cmd, rpl; 2206 struct fw_iq_cmd cmd, rpl;
2207 int ret, iqandst, flsz = 0; 2207 int ret, iqandst, flsz = 0;
2208 int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING);
2208 2209
2209 /* 2210 /*
2210 * If we're using MSI interrupts and we're not initializing the 2211 * If we're using MSI interrupts and we're not initializing the
@@ -2300,6 +2301,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2300 cpu_to_be32( 2301 cpu_to_be32(
2301 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | 2302 FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2302 FW_IQ_CMD_FL0PACKEN_F | 2303 FW_IQ_CMD_FL0PACKEN_F |
2304 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
2305 FW_IQ_CMD_FL0DATARO_V(relaxed) |
2303 FW_IQ_CMD_FL0PADEN_F); 2306 FW_IQ_CMD_FL0PADEN_F);
2304 2307
2305 /* In T6, for egress queue type FL there is internal overhead 2308 /* In T6, for egress queue type FL there is internal overhead
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 95bf5e89cfd1..34dae51effd4 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -125,7 +125,7 @@ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
125 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR); 125 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
126 iowrite32(maccr | FTGMAC100_MACCR_SW_RST, 126 iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
127 priv->base + FTGMAC100_OFFSET_MACCR); 127 priv->base + FTGMAC100_OFFSET_MACCR);
128 for (i = 0; i < 50; i++) { 128 for (i = 0; i < 200; i++) {
129 unsigned int maccr; 129 unsigned int maccr;
130 130
131 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR); 131 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
@@ -392,7 +392,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
392 struct net_device *netdev = priv->netdev; 392 struct net_device *netdev = priv->netdev;
393 struct sk_buff *skb; 393 struct sk_buff *skb;
394 dma_addr_t map; 394 dma_addr_t map;
395 int err; 395 int err = 0;
396 396
397 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE); 397 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
398 if (unlikely(!skb)) { 398 if (unlikely(!skb)) {
@@ -428,7 +428,7 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
428 else 428 else
429 rxdes->rxdes0 = 0; 429 rxdes->rxdes0 = 0;
430 430
431 return 0; 431 return err;
432} 432}
433 433
434static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv, 434static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
@@ -1682,6 +1682,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
1682 priv->mii_bus->name = "ftgmac100_mdio"; 1682 priv->mii_bus->name = "ftgmac100_mdio";
1683 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", 1683 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
1684 pdev->name, pdev->id); 1684 pdev->name, pdev->id);
1685 priv->mii_bus->parent = priv->dev;
1685 priv->mii_bus->priv = priv->netdev; 1686 priv->mii_bus->priv = priv->netdev;
1686 priv->mii_bus->read = ftgmac100_mdiobus_read; 1687 priv->mii_bus->read = ftgmac100_mdiobus_read;
1687 priv->mii_bus->write = ftgmac100_mdiobus_write; 1688 priv->mii_bus->write = ftgmac100_mdiobus_write;
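
The first ftgmac100 hunk raises the software-reset poll bound from 50 to 200 iterations, giving slower configurations more time for the self-clearing SW_RST bit to deassert. A sketch of the poll-with-bound pattern, with the register offset and bit position marked as assumptions:

    #include <linux/bits.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    #define MY_OFFSET_MACCR	0x50		/* assumed register offset */
    #define MY_MACCR_SW_RST	BIT(31)		/* assumed self-clearing bit */

    static int my_reset_mac(void __iomem *base, u32 maccr)
    {
    	int i;

    	iowrite32(maccr | MY_MACCR_SW_RST, base + MY_OFFSET_MACCR);

    	for (i = 0; i < 200; i++) {
    		u32 val = ioread32(base + MY_OFFSET_MACCR);

    		if (!(val & MY_MACCR_SW_RST))
    			return 0;	/* reset completed */
    		udelay(1);
    	}
    	return -EIO;			/* hardware never deasserted */
    }
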
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ff864a187d5a..a37166ee577b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -776,8 +776,9 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
776 776
777 assert(handle); 777 assert(handle);
778 mac_cb = hns_get_mac_cb(handle); 778 mac_cb = hns_get_mac_cb(handle);
779 if (!mac_cb->cpld_ctrl) 779 if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
780 return; 780 return;
781
781 hns_set_led_opt(mac_cb); 782 hns_set_led_opt(mac_cb);
782} 783}
783 784
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 7a8addda726e..408b63faf9a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -53,6 +53,34 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
53 return ret; 53 return ret;
54} 54}
55 55
56static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
57 u32 link, u32 port, u32 act)
58{
59 union acpi_object *obj;
60 union acpi_object obj_args[3], argv4;
61
62 obj_args[0].integer.type = ACPI_TYPE_INTEGER;
63 obj_args[0].integer.value = link;
64 obj_args[1].integer.type = ACPI_TYPE_INTEGER;
65 obj_args[1].integer.value = port;
66 obj_args[2].integer.type = ACPI_TYPE_INTEGER;
67 obj_args[2].integer.value = act;
68
69 argv4.type = ACPI_TYPE_PACKAGE;
70 argv4.package.count = 3;
71 argv4.package.elements = obj_args;
72
73 obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
74 &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
75 if (!obj) {
76 dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
77 link, port, act);
78 return;
79 }
80
81 ACPI_FREE(obj);
82}
83
56static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, 84static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
57 u16 speed, int data) 85 u16 speed, int data)
58{ 86{
@@ -93,6 +121,18 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
93 } 121 }
94} 122}
95 123
124static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
125 u16 speed, int data)
126{
127 if (!mac_cb) {
128 pr_err("cpld_led_set mac_cb is null!\n");
129 return;
130 }
131
132 hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
133 link_status, mac_cb->mac_id, data);
134}
135
96static void cpld_led_reset(struct hns_mac_cb *mac_cb) 136static void cpld_led_reset(struct hns_mac_cb *mac_cb)
97{ 137{
98 if (!mac_cb || !mac_cb->cpld_ctrl) 138 if (!mac_cb || !mac_cb->cpld_ctrl)
@@ -103,6 +143,20 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb)
103 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; 143 mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
104} 144}
105 145
146static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
147{
148 if (!mac_cb) {
149 pr_err("cpld_led_reset mac_cb is null!\n");
150 return;
151 }
152
153 if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
154 return;
155
156 hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
157 0, mac_cb->mac_id, 0);
158}
159
106static int cpld_set_led_id(struct hns_mac_cb *mac_cb, 160static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
107 enum hnae_led_state status) 161 enum hnae_led_state status)
108{ 162{
@@ -604,8 +658,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
604 658
605 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback; 659 misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
606 } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { 660 } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
607 misc_op->cpld_set_led = hns_cpld_set_led; 661 misc_op->cpld_set_led = hns_cpld_set_led_acpi;
608 misc_op->cpld_reset_led = cpld_led_reset; 662 misc_op->cpld_reset_led = cpld_led_reset_acpi;
609 misc_op->cpld_set_led_id = cpld_set_led_id; 663 misc_op->cpld_set_led_id = cpld_set_led_id;
610 664
611 misc_op->dsaf_reset = hns_dsaf_rst_acpi; 665 misc_op->dsaf_reset = hns_dsaf_rst_acpi;
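
hns_dsaf_acpi_ledctrl_by_port above drives the LEDs through an ACPI _DSM method: the three integers are wrapped in a package that becomes the method's Arg3, and the returned object must be released with ACPI_FREE(). A minimal sketch of the same call shape, with a placeholder GUID standing in for hns_dsaf_acpi_dsm_guid:

    #include <linux/acpi.h>
    #include <linux/errno.h>
    #include <linux/uuid.h>

    /* placeholder GUID; the driver uses its own hns_dsaf_acpi_dsm_guid */
    static const guid_t my_dsm_guid =
    	GUID_INIT(0x00000000, 0x0000, 0x0000,
    		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

    static int my_dsm_set(struct device *dev, u64 func, u64 arg)
    {
    	union acpi_object args[1], argv4, *obj;

    	args[0].integer.type = ACPI_TYPE_INTEGER;
    	args[0].integer.value = arg;

    	argv4.type = ACPI_TYPE_PACKAGE;	/* _DSM Arg3 is a package */
    	argv4.package.count = 1;
    	argv4.package.elements = args;

    	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &my_dsm_guid,
    				0, func, &argv4);
    	if (!obj)
    		return -EIO;	/* method missing or evaluation failed */

    	ACPI_FREE(obj);		/* caller must free the returned object */
    	return 0;
    }
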
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a3e694679635..c45e8e3b82d3 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -111,6 +111,7 @@ static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111static void send_request_unmap(struct ibmvnic_adapter *, u8); 111static void send_request_unmap(struct ibmvnic_adapter *, u8);
112static void send_login(struct ibmvnic_adapter *adapter); 112static void send_login(struct ibmvnic_adapter *adapter);
113static void send_cap_queries(struct ibmvnic_adapter *adapter); 113static void send_cap_queries(struct ibmvnic_adapter *adapter);
114static int init_sub_crqs(struct ibmvnic_adapter *);
114static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); 115static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
115static int ibmvnic_init(struct ibmvnic_adapter *); 116static int ibmvnic_init(struct ibmvnic_adapter *);
116static void release_crq_queue(struct ibmvnic_adapter *); 117static void release_crq_queue(struct ibmvnic_adapter *);
@@ -651,6 +652,7 @@ static int ibmvnic_login(struct net_device *netdev)
651 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 652 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
652 unsigned long timeout = msecs_to_jiffies(30000); 653 unsigned long timeout = msecs_to_jiffies(30000);
653 struct device *dev = &adapter->vdev->dev; 654 struct device *dev = &adapter->vdev->dev;
655 int rc;
654 656
655 do { 657 do {
656 if (adapter->renegotiate) { 658 if (adapter->renegotiate) {
@@ -664,6 +666,18 @@ static int ibmvnic_login(struct net_device *netdev)
664 dev_err(dev, "Capabilities query timeout\n"); 666 dev_err(dev, "Capabilities query timeout\n");
665 return -1; 667 return -1;
666 } 668 }
669 rc = init_sub_crqs(adapter);
670 if (rc) {
671 dev_err(dev,
672 "Initialization of SCRQ's failed\n");
673 return -1;
674 }
675 rc = init_sub_crq_irqs(adapter);
676 if (rc) {
677 dev_err(dev,
678 "Initialization of SCRQ's irqs failed\n");
679 return -1;
680 }
667 } 681 }
668 682
669 reinit_completion(&adapter->init_done); 683 reinit_completion(&adapter->init_done);
@@ -3004,7 +3018,6 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3004 *req_value, 3018 *req_value,
3005 (long int)be64_to_cpu(crq->request_capability_rsp. 3019 (long int)be64_to_cpu(crq->request_capability_rsp.
3006 number), name); 3020 number), name);
3007 release_sub_crqs(adapter);
3008 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3021 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
3009 ibmvnic_send_req_caps(adapter, 1); 3022 ibmvnic_send_req_caps(adapter, 1);
3010 return; 3023 return;
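
The ibmvnic hunks move sub-CRQ construction into the login retry path: when capabilities are renegotiated, the queues and their IRQs are rebuilt before login is retried, and the capability-response handler no longer tears the queues down itself. A condensed sketch of the resulting control flow (driver function names, hypothetical struct):

    #include <linux/types.h>

    struct my_adapter {
    	bool renegotiate;
    };

    int init_sub_crqs(struct my_adapter *adapter);	/* driver helpers */
    int init_sub_crq_irqs(struct my_adapter *adapter);

    static int my_login(struct my_adapter *adapter)
    {
    	int rc;

    	while (adapter->renegotiate) {
    		adapter->renegotiate = false;
    		/* ... release old sub-CRQs, re-query capabilities ... */

    		rc = init_sub_crqs(adapter);
    		if (rc)
    			return -1;	/* queues could not be rebuilt */

    		rc = init_sub_crq_irqs(adapter);
    		if (rc)
    			return -1;	/* IRQ wiring failed */
    	}

    	/* ... send the login request and wait for completion ... */
    	return 0;
    }
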
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b936febc315a..2194960d5855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1113,6 +1113,8 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1113 if (!tx_ring->tx_bi) 1113 if (!tx_ring->tx_bi)
1114 goto err; 1114 goto err;
1115 1115
1116 u64_stats_init(&tx_ring->syncp);
1117
1116 /* round up to nearest 4K */ 1118 /* round up to nearest 4K */
1117 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 1119 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1118 /* add u32 for head writeback, align after this takes care of 1120 /* add u32 for head writeback, align after this takes care of
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 084c53582793..032f8ac06357 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2988,6 +2988,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2988 if (!tx_ring->tx_buffer_info) 2988 if (!tx_ring->tx_buffer_info)
2989 goto err; 2989 goto err;
2990 2990
2991 u64_stats_init(&tx_ring->syncp);
2992
2991 /* round up to nearest 4K */ 2993 /* round up to nearest 4K */
2992 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2994 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2993 tx_ring->size = ALIGN(tx_ring->size, 4096); 2995 tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -3046,6 +3048,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3046 if (!rx_ring->rx_buffer_info) 3048 if (!rx_ring->rx_buffer_info)
3047 goto err; 3049 goto err;
3048 3050
3051 u64_stats_init(&rx_ring->syncp);
3052
3049 /* Round up to nearest 4K */ 3053 /* Round up to nearest 4K */
3050 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 3054 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3051 rx_ring->size = ALIGN(rx_ring->size, 4096); 3055 rx_ring->size = ALIGN(rx_ring->size, 4096);
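
u64_stats_init() seeds the seqcount behind a struct u64_stats_sync; on 32-bit SMP kernels that seqcount is what lets readers fetch 64-bit counters consistently, and leaving it uninitialized can trip lockdep. A sketch of the surrounding usage pattern, with hypothetical names:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct my_ring {
    	u64 packets;
    	struct u64_stats_sync syncp;
    };

    static void my_ring_init(struct my_ring *r)
    {
    	u64_stats_init(&r->syncp);	/* what the two hunks add */
    }

    static u64 my_read_packets(struct my_ring *r)
    {
    	unsigned int start;
    	u64 packets;

    	/* on 32-bit SMP this retries if a writer raced the read */
    	do {
    		start = u64_stats_fetch_begin_irq(&r->syncp);
    		packets = r->packets;
    	} while (u64_stats_fetch_retry_irq(&r->syncp, start));

    	return packets;
    }
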
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5794d98d946f..9c94ea9b2b80 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2734,7 +2734,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
2734 ppd.shared = pdev; 2734 ppd.shared = pdev;
2735 2735
2736 memset(&res, 0, sizeof(res)); 2736 memset(&res, 0, sizeof(res));
2737 if (!of_irq_to_resource(pnp, 0, &res)) { 2737 if (of_irq_to_resource(pnp, 0, &res) <= 0) {
2738 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); 2738 dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name);
2739 return -EINVAL; 2739 return -EINVAL;
2740 } 2740 }
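
of_irq_to_resource() returns the mapped IRQ number on success, and 0 or a negative errno on failure; the old !ret test silently accepted negative errors such as -EPROBE_DEFER. A sketch of the corrected check:

    #include <linux/errno.h>
    #include <linux/ioport.h>
    #include <linux/of_irq.h>

    static int my_get_irq(struct device_node *np, struct resource *res)
    {
    	int ret = of_irq_to_resource(np, 0, res);

    	/* 0 means no mapping; a negative value is an errno such as
    	 * -EPROBE_DEFER, which must not be treated as success
    	 */
    	if (ret <= 0)
    		return -EINVAL;

    	return ret;	/* the mapped Linux IRQ number */
    }
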
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b3d0c2e6347a..e588a0cdb074 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -22,6 +22,7 @@
22#include <linux/if_vlan.h> 22#include <linux/if_vlan.h>
23#include <linux/reset.h> 23#include <linux/reset.h>
24#include <linux/tcp.h> 24#include <linux/tcp.h>
25#include <linux/interrupt.h>
25 26
26#include "mtk_eth_soc.h" 27#include "mtk_eth_soc.h"
27 28
@@ -947,6 +948,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
947 RX_DMA_FPORT_MASK; 948 RX_DMA_FPORT_MASK;
948 mac--; 949 mac--;
949 950
951 if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
952 !eth->netdev[mac]))
953 goto release_desc;
954
950 netdev = eth->netdev[mac]; 955 netdev = eth->netdev[mac];
951 956
952 if (unlikely(test_bit(MTK_RESETTING, &eth->state))) 957 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 249a4584401a..b651c1210555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -283,7 +283,7 @@ int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
283} 283}
284 284
285/* Should be called under a lock */ 285/* Should be called under a lock */
286static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) 286static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
287{ 287{
288 struct mlx4_zone_allocator *zone_alloc = entry->allocator; 288 struct mlx4_zone_allocator *zone_alloc = entry->allocator;
289 289
@@ -315,8 +315,6 @@ static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry)
315 } 315 }
316 zone_alloc->mask = mask; 316 zone_alloc->mask = mask;
317 } 317 }
318
319 return 0;
320} 318}
321 319
322void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) 320void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
@@ -457,7 +455,7 @@ struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32
457int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) 455int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
458{ 456{
459 struct mlx4_zone_entry *zone; 457 struct mlx4_zone_entry *zone;
460 int res; 458 int res = 0;
461 459
462 spin_lock(&zones->lock); 460 spin_lock(&zones->lock);
463 461
@@ -468,7 +466,7 @@ int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid)
468 goto out; 466 goto out;
469 } 467 }
470 468
471 res = __mlx4_zone_remove_one_entry(zone); 469 __mlx4_zone_remove_one_entry(zone);
472 470
473out: 471out:
474 spin_unlock(&zones->lock); 472 spin_unlock(&zones->lock);
@@ -578,7 +576,7 @@ out:
578} 576}
579 577
580static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, 578static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
581 struct mlx4_buf *buf, gfp_t gfp) 579 struct mlx4_buf *buf)
582{ 580{
583 dma_addr_t t; 581 dma_addr_t t;
584 582
@@ -587,7 +585,7 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
587 buf->page_shift = get_order(size) + PAGE_SHIFT; 585 buf->page_shift = get_order(size) + PAGE_SHIFT;
588 buf->direct.buf = 586 buf->direct.buf =
589 dma_zalloc_coherent(&dev->persist->pdev->dev, 587 dma_zalloc_coherent(&dev->persist->pdev->dev,
590 size, &t, gfp); 588 size, &t, GFP_KERNEL);
591 if (!buf->direct.buf) 589 if (!buf->direct.buf)
592 return -ENOMEM; 590 return -ENOMEM;
593 591
@@ -607,10 +605,10 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
607 * multiple pages, so we don't require too much contiguous memory. 605 * multiple pages, so we don't require too much contiguous memory.
608 */ 606 */
609int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 607int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
610 struct mlx4_buf *buf, gfp_t gfp) 608 struct mlx4_buf *buf)
611{ 609{
612 if (size <= max_direct) { 610 if (size <= max_direct) {
613 return mlx4_buf_direct_alloc(dev, size, buf, gfp); 611 return mlx4_buf_direct_alloc(dev, size, buf);
614 } else { 612 } else {
615 dma_addr_t t; 613 dma_addr_t t;
616 int i; 614 int i;
@@ -620,14 +618,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
620 buf->npages = buf->nbufs; 618 buf->npages = buf->nbufs;
621 buf->page_shift = PAGE_SHIFT; 619 buf->page_shift = PAGE_SHIFT;
622 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), 620 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
623 gfp); 621 GFP_KERNEL);
624 if (!buf->page_list) 622 if (!buf->page_list)
625 return -ENOMEM; 623 return -ENOMEM;
626 624
627 for (i = 0; i < buf->nbufs; ++i) { 625 for (i = 0; i < buf->nbufs; ++i) {
628 buf->page_list[i].buf = 626 buf->page_list[i].buf =
629 dma_zalloc_coherent(&dev->persist->pdev->dev, 627 dma_zalloc_coherent(&dev->persist->pdev->dev,
630 PAGE_SIZE, &t, gfp); 628 PAGE_SIZE, &t, GFP_KERNEL);
631 if (!buf->page_list[i].buf) 629 if (!buf->page_list[i].buf)
632 goto err_free; 630 goto err_free;
633 631
@@ -663,12 +661,11 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
663} 661}
664EXPORT_SYMBOL_GPL(mlx4_buf_free); 662EXPORT_SYMBOL_GPL(mlx4_buf_free);
665 663
666static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, 664static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
667 gfp_t gfp)
668{ 665{
669 struct mlx4_db_pgdir *pgdir; 666 struct mlx4_db_pgdir *pgdir;
670 667
671 pgdir = kzalloc(sizeof *pgdir, gfp); 668 pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
672 if (!pgdir) 669 if (!pgdir)
673 return NULL; 670 return NULL;
674 671
@@ -676,7 +673,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
676 pgdir->bits[0] = pgdir->order0; 673 pgdir->bits[0] = pgdir->order0;
677 pgdir->bits[1] = pgdir->order1; 674 pgdir->bits[1] = pgdir->order1;
678 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, 675 pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
679 &pgdir->db_dma, gfp); 676 &pgdir->db_dma, GFP_KERNEL);
680 if (!pgdir->db_page) { 677 if (!pgdir->db_page) {
681 kfree(pgdir); 678 kfree(pgdir);
682 return NULL; 679 return NULL;
@@ -716,7 +713,7 @@ found:
716 return 0; 713 return 0;
717} 714}
718 715
719int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) 716int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
720{ 717{
721 struct mlx4_priv *priv = mlx4_priv(dev); 718 struct mlx4_priv *priv = mlx4_priv(dev);
722 struct mlx4_db_pgdir *pgdir; 719 struct mlx4_db_pgdir *pgdir;
@@ -728,7 +725,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
728 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 725 if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
729 goto out; 726 goto out;
730 727
731 pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); 728 pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
732 if (!pgdir) { 729 if (!pgdir) {
733 ret = -ENOMEM; 730 ret = -ENOMEM;
734 goto out; 731 goto out;
@@ -780,13 +777,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
780{ 777{
781 int err; 778 int err;
782 779
783 err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL); 780 err = mlx4_db_alloc(dev, &wqres->db, 1);
784 if (err) 781 if (err)
785 return err; 782 return err;
786 783
787 *wqres->db.db = 0; 784 *wqres->db.db = 0;
788 785
789 err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL); 786 err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
790 if (err) 787 if (err)
791 goto err_db; 788 goto err_db;
792 789
@@ -795,7 +792,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
795 if (err) 792 if (err)
796 goto err_buf; 793 goto err_buf;
797 794
798 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); 795 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
799 if (err) 796 if (err)
800 goto err_mtt; 797 goto err_mtt;
801 798
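
The mlx4 hunks here and below drop the gfp_t parameter from the allocation helpers: once no caller passes anything but GFP_KERNEL, threading the mask through mlx4_buf_alloc(), mlx4_db_alloc(), mlx4_table_get() and friends is dead weight. A sketch of the simplification, with hypothetical helpers:

    #include <linux/slab.h>

    /* before: a caller-chosen mask threaded through every level */
    static void *my_alloc_old(size_t size, gfp_t gfp)
    {
    	return kzalloc(size, gfp);
    }

    /* after: the mask is fixed at the only value callers still passed */
    static void *my_alloc(size_t size)
    {
    	return kzalloc(size, GFP_KERNEL);
    }

    /* the same series replaces kmalloc(n * size, ...) with kcalloc(),
     * which checks the n * size multiplication for overflow
     */
    static void *my_alloc_array(size_t n, size_t elem)
    {
    	return kcalloc(n, elem, GFP_KERNEL);
    }
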
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index fa6d2354a0e9..c56a511b918e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -224,11 +224,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
224 if (*cqn == -1) 224 if (*cqn == -1)
225 return -ENOMEM; 225 return -ENOMEM;
226 226
227 err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL); 227 err = mlx4_table_get(dev, &cq_table->table, *cqn);
228 if (err) 228 if (err)
229 goto err_out; 229 goto err_out;
230 230
231 err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL); 231 err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
232 if (err) 232 if (err)
233 goto err_put; 233 goto err_put;
234 return 0; 234 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c751a1d434ad..3d4e4a5d00d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,6 +223,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
223 struct ethtool_wolinfo *wol) 223 struct ethtool_wolinfo *wol)
224{ 224{
225 struct mlx4_en_priv *priv = netdev_priv(netdev); 225 struct mlx4_en_priv *priv = netdev_priv(netdev);
226 struct mlx4_caps *caps = &priv->mdev->dev->caps;
226 int err = 0; 227 int err = 0;
227 u64 config = 0; 228 u64 config = 0;
228 u64 mask; 229 u64 mask;
@@ -235,24 +236,24 @@ static void mlx4_en_get_wol(struct net_device *netdev,
235 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 : 236 mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
236 MLX4_DEV_CAP_FLAG_WOL_PORT2; 237 MLX4_DEV_CAP_FLAG_WOL_PORT2;
237 238
238 if (!(priv->mdev->dev->caps.flags & mask)) { 239 if (!(caps->flags & mask)) {
239 wol->supported = 0; 240 wol->supported = 0;
240 wol->wolopts = 0; 241 wol->wolopts = 0;
241 return; 242 return;
242 } 243 }
243 244
245 if (caps->wol_port[priv->port])
246 wol->supported = WAKE_MAGIC;
247 else
248 wol->supported = 0;
249
244 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port); 250 err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
245 if (err) { 251 if (err) {
246 en_err(priv, "Failed to get WoL information\n"); 252 en_err(priv, "Failed to get WoL information\n");
247 return; 253 return;
248 } 254 }
249 255
250 if (config & MLX4_EN_WOL_MAGIC) 256 if ((config & MLX4_EN_WOL_ENABLED) && (config & MLX4_EN_WOL_MAGIC))
251 wol->supported = WAKE_MAGIC;
252 else
253 wol->supported = 0;
254
255 if (config & MLX4_EN_WOL_ENABLED)
256 wol->wolopts = WAKE_MAGIC; 257 wol->wolopts = WAKE_MAGIC;
257 else 258 else
258 wol->wolopts = 0; 259 wol->wolopts = 0;
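
The en_ethtool.c hunk separates two things the old code conflated: reported support now comes from the per-port wol_port capability (read from QUERY_DEV_CAP in the fw.c hunk below), and WAKE_MAGIC is reported active only when the config register has both MLX4_EN_WOL_ENABLED and MLX4_EN_WOL_MAGIC set. A sketch of the reporting logic, with the bit names parameterized:

    #include <linux/ethtool.h>
    #include <linux/types.h>

    static void my_get_wol(struct ethtool_wolinfo *wol, bool port_has_wol,
    		       u64 config, u64 enabled_bit, u64 magic_bit)
    {
    	/* "supported" comes from the firmware capability, not from
    	 * whatever the current config register happens to contain
    	 */
    	wol->supported = port_has_wol ? WAKE_MAGIC : 0;

    	/* "active" requires both the enable and magic-packet bits */
    	if ((config & enabled_bit) && (config & magic_bit))
    		wol->wolopts = WAKE_MAGIC;
    	else
    		wol->wolopts = 0;
    }
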
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e5fb89505a13..bf1638044a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -574,16 +574,21 @@ static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
574 * header, the HW adds it. To address that, we are subtracting the pseudo 574 * header, the HW adds it. To address that, we are subtracting the pseudo
575 * header checksum from the checksum value provided by the HW. 575 * header checksum from the checksum value provided by the HW.
576 */ 576 */
577static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, 577static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
578 struct iphdr *iph) 578 struct iphdr *iph)
579{ 579{
580 __u16 length_for_csum = 0; 580 __u16 length_for_csum = 0;
581 __wsum csum_pseudo_header = 0; 581 __wsum csum_pseudo_header = 0;
582 __u8 ipproto = iph->protocol;
583
584 if (unlikely(ipproto == IPPROTO_SCTP))
585 return -1;
582 586
583 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2)); 587 length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
584 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr, 588 csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
585 length_for_csum, iph->protocol, 0); 589 length_for_csum, ipproto, 0);
586 skb->csum = csum_sub(hw_checksum, csum_pseudo_header); 590 skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
591 return 0;
587} 592}
588 593
589#if IS_ENABLED(CONFIG_IPV6) 594#if IS_ENABLED(CONFIG_IPV6)
@@ -594,17 +599,20 @@ static void get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
594static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, 599static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
595 struct ipv6hdr *ipv6h) 600 struct ipv6hdr *ipv6h)
596{ 601{
602 __u8 nexthdr = ipv6h->nexthdr;
597 __wsum csum_pseudo_hdr = 0; 603 __wsum csum_pseudo_hdr = 0;
598 604
599 if (unlikely(ipv6h->nexthdr == IPPROTO_FRAGMENT || 605 if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
600 ipv6h->nexthdr == IPPROTO_HOPOPTS)) 606 nexthdr == IPPROTO_HOPOPTS ||
607 nexthdr == IPPROTO_SCTP))
601 return -1; 608 return -1;
602 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); 609 hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr));
603 610
604 csum_pseudo_hdr = csum_partial(&ipv6h->saddr, 611 csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
605 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); 612 sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
606 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); 613 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len);
607 csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ntohs(ipv6h->nexthdr)); 614 csum_pseudo_hdr = csum_add(csum_pseudo_hdr,
615 (__force __wsum)htons(nexthdr));
608 616
609 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); 617 skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr);
610 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); 618 skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0));
@@ -627,11 +635,10 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
627 } 635 }
628 636
629 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) 637 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4))
630 get_fixed_ipv4_csum(hw_checksum, skb, hdr); 638 return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
631#if IS_ENABLED(CONFIG_IPV6) 639#if IS_ENABLED(CONFIG_IPV6)
632 else if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) 640 if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
633 if (unlikely(get_fixed_ipv6_csum(hw_checksum, skb, hdr))) 641 return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
634 return -1;
635#endif 642#endif
636 return 0; 643 return 0;
637} 644}
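
Both checksum helpers now return a status so check_csum() can punt to software when the hardware sum is unusable; SCTP joins the IPv6 fragment and hop-by-hop cases because its CRC32c cannot be derived from the ones'-complement sum the NIC provides. A sketch mirroring the fixed IPv4 helper:

    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/skbuff.h>
    #include <net/checksum.h>

    static int my_fix_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
    			    struct iphdr *iph)
    {
    	__wsum pseudo;
    	u16 payload_len;

    	/* SCTP uses CRC32c, which ones'-complement arithmetic cannot
    	 * reconstruct, so force software checksum validation
    	 */
    	if (unlikely(iph->protocol == IPPROTO_SCTP))
    		return -1;

    	payload_len = be16_to_cpu(iph->tot_len) - (iph->ihl << 2);
    	pseudo = csum_tcpudp_nofold(iph->saddr, iph->daddr,
    				    payload_len, iph->protocol, 0);

    	/* hardware summed pseudo header + payload; keep payload only */
    	skb->csum = csum_sub(hw_checksum, pseudo);
    	return 0;
    }
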
@@ -1042,7 +1049,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
1042 if (!context) 1049 if (!context)
1043 return -ENOMEM; 1050 return -ENOMEM;
1044 1051
1045 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); 1052 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
1046 if (err) { 1053 if (err) {
1047 en_err(priv, "Failed to allocate qp #%x\n", qpn); 1054 en_err(priv, "Failed to allocate qp #%x\n", qpn);
1048 goto out; 1055 goto out;
@@ -1086,7 +1093,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
1086 en_err(priv, "Failed reserving drop qpn\n"); 1093 en_err(priv, "Failed reserving drop qpn\n");
1087 return err; 1094 return err;
1088 } 1095 }
1089 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); 1096 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
1090 if (err) { 1097 if (err) {
1091 en_err(priv, "Failed allocating drop qp\n"); 1098 en_err(priv, "Failed allocating drop qp\n");
1092 mlx4_qp_release_range(priv->mdev->dev, qpn, 1); 1099 mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
@@ -1158,8 +1165,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1158 } 1165 }
1159 1166
1160 /* Configure RSS indirection qp */ 1167 /* Configure RSS indirection qp */
1161 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp, 1168 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1162 GFP_KERNEL);
1163 if (err) { 1169 if (err) {
1164 en_err(priv, "Failed to allocate RSS indirection QP\n"); 1170 en_err(priv, "Failed to allocate RSS indirection QP\n");
1165 goto rss_err; 1171 goto rss_err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4f3a9b27ce4a..73faa3d77921 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -111,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
111 goto err_hwq_res; 111 goto err_hwq_res;
112 } 112 }
113 113
114 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL); 114 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
115 if (err) { 115 if (err) {
116 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 116 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
117 goto err_reserve; 117 goto err_reserve;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 37e84a59e751..041c0ed65929 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -159,8 +159,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
159 [32] = "Loopback source checks support", 159 [32] = "Loopback source checks support",
160 [33] = "RoCEv2 support", 160 [33] = "RoCEv2 support",
161 [34] = "DMFS Sniffer support (UC & MC)", 161 [34] = "DMFS Sniffer support (UC & MC)",
162 [35] = "QinQ VST mode support", 162 [35] = "Diag counters per port",
163 [36] = "sl to vl mapping table change event support" 163 [36] = "QinQ VST mode support",
164 [37] = "sl to vl mapping table change event support",
164 }; 165 };
165 int i; 166 int i;
166 167
@@ -764,6 +765,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
764#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e 765#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET 0x3e
765#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f 766#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
766#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40 767#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
768#define QUERY_DEV_CAP_WOL_OFFSET 0x43
767#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44 769#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
768#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48 770#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
769#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49 771#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -920,6 +922,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
920 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 922 MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
921 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET); 923 MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
922 dev_cap->flags = flags | (u64)ext_flags << 32; 924 dev_cap->flags = flags | (u64)ext_flags << 32;
925 MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
926 dev_cap->wol_port[1] = !!(field & 0x20);
927 dev_cap->wol_port[2] = !!(field & 0x40);
923 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET); 928 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
924 dev_cap->reserved_uars = field >> 4; 929 dev_cap->reserved_uars = field >> 4;
925 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET); 930 MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 5343a0599253..b52ba01aa486 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -129,6 +129,7 @@ struct mlx4_dev_cap {
129 u32 dmfs_high_rate_qpn_range; 129 u32 dmfs_high_rate_qpn_range;
130 struct mlx4_rate_limit_caps rl_caps; 130 struct mlx4_rate_limit_caps rl_caps;
131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1]; 131 struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
132 bool wol_port[MLX4_MAX_PORTS + 1];
132}; 133};
133 134
134struct mlx4_func_cap { 135struct mlx4_func_cap {
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index e1f9e7cebf8f..5a7816e7c7b4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -251,8 +251,7 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
251 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 251 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
252} 252}
253 253
254int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, 254int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
255 gfp_t gfp)
256{ 255{
257 u32 i = (obj & (table->num_obj - 1)) / 256 u32 i = (obj & (table->num_obj - 1)) /
258 (MLX4_TABLE_CHUNK_SIZE / table->obj_size); 257 (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
@@ -266,7 +265,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
266 } 265 }
267 266
268 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, 267 table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
269 (table->lowmem ? gfp : GFP_HIGHUSER) | 268 (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
270 __GFP_NOWARN, table->coherent); 269 __GFP_NOWARN, table->coherent);
271 if (!table->icm[i]) { 270 if (!table->icm[i]) {
272 ret = -ENOMEM; 271 ret = -ENOMEM;
@@ -363,7 +362,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
363 u32 i; 362 u32 i;
364 363
365 for (i = start; i <= end; i += inc) { 364 for (i = start; i <= end; i += inc) {
366 err = mlx4_table_get(dev, table, i, GFP_KERNEL); 365 err = mlx4_table_get(dev, table, i);
367 if (err) 366 if (err)
368 goto fail; 367 goto fail;
369 } 368 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index 0c7364550150..dee67fa39107 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -71,8 +71,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
71 gfp_t gfp_mask, int coherent); 71 gfp_t gfp_mask, int coherent);
72void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); 72void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
73 73
74int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, 74int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
75 gfp_t gfp);
76void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); 75void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
77int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, 76int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
78 u32 start, u32 end); 77 u32 start, u32 end);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a27c9c13a36e..5fe5cdc51357 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -424,13 +424,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
424 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 424 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
425 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 425 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 426 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
427 dev->caps.wol_port[1] = dev_cap->wol_port[1];
428 dev->caps.wol_port[2] = dev_cap->wol_port[2];
427 429
428 /* Save uar page shift */ 430 /* Save uar page shift */
429 if (!mlx4_is_slave(dev)) { 431 if (!mlx4_is_slave(dev)) {
430 /* Virtual PCI function needs to determine UAR page size from 432 /* Virtual PCI function needs to determine UAR page size from
431 * firmware. Only master PCI function can set the uar page size 433 * firmware. Only master PCI function can set the uar page size
432 */ 434 */
433 if (enable_4k_uar) 435 if (enable_4k_uar || !dev->persist->num_vfs)
434 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; 436 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
435 else 437 else
436 dev->uar_page_shift = PAGE_SHIFT; 438 dev->uar_page_shift = PAGE_SHIFT;
@@ -2275,7 +2277,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
2275 2277
2276 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2278 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2277 2279
2278 if (enable_4k_uar) { 2280 if (enable_4k_uar || !dev->persist->num_vfs) {
2279 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + 2281 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2280 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; 2282 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2281 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2283 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 30616cd0140d..706d7f21ac5c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -969,7 +969,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
969void mlx4_cleanup_qp_table(struct mlx4_dev *dev); 969void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
970void mlx4_cleanup_srq_table(struct mlx4_dev *dev); 970void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
971void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); 971void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
972int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp); 972int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
973void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); 973void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
974int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); 974int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
975void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); 975void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
@@ -977,7 +977,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
977void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); 977void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
978int __mlx4_mpt_reserve(struct mlx4_dev *dev); 978int __mlx4_mpt_reserve(struct mlx4_dev *dev);
979void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); 979void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
980int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp); 980int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
981void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); 981void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
982u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); 982u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
983void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); 983void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index ce852ca22a96..24282cd017d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -479,14 +479,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
479 __mlx4_mpt_release(dev, index); 479 __mlx4_mpt_release(dev, index);
480} 480}
481 481
482int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) 482int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
483{ 483{
484 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 484 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
485 485
486 return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp); 486 return mlx4_table_get(dev, &mr_table->dmpt_table, index);
487} 487}
488 488
489static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) 489static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
490{ 490{
491 u64 param = 0; 491 u64 param = 0;
492 492
@@ -497,7 +497,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
497 MLX4_CMD_TIME_CLASS_A, 497 MLX4_CMD_TIME_CLASS_A,
498 MLX4_CMD_WRAPPED); 498 MLX4_CMD_WRAPPED);
499 } 499 }
500 return __mlx4_mpt_alloc_icm(dev, index, gfp); 500 return __mlx4_mpt_alloc_icm(dev, index);
501} 501}
502 502
503void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) 503void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
@@ -629,7 +629,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
629 struct mlx4_mpt_entry *mpt_entry; 629 struct mlx4_mpt_entry *mpt_entry;
630 int err; 630 int err;
631 631
632 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL); 632 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
633 if (err) 633 if (err)
634 return err; 634 return err;
635 635
@@ -787,14 +787,13 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
787EXPORT_SYMBOL_GPL(mlx4_write_mtt); 787EXPORT_SYMBOL_GPL(mlx4_write_mtt);
788 788
789int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 789int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
790 struct mlx4_buf *buf, gfp_t gfp) 790 struct mlx4_buf *buf)
791{ 791{
792 u64 *page_list; 792 u64 *page_list;
793 int err; 793 int err;
794 int i; 794 int i;
795 795
796 page_list = kmalloc(buf->npages * sizeof *page_list, 796 page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
797 gfp);
798 if (!page_list) 797 if (!page_list)
799 return -ENOMEM; 798 return -ENOMEM;
800 799
@@ -841,7 +840,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
841 struct mlx4_mpt_entry *mpt_entry; 840 struct mlx4_mpt_entry *mpt_entry;
842 int err; 841 int err;
843 842
844 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL); 843 err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
845 if (err) 844 if (err)
846 return err; 845 return err;
847 846
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 5a310d313e94..26747212526b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -301,29 +301,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
301} 301}
302EXPORT_SYMBOL_GPL(mlx4_qp_release_range); 302EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
303 303
304int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) 304int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
305{ 305{
306 struct mlx4_priv *priv = mlx4_priv(dev); 306 struct mlx4_priv *priv = mlx4_priv(dev);
307 struct mlx4_qp_table *qp_table = &priv->qp_table; 307 struct mlx4_qp_table *qp_table = &priv->qp_table;
308 int err; 308 int err;
309 309
310 err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp); 310 err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
311 if (err) 311 if (err)
312 goto err_out; 312 goto err_out;
313 313
314 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp); 314 err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
315 if (err) 315 if (err)
316 goto err_put_qp; 316 goto err_put_qp;
317 317
318 err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp); 318 err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
319 if (err) 319 if (err)
320 goto err_put_auxc; 320 goto err_put_auxc;
321 321
322 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp); 322 err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
323 if (err) 323 if (err)
324 goto err_put_altc; 324 goto err_put_altc;
325 325
326 err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp); 326 err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
327 if (err) 327 if (err)
328 goto err_put_rdmarc; 328 goto err_put_rdmarc;
329 329
@@ -345,7 +345,7 @@ err_out:
345 return err; 345 return err;
346} 346}
347 347
348static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) 348static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
349{ 349{
350 u64 param = 0; 350 u64 param = 0;
351 351
@@ -355,7 +355,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
355 MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, 355 MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
356 MLX4_CMD_WRAPPED); 356 MLX4_CMD_WRAPPED);
357 } 357 }
358 return __mlx4_qp_alloc_icm(dev, qpn, gfp); 358 return __mlx4_qp_alloc_icm(dev, qpn);
359} 359}
360 360
361void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) 361void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
@@ -397,7 +397,7 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
397 return qp; 397 return qp;
398} 398}
399 399
400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 400int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
401{ 401{
402 struct mlx4_priv *priv = mlx4_priv(dev); 402 struct mlx4_priv *priv = mlx4_priv(dev);
403 struct mlx4_qp_table *qp_table = &priv->qp_table; 403 struct mlx4_qp_table *qp_table = &priv->qp_table;
@@ -408,7 +408,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
408 408
409 qp->qpn = qpn; 409 qp->qpn = qpn;
410 410
411 err = mlx4_qp_alloc_icm(dev, qpn, gfp); 411 err = mlx4_qp_alloc_icm(dev, qpn);
412 if (err) 412 if (err)
413 return err; 413 return err;
414 414
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 812783865205..215e21c3dc8a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1822,7 +1822,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1822 return err; 1822 return err;
1823 1823
1824 if (!fw_reserved(dev, qpn)) { 1824 if (!fw_reserved(dev, qpn)) {
1825 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL); 1825 err = __mlx4_qp_alloc_icm(dev, qpn);
1826 if (err) { 1826 if (err) {
1827 res_abort_move(dev, slave, RES_QP, qpn); 1827 res_abort_move(dev, slave, RES_QP, qpn);
1828 return err; 1828 return err;
@@ -1909,7 +1909,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1909 if (err) 1909 if (err)
1910 return err; 1910 return err;
1911 1911
1912 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL); 1912 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1913 if (err) { 1913 if (err) {
1914 res_abort_move(dev, slave, RES_MPT, id); 1914 res_abort_move(dev, slave, RES_MPT, id);
1915 return err; 1915 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f44d089e2ca6..bedf52126824 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -100,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
100 if (*srqn == -1) 100 if (*srqn == -1)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL); 103 err = mlx4_table_get(dev, &srq_table->table, *srqn);
104 if (err) 104 if (err)
105 goto err_out; 105 goto err_out;
106 106
107 err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL); 107 err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
108 if (err) 108 if (err)
109 goto err_put; 109 goto err_put;
110 return 0; 110 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f5a2c605749f..31cbe5e86a01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -786,6 +786,10 @@ static void cb_timeout_handler(struct work_struct *work)
786 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 786 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
787} 787}
788 788
789static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
790static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
791 struct mlx5_cmd_msg *msg);
792
789static void cmd_work_handler(struct work_struct *work) 793static void cmd_work_handler(struct work_struct *work)
790{ 794{
791 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 795 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -796,17 +800,28 @@ static void cmd_work_handler(struct work_struct *work)
796 struct semaphore *sem; 800 struct semaphore *sem;
797 unsigned long flags; 801 unsigned long flags;
798 bool poll_cmd = ent->polling; 802 bool poll_cmd = ent->polling;
803 int alloc_ret;
799 804
800 805
801 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 806 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
802 down(sem); 807 down(sem);
803 if (!ent->page_queue) { 808 if (!ent->page_queue) {
804 ent->idx = alloc_ent(cmd); 809 alloc_ret = alloc_ent(cmd);
805 if (ent->idx < 0) { 810 if (alloc_ret < 0) {
806 mlx5_core_err(dev, "failed to allocate command entry\n"); 811 mlx5_core_err(dev, "failed to allocate command entry\n");
812 if (ent->callback) {
813 ent->callback(-EAGAIN, ent->context);
814 mlx5_free_cmd_msg(dev, ent->out);
815 free_msg(dev, ent->in);
816 free_cmd(ent);
817 } else {
818 ent->ret = -EAGAIN;
819 complete(&ent->done);
820 }
807 up(sem); 821 up(sem);
808 return; 822 return;
809 } 823 }
824 ent->idx = alloc_ret;
810 } else { 825 } else {
811 ent->idx = cmd->max_reg_cmds; 826 ent->idx = cmd->max_reg_cmds;
812 spin_lock_irqsave(&cmd->alloc_lock, flags); 827 spin_lock_irqsave(&cmd->alloc_lock, flags);
@@ -967,7 +982,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
967 982
968 err = wait_func(dev, ent); 983 err = wait_func(dev, ent);
969 if (err == -ETIMEDOUT) 984 if (err == -ETIMEDOUT)
970 goto out_free; 985 goto out;
971 986
972 ds = ent->ts2 - ent->ts1; 987 ds = ent->ts2 - ent->ts1;
973 op = MLX5_GET(mbox_in, in->first.data, opcode); 988 op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1430,6 +1445,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1430 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", 1445 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
1431 ent->idx); 1446 ent->idx);
1432 free_ent(cmd, ent->idx); 1447 free_ent(cmd, ent->idx);
1448 free_cmd(ent);
1433 } 1449 }
1434 continue; 1450 continue;
1435 } 1451 }
@@ -1488,7 +1504,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
1488 free_msg(dev, ent->in); 1504 free_msg(dev, ent->in);
1489 1505
1490 err = err ? err : ent->status; 1506 err = err ? err : ent->status;
1491 free_cmd(ent); 1507 if (!forced)
1508 free_cmd(ent);
1492 callback(err, context); 1509 callback(err, context);
1493 } else { 1510 } else {
1494 complete(&ent->done); 1511 complete(&ent->done);
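
The cmd.c hunks close two holes: when alloc_ent() finds no free slot, the command is now failed through the same paths a completed command would take (callback with -EAGAIN for async users, ent->ret plus complete() for sync ones) instead of leaking the entry, and free_cmd() is skipped for forced completions so the entry is not freed twice. A condensed sketch of the failure delivery, with hypothetical types:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct my_cmd_ent {
    	void (*callback)(int status, void *context);
    	void *context;
    	int ret;
    	struct completion done;
    };

    static void my_fail_ent(struct my_cmd_ent *ent)
    {
    	if (ent->callback) {
    		/* async caller: deliver the error through the normal
    		 * completion path and release the entry's resources
    		 */
    		ent->callback(-EAGAIN, ent->context);
    		/* ... free the in/out mailboxes and the entry ... */
    	} else {
    		/* sync caller: wake the waiter with the error code */
    		ent->ret = -EAGAIN;
    		complete(&ent->done);
    	}
    }
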
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e1b7ddfecd01..0039b4725405 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -266,6 +266,14 @@ struct mlx5e_dcbx {
266}; 266};
267#endif 267#endif
268 268
269#define MAX_PIN_NUM 8
270struct mlx5e_pps {
271 u8 pin_caps[MAX_PIN_NUM];
272 struct work_struct out_work;
273 u64 start[MAX_PIN_NUM];
274 u8 enabled;
275};
276
269struct mlx5e_tstamp { 277struct mlx5e_tstamp {
270 rwlock_t lock; 278 rwlock_t lock;
271 struct cyclecounter cycles; 279 struct cyclecounter cycles;
@@ -277,7 +285,7 @@ struct mlx5e_tstamp {
277 struct mlx5_core_dev *mdev; 285 struct mlx5_core_dev *mdev;
278 struct ptp_clock *ptp; 286 struct ptp_clock *ptp;
279 struct ptp_clock_info ptp_info; 287 struct ptp_clock_info ptp_info;
280 u8 *pps_pin_caps; 288 struct mlx5e_pps pps_info;
281}; 289};
282 290
283enum { 291enum {
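
The en.h hunk replaces the bare pps_pin_caps array with an mlx5e_pps aggregate carrying a work item and per-pin start[] timestamps; the en_clock.c hunk that follows drains start[] from the worker by swapping in zero under the timestamp lock, so each queued edge is programmed into the MTPPS register exactly once. A sketch of that claim-and-clear handoff, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define MY_MAX_PINS	8

    struct my_pps {
    	rwlock_t lock;
    	u64 start[MY_MAX_PINS];
    };

    static u64 my_claim_start(struct my_pps *pps, int pin)
    {
    	unsigned long flags;
    	u64 tstart;

    	write_lock_irqsave(&pps->lock, flags);
    	tstart = pps->start[pin];
    	pps->start[pin] = 0;	/* zero marks "nothing pending" */
    	write_unlock_irqrestore(&pps->lock, flags);

    	return tstart;		/* 0 if no event was queued */
    }
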
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 66f432385dbb..84dd63e74041 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -53,6 +53,15 @@ enum {
53 MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, 53 MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
54}; 54};
55 55
56enum {
57 MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
58 MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
59 MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
60 MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
61 MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
62 MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
63};
64
56void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, 65void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
57 struct skb_shared_hwtstamps *hwts) 66 struct skb_shared_hwtstamps *hwts)
58{ 67{
@@ -73,17 +82,46 @@ static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
73 return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; 82 return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
74} 83}
75 84
85static void mlx5e_pps_out(struct work_struct *work)
86{
87 struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
88 out_work);
89 struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
90 pps_info);
91 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
92 unsigned long flags;
93 int i;
94
95 for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
96 u64 tstart;
97
98 write_lock_irqsave(&tstamp->lock, flags);
99 tstart = tstamp->pps_info.start[i];
100 tstamp->pps_info.start[i] = 0;
101 write_unlock_irqrestore(&tstamp->lock, flags);
102 if (!tstart)
103 continue;
104
105 MLX5_SET(mtpps_reg, in, pin, i);
106 MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
107 MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
108 mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
109 }
110}
111
76static void mlx5e_timestamp_overflow(struct work_struct *work) 112static void mlx5e_timestamp_overflow(struct work_struct *work)
77{ 113{
78 struct delayed_work *dwork = to_delayed_work(work); 114 struct delayed_work *dwork = to_delayed_work(work);
79 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, 115 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
80 overflow_work); 116 overflow_work);
117 struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
81 unsigned long flags; 118 unsigned long flags;
82 119
83 write_lock_irqsave(&tstamp->lock, flags); 120 write_lock_irqsave(&tstamp->lock, flags);
84 timecounter_read(&tstamp->clock); 121 timecounter_read(&tstamp->clock);
85 write_unlock_irqrestore(&tstamp->lock, flags); 122 write_unlock_irqrestore(&tstamp->lock, flags);
86 schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); 123 queue_delayed_work(priv->wq, &tstamp->overflow_work,
124 msecs_to_jiffies(tstamp->overflow_period * 1000));
87} 125}
88 126
89int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) 127int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
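
mlx5e_pps_out() holds the timestamp lock only long enough to read and clear each pending start value, then programs the MTPPS register with the lock released, so slow device writes never run under the rwlock. A sketch of the same read-and-clear-then-act shape, using a pthread mutex in place of the rwlock and placeholder state:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NPINS 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t start[NPINS];           /* pending pulse times; 0 = idle */

static void program_pin(int pin, uint64_t tstart)
{
        /* stands in for the slow MTPPS register write */
        printf("pin %d armed at %llu\n", pin, (unsigned long long)tstart);
}

static void pps_out_work(void)
{
        int i;

        for (i = 0; i < NPINS; i++) {
                uint64_t tstart;

                pthread_mutex_lock(&lock);
                tstart = start[i];      /* snapshot ... */
                start[i] = 0;           /* ... and consume under the lock */
                pthread_mutex_unlock(&lock);

                if (!tstart)
                        continue;
                program_pin(i, tstart); /* slow part runs unlocked */
        }
}

int main(void)
{
        start[3] = 1000;
        pps_out_work();
        return 0;
}
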
@@ -213,18 +251,6 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
213 int neg_adj = 0; 251 int neg_adj = 0;
214 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 252 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
215 ptp_info); 253 ptp_info);
216 struct mlx5e_priv *priv =
217 container_of(tstamp, struct mlx5e_priv, tstamp);
218
219 if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
220 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
221
222 /* For future use need to add a loop for finding all 1PPS out pins */
223 MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
224 MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
225
226 mlx5_set_mtpps(priv->mdev, in, sizeof(in));
227 }
228 254
229 if (delta < 0) { 255 if (delta < 0) {
230 neg_adj = 1; 256 neg_adj = 1;
@@ -253,12 +279,13 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
253 struct mlx5e_priv *priv = 279 struct mlx5e_priv *priv =
254 container_of(tstamp, struct mlx5e_priv, tstamp); 280 container_of(tstamp, struct mlx5e_priv, tstamp);
255 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 281 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
282 u32 field_select = 0;
283 u8 pin_mode = 0;
256 u8 pattern = 0; 284 u8 pattern = 0;
257 int pin = -1; 285 int pin = -1;
258 int err = 0; 286 int err = 0;
259 287
260 if (!MLX5_CAP_GEN(priv->mdev, pps) || 288 if (!MLX5_PPS_CAP(priv->mdev))
261 !MLX5_CAP_GEN(priv->mdev, pps_modify))
262 return -EOPNOTSUPP; 289 return -EOPNOTSUPP;
263 290
264 if (rq->extts.index >= tstamp->ptp_info.n_pins) 291 if (rq->extts.index >= tstamp->ptp_info.n_pins)
@@ -268,15 +295,21 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
268 pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); 295 pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
269 if (pin < 0) 296 if (pin < 0)
270 return -EBUSY; 297 return -EBUSY;
298 pin_mode = MLX5E_PIN_MODE_IN;
299 pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
300 field_select = MLX5E_MTPPS_FS_PIN_MODE |
301 MLX5E_MTPPS_FS_PATTERN |
302 MLX5E_MTPPS_FS_ENABLE;
303 } else {
304 pin = rq->extts.index;
305 field_select = MLX5E_MTPPS_FS_ENABLE;
271 } 306 }
272 307
273 if (rq->extts.flags & PTP_FALLING_EDGE)
274 pattern = 1;
275
276 MLX5_SET(mtpps_reg, in, pin, pin); 308 MLX5_SET(mtpps_reg, in, pin, pin);
277 MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN); 309 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
278 MLX5_SET(mtpps_reg, in, pattern, pattern); 310 MLX5_SET(mtpps_reg, in, pattern, pattern);
279 MLX5_SET(mtpps_reg, in, enable, on); 311 MLX5_SET(mtpps_reg, in, enable, on);
312 MLX5_SET(mtpps_reg, in, field_select, field_select);
280 313
281 err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); 314 err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
282 if (err) 315 if (err)
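
The extts hunk relies on the new field_select mask in the MTPPS register: a full configure selects pin mode, pattern and enable, while a plain disable selects only the enable bit so the rest of the pin state survives. A toy model of such masked register updates, with an invented bit layout:

#include <stdint.h>
#include <stdio.h>

enum {
        FS_ENABLE   = 1u << 0,
        FS_PATTERN  = 1u << 2,
        FS_PIN_MODE = 1u << 3,
};

struct pin_reg { uint8_t pin_mode, pattern, enable; };

/* Apply only the fields named in field_select, as the firmware would. */
static void apply(struct pin_reg *reg, const struct pin_reg *req,
                  uint32_t field_select)
{
        if (field_select & FS_PIN_MODE)
                reg->pin_mode = req->pin_mode;
        if (field_select & FS_PATTERN)
                reg->pattern = req->pattern;
        if (field_select & FS_ENABLE)
                reg->enable = req->enable;
}

int main(void)
{
        struct pin_reg reg = { .pin_mode = 1, .pattern = 1, .enable = 1 };
        struct pin_reg off = { .enable = 0 };

        /* disable-only request: mode and pattern must survive */
        apply(&reg, &off, FS_ENABLE);
        printf("mode=%u pattern=%u enable=%u\n",
               reg.pin_mode, reg.pattern, reg.enable);
        return 0;
}
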
@@ -295,14 +328,18 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
295 struct mlx5e_priv *priv = 328 struct mlx5e_priv *priv =
296 container_of(tstamp, struct mlx5e_priv, tstamp); 329 container_of(tstamp, struct mlx5e_priv, tstamp);
297 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 330 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
298 u64 nsec_now, nsec_delta, time_stamp; 331 u64 nsec_now, nsec_delta, time_stamp = 0;
299 u64 cycles_now, cycles_delta; 332 u64 cycles_now, cycles_delta;
300 struct timespec64 ts; 333 struct timespec64 ts;
301 unsigned long flags; 334 unsigned long flags;
335 u32 field_select = 0;
336 u8 pin_mode = 0;
337 u8 pattern = 0;
302 int pin = -1; 338 int pin = -1;
339 int err = 0;
303 s64 ns; 340 s64 ns;
304 341
305 if (!MLX5_CAP_GEN(priv->mdev, pps_modify)) 342 if (!MLX5_PPS_CAP(priv->mdev))
306 return -EOPNOTSUPP; 343 return -EOPNOTSUPP;
307 344
308 if (rq->perout.index >= tstamp->ptp_info.n_pins) 345 if (rq->perout.index >= tstamp->ptp_info.n_pins)
@@ -313,32 +350,60 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
313 rq->perout.index); 350 rq->perout.index);
314 if (pin < 0) 351 if (pin < 0)
315 return -EBUSY; 352 return -EBUSY;
316 }
317 353
318 ts.tv_sec = rq->perout.period.sec; 354 pin_mode = MLX5E_PIN_MODE_OUT;
319 ts.tv_nsec = rq->perout.period.nsec; 355 pattern = MLX5E_OUT_PATTERN_PERIODIC;
320 ns = timespec64_to_ns(&ts); 356 ts.tv_sec = rq->perout.period.sec;
321 if (on) 357 ts.tv_nsec = rq->perout.period.nsec;
358 ns = timespec64_to_ns(&ts);
359
322 if ((ns >> 1) != 500000000LL) 360 if ((ns >> 1) != 500000000LL)
323 return -EINVAL; 361 return -EINVAL;
324 ts.tv_sec = rq->perout.start.sec; 362
325 ts.tv_nsec = rq->perout.start.nsec; 363 ts.tv_sec = rq->perout.start.sec;
326 ns = timespec64_to_ns(&ts); 364 ts.tv_nsec = rq->perout.start.nsec;
327 cycles_now = mlx5_read_internal_timer(tstamp->mdev); 365 ns = timespec64_to_ns(&ts);
328 write_lock_irqsave(&tstamp->lock, flags); 366 cycles_now = mlx5_read_internal_timer(tstamp->mdev);
329 nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); 367 write_lock_irqsave(&tstamp->lock, flags);
330 nsec_delta = ns - nsec_now; 368 nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
331 cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, 369 nsec_delta = ns - nsec_now;
332 tstamp->cycles.mult); 370 cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
333 write_unlock_irqrestore(&tstamp->lock, flags); 371 tstamp->cycles.mult);
334 time_stamp = cycles_now + cycles_delta; 372 write_unlock_irqrestore(&tstamp->lock, flags);
373 time_stamp = cycles_now + cycles_delta;
374 field_select = MLX5E_MTPPS_FS_PIN_MODE |
375 MLX5E_MTPPS_FS_PATTERN |
376 MLX5E_MTPPS_FS_ENABLE |
377 MLX5E_MTPPS_FS_TIME_STAMP;
378 } else {
379 pin = rq->perout.index;
380 field_select = MLX5E_MTPPS_FS_ENABLE;
381 }
382
335 MLX5_SET(mtpps_reg, in, pin, pin); 383 MLX5_SET(mtpps_reg, in, pin, pin);
336 MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT); 384 MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
337 MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC); 385 MLX5_SET(mtpps_reg, in, pattern, pattern);
338 MLX5_SET(mtpps_reg, in, enable, on); 386 MLX5_SET(mtpps_reg, in, enable, on);
339 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); 387 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
388 MLX5_SET(mtpps_reg, in, field_select, field_select);
389
390 err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
391 if (err)
392 return err;
340 393
341 return mlx5_set_mtpps(priv->mdev, in, sizeof(in)); 394 return mlx5_set_mtppse(priv->mdev, pin, 0,
395 MLX5E_EVENT_MODE_REPETETIVE & on);
396}
397
398static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
399 struct ptp_clock_request *rq,
400 int on)
401{
402 struct mlx5e_tstamp *tstamp =
403 container_of(ptp, struct mlx5e_tstamp, ptp_info);
404
405 tstamp->pps_info.enabled = !!on;
406 return 0;
342} 407}
343 408
344static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, 409static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
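
Arming the periodic output translates the requested wall-clock start into the NIC's free-running counter: read the counter, convert to nanoseconds through the timecounter, and scale the remaining delta back to cycles with the clock's mult/shift pair. A self-contained sketch of that arithmetic (the mult/shift values are made up):

#include <stdint.h>
#include <stdio.h>

/* cycles -> ns, as timecounter_cyc2time() would do it */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

int main(void)
{
        const uint32_t mult = 1 << 20, shift = 20; /* 1 cycle == 1 ns here */
        uint64_t cycles_now = 123456789;
        uint64_t target_ns = cyc2ns(cycles_now, mult, shift) + 1000000000ull;

        /* scale the ns delta back to cycles: the inverse of cyc2ns */
        uint64_t nsec_now = cyc2ns(cycles_now, mult, shift);
        uint64_t nsec_delta = target_ns - nsec_now;
        uint64_t cycles_delta = (nsec_delta << shift) / mult;
        uint64_t time_stamp = cycles_now + cycles_delta;

        printf("arm at cycle %llu\n", (unsigned long long)time_stamp);
        return 0;
}
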
@@ -350,6 +415,8 @@ static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
350 return mlx5e_extts_configure(ptp, rq, on); 415 return mlx5e_extts_configure(ptp, rq, on);
351 case PTP_CLK_REQ_PEROUT: 416 case PTP_CLK_REQ_PEROUT:
352 return mlx5e_perout_configure(ptp, rq, on); 417 return mlx5e_perout_configure(ptp, rq, on);
418 case PTP_CLK_REQ_PPS:
419 return mlx5e_pps_configure(ptp, rq, on);
353 default: 420 default:
354 return -EOPNOTSUPP; 421 return -EOPNOTSUPP;
355 } 422 }
@@ -395,6 +462,7 @@ static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
395 return -ENOMEM; 462 return -ENOMEM;
396 tstamp->ptp_info.enable = mlx5e_ptp_enable; 463 tstamp->ptp_info.enable = mlx5e_ptp_enable;
397 tstamp->ptp_info.verify = mlx5e_ptp_verify; 464 tstamp->ptp_info.verify = mlx5e_ptp_verify;
465 tstamp->ptp_info.pps = 1;
398 466
399 for (i = 0; i < tstamp->ptp_info.n_pins; i++) { 467 for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
400 snprintf(tstamp->ptp_info.pin_config[i].name, 468 snprintf(tstamp->ptp_info.pin_config[i].name,
@@ -422,22 +490,56 @@ static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
422 tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, 490 tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
423 cap_max_num_of_pps_out_pins); 491 cap_max_num_of_pps_out_pins);
424 492
425 tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); 493 tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
426 tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); 494 tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
427 tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); 495 tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
428 tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); 496 tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
429 tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); 497 tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
430 tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); 498 tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
431 tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); 499 tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
432 tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); 500 tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
433} 501}
434 502
435void mlx5e_pps_event_handler(struct mlx5e_priv *priv, 503void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
436 struct ptp_clock_event *event) 504 struct ptp_clock_event *event)
437{ 505{
506 struct net_device *netdev = priv->netdev;
438 struct mlx5e_tstamp *tstamp = &priv->tstamp; 507 struct mlx5e_tstamp *tstamp = &priv->tstamp;
508 struct timespec64 ts;
509 u64 nsec_now, nsec_delta;
510 u64 cycles_now, cycles_delta;
511 int pin = event->index;
512 s64 ns;
513 unsigned long flags;
439 514
440 ptp_clock_event(tstamp->ptp, event); 515 switch (tstamp->ptp_info.pin_config[pin].func) {
516 case PTP_PF_EXTTS:
517 if (tstamp->pps_info.enabled) {
518 event->type = PTP_CLOCK_PPSUSR;
519 event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
520 } else {
521 event->type = PTP_CLOCK_EXTTS;
522 }
523 ptp_clock_event(tstamp->ptp, event);
524 break;
525 case PTP_PF_PEROUT:
526 mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
527 cycles_now = mlx5_read_internal_timer(tstamp->mdev);
528 ts.tv_sec += 1;
529 ts.tv_nsec = 0;
530 ns = timespec64_to_ns(&ts);
531 write_lock_irqsave(&tstamp->lock, flags);
532 nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
533 nsec_delta = ns - nsec_now;
534 cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
535 tstamp->cycles.mult);
536 tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
537 queue_work(priv->wq, &tstamp->pps_info.out_work);
538 write_unlock_irqrestore(&tstamp->lock, flags);
539 break;
540 default:
541 netdev_err(netdev, "%s: Unhandled event\n", __func__);
542 }
441} 543}
442 544
443void mlx5e_timestamp_init(struct mlx5e_priv *priv) 545void mlx5e_timestamp_init(struct mlx5e_priv *priv)
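
The reworked event handler dispatches on the pin's configured function: an EXTTS pin reports either an external timestamp or, when userspace has armed PPS, a PPSUSR event, while a PEROUT pin re-arms the next pulse on the following full second. A compact model of that classification, with simplified types and constants standing in for the kernel's PTP_* values:

#include <stdbool.h>
#include <stdio.h>

enum pin_func { PF_NONE, PF_EXTTS, PF_PEROUT };
enum ev_type  { EV_EXTTS, EV_PPSUSR, EV_PEROUT_REARM, EV_UNHANDLED };

/* Decide how a pulse on a pin should be reported or handled. */
static enum ev_type classify(enum pin_func func, bool pps_enabled)
{
        switch (func) {
        case PF_EXTTS:
                return pps_enabled ? EV_PPSUSR : EV_EXTTS;
        case PF_PEROUT:
                return EV_PEROUT_REARM; /* schedule next 1 s pulse */
        default:
                return EV_UNHANDLED;
        }
}

int main(void)
{
        printf("%d %d %d\n",
               classify(PF_EXTTS, true),        /* -> EV_PPSUSR */
               classify(PF_EXTTS, false),       /* -> EV_EXTTS */
               classify(PF_PEROUT, false));     /* -> EV_PEROUT_REARM */
        return 0;
}
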
@@ -473,9 +575,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
473 do_div(ns, NSEC_PER_SEC / 2 / HZ); 575 do_div(ns, NSEC_PER_SEC / 2 / HZ);
474 tstamp->overflow_period = ns; 576 tstamp->overflow_period = ns;
475 577
578 INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
476 INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); 579 INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
477 if (tstamp->overflow_period) 580 if (tstamp->overflow_period)
478 schedule_delayed_work(&tstamp->overflow_work, 0); 581 queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
479 else 582 else
480 mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); 583 mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
481 584
@@ -484,16 +587,10 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
484 snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); 587 snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
485 588
486 /* Initialize 1PPS data structures */ 589 /* Initialize 1PPS data structures */
487#define MAX_PIN_NUM 8 590 if (MLX5_PPS_CAP(priv->mdev))
488 tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL); 591 mlx5e_get_pps_caps(priv, tstamp);
489 if (tstamp->pps_pin_caps) { 592 if (tstamp->ptp_info.n_pins)
490 if (MLX5_CAP_GEN(priv->mdev, pps)) 593 mlx5e_init_pin_config(tstamp);
491 mlx5e_get_pps_caps(priv, tstamp);
492 if (tstamp->ptp_info.n_pins)
493 mlx5e_init_pin_config(tstamp);
494 } else {
495 mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
496 }
497 594
498 tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, 595 tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
499 &priv->mdev->pdev->dev); 596 &priv->mdev->pdev->dev);
@@ -516,8 +613,7 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
516 priv->tstamp.ptp = NULL; 613 priv->tstamp.ptp = NULL;
517 } 614 }
518 615
519 kfree(tstamp->pps_pin_caps); 616 cancel_work_sync(&tstamp->pps_info.out_work);
520 kfree(tstamp->ptp_info.pin_config);
521
522 cancel_delayed_work_sync(&tstamp->overflow_work); 617 cancel_delayed_work_sync(&tstamp->overflow_work);
618 kfree(tstamp->ptp_info.pin_config);
523} 619}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index bdd82c9b3992..eafc59280ada 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
276 276
277static bool outer_header_zero(u32 *match_criteria) 277static bool outer_header_zero(u32 *match_criteria)
278{ 278{
279 int size = MLX5_ST_SZ_BYTES(fte_match_param); 279 int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
280 char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, 280 char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
281 outer_headers); 281 outer_headers);
282 282
@@ -320,7 +320,7 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
320 320
321 spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); 321 spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
322 flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; 322 flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
323 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1); 323 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
324 if (IS_ERR(rule)) { 324 if (IS_ERR(rule)) {
325 err = PTR_ERR(rule); 325 err = PTR_ERR(rule);
326 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", 326 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
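
The outer_header_zero() fix scopes the all-zero test to the outer_headers field rather than the whole fte_match_param, so non-zero inner-header or misc criteria can no longer hide a zero outer match. A generic illustration of sizing the scan to one member (the struct layout is invented):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct match_param {
        unsigned char outer_headers[16];
        unsigned char misc[16];
        unsigned char inner_headers[16];
};

/* Test only the outer_headers member, not sizeof(struct match_param). */
static bool outer_header_zero(const struct match_param *p)
{
        static const unsigned char zeros[sizeof(p->outer_headers)];

        return !memcmp(p->outer_headers, zeros, sizeof(p->outer_headers));
}

int main(void)
{
        struct match_param p = { .misc = { 1 } }; /* non-zero outside outer */

        /* must report 1: only outer_headers matters */
        printf("outer zero: %d\n", outer_header_zero(&p));
        return 0;
}
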
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1eac5003084f..57f31fa478ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -377,7 +377,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
377 break; 377 break;
378 case MLX5_DEV_EVENT_PPS: 378 case MLX5_DEV_EVENT_PPS:
379 eqe = (struct mlx5_eqe *)param; 379 eqe = (struct mlx5_eqe *)param;
380 ptp_event.type = PTP_CLOCK_EXTTS;
381 ptp_event.index = eqe->data.pps.pin; 380 ptp_event.index = eqe->data.pps.pin;
382 ptp_event.timestamp = 381 ptp_event.timestamp =
383 timecounter_cyc2time(&priv->tstamp.clock, 382 timecounter_cyc2time(&priv->tstamp.clock,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index af51a5d2b912..52b9a64cd3a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -698,7 +698,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
698 else 698 else
699 mlx5_core_dbg(dev, "port_module_event is not set\n"); 699 mlx5_core_dbg(dev, "port_module_event is not set\n");
700 700
701 if (MLX5_CAP_GEN(dev, pps)) 701 if (MLX5_PPS_CAP(dev))
702 async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); 702 async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
703 703
704 if (MLX5_CAP_GEN(dev, fpga)) 704 if (MLX5_CAP_GEN(dev, fpga))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 89bfda419efe..8b18cc9ec026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1668,7 +1668,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
1668 int i; 1668 int i;
1669 1669
1670 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || 1670 if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
1671 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 1671 MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH ||
1672 esw->mode == SRIOV_NONE)
1672 return; 1673 return;
1673 1674
1674 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", 1675 esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1ee5bce85901..85298051a3e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -178,8 +178,6 @@ out:
178 178
179static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) 179static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
180{ 180{
181 mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
182
183 mlx5_core_destroy_qp(mdev, qp); 181 mlx5_core_destroy_qp(mdev, qp);
184} 182}
185 183
@@ -194,8 +192,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
194 return err; 192 return err;
195 } 193 }
196 194
197 mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
198
199 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); 195 err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
200 if (err) { 196 if (err) {
201 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); 197 mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
@@ -253,6 +249,7 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
253 249
254static int mlx5i_init_rx(struct mlx5e_priv *priv) 250static int mlx5i_init_rx(struct mlx5e_priv *priv)
255{ 251{
252 struct mlx5i_priv *ipriv = priv->ppriv;
256 int err; 253 int err;
257 254
258 err = mlx5e_create_indirect_rqt(priv); 255 err = mlx5e_create_indirect_rqt(priv);
@@ -271,12 +268,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
271 if (err) 268 if (err)
272 goto err_destroy_indirect_tirs; 269 goto err_destroy_indirect_tirs;
273 270
274 err = mlx5i_create_flow_steering(priv); 271 err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
275 if (err) 272 if (err)
276 goto err_destroy_direct_tirs; 273 goto err_destroy_direct_tirs;
277 274
275 err = mlx5i_create_flow_steering(priv);
276 if (err)
277 goto err_remove_rx_underlay_qpn;
278
278 return 0; 279 return 0;
279 280
281err_remove_rx_underlay_qpn:
282 mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
280err_destroy_direct_tirs: 283err_destroy_direct_tirs:
281 mlx5e_destroy_direct_tirs(priv); 284 mlx5e_destroy_direct_tirs(priv);
282err_destroy_indirect_tirs: 285err_destroy_indirect_tirs:
@@ -290,6 +293,9 @@ err_destroy_indirect_rqts:
290 293
291static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) 294static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
292{ 295{
296 struct mlx5i_priv *ipriv = priv->ppriv;
297
298 mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
293 mlx5i_destroy_flow_steering(priv); 299 mlx5i_destroy_flow_steering(priv);
294 mlx5e_destroy_direct_tirs(priv); 300 mlx5e_destroy_direct_tirs(priv);
295 mlx5e_destroy_indirect_tirs(priv); 301 mlx5e_destroy_indirect_tirs(priv);
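
The ipoib hunks make setup and teardown mirror images: the underlay QPN is now attached inside mlx5i_init_rx(), detached first in mlx5i_cleanup_rx(), and a new unwind label releases it when flow-steering creation fails afterwards. A skeleton of that goto-based rollback discipline, with hypothetical step names:

#include <stdio.h>

static int add_qpn(void)     { puts("add qpn");    return 0; }
static void remove_qpn(void) { puts("remove qpn"); }
static int create_fs(void)   { puts("create fs");  return -1; /* fail */ }

static int init_rx(void)
{
        int err;

        err = add_qpn();
        if (err)
                return err;

        err = create_fs();
        if (err)
                goto err_remove_qpn;    /* undo in reverse order */

        return 0;

err_remove_qpn:
        remove_qpn();
        return err;
}

int main(void)
{
        return init_rx() ? 1 : 0;
}
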
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index a3a836bdcfd2..f26f97fe4666 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -162,22 +162,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
162static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, 162static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
163 u8 *port1, u8 *port2) 163 u8 *port1, u8 *port2)
164{ 164{
165 if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { 165 *port1 = 1;
166 if (tracker->netdev_state[0].tx_enabled) { 166 *port2 = 2;
167 *port1 = 1; 167 if (!tracker->netdev_state[0].tx_enabled ||
168 *port2 = 1; 168 !tracker->netdev_state[0].link_up) {
169 } else { 169 *port1 = 2;
170 *port1 = 2; 170 return;
171 *port2 = 2;
172 }
173 } else {
174 *port1 = 1;
175 *port2 = 2;
176 if (!tracker->netdev_state[0].link_up)
177 *port1 = 2;
178 else if (!tracker->netdev_state[1].link_up)
179 *port2 = 1;
180 } 171 }
172
173 if (!tracker->netdev_state[1].tx_enabled ||
174 !tracker->netdev_state[1].link_up)
175 *port2 = 1;
181} 176}
182 177
183static void mlx5_activate_lag(struct mlx5_lag *ldev, 178static void mlx5_activate_lag(struct mlx5_lag *ldev,
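
The rewritten mlx5_infer_tx_affinity_mapping() drops the active-backup special case: every mode starts from the identity mapping and folds a port onto its peer whenever that port is tx-disabled or link-down. Since it is a pure function of tracker state, it can be lifted out and exercised directly (the fields here only mirror the tracker conceptually):

#include <stdbool.h>
#include <stdio.h>

struct port_state { bool tx_enabled, link_up; };

static void infer_tx_affinity(const struct port_state s[2],
                              unsigned *port1, unsigned *port2)
{
        *port1 = 1;
        *port2 = 2;
        if (!s[0].tx_enabled || !s[0].link_up) {
                *port1 = 2;     /* port 1 unusable: steer everything to 2 */
                return;
        }
        if (!s[1].tx_enabled || !s[1].link_up)
                *port2 = 1;     /* port 2 unusable: steer everything to 1 */
}

int main(void)
{
        struct port_state s[2] = {
                { .tx_enabled = true, .link_up = false }, /* port 1 down */
                { .tx_enabled = true, .link_up = true },
        };
        unsigned p1, p2;

        infer_tx_affinity(s, &p1, &p2);
        printf("port1->%u port2->%u\n", p1, p2); /* both map to port 2 */
        return 0;
}
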
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 6a3d6bef7dd4..6a263e8d883a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -154,6 +154,11 @@ int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
154int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); 154int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
155int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); 155int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
156 156
157#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
158 MLX5_CAP_GEN((mdev), pps_modify) && \
159 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
160 MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
161
157int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw); 162int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
158 163
159void mlx5e_init(void); 164void mlx5e_init(void);
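
MLX5_PPS_CAP bundles the four capability bits that complete 1PPS support requires, so call sites test one predicate instead of assorted partial combinations. A trivial analogue of hiding several feature probes behind one macro (the fields are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct dev { bool pps, pps_modify, mtpps_fs, mtpps_enh_adj; };

/* One predicate for "all PPS prerequisites present". */
#define PPS_CAP(d) ((d)->pps && (d)->pps_modify && \
                    (d)->mtpps_fs && (d)->mtpps_enh_adj)

int main(void)
{
        struct dev d = { true, true, true, false };

        printf("pps supported: %d\n", PPS_CAP(&d)); /* 0: one bit missing */
        return 0;
}
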
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index bcdf7779c48d..bf99d40e30b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,7 +88,11 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
88 int vf; 88 int vf;
89 89
90 if (!sriov->enabled_vfs) 90 if (!sriov->enabled_vfs)
91#ifdef CONFIG_MLX5_CORE_EN
92 goto disable_sriov_resources;
93#else
91 return; 94 return;
95#endif
92 96
93 for (vf = 0; vf < sriov->num_vfs; vf++) { 97 for (vf = 0; vf < sriov->num_vfs; vf++) {
94 if (!sriov->vfs_ctx[vf].enabled) 98 if (!sriov->vfs_ctx[vf].enabled)
@@ -103,6 +107,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
103 } 107 }
104 108
105#ifdef CONFIG_MLX5_CORE_EN 109#ifdef CONFIG_MLX5_CORE_EN
110disable_sriov_resources:
106 mlx5_eswitch_disable_sriov(dev->priv.eswitch); 111 mlx5_eswitch_disable_sriov(dev->priv.eswitch);
107#endif 112#endif
108 113
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 383fef5a8e24..4b2e0fd7d51e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1512,6 +1512,10 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
1512static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, 1512static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1513 struct mlxsw_sp_fib_entry *fib_entry); 1513 struct mlxsw_sp_fib_entry *fib_entry);
1514 1514
1515static bool
1516mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
1517 const struct mlxsw_sp_fib_entry *fib_entry);
1518
1515static int 1519static int
1516mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, 1520mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1517 struct mlxsw_sp_nexthop_group *nh_grp) 1521 struct mlxsw_sp_nexthop_group *nh_grp)
@@ -1520,6 +1524,9 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
1520 int err; 1524 int err;
1521 1525
1522 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { 1526 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
1527 if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
1528 fib_entry))
1529 continue;
1523 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); 1530 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1524 if (err) 1531 if (err)
1525 return err; 1532 return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 656b2d3f1bee..5eb1606765c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -626,8 +626,8 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
626 626
627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
628 orig_dev); 628 orig_dev);
629 if (WARN_ON(!bridge_port)) 629 if (!bridge_port)
630 return -EINVAL; 630 return 0;
631 631
632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, 632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
633 MLXSW_SP_FLOOD_TYPE_UC, 633 MLXSW_SP_FLOOD_TYPE_UC,
@@ -711,8 +711,8 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
711 711
712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge, 712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
713 orig_dev); 713 orig_dev);
714 if (WARN_ON(!bridge_port)) 714 if (!bridge_port)
715 return -EINVAL; 715 return 0;
716 716
717 if (!bridge_port->bridge_device->multicast_enabled) 717 if (!bridge_port->bridge_device->multicast_enabled)
718 return 0; 718 return 0;
@@ -1283,15 +1283,15 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1283 return 0; 1283 return 0;
1284 1284
1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1285 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1286 if (WARN_ON(!bridge_port)) 1286 if (!bridge_port)
1287 return -EINVAL; 1287 return 0;
1288 1288
1289 bridge_device = bridge_port->bridge_device; 1289 bridge_device = bridge_port->bridge_device;
1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1290 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1291 bridge_device, 1291 bridge_device,
1292 mdb->vid); 1292 mdb->vid);
1293 if (WARN_ON(!mlxsw_sp_port_vlan)) 1293 if (!mlxsw_sp_port_vlan)
1294 return -EINVAL; 1294 return 0;
1295 1295
1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1296 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1297 1297
@@ -1407,15 +1407,15 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1407 int err = 0; 1407 int err = 0;
1408 1408
1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1409 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1410 if (WARN_ON(!bridge_port)) 1410 if (!bridge_port)
1411 return -EINVAL; 1411 return 0;
1412 1412
1413 bridge_device = bridge_port->bridge_device; 1413 bridge_device = bridge_port->bridge_device;
1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port, 1414 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1415 bridge_device, 1415 bridge_device,
1416 mdb->vid); 1416 mdb->vid);
1417 if (WARN_ON(!mlxsw_sp_port_vlan)) 1417 if (!mlxsw_sp_port_vlan)
1418 return -EINVAL; 1418 return 0;
1419 1419
1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); 1420 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1421 1421
@@ -1974,6 +1974,17 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
1974 1974
1975} 1975}
1976 1976
1977static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp)
1978{
1979 struct mlxsw_sp_mid *mid, *tmp;
1980
1981 list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) {
1982 list_del(&mid->list);
1983 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1984 kfree(mid);
1985 }
1986}
1987
1977int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) 1988int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1978{ 1989{
1979 struct mlxsw_sp_bridge *bridge; 1990 struct mlxsw_sp_bridge *bridge;
@@ -1996,7 +2007,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1996void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) 2007void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1997{ 2008{
1998 mlxsw_sp_fdb_fini(mlxsw_sp); 2009 mlxsw_sp_fdb_fini(mlxsw_sp);
1999 WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list)); 2010 mlxsw_sp_mids_fini(mlxsw_sp);
2000 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); 2011 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
2001 kfree(mlxsw_sp->bridge); 2012 kfree(mlxsw_sp->bridge);
2002} 2013}
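
mlxsw_sp_mids_fini() turns the old WARN_ON for leftover MDB entries into a real teardown: iterate with the _safe variant, unlink, release the MID bit, free. The same save-the-successor-before-freeing idiom on a plain singly linked list (illustrative types):

#include <stdio.h>
#include <stdlib.h>

struct mid {
        int id;
        struct mid *next;
};

/* Free every entry; 'tmp' keeps the successor alive across free(). */
static void mids_fini(struct mid **head)
{
        struct mid *mid, *tmp;

        for (mid = *head; mid; mid = tmp) {
                tmp = mid->next;        /* save before freeing */
                printf("releasing mid %d\n", mid->id);
                free(mid);
        }
        *head = NULL;
}

int main(void)
{
        struct mid *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct mid *m = malloc(sizeof(*m));

                if (!m)
                        return 1;
                m->id = i;
                m->next = head;
                head = m;
        }
        mids_fini(&head);
        return 0;
}
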
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index dd7fa9cf225f..b0837b58c3a1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -115,14 +115,10 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
115 return; 115 return;
116 } 116 }
117 117
118 if (link) { 118 if (link)
119 netif_carrier_on(netdev); 119 netif_carrier_on(netdev);
120 rtnl_lock(); 120 else
121 dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
122 rtnl_unlock();
123 } else {
124 netif_carrier_off(netdev); 121 netif_carrier_off(netdev);
125 }
126 rcu_read_unlock(); 122 rcu_read_unlock();
127} 123}
128 124
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18750ff0ede6..9f77ce038a4a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -513,6 +513,7 @@ nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
513 tx_ring->idx = idx; 513 tx_ring->idx = idx;
514 tx_ring->r_vec = r_vec; 514 tx_ring->r_vec = r_vec;
515 tx_ring->is_xdp = is_xdp; 515 tx_ring->is_xdp = is_xdp;
516 u64_stats_init(&tx_ring->r_vec->tx_sync);
516 517
517 tx_ring->qcidx = tx_ring->idx * nn->stride_tx; 518 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
518 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); 519 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
@@ -532,6 +533,7 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
532 533
533 rx_ring->idx = idx; 534 rx_ring->idx = idx;
534 rx_ring->r_vec = r_vec; 535 rx_ring->r_vec = r_vec;
536 u64_stats_init(&rx_ring->r_vec->rx_sync);
535 537
536 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; 538 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
537 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx); 539 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
@@ -906,8 +908,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
906 return NETDEV_TX_OK; 908 return NETDEV_TX_OK;
907 909
908err_unmap: 910err_unmap:
909 --f; 911 while (--f >= 0) {
910 while (f >= 0) {
911 frag = &skb_shinfo(skb)->frags[f]; 912 frag = &skb_shinfo(skb)->frags[f];
912 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr, 913 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
913 skb_frag_size(frag), DMA_TO_DEVICE); 914 skb_frag_size(frag), DMA_TO_DEVICE);
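
The nfp unwind now folds the frag-index decrement into the loop condition, so a failed mapping walks backward through exactly the frags already mapped. The predecrement-unwind shape in isolation (unmap_frag() stands in for dma_unmap_page()):

#include <stdio.h>

#define NFRAGS 4

static void unmap_frag(int i)
{
        printf("unmap frag %d\n", i);
}

/* Unwind after frag 'f' failed to map: frags 0..f-1 are mapped. */
static void unwind(int f)
{
        while (--f >= 0)        /* decrement in the condition: terminates */
                unmap_frag(f);
}

int main(void)
{
        unwind(NFRAGS);         /* unmaps 3, 2, 1, 0 */
        return 0;
}
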
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 66ff15d08bad..0a66389c06c2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -2311,7 +2311,7 @@ netxen_md_rdqueue(struct netxen_adapter *adapter,
2311 loop_cnt++) { 2311 loop_cnt++) {
2312 NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id); 2312 NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
2313 read_addr = queueEntry->read_addr; 2313 read_addr = queueEntry->read_addr;
2314 for (k = 0; k < read_cnt; k--) { 2314 for (k = 0; k < read_cnt; k++) {
2315 NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, 2315 NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
2316 &read_value); 2316 &read_value);
2317 *data_buff++ = read_value; 2317 *data_buff++ = read_value;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9da91045d167..3eb241657368 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -253,7 +253,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); 253 size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); 254 p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); 255 p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
256 if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) 256 if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
257 goto err; 257 goto err;
258 258
259 return 0; 259 return 0;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 746d94e28470..60850bfa3d32 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -766,11 +766,13 @@ static void emac_shutdown(struct platform_device *pdev)
766 struct emac_adapter *adpt = netdev_priv(netdev); 766 struct emac_adapter *adpt = netdev_priv(netdev);
767 struct emac_sgmii *sgmii = &adpt->phy; 767 struct emac_sgmii *sgmii = &adpt->phy;
768 768
769 /* Closing the SGMII turns off its interrupts */ 769 if (netdev->flags & IFF_UP) {
770 sgmii->close(adpt); 770 /* Closing the SGMII turns off its interrupts */
771 sgmii->close(adpt);
771 772
772 /* Resetting the MAC turns off all DMA and its interrupts */ 773 /* Resetting the MAC turns off all DMA and its interrupts */
773 emac_mac_reset(adpt); 774 emac_mac_reset(adpt);
775 }
774} 776}
775 777
776static struct platform_driver emac_platform_driver = { 778static struct platform_driver emac_platform_driver = {
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index c905971c5f3a..990a63d7fcb7 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -938,7 +938,6 @@ enum efx_stats_action {
938static int efx_mcdi_mac_stats(struct efx_nic *efx, 938static int efx_mcdi_mac_stats(struct efx_nic *efx,
939 enum efx_stats_action action, int clear) 939 enum efx_stats_action action, int clear)
940{ 940{
941 struct efx_ef10_nic_data *nic_data = efx->nic_data;
942 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); 941 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
943 int rc; 942 int rc;
944 int change = action == EFX_STATS_PULL ? 0 : 1; 943 int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -960,7 +959,12 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
960 MAC_STATS_IN_PERIODIC_NOEVENT, 1, 959 MAC_STATS_IN_PERIODIC_NOEVENT, 1,
961 MAC_STATS_IN_PERIOD_MS, period); 960 MAC_STATS_IN_PERIOD_MS, period);
962 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 961 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
963 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id); 962
963 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
964 struct efx_ef10_nic_data *nic_data = efx->nic_data;
965
966 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
967 }
964 968
965 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 969 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
966 NULL, 0, NULL); 970 NULL, 0, NULL);
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index b607936e1b3e..9c0488e0f08e 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -90,17 +90,13 @@ struct ioc3_private {
90 spinlock_t ioc3_lock; 90 spinlock_t ioc3_lock;
91 struct mii_if_info mii; 91 struct mii_if_info mii;
92 92
93 struct net_device *dev;
93 struct pci_dev *pdev; 94 struct pci_dev *pdev;
94 95
95 /* Members used by autonegotiation */ 96 /* Members used by autonegotiation */
96 struct timer_list ioc3_timer; 97 struct timer_list ioc3_timer;
97}; 98};
98 99
99static inline struct net_device *priv_netdev(struct ioc3_private *dev)
100{
101 return (void *)dev - ((sizeof(struct net_device) + 31) & ~31);
102}
103
104static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 100static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
105static void ioc3_set_multicast_list(struct net_device *dev); 101static void ioc3_set_multicast_list(struct net_device *dev);
106static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); 102static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -427,7 +423,7 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
427 nic[i] = nic_read_byte(ioc3); 423 nic[i] = nic_read_byte(ioc3);
428 424
429 for (i = 2; i < 8; i++) 425 for (i = 2; i < 8; i++)
430 priv_netdev(ip)->dev_addr[i - 2] = nic[i]; 426 ip->dev->dev_addr[i - 2] = nic[i];
431} 427}
432 428
433/* 429/*
@@ -439,7 +435,7 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
439{ 435{
440 ioc3_get_eaddr_nic(ip); 436 ioc3_get_eaddr_nic(ip);
441 437
442 printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr); 438 printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
443} 439}
444 440
445static void __ioc3_set_mac_address(struct net_device *dev) 441static void __ioc3_set_mac_address(struct net_device *dev)
@@ -790,13 +786,12 @@ static void ioc3_timer(unsigned long data)
790 */ 786 */
791static int ioc3_mii_init(struct ioc3_private *ip) 787static int ioc3_mii_init(struct ioc3_private *ip)
792{ 788{
793 struct net_device *dev = priv_netdev(ip);
794 int i, found = 0, res = 0; 789 int i, found = 0, res = 0;
795 int ioc3_phy_workaround = 1; 790 int ioc3_phy_workaround = 1;
796 u16 word; 791 u16 word;
797 792
798 for (i = 0; i < 32; i++) { 793 for (i = 0; i < 32; i++) {
799 word = ioc3_mdio_read(dev, i, MII_PHYSID1); 794 word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1);
800 795
801 if (word != 0xffff && word != 0x0000) { 796 if (word != 0xffff && word != 0x0000) {
802 found = 1; 797 found = 1;
@@ -1276,6 +1271,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1276 SET_NETDEV_DEV(dev, &pdev->dev); 1271 SET_NETDEV_DEV(dev, &pdev->dev);
1277 1272
1278 ip = netdev_priv(dev); 1273 ip = netdev_priv(dev);
1274 ip->dev = dev;
1279 1275
1280 dev->irq = pdev->irq; 1276 dev->irq = pdev->irq;
1281 1277
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 22cf6353ba04..7ecf549c7f1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -205,7 +205,7 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
205{ 205{
206 int i; 206 int i;
207 207
208 for (i = 0; i < 23; i++) 208 for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
209 if ((i < 12) || (i > 17)) 209 if ((i < 12) || (i > 17))
210 reg_space[DMA_BUS_MODE / 4 + i] = 210 reg_space[DMA_BUS_MODE / 4 + i] =
211 readl(ioaddr + DMA_BUS_MODE + i * 4); 211 readl(ioaddr + DMA_BUS_MODE + i * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index eef2f222ce9a..6502b9aa3bf5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -70,7 +70,7 @@ static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
70{ 70{
71 int i; 71 int i;
72 72
73 for (i = 0; i < 9; i++) 73 for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
74 reg_space[DMA_BUS_MODE / 4 + i] = 74 reg_space[DMA_BUS_MODE / 4 + i] =
75 readl(ioaddr + DMA_BUS_MODE + i * 4); 75 readl(ioaddr + DMA_BUS_MODE + i * 4);
76 76
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f233bf8b4ebb..c4407e8e39a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -117,7 +117,7 @@ static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
117 void __iomem *ioaddr = hw->pcsr; 117 void __iomem *ioaddr = hw->pcsr;
118 u32 value; 118 u32 value;
119 119
120 const struct stmmac_rx_routing route_possibilities[] = { 120 static const struct stmmac_rx_routing route_possibilities[] = {
121 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, 121 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
122 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, 122 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
123 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, 123 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 9091df86723a..adc54006f884 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -136,6 +136,9 @@
136#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 136#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
137#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ 137#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
138 138
139#define NUM_DWMAC100_DMA_REGS 9
140#define NUM_DWMAC1000_DMA_REGS 23
141
139void dwmac_enable_dma_transmission(void __iomem *ioaddr); 142void dwmac_enable_dma_transmission(void __iomem *ioaddr);
140void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan); 143void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
141void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan); 144void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index babb39c646ff..af30b4857c3b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -33,6 +33,8 @@
33#define MAC100_ETHTOOL_NAME "st_mac100" 33#define MAC100_ETHTOOL_NAME "st_mac100"
34#define GMAC_ETHTOOL_NAME "st_gmac" 34#define GMAC_ETHTOOL_NAME "st_gmac"
35 35
36#define ETHTOOL_DMA_OFFSET 55
37
36struct stmmac_stats { 38struct stmmac_stats {
37 char stat_string[ETH_GSTRING_LEN]; 39 char stat_string[ETH_GSTRING_LEN];
38 int sizeof_stat; 40 int sizeof_stat;
@@ -442,6 +444,9 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
442 444
443 priv->hw->mac->dump_regs(priv->hw, reg_space); 445 priv->hw->mac->dump_regs(priv->hw, reg_space);
444 priv->hw->dma->dump_regs(priv->ioaddr, reg_space); 446 priv->hw->dma->dump_regs(priv->ioaddr, reg_space);
447 /* Copy DMA registers to where ethtool expects them */
448 memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
449 NUM_DWMAC1000_DMA_REGS * 4);
445} 450}
446 451
447static void 452static void
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1853f7ff6657..1763e48c84e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4120,8 +4120,15 @@ int stmmac_dvr_probe(struct device *device,
4120 if ((phyaddr >= 0) && (phyaddr <= 31)) 4120 if ((phyaddr >= 0) && (phyaddr <= 31))
4121 priv->plat->phy_addr = phyaddr; 4121 priv->plat->phy_addr = phyaddr;
4122 4122
4123 if (priv->plat->stmmac_rst) 4123 if (priv->plat->stmmac_rst) {
4124 ret = reset_control_assert(priv->plat->stmmac_rst);
4124 reset_control_deassert(priv->plat->stmmac_rst); 4125 reset_control_deassert(priv->plat->stmmac_rst);
4126 /* Some reset controllers have only reset callback instead of
4127 * assert + deassert callbacks pair.
4128 */
4129 if (ret == -ENOTSUPP)
4130 reset_control_reset(priv->plat->stmmac_rst);
4131 }
4125 4132
4126 /* Init MAC and get the capabilities */ 4133 /* Init MAC and get the capabilities */
4127 ret = stmmac_hw_init(priv); 4134 ret = stmmac_hw_init(priv);
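
The stmmac probe now pulses the controller reset explicitly, falling back to the combined reset() callback when the controller implements no assert/deassert pair (assert reports -ENOTSUPP). A sketch of that fallback against stubbed reset hooks, with userspace ENOTSUP standing in for the kernel's ENOTSUPP:

#include <errno.h>
#include <stdio.h>

/* Stubs modelling a reset controller without assert/deassert support. */
static int reset_assert(void)   { return -ENOTSUP; }
static int reset_deassert(void) { return -ENOTSUP; }
static int reset_pulse(void)    { puts("combined reset()"); return 0; }

static int hw_reset(void)
{
        int ret = reset_assert();

        reset_deassert();       /* harmless when assert was a no-op */
        if (ret == -ENOTSUP)    /* the kernel checks -ENOTSUPP here */
                return reset_pulse();
        return ret;
}

int main(void)
{
        return hw_reset() ? 1 : 0;
}
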
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index db157a47000c..72ec711fcba2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -204,6 +204,7 @@ int stmmac_mdio_register(struct net_device *ndev)
204 struct stmmac_priv *priv = netdev_priv(ndev); 204 struct stmmac_priv *priv = netdev_priv(ndev);
205 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 205 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
206 struct device_node *mdio_node = priv->plat->mdio_node; 206 struct device_node *mdio_node = priv->plat->mdio_node;
207 struct device *dev = ndev->dev.parent;
207 int addr, found; 208 int addr, found;
208 209
209 if (!mdio_bus_data) 210 if (!mdio_bus_data)
@@ -237,7 +238,7 @@ int stmmac_mdio_register(struct net_device *ndev)
237 else 238 else
238 err = mdiobus_register(new_bus); 239 err = mdiobus_register(new_bus);
239 if (err != 0) { 240 if (err != 0) {
240 netdev_err(ndev, "Cannot register the MDIO bus\n"); 241 dev_err(dev, "Cannot register the MDIO bus\n");
241 goto bus_register_fail; 242 goto bus_register_fail;
242 } 243 }
243 244
@@ -285,14 +286,12 @@ int stmmac_mdio_register(struct net_device *ndev)
285 irq_str = irq_num; 286 irq_str = irq_num;
286 break; 287 break;
287 } 288 }
288 netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", 289 phy_attached_info(phydev);
289 phydev->phy_id, addr, irq_str, phydev_name(phydev),
290 act ? " active" : "");
291 found = 1; 290 found = 1;
292 } 291 }
293 292
294 if (!found && !mdio_node) { 293 if (!found && !mdio_node) {
295 netdev_warn(ndev, "No PHY found\n"); 294 dev_warn(dev, "No PHY found\n");
296 mdiobus_unregister(new_bus); 295 mdiobus_unregister(new_bus);
297 mdiobus_free(new_bus); 296 mdiobus_free(new_bus);
298 return -ENODEV; 297 return -ENODEV;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 46cb7f8955a2..4bb04aaf9650 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9532,7 +9532,7 @@ static struct niu_parent *niu_get_parent(struct niu *np,
9532 p = niu_new_parent(np, id, ptype); 9532 p = niu_new_parent(np, id, ptype);
9533 9533
9534 if (p) { 9534 if (p) {
9535 char port_name[6]; 9535 char port_name[8];
9536 int err; 9536 int err;
9537 9537
9538 sprintf(port_name, "port%d", port); 9538 sprintf(port_name, "port%d", port);
@@ -9553,7 +9553,7 @@ static void niu_put_parent(struct niu *np)
9553{ 9553{
9554 struct niu_parent *p = np->parent; 9554 struct niu_parent *p = np->parent;
9555 u8 port = np->port; 9555 u8 port = np->port;
9556 char port_name[6]; 9556 char port_name[8];
9557 9557
9558 BUG_ON(!p || p->ports[port] != np); 9558 BUG_ON(!p || p->ports[port] != np);
9559 9559
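
The niu fix widens port_name from 6 to 8 bytes: "port%d" with a two-digit port needs 7 bytes including the terminator, one more than the old buffer held. The defensive version of the same formatting, with snprintf and a truncation check (the check itself is illustrative):

#include <stdio.h>

int main(void)
{
        char port_name[8];      /* "port" + up to 2 digits + NUL = 7 <= 8 */
        int port = 10;
        int n = snprintf(port_name, sizeof(port_name), "port%d", port);

        /* snprintf reports the would-be length; detect truncation */
        if (n < 0 || (size_t)n >= sizeof(port_name))
                return 1;
        puts(port_name);
        return 0;
}
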
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 3af540adb3c5..fca1bca7f69d 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -13,9 +13,9 @@
13/* Happy Meal global registers. */ 13/* Happy Meal global registers. */
14#define GREG_SWRESET 0x000UL /* Software Reset */ 14#define GREG_SWRESET 0x000UL /* Software Reset */
15#define GREG_CFG 0x004UL /* Config Register */ 15#define GREG_CFG 0x004UL /* Config Register */
16#define GREG_STAT 0x108UL /* Status */ 16#define GREG_STAT 0x100UL /* Status */
17#define GREG_IMASK 0x10cUL /* Interrupt Mask */ 17#define GREG_IMASK 0x104UL /* Interrupt Mask */
18#define GREG_REG_SIZE 0x110UL 18#define GREG_REG_SIZE 0x108UL
19 19
20/* Global reset register. */ 20/* Global reset register. */
21#define GREG_RESET_ETX 0x01 21#define GREG_RESET_ETX 0x01
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 711fbbbc4b1f..163d8d16bc24 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -654,6 +654,8 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
654 RET(-EFAULT); 654 RET(-EFAULT);
655 } 655 }
656 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]); 656 DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
657 } else {
658 return -EOPNOTSUPP;
657 } 659 }
658 660
659 if (!capable(CAP_SYS_RAWIO)) 661 if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1850e348f555..badd0a8caeb9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3089,6 +3089,31 @@ static int cpsw_probe(struct platform_device *pdev)
3089 cpsw->quirk_irq = true; 3089 cpsw->quirk_irq = true;
3090 } 3090 }
3091 3091
3092 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3093
3094 ndev->netdev_ops = &cpsw_netdev_ops;
3095 ndev->ethtool_ops = &cpsw_ethtool_ops;
3096 netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
3097 netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
3098 cpsw_split_res(ndev);
3099
3100 /* register the network device */
3101 SET_NETDEV_DEV(ndev, &pdev->dev);
3102 ret = register_netdev(ndev);
3103 if (ret) {
3104 dev_err(priv->dev, "error registering net device\n");
3105 ret = -ENODEV;
3106 goto clean_ale_ret;
3107 }
3108
3109 if (cpsw->data.dual_emac) {
3110 ret = cpsw_probe_dual_emac(priv);
3111 if (ret) {
3112 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
3113 goto clean_unregister_netdev_ret;
3114 }
3115 }
3116
3092 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and 3117 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
3093 * MISC IRQs which are always kept disabled with this driver so 3118 * MISC IRQs which are always kept disabled with this driver so
3094 * we will not request them. 3119 * we will not request them.
@@ -3127,33 +3152,9 @@ static int cpsw_probe(struct platform_device *pdev)
3127 goto clean_ale_ret; 3152 goto clean_ale_ret;
3128 } 3153 }
3129 3154
3130 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3131
3132 ndev->netdev_ops = &cpsw_netdev_ops;
3133 ndev->ethtool_ops = &cpsw_ethtool_ops;
3134 netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
3135 netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
3136 cpsw_split_res(ndev);
3137
3138 /* register the network device */
3139 SET_NETDEV_DEV(ndev, &pdev->dev);
3140 ret = register_netdev(ndev);
3141 if (ret) {
3142 dev_err(priv->dev, "error registering net device\n");
3143 ret = -ENODEV;
3144 goto clean_ale_ret;
3145 }
3146
3147 cpsw_notice(priv, probe, 3155 cpsw_notice(priv, probe,
3148 "initialized device (regs %pa, irq %d, pool size %d)\n", 3156 "initialized device (regs %pa, irq %d, pool size %d)\n",
3149 &ss_res->start, ndev->irq, dma_params.descs_pool_size); 3157 &ss_res->start, ndev->irq, dma_params.descs_pool_size);
3150 if (cpsw->data.dual_emac) {
3151 ret = cpsw_probe_dual_emac(priv);
3152 if (ret) {
3153 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
3154 goto clean_unregister_netdev_ret;
3155 }
3156 }
3157 3158
3158 pm_runtime_put(&pdev->dev); 3159 pm_runtime_put(&pdev->dev);
3159 3160
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 32279d21c836..c2121d214f08 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -31,9 +31,18 @@
31 31
32#include "cpts.h" 32#include "cpts.h"
33 33
34#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
35
36struct cpts_skb_cb_data {
37 unsigned long tmo;
38};
39
34#define cpts_read32(c, r) readl_relaxed(&c->reg->r) 40#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
35#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r) 41#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
36 42
43static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
44 u16 ts_seqid, u8 ts_msgtype);
45
37static int event_expired(struct cpts_event *event) 46static int event_expired(struct cpts_event *event)
38{ 47{
39 return time_after(jiffies, event->tmo); 48 return time_after(jiffies, event->tmo);
@@ -77,6 +86,47 @@ static int cpts_purge_events(struct cpts *cpts)
77 return removed ? 0 : -1; 86 return removed ? 0 : -1;
78} 87}
79 88
89static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
90{
91 struct sk_buff *skb, *tmp;
92 u16 seqid;
93 u8 mtype;
94 bool found = false;
95
96 mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
97 seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
98
99 /* no need to grab txq.lock as access is always done under cpts->lock */
100 skb_queue_walk_safe(&cpts->txq, skb, tmp) {
101 struct skb_shared_hwtstamps ssh;
102 unsigned int class = ptp_classify_raw(skb);
103 struct cpts_skb_cb_data *skb_cb =
104 (struct cpts_skb_cb_data *)skb->cb;
105
106 if (cpts_match(skb, class, seqid, mtype)) {
107 u64 ns = timecounter_cyc2time(&cpts->tc, event->low);
108
109 memset(&ssh, 0, sizeof(ssh));
110 ssh.hwtstamp = ns_to_ktime(ns);
111 skb_tstamp_tx(skb, &ssh);
112 found = true;
113 __skb_unlink(skb, &cpts->txq);
114 dev_consume_skb_any(skb);
115 dev_dbg(cpts->dev, "match tx timestamp mtype %u seqid %04x\n",
116 mtype, seqid);
117 } else if (time_after(jiffies, skb_cb->tmo)) {
118 /* timeout any expired skbs over 1s */
119 dev_dbg(cpts->dev,
120 "expiring tx timestamp mtype %u seqid %04x\n",
121 mtype, seqid);
122 __skb_unlink(skb, &cpts->txq);
123 dev_consume_skb_any(skb);
124 }
125 }
126
127 return found;
128}
129
80/* 130/*
81 * Returns zero if matching event type was found. 131 * Returns zero if matching event type was found.
82 */ 132 */
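
cpts_match_tx_ts() sweeps the deferred-skb queue on every TX event popped from the FIFO: a sequence-id/message-type match delivers the timestamp and unlinks the skb, and anything past its deadline is dropped so the queue stays bounded. A userspace model of that match-or-expire sweep (field names invented):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pending {
        unsigned seqid;
        unsigned long deadline; /* "jiffies" at which we give up */
        struct pending *next;
};

/* Deliver the event to a matching entry, expire stale ones. */
static bool match_or_expire(struct pending **head, unsigned seqid,
                            unsigned long now)
{
        bool found = false;
        struct pending **pp = head;

        while (*pp) {
                struct pending *p = *pp;

                if (p->seqid == seqid) {
                        printf("deliver ts for seq %u\n", p->seqid);
                        *pp = p->next;
                        free(p);
                        found = true;
                } else if (now > p->deadline) {
                        printf("expire seq %u\n", p->seqid);
                        *pp = p->next;
                        free(p);
                } else {
                        pp = &p->next;
                }
        }
        return found;
}

int main(void)
{
        struct pending *head = NULL;
        unsigned ids[] = { 7, 8 };
        int i;

        for (i = 0; i < 2; i++) {
                struct pending *p = malloc(sizeof(*p));

                if (!p)
                        return 1;
                p->seqid = ids[i];
                p->deadline = 100;
                p->next = head;
                head = p;
        }
        match_or_expire(&head, 8, 50);   /* delivers seq 8 */
        match_or_expire(&head, 99, 200); /* expires seq 7 */
        return 0;
}
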
@@ -101,9 +151,15 @@ static int cpts_fifo_read(struct cpts *cpts, int match)
101 event->low = lo; 151 event->low = lo;
102 type = event_type(event); 152 type = event_type(event);
103 switch (type) { 153 switch (type) {
154 case CPTS_EV_TX:
155 if (cpts_match_tx_ts(cpts, event)) {
156 /* if the new event matches an existing skb,
157 * then don't queue it
158 */
159 break;
160 }
104 case CPTS_EV_PUSH: 161 case CPTS_EV_PUSH:
105 case CPTS_EV_RX: 162 case CPTS_EV_RX:
106 case CPTS_EV_TX:
107 list_del_init(&event->list); 163 list_del_init(&event->list);
108 list_add_tail(&event->list, &cpts->events); 164 list_add_tail(&event->list, &cpts->events);
109 break; 165 break;
@@ -224,6 +280,24 @@ static int cpts_ptp_enable(struct ptp_clock_info *ptp,
224 return -EOPNOTSUPP; 280 return -EOPNOTSUPP;
225} 281}
226 282
283static long cpts_overflow_check(struct ptp_clock_info *ptp)
284{
285 struct cpts *cpts = container_of(ptp, struct cpts, info);
286 unsigned long delay = cpts->ov_check_period;
287 struct timespec64 ts;
288 unsigned long flags;
289
290 spin_lock_irqsave(&cpts->lock, flags);
291 ts = ns_to_timespec64(timecounter_read(&cpts->tc));
292
293 if (!skb_queue_empty(&cpts->txq))
294 delay = CPTS_SKB_TX_WORK_TIMEOUT;
295 spin_unlock_irqrestore(&cpts->lock, flags);
296
297 pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
298 return (long)delay;
299}
300
227static struct ptp_clock_info cpts_info = { 301static struct ptp_clock_info cpts_info = {
228 .owner = THIS_MODULE, 302 .owner = THIS_MODULE,
229 .name = "CTPS timer", 303 .name = "CTPS timer",
@@ -236,18 +310,9 @@ static struct ptp_clock_info cpts_info = {
236 .gettime64 = cpts_ptp_gettime, 310 .gettime64 = cpts_ptp_gettime,
237 .settime64 = cpts_ptp_settime, 311 .settime64 = cpts_ptp_settime,
238 .enable = cpts_ptp_enable, 312 .enable = cpts_ptp_enable,
313 .do_aux_work = cpts_overflow_check,
239}; 314};
240 315
241static void cpts_overflow_check(struct work_struct *work)
242{
243 struct timespec64 ts;
244 struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
245
246 cpts_ptp_gettime(&cpts->info, &ts);
247 pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec);
248 schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period);
249}
250
251static int cpts_match(struct sk_buff *skb, unsigned int ptp_class, 316static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
252 u16 ts_seqid, u8 ts_msgtype) 317 u16 ts_seqid, u8 ts_msgtype)
253{ 318{
@@ -299,7 +364,7 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
299 return 0; 364 return 0;
300 365
301 spin_lock_irqsave(&cpts->lock, flags); 366 spin_lock_irqsave(&cpts->lock, flags);
302 cpts_fifo_read(cpts, CPTS_EV_PUSH); 367 cpts_fifo_read(cpts, -1);
303 list_for_each_safe(this, next, &cpts->events) { 368 list_for_each_safe(this, next, &cpts->events) {
304 event = list_entry(this, struct cpts_event, list); 369 event = list_entry(this, struct cpts_event, list);
305 if (event_expired(event)) { 370 if (event_expired(event)) {
@@ -317,6 +382,19 @@ static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
317 break; 382 break;
318 } 383 }
319 } 384 }
385
386 if (ev_type == CPTS_EV_TX && !ns) {
387 struct cpts_skb_cb_data *skb_cb =
388 (struct cpts_skb_cb_data *)skb->cb;
389 /* Not found, add frame to queue for processing later.
390 * The periodic FIFO check will handle this.
391 */
392 skb_get(skb);
393 /* get the timestamp for timeouts */
394 skb_cb->tmo = jiffies + msecs_to_jiffies(100);
395 __skb_queue_tail(&cpts->txq, skb);
396 ptp_schedule_worker(cpts->clock, 0);
397 }
320 spin_unlock_irqrestore(&cpts->lock, flags); 398 spin_unlock_irqrestore(&cpts->lock, flags);
321 399
322 return ns; 400 return ns;
@@ -358,6 +436,7 @@ int cpts_register(struct cpts *cpts)
358{ 436{
359 int err, i; 437 int err, i;
360 438
439 skb_queue_head_init(&cpts->txq);
361 INIT_LIST_HEAD(&cpts->events); 440 INIT_LIST_HEAD(&cpts->events);
362 INIT_LIST_HEAD(&cpts->pool); 441 INIT_LIST_HEAD(&cpts->pool);
363 for (i = 0; i < CPTS_MAX_EVENTS; i++) 442 for (i = 0; i < CPTS_MAX_EVENTS; i++)
@@ -378,7 +457,7 @@ int cpts_register(struct cpts *cpts)
378 } 457 }
379 cpts->phc_index = ptp_clock_index(cpts->clock); 458 cpts->phc_index = ptp_clock_index(cpts->clock);
380 459
381 schedule_delayed_work(&cpts->overflow_work, cpts->ov_check_period); 460 ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
382 return 0; 461 return 0;
383 462
384err_ptp: 463err_ptp:
@@ -392,14 +471,15 @@ void cpts_unregister(struct cpts *cpts)
392 if (WARN_ON(!cpts->clock)) 471 if (WARN_ON(!cpts->clock))
393 return; 472 return;
394 473
395 cancel_delayed_work_sync(&cpts->overflow_work);
396
397 ptp_clock_unregister(cpts->clock); 474 ptp_clock_unregister(cpts->clock);
398 cpts->clock = NULL; 475 cpts->clock = NULL;
399 476
400 cpts_write32(cpts, 0, int_enable); 477 cpts_write32(cpts, 0, int_enable);
401 cpts_write32(cpts, 0, control); 478 cpts_write32(cpts, 0, control);
402 479
480 /* Drop all packets */
481 skb_queue_purge(&cpts->txq);
482
403 clk_disable(cpts->refclk); 483 clk_disable(cpts->refclk);
404} 484}
405EXPORT_SYMBOL_GPL(cpts_unregister); 485EXPORT_SYMBOL_GPL(cpts_unregister);
@@ -476,7 +556,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
476 cpts->dev = dev; 556 cpts->dev = dev;
477 cpts->reg = (struct cpsw_cpts __iomem *)regs; 557 cpts->reg = (struct cpsw_cpts __iomem *)regs;
478 spin_lock_init(&cpts->lock); 558 spin_lock_init(&cpts->lock);
479 INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
480 559
481 ret = cpts_of_parse(cpts, node); 560 ret = cpts_of_parse(cpts, node);
482 if (ret) 561 if (ret)
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 01ea82ba9cdc..73d73faf0f38 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -119,13 +119,13 @@ struct cpts {
119 u32 cc_mult; /* for the nominal frequency */ 119 u32 cc_mult; /* for the nominal frequency */
120 struct cyclecounter cc; 120 struct cyclecounter cc;
121 struct timecounter tc; 121 struct timecounter tc;
122 struct delayed_work overflow_work;
123 int phc_index; 122 int phc_index;
124 struct clk *refclk; 123 struct clk *refclk;
125 struct list_head events; 124 struct list_head events;
126 struct list_head pool; 125 struct list_head pool;
127 struct cpts_event pool_data[CPTS_MAX_EVENTS]; 126 struct cpts_event pool_data[CPTS_MAX_EVENTS];
128 unsigned long ov_check_period; 127 unsigned long ov_check_period;
128 struct sk_buff_head txq;
129}; 129};
130 130
131void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb); 131void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
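The cpts changes above move the overflow watchdog off a driver-owned delayed_work and onto the PTP core's auxiliary worker, which also lets the TX path park unmatched skbs on cpts->txq and kick the worker for an early re-check. A minimal sketch of the do_aux_work contract, assuming a hypothetical foo_clock driver (all foo_* names are illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

struct foo_clock {
	struct ptp_clock_info info;
	struct ptp_clock *clock;
	unsigned long ov_check_period;	/* jiffies */
};

static long foo_do_aux_work(struct ptp_clock_info *ptp)
{
	struct foo_clock *foo = container_of(ptp, struct foo_clock, info);

	/* ... read the hardware counter, flush deferred timestamps ... */

	/* A non-negative return re-arms the worker after that many
	 * jiffies; a negative return parks it until someone calls
	 * ptp_schedule_worker() again, as the cpts TX path does above.
	 */
	return (long)foo->ov_check_period;
}

static struct foo_clock foo = {
	.info = {
		.owner		= THIS_MODULE,
		.name		= "foo timer",
		.do_aux_work	= foo_do_aux_work,
	},
	.ov_check_period	= HZ,
};

static int foo_probe(struct device *dev)
{
	foo.clock = ptp_clock_register(&foo.info, dev);
	if (IS_ERR(foo.clock))
		return PTR_ERR(foo.clock);

	ptp_schedule_worker(foo.clock, foo.ov_check_period);
	return 0;
}

The worker is owned by the PTP core, which is why cpts_unregister() above can drop its cancel_delayed_work_sync() call: ptp_clock_unregister() stops the worker before the clock goes away.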
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index d9db8a06afd2..cce9c9ed46aa 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1338,7 +1338,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1338static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status) 1338static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1339{ 1339{
1340 static int count; 1340 static int count;
1341 printk(KERN_WARNING "%s: Fatal Error Intterrupt (%#x):", 1341 printk(KERN_WARNING "%s: Fatal Error Interrupt (%#x):",
1342 dev->name, status); 1342 dev->name, status);
1343 if (status & Int_IntPCI) 1343 if (status & Int_IntPCI)
1344 printk(" IntPCI"); 1344 printk(" IntPCI");
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de8156c6b292..2bbda71818ad 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1091,7 +1091,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
1091 if (data[IFLA_GENEVE_ID]) { 1091 if (data[IFLA_GENEVE_ID]) {
1092 __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); 1092 __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]);
1093 1093
1094 if (vni >= GENEVE_VID_MASK) 1094 if (vni >= GENEVE_N_VID)
1095 return -ERANGE; 1095 return -ERANGE;
1096 } 1096 }
1097 1097
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 1542e837fdfa..f38e32a7ec9c 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -364,7 +364,7 @@ static int gtp_dev_init(struct net_device *dev)
364 364
365 gtp->dev = dev; 365 gtp->dev = dev;
366 366
367 dev->tstats = alloc_percpu(struct pcpu_sw_netstats); 367 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
368 if (!dev->tstats) 368 if (!dev->tstats)
369 return -ENOMEM; 369 return -ENOMEM;
370 370
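Both the gtp hunk above and the ipvlan hunk below swap a bare alloc_percpu() for netdev_alloc_pcpu_stats(). The difference is that the helper also runs u64_stats_init() on every CPU's syncp, the seqcount that lets 32-bit readers fetch 64-bit counters consistently (and that lockdep expects to see initialised). Roughly, the helper expands to this sketch:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

static struct pcpu_sw_netstats __percpu *alloc_tstats(void)
{
	struct pcpu_sw_netstats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct pcpu_sw_netstats);
	if (!stats)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *s = per_cpu_ptr(stats, cpu);

		/* the step a bare alloc_percpu() caller skips */
		u64_stats_init(&s->syncp);
	}
	return stats;
}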
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d6c25580f8dd..12cc64bfcff8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -765,7 +765,8 @@ struct netvsc_device {
765 u32 max_chn; 765 u32 max_chn;
766 u32 num_chn; 766 u32 num_chn;
767 767
768 refcount_t sc_offered; 768 atomic_t open_chn;
769 wait_queue_head_t subchan_open;
769 770
770 struct rndis_device *extension; 771 struct rndis_device *extension;
771 772
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0a9167dd72fb..d18c3326a1f7 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -78,6 +78,7 @@ static struct netvsc_device *alloc_net_device(void)
78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 78 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 79 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
80 init_completion(&net_device->channel_init_wait); 80 init_completion(&net_device->channel_init_wait);
81 init_waitqueue_head(&net_device->subchan_open);
81 82
82 return net_device; 83 return net_device;
83} 84}
@@ -1302,6 +1303,8 @@ int netvsc_device_add(struct hv_device *device,
1302 struct netvsc_channel *nvchan = &net_device->chan_table[i]; 1303 struct netvsc_channel *nvchan = &net_device->chan_table[i];
1303 1304
1304 nvchan->channel = device->channel; 1305 nvchan->channel = device->channel;
1306 u64_stats_init(&nvchan->tx_stats.syncp);
1307 u64_stats_init(&nvchan->rx_stats.syncp);
1305 } 1308 }
1306 1309
1307 /* Enable NAPI handler before init callbacks */ 1310 /* Enable NAPI handler before init callbacks */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 63c98bbbc596..0d78727f1a14 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -315,14 +315,34 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
315 return slots_used; 315 return slots_used;
316} 316}
317 317
318/* Estimate number of page buffers neede to transmit 318static int count_skb_frag_slots(struct sk_buff *skb)
319 * Need at most 2 for RNDIS header plus skb body and fragments. 319{
320 */ 320 int i, frags = skb_shinfo(skb)->nr_frags;
321static unsigned int netvsc_get_slots(const struct sk_buff *skb) 321 int pages = 0;
322
323 for (i = 0; i < frags; i++) {
324 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
325 unsigned long size = skb_frag_size(frag);
326 unsigned long offset = frag->page_offset;
327
328 /* Skip unused frames from start of page */
329 offset &= ~PAGE_MASK;
330 pages += PFN_UP(offset + size);
331 }
332 return pages;
333}
334
335static int netvsc_get_slots(struct sk_buff *skb)
322{ 336{
323 return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) 337 char *data = skb->data;
324 + skb_shinfo(skb)->nr_frags 338 unsigned int offset = offset_in_page(data);
325 + 2; 339 unsigned int len = skb_headlen(skb);
340 int slots;
341 int frag_slots;
342
343 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
344 frag_slots = count_skb_frag_slots(skb);
345 return slots + frag_slots;
326} 346}
327 347
328static u32 net_checksum_info(struct sk_buff *skb) 348static u32 net_checksum_info(struct sk_buff *skb)
@@ -360,18 +380,21 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
360 struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; 380 struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
361 struct hv_page_buffer *pb = page_buf; 381 struct hv_page_buffer *pb = page_buf;
362 382
363 /* We can only transmit MAX_PAGE_BUFFER_COUNT number 383 /* We will need at most two pages to describe the rndis
384 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
364 * of pages in a single packet. If skb is scattered around 385 * of pages in a single packet. If skb is scattered around
365 * more pages we try linearizing it. 386 * more pages we try linearizing it.
366 */ 387 */
367 num_data_pgs = netvsc_get_slots(skb); 388
389 num_data_pgs = netvsc_get_slots(skb) + 2;
390
368 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { 391 if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
369 ++net_device_ctx->eth_stats.tx_scattered; 392 ++net_device_ctx->eth_stats.tx_scattered;
370 393
371 if (skb_linearize(skb)) 394 if (skb_linearize(skb))
372 goto no_memory; 395 goto no_memory;
373 396
374 num_data_pgs = netvsc_get_slots(skb); 397 num_data_pgs = netvsc_get_slots(skb) + 2;
375 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { 398 if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
376 ++net_device_ctx->eth_stats.tx_too_big; 399 ++net_device_ctx->eth_stats.tx_too_big;
377 goto drop; 400 goto drop;
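The rewritten netvsc slot estimate accounts for each fragment's offset within its first page, not just its length. A worked example of the PFN_UP() arithmetic (userspace sketch; macro values copied from the kernel's 4 KiB-page definitions):

#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long size = 6000;	/* fragment length in bytes */
	unsigned long offset = 3000;	/* fragment offset into its page */

	offset &= ~PAGE_MASK;		/* keep only the in-page part */

	/* 3000 + 6000 = 9000 bytes spans three 4 KiB pages ... */
	assert(PFN_UP(offset + size) == 3);
	/* ... while the length alone undercounts it as two */
	assert(PFN_UP(size) == 2);
	return 0;
}

Undercounting slots is what let a packet post more page buffers than MAX_PAGE_BUFFER_COUNT; the two slots for the RNDIS header are now added explicitly at the call sites instead of being baked into the estimate.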
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 85c00e1c52b6..d6308ffda53e 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1048,8 +1048,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1048 else 1048 else
1049 netif_napi_del(&nvchan->napi); 1049 netif_napi_del(&nvchan->napi);
1050 1050
1051 if (refcount_dec_and_test(&nvscdev->sc_offered)) 1051 atomic_inc(&nvscdev->open_chn);
1052 complete(&nvscdev->channel_init_wait); 1052 wake_up(&nvscdev->subchan_open);
1053} 1053}
1054 1054
1055int rndis_filter_device_add(struct hv_device *dev, 1055int rndis_filter_device_add(struct hv_device *dev,
@@ -1090,8 +1090,6 @@ int rndis_filter_device_add(struct hv_device *dev,
1090 net_device->max_chn = 1; 1090 net_device->max_chn = 1;
1091 net_device->num_chn = 1; 1091 net_device->num_chn = 1;
1092 1092
1093 refcount_set(&net_device->sc_offered, 0);
1094
1095 net_device->extension = rndis_device; 1093 net_device->extension = rndis_device;
1096 rndis_device->ndev = net; 1094 rndis_device->ndev = net;
1097 1095
@@ -1221,11 +1219,11 @@ int rndis_filter_device_add(struct hv_device *dev,
1221 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i, 1219 rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
1222 net_device->num_chn); 1220 net_device->num_chn);
1223 1221
1222 atomic_set(&net_device->open_chn, 1);
1224 num_rss_qs = net_device->num_chn - 1; 1223 num_rss_qs = net_device->num_chn - 1;
1225 if (num_rss_qs == 0) 1224 if (num_rss_qs == 0)
1226 return 0; 1225 return 0;
1227 1226
1228 refcount_set(&net_device->sc_offered, num_rss_qs);
1229 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); 1227 vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
1230 1228
1231 init_packet = &net_device->channel_init_pkt; 1229 init_packet = &net_device->channel_init_pkt;
@@ -1242,15 +1240,19 @@ int rndis_filter_device_add(struct hv_device *dev,
1242 if (ret) 1240 if (ret)
1243 goto out; 1241 goto out;
1244 1242
1243 wait_for_completion(&net_device->channel_init_wait);
1245 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1244 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1246 ret = -ENODEV; 1245 ret = -ENODEV;
1247 goto out; 1246 goto out;
1248 } 1247 }
1249 wait_for_completion(&net_device->channel_init_wait);
1250 1248
1251 net_device->num_chn = 1 + 1249 net_device->num_chn = 1 +
1252 init_packet->msg.v5_msg.subchn_comp.num_subchannels; 1250 init_packet->msg.v5_msg.subchn_comp.num_subchannels;
1253 1251
1252 /* wait for all sub channels to open */
1253 wait_event(net_device->subchan_open,
1254 atomic_read(&net_device->open_chn) == net_device->num_chn);
1255
1254 /* ignore failures from setting rss parameters, still have channels */ 1256 /* ignore failures from setting rss parameters, still have channels */
1255 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key, 1257 rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
1256 net_device->num_chn); 1258 net_device->num_chn);
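The refcount-plus-completion scheme for subchannel bring-up is replaced with an open-channel count and a wait queue, so the probe path can sleep until every subchannel callback has actually run. The generic shape, with illustrative names:

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t open_chn = ATOMIC_INIT(1);	/* the primary channel counts */
static DECLARE_WAIT_QUEUE_HEAD(subchan_open);

/* runs once per subchannel, possibly concurrently */
static void on_subchannel_open(void)
{
	atomic_inc(&open_chn);
	wake_up(&subchan_open);
}

/* probe path, after the host has reported how many channels to expect */
static void wait_for_subchannels(int num_chn)
{
	wait_event(subchan_open, atomic_read(&open_chn) == num_chn);
}

Unlike complete()/wait_for_completion(), wait_event() re-evaluates the condition on every wakeup, so the waiter cannot be released before the last channel really is open, and a wakeup racing with the final increment is not lost.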
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index f37e3c1fd4e7..8dab74a81303 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -192,7 +192,7 @@ static int ipvlan_init(struct net_device *dev)
192 192
193 netdev_lockdep_set_classes(dev); 193 netdev_lockdep_set_classes(dev);
194 194
195 ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); 195 ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
196 if (!ipvlan->pcpu_stats) 196 if (!ipvlan->pcpu_stats)
197 return -ENOMEM; 197 return -ENOMEM;
198 198
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 6f6ed75b63c9..765de3bedb88 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
141static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val) 141static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
142{ 142{
143 struct usb_device *dev = mcs->usbdev; 143 struct usb_device *dev = mcs->usbdev;
144 int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, 144 void *dmabuf;
145 MCS_RD_RTYPE, 0, reg, val, 2, 145 int ret;
146 msecs_to_jiffies(MCS_CTRL_TIMEOUT)); 146
147 dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
148 if (!dmabuf)
149 return -ENOMEM;
150
151 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
152 MCS_RD_RTYPE, 0, reg, dmabuf, 2,
153 msecs_to_jiffies(MCS_CTRL_TIMEOUT));
154
155 memcpy(val, dmabuf, sizeof(__u16));
156 kfree(dmabuf);
147 157
148 return ret; 158 return ret;
149} 159}
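The mcs7780 fix exists because buffers passed to usb_control_msg() must be DMA-able, and the caller's *val (often on the stack) is not; a small heap buffer is allocated per transfer instead. A minimal sketch of the pattern for a hypothetical vendor read request (VENDOR_RDREQ and the helper name are illustrative):

#include <linux/slab.h>
#include <linux/usb.h>

#define VENDOR_RDREQ	0x01	/* hypothetical bRequest value */

static int vendor_read_reg(struct usb_device *udev, u16 reg, u16 *val)
{
	__le16 *buf;
	int ret;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-able */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), VENDOR_RDREQ,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, reg,
			      buf, sizeof(*buf), USB_CTRL_GET_TIMEOUT);
	if (ret >= 0)
		*val = le16_to_cpu(*buf);

	kfree(buf);
	return ret;
}

Note that the hunk above copies the buffer back unconditionally; checking the return code first, as in the sketch, avoids handing the caller stale data after a failed transfer.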
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2dda72004a7d..928fd892f167 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,7 +7,16 @@ menuconfig MDIO_DEVICE
7 help 7 help
8 MDIO devices and driver infrastructure code. 8 MDIO devices and driver infrastructure code.
9 9
10if MDIO_DEVICE 10config MDIO_BUS
11 tristate
12 default m if PHYLIB=m
13 default MDIO_DEVICE
14 help
15 This internal symbol is used for link time dependencies and it
16 reflects whether the mdio_bus/mdio_device code is built as a
17 loadable module or built-in.
18
19if MDIO_BUS
11 20
12config MDIO_BCM_IPROC 21config MDIO_BCM_IPROC
13 tristate "Broadcom iProc MDIO bus controller" 22 tristate "Broadcom iProc MDIO bus controller"
@@ -28,7 +37,6 @@ config MDIO_BCM_UNIMAC
28 37
29config MDIO_BITBANG 38config MDIO_BITBANG
30 tristate "Bitbanged MDIO buses" 39 tristate "Bitbanged MDIO buses"
31 depends on !(MDIO_DEVICE=y && PHYLIB=m)
32 help 40 help
33 This module implements the MDIO bus protocol in software, 41 This module implements the MDIO bus protocol in software,
34 for use by low level drivers that export the ability to 42 for use by low level drivers that export the ability to
@@ -127,7 +135,6 @@ config MDIO_THUNDER
127 tristate "ThunderX SOCs MDIO buses" 135 tristate "ThunderX SOCs MDIO buses"
128 depends on 64BIT 136 depends on 64BIT
129 depends on PCI 137 depends on PCI
130 depends on !(MDIO_DEVICE=y && PHYLIB=m)
131 select MDIO_CAVIUM 138 select MDIO_CAVIUM
132 help 139 help
133 This driver supports the MDIO interfaces found on Cavium 140 This driver supports the MDIO interfaces found on Cavium
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 00755b6a42cf..c608e1dfaf09 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -135,8 +135,8 @@ int mdio_mux_init(struct device *dev,
135 for_each_available_child_of_node(dev->of_node, child_bus_node) { 135 for_each_available_child_of_node(dev->of_node, child_bus_node) {
136 int v; 136 int v;
137 137
138 v = of_mdio_parse_addr(dev, child_bus_node); 138 r = of_property_read_u32(child_bus_node, "reg", &v);
139 if (v < 0) { 139 if (r) {
140 dev_err(dev, 140 dev_err(dev,
141 "Error: Failed to find reg for child %s\n", 141 "Error: Failed to find reg for child %s\n",
142 of_node_full_name(child_bus_node)); 142 of_node_full_name(child_bus_node));
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d0626bf5c540..5068c582d502 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -749,6 +749,9 @@ void phy_stop_machine(struct phy_device *phydev)
749 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) 749 if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
750 phydev->state = PHY_UP; 750 phydev->state = PHY_UP;
751 mutex_unlock(&phydev->lock); 751 mutex_unlock(&phydev->lock);
752
753 /* Now we can run the state machine synchronously */
754 phy_state_machine(&phydev->state_queue.work);
752} 755}
753 756
754/** 757/**
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 13028833bee3..a404552555d4 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,6 +120,7 @@ struct ppp {
120 int n_channels; /* how many channels are attached 54 */ 120 int n_channels; /* how many channels are attached 54 */
121 spinlock_t rlock; /* lock for receive side 58 */ 121 spinlock_t rlock; /* lock for receive side 58 */
122 spinlock_t wlock; /* lock for transmit side 5c */ 122 spinlock_t wlock; /* lock for transmit side 5c */
123 int *xmit_recursion __percpu; /* xmit recursion detect */
123 int mru; /* max receive unit 60 */ 124 int mru; /* max receive unit 60 */
124 unsigned int flags; /* control bits 64 */ 125 unsigned int flags; /* control bits 64 */
125 unsigned int xstate; /* transmit state bits 68 */ 126 unsigned int xstate; /* transmit state bits 68 */
@@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1025 struct ppp *ppp = netdev_priv(dev); 1026 struct ppp *ppp = netdev_priv(dev);
1026 int indx; 1027 int indx;
1027 int err; 1028 int err;
1029 int cpu;
1028 1030
1029 ppp->dev = dev; 1031 ppp->dev = dev;
1030 ppp->ppp_net = src_net; 1032 ppp->ppp_net = src_net;
@@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1039 INIT_LIST_HEAD(&ppp->channels); 1041 INIT_LIST_HEAD(&ppp->channels);
1040 spin_lock_init(&ppp->rlock); 1042 spin_lock_init(&ppp->rlock);
1041 spin_lock_init(&ppp->wlock); 1043 spin_lock_init(&ppp->wlock);
1044
1045 ppp->xmit_recursion = alloc_percpu(int);
1046 if (!ppp->xmit_recursion) {
1047 err = -ENOMEM;
1048 goto err1;
1049 }
1050 for_each_possible_cpu(cpu)
1051 (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;
1052
1042#ifdef CONFIG_PPP_MULTILINK 1053#ifdef CONFIG_PPP_MULTILINK
1043 ppp->minseq = -1; 1054 ppp->minseq = -1;
1044 skb_queue_head_init(&ppp->mrq); 1055 skb_queue_head_init(&ppp->mrq);
@@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
1050 1061
1051 err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set); 1062 err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
1052 if (err < 0) 1063 if (err < 0)
1053 return err; 1064 goto err2;
1054 1065
1055 conf->file->private_data = &ppp->file; 1066 conf->file->private_data = &ppp->file;
1056 1067
1057 return 0; 1068 return 0;
1069err2:
1070 free_percpu(ppp->xmit_recursion);
1071err1:
1072 return err;
1058} 1073}
1059 1074
1060static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = { 1075static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
@@ -1400,18 +1415,16 @@ static void __ppp_xmit_process(struct ppp *ppp)
1400 ppp_xmit_unlock(ppp); 1415 ppp_xmit_unlock(ppp);
1401} 1416}
1402 1417
1403static DEFINE_PER_CPU(int, ppp_xmit_recursion);
1404
1405static void ppp_xmit_process(struct ppp *ppp) 1418static void ppp_xmit_process(struct ppp *ppp)
1406{ 1419{
1407 local_bh_disable(); 1420 local_bh_disable();
1408 1421
1409 if (unlikely(__this_cpu_read(ppp_xmit_recursion))) 1422 if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
1410 goto err; 1423 goto err;
1411 1424
1412 __this_cpu_inc(ppp_xmit_recursion); 1425 (*this_cpu_ptr(ppp->xmit_recursion))++;
1413 __ppp_xmit_process(ppp); 1426 __ppp_xmit_process(ppp);
1414 __this_cpu_dec(ppp_xmit_recursion); 1427 (*this_cpu_ptr(ppp->xmit_recursion))--;
1415 1428
1416 local_bh_enable(); 1429 local_bh_enable();
1417 1430
@@ -1902,23 +1915,23 @@ static void __ppp_channel_push(struct channel *pch)
1902 spin_unlock(&pch->downl); 1915 spin_unlock(&pch->downl);
1903 /* see if there is anything from the attached unit to be sent */ 1916 /* see if there is anything from the attached unit to be sent */
1904 if (skb_queue_empty(&pch->file.xq)) { 1917 if (skb_queue_empty(&pch->file.xq)) {
1905 read_lock(&pch->upl);
1906 ppp = pch->ppp; 1918 ppp = pch->ppp;
1907 if (ppp) 1919 if (ppp)
1908 __ppp_xmit_process(ppp); 1920 __ppp_xmit_process(ppp);
1909 read_unlock(&pch->upl);
1910 } 1921 }
1911} 1922}
1912 1923
1913static void ppp_channel_push(struct channel *pch) 1924static void ppp_channel_push(struct channel *pch)
1914{ 1925{
1915 local_bh_disable(); 1926 read_lock_bh(&pch->upl);
1916 1927 if (pch->ppp) {
1917 __this_cpu_inc(ppp_xmit_recursion); 1928 (*this_cpu_ptr(pch->ppp->xmit_recursion))++;
1918 __ppp_channel_push(pch); 1929 __ppp_channel_push(pch);
1919 __this_cpu_dec(ppp_xmit_recursion); 1930 (*this_cpu_ptr(pch->ppp->xmit_recursion))--;
1920 1931 } else {
1921 local_bh_enable(); 1932 __ppp_channel_push(pch);
1933 }
1934 read_unlock_bh(&pch->upl);
1922} 1935}
1923 1936
1924/* 1937/*
@@ -3057,6 +3070,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
3057#endif /* CONFIG_PPP_FILTER */ 3070#endif /* CONFIG_PPP_FILTER */
3058 3071
3059 kfree_skb(ppp->xmit_pending); 3072 kfree_skb(ppp->xmit_pending);
3073 free_percpu(ppp->xmit_recursion);
3060 3074
3061 free_netdev(ppp->dev); 3075 free_netdev(ppp->dev);
3062} 3076}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index eac499c58aa7..6dde9a0cfe76 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -131,7 +131,6 @@ static void del_chan(struct pppox_sock *sock)
131 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); 131 clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
132 RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); 132 RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
133 spin_unlock(&chan_lock); 133 spin_unlock(&chan_lock);
134 synchronize_rcu();
135} 134}
136 135
137static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) 136static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
@@ -520,6 +519,7 @@ static int pptp_release(struct socket *sock)
520 519
521 po = pppox_sk(sk); 520 po = pppox_sk(sk);
522 del_chan(po); 521 del_chan(po);
522 synchronize_rcu();
523 523
524 pppox_unbind_sock(sk); 524 pppox_unbind_sock(sk);
525 sk->sk_state = PPPOX_DEAD; 525 sk->sk_state = PPPOX_DEAD;
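Hoisting synchronize_rcu() out of del_chan() confines the grace-period wait to pptp_release(), a context that is allowed to sleep, while the unpublish step itself stays usable from atomic context. The split, schematically (the foo_* names are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define FOO_MAX 64

struct foo { unsigned int id; };

static struct foo __rcu *foo_table[FOO_MAX];
static DEFINE_SPINLOCK(foo_lock);

/* may be called with locks held / BHs off: only unpublishes */
static void foo_unpublish(struct foo *f)
{
	spin_lock(&foo_lock);
	RCU_INIT_POINTER(foo_table[f->id], NULL);
	spin_unlock(&foo_lock);
}

/* process context only: waits out readers before freeing */
static void foo_release(struct foo *f)
{
	foo_unpublish(f);
	synchronize_rcu();	/* no rcu_read_lock() reader can still see f */
	kfree(f);
}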
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 464570409796..ae53e899259f 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -60,11 +60,11 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
60static int __set_port_dev_addr(struct net_device *port_dev, 60static int __set_port_dev_addr(struct net_device *port_dev,
61 const unsigned char *dev_addr) 61 const unsigned char *dev_addr)
62{ 62{
63 struct sockaddr addr; 63 struct sockaddr_storage addr;
64 64
65 memcpy(addr.sa_data, dev_addr, port_dev->addr_len); 65 memcpy(addr.__data, dev_addr, port_dev->addr_len);
66 addr.sa_family = port_dev->type; 66 addr.ss_family = port_dev->type;
67 return dev_set_mac_address(port_dev, &addr); 67 return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
68} 68}
69 69
70static int team_port_set_orig_dev_addr(struct team_port *port) 70static int team_port_set_orig_dev_addr(struct team_port *port)
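The team fix is a stack-overflow repair: struct net_device::addr_len can exceed the 14 bytes of sockaddr's sa_data (InfiniBand hardware addresses are 20 bytes), so the memcpy() could write past the local variable. The two layouts, abbreviated from the kernel headers (the *_abbrev names are illustrative copies, not real types):

/* struct sockaddr: 16 bytes total */
struct sockaddr_abbrev {
	unsigned short sa_family;
	char sa_data[14];	/* 6 bytes short of an InfiniBand address */
};

/* struct sockaddr_storage: 128 bytes (_K_SS_MAXSIZE), suitably aligned */
struct sockaddr_storage_abbrev {
	unsigned short ss_family;
	char __data[128 - sizeof(unsigned short)];	/* fits any family */
};

Casting the storage variant down to (struct sockaddr *) at the dev_set_mac_address() call is safe because the callee reads only addr_len bytes of address data.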
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3d4c24572ecd..0a2c0a42283f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1879,6 +1879,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1879 1879
1880err_detach: 1880err_detach:
1881 tun_detach_all(dev); 1881 tun_detach_all(dev);
1882 /* register_netdevice() already called tun_free_netdev() */
1883 goto err_free_dev;
1884
1882err_free_flow: 1885err_free_flow:
1883 tun_flow_uninit(tun); 1886 tun_flow_uninit(tun);
1884 security_tun_dev_free_security(tun->security); 1887 security_tun_dev_free_security(tun->security);
@@ -2598,8 +2601,16 @@ static int __init tun_init(void)
2598 goto err_misc; 2601 goto err_misc;
2599 } 2602 }
2600 2603
2601 register_netdevice_notifier(&tun_notifier_block); 2604 ret = register_netdevice_notifier(&tun_notifier_block);
2605 if (ret) {
2606 pr_err("Can't register netdevice notifier\n");
2607 goto err_notifier;
2608 }
2609
2602 return 0; 2610 return 0;
2611
2612err_notifier:
2613 misc_deregister(&tun_miscdev);
2603err_misc: 2614err_misc:
2604 rtnl_link_unregister(&tun_link_ops); 2615 rtnl_link_unregister(&tun_link_ops);
2605err_linkops: 2616err_linkops:
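tun_init() previously ignored the return value of register_netdevice_notifier(); the fix checks it and unwinds the earlier registrations. The shape of the pattern, with register_a/b/c as illustrative stand-ins for rtnl_link_register(), misc_register() and register_netdevice_notifier():

#include <linux/init.h>

int register_a(void);	void unregister_a(void);
int register_b(void);	void unregister_b(void);
int register_c(void);

static int __init foo_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto err_a;

	ret = register_b();
	if (ret)
		goto err_b;

	ret = register_c();	/* return value was previously ignored */
	if (ret)
		goto err_c;

	return 0;

err_c:
	unregister_b();
err_b:
	unregister_a();
err_a:
	return ret;
}

Each label releases exactly the resources acquired before the failing step, in reverse order, which is the same discipline the err_detach change above relies on: once register_netdevice() has taken ownership, the error path must not free the device a second time.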
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index d1092421aaa7..9a4171b90947 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -209,6 +209,7 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
209int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 209int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
210 struct asix_rx_fixup_info *rx); 210 struct asix_rx_fixup_info *rx);
211int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb); 211int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb);
212void asix_rx_fixup_common_free(struct asix_common_private *dp);
212 213
213struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 214struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
214 gfp_t flags); 215 gfp_t flags);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7847436c441e..522d2900cd1d 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -75,6 +75,27 @@ void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
75 value, index, data, size); 75 value, index, data, size);
76} 76}
77 77
78static void reset_asix_rx_fixup_info(struct asix_rx_fixup_info *rx)
79{
80 /* Reset the variables that have a lifetime outside of
81 * asix_rx_fixup_internal() so that future processing starts from a
82 * known set of initial conditions.
83 */
84
85 if (rx->ax_skb) {
86 /* Discard any incomplete Ethernet frame in the netdev buffer */
87 kfree_skb(rx->ax_skb);
88 rx->ax_skb = NULL;
89 }
90
91 /* Assume the Data header 32-bit word is at the start of the current
92 * or next URB socket buffer so reset all the state variables.
93 */
94 rx->remaining = 0;
95 rx->split_head = false;
96 rx->header = 0;
97}
98
78int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, 99int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
79 struct asix_rx_fixup_info *rx) 100 struct asix_rx_fixup_info *rx)
80{ 101{
@@ -99,15 +120,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
99 if (size != ((~rx->header >> 16) & 0x7ff)) { 120 if (size != ((~rx->header >> 16) & 0x7ff)) {
100 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n", 121 netdev_err(dev->net, "asix_rx_fixup() Data Header synchronisation was lost, remaining %d\n",
101 rx->remaining); 122 rx->remaining);
102 if (rx->ax_skb) { 123 reset_asix_rx_fixup_info(rx);
103 kfree_skb(rx->ax_skb);
104 rx->ax_skb = NULL;
105 /* Discard the incomplete netdev Ethernet frame
106 * and assume the Data header is at the start of
107 * the current URB socket buffer.
108 */
109 }
110 rx->remaining = 0;
111 } 124 }
112 } 125 }
113 126
@@ -139,11 +152,13 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
139 if (size != ((~rx->header >> 16) & 0x7ff)) { 152 if (size != ((~rx->header >> 16) & 0x7ff)) {
140 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n", 153 netdev_err(dev->net, "asix_rx_fixup() Bad Header Length 0x%x, offset %d\n",
141 rx->header, offset); 154 rx->header, offset);
155 reset_asix_rx_fixup_info(rx);
142 return 0; 156 return 0;
143 } 157 }
144 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { 158 if (size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) {
145 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 159 netdev_dbg(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
146 size); 160 size);
161 reset_asix_rx_fixup_info(rx);
147 return 0; 162 return 0;
148 } 163 }
149 164
@@ -168,8 +183,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
168 if (rx->ax_skb) { 183 if (rx->ax_skb) {
169 skb_put_data(rx->ax_skb, skb->data + offset, 184 skb_put_data(rx->ax_skb, skb->data + offset,
170 copy_length); 185 copy_length);
171 if (!rx->remaining) 186 if (!rx->remaining) {
172 usbnet_skb_return(dev, rx->ax_skb); 187 usbnet_skb_return(dev, rx->ax_skb);
188 rx->ax_skb = NULL;
189 }
173 } 190 }
174 191
175 offset += (copy_length + 1) & 0xfffe; 192 offset += (copy_length + 1) & 0xfffe;
@@ -178,6 +195,7 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb,
178 if (skb->len != offset) { 195 if (skb->len != offset) {
179 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n", 196 netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d, %d\n",
180 skb->len, offset); 197 skb->len, offset);
198 reset_asix_rx_fixup_info(rx);
181 return 0; 199 return 0;
182 } 200 }
183 201
@@ -192,6 +210,21 @@ int asix_rx_fixup_common(struct usbnet *dev, struct sk_buff *skb)
192 return asix_rx_fixup_internal(dev, skb, rx); 210 return asix_rx_fixup_internal(dev, skb, rx);
193} 211}
194 212
213void asix_rx_fixup_common_free(struct asix_common_private *dp)
214{
215 struct asix_rx_fixup_info *rx;
216
217 if (!dp)
218 return;
219
220 rx = &dp->rx_fixup_info;
221
222 if (rx->ax_skb) {
223 kfree_skb(rx->ax_skb);
224 rx->ax_skb = NULL;
225 }
226}
227
195struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, 228struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
196 gfp_t flags) 229 gfp_t flags)
197{ 230{
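The asix change funnels every framing-error exit through one helper, and unbind now calls it too, so a partially reassembled frame can no longer leak or poison the next session. The state-machine reset in isolation (struct rx_state is an illustrative reduction of asix_rx_fixup_info):

#include <linux/skbuff.h>

struct rx_state {
	struct sk_buff *ax_skb;	/* partially reassembled frame, if any */
	u32 header;		/* last Data header word seen */
	int remaining;		/* bytes still expected for ax_skb */
	bool split_head;	/* header word split across URBs */
};

static void rx_state_reset(struct rx_state *rx)
{
	kfree_skb(rx->ax_skb);	/* kfree_skb(NULL) is a no-op */
	rx->ax_skb = NULL;

	/* assume the next Data header starts a fresh URB buffer */
	rx->remaining = 0;
	rx->split_head = false;
	rx->header = 0;
}

The companion hunk that NULLs rx->ax_skb after usbnet_skb_return() matters for the same reason: once ownership of the skb has been handed up the stack, a later reset must not free it again.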
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index a3aa0a27dfe5..b2ff88e69a81 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -764,6 +764,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
764 764
765static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf) 765static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
766{ 766{
767 asix_rx_fixup_common_free(dev->driver_priv);
767 kfree(dev->driver_priv); 768 kfree(dev->driver_priv);
768} 769}
769 770
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d103a1d4fb36..8f572b9f3625 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -768,8 +768,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
768 u8 *buf; 768 u8 *buf;
769 int len; 769 int len;
770 int temp; 770 int temp;
771 int err;
771 u8 iface_no; 772 u8 iface_no;
772 struct usb_cdc_parsed_header hdr; 773 struct usb_cdc_parsed_header hdr;
774 u16 curr_ntb_format;
773 775
774 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 776 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
775 if (!ctx) 777 if (!ctx)
@@ -874,6 +876,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
874 goto error2; 876 goto error2;
875 } 877 }
876 878
879 /*
880 * Some Huawei devices have been observed to come out of reset in NTB32
881 * format mode. Let's check if this is the case, and set the device back
882 * to NTB16 format if needed.
883 */
884 if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) {
885 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT,
886 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
887 0, iface_no, &curr_ntb_format, 2);
888 if (err < 0) {
889 goto error2;
890 }
891
892 if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) {
893 dev_info(&intf->dev, "resetting NTB format to 16-bit");
894 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
895 USB_TYPE_CLASS | USB_DIR_OUT
896 | USB_RECIP_INTERFACE,
897 USB_CDC_NCM_NTB16_FORMAT,
898 iface_no, NULL, 0);
899
900 if (err < 0)
901 goto error2;
902 }
903 }
904
877 cdc_ncm_find_endpoints(dev, ctx->data); 905 cdc_ncm_find_endpoints(dev, ctx->data);
878 cdc_ncm_find_endpoints(dev, ctx->control); 906 cdc_ncm_find_endpoints(dev, ctx->control);
879 if (!dev->in || !dev->out || !dev->status) { 907 if (!dev->in || !dev->out || !dev->status) {
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 2680a65cd5e4..63f28908afda 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
80 * be at the end of the frame. 80 * be at the end of the frame.
81 */ 81 */
82 drvflags |= CDC_NCM_FLAG_NDP_TO_END; 82 drvflags |= CDC_NCM_FLAG_NDP_TO_END;
83
84 /* Additionally, it has been reported that some Huawei E3372H devices, with
85 * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode,
86 * and therefore need to be switched back to NTB16 format.
87 */
88 drvflags |= CDC_NCM_FLAG_RESET_NTB16;
83 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); 89 ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags);
84 if (ret) 90 if (ret)
85 goto err; 91 goto err;
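cdc_ncm and huawei_cdc_ncm cooperate through the drvflags word: the minidriver declares the quirk (CDC_NCM_FLAG_RESET_NTB16) and the shared bind code performs the GET/SET_NTB_FORMAT exchange only when the flag is set. The pattern in miniature (the FOO_* names and helpers are illustrative):

#include <linux/bitops.h>

#define FOO_FLAG_RESET_FMT	BIT(1)	/* illustrative quirk flag */

struct foo_dev { unsigned long drvflags; };

int foo_get_format(struct foo_dev *dev);	/* stand-ins for the two */
int foo_set_format16(struct foo_dev *dev);	/* USB control transfers */

/* shared bind path: applies the fixup only when a minidriver asked */
static int foo_bind_common(struct foo_dev *dev, unsigned long drvflags)
{
	dev->drvflags = drvflags;

	if (drvflags & FOO_FLAG_RESET_FMT) {
		int fmt = foo_get_format(dev);

		if (fmt < 0)
			return fmt;		/* query failed */
		if (fmt != 16)
			return foo_set_format16(dev);
	}
	return 0;
}

/* vendor minidriver: declares the quirk, then defers to common code */
static int foo_bind_vendor(struct foo_dev *dev)
{
	return foo_bind_common(dev, FOO_FLAG_RESET_FMT);
}

Keeping the quirk knowledge in the minidriver and the mechanism in the common code means other affected devices only need to set the flag.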
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 5833f7e2a127..b99a7fb09f8e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2367,9 +2367,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2367 /* Init LTM */ 2367 /* Init LTM */
2368 lan78xx_init_ltm(dev); 2368 lan78xx_init_ltm(dev);
2369 2369
2370 dev->net->hard_header_len += TX_OVERHEAD;
2371 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2372
2373 if (dev->udev->speed == USB_SPEED_SUPER) { 2370 if (dev->udev->speed == USB_SPEED_SUPER) {
2374 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE; 2371 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2375 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; 2372 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
@@ -2855,16 +2852,19 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2855 return ret; 2852 return ret;
2856 } 2853 }
2857 2854
2855 dev->net->hard_header_len += TX_OVERHEAD;
2856 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2857
2858 /* Init all registers */ 2858 /* Init all registers */
2859 ret = lan78xx_reset(dev); 2859 ret = lan78xx_reset(dev);
2860 2860
2861 lan78xx_mdio_init(dev); 2861 ret = lan78xx_mdio_init(dev);
2862 2862
2863 dev->net->flags |= IFF_MULTICAST; 2863 dev->net->flags |= IFF_MULTICAST;
2864 2864
2865 pdata->wol = WAKE_MAGIC; 2865 pdata->wol = WAKE_MAGIC;
2866 2866
2867 return 0; 2867 return ret;
2868} 2868}
2869 2869
2870static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) 2870static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
@@ -3525,11 +3525,11 @@ static int lan78xx_probe(struct usb_interface *intf,
3525 udev = interface_to_usbdev(intf); 3525 udev = interface_to_usbdev(intf);
3526 udev = usb_get_dev(udev); 3526 udev = usb_get_dev(udev);
3527 3527
3528 ret = -ENOMEM;
3529 netdev = alloc_etherdev(sizeof(struct lan78xx_net)); 3528 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3530 if (!netdev) { 3529 if (!netdev) {
3531 dev_err(&intf->dev, "Error: OOM\n"); 3530 dev_err(&intf->dev, "Error: OOM\n");
3532 goto out1; 3531 ret = -ENOMEM;
3532 goto out1;
3533 } 3533 }
3534 3534
3535 /* netdev_printk() needs this */ 3535 /* netdev_printk() needs this */
@@ -3610,7 +3610,7 @@ static int lan78xx_probe(struct usb_interface *intf,
3610 ret = register_netdev(netdev); 3610 ret = register_netdev(netdev);
3611 if (ret != 0) { 3611 if (ret != 0) {
3612 netif_err(dev, probe, netdev, "couldn't register the device\n"); 3612 netif_err(dev, probe, netdev, "couldn't register the device\n");
3613 goto out2; 3613 goto out3;
3614 } 3614 }
3615 3615
3616 usb_set_intfdata(intf, dev); 3616 usb_set_intfdata(intf, dev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5894e3c9468f..8c3733608271 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1175,6 +1175,7 @@ static const struct usb_device_id products[] = {
1175 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */ 1175 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
1176 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1176 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1177 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1177 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1178 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1178 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1179 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1179 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1180 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1180 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ 1181 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -1340,10 +1341,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
1340static void qmi_wwan_disconnect(struct usb_interface *intf) 1341static void qmi_wwan_disconnect(struct usb_interface *intf)
1341{ 1342{
1342 struct usbnet *dev = usb_get_intfdata(intf); 1343 struct usbnet *dev = usb_get_intfdata(intf);
1343 struct qmi_wwan_state *info = (void *)&dev->data; 1344 struct qmi_wwan_state *info;
1344 struct list_head *iter; 1345 struct list_head *iter;
1345 struct net_device *ldev; 1346 struct net_device *ldev;
1346 1347
1348 /* called twice if separate control and data intf */
1349 if (!dev)
1350 return;
1351 info = (void *)&dev->data;
1347 if (info->flags & QMI_WWAN_FLAG_MUX) { 1352 if (info->flags & QMI_WWAN_FLAG_MUX) {
1348 if (!rtnl_trylock()) { 1353 if (!rtnl_trylock()) {
1349 restart_syscall(); 1354 restart_syscall();
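qmi_wwan devices that expose separate control and data interfaces get qmi_wwan_disconnect() called once per interface, but only the first call still finds the intfdata; the old code dereferenced it unconditionally. The guard in isolation (foo_* names illustrative):

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

void foo_teardown(struct usbnet *dev);	/* stand-in for the real cleanup */

static void foo_disconnect(struct usb_interface *intf)
{
	struct usbnet *dev = usb_get_intfdata(intf);

	/* second interface of the same device: nothing left to do */
	if (!dev)
		return;

	foo_teardown(dev);
	usb_set_intfdata(intf, NULL);
}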
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2dfca96a63b6..340c13484e5c 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -898,6 +898,7 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
898 .set_wol = smsc95xx_ethtool_set_wol, 898 .set_wol = smsc95xx_ethtool_set_wol,
899 .get_link_ksettings = smsc95xx_get_link_ksettings, 899 .get_link_ksettings = smsc95xx_get_link_ksettings,
900 .set_link_ksettings = smsc95xx_set_link_ksettings, 900 .set_link_ksettings = smsc95xx_set_link_ksettings,
901 .get_ts_info = ethtool_op_get_ts_info,
901}; 902};
902 903
903static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) 904static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 99a26a9efec1..98f17b05c68b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -889,21 +889,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
889 889
890 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; 890 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
891 buf += headroom; /* advance address leaving hole at front of pkt */ 891 buf += headroom; /* advance address leaving hole at front of pkt */
892 ctx = (void *)(unsigned long)len;
893 get_page(alloc_frag->page); 892 get_page(alloc_frag->page);
894 alloc_frag->offset += len + headroom; 893 alloc_frag->offset += len + headroom;
895 hole = alloc_frag->size - alloc_frag->offset; 894 hole = alloc_frag->size - alloc_frag->offset;
896 if (hole < len + headroom) { 895 if (hole < len + headroom) {
897 /* To avoid internal fragmentation, if there is very likely not 896 /* To avoid internal fragmentation, if there is very likely not
898 * enough space for another buffer, add the remaining space to 897 * enough space for another buffer, add the remaining space to
899 * the current buffer. This extra space is not included in 898 * the current buffer.
900 * the truesize stored in ctx.
901 */ 899 */
902 len += hole; 900 len += hole;
903 alloc_frag->offset += hole; 901 alloc_frag->offset += hole;
904 } 902 }
905 903
906 sg_init_one(rq->sg, buf, len); 904 sg_init_one(rq->sg, buf, len);
905 ctx = (void *)(unsigned long)len;
907 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); 906 err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
908 if (err < 0) 907 if (err < 0)
909 put_page(virt_to_head_page(buf)); 908 put_page(virt_to_head_page(buf));
@@ -2743,9 +2742,9 @@ module_init(virtio_net_driver_init);
2743 2742
2744static __exit void virtio_net_driver_exit(void) 2743static __exit void virtio_net_driver_exit(void)
2745{ 2744{
2745 unregister_virtio_driver(&virtio_net_driver);
2746 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD); 2746 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2747 cpuhp_remove_multi_state(virtionet_online); 2747 cpuhp_remove_multi_state(virtionet_online);
2748 unregister_virtio_driver(&virtio_net_driver);
2749} 2748}
2750module_exit(virtio_net_driver_exit); 2749module_exit(virtio_net_driver_exit);
2751 2750
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index ba1c9f93592b..9c51b8be0038 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -311,7 +311,7 @@ struct vmxnet3_intr {
311 u8 num_intrs; /* # of intr vectors */ 311 u8 num_intrs; /* # of intr vectors */
312 u8 event_intr_idx; /* idx of the intr vector for event */ 312 u8 event_intr_idx; /* idx of the intr vector for event */
313 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ 313 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
314 char event_msi_vector_name[IFNAMSIZ+11]; 314 char event_msi_vector_name[IFNAMSIZ+17];
315#ifdef CONFIG_PCI_MSI 315#ifdef CONFIG_PCI_MSI
316 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; 316 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
317#endif 317#endif
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 96aa7e6cf214..e17baac70f43 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,6 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
623 623
624out: 624out:
625 skb_gro_remcsum_cleanup(skb, &grc); 625 skb_gro_remcsum_cleanup(skb, &grc);
626 skb->remcsum_offload = 0;
626 NAPI_GRO_CB(skb)->flush |= flush; 627 NAPI_GRO_CB(skb)->flush |= flush;
627 628
628 return pp; 629 return pp;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 2153e8062b4c..5cc3a07dda9e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -214,7 +214,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
214 214
215 /* Make sure there's enough writeable headroom */ 215 /* Make sure there's enough writeable headroom */
216 if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { 216 if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) {
217 head_delta = drvr->hdrlen - skb_headroom(skb); 217 head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0);
218 218
219 brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n", 219 brcmf_dbg(INFO, "%s: insufficient headroom (%d)\n",
220 brcmf_ifname(ifp), head_delta); 220 brcmf_ifname(ifp), head_delta);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index d21258d277ce..f1b60740e020 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -159,8 +159,10 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
159 159
160 brcmf_feat_firmware_capabilities(ifp); 160 brcmf_feat_firmware_capabilities(ifp);
161 memset(&gscan_cfg, 0, sizeof(gscan_cfg)); 161 memset(&gscan_cfg, 0, sizeof(gscan_cfg));
162 brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN, "pfn_gscan_cfg", 162 if (drvr->bus_if->chip != BRCM_CC_43430_CHIP_ID)
163 &gscan_cfg, sizeof(gscan_cfg)); 163 brcmf_feat_iovar_data_set(ifp, BRCMF_FEAT_GSCAN,
164 "pfn_gscan_cfg",
165 &gscan_cfg, sizeof(gscan_cfg));
164 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); 166 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
165 if (drvr->bus_if->wowl_supported) 167 if (drvr->bus_if->wowl_supported)
166 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); 168 brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index fbcbb4325936..f3556122c6ac 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -2053,12 +2053,13 @@ static int brcmf_sdio_txpkt_hdalign(struct brcmf_sdio *bus, struct sk_buff *pkt)
2053 atomic_inc(&stats->pktcow_failed); 2053 atomic_inc(&stats->pktcow_failed);
2054 return -ENOMEM; 2054 return -ENOMEM;
2055 } 2055 }
2056 head_pad = 0;
2056 } 2057 }
2057 skb_push(pkt, head_pad); 2058 skb_push(pkt, head_pad);
2058 dat_buf = (u8 *)(pkt->data); 2059 dat_buf = (u8 *)(pkt->data);
2059 } 2060 }
2060 memset(dat_buf, 0, head_pad + bus->tx_hdrlen); 2061 memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
2061 return 0; 2062 return head_pad;
2062} 2063}
2063 2064
2064/** 2065/**
@@ -4174,11 +4175,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
4174 goto fail; 4175 goto fail;
4175 } 4176 }
4176 4177
4177 /* allocate scatter-gather table. sg support
4178 * will be disabled upon allocation failure.
4179 */
4180 brcmf_sdiod_sgtable_alloc(bus->sdiodev);
4181
4182 /* Query the F2 block size, set roundup accordingly */ 4178 /* Query the F2 block size, set roundup accordingly */
4183 bus->blocksize = bus->sdiodev->func[2]->cur_blksize; 4179 bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
4184 bus->roundup = min(max_roundup, bus->blocksize); 4180 bus->roundup = min(max_roundup, bus->blocksize);
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index b4ecd1fe1374..97208ce19f92 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -154,7 +154,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
154const struct iwl_cfg iwl9160_2ac_cfg = { 154const struct iwl_cfg iwl9160_2ac_cfg = {
155 .name = "Intel(R) Dual Band Wireless AC 9160", 155 .name = "Intel(R) Dual Band Wireless AC 9160",
156 .fw_name_pre = IWL9260A_FW_PRE, 156 .fw_name_pre = IWL9260A_FW_PRE,
157 .fw_name_pre_next_step = IWL9260B_FW_PRE, 157 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
158 IWL_DEVICE_9000, 158 IWL_DEVICE_9000,
159 .ht_params = &iwl9000_ht_params, 159 .ht_params = &iwl9000_ht_params,
160 .nvm_ver = IWL9000_NVM_VERSION, 160 .nvm_ver = IWL9000_NVM_VERSION,
@@ -165,7 +165,7 @@ const struct iwl_cfg iwl9160_2ac_cfg = {
165const struct iwl_cfg iwl9260_2ac_cfg = { 165const struct iwl_cfg iwl9260_2ac_cfg = {
166 .name = "Intel(R) Dual Band Wireless AC 9260", 166 .name = "Intel(R) Dual Band Wireless AC 9260",
167 .fw_name_pre = IWL9260A_FW_PRE, 167 .fw_name_pre = IWL9260A_FW_PRE,
168 .fw_name_pre_next_step = IWL9260B_FW_PRE, 168 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
169 IWL_DEVICE_9000, 169 IWL_DEVICE_9000,
170 .ht_params = &iwl9000_ht_params, 170 .ht_params = &iwl9000_ht_params,
171 .nvm_ver = IWL9000_NVM_VERSION, 171 .nvm_ver = IWL9000_NVM_VERSION,
@@ -176,7 +176,7 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
176const struct iwl_cfg iwl9270_2ac_cfg = { 176const struct iwl_cfg iwl9270_2ac_cfg = {
177 .name = "Intel(R) Dual Band Wireless AC 9270", 177 .name = "Intel(R) Dual Band Wireless AC 9270",
178 .fw_name_pre = IWL9260A_FW_PRE, 178 .fw_name_pre = IWL9260A_FW_PRE,
179 .fw_name_pre_next_step = IWL9260B_FW_PRE, 179 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
180 IWL_DEVICE_9000, 180 IWL_DEVICE_9000,
181 .ht_params = &iwl9000_ht_params, 181 .ht_params = &iwl9000_ht_params,
182 .nvm_ver = IWL9000_NVM_VERSION, 182 .nvm_ver = IWL9000_NVM_VERSION,
@@ -186,8 +186,8 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
186 186
187const struct iwl_cfg iwl9460_2ac_cfg = { 187const struct iwl_cfg iwl9460_2ac_cfg = {
188 .name = "Intel(R) Dual Band Wireless AC 9460", 188 .name = "Intel(R) Dual Band Wireless AC 9460",
189 .fw_name_pre = IWL9000_FW_PRE, 189 .fw_name_pre = IWL9260A_FW_PRE,
190 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, 190 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
191 IWL_DEVICE_9000, 191 IWL_DEVICE_9000,
192 .ht_params = &iwl9000_ht_params, 192 .ht_params = &iwl9000_ht_params,
193 .nvm_ver = IWL9000_NVM_VERSION, 193 .nvm_ver = IWL9000_NVM_VERSION,
@@ -198,8 +198,8 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
198 198
199const struct iwl_cfg iwl9560_2ac_cfg = { 199const struct iwl_cfg iwl9560_2ac_cfg = {
200 .name = "Intel(R) Dual Band Wireless AC 9560", 200 .name = "Intel(R) Dual Band Wireless AC 9560",
201 .fw_name_pre = IWL9000_FW_PRE, 201 .fw_name_pre = IWL9260A_FW_PRE,
202 .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, 202 .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
203 IWL_DEVICE_9000, 203 IWL_DEVICE_9000,
204 .ht_params = &iwl9000_ht_params, 204 .ht_params = &iwl9000_ht_params,
205 .nvm_ver = IWL9000_NVM_VERSION, 205 .nvm_ver = IWL9000_NVM_VERSION,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index adaa2f0097cc..fb40ddfced99 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -1189,11 +1189,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
1189 next_reclaimed; 1189 next_reclaimed;
1190 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n", 1190 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1191 next_reclaimed); 1191 next_reclaimed);
1192 iwlagn_check_ratid_empty(priv, sta_id, tid);
1192 } 1193 }
1193 1194
1194 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); 1195 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
1195 1196
1196 iwlagn_check_ratid_empty(priv, sta_id, tid);
1197 freed = 0; 1197 freed = 0;
1198 1198
1199 /* process frames */ 1199 /* process frames */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 0fa8c473f1e2..c73a6438ce8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -328,6 +328,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
328 * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger 328 * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
329 * command size (command version 4) that supports toggling ACK TX 329 * command size (command version 4) that supports toggling ACK TX
330 * power reduction. 330 * power reduction.
331 * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
331 * 332 *
332 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used 333 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
333 */ 334 */
@@ -373,6 +374,7 @@ enum iwl_ucode_tlv_capa {
373 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, 374 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
374 IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, 375 IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
375 IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, 376 IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84,
377 IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96,
376 378
377 NUM_IWL_UCODE_TLV_CAPA 379 NUM_IWL_UCODE_TLV_CAPA
378#ifdef __CHECKER__ 380#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index c52623cb7c2a..d19c74827fbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -276,10 +276,10 @@ struct iwl_pwr_tx_backoff {
  * @fw_name_pre: Firmware filename prefix. The api version and extension
  *	(.ucode) will be added to filename before loading from disk. The
  *	filename is constructed as fw_name_pre<api>.ucode.
- * @fw_name_pre_next_step: same as @fw_name_pre, only for next step
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
  *	(if supported)
- * @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
- *	step. Supported only in integrated solutions.
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ *	next step. Supported only in integrated solutions.
  * @ucode_api_max: Highest version of uCode API supported by driver.
  * @ucode_api_min: Lowest version of uCode API supported by driver.
  * @max_inst_size: The maximal length of the fw inst section
@@ -330,7 +330,7 @@ struct iwl_cfg {
 	/* params specific to an individual device within a device family */
 	const char *name;
 	const char *fw_name_pre;
-	const char *fw_name_pre_next_step;
+	const char *fw_name_pre_b_or_c_step;
 	const char *fw_name_pre_rf_next_step;
 	/* params not likely to change within a device family */
 	const struct iwl_base_params *base_params;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index 545d14b0bc92..f5c1127253cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -55,8 +55,8 @@ static inline bool iwl_trace_data(struct sk_buff *skb)
 	/* also account for the RFC 1042 header, of course */
 	offs += 6;
 
-	return skb->len > offs + 2 &&
-	       *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE);
+	return skb->len <= offs + 2 ||
+	       *(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
 }
 
 static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 6fdb5921e17f..4e0f86fe0a6f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -216,8 +216,9 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 	const char *fw_pre_name;
 
 	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
-	    CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
-		fw_pre_name = cfg->fw_name_pre_next_step;
+	    (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+	     CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+		fw_pre_name = cfg->fw_name_pre_b_or_c_step;
 	else if (drv->trans->cfg->integrated &&
 		 CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
 		 cfg->fw_name_pre_rf_next_step)
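The branch above only selects a prefix; as the iwl-config.h comment in this patch notes, the file actually requested is fw_name_pre<api>.ucode. A minimal sketch of that final construction (helper name hypothetical):

    #include <stdio.h>

    /* Sketch: combine the step-dependent prefix with the uCode API
     * version to form the firmware filename the driver will request. */
    static void iwl_build_fw_name(char *buf, size_t len,
                                  const char *fw_pre_name, int api)
    {
            snprintf(buf, len, "%s%d.ucode", fw_pre_name, api);
    }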
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 5c08f4d40f6a..3ee6767392b6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -785,7 +785,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 			   int num_of_ch, __le32 *channels, u16 fw_mcc)
 {
 	int ch_idx;
-	u16 ch_flags, prev_ch_flags = 0;
+	u16 ch_flags;
+	u32 reg_rule_flags, prev_reg_rule_flags = 0;
 	const u8 *nvm_chan = cfg->ext_nvm ?
 			     iwl_ext_nvm_channels : iwl_nvm_channels;
 	struct ieee80211_regdomain *regd;
@@ -834,8 +835,11 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 			continue;
 		}
 
+		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+							     ch_flags, cfg);
+
 		/* we can't continue the same rule */
-		if (ch_idx == 0 || prev_ch_flags != ch_flags ||
+		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
 		    center_freq - prev_center_freq > 20) {
 			valid_rules++;
 			new_rule = true;
@@ -854,18 +858,17 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		rule->power_rule.max_eirp =
 			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
 
-		rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-							  ch_flags, cfg);
+		rule->flags = reg_rule_flags;
 
 		/* rely on auto-calculation to merge BW of contiguous chans */
 		rule->flags |= NL80211_RRF_AUTO_BW;
 		rule->freq_range.max_bandwidth_khz = 0;
 
-		prev_ch_flags = ch_flags;
 		prev_center_freq = center_freq;
+		prev_reg_rule_flags = reg_rule_flags;
 
 		IWL_DEBUG_DEV(dev, IWL_DL_LAR,
-			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
+			      "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x) reg_flags 0x%x: %s\n",
 			      center_freq,
 			      band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
 			      CHECK_AND_PRINT_I(VALID),
@@ -877,10 +880,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 			      CHECK_AND_PRINT_I(160MHZ),
 			      CHECK_AND_PRINT_I(INDOOR_ONLY),
 			      CHECK_AND_PRINT_I(GO_CONCURRENT),
-			      ch_flags,
+			      ch_flags, reg_rule_flags,
 			      ((ch_flags & NVM_CHANNEL_ACTIVE) &&
 			       !(ch_flags & NVM_CHANNEL_RADAR))
-			       ? "" : "not ");
+			       ? "Ad-Hoc" : "");
 	}
 
 	regd->n_reg_rules = valid_rules;
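The restructured loop derives the regulatory flags once per channel and then reuses them both for the rule and for the merge decision, so two channels with different NVM flags but identical derived rules can now share one rule. The merge test in isolation, as a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: a new regulatory rule is opened only when the derived
     * flags change or the channels stop being contiguous (more than
     * 20 MHz apart), mirroring the test in the hunk above. */
    static bool need_new_rule(int ch_idx, uint32_t flags, uint32_t prev_flags,
                              int freq, int prev_freq)
    {
            return ch_idx == 0 || prev_flags != flags || freq - prev_freq > 20;
    }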
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 79e7a7a285dc..82863e9273eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1275,8 +1275,10 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
 
 			entry = &wifi_pkg->package.elements[idx++];
 			if ((entry->type != ACPI_TYPE_INTEGER) ||
-			    (entry->integer.value > U8_MAX))
-				return -EINVAL;
+			    (entry->integer.value > U8_MAX)) {
+				ret = -EINVAL;
+				goto out_free;
+			}
 
 			mvm->geo_profiles[i].values[j] = entry->integer.value;
 		}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index bcde1ba0f1c8..ce901be5fba8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1084,7 +1084,13 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
 
 	lockdep_assert_held(&mvm->mutex);
 
-	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+	if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+		/*
+		 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
+		 * so later code will - from now on - see that we're doing it.
+		 */
+		set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+		clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
 		/* Clean up some internal and mac80211 state on restart */
 		iwl_mvm_restart_cleanup(mvm);
 	} else {
@@ -2591,8 +2597,18 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
 	spin_lock_bh(&mvm_sta->lock);
 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		tid_data = &mvm_sta->tid_data[i];
-		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+			/*
+			 * The first deferred frame should've stopped the MAC
+			 * queues, so we should never get a second deferred
+			 * frame for the RA/TID.
+			 */
+			iwl_mvm_start_mac_queues(mvm, info->hw_queue);
 			ieee80211_free_txskb(mvm->hw, skb);
+		}
 	}
 	spin_unlock_bh(&mvm_sta->lock);
 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index eaacfaf37206..ddd8719f27b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1090,6 +1090,7 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
  * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
  * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
  * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
  * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
  * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1101,6 +1102,7 @@ enum iwl_mvm_status {
 	IWL_MVM_STATUS_HW_RFKILL,
 	IWL_MVM_STATUS_HW_CTKILL,
 	IWL_MVM_STATUS_ROC_RUNNING,
+	IWL_MVM_STATUS_HW_RESTART_REQUESTED,
 	IWL_MVM_STATUS_IN_HW_RESTART,
 	IWL_MVM_STATUS_IN_D0I3,
 	IWL_MVM_STATUS_ROC_AUX_RUNNING,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 4d1188b8736a..9c175d5e9d67 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1235,9 +1235,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 	 */
 	if (!mvm->fw_restart && fw_error) {
 		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
 					    NULL);
-	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
-				    &mvm->status)) {
+	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
 
 		IWL_ERR(mvm,
@@ -1268,6 +1267,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
 
 	if (fw_error && mvm->fw_restart > 0)
 		mvm->fw_restart--;
+	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
 	ieee80211_restart_hw(mvm->hw);
 	}
 }
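Together with the mac80211.c hunk earlier in this patch, this forms a two-flag handshake: the restart path only records a request, and the start path converts it into the in-restart state exactly once. A plain C sketch of the handoff (the kernel code uses atomic set_bit()/clear_bit(); this is illustration only):

    enum {
            HW_RESTART_REQUESTED = 1u << 0,
            IN_HW_RESTART        = 1u << 1,
    };

    static unsigned int status;

    static void nic_restart(void)   /* requester side */
    {
            status |= HW_RESTART_REQUESTED;
            /* ieee80211_restart_hw() would be called here */
    }

    static void mac_start(void)     /* restart handler side */
    {
            if (status & HW_RESTART_REQUESTED) {
                    status |= IN_HW_RESTART;
                    status &= ~HW_RESTART_REQUESTED;
                    /* cleanup for a restart runs here */
            }
    }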
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 65beca3a457a..8999a1199d60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1291,7 +1291,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	 * first index into rate scale table.
 	 */
 	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
 				    info->status.ampdu_len,
 				    info->status.ampdu_ack_len,
 				    reduced_txp);
@@ -1312,7 +1312,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		if (info->status.ampdu_ack_len == 0)
 			info->status.ampdu_len = 1;
 
-		rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+		rs_collect_tlc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
 				    info->status.ampdu_len,
 				    info->status.ampdu_ack_len);
 
@@ -1348,11 +1348,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			continue;
 
 		rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
-				    lq_rate.index, 1,
+				    tx_resp_rate.index, 1,
 				    i < retries ? 0 : legacy_success,
 				    reduced_txp);
 		rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
-				    lq_rate.index, 1,
+				    tx_resp_rate.index, 1,
 				    i < retries ? 0 : legacy_success);
 	}
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index f3e608196369..71c8b800ffa9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -636,9 +636,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 
 	baid_data = rcu_dereference(mvm->baid_map[baid]);
 	if (!baid_data) {
-		WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN),
-		     "Received baid %d, but no data exists for this BAID\n",
-		     baid);
+		IWL_DEBUG_RX(mvm,
+			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+			     baid, reorder);
 		return false;
 	}
 
@@ -759,7 +759,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
 
 	data = rcu_dereference(mvm->baid_map[baid]);
 	if (!data) {
-		WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN));
+		IWL_DEBUG_RX(mvm,
+			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+			     baid, reorder_data);
 		goto out;
 	}
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 4df5f13fcdae..027ee5e72172 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -121,7 +121,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
 		.add_modify = update ? 1 : 0,
 		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
-						 STA_FLG_MIMO_EN_MSK),
+						 STA_FLG_MIMO_EN_MSK |
+						 STA_FLG_RTS_MIMO_PROT),
 		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
 	};
 	int ret;
@@ -277,9 +278,21 @@ static void iwl_mvm_rx_agg_session_expired(unsigned long data)
 
 	/* Timer expired */
 	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+	/*
+	 * sta should be valid unless the following happens:
+	 * The firmware asserts which triggers a reconfig flow, but
+	 * the reconfig fails before we set the pointer to sta into
+	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
+	 * A-MPDU and hence the timer continues to run. Then, the
+	 * timer expires and sta is NULL.
+	 */
+	if (!sta)
+		goto unlock;
+
 	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
-					  sta->addr, ba_data->tid);
+	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+				      sta->addr, ba_data->tid);
 unlock:
 	rcu_read_unlock();
 }
@@ -2015,7 +2028,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 					  IWL_MAX_TID_COUNT,
 					  wdg_timeout);
 
-	if (vif->type == NL80211_IFTYPE_AP)
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
 		mvm->probe_queue = queue;
 	else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
 		mvm->p2p_dev_queue = queue;
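The NULL check added in the session-expiry hunk is the usual RCU lookup-table pattern: a timer can fire after a failed reconfig, before the entry is reinstalled. Stripped of the driver specifics, it looks like this (plain C sketch; the kernel code does the lookup with rcu_dereference() under rcu_read_lock()):

    #include <stddef.h>

    struct sta { int tid; };

    static struct sta *fw_id_to_sta[16];    /* stand-in lookup table */

    static void ba_session_timer_expired(unsigned int sta_id)
    {
            struct sta *sta = fw_id_to_sta[sta_id];

            if (!sta)
                    return; /* reconfig failed before the entry was set */
            /* ... stop the RX BA session for sta->tid ... */
    }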
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 60360ed73f26..5fcc9dd6be56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -185,8 +185,14 @@ static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
 	else
 		udp_hdr(skb)->check = 0;
 
-	/* mac header len should include IV, size is in words */
-	if (info->control.hw_key)
+	/*
+	 * mac header len should include IV, size is in words unless
+	 * the IV is added by the firmware like in WEP.
+	 * In new Tx API, the IV is always added by the firmware.
+	 */
+	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
 		mh_len += info->control.hw_key->iv_len;
 	mh_len /= 2;
 	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
@@ -1815,6 +1821,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 	struct iwl_mvm_tid_data *tid_data;
 	struct iwl_mvm_sta *mvmsta;
 
+	ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
 	if (iwl_mvm_has_new_tx_api(mvm)) {
 		struct iwl_mvm_compressed_ba_notif *ba_res =
 			(void *)pkt->data;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index f16c1bb9bf94..84f4ba01e14f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -510,9 +510,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 
 /* 9000 Series */
 	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
@@ -527,10 +535,22 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 92b3a55d0fbc..f95eec52508e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3150,7 +3150,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	init_waitqueue_head(&trans_pcie->d0i3_waitq);
 
 	if (trans_pcie->msix_enabled) {
-		if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
+		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+		if (ret)
 			goto out_no_pci;
 	} else {
 		ret = iwl_pcie_alloc_ict(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index de50418adae5..034bdb4a0b06 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -298,6 +298,9 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
 		struct iwl_txq *txq = trans_pcie->txq[i];
 
+		if (!test_bit(i, trans_pcie->queue_used))
+			continue;
+
 		spin_lock_bh(&txq->lock);
 		if (txq->need_update) {
 			iwl_pcie_txq_inc_wr_ptr(trans, txq);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 6e2e760d98b1..0b75def39c6c 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -5704,7 +5704,7 @@ static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
 
 static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
 {
-	const u8 glrt_table[] = {
+	static const u8 glrt_table[] = {
 		0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
 		0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
 		0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
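The one-word change above is a code-size and stack fix rather than a behavioural one: a plain const array local to a function is rebuilt on the stack on every call, while static const data is emitted once into .rodata. A small illustration of the same idiom:

    #include <stddef.h>

    static unsigned int table_sum(void)
    {
            /* static const: stored once in .rodata, no per-call copy */
            static const unsigned char tbl[] = { 0xE0, 0x1F, 0x38, 0x32 };
            unsigned int sum = 0;
            size_t i;

            for (i = 0; i < sizeof(tbl); i++)
                    sum += tbl[i];
            return sum;
    }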
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 2a7ad5ffe997..cd5dc6dcb19f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -846,9 +846,6 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
 		return false;
 	}
 
-	if (rtlpriv->cfg->ops->get_btc_status())
-		rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
-
 	bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
 	rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index fb1ebb01133f..70723e67b7d7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2547,7 +2547,6 @@ struct bt_coexist_info {
 struct rtl_btc_ops {
 	void (*btc_init_variables) (struct rtl_priv *rtlpriv);
 	void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv);
-	void (*btc_power_on_setting)(struct rtl_priv *rtlpriv);
 	void (*btc_init_hw_config) (struct rtl_priv *rtlpriv);
 	void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type);
 	void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 9a03c5871efe..f58d8e305323 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -924,10 +924,8 @@ out1:
 		ntb_free_mw(nt, i);
 
 	/* if there's an actual failure, we should just bail */
-	if (rc < 0) {
-		ntb_link_disable(ndev);
+	if (rc < 0)
 		return;
-	}
 
 out:
 	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
@@ -1059,7 +1057,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	int node;
 	int rc, i;
 
-	mw_count = ntb_mw_count(ndev, PIDX);
+	mw_count = ntb_peer_mw_count(ndev);
 
 	if (!ndev->ops->mw_set_trans) {
 		dev_err(&ndev->dev, "Inbound MW based NTB API is required\n");
diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c
index f002bf48a08d..a69815c45ce6 100644
--- a/drivers/ntb/test/ntb_tool.c
+++ b/drivers/ntb/test/ntb_tool.c
@@ -959,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
 	tc->ntb = ntb;
 	init_waitqueue_head(&tc->link_wq);
 
-	tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS);
+	tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
 	for (i = 0; i < tc->mw_count; i++) {
 		rc = tool_init_mw(tc, i);
 		if (rc)
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7cd99b1f8596..75bc08c6838c 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num)
 static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
 {
 	const unsigned int sector_size = 512;
-	sector_t start_sector;
+	sector_t start_sector, end_sector;
 	u64 num_sectors;
 	u32 rem;
 
 	start_sector = div_u64(ns_offset, sector_size);
-	num_sectors = div_u64_rem(len, sector_size, &rem);
+	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
 	if (rem)
-		num_sectors++;
+		end_sector++;
+	num_sectors = end_sector - start_sector;
 
 	if (unlikely(num_sectors > (u64)INT_MAX)) {
 		u64 remaining = num_sectors;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cb96f4a7ae3a..37046ac2c441 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl,
 
 	c.directive.opcode = nvme_admin_directive_recv;
 	c.directive.nsid = cpu_to_le32(nsid);
-	c.directive.numd = sizeof(*s);
+	c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1);
 	c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM;
 	c.directive.dtype = NVME_DIR_STREAMS;
 
@@ -1509,7 +1509,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 	blk_queue_write_cache(q, vwc, vwc);
 }
 
-static void nvme_configure_apst(struct nvme_ctrl *ctrl)
+static int nvme_configure_apst(struct nvme_ctrl *ctrl)
 {
 	/*
 	 * APST (Autonomous Power State Transition) lets us program a
@@ -1538,16 +1538,16 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 	 * then don't do anything.
 	 */
 	if (!ctrl->apsta)
-		return;
+		return 0;
 
 	if (ctrl->npss > 31) {
 		dev_warn(ctrl->device, "NPSS is invalid; not using APST\n");
-		return;
+		return 0;
 	}
 
 	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
-		return;
+		return 0;
 
 	if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) {
 		/* Turn off APST. */
@@ -1629,6 +1629,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
 		dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret);
 
 	kfree(table);
+	return ret;
 }
 
 static void nvme_set_latency_tolerance(struct device *dev, s32 val)
@@ -1835,13 +1836,16 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		 * In fabrics we need to verify the cntlid matches the
 		 * admin connect
 		 */
-		if (ctrl->cntlid != le16_to_cpu(id->cntlid))
+		if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
 			ret = -EINVAL;
+			goto out_free;
+		}
 
 		if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
 			dev_err(ctrl->device,
 				"keep-alive support is mandatory for fabrics\n");
 			ret = -EINVAL;
+			goto out_free;
 		}
 	} else {
 		ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -1856,11 +1860,20 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	else if (!ctrl->apst_enabled && prev_apst_enabled)
 		dev_pm_qos_hide_latency_tolerance(ctrl->device);
 
-	nvme_configure_apst(ctrl);
-	nvme_configure_directives(ctrl);
+	ret = nvme_configure_apst(ctrl);
+	if (ret < 0)
+		return ret;
+
+	ret = nvme_configure_directives(ctrl);
+	if (ret < 0)
+		return ret;
 
 	ctrl->identified = true;
 
+	return 0;
+
+out_free:
+	kfree(id);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(nvme_init_identify);
@@ -1995,15 +2008,20 @@ static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
 	int serial_len = sizeof(ctrl->serial);
 	int model_len = sizeof(ctrl->model);
 
+	if (!uuid_is_null(&ns->uuid))
+		return sprintf(buf, "uuid.%pU\n", &ns->uuid);
+
 	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
 		return sprintf(buf, "eui.%16phN\n", ns->nguid);
 
 	if (memchr_inv(ns->eui, 0, sizeof(ns->eui)))
 		return sprintf(buf, "eui.%8phN\n", ns->eui);
 
-	while (ctrl->serial[serial_len - 1] == ' ')
+	while (serial_len > 0 && (ctrl->serial[serial_len - 1] == ' ' ||
+				  ctrl->serial[serial_len - 1] == '\0'))
 		serial_len--;
-	while (ctrl->model[model_len - 1] == ' ')
+	while (model_len > 0 && (ctrl->model[model_len - 1] == ' ' ||
+				 ctrl->model[model_len - 1] == '\0'))
 		model_len--;
 
 	return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl->vid,
@@ -2709,7 +2727,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	mutex_lock(&ctrl->namespaces_mutex);
 
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
-	blk_mq_unquiesce_queue(ctrl->admin_q);
+	if (ctrl->admin_q)
+		blk_mq_unquiesce_queue(ctrl->admin_q);
 
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		/*
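The numd fix above encodes the NVMe convention that dword counts are zero's based: a transfer of N dwords is written as N - 1, in little-endian byte order. As a sketch of the conversion:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: bytes -> NVMe NUMD (zero's based dword count). */
    static inline uint32_t nvme_bytes_to_numd(size_t bytes)
    {
            return (uint32_t)((bytes >> 2) - 1);
    }

For the 32-byte streams parameter structure this yields (32 >> 2) - 1 = 7, i.e. eight dwords; the kernel code additionally wraps the result in cpu_to_le32().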
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 2e582a240943..5f5cd306f76d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -794,7 +794,8 @@ static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(opt_tokens); i++) {
-		if (opt_tokens[i].token & ~allowed_opts) {
+		if ((opt_tokens[i].token & opts->mask) &&
+		    (opt_tokens[i].token & ~allowed_opts)) {
 			pr_warn("invalid parameter '%s'\n",
 				opt_tokens[i].pattern);
 		}
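With the added opts->mask test, the warning fires only for options the connect string actually requested, not for every token the transport happens to not support. Reduced to its essentials (names hypothetical):

    /* Sketch: a token is worth warning about only if it was both
     * requested by the user and not allowed by the transport. */
    static int is_invalid_opt(unsigned int token, unsigned int requested,
                              unsigned int allowed)
    {
            return (token & requested) && (token & ~allowed);
    }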
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d666ada39a9b..5c2a08ef08ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1888,7 +1888,7 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 	 * the target device is present
 	 */
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-		return BLK_STS_IOERR;
+		goto busy;
 
 	if (!nvme_fc_ctrl_get(ctrl))
 		return BLK_STS_IOERR;
@@ -1958,22 +1958,25 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 					queue->lldd_handle, &op->fcp_req);
 
 	if (ret) {
-		if (op->rq)			/* normal request */
+		if (!(op->flags & FCOP_FLAGS_AEN))
 			nvme_fc_unmap_data(ctrl, op->rq, op);
-		/* else - aen. no cleanup needed */
 
 		nvme_fc_ctrl_put(ctrl);
 
-		if (ret != -EBUSY)
+		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
+		    ret != -EBUSY)
 			return BLK_STS_IOERR;
 
-		if (op->rq)
-			blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
-		return BLK_STS_RESOURCE;
+		goto busy;
 	}
 
 	return BLK_STS_OK;
+
+busy:
+	if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
+		blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
+
+	return BLK_STS_RESOURCE;
 }
 
 static blk_status_t
@@ -2802,66 +2805,70 @@ out_fail:
 	return ERR_PTR(ret);
 }
 
-enum {
-	FCT_TRADDR_ERR		= 0,
-	FCT_TRADDR_WWNN		= 1 << 0,
-	FCT_TRADDR_WWPN		= 1 << 1,
-};
 
 struct nvmet_fc_traddr {
 	u64	nn;
 	u64	pn;
 };
 
-static const match_table_t traddr_opt_tokens = {
-	{ FCT_TRADDR_WWNN,	"nn-%s" },
-	{ FCT_TRADDR_WWPN,	"pn-%s" },
-	{ FCT_TRADDR_ERR,	NULL }
-};
-
 static int
-nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
+__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
 {
-	substring_t args[MAX_OPT_ARGS];
-	char *options, *o, *p;
-	int token, ret = 0;
 	u64 token64;
 
-	options = o = kstrdup(buf, GFP_KERNEL);
-	if (!options)
-		return -ENOMEM;
+	if (match_u64(sstr, &token64))
+		return -EINVAL;
+	*val = token64;
 
-	while ((p = strsep(&o, ":\n")) != NULL) {
-		if (!*p)
-			continue;
+	return 0;
+}
 
-		token = match_token(p, traddr_opt_tokens, args);
-		switch (token) {
-		case FCT_TRADDR_WWNN:
-			if (match_u64(args, &token64)) {
-				ret = -EINVAL;
-				goto out;
-			}
-			traddr->nn = token64;
-			break;
-		case FCT_TRADDR_WWPN:
-			if (match_u64(args, &token64)) {
-				ret = -EINVAL;
-				goto out;
-			}
-			traddr->pn = token64;
-			break;
-		default:
-			pr_warn("unknown traddr token or missing value '%s'\n",
-				p);
-			ret = -EINVAL;
-			goto out;
-		}
-	}
+/*
+ * This routine validates and extracts the WWN's from the TRADDR string.
+ * As kernel parsers need the 0x to determine number base, universally
+ * build string to parse with 0x prefix before parsing name strings.
+ */
+static int
+nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
+{
+	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
+	substring_t wwn = { name, &name[sizeof(name)-1] };
+	int nnoffset, pnoffset;
+
+	/* validate if string is one of the 2 allowed formats */
+	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
+	    !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
+	    !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
+		     "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
+		nnoffset = NVME_FC_TRADDR_OXNNLEN;
+		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
+			   NVME_FC_TRADDR_OXNNLEN;
+	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
+		    !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
+		    !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
+			     "pn-", NVME_FC_TRADDR_NNLEN))) {
+		nnoffset = NVME_FC_TRADDR_NNLEN;
+		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
+	} else
+		goto out_einval;
 
-out:
-	kfree(options);
-	return ret;
+	name[0] = '0';
+	name[1] = 'x';
+	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
+
+	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
+		goto out_einval;
+
+	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
+	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
+		goto out_einval;
+
+	return 0;
+
+out_einval:
+	pr_warn("%s: bad traddr string\n", __func__);
+	return -EINVAL;
 }
 
 static struct nvme_ctrl *
@@ -2875,11 +2882,11 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
 	unsigned long flags;
 	int ret;
 
-	ret = nvme_fc_parse_address(&raddr, opts->traddr);
+	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
 	if (ret || !raddr.nn || !raddr.pn)
 		return ERR_PTR(-EINVAL);
 
-	ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
+	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
 	if (ret || !laddr.nn || !laddr.pn)
 		return ERR_PTR(-EINVAL);
 
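The new parser accepts exactly two fixed layouts, nn-0x<16 hex>:pn-0x<16 hex> and nn-<16 hex>:pn-<16 hex>, and always rebuilds a 0x-prefixed string before the numeric parse so the base is unambiguous. A userspace approximation, for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: extract the WWNN/WWPN pair from a fixed-format traddr. */
    static int parse_traddr(const char *buf, uint64_t *nn, uint64_t *pn)
    {
            unsigned long long n, p;

            if (sscanf(buf, "nn-0x%16llx:pn-0x%16llx", &n, &p) == 2 ||
                sscanf(buf, "nn-%16llx:pn-%16llx", &n, &p) == 2) {
                    *nn = n;
                    *pn = p;
                    return 0;
            }
            return -1;
    }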
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d10d2f279d19..925467b31a33 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -539,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
@@ -556,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 
 	length -= (page_size - offset);
 	if (length <= 0)
-		return true;
+		return BLK_STS_OK;
 
 	dma_len -= (page_size - offset);
 	if (dma_len) {
@@ -569,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 
 	if (length <= page_size) {
 		iod->first_dma = dma_addr;
-		return true;
+		return BLK_STS_OK;
 	}
 
 	nprps = DIV_ROUND_UP(length, page_size);
@@ -585,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 	if (!prp_list) {
 		iod->first_dma = dma_addr;
 		iod->npages = -1;
-		return false;
+		return BLK_STS_RESOURCE;
 	}
 	list[0] = prp_list;
 	iod->first_dma = prp_dma;
@@ -595,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 		__le64 *old_prp_list = prp_list;
 		prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
 		if (!prp_list)
-			return false;
+			return BLK_STS_RESOURCE;
 		list[iod->npages++] = prp_list;
 		prp_list[0] = old_prp_list[i - 1];
 		old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -609,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 			break;
 		if (dma_len > 0)
 			continue;
-		BUG_ON(dma_len < 0);
+		if (unlikely(dma_len < 0))
+			goto bad_sgl;
 		sg = sg_next(sg);
 		dma_addr = sg_dma_address(sg);
 		dma_len = sg_dma_len(sg);
 	}
 
-	return true;
+	return BLK_STS_OK;
+
+ bad_sgl:
+	if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n",
+			blk_rq_payload_bytes(req), iod->nents)) {
+		for_each_sg(iod->sg, sg, iod->nents, i) {
+			dma_addr_t phys = sg_phys(sg);
+			pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
+				"dma_address:%pad dma_length:%d\n", i, &phys,
+				sg->offset, sg->length,
+				&sg_dma_address(sg),
+				sg_dma_len(sg));
+		}
+	}
+	return BLK_STS_IOERR;
+
 }
 
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -637,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 			     DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (!nvme_setup_prps(dev, req))
+	ret = nvme_setup_prps(dev, req);
+	if (ret != BLK_STS_OK)
 		goto out_unmap;
 
 	ret = BLK_STS_IOERR;
@@ -784,6 +801,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
 		return;
 	}
 
+	nvmeq->cqe_seen = 1;
 	req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
 	nvme_end_request(req, cqe->status, cqe->result);
 }
@@ -813,10 +831,8 @@ static void nvme_process_cq(struct nvme_queue *nvmeq)
 		consumed++;
 	}
 
-	if (consumed) {
+	if (consumed)
 		nvme_ring_cq_doorbell(nvmeq);
-		nvmeq->cqe_seen = 1;
-	}
 }
 
 static irqreturn_t nvme_irq(int irq, void *data)
@@ -1541,11 +1557,9 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
 	if (dev->cmb) {
 		iounmap(dev->cmb);
 		dev->cmb = NULL;
-		if (dev->cmbsz) {
-			sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
-						     &dev_attr_cmb.attr, NULL);
-			dev->cmbsz = 0;
-		}
+		sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
+					     &dev_attr_cmb.attr, NULL);
+		dev->cmbsz = 0;
 	}
 }
 
@@ -1602,7 +1616,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
 {
 	struct nvme_host_mem_buf_desc *descs;
-	u32 chunk_size, max_entries;
+	u32 chunk_size, max_entries, len;
 	int i = 0;
 	void **bufs;
 	u64 size = 0, tmp;
@@ -1621,10 +1635,10 @@ retry:
 	if (!bufs)
 		goto out_free_descs;
 
-	for (size = 0; size < preferred; size += chunk_size) {
-		u32 len = min_t(u64, chunk_size, preferred - size);
+	for (size = 0; size < preferred; size += len) {
 		dma_addr_t dma_addr;
 
+		len = min_t(u64, chunk_size, preferred - size);
 		bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
 				DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
 		if (!bufs[i])
@@ -1936,16 +1950,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 
 	/*
 	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
-	 * populate sysfs if a CMB is implemented. Note that we add the
-	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
-	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
-	 * NULL as final argument to sysfs_add_file_to_group.
+	 * populate sysfs if a CMB is implemented. Since nvme_dev_attrs_group
+	 * has no name we can pass NULL as final argument to
+	 * sysfs_add_file_to_group.
 	 */
 
 	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
 		dev->cmb = nvme_map_cmb(dev);
-
-		if (dev->cmbsz) {
+		if (dev->cmb) {
 			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
 						    &dev_attr_cmb.attr, NULL))
 				dev_warn(dev->ctrl.device,
@@ -2282,7 +2294,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	result = nvme_dev_map(dev);
 	if (result)
-		goto free;
+		goto put_pci;
 
 	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
@@ -2291,7 +2303,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	result = nvme_setup_prp_pools(dev);
 	if (result)
-		goto put_pci;
+		goto unmap;
 
 	quirks |= check_dell_samsung_bug(pdev);
 
@@ -2308,9 +2320,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
  release_pools:
 	nvme_release_prp_pools(dev);
+ unmap:
+	nvme_dev_unmap(dev);
  put_pci:
 	put_device(dev->dev);
-	nvme_dev_unmap(dev);
  free:
 	kfree(dev->queues);
 	kfree(dev);
@@ -2466,6 +2479,9 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_VDEVICE(INTEL, 0x0a54),
 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
 				NVME_QUIRK_DEALLOCATE_ZEROES, },
+	{ PCI_VDEVICE(INTEL, 0x0a55),
+		.driver_data = NVME_QUIRK_STRIPE_SIZE |
+				NVME_QUIRK_DEALLOCATE_ZEROES, },
 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS },
 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
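Among the nvme_alloc_host_mem() changes above, the allocation loop now advances by the number of bytes actually allocated rather than by the nominal chunk size, so the running total matches what was really mapped when the final chunk is short. The loop shape in isolation:

    #include <stddef.h>

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    /* Sketch: walk `preferred` bytes in chunks, advancing by the actual
     * length of each piece so the last, short chunk is counted right. */
    static size_t total_allocated(size_t preferred, size_t chunk_size)
    {
            size_t size, len, total = 0;

            for (size = 0; size < preferred; size += len) {
                    len = min_sz(chunk_size, preferred - size);
                    total += len;   /* stands in for dma_alloc_attrs(len) */
            }
            return total;           /* equals preferred */
    }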
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 35f930db3c02..a53bb6635b83 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -168,11 +168,21 @@ out:
 	nvmet_req_complete(req, status);
 }
 
+static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
+{
+	int len = min(src_len, dst_len);
+
+	memcpy(dst, src, len);
+	if (dst_len > len)
+		memset(dst + len, ' ', dst_len - len);
+}
+
 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 {
 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
 	struct nvme_id_ctrl *id;
 	u16 status = 0;
+	const char model[] = "Linux";
 
 	id = kzalloc(sizeof(*id), GFP_KERNEL);
 	if (!id) {
@@ -184,14 +194,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->vid = 0;
 	id->ssvid = 0;
 
-	memset(id->sn, ' ', sizeof(id->sn));
-	snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial);
-
-	memset(id->mn, ' ', sizeof(id->mn));
-	strncpy((char *)id->mn, "Linux", sizeof(id->mn));
-
-	memset(id->fr, ' ', sizeof(id->fr));
-	strncpy((char *)id->fr, UTS_RELEASE, sizeof(id->fr));
+	bin2hex(id->sn, &ctrl->subsys->serial,
+		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
+	copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1);
+	copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE));
 
 	id->rab = 6;
 
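NVMe identify strings (sn, mn, fr) are fixed-width, space-padded ASCII with no NUL terminator, which is exactly what copy_and_pad() above enforces; the replaced strncpy() calls could leave a truncating NUL or stale bytes. Usage, as a self-contained sketch:

    #include <string.h>

    static void copy_and_pad(char *dst, int dst_len, const char *src,
                             int src_len)
    {
            int len = src_len < dst_len ? src_len : dst_len;  /* min() */

            memcpy(dst, src, len);
            if (dst_len > len)
                    memset(dst + len, ' ', dst_len - len);
    }

    /* A 40-byte model field becomes "Linux" plus 35 spaces, no NUL. */
    static void fill_model(char mn[40])
    {
            copy_and_pad(mn, 40, "Linux", 5);
    }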
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index a358ecd93e11..0a0067e771f5 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -650,7 +650,7 @@ out_unlock:
 
 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
 
-static ssize_t nvmet_subsys_version_show(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
 					      char *page)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
@@ -666,7 +666,7 @@ static ssize_t nvmet_subsys_version_show(struct config_item *item,
 		       (int)NVME_MINOR(subsys->ver));
 }
 
-static ssize_t nvmet_subsys_version_store(struct config_item *item,
+static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
 					       const char *page, size_t count)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
@@ -684,11 +684,33 @@ static ssize_t nvmet_subsys_version_store(struct config_item *item,
 
 	return count;
 }
-CONFIGFS_ATTR(nvmet_subsys_, version);
+CONFIGFS_ATTR(nvmet_subsys_, attr_version);
+
+static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
+					     char *page)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial);
+}
+
+static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
+					      const char *page, size_t count)
+{
+	struct nvmet_subsys *subsys = to_subsys(item);
+
+	down_write(&nvmet_config_sem);
+	sscanf(page, "%llx\n", &subsys->serial);
+	up_write(&nvmet_config_sem);
+
+	return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
 
 static struct configfs_attribute *nvmet_subsys_attrs[] = {
 	&nvmet_subsys_attr_attr_allow_any_host,
-	&nvmet_subsys_attr_version,
+	&nvmet_subsys_attr_attr_version,
+	&nvmet_subsys_attr_attr_serial,
 	NULL,
 };
 
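The renames in this file are forced by token pasting: CONFIGFS_ATTR(nvmet_subsys_, attr_version) generates an attribute item named nvmet_subsys_attr_attr_version and expects handlers called nvmet_subsys_attr_version_show()/_store(), so the visible configfs file keeps its attr_ prefix. A compilable miniature of the pasting rule:

    #include <stdio.h>

    #define ATTR_NAME(prefix, name) #name
    #define ATTR_SHOW(prefix, name) prefix##name##_show

    /* The handler name must already contain the attr_ prefix ... */
    static const char *nvmet_subsys_attr_version_show(void)
    {
            return "1.3";
    }

    int main(void)
    {
            /* ... so that the pasted identifier resolves to it. */
            printf("file \"%s\" -> %s\n",
                   ATTR_NAME(nvmet_subsys_, attr_version),
                   ATTR_SHOW(nvmet_subsys_, attr_version)());
            return 0;
    }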
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5b4ac103748..f4b02bb4a1a8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -767,9 +767,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
 
-	/* generate a random serial number as our controllers are ephemeral: */
-	get_random_bytes(&ctrl->serial, sizeof(ctrl->serial));
-
 	kref_init(&ctrl->ref);
 	ctrl->subsys = subsys;
 
@@ -928,6 +925,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		return NULL;
 
 	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
+	/* generate a random serial number as our controllers are ephemeral: */
+	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
 
 	switch (type) {
 	case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1e6dcc241b3c..309c84aa7595 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -114,6 +114,11 @@ struct nvmet_fc_tgtport {
114 struct kref ref; 114 struct kref ref;
115}; 115};
116 116
117struct nvmet_fc_defer_fcp_req {
118 struct list_head req_list;
119 struct nvmefc_tgt_fcp_req *fcp_req;
120};
121
117struct nvmet_fc_tgt_queue { 122struct nvmet_fc_tgt_queue {
118 bool ninetypercent; 123 bool ninetypercent;
119 u16 qid; 124 u16 qid;
@@ -132,6 +137,8 @@ struct nvmet_fc_tgt_queue {
132 struct nvmet_fc_tgt_assoc *assoc; 137 struct nvmet_fc_tgt_assoc *assoc;
133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */ 138 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
134 struct list_head fod_list; 139 struct list_head fod_list;
140 struct list_head pending_cmd_list;
141 struct list_head avail_defer_list;
135 struct workqueue_struct *work_q; 142 struct workqueue_struct *work_q;
136 struct kref ref; 143 struct kref ref;
137} __aligned(sizeof(unsigned long long)); 144} __aligned(sizeof(unsigned long long));
@@ -223,6 +230,8 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); 230static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
224static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); 231static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
225static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); 232static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
233static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
234 struct nvmet_fc_fcp_iod *fod);
226 235
227 236
228/* *********************** FC-NVME DMA Handling **************************** */ 237/* *********************** FC-NVME DMA Handling **************************** */
@@ -385,7 +394,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
385static struct nvmet_fc_ls_iod * 394static struct nvmet_fc_ls_iod *
386nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport) 395nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
387{ 396{
388 static struct nvmet_fc_ls_iod *iod; 397 struct nvmet_fc_ls_iod *iod;
389 unsigned long flags; 398 unsigned long flags;
390 399
391 spin_lock_irqsave(&tgtport->lock, flags); 400 spin_lock_irqsave(&tgtport->lock, flags);
@@ -462,10 +471,10 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
462static struct nvmet_fc_fcp_iod * 471static struct nvmet_fc_fcp_iod *
463nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue) 472nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
464{ 473{
465 static struct nvmet_fc_fcp_iod *fod; 474 struct nvmet_fc_fcp_iod *fod;
466 unsigned long flags; 475
476 lockdep_assert_held(&queue->qlock);
467 477
468 spin_lock_irqsave(&queue->qlock, flags);
469 fod = list_first_entry_or_null(&queue->fod_list, 478 fod = list_first_entry_or_null(&queue->fod_list,
470 struct nvmet_fc_fcp_iod, fcp_list); 479 struct nvmet_fc_fcp_iod, fcp_list);
471 if (fod) { 480 if (fod) {
@@ -477,17 +486,37 @@ nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
477 * will "inherit" that reference. 486 * will "inherit" that reference.
478 */ 487 */
479 } 488 }
480 spin_unlock_irqrestore(&queue->qlock, flags);
481 return fod; 489 return fod;
482} 490}
483 491
484 492
485static void 493static void
494nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
495 struct nvmet_fc_tgt_queue *queue,
496 struct nvmefc_tgt_fcp_req *fcpreq)
497{
498 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
499
500 /*
501 * put all admin cmds on hw queue id 0. All io commands go to
 502 * the respective hw queue, selected on a modulo basis
503 */
504 fcpreq->hwqid = queue->qid ?
505 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
506
507 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
508 queue_work_on(queue->cpu, queue->work_q, &fod->work);
509 else
510 nvmet_fc_handle_fcp_rqst(tgtport, fod);
511}
512
513static void
486nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue, 514nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
487 struct nvmet_fc_fcp_iod *fod) 515 struct nvmet_fc_fcp_iod *fod)
488{ 516{
489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; 517 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
490 struct nvmet_fc_tgtport *tgtport = fod->tgtport; 518 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
519 struct nvmet_fc_defer_fcp_req *deferfcp;
491 unsigned long flags; 520 unsigned long flags;
492 521
493 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, 522 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
@@ -495,21 +524,56 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
495 524
496 fcpreq->nvmet_fc_private = NULL; 525 fcpreq->nvmet_fc_private = NULL;
497 526
498 spin_lock_irqsave(&queue->qlock, flags);
499 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
500 fod->active = false; 527 fod->active = false;
501 fod->abort = false; 528 fod->abort = false;
502 fod->aborted = false; 529 fod->aborted = false;
503 fod->writedataactive = false; 530 fod->writedataactive = false;
504 fod->fcpreq = NULL; 531 fod->fcpreq = NULL;
532
533 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
534
535 spin_lock_irqsave(&queue->qlock, flags);
536 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
537 struct nvmet_fc_defer_fcp_req, req_list);
538 if (!deferfcp) {
539 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
540 spin_unlock_irqrestore(&queue->qlock, flags);
541
542 /* Release reference taken at queue lookup and fod allocation */
543 nvmet_fc_tgt_q_put(queue);
544 return;
545 }
546
547 /* Re-use the fod for the next pending cmd that was deferred */
548 list_del(&deferfcp->req_list);
549
550 fcpreq = deferfcp->fcp_req;
551
552 /* deferfcp can be reused for another IO at a later date */
553 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
554
505 spin_unlock_irqrestore(&queue->qlock, flags); 555 spin_unlock_irqrestore(&queue->qlock, flags);
506 556
 557 /* Save the NVME CMD IU in the fod */
558 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
559
560 /* Setup new fcpreq to be processed */
561 fcpreq->rspaddr = NULL;
562 fcpreq->rsplen = 0;
563 fcpreq->nvmet_fc_private = fod;
564 fod->fcpreq = fcpreq;
565 fod->active = true;
566
567 /* inform LLDD IO is now being processed */
568 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
569
570 /* Submit deferred IO for processing */
571 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
572
507 /* 573 /*
 508 * release the reference taken at queue lookup and fod allocation 574 * Leave the queue lookup reference that was taken
 575 * when the fod was originally allocated.
509 */ 576 */
510 nvmet_fc_tgt_q_put(queue);
511
512 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
513} 577}
514 578
515static int 579static int
@@ -569,6 +633,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
569 queue->port = assoc->tgtport->port; 633 queue->port = assoc->tgtport->port;
570 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid); 634 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
571 INIT_LIST_HEAD(&queue->fod_list); 635 INIT_LIST_HEAD(&queue->fod_list);
636 INIT_LIST_HEAD(&queue->avail_defer_list);
637 INIT_LIST_HEAD(&queue->pending_cmd_list);
572 atomic_set(&queue->connected, 0); 638 atomic_set(&queue->connected, 0);
573 atomic_set(&queue->sqtail, 0); 639 atomic_set(&queue->sqtail, 0);
574 atomic_set(&queue->rsn, 1); 640 atomic_set(&queue->rsn, 1);
@@ -638,6 +704,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
638{ 704{
639 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; 705 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
640 struct nvmet_fc_fcp_iod *fod = queue->fod; 706 struct nvmet_fc_fcp_iod *fod = queue->fod;
707 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
641 unsigned long flags; 708 unsigned long flags;
642 int i, writedataactive; 709 int i, writedataactive;
643 bool disconnect; 710 bool disconnect;
@@ -666,6 +733,36 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
666 } 733 }
667 } 734 }
668 } 735 }
736
737 /* Cleanup defer'ed IOs in queue */
738 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
739 req_list) {
740 list_del(&deferfcp->req_list);
741 kfree(deferfcp);
742 }
743
744 for (;;) {
745 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
746 struct nvmet_fc_defer_fcp_req, req_list);
747 if (!deferfcp)
748 break;
749
750 list_del(&deferfcp->req_list);
751 spin_unlock_irqrestore(&queue->qlock, flags);
752
753 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
754 deferfcp->fcp_req);
755
756 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
757 deferfcp->fcp_req);
758
759 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
760 deferfcp->fcp_req);
761
762 kfree(deferfcp);
763
764 spin_lock_irqsave(&queue->qlock, flags);
765 }
669 spin_unlock_irqrestore(&queue->qlock, flags); 766 spin_unlock_irqrestore(&queue->qlock, flags);
670 767
671 flush_workqueue(queue->work_q); 768 flush_workqueue(queue->work_q);
@@ -1174,14 +1271,14 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1174 */ 1271 */
1175 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) 1272 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1176 ret = VERR_CR_ASSOC_LEN; 1273 ret = VERR_CR_ASSOC_LEN;
1177 else if (rqst->desc_list_len < 1274 else if (be32_to_cpu(rqst->desc_list_len) <
1178 cpu_to_be32(FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)) 1275 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1179 ret = VERR_CR_ASSOC_RQST_LEN; 1276 ret = VERR_CR_ASSOC_RQST_LEN;
1180 else if (rqst->assoc_cmd.desc_tag != 1277 else if (rqst->assoc_cmd.desc_tag !=
1181 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) 1278 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1182 ret = VERR_CR_ASSOC_CMD; 1279 ret = VERR_CR_ASSOC_CMD;
1183 else if (rqst->assoc_cmd.desc_len < 1280 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1184 cpu_to_be32(FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)) 1281 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1185 ret = VERR_CR_ASSOC_CMD_LEN; 1282 ret = VERR_CR_ASSOC_CMD_LEN;
1186 else if (!rqst->assoc_cmd.ersp_ratio || 1283 else if (!rqst->assoc_cmd.ersp_ratio ||
1187 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= 1284 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
@@ -2172,11 +2269,38 @@ nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2172 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc 2269 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2173 * layer for processing. 2270 * layer for processing.
2174 * 2271 *
2175 * The nvmet-fc layer will copy cmd payload to an internal structure for 2272 * The nvmet_fc layer allocates a local job structure (struct
2176 * processing. As such, upon completion of the routine, the LLDD may 2273 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2177 * immediately free/reuse the CMD IU buffer passed in the call. 2274 * CMD IU buffer to the job structure. As such, on a successful
2275 * completion (returns 0), the LLDD may immediately free/reuse
2276 * the CMD IU buffer passed in the call.
2277 *
 2278 * However, in some circumstances, due to the packetized nature of FC
 2279 * and the API of the FC LLDD - which may issue a hw command to send the
 2280 * response but may not get the hw completion for that command and
 2281 * upcall the nvmet_fc layer before a new command is
 2282 * asynchronously received - it's possible for a command to be received
 2283 * before the LLDD and nvmet_fc have recycled the job structure. This
 2284 * gives the appearance of more commands received than fit in the sq.
2285 * To alleviate this scenario, a temporary queue is maintained in the
2286 * transport for pending LLDD requests waiting for a queue job structure.
 2287 * In these "overrun" cases, a temporary queue element is allocated,
 2288 * the LLDD request and CMD IU buffer information are remembered, and the
2289 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2290 * structure is freed, it is immediately reallocated for anything on the
 2291 * pending request list. The LLDD's defer_rcv() callback is called,
2292 * informing the LLDD that it may reuse the CMD IU buffer, and the io
2293 * is then started normally with the transport.
2178 * 2294 *
2179 * If this routine returns error, the lldd should abort the exchange. 2295 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2296 * the completion as successful but must not reuse the CMD IU buffer
2297 * until the LLDD's defer_rcv() callback has been called for the
2298 * corresponding struct nvmefc_tgt_fcp_req pointer.
2299 *
2300 * If there is any other condition in which an error occurs, the
2301 * transport will return a non-zero status indicating the error.
2302 * In all cases other than -EOVERFLOW, the transport has not accepted the
2303 * request and the LLDD should abort the exchange.
2180 * 2304 *
2181 * @target_port: pointer to the (registered) target port the FCP CMD IU 2305 * @target_port: pointer to the (registered) target port the FCP CMD IU
2182 * was received on. 2306 * was received on.
@@ -2194,6 +2318,8 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2194 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf; 2318 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2195 struct nvmet_fc_tgt_queue *queue; 2319 struct nvmet_fc_tgt_queue *queue;
2196 struct nvmet_fc_fcp_iod *fod; 2320 struct nvmet_fc_fcp_iod *fod;
2321 struct nvmet_fc_defer_fcp_req *deferfcp;
2322 unsigned long flags;
2197 2323
2198 /* validate iu, so the connection id can be used to find the queue */ 2324 /* validate iu, so the connection id can be used to find the queue */
2199 if ((cmdiubuf_len != sizeof(*cmdiu)) || 2325 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
@@ -2214,29 +2340,60 @@ nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2214 * when the fod is freed. 2340 * when the fod is freed.
2215 */ 2341 */
2216 2342
2343 spin_lock_irqsave(&queue->qlock, flags);
2344
2217 fod = nvmet_fc_alloc_fcp_iod(queue); 2345 fod = nvmet_fc_alloc_fcp_iod(queue);
2218 if (!fod) { 2346 if (fod) {
2347 spin_unlock_irqrestore(&queue->qlock, flags);
2348
2349 fcpreq->nvmet_fc_private = fod;
2350 fod->fcpreq = fcpreq;
2351
2352 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2353
2354 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2355
2356 return 0;
2357 }
2358
2359 if (!tgtport->ops->defer_rcv) {
2360 spin_unlock_irqrestore(&queue->qlock, flags);
2219 /* release the queue lookup reference */ 2361 /* release the queue lookup reference */
2220 nvmet_fc_tgt_q_put(queue); 2362 nvmet_fc_tgt_q_put(queue);
2221 return -ENOENT; 2363 return -ENOENT;
2222 } 2364 }
2223 2365
2224 fcpreq->nvmet_fc_private = fod; 2366 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2225 fod->fcpreq = fcpreq; 2367 struct nvmet_fc_defer_fcp_req, req_list);
2226 /* 2368 if (deferfcp) {
2227 * put all admin cmds on hw queue id 0. All io commands go to 2369 /* Just re-use one that was previously allocated */
2228 * the respective hw queue based on a modulo basis 2370 list_del(&deferfcp->req_list);
2229 */ 2371 } else {
2230 fcpreq->hwqid = queue->qid ? 2372 spin_unlock_irqrestore(&queue->qlock, flags);
2231 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2232 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2233 2373
2234 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR) 2374 /* Now we need to dynamically allocate one */
2235 queue_work_on(queue->cpu, queue->work_q, &fod->work); 2375 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2236 else 2376 if (!deferfcp) {
2237 nvmet_fc_handle_fcp_rqst(tgtport, fod); 2377 /* release the queue lookup reference */
2378 nvmet_fc_tgt_q_put(queue);
2379 return -ENOMEM;
2380 }
2381 spin_lock_irqsave(&queue->qlock, flags);
2382 }
2238 2383
2239 return 0; 2384 /* For now, use rspaddr / rsplen to save payload information */
2385 fcpreq->rspaddr = cmdiubuf;
2386 fcpreq->rsplen = cmdiubuf_len;
2387 deferfcp->fcp_req = fcpreq;
2388
2389 /* defer processing till a fod becomes available */
2390 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2391
2392 /* NOTE: the queue lookup reference is still valid */
2393
2394 spin_unlock_irqrestore(&queue->qlock, flags);
2395
2396 return -EOVERFLOW;
2240} 2397}
2241EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req); 2398EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
2242 2399
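To make the -EOVERFLOW contract documented above concrete, here is a hedged sketch of an LLDD receive path. struct my_lport, my_requeue_buffer() and my_abort_exchange() are invented names; only nvmet_fc_rcv_fcp_req() and struct nvmefc_tgt_fcp_req come from the nvmet-fc API.

#include <linux/errno.h>
#include <linux/nvme-fc-driver.h>

struct my_lport {
        struct nvmet_fc_target_port *targetport;
        /* ... LLDD-private state ... */
};

static void my_requeue_buffer(struct my_lport *lp, void *buf) { /* stub */ }
static void my_abort_exchange(struct my_lport *lp,
                              struct nvmefc_tgt_fcp_req *req) { /* stub */ }

static void my_lldd_handle_cmd(struct my_lport *lport,
                               struct nvmefc_tgt_fcp_req *fcpreq,
                               void *cmdiu, u32 cmdiu_len)
{
        int ret;

        ret = nvmet_fc_rcv_fcp_req(lport->targetport, fcpreq,
                                   cmdiu, cmdiu_len);
        switch (ret) {
        case 0:
                /* accepted: the CMD IU buffer may be reused right away */
                my_requeue_buffer(lport, cmdiu);
                break;
        case -EOVERFLOW:
                /*
                 * accepted but deferred: the buffer stays owned by the
                 * transport until defer_rcv() fires for this fcpreq
                 */
                break;
        default:
                /* not accepted: abort the exchange, reclaim the buffer */
                my_abort_exchange(lport, fcpreq);
                my_requeue_buffer(lport, cmdiu);
                break;
        }
}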
@@ -2293,66 +2450,70 @@ nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2293} 2450}
2294EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort); 2451EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2295 2452
2296enum {
2297 FCT_TRADDR_ERR = 0,
2298 FCT_TRADDR_WWNN = 1 << 0,
2299 FCT_TRADDR_WWPN = 1 << 1,
2300};
2301 2453
2302struct nvmet_fc_traddr { 2454struct nvmet_fc_traddr {
2303 u64 nn; 2455 u64 nn;
2304 u64 pn; 2456 u64 pn;
2305}; 2457};
2306 2458
2307static const match_table_t traddr_opt_tokens = {
2308 { FCT_TRADDR_WWNN, "nn-%s" },
2309 { FCT_TRADDR_WWPN, "pn-%s" },
2310 { FCT_TRADDR_ERR, NULL }
2311};
2312
2313static int 2459static int
2314nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf) 2460__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2315{ 2461{
2316 substring_t args[MAX_OPT_ARGS];
2317 char *options, *o, *p;
2318 int token, ret = 0;
2319 u64 token64; 2462 u64 token64;
2320 2463
2321 options = o = kstrdup(buf, GFP_KERNEL); 2464 if (match_u64(sstr, &token64))
2322 if (!options) 2465 return -EINVAL;
2323 return -ENOMEM; 2466 *val = token64;
2324 2467
2325 while ((p = strsep(&o, ":\n")) != NULL) { 2468 return 0;
2326 if (!*p) 2469}
2327 continue;
2328 2470
2329 token = match_token(p, traddr_opt_tokens, args); 2471/*
2330 switch (token) { 2472 * This routine validates and extracts the WWN's from the TRADDR string.
2331 case FCT_TRADDR_WWNN: 2473 * As kernel parsers need the 0x to determine number base, universally
2332 if (match_u64(args, &token64)) { 2474 * build string to parse with 0x prefix before parsing name strings.
2333 ret = -EINVAL; 2475 */
2334 goto out; 2476static int
2335 } 2477nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2336 traddr->nn = token64; 2478{
2337 break; 2479 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2338 case FCT_TRADDR_WWPN: 2480 substring_t wwn = { name, &name[sizeof(name)-1] };
2339 if (match_u64(args, &token64)) { 2481 int nnoffset, pnoffset;
2340 ret = -EINVAL; 2482
2341 goto out; 2483 /* validate that the string is in one of the 2 allowed formats */
2342 } 2484 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2343 traddr->pn = token64; 2485 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2344 break; 2486 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2345 default: 2487 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2346 pr_warn("unknown traddr token or missing value '%s'\n", 2488 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2347 p); 2489 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2348 ret = -EINVAL; 2490 NVME_FC_TRADDR_OXNNLEN;
2349 goto out; 2491 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2350 } 2492 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2351 } 2493 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2494 "pn-", NVME_FC_TRADDR_NNLEN))) {
2495 nnoffset = NVME_FC_TRADDR_NNLEN;
2496 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2497 } else
2498 goto out_einval;
2499
2500 name[0] = '0';
2501 name[1] = 'x';
2502 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2503
2504 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2505 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2506 goto out_einval;
2507
2508 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2509 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2510 goto out_einval;
2352 2511
2353out: 2512 return 0;
2354 kfree(options); 2513
2355 return ret; 2514out_einval:
2515 pr_warn("%s: bad traddr string\n", __func__);
2516 return -EINVAL;
2356} 2517}
2357 2518
2358static int 2519static int
@@ -2370,7 +2531,8 @@ nvmet_fc_add_port(struct nvmet_port *port)
2370 2531
2371 /* map the traddr address info to a target port */ 2532 /* map the traddr address info to a target port */
2372 2533
2373 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr); 2534 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2535 sizeof(port->disc_addr.traddr));
2374 if (ret) 2536 if (ret)
2375 return ret; 2537 return ret;
2376 2538
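A small userspace sketch of the normalization trick the new parser relies on: the 16 hex digits of each name are copied behind a "0x" prefix so a base-0 integer parser selects base 16. The traddr string and WWN values here are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HEXNAMELEN 16   /* mirrors NVME_FC_TRADDR_HEXNAMELEN */

static unsigned long long parse_wwn(const char *hex16)
{
        char name[2 + HEXNAMELEN + 1] = "0x";

        memcpy(&name[2], hex16, HEXNAMELEN);
        name[2 + HEXNAMELEN] = '\0';
        return strtoull(name, NULL, 0); /* "0x" prefix selects base 16 */
}

int main(void)
{
        /* long form: "nn-0x<16 hex>:pn-0x<16 hex>" (43 chars total) */
        const char *traddr = "nn-0x20000090fac7893f:pn-0x10000090fac7893f";

        printf("nn=%llx pn=%llx\n",
               parse_wwn(&traddr[5]),           /* skip "nn-0x" */
               parse_wwn(&traddr[27]));         /* skip ":pn-0x" as well */
        return 0;
}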
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 747bbdb4f9c6..e3b244c7e443 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -112,7 +112,6 @@ struct nvmet_ctrl {
112 112
113 struct mutex lock; 113 struct mutex lock;
114 u64 cap; 114 u64 cap;
115 u64 serial;
116 u32 cc; 115 u32 cc;
117 u32 csts; 116 u32 csts;
118 117
@@ -152,6 +151,7 @@ struct nvmet_subsys {
152 u16 max_qid; 151 u16 max_qid;
153 152
154 u64 ver; 153 u64 ver;
154 u64 serial;
155 char *subsysnqn; 155 char *subsysnqn;
156 156
157 struct config_group group; 157 struct config_group group;
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index a0d4ede9b8fc..63e3eb55f3ac 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -170,7 +170,7 @@ static const struct of_device_id rockchip_efuse_match[] = {
170 .data = (void *)&rockchip_rk3288_efuse_read, 170 .data = (void *)&rockchip_rk3288_efuse_read,
171 }, 171 },
172 { 172 {
173 .compatible = "rockchip,rk322x-efuse", 173 .compatible = "rockchip,rk3228-efuse",
174 .data = (void *)&rockchip_rk3288_efuse_read, 174 .data = (void *)&rockchip_rk3288_efuse_read,
175 }, 175 },
176 { 176 {
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 28c38c756f92..e0a28ea341fe 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -89,6 +89,7 @@ int of_dma_configure(struct device *dev, struct device_node *np)
89 bool coherent; 89 bool coherent;
90 unsigned long offset; 90 unsigned long offset;
91 const struct iommu_ops *iommu; 91 const struct iommu_ops *iommu;
92 u64 mask;
92 93
93 /* 94 /*
94 * Set default coherent_dma_mask to 32 bit. Drivers are expected to 95 * Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -134,10 +135,9 @@ int of_dma_configure(struct device *dev, struct device_node *np)
134 * Limit coherent and dma mask based on size and default mask 135 * Limit coherent and dma mask based on size and default mask
135 * set by the driver. 136 * set by the driver.
136 */ 137 */
137 dev->coherent_dma_mask = min(dev->coherent_dma_mask, 138 mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
138 DMA_BIT_MASK(ilog2(dma_addr + size))); 139 dev->coherent_dma_mask &= mask;
139 *dev->dma_mask = min((*dev->dma_mask), 140 *dev->dma_mask &= mask;
140 DMA_BIT_MASK(ilog2(dma_addr + size)));
141 141
142 coherent = of_dma_is_coherent(np); 142 coherent = of_dma_is_coherent(np);
143 dev_dbg(dev, "device is%sdma coherent\n", 143 dev_dbg(dev, "device is%sdma coherent\n",
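A worked example, with made-up values, of why the mask is now derived from dma_addr + size - 1: ilog2() rounds down, so for a non-power-of-two range the old formula yielded a mask one bit too small.

#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* floor(log2(v)), v != 0 */
static int ilog2_u64(unsigned long long v)
{
        int l = -1;

        while (v) {
                v >>= 1;
                l++;
        }
        return l;
}

int main(void)
{
        unsigned long long dma_addr = 0, size = 0xC0000000ULL;  /* 3 GiB */
        unsigned long long end = dma_addr + size - 1;           /* 0xBFFFFFFF */

        /* new formula: enough bits for the last valid address */
        printf("new mask %#llx\n", DMA_BIT_MASK(ilog2_u64(end) + 1));
        /* prints 0xffffffff: bit 31 of 0xBFFFFFFF needs a 32-bit mask */

        /* old formula undershot for non-power-of-two ranges */
        printf("old mask %#llx\n", DMA_BIT_MASK(ilog2_u64(dma_addr + size)));
        /* prints 0x7fffffff: too small to reach 0xBFFFFFFF */
        return 0;
}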
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ce72aa65425..ab21c846eb27 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -476,7 +476,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
476 int i; 476 int i;
477 477
478 for (i = 0; i < nr_irqs; i++, res++) 478 for (i = 0; i < nr_irqs; i++, res++)
479 if (!of_irq_to_resource(dev, i, res)) 479 if (of_irq_to_resource(dev, i, res) <= 0)
480 break; 480 break;
481 481
482 return i; 482 return i;
diff --git a/drivers/of/property.c b/drivers/of/property.c
index eda50b4be934..067f9fab7b77 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -708,6 +708,15 @@ struct device_node *of_graph_get_port_parent(struct device_node *node)
708{ 708{
709 unsigned int depth; 709 unsigned int depth;
710 710
711 if (!node)
712 return NULL;
713
714 /*
715 * Preserve usecount for passed in node as of_get_next_parent()
716 * will do of_node_put() on it.
717 */
718 of_node_get(node);
719
711 /* Walk 3 levels up only if there is 'ports' node. */ 720 /* Walk 3 levels up only if there is 'ports' node. */
712 for (depth = 3; depth && node; depth--) { 721 for (depth = 3; depth && node; depth--) {
713 node = of_get_next_parent(node); 722 node = of_get_next_parent(node);
@@ -728,12 +737,16 @@ EXPORT_SYMBOL(of_graph_get_port_parent);
728struct device_node *of_graph_get_remote_port_parent( 737struct device_node *of_graph_get_remote_port_parent(
729 const struct device_node *node) 738 const struct device_node *node)
730{ 739{
731 struct device_node *np; 740 struct device_node *np, *pp;
732 741
733 /* Get remote endpoint node. */ 742 /* Get remote endpoint node. */
734 np = of_graph_get_remote_endpoint(node); 743 np = of_graph_get_remote_endpoint(node);
735 744
736 return of_graph_get_port_parent(np); 745 pp = of_graph_get_port_parent(np);
746
747 of_node_put(np);
748
749 return pp;
737} 750}
738EXPORT_SYMBOL(of_graph_get_remote_port_parent); 751EXPORT_SYMBOL(of_graph_get_remote_port_parent);
739 752
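A hedged caller sketch of the refcount contract after this fix: the endpoint passed in keeps its own reference, and the returned parent carries one reference the caller must drop. my_probe_remote() is an invented name.

#include <linux/of.h>
#include <linux/of_graph.h>

static void my_probe_remote(struct device_node *endpoint)
{
        struct device_node *remote;

        remote = of_graph_get_remote_port_parent(endpoint);
        if (!remote)
                return;

        /* ... look up the remote device, parse its properties ... */

        of_node_put(remote);    /* balance the reference returned to us */
}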
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 5c63b920b471..ed92c1254cff 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -956,7 +956,7 @@ static int __init dino_probe(struct parisc_device *dev)
956 956
957 dino_dev->hba.dev = dev; 957 dino_dev->hba.dev = dev;
958 dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096); 958 dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
959 dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */ 959 dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
960 spin_lock_init(&dino_dev->dinosaur_pen); 960 spin_lock_init(&dino_dev->dinosaur_pen);
961 dino_dev->hba.iommu = ccio_get_iommu(dev); 961 dino_dev->hba.iommu = ccio_get_iommu(dev);
962 962
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 055f83fddc18..b1ff46fe4547 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -333,11 +333,11 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
333 333
334 /* Update the symlink to the real device */ 334 /* Update the symlink to the real device */
335 sysfs_remove_link(&entry->kobj, "device"); 335 sysfs_remove_link(&entry->kobj, "device");
336 write_unlock(&entry->rw_lock);
337
336 ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device"); 338 ret = sysfs_create_link(&entry->kobj, &entry->dev->kobj, "device");
337 WARN_ON(ret); 339 WARN_ON(ret);
338 340
339 write_unlock(&entry->rw_lock);
340
341 printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n", 341 printk(KERN_INFO PDCS_PREFIX ": changed \"%s\" path to \"%s\"\n",
342 entry->name, buf); 342 entry->name, buf);
343 343
@@ -954,7 +954,7 @@ static struct attribute *pdcs_subsys_attrs[] = {
954 NULL, 954 NULL,
955}; 955};
956 956
957static struct attribute_group pdcs_attr_group = { 957static const struct attribute_group pdcs_attr_group = {
958 .attrs = pdcs_subsys_attrs, 958 .attrs = pdcs_subsys_attrs,
959}; 959};
960 960
@@ -998,6 +998,7 @@ pdcs_register_pathentries(void)
998 /* kobject is now registered */ 998 /* kobject is now registered */
999 write_lock(&entry->rw_lock); 999 write_lock(&entry->rw_lock);
1000 entry->ready = 2; 1000 entry->ready = 2;
1001 write_unlock(&entry->rw_lock);
1001 1002
1002 /* Add a nice symlink to the real device */ 1003 /* Add a nice symlink to the real device */
1003 if (entry->dev) { 1004 if (entry->dev) {
@@ -1005,7 +1006,6 @@ pdcs_register_pathentries(void)
1005 WARN_ON(err); 1006 WARN_ON(err);
1006 } 1007 }
1007 1008
1008 write_unlock(&entry->rw_lock);
1009 kobject_uevent(&entry->kobj, KOBJ_ADD); 1009 kobject_uevent(&entry->kobj, KOBJ_ADD);
1010 } 1010 }
1011 1011
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 253d92409bb3..2225afc1cbbb 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -538,12 +538,9 @@ msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
538 struct msi_desc *entry; 538 struct msi_desc *entry;
539 u16 control; 539 u16 control;
540 540
541 if (affd) { 541 if (affd)
542 masks = irq_create_affinity_masks(nvec, affd); 542 masks = irq_create_affinity_masks(nvec, affd);
543 if (!masks) 543
544 dev_err(&dev->dev, "can't allocate MSI affinity masks for %d vectors\n",
545 nvec);
546 }
547 544
548 /* MSI Entry Initialization */ 545 /* MSI Entry Initialization */
549 entry = alloc_msi_entry(&dev->dev, nvec, masks); 546 entry = alloc_msi_entry(&dev->dev, nvec, masks);
@@ -679,12 +676,8 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
679 struct msi_desc *entry; 676 struct msi_desc *entry;
680 int ret, i; 677 int ret, i;
681 678
682 if (affd) { 679 if (affd)
683 masks = irq_create_affinity_masks(nvec, affd); 680 masks = irq_create_affinity_masks(nvec, affd);
684 if (!masks)
685 dev_err(&dev->dev, "can't allocate MSI-X affinity masks for %d vectors\n",
686 nvec);
687 }
688 681
689 for (i = 0, curmsk = masks; i < nvec; i++) { 682 for (i = 0, curmsk = masks; i < nvec; i++) {
690 entry = alloc_msi_entry(&dev->dev, 1, curmsk); 683 entry = alloc_msi_entry(&dev->dev, 1, curmsk);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index af0cc3456dc1..fdf65a6c13f6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -514,7 +514,7 @@ EXPORT_SYMBOL(pci_find_resource);
514 */ 514 */
515struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev) 515struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
516{ 516{
517 struct pci_dev *bridge, *highest_pcie_bridge = NULL; 517 struct pci_dev *bridge, *highest_pcie_bridge = dev;
518 518
519 bridge = pci_upstream_bridge(dev); 519 bridge = pci_upstream_bridge(dev);
520 while (bridge && pci_is_pcie(bridge)) { 520 while (bridge && pci_is_pcie(bridge)) {
@@ -4260,6 +4260,41 @@ int pci_reset_function(struct pci_dev *dev)
4260EXPORT_SYMBOL_GPL(pci_reset_function); 4260EXPORT_SYMBOL_GPL(pci_reset_function);
4261 4261
4262/** 4262/**
4263 * pci_reset_function_locked - quiesce and reset a PCI device function
4264 * @dev: PCI device to reset
4265 *
4266 * Some devices allow an individual function to be reset without affecting
4267 * other functions in the same device. The PCI device must be responsive
4268 * to PCI config space in order to use this function.
4269 *
4270 * This function does not just reset the PCI portion of a device, but
4271 * clears all the state associated with the device. This function differs
4272 * from __pci_reset_function() in that it saves and restores device state
4273 * over the reset. It also differs from pci_reset_function() in that it
4274 * requires the PCI device lock to be held.
4275 *
4276 * Returns 0 if the device function was successfully reset or negative if the
4277 * device doesn't support resetting a single function.
4278 */
4279int pci_reset_function_locked(struct pci_dev *dev)
4280{
4281 int rc;
4282
4283 rc = pci_probe_reset_function(dev);
4284 if (rc)
4285 return rc;
4286
4287 pci_dev_save_and_disable(dev);
4288
4289 rc = __pci_reset_function_locked(dev);
4290
4291 pci_dev_restore(dev);
4292
4293 return rc;
4294}
4295EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4296
4297/**
4263 * pci_try_reset_function - quiesce and reset a PCI device function 4298 * pci_try_reset_function - quiesce and reset a PCI device function
4264 * @dev: PCI device to reset 4299 * @dev: PCI device to reset
4265 * 4300 *
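A minimal caller sketch, assuming the caller is not already in a path that holds the device lock (e.g. a driver core callback) and so must take it explicitly. my_reset_while_locked() is an invented name.

#include <linux/pci.h>

static int my_reset_while_locked(struct pci_dev *pdev)
{
        int rc;

        device_lock(&pdev->dev);        /* the "PCI device lock" */
        rc = pci_reset_function_locked(pdev);
        device_unlock(&pdev->dev);

        return rc;
}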
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c31310db0404..e6a917b4acd3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1762,6 +1762,48 @@ static void pci_configure_extended_tags(struct pci_dev *dev)
1762 PCI_EXP_DEVCTL_EXT_TAG); 1762 PCI_EXP_DEVCTL_EXT_TAG);
1763} 1763}
1764 1764
1765/**
1766 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1767 * @dev: PCI device to query
1768 *
1769 * Returns true if the device has enabled relaxed ordering attribute.
1770 */
1771bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1772{
1773 u16 v;
1774
1775 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1776
1777 return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1778}
1779EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1780
1781static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1782{
1783 struct pci_dev *root;
1784
1785 /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1786 if (dev->is_virtfn)
1787 return;
1788
1789 if (!pcie_relaxed_ordering_enabled(dev))
1790 return;
1791
1792 /*
1793 * For now, we only deal with Relaxed Ordering issues with Root
1794 * Ports. Peer-to-Peer DMA is another can of worms.
1795 */
1796 root = pci_find_pcie_root_port(dev);
1797 if (!root)
1798 return;
1799
1800 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1801 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1802 PCI_EXP_DEVCTL_RELAX_EN);
1803 dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
1804 }
1805}
1806
1765static void pci_configure_device(struct pci_dev *dev) 1807static void pci_configure_device(struct pci_dev *dev)
1766{ 1808{
1767 struct hotplug_params hpp; 1809 struct hotplug_params hpp;
@@ -1769,6 +1811,7 @@ static void pci_configure_device(struct pci_dev *dev)
1769 1811
1770 pci_configure_mps(dev); 1812 pci_configure_mps(dev);
1771 pci_configure_extended_tags(dev); 1813 pci_configure_extended_tags(dev);
1814 pci_configure_relaxed_ordering(dev);
1772 1815
1773 memset(&hpp, 0, sizeof(hpp)); 1816 memset(&hpp, 0, sizeof(hpp));
1774 ret = pci_get_hp_params(dev, &hpp); 1817 ret = pci_get_hp_params(dev, &hpp);
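A hedged sketch of the driver-side half of this scheme: a driver reads the new pcie_relaxed_ordering_enabled() helper once at init and only tags DMA requests with the Relaxed Ordering attribute when it returned true. struct my_adapter and my_adapter_init_ro() are invented names.

#include <linux/pci.h>

struct my_adapter {
        struct pci_dev *pdev;
        bool use_relaxed_ordering;      /* consulted when building requests */
};

static void my_adapter_init_ro(struct my_adapter *adap)
{
        /*
         * If the Root Port was quirked with
         * PCI_DEV_FLAGS_NO_RELAXED_ORDERING, probe already cleared
         * PCI_EXP_DEVCTL_RELAX_EN, so this reads false and the driver
         * never sets RO in its TLPs.
         */
        adap->use_relaxed_ordering = pcie_relaxed_ordering_enabled(adap->pdev);
}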
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6967c6b4cf6b..140760403f36 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4016,6 +4016,95 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
4016 quirk_tw686x_class); 4016 quirk_tw686x_class);
4017 4017
4018/* 4018/*
4019 * Some devices have problems with Transaction Layer Packets with the Relaxed
4020 * Ordering Attribute set. Such devices should mark themselves and other
4021 * Device Drivers should check before sending TLPs with RO set.
4022 */
4023static void quirk_relaxedordering_disable(struct pci_dev *dev)
4024{
4025 dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
4026 dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
4027}
4028
4029/*
4030 * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root
4031 * Complex has a Flow Control Credit issue which can cause performance
4032 * problems with Upstream Transaction Layer Packets with Relaxed Ordering set.
4033 */
4034DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
4035 quirk_relaxedordering_disable);
4036DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
4037 quirk_relaxedordering_disable);
4038DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
4039 quirk_relaxedordering_disable);
4040DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
4041 quirk_relaxedordering_disable);
4042DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
4043 quirk_relaxedordering_disable);
4044DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
4045 quirk_relaxedordering_disable);
4046DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
4047 quirk_relaxedordering_disable);
4048DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
4049 quirk_relaxedordering_disable);
4050DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
4051 quirk_relaxedordering_disable);
4052DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
4053 quirk_relaxedordering_disable);
4054DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
4055 quirk_relaxedordering_disable);
4056DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
4057 quirk_relaxedordering_disable);
4058DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
4059 quirk_relaxedordering_disable);
4060DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
4061 quirk_relaxedordering_disable);
4062DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
4063 quirk_relaxedordering_disable);
4064DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
4065 quirk_relaxedordering_disable);
4066DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
4067 quirk_relaxedordering_disable);
4068DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
4069 quirk_relaxedordering_disable);
4070DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
4071 quirk_relaxedordering_disable);
4072DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
4073 quirk_relaxedordering_disable);
4074DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
4075 quirk_relaxedordering_disable);
4076DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
4077 quirk_relaxedordering_disable);
4078DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
4079 quirk_relaxedordering_disable);
4080DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
4081 quirk_relaxedordering_disable);
4082DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
4083 quirk_relaxedordering_disable);
4084DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
4085 quirk_relaxedordering_disable);
4086DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
4087 quirk_relaxedordering_disable);
4088DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
4089 quirk_relaxedordering_disable);
4090
4091/*
4092 * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex
4093 * where Upstream Transaction Layer Packets with the Relaxed Ordering
4094 * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering
4095 * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules
4096 * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0
4097 * November 10, 2010). As a result, on this platform we can't use Relaxed
4098 * Ordering for Upstream TLPs.
4099 */
4100DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
4101 quirk_relaxedordering_disable);
4102DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
4103 quirk_relaxedordering_disable);
4104DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
4105 quirk_relaxedordering_disable);
4106
4107/*
4019 * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same 4108 * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same
4020 * values for the Attribute as were supplied in the header of the 4109 * values for the Attribute as were supplied in the header of the
4021 * corresponding Request, except as explicitly allowed when IDO is used." 4110 * corresponding Request, except as explicitly allowed when IDO is used."
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index dc459eb1246b..1c5e0f333779 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -569,22 +569,41 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
569 if (irq != other_irq) { 569 if (irq != other_irq) {
570 pr_warn("mismatched PPIs detected.\n"); 570 pr_warn("mismatched PPIs detected.\n");
571 err = -EINVAL; 571 err = -EINVAL;
572 goto err_out;
572 } 573 }
573 } else { 574 } else {
574 err = request_irq(irq, handler, 575 struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
575 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu", 576 unsigned long irq_flags;
577
578 err = irq_force_affinity(irq, cpumask_of(cpu));
579
580 if (err && num_possible_cpus() > 1) {
581 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
582 irq, cpu);
583 goto err_out;
584 }
585
586 if (platdata && platdata->irq_flags) {
587 irq_flags = platdata->irq_flags;
588 } else {
589 irq_flags = IRQF_PERCPU |
590 IRQF_NOBALANCING |
591 IRQF_NO_THREAD;
592 }
593
594 err = request_irq(irq, handler, irq_flags, "arm-pmu",
576 per_cpu_ptr(&hw_events->percpu_pmu, cpu)); 595 per_cpu_ptr(&hw_events->percpu_pmu, cpu));
577 } 596 }
578 597
579 if (err) { 598 if (err)
580 pr_err("unable to request IRQ%d for ARM PMU counters\n", 599 goto err_out;
581 irq);
582 return err;
583 }
584 600
585 cpumask_set_cpu(cpu, &armpmu->active_irqs); 601 cpumask_set_cpu(cpu, &armpmu->active_irqs);
586
587 return 0; 602 return 0;
603
604err_out:
605 pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
606 return err;
588} 607}
589 608
590int armpmu_request_irqs(struct arm_pmu *armpmu) 609int armpmu_request_irqs(struct arm_pmu *armpmu)
@@ -628,12 +647,6 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
628 enable_percpu_irq(irq, IRQ_TYPE_NONE); 647 enable_percpu_irq(irq, IRQ_TYPE_NONE);
629 return 0; 648 return 0;
630 } 649 }
631
632 if (irq_force_affinity(irq, cpumask_of(cpu)) &&
633 num_possible_cpus() > 1) {
634 pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
635 irq, cpu);
636 }
637 } 650 }
638 651
639 return 0; 652 return 0;
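A sketch of the board-side hook this enables, assuming a platform whose PMU interrupt needs non-default flags; my_board_pmu_pdata and the chosen flag combination are illustrative only.

#include <linux/interrupt.h>
#include <linux/perf/arm_pmu.h>

static struct arm_pmu_platdata my_board_pmu_pdata = {
        /* hypothetical: PMU line shared with another device on this board */
        .irq_flags = IRQF_SHARED | IRQF_NOBALANCING | IRQF_NO_THREAD,
};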
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 69255f53057a..4eafa7a42e52 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -131,8 +131,8 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
131 } 131 }
132 132
133 if (!pmu_has_irq_affinity(pdev->dev.of_node)) { 133 if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
134 pr_warn("no interrupt-affinity property for %s, guessing.\n", 134 pr_warn("no interrupt-affinity property for %pOF, guessing.\n",
135 of_node_full_name(pdev->dev.of_node)); 135 pdev->dev.of_node);
136 } 136 }
137 137
138 /* 138 /*
@@ -211,7 +211,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
211 } 211 }
212 212
213 if (ret) { 213 if (ret) {
214 pr_info("%s: failed to probe PMU!\n", of_node_full_name(node)); 214 pr_info("%pOF: failed to probe PMU!\n", node);
215 goto out_free; 215 goto out_free;
216 } 216 }
217 217
@@ -228,8 +228,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
228out_free_irqs: 228out_free_irqs:
229 armpmu_free_irqs(pmu); 229 armpmu_free_irqs(pmu);
230out_free: 230out_free:
231 pr_info("%s: failed to register PMU devices!\n", 231 pr_info("%pOF: failed to register PMU devices!\n", node);
232 of_node_full_name(node));
233 armpmu_free(pmu); 232 armpmu_free(pmu);
234 return ret; 233 return ret;
235} 234}
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index c259848228b4..b242cce10468 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -546,6 +546,7 @@ static int l2_cache_event_init(struct perf_event *event)
546 } 546 }
547 547
548 if ((event != event->group_leader) && 548 if ((event != event->group_leader) &&
549 !is_software_event(event->group_leader) &&
549 (L2_EVT_GROUP(event->group_leader->attr.config) == 550 (L2_EVT_GROUP(event->group_leader->attr.config) ==
550 L2_EVT_GROUP(event->attr.config))) { 551 L2_EVT_GROUP(event->attr.config))) {
551 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 552 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
@@ -558,6 +559,7 @@ static int l2_cache_event_init(struct perf_event *event)
558 list_for_each_entry(sibling, &event->group_leader->sibling_list, 559 list_for_each_entry(sibling, &event->group_leader->sibling_list,
559 group_entry) { 560 group_entry) {
560 if ((sibling != event) && 561 if ((sibling != event) &&
562 !is_software_event(sibling) &&
561 (L2_EVT_GROUP(sibling->attr.config) == 563 (L2_EVT_GROUP(sibling->attr.config) ==
562 L2_EVT_GROUP(event->attr.config))) { 564 L2_EVT_GROUP(event->attr.config))) {
563 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, 565 dev_dbg_ratelimited(&l2cache_pmu->pdev->dev,
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 37371b89b14f..64fc59c3ae6d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -30,8 +30,8 @@ config PHY_BCM_NS_USB3
30 tristate "Broadcom Northstar USB 3.0 PHY Driver" 30 tristate "Broadcom Northstar USB 3.0 PHY Driver"
31 depends on ARCH_BCM_IPROC || COMPILE_TEST 31 depends on ARCH_BCM_IPROC || COMPILE_TEST
32 depends on HAS_IOMEM && OF 32 depends on HAS_IOMEM && OF
33 depends on MDIO_BUS
33 select GENERIC_PHY 34 select GENERIC_PHY
34 select MDIO_DEVICE
35 help 35 help
36 Enable this to support Broadcom USB 3.0 PHY connected to the USB 36 Enable this to support Broadcom USB 3.0 PHY connected to the USB
37 controller on Northstar family. 37 controller on Northstar family.
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 20f1b4493994..04e929fd0ffe 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1548,6 +1548,13 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1548 }, 1548 },
1549 }, 1549 },
1550 { 1550 {
1551 .ident = "HP Chromebook 11 G5 (Setzer)",
1552 .matches = {
1553 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1554 DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
1555 },
1556 },
1557 {
1551 .ident = "Acer Chromebook R11 (Cyan)", 1558 .ident = "Acer Chromebook R11 (Cyan)",
1552 .matches = { 1559 .matches = {
1553 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1560 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index 4d4ef42a39b5..86c4b3fab7b0 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -343,9 +343,9 @@ static const struct pinctrl_pin_desc mrfld_pins[] = {
343 343
344static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; 344static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 };
345static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; 345static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 };
346static const unsigned int mrfld_uart0_pins[] = { 124, 125, 126, 127 }; 346static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 };
347static const unsigned int mrfld_uart1_pins[] = { 128, 129, 130, 131 }; 347static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 };
348static const unsigned int mrfld_uart2_pins[] = { 132, 133, 134, 135 }; 348static const unsigned int mrfld_uart2_pins[] = { 123, 124, 125, 126 };
349static const unsigned int mrfld_pwm0_pins[] = { 144 }; 349static const unsigned int mrfld_pwm0_pins[] = { 144 };
350static const unsigned int mrfld_pwm1_pins[] = { 145 }; 350static const unsigned int mrfld_pwm1_pins[] = { 145 };
351static const unsigned int mrfld_pwm2_pins[] = { 132 }; 351static const unsigned int mrfld_pwm2_pins[] = { 132 };
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index f024e25787fc..0c6d7812d6fd 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -37,7 +37,7 @@
37#define IRQ_STATUS 0x10 37#define IRQ_STATUS 0x10
38#define IRQ_WKUP 0x18 38#define IRQ_WKUP 0x18
39 39
40#define NB_FUNCS 2 40#define NB_FUNCS 3
41#define GPIO_PER_REG 32 41#define GPIO_PER_REG 32
42 42
43/** 43/**
@@ -126,6 +126,16 @@ struct armada_37xx_pinctrl {
126 .funcs = {_func1, "gpio"} \ 126 .funcs = {_func1, "gpio"} \
127 } 127 }
128 128
129#define PIN_GRP_GPIO_3(_name, _start, _nr, _mask, _v1, _v2, _v3, _f1, _f2) \
130 { \
131 .name = _name, \
132 .start_pin = _start, \
133 .npins = _nr, \
134 .reg_mask = _mask, \
135 .val = {_v1, _v2, _v3}, \
136 .funcs = {_f1, _f2, "gpio"} \
137 }
138
129#define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \ 139#define PIN_GRP_EXTRA(_name, _start, _nr, _mask, _v1, _v2, _start2, _nr2, \
130 _f1, _f2) \ 140 _f1, _f2) \
131 { \ 141 { \
@@ -171,12 +181,13 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
171 PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"), 181 PIN_GRP_GPIO("usb32_drvvbus0", 0, 1, BIT(0), "drvbus"),
172 PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), 182 PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
173 PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), 183 PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
174 PIN_GRP_EXTRA("rgmii", 6, 12, BIT(3), 0, BIT(3), 23, 1, "mii", "gpio"), 184 PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
175 PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), 185 PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
176 PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), 186 PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
177 PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), 187 PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
178 PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), 188 PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
179 PIN_GRP("mii_col", 23, 1, BIT(8), "mii", "mii_err"), 189 PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),
190 "mii", "mii_err"),
180}; 191};
181 192
182const struct armada_37xx_pin_data armada_37xx_pin_nb = { 193const struct armada_37xx_pin_data armada_37xx_pin_nb = {
@@ -187,7 +198,7 @@ const struct armada_37xx_pin_data armada_37xx_pin_nb = {
187}; 198};
188 199
189const struct armada_37xx_pin_data armada_37xx_pin_sb = { 200const struct armada_37xx_pin_data armada_37xx_pin_sb = {
190 .nr_pins = 29, 201 .nr_pins = 30,
191 .name = "GPIO2", 202 .name = "GPIO2",
192 .groups = armada_37xx_sb_groups, 203 .groups = armada_37xx_sb_groups,
193 .ngroups = ARRAY_SIZE(armada_37xx_sb_groups), 204 .ngroups = ARRAY_SIZE(armada_37xx_sb_groups),
@@ -208,7 +219,7 @@ static int armada_37xx_get_func_reg(struct armada_37xx_pin_group *grp,
208{ 219{
209 int f; 220 int f;
210 221
211 for (f = 0; f < NB_FUNCS; f++) 222 for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++)
212 if (!strcmp(grp->funcs[f], func)) 223 if (!strcmp(grp->funcs[f], func))
213 return f; 224 return f;
214 225
@@ -795,7 +806,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
795 for (j = 0; j < grp->extra_npins; j++) 806 for (j = 0; j < grp->extra_npins; j++)
796 grp->pins[i+j] = grp->extra_pin + j; 807 grp->pins[i+j] = grp->extra_pin + j;
797 808
798 for (f = 0; f < NB_FUNCS; f++) { 809 for (f = 0; (f < NB_FUNCS) && grp->funcs[f]; f++) {
799 int ret; 810 int ret;
800 /* check for unique functions and count groups */ 811 /* check for unique functions and count groups */
801 ret = armada_37xx_add_function(info->funcs, &funcsize, 812 ret = armada_37xx_add_function(info->funcs, &funcsize,
@@ -847,7 +858,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
847 struct armada_37xx_pin_group *gp = &info->groups[g]; 858 struct armada_37xx_pin_group *gp = &info->groups[g];
848 int f; 859 int f;
849 860
850 for (f = 0; f < NB_FUNCS; f++) { 861 for (f = 0; (f < NB_FUNCS) && gp->funcs[f]; f++) {
851 if (strcmp(gp->funcs[f], name) == 0) { 862 if (strcmp(gp->funcs[f], name) == 0) {
852 *groups = gp->name; 863 *groups = gp->name;
853 groups++; 864 groups++;
diff --git a/drivers/pinctrl/stm32/Kconfig b/drivers/pinctrl/stm32/Kconfig
index 3b8026fca057..7e1fe39a56a5 100644
--- a/drivers/pinctrl/stm32/Kconfig
+++ b/drivers/pinctrl/stm32/Kconfig
@@ -6,29 +6,30 @@ config PINCTRL_STM32
6 select PINMUX 6 select PINMUX
7 select GENERIC_PINCONF 7 select GENERIC_PINCONF
8 select GPIOLIB 8 select GPIOLIB
9 select IRQ_DOMAIN_HIERARCHY
9 select MFD_SYSCON 10 select MFD_SYSCON
10 11
11config PINCTRL_STM32F429 12config PINCTRL_STM32F429
12 bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429 13 bool "STMicroelectronics STM32F429 pin control" if COMPILE_TEST && !MACH_STM32F429
13 depends on OF && IRQ_DOMAIN_HIERARCHY 14 depends on OF
14 default MACH_STM32F429 15 default MACH_STM32F429
15 select PINCTRL_STM32 16 select PINCTRL_STM32
16 17
17config PINCTRL_STM32F469 18config PINCTRL_STM32F469
18 bool "STMicroelectronics STM32F469 pin control" if COMPILE_TEST && !MACH_STM32F469 19 bool "STMicroelectronics STM32F469 pin control" if COMPILE_TEST && !MACH_STM32F469
19 depends on OF && IRQ_DOMAIN_HIERARCHY 20 depends on OF
20 default MACH_STM32F469 21 default MACH_STM32F469
21 select PINCTRL_STM32 22 select PINCTRL_STM32
22 23
23config PINCTRL_STM32F746 24config PINCTRL_STM32F746
24 bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746 25 bool "STMicroelectronics STM32F746 pin control" if COMPILE_TEST && !MACH_STM32F746
25 depends on OF && IRQ_DOMAIN_HIERARCHY 26 depends on OF
26 default MACH_STM32F746 27 default MACH_STM32F746
27 select PINCTRL_STM32 28 select PINCTRL_STM32
28 29
29config PINCTRL_STM32H743 30config PINCTRL_STM32H743
30 bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743 31 bool "STMicroelectronics STM32H743 pin control" if COMPILE_TEST && !MACH_STM32H743
31 depends on OF && IRQ_DOMAIN_HIERARCHY 32 depends on OF
32 default MACH_STM32H743 33 default MACH_STM32H743
33 select PINCTRL_STM32 34 select PINCTRL_STM32
34endif 35endif
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 159580c04b14..47a392bc73c8 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -918,6 +918,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
918 SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */ 918 SUNXI_FUNCTION_VARIANT(0x3, "emac", /* ETXD1 */
919 PINCTRL_SUN7I_A20), 919 PINCTRL_SUN7I_A20),
920 SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */ 920 SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
921 SUNXI_FUNCTION(0x5, "sim"), /* DET */
921 SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */ 922 SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
922 SUNXI_FUNCTION(0x7, "csi1")), /* D16 */ 923 SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
923 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17), 924 SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index a433a306a2d0..c75e094b2d90 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1084,7 +1084,7 @@ static const unsigned usb1_pins[] = {182, 183};
1084static const int usb1_muxvals[] = {0, 0}; 1084static const int usb1_muxvals[] = {0, 0};
1085static const unsigned usb2_pins[] = {184, 185}; 1085static const unsigned usb2_pins[] = {184, 185};
1086static const int usb2_muxvals[] = {0, 0}; 1086static const int usb2_muxvals[] = {0, 0};
1087static const unsigned usb3_pins[] = {186, 187}; 1087static const unsigned usb3_pins[] = {187, 188};
1088static const int usb3_muxvals[] = {0, 0}; 1088static const int usb3_muxvals[] = {0, 0};
1089static const unsigned port_range0_pins[] = { 1089static const unsigned port_range0_pins[] = {
1090 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */ 1090 300, 301, 302, 303, 304, 305, 306, 307, /* PORT0x */
diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
index 787e3967bd5c..f828ee340a98 100644
--- a/drivers/pinctrl/zte/pinctrl-zx.c
+++ b/drivers/pinctrl/zte/pinctrl-zx.c
@@ -64,10 +64,8 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
64 struct zx_pinctrl_soc_info *info = zpctl->info; 64 struct zx_pinctrl_soc_info *info = zpctl->info;
65 const struct pinctrl_pin_desc *pindesc = info->pins + group_selector; 65 const struct pinctrl_pin_desc *pindesc = info->pins + group_selector;
66 struct zx_pin_data *data = pindesc->drv_data; 66 struct zx_pin_data *data = pindesc->drv_data;
67 struct zx_mux_desc *mux = data->muxes; 67 struct zx_mux_desc *mux;
68 u32 mask = (1 << data->width) - 1; 68 u32 mask, offset, bitpos;
69 u32 offset = data->offset;
70 u32 bitpos = data->bitpos;
71 struct function_desc *func; 69 struct function_desc *func;
72 unsigned long flags; 70 unsigned long flags;
73 u32 val, mval; 71 u32 val, mval;
@@ -76,6 +74,11 @@ static int zx_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
76 if (!data) 74 if (!data)
77 return -EINVAL; 75 return -EINVAL;
78 76
77 mux = data->muxes;
78 mask = (1 << data->width) - 1;
79 offset = data->offset;
80 bitpos = data->bitpos;
81
79 func = pinmux_generic_get_function(pctldev, func_selector); 82 func = pinmux_generic_get_function(pctldev, func_selector);
80 if (!func) 83 if (!func)
81 return -EINVAL; 84 return -EINVAL;
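
The zx_set_mux() hunk above is an ordering fix: the old code read data->muxes, data->width, data->offset and data->bitpos in the variable initializers, before the `if (!data)` check ever ran, so a NULL drv_data would oops. A minimal sketch of the rule the patch restores, with invented names rather than the driver's real code:

	#include <linux/errno.h>

	struct pin_data {
		unsigned int width;
		unsigned int offset;
	};

	static int set_mux(const struct pin_data *data)
	{
		unsigned int mask;

		if (!data)
			return -EINVAL;			/* validate first ... */

		mask = (1U << data->width) - 1;		/* ... dereference second */

		/* real code would read-modify-write the register at data->offset */
		return (int)mask;
	}
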
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index b04860703740..80b87954f6dd 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -675,6 +675,7 @@ config PEAQ_WMI
675 tristate "PEAQ 2-in-1 WMI hotkey driver" 675 tristate "PEAQ 2-in-1 WMI hotkey driver"
676 depends on ACPI_WMI 676 depends on ACPI_WMI
677 depends on INPUT 677 depends on INPUT
678 select INPUT_POLLDEV
678 help 679 help
679 Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s. 680 Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
680 681
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index f8978464df31..dad8f4afa17c 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -626,7 +626,7 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
626 * WMI Interface Version 8 4 <version> 626 * WMI Interface Version 8 4 <version>
627 * WMI buffer length 12 4 4096 627 * WMI buffer length 12 4 4096
628 */ 628 */
629static int __init dell_wmi_check_descriptor_buffer(void) 629static int dell_wmi_check_descriptor_buffer(void)
630{ 630{
631 struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; 631 struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
632 union acpi_object *obj; 632 union acpi_object *obj;
@@ -717,9 +717,15 @@ static int dell_wmi_events_set_enabled(bool enable)
717 717
718static int dell_wmi_probe(struct wmi_device *wdev) 718static int dell_wmi_probe(struct wmi_device *wdev)
719{ 719{
720 int err;
721
720 struct dell_wmi_priv *priv = devm_kzalloc( 722 struct dell_wmi_priv *priv = devm_kzalloc(
721 &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL); 723 &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL);
722 724
725 err = dell_wmi_check_descriptor_buffer();
726 if (err)
727 return err;
728
723 dev_set_drvdata(&wdev->dev, priv); 729 dev_set_drvdata(&wdev->dev, priv);
724 730
725 return dell_wmi_input_setup(wdev); 731 return dell_wmi_input_setup(wdev);
@@ -749,10 +755,6 @@ static int __init dell_wmi_init(void)
749{ 755{
750 int err; 756 int err;
751 757
752 err = dell_wmi_check_descriptor_buffer();
753 if (err)
754 return err;
755
756 dmi_check_system(dell_wmi_smbios_list); 758 dmi_check_system(dell_wmi_smbios_list);
757 759
758 if (wmi_requires_smbios_request) { 760 if (wmi_requires_smbios_request) {
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 61f106377661..480926786cb8 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -36,8 +36,8 @@ static const struct acpi_device_id intel_vbtn_ids[] = {
36 36
37/* In theory, these are HID usages. */ 37/* In theory, these are HID usages. */
38static const struct key_entry intel_vbtn_keymap[] = { 38static const struct key_entry intel_vbtn_keymap[] = {
39 { KE_IGNORE, 0xC0, { KEY_POWER } }, /* power key press */ 39 { KE_KEY, 0xC0, { KEY_POWER } }, /* power key press */
40 { KE_KEY, 0xC1, { KEY_POWER } }, /* power key release */ 40 { KE_IGNORE, 0xC1, { KEY_POWER } }, /* power key release */
41 { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */ 41 { KE_KEY, 0xC4, { KEY_VOLUMEUP } }, /* volume-up key press */
42 { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */ 42 { KE_IGNORE, 0xC5, { KEY_VOLUMEUP } }, /* volume-up key release */
43 { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */ 43 { KE_KEY, 0xC6, { KEY_VOLUMEDOWN } }, /* volume-down key press */
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 1a764e311e11..e32ba575e8d9 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -1252,12 +1252,12 @@ static int __init acpi_wmi_init(void)
1252 1252
1253 return 0; 1253 return 0;
1254 1254
1255err_unreg_class:
1256 class_unregister(&wmi_bus_class);
1257
1258err_unreg_bus: 1255err_unreg_bus:
1259 bus_unregister(&wmi_bus_type); 1256 bus_unregister(&wmi_bus_type);
1260 1257
1258err_unreg_class:
1259 class_unregister(&wmi_bus_class);
1260
1261 return error; 1261 return error;
1262} 1262}
1263 1263
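
The acpi_wmi_init() hunk only swaps two goto labels, but it restores the invariant that makes goto-unwind error handling work: teardown labels must run in reverse order of the setup calls, so a failure at step N undoes exactly steps N-1 through 1. A compile-level sketch of the idiom; the example_* objects are invented:

	#include <linux/device.h>
	#include <linux/init.h>

	static struct class example_class = { .name = "example" };
	static struct bus_type example_bus = { .name = "example" };
	static struct device_driver example_drv = {
		.name = "example",
		.bus = &example_bus,
	};

	static int __init example_init(void)
	{
		int error;

		error = class_register(&example_class);	/* step 1 */
		if (error)
			return error;

		error = bus_register(&example_bus);		/* step 2 */
		if (error)
			goto err_unreg_class;

		error = driver_register(&example_drv);		/* step 3 */
		if (error)
			goto err_unreg_bus;

		return 0;

		/* unwind strictly in reverse: step 2, then step 1 */
	err_unreg_bus:
		bus_unregister(&example_bus);
	err_unreg_class:
		class_unregister(&example_class);
		return error;
	}
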
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index b77435783ef3..7eacc1c4b3b1 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -28,6 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/syscalls.h> 29#include <linux/syscalls.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <uapi/linux/sched/types.h>
31 32
32#include "ptp_private.h" 33#include "ptp_private.h"
33 34
@@ -184,6 +185,19 @@ static void delete_ptp_clock(struct posix_clock *pc)
184 kfree(ptp); 185 kfree(ptp);
185} 186}
186 187
188static void ptp_aux_kworker(struct kthread_work *work)
189{
190 struct ptp_clock *ptp = container_of(work, struct ptp_clock,
191 aux_work.work);
192 struct ptp_clock_info *info = ptp->info;
193 long delay;
194
195 delay = info->do_aux_work(info);
196
197 if (delay >= 0)
198 kthread_queue_delayed_work(ptp->kworker, &ptp->aux_work, delay);
199}
200
187/* public interface */ 201/* public interface */
188 202
189struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, 203struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
@@ -217,6 +231,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
217 mutex_init(&ptp->pincfg_mux); 231 mutex_init(&ptp->pincfg_mux);
218 init_waitqueue_head(&ptp->tsev_wq); 232 init_waitqueue_head(&ptp->tsev_wq);
219 233
234 if (ptp->info->do_aux_work) {
235 char *worker_name = kasprintf(GFP_KERNEL, "ptp%d", ptp->index);
236
237 kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker);
238 ptp->kworker = kthread_create_worker(0, worker_name ?
239 worker_name : info->name);
240 kfree(worker_name);
241 if (IS_ERR(ptp->kworker)) {
242 err = PTR_ERR(ptp->kworker);
243 pr_err("failed to create ptp aux_worker %d\n", err);
244 goto kworker_err;
245 }
246 }
247
220 err = ptp_populate_pin_groups(ptp); 248 err = ptp_populate_pin_groups(ptp);
221 if (err) 249 if (err)
222 goto no_pin_groups; 250 goto no_pin_groups;
@@ -259,6 +287,9 @@ no_pps:
259no_device: 287no_device:
260 ptp_cleanup_pin_groups(ptp); 288 ptp_cleanup_pin_groups(ptp);
261no_pin_groups: 289no_pin_groups:
290 if (ptp->kworker)
291 kthread_destroy_worker(ptp->kworker);
292kworker_err:
262 mutex_destroy(&ptp->tsevq_mux); 293 mutex_destroy(&ptp->tsevq_mux);
263 mutex_destroy(&ptp->pincfg_mux); 294 mutex_destroy(&ptp->pincfg_mux);
264 ida_simple_remove(&ptp_clocks_map, index); 295 ida_simple_remove(&ptp_clocks_map, index);
@@ -274,6 +305,11 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
274 ptp->defunct = 1; 305 ptp->defunct = 1;
275 wake_up_interruptible(&ptp->tsev_wq); 306 wake_up_interruptible(&ptp->tsev_wq);
276 307
308 if (ptp->kworker) {
309 kthread_cancel_delayed_work_sync(&ptp->aux_work);
310 kthread_destroy_worker(ptp->kworker);
311 }
312
277 /* Release the clock's resources. */ 313 /* Release the clock's resources. */
278 if (ptp->pps_source) 314 if (ptp->pps_source)
279 pps_unregister_source(ptp->pps_source); 315 pps_unregister_source(ptp->pps_source);
@@ -339,6 +375,12 @@ int ptp_find_pin(struct ptp_clock *ptp,
339} 375}
340EXPORT_SYMBOL(ptp_find_pin); 376EXPORT_SYMBOL(ptp_find_pin);
341 377
378int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay)
379{
380 return kthread_mod_delayed_work(ptp->kworker, &ptp->aux_work, delay);
381}
382EXPORT_SYMBOL(ptp_schedule_worker);
383
342/* module operations */ 384/* module operations */
343 385
344static void __exit ptp_exit(void) 386static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index d95888974d0c..b86f1bfecd6f 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -22,6 +22,7 @@
22 22
23#include <linux/cdev.h> 23#include <linux/cdev.h>
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/kthread.h>
25#include <linux/mutex.h> 26#include <linux/mutex.h>
26#include <linux/posix-clock.h> 27#include <linux/posix-clock.h>
27#include <linux/ptp_clock.h> 28#include <linux/ptp_clock.h>
@@ -56,6 +57,8 @@ struct ptp_clock {
56 struct attribute_group pin_attr_group; 57 struct attribute_group pin_attr_group;
57 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */ 58 /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
58 const struct attribute_group *pin_attr_groups[2]; 59 const struct attribute_group *pin_attr_groups[2];
60 struct kthread_worker *kworker;
61 struct kthread_delayed_work aux_work;
59}; 62};
60 63
61/* 64/*
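
Taken together, the two ptp hunks give every ptp_clock an optional kthread worker: a driver that fills in .do_aux_work gets its callback run on a dedicated "ptp%d" kworker, re-armed whenever the callback returns a non-negative delay in jiffies, and can kick or re-schedule it with the new ptp_schedule_worker(). A sketch of the driver side, with my_* names invented:

	#include <linux/ptp_clock_kernel.h>

	struct my_clock {
		struct ptp_clock_info caps;
		struct ptp_clock *ptp;
		/* timecounter, locks, ... */
	};

	void my_overflow_check(struct my_clock *clk);	/* stand-in for the periodic job */

	static long my_do_aux_work(struct ptp_clock_info *info)
	{
		struct my_clock *clk = container_of(info, struct my_clock, caps);

		my_overflow_check(clk);	/* runs in the clock's own kworker */

		return HZ;		/* >= 0: run again after this many jiffies */
	}

The driver would set clk->caps.do_aux_work = my_do_aux_work before ptp_clock_register() and could start the cycle early with ptp_schedule_worker(clk->ptp, 0).
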
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 4fac49e55d47..4b43aa62fbc7 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1301,7 +1301,6 @@ static void ds1307_clks_register(struct ds1307 *ds1307)
1301static const struct regmap_config regmap_config = { 1301static const struct regmap_config regmap_config = {
1302 .reg_bits = 8, 1302 .reg_bits = 8,
1303 .val_bits = 8, 1303 .val_bits = 8,
1304 .max_register = 0x12,
1305}; 1304};
1306 1305
1307static int ds1307_probe(struct i2c_client *client, 1306static int ds1307_probe(struct i2c_client *client,
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 7e0d4f724dda..432fc40990bd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1,
559 chpid.id = crw0->rsid; 559 chpid.id = crw0->rsid;
560 switch (crw0->erc) { 560 switch (crw0->erc) {
561 case CRW_ERC_IPARM: /* Path has come. */ 561 case CRW_ERC_IPARM: /* Path has come. */
562 case CRW_ERC_INIT:
562 if (!chp_is_registered(chpid)) 563 if (!chp_is_registered(chpid))
563 chp_new(chpid); 564 chp_new(chpid);
564 chsc_chp_online(chpid); 565 chsc_chp_online(chpid);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 8975cd321390..d42e758518ed 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2512,7 +2512,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2512 struct rtable *rt = (struct rtable *) dst; 2512 struct rtable *rt = (struct rtable *) dst;
2513 __be32 *pkey = &ip_hdr(skb)->daddr; 2513 __be32 *pkey = &ip_hdr(skb)->daddr;
2514 2514
2515 if (rt->rt_gateway) 2515 if (rt && rt->rt_gateway)
2516 pkey = &rt->rt_gateway; 2516 pkey = &rt->rt_gateway;
2517 2517
2518 /* IPv4 */ 2518 /* IPv4 */
@@ -2523,7 +2523,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
2523 struct rt6_info *rt = (struct rt6_info *) dst; 2523 struct rt6_info *rt = (struct rt6_info *) dst;
2524 struct in6_addr *pkey = &ipv6_hdr(skb)->daddr; 2524 struct in6_addr *pkey = &ipv6_hdr(skb)->daddr;
2525 2525
2526 if (!ipv6_addr_any(&rt->rt6i_gateway)) 2526 if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
2527 pkey = &rt->rt6i_gateway; 2527 pkey = &rt->rt6i_gateway;
2528 2528
2529 /* IPv6 */ 2529 /* IPv6 */
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 04efed171c88..f32765d3cbd8 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -212,8 +212,8 @@ static int d7s_probe(struct platform_device *op)
212 212
213 writeb(regs, p->regs); 213 writeb(regs, p->regs);
214 214
215 printk(KERN_INFO PFX "7-Segment Display%s at [%s:0x%llx] %s\n", 215 printk(KERN_INFO PFX "7-Segment Display%pOF at [%s:0x%llx] %s\n",
216 op->dev.of_node->full_name, 216 op->dev.of_node,
217 (regs & D7S_FLIP) ? " (FLIPPED)" : "", 217 (regs & D7S_FLIP) ? " (FLIPPED)" : "",
218 op->resource[0].start, 218 op->resource[0].start,
219 sol_compat ? "in sol_compat mode" : ""); 219 sol_compat ? "in sol_compat mode" : "");
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index 216f923161d1..a610b8d3d11f 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -181,8 +181,8 @@ static int flash_probe(struct platform_device *op)
181 } 181 }
182 flash.busy = 0; 182 flash.busy = 0;
183 183
184 printk(KERN_INFO "%s: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n", 184 printk(KERN_INFO "%pOF: OBP Flash, RD %lx[%lx] WR %lx[%lx]\n",
185 op->dev.of_node->full_name, 185 op->dev.of_node,
186 flash.read_base, flash.read_size, 186 flash.read_base, flash.read_size,
187 flash.write_base, flash.write_size); 187 flash.write_base, flash.write_size);
188 188
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c
index 57696fc0b482..0a5013350acd 100644
--- a/drivers/sbus/char/uctrl.c
+++ b/drivers/sbus/char/uctrl.c
@@ -379,8 +379,8 @@ static int uctrl_probe(struct platform_device *op)
379 } 379 }
380 380
381 sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr); 381 sbus_writel(UCTRL_INTR_RXNE_REQ|UCTRL_INTR_RXNE_MSK, &p->regs->uctrl_intr);
382 printk(KERN_INFO "%s: uctrl regs[0x%p] (irq %d)\n", 382 printk(KERN_INFO "%pOF: uctrl regs[0x%p] (irq %d)\n",
383 op->dev.of_node->full_name, p->regs, p->irq); 383 op->dev.of_node, p->regs, p->irq);
384 uctrl_get_event_status(p); 384 uctrl_get_event_status(p);
385 uctrl_get_external_status(p); 385 uctrl_get_external_status(p);
386 386
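
The three sbus conversions above are the same mechanical change: print the struct device_node pointer through the %pOF printk extension, which renders the node's full path, instead of reaching into ->full_name. The same pattern in isolation:

	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *op)
	{
		/* %pOF prints the full device-tree path of the node */
		dev_info(&op->dev, "%pOF: probed\n", op->dev.of_node);
		return 0;
	}
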
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d384f4f86c26..d145e0d90227 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -47,6 +47,17 @@ config SCSI_NETLINK
47 default n 47 default n
48 depends on NET 48 depends on NET
49 49
50config SCSI_MQ_DEFAULT
51 bool "SCSI: use blk-mq I/O path by default"
52 depends on SCSI
53 ---help---
54 This option enables the new blk-mq based I/O path for SCSI
55 devices by default. With the option the scsi_mod.use_blk_mq
56 module/boot option defaults to Y, without it to N, but it can
57 still be overridden either way.
58
59 If unsure say N.
60
50config SCSI_PROC_FS 61config SCSI_PROC_FS
51 bool "legacy /proc/scsi/ support" 62 bool "legacy /proc/scsi/ support"
52 depends on SCSI && PROC_FS 63 depends on SCSI && PROC_FS
@@ -1230,6 +1241,8 @@ config SCSI_LPFC
1230 tristate "Emulex LightPulse Fibre Channel Support" 1241 tristate "Emulex LightPulse Fibre Channel Support"
1231 depends on PCI && SCSI 1242 depends on PCI && SCSI
1232 depends on SCSI_FC_ATTRS 1243 depends on SCSI_FC_ATTRS
1244 depends on NVME_TARGET_FC || NVME_TARGET_FC=n
1245 depends on NVME_FC || NVME_FC=n
1233 select CRC_T10DIF 1246 select CRC_T10DIF
1234 ---help--- 1247 ---help---
1235 This lpfc driver supports the Emulex LightPulse 1248 This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 707ee2f5954d..a1a2c71e1626 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -549,7 +549,9 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
549 if ((le32_to_cpu(get_name_reply->status) == CT_OK) 549 if ((le32_to_cpu(get_name_reply->status) == CT_OK)
550 && (get_name_reply->data[0] != '\0')) { 550 && (get_name_reply->data[0] != '\0')) {
551 char *sp = get_name_reply->data; 551 char *sp = get_name_reply->data;
552 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; 552 int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
553
554 sp[data_size - 1] = '\0';
553 while (*sp == ' ') 555 while (*sp == ' ')
554 ++sp; 556 ++sp;
555 if (*sp) { 557 if (*sp) {
@@ -579,12 +581,15 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
579static int aac_get_container_name(struct scsi_cmnd * scsicmd) 581static int aac_get_container_name(struct scsi_cmnd * scsicmd)
580{ 582{
581 int status; 583 int status;
584 int data_size;
582 struct aac_get_name *dinfo; 585 struct aac_get_name *dinfo;
583 struct fib * cmd_fibcontext; 586 struct fib * cmd_fibcontext;
584 struct aac_dev * dev; 587 struct aac_dev * dev;
585 588
586 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 589 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
587 590
591 data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
592
588 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 593 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
589 594
590 aac_fib_init(cmd_fibcontext); 595 aac_fib_init(cmd_fibcontext);
@@ -593,7 +598,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
593 dinfo->command = cpu_to_le32(VM_ContainerConfig); 598 dinfo->command = cpu_to_le32(VM_ContainerConfig);
594 dinfo->type = cpu_to_le32(CT_READ_NAME); 599 dinfo->type = cpu_to_le32(CT_READ_NAME);
595 dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); 600 dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
596 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 601 dinfo->count = cpu_to_le32(data_size - 1);
597 602
598 status = aac_fib_send(ContainerCommand, 603 status = aac_fib_send(ContainerCommand,
599 cmd_fibcontext, 604 cmd_fibcontext,
@@ -3198,10 +3203,11 @@ static int query_disk(struct aac_dev *dev, void __user *arg)
3198 return -EBUSY; 3203 return -EBUSY;
3199 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 3204 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
3200 return -EFAULT; 3205 return -EFAULT;
3201 if (qd.cnum == -1) 3206 if (qd.cnum == -1) {
3207 if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
3208 return -EINVAL;
3202 qd.cnum = qd.id; 3209 qd.cnum = qd.id;
3203 else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) 3210 } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
3204 {
3205 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) 3211 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
3206 return -EINVAL; 3212 return -EINVAL;
3207 qd.instance = dev->scsi_host_ptr->host_no; 3213 qd.instance = dev->scsi_host_ptr->host_no;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d31a9bc2ba69..ee2667e20e42 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2274,7 +2274,7 @@ struct aac_get_name_resp {
2274 __le32 parm3; 2274 __le32 parm3;
2275 __le32 parm4; 2275 __le32 parm4;
2276 __le32 parm5; 2276 __le32 parm5;
2277 u8 data[16]; 2277 u8 data[17];
2278}; 2278};
2279 2279
2280#define CT_CID_TO_32BITS_UID 165 2280#define CT_CID_TO_32BITS_UID 165
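
The aacraid pair works as one fix: data grows from 16 to 17 bytes so a full 16-byte name from firmware still leaves room for a terminator, and both the termination and the count sent to firmware are derived from FIELD_SIZEOF() instead of the open-coded sizeof-through-NULL cast. The core of the idiom, reduced; struct name_resp is a stand-in:

	#include <linux/kernel.h>	/* FIELD_SIZEOF() */
	#include <linux/types.h>

	struct name_resp {
		u8 data[17];		/* 16 payload bytes + 1 for '\0' */
	};

	static void terminate(struct name_resp *resp)
	{
		int size = FIELD_SIZEOF(struct name_resp, data);

		resp->data[size - 1] = '\0';	/* index 16: always inside the array */
	}
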
diff --git a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile
index 741d81861d17..07b60a780c06 100644
--- a/drivers/scsi/aic7xxx/Makefile
+++ b/drivers/scsi/aic7xxx/Makefile
@@ -55,9 +55,9 @@ aicasm-7xxx-opts-$(CONFIG_AIC7XXX_REG_PRETTY_PRINT) := \
55 55
56ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y) 56ifeq ($(CONFIG_AIC7XXX_BUILD_FIRMWARE),y)
57$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm 57$(obj)/aic7xxx_seq.h: $(src)/aic7xxx.seq $(src)/aic7xxx.reg $(obj)/aicasm/aicasm
58 $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic7xxx_reg.h \ 58 $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic7xxx_reg.h \
59 $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \ 59 $(aicasm-7xxx-opts-y) -o $(obj)/aic7xxx_seq.h \
60 $(src)/aic7xxx.seq 60 $(srctree)/$(src)/aic7xxx.seq
61 61
62$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h 62$(aic7xxx-gen-y): $(obj)/aic7xxx_seq.h
63else 63else
@@ -72,14 +72,14 @@ aicasm-79xx-opts-$(CONFIG_AIC79XX_REG_PRETTY_PRINT) := \
72 72
73ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y) 73ifeq ($(CONFIG_AIC79XX_BUILD_FIRMWARE),y)
74$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm 74$(obj)/aic79xx_seq.h: $(src)/aic79xx.seq $(src)/aic79xx.reg $(obj)/aicasm/aicasm
75 $(obj)/aicasm/aicasm -I$(src) -r $(obj)/aic79xx_reg.h \ 75 $(obj)/aicasm/aicasm -I$(srctree)/$(src) -r $(obj)/aic79xx_reg.h \
76 $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \ 76 $(aicasm-79xx-opts-y) -o $(obj)/aic79xx_seq.h \
77 $(src)/aic79xx.seq 77 $(srctree)/$(src)/aic79xx.seq
78 78
79$(aic79xx-gen-y): $(obj)/aic79xx_seq.h 79$(aic79xx-gen-y): $(obj)/aic79xx_seq.h
80else 80else
81$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped 81$(obj)/aic79xx_reg_print.c: $(src)/aic79xx_reg_print.c_shipped
82endif 82endif
83 83
84$(obj)/aicasm/aicasm: $(src)/aicasm/*.[chyl] 84$(obj)/aicasm/aicasm: $(srctree)/$(src)/aicasm/*.[chyl]
85 $(MAKE) -C $(src)/aicasm 85 $(MAKE) -C $(srctree)/$(src)/aicasm OUTDIR=$(shell pwd)/$(obj)/aicasm/
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index b98c5c1056c3..45e2d49c1fff 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -1,19 +1,21 @@
1PROG= aicasm 1PROG= aicasm
2 2
3OUTDIR ?= ./
4
3.SUFFIXES= .l .y .c .h 5.SUFFIXES= .l .y .c .h
4 6
5CSRCS= aicasm.c aicasm_symbol.c 7CSRCS= aicasm.c aicasm_symbol.c
6YSRCS= aicasm_gram.y aicasm_macro_gram.y 8YSRCS= aicasm_gram.y aicasm_macro_gram.y
7LSRCS= aicasm_scan.l aicasm_macro_scan.l 9LSRCS= aicasm_scan.l aicasm_macro_scan.l
8 10
9GENHDRS= aicdb.h $(YSRCS:.y=.h) 11GENHDRS= $(addprefix ${OUTDIR}/,aicdb.h $(YSRCS:.y=.h))
10GENSRCS= $(YSRCS:.y=.c) $(LSRCS:.l=.c) 12GENSRCS= $(addprefix ${OUTDIR}/,$(YSRCS:.y=.c) $(LSRCS:.l=.c))
11 13
12SRCS= ${CSRCS} ${GENSRCS} 14SRCS= ${CSRCS} ${GENSRCS}
13LIBS= -ldb 15LIBS= -ldb
14clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG) 16clean-files:= ${GENSRCS} ${GENHDRS} $(YSRCS:.y=.output) $(PROG)
15# Override default kernel CFLAGS. This is a userland app. 17# Override default kernel CFLAGS. This is a userland app.
16AICASM_CFLAGS:= -I/usr/include -I. 18AICASM_CFLAGS:= -I/usr/include -I. -I$(OUTDIR)
17LEX= flex 19LEX= flex
18YACC= bison 20YACC= bison
19YFLAGS= -d 21YFLAGS= -d
@@ -32,22 +34,25 @@ YFLAGS+= -t -v
32LFLAGS= -d 34LFLAGS= -d
33endif 35endif
34 36
35$(PROG): ${GENHDRS} $(SRCS) 37$(PROG): $(OUTDIR) ${GENHDRS} $(SRCS)
36 $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(PROG) $(LIBS) 38 $(AICASM_CC) $(AICASM_CFLAGS) $(SRCS) -o $(OUTDIR)/$(PROG) $(LIBS)
39
40$(OUTDIR):
41 mkdir -p $(OUTDIR)
37 42
38aicdb.h: 43$(OUTDIR)/aicdb.h:
39 @if [ -e "/usr/include/db4/db_185.h" ]; then \ 44 @if [ -e "/usr/include/db4/db_185.h" ]; then \
40 echo "#include <db4/db_185.h>" > aicdb.h; \ 45 echo "#include <db4/db_185.h>" > $@; \
41 elif [ -e "/usr/include/db3/db_185.h" ]; then \ 46 elif [ -e "/usr/include/db3/db_185.h" ]; then \
42 echo "#include <db3/db_185.h>" > aicdb.h; \ 47 echo "#include <db3/db_185.h>" > $@; \
43 elif [ -e "/usr/include/db2/db_185.h" ]; then \ 48 elif [ -e "/usr/include/db2/db_185.h" ]; then \
44 echo "#include <db2/db_185.h>" > aicdb.h; \ 49 echo "#include <db2/db_185.h>" > $@; \
45 elif [ -e "/usr/include/db1/db_185.h" ]; then \ 50 elif [ -e "/usr/include/db1/db_185.h" ]; then \
46 echo "#include <db1/db_185.h>" > aicdb.h; \ 51 echo "#include <db1/db_185.h>" > $@; \
47 elif [ -e "/usr/include/db/db_185.h" ]; then \ 52 elif [ -e "/usr/include/db/db_185.h" ]; then \
48 echo "#include <db/db_185.h>" > aicdb.h; \ 53 echo "#include <db/db_185.h>" > $@; \
49 elif [ -e "/usr/include/db_185.h" ]; then \ 54 elif [ -e "/usr/include/db_185.h" ]; then \
50 echo "#include <db_185.h>" > aicdb.h; \ 55 echo "#include <db_185.h>" > $@; \
51 else \ 56 else \
52 echo "*** Install db development libraries"; \ 57 echo "*** Install db development libraries"; \
53 fi 58 fi
@@ -58,23 +63,23 @@ clean:
58# Create a dependency chain in generated files 63# Create a dependency chain in generated files
59# to avoid concurrent invocations of the single 64# to avoid concurrent invocations of the single
60# rule that builds them all. 65# rule that builds them all.
61aicasm_gram.c: aicasm_gram.h 66$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h
62aicasm_gram.c aicasm_gram.h: aicasm_gram.y 67$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y
63 $(YACC) $(YFLAGS) -b $(<:.y=) $< 68 $(YACC) $(YFLAGS) -b $(<:.y=) $<
64 mv $(<:.y=).tab.c $(<:.y=.c) 69 mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
65 mv $(<:.y=).tab.h $(<:.y=.h) 70 mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
66 71
67# Create a dependency chain in generated files 72# Create a dependency chain in generated files
68# to avoid concurrent invocations of the single 73# to avoid concurrent invocations of the single
69# rule that builds them all. 74# rule that builds them all.
70aicasm_macro_gram.c: aicasm_macro_gram.h 75$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h
71aicasm_macro_gram.c aicasm_macro_gram.h: aicasm_macro_gram.y 76$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y
72 $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< 77 $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
73 mv $(<:.y=).tab.c $(<:.y=.c) 78 mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
74 mv $(<:.y=).tab.h $(<:.y=.h) 79 mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
75 80
76aicasm_scan.c: aicasm_scan.l 81$(OUTDIR)/aicasm_scan.c: aicasm_scan.l
77 $(LEX) $(LFLAGS) -o$@ $< 82 $(LEX) $(LFLAGS) -o $@ $<
78 83
79aicasm_macro_scan.c: aicasm_macro_scan.l 84$(OUTDIR)/aicasm_macro_scan.c: aicasm_macro_scan.l
80 $(LEX) $(LFLAGS) -Pmm -o$@ $< 85 $(LEX) $(LFLAGS) -Pmm -o $@ $<
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7dfe709a7138..6844ba361616 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2624,12 +2624,11 @@ static struct fcoe_transport bnx2fc_transport = {
2624}; 2624};
2625 2625
2626/** 2626/**
2627 * bnx2fc_percpu_thread_create - Create a receive thread for an 2627 * bnx2fc_cpu_online - Create a receive thread for an online CPU
2628 * online CPU
2629 * 2628 *
2630 * @cpu: cpu index for the online cpu 2629 * @cpu: cpu index for the online cpu
2631 */ 2630 */
2632static void bnx2fc_percpu_thread_create(unsigned int cpu) 2631static int bnx2fc_cpu_online(unsigned int cpu)
2633{ 2632{
2634 struct bnx2fc_percpu_s *p; 2633 struct bnx2fc_percpu_s *p;
2635 struct task_struct *thread; 2634 struct task_struct *thread;
@@ -2639,15 +2638,17 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
2639 thread = kthread_create_on_node(bnx2fc_percpu_io_thread, 2638 thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
2640 (void *)p, cpu_to_node(cpu), 2639 (void *)p, cpu_to_node(cpu),
2641 "bnx2fc_thread/%d", cpu); 2640 "bnx2fc_thread/%d", cpu);
2641 if (IS_ERR(thread))
2642 return PTR_ERR(thread);
2643
2642 /* bind thread to the cpu */ 2644 /* bind thread to the cpu */
2643 if (likely(!IS_ERR(thread))) { 2645 kthread_bind(thread, cpu);
2644 kthread_bind(thread, cpu); 2646 p->iothread = thread;
2645 p->iothread = thread; 2647 wake_up_process(thread);
2646 wake_up_process(thread); 2648 return 0;
2647 }
2648} 2649}
2649 2650
2650static void bnx2fc_percpu_thread_destroy(unsigned int cpu) 2651static int bnx2fc_cpu_offline(unsigned int cpu)
2651{ 2652{
2652 struct bnx2fc_percpu_s *p; 2653 struct bnx2fc_percpu_s *p;
2653 struct task_struct *thread; 2654 struct task_struct *thread;
@@ -2661,7 +2662,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
2661 thread = p->iothread; 2662 thread = p->iothread;
2662 p->iothread = NULL; 2663 p->iothread = NULL;
2663 2664
2664
2665 /* Free all work in the list */ 2665 /* Free all work in the list */
2666 list_for_each_entry_safe(work, tmp, &p->work_list, list) { 2666 list_for_each_entry_safe(work, tmp, &p->work_list, list) {
2667 list_del_init(&work->list); 2667 list_del_init(&work->list);
@@ -2673,20 +2673,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
2673 2673
2674 if (thread) 2674 if (thread)
2675 kthread_stop(thread); 2675 kthread_stop(thread);
2676}
2677
2678
2679static int bnx2fc_cpu_online(unsigned int cpu)
2680{
2681 printk(PFX "CPU %x online: Create Rx thread\n", cpu);
2682 bnx2fc_percpu_thread_create(cpu);
2683 return 0;
2684}
2685
2686static int bnx2fc_cpu_dead(unsigned int cpu)
2687{
2688 printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
2689 bnx2fc_percpu_thread_destroy(cpu);
2690 return 0; 2676 return 0;
2691} 2677}
2692 2678
@@ -2761,30 +2747,16 @@ static int __init bnx2fc_mod_init(void)
2761 spin_lock_init(&p->fp_work_lock); 2747 spin_lock_init(&p->fp_work_lock);
2762 } 2748 }
2763 2749
2764 get_online_cpus(); 2750 rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
2765 2751 bnx2fc_cpu_online, bnx2fc_cpu_offline);
2766 for_each_online_cpu(cpu)
2767 bnx2fc_percpu_thread_create(cpu);
2768
2769 rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2770 "scsi/bnx2fc:online",
2771 bnx2fc_cpu_online, NULL);
2772 if (rc < 0) 2752 if (rc < 0)
2773 goto stop_threads; 2753 goto stop_thread;
2774 bnx2fc_online_state = rc; 2754 bnx2fc_online_state = rc;
2775 2755
2776 cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
2777 NULL, bnx2fc_cpu_dead);
2778 put_online_cpus();
2779
2780 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2756 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
2781
2782 return 0; 2757 return 0;
2783 2758
2784stop_threads: 2759stop_thread:
2785 for_each_online_cpu(cpu)
2786 bnx2fc_percpu_thread_destroy(cpu);
2787 put_online_cpus();
2788 kthread_stop(l2_thread); 2760 kthread_stop(l2_thread);
2789free_wq: 2761free_wq:
2790 destroy_workqueue(bnx2fc_wq); 2762 destroy_workqueue(bnx2fc_wq);
@@ -2803,7 +2775,6 @@ static void __exit bnx2fc_mod_exit(void)
2803 struct fcoe_percpu_s *bg; 2775 struct fcoe_percpu_s *bg;
2804 struct task_struct *l2_thread; 2776 struct task_struct *l2_thread;
2805 struct sk_buff *skb; 2777 struct sk_buff *skb;
2806 unsigned int cpu = 0;
2807 2778
2808 /* 2779 /*
2809 * NOTE: Since cnic calls register_driver routine rtnl_lock, 2780 * NOTE: Since cnic calls register_driver routine rtnl_lock,
@@ -2844,16 +2815,7 @@ static void __exit bnx2fc_mod_exit(void)
2844 if (l2_thread) 2815 if (l2_thread)
2845 kthread_stop(l2_thread); 2816 kthread_stop(l2_thread);
2846 2817
2847 get_online_cpus(); 2818 cpuhp_remove_state(bnx2fc_online_state);
2848 /* Destroy per cpu threads */
2849 for_each_online_cpu(cpu) {
2850 bnx2fc_percpu_thread_destroy(cpu);
2851 }
2852
2853 cpuhp_remove_state_nocalls(bnx2fc_online_state);
2854 cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
2855
2856 put_online_cpus();
2857 2819
2858 destroy_workqueue(bnx2fc_wq); 2820 destroy_workqueue(bnx2fc_wq);
2859 /* 2821 /*
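
The bnx2fc conversion replaces the open-coded get_online_cpus()/for_each_online_cpu() thread setup, plus the separate CPUHP_SCSI_BNX2FC_DEAD state, with one dynamically allocated hotplug state. cpuhp_setup_state() (without _nocalls) already invokes the online callback for every CPU that is currently up, and cpuhp_remove_state() runs the offline callback on each of them at teardown, which is what lets all the manual loops disappear. The shape of the pattern, with example_* names invented:

	#include <linux/cpuhotplug.h>

	static enum cpuhp_state example_online_state;

	static int example_cpu_online(unsigned int cpu)
	{
		/* create and bind this CPU's receive thread */
		return 0;
	}

	static int example_cpu_offline(unsigned int cpu)
	{
		/* drain the per-CPU work list and stop its thread */
		return 0;
	}

	static int example_setup(void)
	{
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
					example_cpu_online, example_cpu_offline);
		if (ret < 0)
			return ret;

		example_online_state = ret;	/* dynamic states hand back their id */
		return 0;
	}

	static void example_teardown(void)
	{
		cpuhp_remove_state(example_online_state);
	}

The bnx2i hunks further down apply the identical conversion.
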
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 913c750205ce..26de61d65a4d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
1008 return work; 1008 return work;
1009} 1009}
1010 1010
1011/* Pending work request completion */
1012static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
1013{
1014 unsigned int cpu = wqe % num_possible_cpus();
1015 struct bnx2fc_percpu_s *fps;
1016 struct bnx2fc_work *work;
1017
1018 fps = &per_cpu(bnx2fc_percpu, cpu);
1019 spin_lock_bh(&fps->fp_work_lock);
1020 if (fps->iothread) {
1021 work = bnx2fc_alloc_work(tgt, wqe);
1022 if (work) {
1023 list_add_tail(&work->list, &fps->work_list);
1024 wake_up_process(fps->iothread);
1025 spin_unlock_bh(&fps->fp_work_lock);
1026 return;
1027 }
1028 }
1029 spin_unlock_bh(&fps->fp_work_lock);
1030 bnx2fc_process_cq_compl(tgt, wqe);
1031}
1032
1011int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) 1033int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1012{ 1034{
1013 struct fcoe_cqe *cq; 1035 struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1042 /* Unsolicited event notification */ 1064 /* Unsolicited event notification */
1043 bnx2fc_process_unsol_compl(tgt, wqe); 1065 bnx2fc_process_unsol_compl(tgt, wqe);
1044 } else { 1066 } else {
1045 /* Pending work request completion */ 1067 bnx2fc_pending_work(tgt, wqe);
1046 struct bnx2fc_work *work = NULL;
1047 struct bnx2fc_percpu_s *fps = NULL;
1048 unsigned int cpu = wqe % num_possible_cpus();
1049
1050 fps = &per_cpu(bnx2fc_percpu, cpu);
1051 spin_lock_bh(&fps->fp_work_lock);
1052 if (unlikely(!fps->iothread))
1053 goto unlock;
1054
1055 work = bnx2fc_alloc_work(tgt, wqe);
1056 if (work)
1057 list_add_tail(&work->list,
1058 &fps->work_list);
1059unlock:
1060 spin_unlock_bh(&fps->fp_work_lock);
1061
1062 /* Pending work request completion */
1063 if (fps->iothread && work)
1064 wake_up_process(fps->iothread);
1065 else
1066 bnx2fc_process_cq_compl(tgt, wqe);
1067 num_free_sqes++; 1068 num_free_sqes++;
1068 } 1069 }
1069 cqe++; 1070 cqe++;
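
The bnx2fc_hwi.c hunk is mostly an extraction, but it also tightens the locking: the old code decided between waking the io thread and completing inline by re-reading fps->iothread after dropping the lock, while bnx2fc_pending_work() keeps both the queueing and the wake-up under fp_work_lock and falls through to synchronous completion otherwise. A reduced sketch of that queue-or-do-inline shape, with types and helpers invented:

	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct percpu_ctx {
		spinlock_t lock;
		struct list_head work_list;
		struct task_struct *iothread;
	};

	struct work_item {
		struct list_head list;
		unsigned int wqe;
	};

	void process_inline(unsigned int wqe);	/* stand-in for bnx2fc_process_cq_compl() */

	static void dispatch(struct percpu_ctx *fps, unsigned int wqe)
	{
		struct work_item *work;

		spin_lock_bh(&fps->lock);
		if (fps->iothread) {
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (work) {
				work->wqe = wqe;
				list_add_tail(&work->list, &fps->work_list);
				wake_up_process(fps->iothread);
				spin_unlock_bh(&fps->lock);
				return;
			}
		}
		spin_unlock_bh(&fps->lock);
		process_inline(wqe);	/* no thread or no memory: complete here */
	}
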
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 86afc002814c..4ebcda8d9500 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -404,12 +404,11 @@ int bnx2i_get_stats(void *handle)
404 404
405 405
406/** 406/**
407 * bnx2i_percpu_thread_create - Create a receive thread for an 407 * bnx2i_cpu_online - Create a receive thread for an online CPU
408 * online CPU
409 * 408 *
410 * @cpu: cpu index for the online cpu 409 * @cpu: cpu index for the online cpu
411 */ 410 */
412static void bnx2i_percpu_thread_create(unsigned int cpu) 411static int bnx2i_cpu_online(unsigned int cpu)
413{ 412{
414 struct bnx2i_percpu_s *p; 413 struct bnx2i_percpu_s *p;
415 struct task_struct *thread; 414 struct task_struct *thread;
@@ -419,16 +418,17 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
419 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, 418 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
420 cpu_to_node(cpu), 419 cpu_to_node(cpu),
421 "bnx2i_thread/%d", cpu); 420 "bnx2i_thread/%d", cpu);
421 if (IS_ERR(thread))
422 return PTR_ERR(thread);
423
422 /* bind thread to the cpu */ 424 /* bind thread to the cpu */
423 if (likely(!IS_ERR(thread))) { 425 kthread_bind(thread, cpu);
424 kthread_bind(thread, cpu); 426 p->iothread = thread;
425 p->iothread = thread; 427 wake_up_process(thread);
426 wake_up_process(thread); 428 return 0;
427 }
428} 429}
429 430
430 431static int bnx2i_cpu_offline(unsigned int cpu)
431static void bnx2i_percpu_thread_destroy(unsigned int cpu)
432{ 432{
433 struct bnx2i_percpu_s *p; 433 struct bnx2i_percpu_s *p;
434 struct task_struct *thread; 434 struct task_struct *thread;
@@ -451,19 +451,6 @@ static void bnx2i_percpu_thread_destroy(unsigned int cpu)
451 spin_unlock_bh(&p->p_work_lock); 451 spin_unlock_bh(&p->p_work_lock);
452 if (thread) 452 if (thread)
453 kthread_stop(thread); 453 kthread_stop(thread);
454}
455
456static int bnx2i_cpu_online(unsigned int cpu)
457{
458 pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
459 bnx2i_percpu_thread_create(cpu);
460 return 0;
461}
462
463static int bnx2i_cpu_dead(unsigned int cpu)
464{
465 pr_info("CPU %x offline: Remove Rx thread\n", cpu);
466 bnx2i_percpu_thread_destroy(cpu);
467 return 0; 454 return 0;
468} 455}
469 456
@@ -511,27 +498,14 @@ static int __init bnx2i_mod_init(void)
511 p->iothread = NULL; 498 p->iothread = NULL;
512 } 499 }
513 500
514 get_online_cpus(); 501 err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
515 502 bnx2i_cpu_online, bnx2i_cpu_offline);
516 for_each_online_cpu(cpu)
517 bnx2i_percpu_thread_create(cpu);
518
519 err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
520 "scsi/bnx2i:online",
521 bnx2i_cpu_online, NULL);
522 if (err < 0) 503 if (err < 0)
523 goto remove_threads; 504 goto unreg_driver;
524 bnx2i_online_state = err; 505 bnx2i_online_state = err;
525
526 cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
527 NULL, bnx2i_cpu_dead);
528 put_online_cpus();
529 return 0; 506 return 0;
530 507
531remove_threads: 508unreg_driver:
532 for_each_online_cpu(cpu)
533 bnx2i_percpu_thread_destroy(cpu);
534 put_online_cpus();
535 cnic_unregister_driver(CNIC_ULP_ISCSI); 509 cnic_unregister_driver(CNIC_ULP_ISCSI);
536unreg_xport: 510unreg_xport:
537 iscsi_unregister_transport(&bnx2i_iscsi_transport); 511 iscsi_unregister_transport(&bnx2i_iscsi_transport);
@@ -551,7 +525,6 @@ out:
551static void __exit bnx2i_mod_exit(void) 525static void __exit bnx2i_mod_exit(void)
552{ 526{
553 struct bnx2i_hba *hba; 527 struct bnx2i_hba *hba;
554 unsigned cpu = 0;
555 528
556 mutex_lock(&bnx2i_dev_lock); 529 mutex_lock(&bnx2i_dev_lock);
557 while (!list_empty(&adapter_list)) { 530 while (!list_empty(&adapter_list)) {
@@ -569,14 +542,7 @@ static void __exit bnx2i_mod_exit(void)
569 } 542 }
570 mutex_unlock(&bnx2i_dev_lock); 543 mutex_unlock(&bnx2i_dev_lock);
571 544
572 get_online_cpus(); 545 cpuhp_remove_state(bnx2i_online_state);
573
574 for_each_online_cpu(cpu)
575 bnx2i_percpu_thread_destroy(cpu);
576
577 cpuhp_remove_state_nocalls(bnx2i_online_state);
578 cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
579 put_online_cpus();
580 546
581 iscsi_unregister_transport(&bnx2i_iscsi_transport); 547 iscsi_unregister_transport(&bnx2i_iscsi_transport);
582 cnic_unregister_driver(CNIC_ULP_ISCSI); 548 cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 2029ad225121..5be0086142ca 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3845,8 +3845,10 @@ csio_hw_start(struct csio_hw *hw)
3845 3845
3846 if (csio_is_hw_ready(hw)) 3846 if (csio_is_hw_ready(hw))
3847 return 0; 3847 return 0;
3848 else 3848 else if (csio_match_state(hw, csio_hws_uninit))
3849 return -EINVAL; 3849 return -EINVAL;
3850 else
3851 return -ENODEV;
3850} 3852}
3851 3853
3852int 3854int
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ea0c31086cc6..dcd074169aa9 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -969,10 +969,14 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
969 969
970 pci_set_drvdata(pdev, hw); 970 pci_set_drvdata(pdev, hw);
971 971
972 if (csio_hw_start(hw) != 0) { 972 rv = csio_hw_start(hw);
973 dev_err(&pdev->dev, 973 if (rv) {
974 "Failed to start FW, continuing in debug mode.\n"); 974 if (rv == -EINVAL) {
975 return 0; 975 dev_err(&pdev->dev,
976 "Failed to start FW, continuing in debug mode.\n");
977 return 0;
978 }
979 goto err_lnode_exit;
976 } 980 }
977 981
978 sprintf(hw->fwrev_str, "%u.%u.%u.%u\n", 982 sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index a69a9ac836f5..1d02cf9fe06c 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1635,6 +1635,9 @@ static int init_act_open(struct cxgbi_sock *csk)
1635 goto rel_resource; 1635 goto rel_resource;
1636 } 1636 }
1637 1637
1638 if (!(n->nud_state & NUD_VALID))
1639 neigh_event_send(n, NULL);
1640
1638 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); 1641 csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1639 if (csk->atid < 0) { 1642 if (csk->atid < 0) {
1640 pr_err("%s, NO atid available.\n", ndev->name); 1643 pr_err("%s, NO atid available.\n", ndev->name);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index e4c83b7c96a8..1a4cfa562a60 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2128,6 +2128,13 @@ void cxgbi_cleanup_task(struct iscsi_task *task)
2128 struct iscsi_tcp_task *tcp_task = task->dd_data; 2128 struct iscsi_tcp_task *tcp_task = task->dd_data;
2129 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); 2129 struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
2130 2130
2131 if (!tcp_task || !tdata || (tcp_task->dd_data != tdata)) {
2132 pr_info("task 0x%p,0x%p, tcp_task 0x%p, tdata 0x%p/0x%p.\n",
2133 task, task->sc, tcp_task,
2134 tcp_task ? tcp_task->dd_data : NULL, tdata);
2135 return;
2136 }
2137
2131 log_debug(1 << CXGBI_DBG_ISCSI, 2138 log_debug(1 << CXGBI_DBG_ISCSI,
2132 "task 0x%p, skb 0x%p, itt 0x%x.\n", 2139 "task 0x%p, skb 0x%p, itt 0x%x.\n",
2133 task, tdata->skb, task->hdr_itt); 2140 task, tdata->skb, task->hdr_itt);
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 077f62e208aa..6a4367cc9caa 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3401 if (is_write) { 3401 if (is_write) {
3402 req_flags |= SISL_REQ_FLAGS_HOST_WRITE; 3402 req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3403 3403
3404 rc = copy_from_user(kbuf, ubuf, ulen); 3404 if (copy_from_user(kbuf, ubuf, ulen)) {
3405 if (unlikely(rc)) 3405 rc = -EFAULT;
3406 goto out; 3406 goto out;
3407 }
3407 } 3408 }
3408 } 3409 }
3409 3410
@@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3431 goto out; 3432 goto out;
3432 } 3433 }
3433 3434
3434 if (ulen && !is_write) 3435 if (ulen && !is_write) {
3435 rc = copy_to_user(ubuf, kbuf, ulen); 3436 if (copy_to_user(ubuf, kbuf, ulen))
3437 rc = -EFAULT;
3438 }
3436out: 3439out:
3437 kfree(buf); 3440 kfree(buf);
3438 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 3441 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
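
The cxlflash hunks fix a classic uaccess mistake: copy_from_user() and copy_to_user() return the number of bytes left uncopied, not an errno, so storing that count in rc and returning it would leak a positive byte count to the caller. The corrected pattern:

	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static int fetch_args(void *kbuf, const void __user *ubuf, size_t ulen)
	{
		/* nonzero return == bytes not copied == fault in the user range */
		if (copy_from_user(kbuf, ubuf, ulen))
			return -EFAULT;

		return 0;
	}
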
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 551d103c27f1..2bfea7082e3a 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
1693 1693
1694static int parse_trans_tx_err_code_v2_hw(u32 err_msk) 1694static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
1695{ 1695{
1696 const u8 trans_tx_err_code_prio[] = { 1696 static const u8 trans_tx_err_code_prio[] = {
1697 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, 1697 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS,
1698 TRANS_TX_ERR_PHY_NOT_ENABLE, 1698 TRANS_TX_ERR_PHY_NOT_ENABLE,
1699 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, 1699 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION,
@@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk)
1738 1738
1739static int parse_trans_rx_err_code_v2_hw(u32 err_msk) 1739static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
1740{ 1740{
1741 const u8 trans_rx_err_code_prio[] = { 1741 static const u8 trans_rx_err_code_prio[] = {
1742 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, 1742 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR,
1743 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, 1743 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR,
1744 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, 1744 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM,
@@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk)
1784 1784
1785static int parse_dma_tx_err_code_v2_hw(u32 err_msk) 1785static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
1786{ 1786{
1787 const u8 dma_tx_err_code_prio[] = { 1787 static const u8 dma_tx_err_code_prio[] = {
1788 DMA_TX_UNEXP_XFER_ERR, 1788 DMA_TX_UNEXP_XFER_ERR,
1789 DMA_TX_UNEXP_RETRANS_ERR, 1789 DMA_TX_UNEXP_RETRANS_ERR,
1790 DMA_TX_XFER_LEN_OVERFLOW, 1790 DMA_TX_XFER_LEN_OVERFLOW,
@@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk)
1810 1810
1811static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) 1811static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
1812{ 1812{
1813 const u8 sipc_rx_err_code_prio[] = { 1813 static const u8 sipc_rx_err_code_prio[] = {
1814 SIPC_RX_FIS_STATUS_ERR_BIT_VLD, 1814 SIPC_RX_FIS_STATUS_ERR_BIT_VLD,
1815 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, 1815 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR,
1816 SIPC_RX_FIS_STATUS_BSY_BIT_ERR, 1816 SIPC_RX_FIS_STATUS_BSY_BIT_ERR,
@@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk)
1836 1836
1837static int parse_dma_rx_err_code_v2_hw(u32 err_msk) 1837static int parse_dma_rx_err_code_v2_hw(u32 err_msk)
1838{ 1838{
1839 const u8 dma_rx_err_code_prio[] = { 1839 static const u8 dma_rx_err_code_prio[] = {
1840 DMA_RX_UNKNOWN_FRM_ERR, 1840 DMA_RX_UNKNOWN_FRM_ERR,
1841 DMA_RX_DATA_LEN_OVERFLOW, 1841 DMA_RX_DATA_LEN_OVERFLOW,
1842 DMA_RX_DATA_LEN_UNDERFLOW, 1842 DMA_RX_DATA_LEN_UNDERFLOW,
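
All five hisi_sas hunks make the same change: a large function-local lookup table declared plain const still has automatic storage, so every call rebuilds it on the stack; adding static moves the single copy into .rodata. In miniature, with invented values:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/types.h>

	static int parse_err_code(u32 err_msk)
	{
		/* static const: one copy in .rodata, nothing copied per call */
		static const u8 err_code_prio[] = { 7, 3, 12, 0, 9 };
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(err_code_prio); i++)
			if (err_msk & (1U << err_code_prio[i]))
				return err_code_prio[i];

		return -1;
	}
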
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 8914eab84337..4f7cdb28bd38 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -938,7 +938,7 @@ static struct scsi_host_template hpsa_driver_template = {
938#endif 938#endif
939 .sdev_attrs = hpsa_sdev_attrs, 939 .sdev_attrs = hpsa_sdev_attrs,
940 .shost_attrs = hpsa_shost_attrs, 940 .shost_attrs = hpsa_shost_attrs,
941 .max_sectors = 8192, 941 .max_sectors = 1024,
942 .no_write_same = 1, 942 .no_write_same = 1,
943}; 943};
944 944
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b0c68d24db01..da5bdbdcce52 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3351,6 +3351,16 @@ static void ipr_worker_thread(struct work_struct *work)
3351 return; 3351 return;
3352 } 3352 }
3353 3353
3354 if (ioa_cfg->scsi_unblock) {
3355 ioa_cfg->scsi_unblock = 0;
3356 ioa_cfg->scsi_blocked = 0;
3357 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3358 scsi_unblock_requests(ioa_cfg->host);
3359 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3360 if (ioa_cfg->scsi_blocked)
3361 scsi_block_requests(ioa_cfg->host);
3362 }
3363
3354 if (!ioa_cfg->scan_enabled) { 3364 if (!ioa_cfg->scan_enabled) {
3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356 return; 3366 return;
@@ -7211,9 +7221,8 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7211 ENTER; 7221 ENTER;
7212 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 7222 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7213 ipr_trace; 7223 ipr_trace;
7214 spin_unlock_irq(ioa_cfg->host->host_lock); 7224 ioa_cfg->scsi_unblock = 1;
7215 scsi_unblock_requests(ioa_cfg->host); 7225 schedule_work(&ioa_cfg->work_q);
7216 spin_lock_irq(ioa_cfg->host->host_lock);
7217 } 7226 }
7218 7227
7219 ioa_cfg->in_reset_reload = 0; 7228 ioa_cfg->in_reset_reload = 0;
@@ -7287,13 +7296,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7287 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7296 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7288 wake_up_all(&ioa_cfg->reset_wait_q); 7297 wake_up_all(&ioa_cfg->reset_wait_q);
7289 7298
7290 spin_unlock(ioa_cfg->host->host_lock); 7299 ioa_cfg->scsi_unblock = 1;
7291 scsi_unblock_requests(ioa_cfg->host);
7292 spin_lock(ioa_cfg->host->host_lock);
7293
7294 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
7295 scsi_block_requests(ioa_cfg->host);
7296
7297 schedule_work(&ioa_cfg->work_q); 7300 schedule_work(&ioa_cfg->work_q);
7298 LEAVE; 7301 LEAVE;
7299 return IPR_RC_JOB_RETURN; 7302 return IPR_RC_JOB_RETURN;
@@ -9249,8 +9252,11 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9249 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9252 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9250 } 9253 }
9251 wmb(); 9254 wmb();
9252 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) 9255 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9256 ioa_cfg->scsi_unblock = 0;
9257 ioa_cfg->scsi_blocked = 1;
9253 scsi_block_requests(ioa_cfg->host); 9258 scsi_block_requests(ioa_cfg->host);
9259 }
9254 9260
9255 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 9261 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9256 ioa_cfg->reset_cmd = ipr_cmd; 9262 ioa_cfg->reset_cmd = ipr_cmd;
@@ -9306,9 +9312,8 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9306 wake_up_all(&ioa_cfg->reset_wait_q); 9312 wake_up_all(&ioa_cfg->reset_wait_q);
9307 9313
9308 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9314 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9309 spin_unlock_irq(ioa_cfg->host->host_lock); 9315 ioa_cfg->scsi_unblock = 1;
9310 scsi_unblock_requests(ioa_cfg->host); 9316 schedule_work(&ioa_cfg->work_q);
9311 spin_lock_irq(ioa_cfg->host->host_lock);
9312 } 9317 }
9313 return; 9318 return;
9314 } else { 9319 } else {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index e98a87a65335..c7f0e9e3cd7d 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1488,6 +1488,8 @@ struct ipr_ioa_cfg {
1488 u8 cfg_locked:1; 1488 u8 cfg_locked:1;
1489 u8 clear_isr:1; 1489 u8 clear_isr:1;
1490 u8 probe_done:1; 1490 u8 probe_done:1;
1491 u8 scsi_unblock:1;
1492 u8 scsi_blocked:1;
1491 1493
1492 u8 revid; 1494 u8 revid;
1493 1495
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 47f66e949745..ed197bc8e801 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
213 * @task_context: 213 * @task_context:
214 * 214 *
215 */ 215 */
216static void scu_ssp_reqeust_construct_task_context( 216static void scu_ssp_request_construct_task_context(
217 struct isci_request *ireq, 217 struct isci_request *ireq,
218 struct scu_task_context *task_context) 218 struct scu_task_context *task_context)
219{ 219{
@@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
425 u8 prot_type = scsi_get_prot_type(scmd); 425 u8 prot_type = scsi_get_prot_type(scmd);
426 u8 prot_op = scsi_get_prot_op(scmd); 426 u8 prot_op = scsi_get_prot_op(scmd);
427 427
428 scu_ssp_reqeust_construct_task_context(ireq, task_context); 428 scu_ssp_request_construct_task_context(ireq, task_context);
429 429
430 task_context->ssp_command_iu_length = 430 task_context->ssp_command_iu_length =
431 sizeof(struct ssp_cmd_iu) / sizeof(u32); 431 sizeof(struct ssp_cmd_iu) / sizeof(u32);
@@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
472{ 472{
473 struct scu_task_context *task_context = ireq->tc; 473 struct scu_task_context *task_context = ireq->tc;
474 474
475 scu_ssp_reqeust_construct_task_context(ireq, task_context); 475 scu_ssp_request_construct_task_context(ireq, task_context);
476 476
477 task_context->control_frame = 1; 477 task_context->control_frame = 1;
478 task_context->priority = SCU_TASK_PRIORITY_HIGH; 478 task_context->priority = SCU_TASK_PRIORITY_HIGH;
@@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire
495 * the command buffer is complete. none Revisit task context construction to 495 * the command buffer is complete. none Revisit task context construction to
496 * determine what is common for SSP/SMP/STP task context structures. 496 * determine what is common for SSP/SMP/STP task context structures.
497 */ 497 */
498static void scu_sata_reqeust_construct_task_context( 498static void scu_sata_request_construct_task_context(
499 struct isci_request *ireq, 499 struct isci_request *ireq,
500 struct scu_task_context *task_context) 500 struct scu_task_context *task_context)
501{ 501{
@@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq
562{ 562{
563 struct scu_task_context *task_context = ireq->tc; 563 struct scu_task_context *task_context = ireq->tc;
564 564
565 scu_sata_reqeust_construct_task_context(ireq, task_context); 565 scu_sata_request_construct_task_context(ireq, task_context);
566 566
567 task_context->control_frame = 0; 567 task_context->control_frame = 0;
568 task_context->priority = SCU_TASK_PRIORITY_NORMAL; 568 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
@@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
613 struct scu_task_context *task_context = ireq->tc; 613 struct scu_task_context *task_context = ireq->tc;
614 614
615 /* Build the STP task context structure */ 615 /* Build the STP task context structure */
616 scu_sata_reqeust_construct_task_context(ireq, task_context); 616 scu_sata_request_construct_task_context(ireq, task_context);
617 617
618 /* Copy over the SGL elements */ 618 /* Copy over the SGL elements */
619 sci_request_build_sgl(ireq); 619 sci_request_build_sgl(ireq);
@@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
1401 * @data_buffer: The buffer of data to be copied. 1401 * @data_buffer: The buffer of data to be copied.
1402 * @length: The length of the data transfer. 1402 * @length: The length of the data transfer.
1403 * 1403 *
1404 * Copy the data from the buffer for the length specified to the IO reqeust SGL 1404 * Copy the data from the buffer for the length specified to the IO request SGL
1405 * specified data region. enum sci_status 1405 * specified data region. enum sci_status
1406 */ 1406 */
1407static enum sci_status 1407static enum sci_status
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index fd501f8dbb11..8660f923ace0 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
573 event = DISC_EV_FAILED; 573 event = DISC_EV_FAILED;
574 } 574 }
575 if (error) 575 if (error)
576 fc_disc_error(disc, fp); 576 fc_disc_error(disc, ERR_PTR(error));
577 else if (event != DISC_EV_NONE) 577 else if (event != DISC_EV_NONE)
578 fc_disc_done(disc, event); 578 fc_disc_done(disc, event);
579 fc_frame_free(fp); 579 fc_frame_free(fp);
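
The one-line libfc fix matters because fc_disc_error() follows the kernel's ERR_PTR convention: its frame argument is either a real frame or an errno encoded as a pointer, and the old call passed the still-valid fp even when error was set. How the two sides of that convention pair up, sketched with fc_frame left opaque:

	#include <linux/err.h>
	#include <linux/printk.h>

	struct fc_frame;			/* opaque in this sketch */

	void handle_frame(struct fc_frame *fp);	/* normal-path stand-in */

	static void disc_error(struct fc_frame *fp)
	{
		if (IS_ERR(fp)) {
			pr_debug("discovery error %ld\n", PTR_ERR(fp));
			return;		/* no real frame to touch or free */
		}
		/* only here may fp be dereferenced as a frame */
	}

	static void resp_handler(struct fc_frame *fp, int error)
	{
		if (error)
			disc_error(ERR_PTR(error));	/* encode the errno, not fp */
		else
			handle_frame(fp);
	}
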
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4ed48ed38e79..7ee1a94c0b33 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -205,8 +205,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
205 atomic_read(&tgtp->xmt_ls_rsp_error)); 205 atomic_read(&tgtp->xmt_ls_rsp_error));
206 206
207 len += snprintf(buf+len, PAGE_SIZE-len, 207 len += snprintf(buf+len, PAGE_SIZE-len,
208 "FCP: Rcv %08x Release %08x Drop %08x\n", 208 "FCP: Rcv %08x Defer %08x Release %08x "
209 "Drop %08x\n",
209 atomic_read(&tgtp->rcv_fcp_cmd_in), 210 atomic_read(&tgtp->rcv_fcp_cmd_in),
211 atomic_read(&tgtp->rcv_fcp_cmd_defer),
210 atomic_read(&tgtp->xmt_fcp_release), 212 atomic_read(&tgtp->xmt_fcp_release),
211 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 213 atomic_read(&tgtp->rcv_fcp_cmd_drop));
212 214
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 5cc8b0f7d885..744f3f395b64 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -782,8 +782,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
782 atomic_read(&tgtp->xmt_ls_rsp_error)); 782 atomic_read(&tgtp->xmt_ls_rsp_error));
783 783
784 len += snprintf(buf + len, size - len, 784 len += snprintf(buf + len, size - len,
785 "FCP: Rcv %08x Drop %08x\n", 785 "FCP: Rcv %08x Defer %08x Release %08x "
786 "Drop %08x\n",
786 atomic_read(&tgtp->rcv_fcp_cmd_in), 787 atomic_read(&tgtp->rcv_fcp_cmd_in),
788 atomic_read(&tgtp->rcv_fcp_cmd_defer),
789 atomic_read(&tgtp->xmt_fcp_release),
787 atomic_read(&tgtp->rcv_fcp_cmd_drop)); 790 atomic_read(&tgtp->rcv_fcp_cmd_drop));
788 791
789 if (atomic_read(&tgtp->rcv_fcp_cmd_in) != 792 if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index fbeec344c6cc..bbbd0f84160d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -841,12 +841,31 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
841 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); 841 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
842} 842}
843 843
844static void
845lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
846 struct nvmefc_tgt_fcp_req *rsp)
847{
848 struct lpfc_nvmet_tgtport *tgtp;
849 struct lpfc_nvmet_rcv_ctx *ctxp =
850 container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
851 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
852 struct lpfc_hba *phba = ctxp->phba;
853
854 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
855 ctxp->oxid, ctxp->size, smp_processor_id());
856
857 tgtp = phba->targetport->private;
858 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
859 lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
860}
861
844static struct nvmet_fc_target_template lpfc_tgttemplate = { 862static struct nvmet_fc_target_template lpfc_tgttemplate = {
845 .targetport_delete = lpfc_nvmet_targetport_delete, 863 .targetport_delete = lpfc_nvmet_targetport_delete,
846 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp, 864 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
847 .fcp_op = lpfc_nvmet_xmt_fcp_op, 865 .fcp_op = lpfc_nvmet_xmt_fcp_op,
848 .fcp_abort = lpfc_nvmet_xmt_fcp_abort, 866 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
849 .fcp_req_release = lpfc_nvmet_xmt_fcp_release, 867 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
868 .defer_rcv = lpfc_nvmet_defer_rcv,
850 869
851 .max_hw_queues = 1, 870 .max_hw_queues = 1,
852 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS, 871 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
@@ -1504,6 +1523,17 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1504 return; 1523 return;
1505 } 1524 }
1506 1525
1526 /* Processing of FCP command is deferred */
1527 if (rc == -EOVERFLOW) {
1528 lpfc_nvmeio_data(phba,
1529 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
1530 oxid, size, sid);
1531 /* defer reposting rcv buffer till .defer_rcv callback */
1532 ctxp->rqb_buffer = nvmebuf;
1533 atomic_inc(&tgtp->rcv_fcp_cmd_out);
1534 return;
1535 }
1536
1507 atomic_inc(&tgtp->rcv_fcp_cmd_drop); 1537 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1508 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, 1538 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1509 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", 1539 "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index e675ef17be08..48a76788b003 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -49,6 +49,7 @@ struct lpfc_nvmet_tgtport {
49 atomic_t rcv_fcp_cmd_in; 49 atomic_t rcv_fcp_cmd_in;
50 atomic_t rcv_fcp_cmd_out; 50 atomic_t rcv_fcp_cmd_out;
51 atomic_t rcv_fcp_cmd_drop; 51 atomic_t rcv_fcp_cmd_drop;
52 atomic_t rcv_fcp_cmd_defer;
52 atomic_t xmt_fcp_release; 53 atomic_t xmt_fcp_release;
53 54
54 /* Stats counters - lpfc_nvmet_xmt_fcp_op */ 55 /* Stats counters - lpfc_nvmet_xmt_fcp_op */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 316c3df0c3fd..71c4746341ea 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6228,8 +6228,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
6228fail_start_aen: 6228fail_start_aen:
6229fail_io_attach: 6229fail_io_attach:
6230 megasas_mgmt_info.count--; 6230 megasas_mgmt_info.count--;
6231 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6232 megasas_mgmt_info.max_index--; 6231 megasas_mgmt_info.max_index--;
6232 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6233 6233
6234 instance->instancet->disable_intr(instance); 6234 instance->instancet->disable_intr(instance);
6235 megasas_destroy_irqs(instance); 6235 megasas_destroy_irqs(instance);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f990ab4d45e1..985510628f56 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -425,7 +425,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
425int 425int
426megasas_alloc_cmdlist_fusion(struct megasas_instance *instance) 426megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
427{ 427{
428 u32 max_mpt_cmd, i; 428 u32 max_mpt_cmd, i, j;
429 struct fusion_context *fusion; 429 struct fusion_context *fusion;
430 430
431 fusion = instance->ctrl_context; 431 fusion = instance->ctrl_context;
@@ -450,11 +450,15 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
450 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion), 450 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
451 GFP_KERNEL); 451 GFP_KERNEL);
452 if (!fusion->cmd_list[i]) { 452 if (!fusion->cmd_list[i]) {
453 for (j = 0; j < i; j++)
454 kfree(fusion->cmd_list[j]);
455 kfree(fusion->cmd_list);
453 dev_err(&instance->pdev->dev, 456 dev_err(&instance->pdev->dev,
454 "Failed from %s %d\n", __func__, __LINE__); 457 "Failed from %s %d\n", __func__, __LINE__);
455 return -ENOMEM; 458 return -ENOMEM;
456 } 459 }
457 } 460 }
461
458 return 0; 462 return 0;
459} 463}
460int 464int
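
The fusion fix is the standard partial-allocation rollback: on the first kzalloc() failure, free the i entries that did succeed and then the array itself, instead of returning with half a cmd_list leaked. The general shape of the pattern, with illustrative types:

    /* Rollback on mid-loop allocation failure. */
    for (i = 0; i < n; i++) {
            list[i] = kzalloc(sizeof(*list[i]), GFP_KERNEL);
            if (!list[i]) {
                    while (i--)             /* frees entries 0..i-1 */
                            kfree(list[i]);
                    kfree(list);
                    return -ENOMEM;
            }
    }
    return 0;
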
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 4d038926a455..351f06dfc5a0 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -528,7 +528,8 @@ struct fip_vlan {
528#define QEDF_WRITE (1 << 0) 528#define QEDF_WRITE (1 << 0)
529#define MAX_FIBRE_LUNS 0xffffffff 529#define MAX_FIBRE_LUNS 0xffffffff
530 530
531#define QEDF_MAX_NUM_CQS 8 531#define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \
532 num_online_cpus())
532 533
533/* 534/*
534 * PCI function probe defines 535 * PCI function probe defines
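
Replacing the hard QEDF_MAX_NUM_CQS cap, MIN_NUM_CPUS_MSIX() clamps the queue count to whichever is smaller: the CQ count qed reported for this PCI function or the number of online CPUs. min_t() pins both operands to a single type (u32 here) so the comparison cannot be skewed by mixed signedness; the probe reordering in qedf_main.c below exists so dev_info is populated before this macro runs. Expanded, the macro is simply:

    /* What MIN_NUM_CPUS_MSIX(qedf) expands to, modulo the macro layer: */
    u32 num_queues = min_t(u32, qedf->dev_info.num_cqs, num_online_cpus());
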
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index b58bba4604e8..1d13c9ca517d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
1227 1227
1228 if (rdata->spp_type != FC_TYPE_FCP) { 1228 if (rdata->spp_type != FC_TYPE_FCP) {
1229 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 1229 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1230 "Not offlading since since spp type isn't FCP\n"); 1230 "Not offloading since spp type isn't FCP\n");
1231 break; 1231 break;
1232 } 1232 }
1233 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { 1233 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
@@ -2760,11 +2760,9 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
2760 * we allocation is the minimum off: 2760 * we allocation is the minimum off:
2760 * we allocate is the minimum of: 2760 * we allocate is the minimum of:
2761 * 2761 *
2762 * Number of CPUs 2762 * Number of CPUs
2763 * Number of MSI-X vectors 2763 * Number allocated by qed for our PCI function
2764 * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
2765 */ 2764 */
2766 qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, 2765 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
2767 num_online_cpus());
2768 2766
2769 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", 2767 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
2770 qedf->num_queues); 2768 qedf->num_queues);
@@ -2962,6 +2960,13 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2962 goto err1; 2960 goto err1;
2963 } 2961 }
2964 2962
2963 /* Learn information crucial for qedf to progress */
2964 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
2965 if (rc) {
2966 QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
2967 goto err1;
2968 }
2969
2965 /* queue allocation code should come here 2970 /* queue allocation code should come here
2966 * order should be 2971 * order should be
2967 * slowpath_start 2972 * slowpath_start
@@ -2977,13 +2982,6 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
2977 } 2982 }
2978 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 2983 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
2979 2984
2980 /* Learn information crucial for qedf to progress */
2981 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
2982 if (rc) {
2983 QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
2984 goto err1;
2985 }
2986
2987 /* Record BDQ producer doorbell addresses */ 2985 /* Record BDQ producer doorbell addresses */
2988 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; 2986 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
2989 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; 2987 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 21331453db7b..2ff753ce6e27 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -5,6 +5,7 @@ config QEDI
5 select SCSI_ISCSI_ATTRS 5 select SCSI_ISCSI_ATTRS
6 select QED_LL2 6 select QED_LL2
7 select QED_ISCSI 7 select QED_ISCSI
8 select ISCSI_BOOT_SYSFS
8 ---help--- 9 ---help---
9 This driver supports iSCSI offload for the QLogic FastLinQ 10 This driver supports iSCSI offload for the QLogic FastLinQ
10 41000 Series Converged Network Adapters. 11 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 32632c9b2276..91d2f51c351b 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -23,11 +23,17 @@
23#include <linux/qed/qed_iscsi_if.h> 23#include <linux/qed/qed_iscsi_if.h>
24#include <linux/qed/qed_ll2_if.h> 24#include <linux/qed/qed_ll2_if.h>
25#include "qedi_version.h" 25#include "qedi_version.h"
26#include "qedi_nvm_iscsi_cfg.h"
26 27
27#define QEDI_MODULE_NAME "qedi" 28#define QEDI_MODULE_NAME "qedi"
28 29
29struct qedi_endpoint; 30struct qedi_endpoint;
30 31
32#ifndef GET_FIELD2
33#define GET_FIELD2(value, name) \
34 (((value) & (name ## _MASK)) >> (name ## _OFFSET))
35#endif
36
31/* 37/*
32 * PCI function probe defines 38 * PCI function probe defines
33 */ 39 */
@@ -66,6 +72,11 @@ struct qedi_endpoint;
66#define QEDI_HW_DMA_BOUNDARY 0xfff 72#define QEDI_HW_DMA_BOUNDARY 0xfff
67#define QEDI_PATH_HANDLE 0xFE0000000UL 73#define QEDI_PATH_HANDLE 0xFE0000000UL
68 74
75enum qedi_nvm_tgts {
76 QEDI_NVM_TGT_PRI,
77 QEDI_NVM_TGT_SEC,
78};
79
69struct qedi_uio_ctrl { 80struct qedi_uio_ctrl {
70 /* meta data */ 81 /* meta data */
71 u32 uio_hsi_version; 82 u32 uio_hsi_version;
@@ -283,6 +294,8 @@ struct qedi_ctx {
283 void *bdq_pbl_list; 294 void *bdq_pbl_list;
284 dma_addr_t bdq_pbl_list_dma; 295 dma_addr_t bdq_pbl_list_dma;
285 u8 bdq_pbl_list_num_entries; 296 u8 bdq_pbl_list_num_entries;
297 struct nvm_iscsi_cfg *iscsi_cfg;
298 dma_addr_t nvm_buf_dma;
286 void __iomem *bdq_primary_prod; 299 void __iomem *bdq_primary_prod;
287 void __iomem *bdq_secondary_prod; 300 void __iomem *bdq_secondary_prod;
288 u16 bdq_prod_idx; 301 u16 bdq_prod_idx;
@@ -337,6 +350,10 @@ struct qedi_ctx {
337 bool use_fast_sge; 350 bool use_fast_sge;
338 351
339 atomic_t num_offloads; 352 atomic_t num_offloads;
353#define SYSFS_FLAG_FW_SEL_BOOT 2
354#define IPV6_LEN 41
355#define IPV4_LEN 17
356 struct iscsi_boot_kset *boot_kset;
340}; 357};
341 358
342struct qedi_work { 359struct qedi_work {
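
GET_FIELD2() leans on token pasting: passing, say, NVM_ISCSI_CFG_INITIATOR_VLAN as name makes the preprocessor stitch together the matching _MASK and _OFFSET constants, so every packed register field needs only that pair of defines. A self-contained example with a hypothetical 16-bit field at bit 0 (not a layout from the qedi headers):

    #define GET_FIELD2(value, name) \
            (((value) & (name ## _MASK)) >> (name ## _OFFSET))

    #define DEMO_VLAN_MASK    0x0000FFFF   /* hypothetical field */
    #define DEMO_VLAN_OFFSET  0

    unsigned int vlan = GET_FIELD2(0x0003002a, DEMO_VLAN);  /* == 0x2a */
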
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 19254bd739d9..93d54acd4a22 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work)
1411 1411
1412 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC); 1412 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC);
1413 if (!list_work) { 1413 if (!list_work) {
1414 QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n"); 1414 QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n");
1415 goto abort_ret; 1415 goto abort_ret;
1416 } 1416 }
1417 1417
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 80edd28b635f..37da9a8b43b1 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -824,7 +824,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
824 u32 iscsi_cid = QEDI_CID_RESERVED; 824 u32 iscsi_cid = QEDI_CID_RESERVED;
825 u16 len = 0; 825 u16 len = 0;
826 char *buf = NULL; 826 char *buf = NULL;
827 int ret; 827 int ret, tmp;
828 828
829 if (!shost) { 829 if (!shost) {
830 ret = -ENXIO; 830 ret = -ENXIO;
@@ -940,10 +940,10 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
940 940
941ep_rel_conn: 941ep_rel_conn:
942 qedi->ep_tbl[iscsi_cid] = NULL; 942 qedi->ep_tbl[iscsi_cid] = NULL;
943 ret = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle); 943 tmp = qedi_ops->release_conn(qedi->cdev, qedi_ep->handle);
944 if (ret) 944 if (tmp)
945 QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n", 945 QEDI_WARN(&qedi->dbg_ctx, "release_conn returned %d\n",
946 ret); 946 tmp);
947ep_free_sq: 947ep_free_sq:
948 qedi_free_sq(qedi, qedi_ep); 948 qedi_free_sq(qedi, qedi_ep);
949ep_conn_exit: 949ep_conn_exit:
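
The qedi_ep_connect() change fixes a classic error-path bug: reusing ret for release_conn()'s status in the unwind overwrote the errno that sent execution there, so the caller saw the cleanup result instead of the real failure. Routing the cleanup status through a scratch variable keeps the original cause intact; the generic shape, with hypothetical helpers:

    int do_thing(void)
    {
            int ret, tmp;

            ret = step_that_failed();       /* hypothetical helper */
            if (ret)
                    goto unwind;
            return 0;
    unwind:
            tmp = cleanup();                /* may itself fail...   */
            if (tmp)
                    pr_warn("cleanup returned %d\n", tmp);
            return ret;                     /* ...but ret survives  */
    }
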
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5f5a4ef2e529..2c3783684815 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -19,6 +19,7 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/if_vlan.h> 20#include <linux/if_vlan.h>
21#include <linux/cpu.h> 21#include <linux/cpu.h>
22#include <linux/iscsi_boot_sysfs.h>
22 23
23#include <scsi/scsi_cmnd.h> 24#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_device.h> 25#include <scsi/scsi_device.h>
@@ -1143,6 +1144,30 @@ exit_setup_int:
1143 return rc; 1144 return rc;
1144} 1145}
1145 1146
1147static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1148{
1149 if (qedi->iscsi_cfg)
1150 dma_free_coherent(&qedi->pdev->dev,
1151 sizeof(struct nvm_iscsi_cfg),
1152 qedi->iscsi_cfg, qedi->nvm_buf_dma);
1153}
1154
1155static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1156{
1157 qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
1158 sizeof(struct nvm_iscsi_cfg),
1159 &qedi->nvm_buf_dma, GFP_KERNEL);
1160 if (!qedi->iscsi_cfg) {
1161 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
1162 return -ENOMEM;
1163 }
1164 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
1165 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
1166 qedi->nvm_buf_dma);
1167
1168 return 0;
1169}
1170
1146static void qedi_free_bdq(struct qedi_ctx *qedi) 1171static void qedi_free_bdq(struct qedi_ctx *qedi)
1147{ 1172{
1148 int i; 1173 int i;
@@ -1183,6 +1208,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi)
1183 kfree(gl[i]); 1208 kfree(gl[i]);
1184 } 1209 }
1185 qedi_free_bdq(qedi); 1210 qedi_free_bdq(qedi);
1211 qedi_free_nvm_iscsi_cfg(qedi);
1186} 1212}
1187 1213
1188static int qedi_alloc_bdq(struct qedi_ctx *qedi) 1214static int qedi_alloc_bdq(struct qedi_ctx *qedi)
@@ -1309,6 +1335,11 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
1309 if (rc) 1335 if (rc)
1310 goto mem_alloc_failure; 1336 goto mem_alloc_failure;
1311 1337
1338 /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
1339 rc = qedi_alloc_nvm_iscsi_cfg(qedi);
1340 if (rc)
1341 goto mem_alloc_failure;
1342
1312 /* Allocate a CQ and an associated PBL for each MSI-X 1343 /* Allocate a CQ and an associated PBL for each MSI-X
1313 * vector. 1344 * vector.
1314 */ 1345 */
@@ -1671,6 +1702,387 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
1671 qedi_ops->ll2->start(qedi->cdev, &params); 1702 qedi_ops->ll2->start(qedi->cdev, &params);
1672} 1703}
1673 1704
1705/**
1706 * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting
1707 * for gaps) for the matching absolute-pf-id of the QEDI device.
1708 */
1709static struct nvm_iscsi_block *
1710qedi_get_nvram_block(struct qedi_ctx *qedi)
1711{
1712 int i;
1713 u8 pf;
1714 u32 flags;
1715 struct nvm_iscsi_block *block;
1716
1717 pf = qedi->dev_info.common.abs_pf_id;
1718 block = &qedi->iscsi_cfg->block[0];
1719 for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
1720 flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
1721 NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
1722 if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
1723 NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
1724 (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
1725 >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
1726 return block;
1727 }
1728 return NULL;
1729}
1730
1731static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
1732{
1733 struct qedi_ctx *qedi = data;
1734 struct nvm_iscsi_initiator *initiator;
1735 char *str = buf;
1736 int rc = 1;
1737 u32 ipv6_en, dhcp_en, ip_len;
1738 struct nvm_iscsi_block *block;
1739 char *fmt, *ip, *sub, *gw;
1740
1741 block = qedi_get_nvram_block(qedi);
1742 if (!block)
1743 return 0;
1744
1745 initiator = &block->initiator;
1746 ipv6_en = block->generic.ctrl_flags &
1747 NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
1748 dhcp_en = block->generic.ctrl_flags &
1749 NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
1750 /* Static IP assignments. */
1751 fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
1752 ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
1753 ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
1754 sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
1755 initiator->ipv4.subnet_mask.byte;
1756 gw = ipv6_en ? initiator->ipv6.gateway.byte :
1757 initiator->ipv4.gateway.byte;
1758 /* DHCP IP adjustments. */
1759 fmt = dhcp_en ? "%s\n" : fmt;
1760 if (dhcp_en) {
1761 ip = ipv6_en ? "0::0" : "0.0.0.0";
1762 sub = ip;
1763 gw = ip;
1764 ip_len = ipv6_en ? 5 : 8;
1765 }
1766
1767 switch (type) {
1768 case ISCSI_BOOT_ETH_IP_ADDR:
1769 rc = snprintf(str, ip_len, fmt, ip);
1770 break;
1771 case ISCSI_BOOT_ETH_SUBNET_MASK:
1772 rc = snprintf(str, ip_len, fmt, sub);
1773 break;
1774 case ISCSI_BOOT_ETH_GATEWAY:
1775 rc = snprintf(str, ip_len, fmt, gw);
1776 break;
1777 case ISCSI_BOOT_ETH_FLAGS:
1778 rc = snprintf(str, 3, "%hhd\n",
1779 SYSFS_FLAG_FW_SEL_BOOT);
1780 break;
1781 case ISCSI_BOOT_ETH_INDEX:
1782 rc = snprintf(str, 3, "0\n");
1783 break;
1784 case ISCSI_BOOT_ETH_MAC:
1785 rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
1786 break;
1787 case ISCSI_BOOT_ETH_VLAN:
1788 rc = snprintf(str, 12, "%d\n",
1789 GET_FIELD2(initiator->generic_cont0,
1790 NVM_ISCSI_CFG_INITIATOR_VLAN));
1791 break;
1792 case ISCSI_BOOT_ETH_ORIGIN:
1793 if (dhcp_en)
1794 rc = snprintf(str, 3, "3\n");
1795 break;
1796 default:
1797 rc = 0;
1798 break;
1799 }
1800
1801 return rc;
1802}
1803
1804static umode_t qedi_eth_get_attr_visibility(void *data, int type)
1805{
1806 int rc = 1;
1807
1808 switch (type) {
1809 case ISCSI_BOOT_ETH_FLAGS:
1810 case ISCSI_BOOT_ETH_MAC:
1811 case ISCSI_BOOT_ETH_INDEX:
1812 case ISCSI_BOOT_ETH_IP_ADDR:
1813 case ISCSI_BOOT_ETH_SUBNET_MASK:
1814 case ISCSI_BOOT_ETH_GATEWAY:
1815 case ISCSI_BOOT_ETH_ORIGIN:
1816 case ISCSI_BOOT_ETH_VLAN:
1817 rc = 0444;
1818 break;
1819 default:
1820 rc = 0;
1821 break;
1822 }
1823 return rc;
1824}
1825
1826static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
1827{
1828 struct qedi_ctx *qedi = data;
1829 struct nvm_iscsi_initiator *initiator;
1830 char *str = buf;
1831 int rc;
1832 struct nvm_iscsi_block *block;
1833
1834 block = qedi_get_nvram_block(qedi);
1835 if (!block)
1836 return 0;
1837
1838 initiator = &block->initiator;
1839
1840 switch (type) {
1841 case ISCSI_BOOT_INI_INITIATOR_NAME:
1842 rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
1843 initiator->initiator_name.byte);
1844 break;
1845 default:
1846 rc = 0;
1847 break;
1848 }
1849 return rc;
1850}
1851
1852static umode_t qedi_ini_get_attr_visibility(void *data, int type)
1853{
1854 int rc;
1855
1856 switch (type) {
1857 case ISCSI_BOOT_INI_INITIATOR_NAME:
1858 rc = 0444;
1859 break;
1860 default:
1861 rc = 0;
1862 break;
1863 }
1864 return rc;
1865}
1866
1867static ssize_t
1868qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
1869 char *buf, enum qedi_nvm_tgts idx)
1870{
1871 char *str = buf;
1872 int rc = 1;
1873 u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
1874 struct nvm_iscsi_block *block;
1875 char *chap_name, *chap_secret;
1876 char *mchap_name, *mchap_secret;
1877
1878 block = qedi_get_nvram_block(qedi);
1879 if (!block)
1880 goto exit_show_tgt_info;
1881
1882 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
1883 "Port:%d, tgt_idx:%d\n",
1884 GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);
1885
1886 ctrl_flags = block->target[idx].ctrl_flags &
1887 NVM_ISCSI_CFG_TARGET_ENABLED;
1888
1889 if (!ctrl_flags) {
1890 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
1891 "Target disabled\n");
1892 goto exit_show_tgt_info;
1893 }
1894
1895 ipv6_en = block->generic.ctrl_flags &
1896 NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
1897 ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
1898 chap_en = block->generic.ctrl_flags &
1899 NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
1900 chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
1901 chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;
1902
1903 mchap_en = block->generic.ctrl_flags &
1904 NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
1905 mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
1906 mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;
1907
1908 switch (type) {
1909 case ISCSI_BOOT_TGT_NAME:
1910 rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
1911 block->target[idx].target_name.byte);
1912 break;
1913 case ISCSI_BOOT_TGT_IP_ADDR:
1914 if (ipv6_en)
1915 rc = snprintf(str, ip_len, "%pI6\n",
1916 block->target[idx].ipv6_addr.byte);
1917 else
1918 rc = snprintf(str, ip_len, "%pI4\n",
1919 block->target[idx].ipv4_addr.byte);
1920 break;
1921 case ISCSI_BOOT_TGT_PORT:
1922 rc = snprintf(str, 12, "%d\n",
1923 GET_FIELD2(block->target[idx].generic_cont0,
1924 NVM_ISCSI_CFG_TARGET_TCP_PORT));
1925 break;
1926 case ISCSI_BOOT_TGT_LUN:
1927 rc = snprintf(str, 22, "%.*d\n",
1928 block->target[idx].lun.value[1],
1929 block->target[idx].lun.value[0]);
1930 break;
1931 case ISCSI_BOOT_TGT_CHAP_NAME:
1932 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
1933 chap_name);
1934 break;
1935 case ISCSI_BOOT_TGT_CHAP_SECRET:
1936 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
1937 chap_secret);
1938 break;
1939 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
1940 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
1941 mchap_name);
1942 break;
1943 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
1944 rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
1945 mchap_secret);
1946 break;
1947 case ISCSI_BOOT_TGT_FLAGS:
1948 rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
1949 break;
1950 case ISCSI_BOOT_TGT_NIC_ASSOC:
1951 rc = snprintf(str, 3, "0\n");
1952 break;
1953 default:
1954 rc = 0;
1955 break;
1956 }
1957
1958exit_show_tgt_info:
1959 return rc;
1960}
1961
1962static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
1963{
1964 struct qedi_ctx *qedi = data;
1965
1966 return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
1967}
1968
1969static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
1970{
1971 struct qedi_ctx *qedi = data;
1972
1973 return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
1974}
1975
1976static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
1977{
1978 int rc;
1979
1980 switch (type) {
1981 case ISCSI_BOOT_TGT_NAME:
1982 case ISCSI_BOOT_TGT_IP_ADDR:
1983 case ISCSI_BOOT_TGT_PORT:
1984 case ISCSI_BOOT_TGT_LUN:
1985 case ISCSI_BOOT_TGT_CHAP_NAME:
1986 case ISCSI_BOOT_TGT_CHAP_SECRET:
1987 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
1988 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
1989 case ISCSI_BOOT_TGT_NIC_ASSOC:
1990 case ISCSI_BOOT_TGT_FLAGS:
1991 rc = 0444;
1992 break;
1993 default:
1994 rc = 0;
1995 break;
1996 }
1997 return rc;
1998}
1999
2000static void qedi_boot_release(void *data)
2001{
2002 struct qedi_ctx *qedi = data;
2003
2004 scsi_host_put(qedi->shost);
2005}
2006
2007static int qedi_get_boot_info(struct qedi_ctx *qedi)
2008{
2009 int ret = 1;
2010 u16 len;
2011
2012 len = sizeof(struct nvm_iscsi_cfg);
2013
2014 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2015 "Get NVM iSCSI CFG image\n");
2016 ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2017 QED_NVM_IMAGE_ISCSI_CFG,
2018 (char *)qedi->iscsi_cfg, len);
2019 if (ret)
2020 QEDI_ERR(&qedi->dbg_ctx,
2021 "Could not get NVM image. ret = %d\n", ret);
2022
2023 return ret;
2024}
2025
2026static int qedi_setup_boot_info(struct qedi_ctx *qedi)
2027{
2028 struct iscsi_boot_kobj *boot_kobj;
2029
2030 if (qedi_get_boot_info(qedi))
2031 return -EPERM;
2032
2033 qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
2034 if (!qedi->boot_kset)
2035 goto kset_free;
2036
2037 if (!scsi_host_get(qedi->shost))
2038 goto kset_free;
2039
2040 boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
2041 qedi_show_boot_tgt_pri_info,
2042 qedi_tgt_get_attr_visibility,
2043 qedi_boot_release);
2044 if (!boot_kobj)
2045 goto put_host;
2046
2047 if (!scsi_host_get(qedi->shost))
2048 goto kset_free;
2049
2050 boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
2051 qedi_show_boot_tgt_sec_info,
2052 qedi_tgt_get_attr_visibility,
2053 qedi_boot_release);
2054 if (!boot_kobj)
2055 goto put_host;
2056
2057 if (!scsi_host_get(qedi->shost))
2058 goto kset_free;
2059
2060 boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
2061 qedi_show_boot_ini_info,
2062 qedi_ini_get_attr_visibility,
2063 qedi_boot_release);
2064 if (!boot_kobj)
2065 goto put_host;
2066
2067 if (!scsi_host_get(qedi->shost))
2068 goto kset_free;
2069
2070 boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
2071 qedi_show_boot_eth_info,
2072 qedi_eth_get_attr_visibility,
2073 qedi_boot_release);
2074 if (!boot_kobj)
2075 goto put_host;
2076
2077 return 0;
2078
2079put_host:
2080 scsi_host_put(qedi->shost);
2081kset_free:
2082 iscsi_boot_destroy_kset(qedi->boot_kset);
2083 return -ENOMEM;
2084}
2085
1674static void __qedi_remove(struct pci_dev *pdev, int mode) 2086static void __qedi_remove(struct pci_dev *pdev, int mode)
1675{ 2087{
1676 struct qedi_ctx *qedi = pci_get_drvdata(pdev); 2088 struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -1724,6 +2136,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
1724 qedi->ll2_recv_thread = NULL; 2136 qedi->ll2_recv_thread = NULL;
1725 } 2137 }
1726 qedi_ll2_free_skbs(qedi); 2138 qedi_ll2_free_skbs(qedi);
2139
2140 if (qedi->boot_kset)
2141 iscsi_boot_destroy_kset(qedi->boot_kset);
1727 } 2142 }
1728} 2143}
1729 2144
@@ -1967,6 +2382,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
1967 /* F/w needs 1st task context memory entry for performance */ 2382 /* F/w needs 1st task context memory entry for performance */
1968 set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); 2383 set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
1969 atomic_set(&qedi->num_offloads, 0); 2384 atomic_set(&qedi->num_offloads, 0);
2385
2386 if (qedi_setup_boot_info(qedi))
2387 QEDI_ERR(&qedi->dbg_ctx,
2388 "No iSCSI boot target configured\n");
1970 } 2389 }
1971 2390
1972 return 0; 2391 return 0;
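
Note the reference discipline in qedi_setup_boot_info(): every iscsi_boot_create_*() kobject gets its own scsi_host_get() up front, because each kobject's release method (qedi_boot_release) drops exactly one host reference, so a failed create has to put back the reference it just took. In outline (show/is_visible stand in for the real callbacks):

    /* One host reference per boot kobject; its ->release drops it. */
    if (!scsi_host_get(qedi->shost))
            goto kset_free;
    boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
                                         show, is_visible,
                                         qedi_boot_release);
    if (!boot_kobj)
            goto put_host;          /* creation failed: undo our get() */
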
diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
new file mode 100644
index 000000000000..df39b69b366d
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h
@@ -0,0 +1,210 @@
1/*
2 * QLogic iSCSI Offload Driver
3 * Copyright (c) 2016 Cavium Inc.
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef NVM_ISCSI_CFG_H
11#define NVM_ISCSI_CFG_H
12
13#define NUM_OF_ISCSI_TARGET_PER_PF 4 /* Defined as per the
14 * ISCSI IBFT constraint
15 */
16#define NUM_OF_ISCSI_PF_SUPPORTED 4 /* One PF per Port -
17 * assuming 4 port card
18 */
19
20#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256
21
22union nvm_iscsi_dhcp_vendor_id {
23 u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4];
24 u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN];
25};
26
27#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4
28union nvm_iscsi_ipv4_addr {
29 u32 addr;
30 u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN];
31};
32
33#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16
34union nvm_iscsi_ipv6_addr {
35 u32 addr[4];
36 u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN];
37};
38
39struct nvm_iscsi_initiator_ipv4 {
40 union nvm_iscsi_ipv4_addr addr; /* 0x0 */
41 union nvm_iscsi_ipv4_addr subnet_mask; /* 0x4 */
42 union nvm_iscsi_ipv4_addr gateway; /* 0x8 */
43 union nvm_iscsi_ipv4_addr primary_dns; /* 0xC */
44 union nvm_iscsi_ipv4_addr secondary_dns; /* 0x10 */
45 union nvm_iscsi_ipv4_addr dhcp_addr; /* 0x14 */
46
47 union nvm_iscsi_ipv4_addr isns_server; /* 0x18 */
48 union nvm_iscsi_ipv4_addr slp_server; /* 0x1C */
49 union nvm_iscsi_ipv4_addr primay_radius_server; /* 0x20 */
50 union nvm_iscsi_ipv4_addr secondary_radius_server; /* 0x24 */
51
52 union nvm_iscsi_ipv4_addr rsvd[4]; /* 0x28 */
53};
54
55struct nvm_iscsi_initiator_ipv6 {
56 union nvm_iscsi_ipv6_addr addr; /* 0x0 */
57 union nvm_iscsi_ipv6_addr subnet_mask; /* 0x10 */
58 union nvm_iscsi_ipv6_addr gateway; /* 0x20 */
59 union nvm_iscsi_ipv6_addr primary_dns; /* 0x30 */
60 union nvm_iscsi_ipv6_addr secondary_dns; /* 0x40 */
61 union nvm_iscsi_ipv6_addr dhcp_addr; /* 0x50 */
62
63 union nvm_iscsi_ipv6_addr isns_server; /* 0x60 */
64 union nvm_iscsi_ipv6_addr slp_server; /* 0x70 */
65 union nvm_iscsi_ipv6_addr primay_radius_server; /* 0x80 */
66 union nvm_iscsi_ipv6_addr secondary_radius_server; /* 0x90 */
67
68 union nvm_iscsi_ipv6_addr rsvd[3]; /* 0xA0 */
69
70 u32 config; /* 0xD0 */
71#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK 0x000000FF
72#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0
73
74 u32 rsvd_1[3];
75};
76
77#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256
78union nvm_iscsi_name {
79 u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4];
80 u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN];
81};
82
83#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256
84union nvm_iscsi_chap_name {
85 u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4];
86 u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN];
87};
88
89#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16 /* md5 need per RFC1996
90 * is 16 octets
91 */
92union nvm_iscsi_chap_password {
93 u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4];
94 u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN];
95};
96
97union nvm_iscsi_lun {
98 u8 byte[8];
99 u32 value[2];
100};
101
102struct nvm_iscsi_generic {
103 u32 ctrl_flags; /* 0x0 */
104#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED BIT(0)
105#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED BIT(1)
106#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED BIT(2)
107#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED BIT(3)
108#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED BIT(4)
109#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN BIT(5)
110#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN BIT(6)
111#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED BIT(7)
112#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED BIT(8)
113
114 u32 timeout; /* 0x4 */
115#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK 0x0000FFFF
116#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET 0
117#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK 0xFFFF0000
118#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET 16
119
120 union nvm_iscsi_dhcp_vendor_id dhcp_vendor_id; /* 0x8 */
121 u32 rsvd[62]; /* 0x108 */
122};
123
124struct nvm_iscsi_initiator {
125 struct nvm_iscsi_initiator_ipv4 ipv4; /* 0x0 */
126 struct nvm_iscsi_initiator_ipv6 ipv6; /* 0x38 */
127
128 union nvm_iscsi_name initiator_name; /* 0x118 */
129 union nvm_iscsi_chap_name chap_name; /* 0x218 */
130 union nvm_iscsi_chap_password chap_password; /* 0x318 */
131
132 u32 generic_cont0; /* 0x398 */
133#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK 0x0000FFFF
134#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET 0
135#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK 0x00030000
136#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET 16
137#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4 1
138#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6 2
139#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6 3
140
141 u32 ctrl_flags;
142#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6 BIT(0)
143#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED BIT(1)
144
145 u32 rsvd[116]; /* 0x32C */
146};
147
148struct nvm_iscsi_target {
149 u32 ctrl_flags; /* 0x0 */
150#define NVM_ISCSI_CFG_TARGET_ENABLED BIT(0)
151#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS BIT(1)
152
153 u32 generic_cont0; /* 0x4 */
154#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK 0x0000FFFF
155#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET 0
156
157 u32 ip_ver;
158#define NVM_ISCSI_CFG_IPv4 4
159#define NVM_ISCSI_CFG_IPv6 6
160
161 u32 rsvd_1[7]; /* 0x24 */
162 union nvm_iscsi_ipv4_addr ipv4_addr; /* 0x28 */
163 union nvm_iscsi_ipv6_addr ipv6_addr; /* 0x2C */
164 union nvm_iscsi_lun lun; /* 0x3C */
165
166 union nvm_iscsi_name target_name; /* 0x44 */
167 union nvm_iscsi_chap_name chap_name; /* 0x144 */
168 union nvm_iscsi_chap_password chap_password; /* 0x244 */
169
170 u32 rsvd_2[107]; /* 0x2C4 */
171};
172
173struct nvm_iscsi_block {
174 u32 id; /* 0x0 */
175#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK 0x0000000F
176#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET 0
177#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK 0x00000FF0
178#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET 4
179#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY BIT(0)
180#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED BIT(1)
181
182 u32 rsvd_1[5]; /* 0x4 */
183
184 struct nvm_iscsi_generic generic; /* 0x18 */
185 struct nvm_iscsi_initiator initiator; /* 0x218 */
186 struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF];
187 /* 0x718 */
188
189 u32 rsvd_2[58]; /* 0x1718 */
190 /* total size - 0x1800 - 6K block */
191};
192
193struct nvm_iscsi_cfg {
194 u32 id; /* 0x0 */
195#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK 0x000000FF
196#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK 0x0000FF00
197#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK 0xFFFF0000
198#define NVM_ISCSI_CFG_BLK_SIGNATURE 0x49430000 /* IC - Iscsi
199 * Config
200 */
201
202#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0
203#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10
204#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \
205 NVM_ISCSI_CFG_BLK_VERSION_MINOR)
206
207 struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */
208};
209
210#endif
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index c2dc836dc484..e101cd3043b9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3727,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3727 h &= QLA_CMD_HANDLE_MASK; 3727 h &= QLA_CMD_HANDLE_MASK;
3728 3728
3729 if (h != QLA_TGT_NULL_HANDLE) { 3729 if (h != QLA_TGT_NULL_HANDLE) {
3730 if (unlikely(h > req->num_outstanding_cmds)) { 3730 if (unlikely(h >= req->num_outstanding_cmds)) {
3731 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3731 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3732 "qla_target(%d): Wrong handle %x received\n", 3732 "qla_target(%d): Wrong handle %x received\n",
3733 vha->vp_idx, handle); 3733 vha->vp_idx, handle);
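
The qla_target.c change is a one-character bounds fix: h indexes req->outstanding_cmds[], whose valid range is 0..num_outstanding_cmds-1, so the old `h > num_outstanding_cmds` test let the out-of-range value h == num_outstanding_cmds through. The corrected guard, condensed:

    /* n slots means valid indices are 0..n-1, so == n is also bad. */
    if (unlikely(h >= req->num_outstanding_cmds))
            return NULL;            /* log "Wrong handle" and bail */
    cmd = req->outstanding_cmds[h];
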
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 33142610882f..b18646d6057f 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -401,9 +401,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
401 for (i = 0; i < vha->hw->max_req_queues; i++) { 401 for (i = 0; i < vha->hw->max_req_queues; i++) {
402 struct req_que *req = vha->hw->req_q_map[i]; 402 struct req_que *req = vha->hw->req_q_map[i];
403 403
404 if (!test_bit(i, vha->hw->req_qid_map))
405 continue;
406
407 if (req || !buf) { 404 if (req || !buf) {
408 length = req ? 405 length = req ?
409 req->length : REQUEST_ENTRY_CNT_24XX; 406 req->length : REQUEST_ENTRY_CNT_24XX;
@@ -418,9 +415,6 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
418 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 415 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
419 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 416 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
420 417
421 if (!test_bit(i, vha->hw->rsp_qid_map))
422 continue;
423
424 if (rsp || !buf) { 418 if (rsp || !buf) {
425 length = rsp ? 419 length = rsp ?
426 rsp->length : RESPONSE_ENTRY_CNT_MQ; 420 rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -660,9 +654,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
660 for (i = 0; i < vha->hw->max_req_queues; i++) { 654 for (i = 0; i < vha->hw->max_req_queues; i++) {
661 struct req_que *req = vha->hw->req_q_map[i]; 655 struct req_que *req = vha->hw->req_q_map[i];
662 656
663 if (!test_bit(i, vha->hw->req_qid_map))
664 continue;
665
666 if (req || !buf) { 657 if (req || !buf) {
667 qla27xx_insert16(i, buf, len); 658 qla27xx_insert16(i, buf, len);
668 qla27xx_insert16(1, buf, len); 659 qla27xx_insert16(1, buf, len);
@@ -675,9 +666,6 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
675 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 666 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
676 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 667 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
677 668
678 if (!test_bit(i, vha->hw->rsp_qid_map))
679 continue;
680
681 if (rsp || !buf) { 669 if (rsp || !buf) {
682 qla27xx_insert16(i, buf, len); 670 qla27xx_insert16(i, buf, len);
683 qla27xx_insert16(1, buf, len); 671 qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index b20da0d27ad7..3f82ea1b72dc 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
500static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 500static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
501{ 501{
502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 502 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
503 unsigned long flags;
504 503
505 /* 504 /*
506 * Ensure that the complete FCP WRITE payload has been received. 505 * Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
508 */ 507 */
509 cmd->cmd_in_wq = 0; 508 cmd->cmd_in_wq = 0;
510 509
511 spin_lock_irqsave(&cmd->cmd_lock, flags);
512 cmd->data_work = 1;
513 if (cmd->aborted) {
514 cmd->data_work_free = 1;
515 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
516
517 tcm_qla2xxx_free_cmd(cmd);
518 return;
519 }
520 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
521
522 cmd->qpair->tgt_counters.qla_core_ret_ctio++; 510 cmd->qpair->tgt_counters.qla_core_ret_ctio++;
523 if (!cmd->write_data_transferred) { 511 if (!cmd->write_data_transferred) {
524 /* 512 /*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
765 qlt_xmit_tm_rsp(mcmd); 753 qlt_xmit_tm_rsp(mcmd);
766} 754}
767 755
768#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
769static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) 756static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
770{ 757{
771 struct qla_tgt_cmd *cmd = container_of(se_cmd, 758 struct qla_tgt_cmd *cmd = container_of(se_cmd,
772 struct qla_tgt_cmd, se_cmd); 759 struct qla_tgt_cmd, se_cmd);
773 unsigned long flags;
774 760
775 if (qlt_abort_cmd(cmd)) 761 if (qlt_abort_cmd(cmd))
776 return; 762 return;
777
778 spin_lock_irqsave(&cmd->cmd_lock, flags);
779 if ((cmd->state == QLA_TGT_STATE_NEW)||
780 ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
781 DATA_WORK_NOT_FREE(cmd))) {
782 cmd->data_work_free = 1;
783 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
784 /*
785 * cmd has not reached fw, Use this trigger to free it.
786 */
787 tcm_qla2xxx_free_cmd(cmd);
788 return;
789 }
790 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
791 return;
792
793} 763}
794 764
795static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 765static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3d38c6d463b8..1bf274e3b2b6 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -800,7 +800,11 @@ MODULE_LICENSE("GPL");
800module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); 800module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
801MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); 801MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
802 802
803#ifdef CONFIG_SCSI_MQ_DEFAULT
803bool scsi_use_blk_mq = true; 804bool scsi_use_blk_mq = true;
805#else
806bool scsi_use_blk_mq = false;
807#endif
804module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); 808module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
805 809
806static int __init init_scsi(void) 810static int __init init_scsi(void)
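
scsi_use_blk_mq now takes its build-time default from CONFIG_SCSI_MQ_DEFAULT while staying writable through the use_blk_mq module parameter. As a style note only (not what the patch does), the same compile-time selection can be written without the #ifdef, since IS_ENABLED() evaluates to 0 or 1 in ordinary C expressions:

    /* Alternative spelling of the default above: */
    bool scsi_use_blk_mq = IS_ENABLED(CONFIG_SCSI_MQ_DEFAULT);
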
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 7e24aa30c3b0..892fbd9800d9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1286,7 +1286,7 @@ store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
1286 unsigned long flags; 1286 unsigned long flags;
1287 1287
1288 spin_lock_irqsave(shost->host_lock, flags); 1288 spin_lock_irqsave(shost->host_lock, flags);
1289 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) { 1289 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING | FC_VPORT_DELETING)) {
1290 spin_unlock_irqrestore(shost->host_lock, flags); 1290 spin_unlock_irqrestore(shost->host_lock, flags);
1291 return -EBUSY; 1291 return -EBUSY;
1292 } 1292 }
@@ -2430,8 +2430,10 @@ fc_remove_host(struct Scsi_Host *shost)
2430 spin_lock_irqsave(shost->host_lock, flags); 2430 spin_lock_irqsave(shost->host_lock, flags);
2431 2431
2432 /* Remove any vports */ 2432 /* Remove any vports */
2433 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) 2433 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
2434 vport->flags |= FC_VPORT_DELETING;
2434 fc_queue_work(shost, &vport->vport_delete_work); 2435 fc_queue_work(shost, &vport->vport_delete_work);
2436 }
2435 2437
2436 /* Remove any remote ports */ 2438 /* Remove any remote ports */
2437 list_for_each_entry_safe(rport, next_rport, 2439 list_for_each_entry_safe(rport, next_rport,
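
The transport fix closes a double-delete race: fc_remove_host() now stamps each vport FC_VPORT_DELETING under host_lock before queueing its delete work, and the sysfs delete path bails with -EBUSY whenever that flag (or DEL/CREATING) is already set, so only one party can own the teardown. Both halves combined into one claim-or-back-off sketch:

    spin_lock_irqsave(shost->host_lock, flags);
    if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING |
                        FC_VPORT_DELETING)) {
            spin_unlock_irqrestore(shost->host_lock, flags);
            return -EBUSY;          /* teardown already in flight */
    }
    vport->flags |= FC_VPORT_DELETING;
    spin_unlock_irqrestore(shost->host_lock, flags);
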
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bea36adeee17..e2647f2d4430 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1277,6 +1277,9 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
1277{ 1277{
1278 struct request *rq = SCpnt->request; 1278 struct request *rq = SCpnt->request;
1279 1279
1280 if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
1281 sd_zbc_write_unlock_zone(SCpnt);
1282
1280 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1283 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1281 __free_page(rq->special_vec.bv_page); 1284 __free_page(rq->special_vec.bv_page);
1282 1285
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 96855df9f49d..8aa54779aac1 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -294,6 +294,9 @@ int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd)
294 test_and_set_bit(zno, sdkp->zones_wlock)) 294 test_and_set_bit(zno, sdkp->zones_wlock))
295 return BLKPREP_DEFER; 295 return BLKPREP_DEFER;
296 296
297 WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
298 cmd->flags |= SCMD_ZONE_WRITE_LOCK;
299
297 return BLKPREP_OK; 300 return BLKPREP_OK;
298} 301}
299 302
@@ -302,9 +305,10 @@ void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd)
302 struct request *rq = cmd->request; 305 struct request *rq = cmd->request;
303 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 306 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
304 307
305 if (sdkp->zones_wlock) { 308 if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
306 unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); 309 unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
307 WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); 310 WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
311 cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
308 clear_bit_unlock(zno, sdkp->zones_wlock); 312 clear_bit_unlock(zno, sdkp->zones_wlock);
309 smp_mb__after_atomic(); 313 smp_mb__after_atomic();
310 } 314 }
@@ -335,9 +339,6 @@ void sd_zbc_complete(struct scsi_cmnd *cmd,
335 case REQ_OP_WRITE_ZEROES: 339 case REQ_OP_WRITE_ZEROES:
336 case REQ_OP_WRITE_SAME: 340 case REQ_OP_WRITE_SAME:
337 341
338 /* Unlock the zone */
339 sd_zbc_write_unlock_zone(cmd);
340
341 if (result && 342 if (result &&
342 sshdr->sense_key == ILLEGAL_REQUEST && 343 sshdr->sense_key == ILLEGAL_REQUEST &&
343 sshdr->asc == 0x21) 344 sshdr->asc == 0x21)
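
The sd/sd_zbc hunks re-home zone unlocking: SCMD_ZONE_WRITE_LOCK records on the command itself that it holds the zone's write lock, the unlock moves from sd_zbc_complete() to sd_uninit_command(), and both sides are flag-conditional so a retried or requeued command can never unlock a zone it no longer owns. The pairing, condensed:

    /* Lock: one writer per zone, remembered on the command. */
    if (test_and_set_bit(zno, sdkp->zones_wlock))
            return BLKPREP_DEFER;           /* zone busy, retry later  */
    cmd->flags |= SCMD_ZONE_WRITE_LOCK;

    /* Unlock (from command teardown, not completion): */
    if (cmd->flags & SCMD_ZONE_WRITE_LOCK) {
            cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
            clear_bit_unlock(zno, sdkp->zones_wlock);
            smp_mb__after_atomic();         /* pair with lock-side test */
    }
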
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index f1cdf32d7514..8927f9f54ad9 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -99,7 +99,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
99 99
100 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 100 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
101 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 101 NULL, SES_TIMEOUT, SES_RETRIES, NULL);
102 if (unlikely(!ret)) 102 if (unlikely(ret))
103 return ret; 103 return ret;
104 104
105 recv_page_code = ((unsigned char *)buf)[0]; 105 recv_page_code = ((unsigned char *)buf)[0];
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 21225d62b0c1..d7ff71e0c85c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -751,29 +751,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
751 return count; 751 return count;
752} 752}
753 753
754static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
755{
756 switch (hp->dxfer_direction) {
757 case SG_DXFER_NONE:
758 if (hp->dxferp || hp->dxfer_len > 0)
759 return false;
760 return true;
761 case SG_DXFER_TO_DEV:
762 case SG_DXFER_FROM_DEV:
763 case SG_DXFER_TO_FROM_DEV:
764 if (!hp->dxferp || hp->dxfer_len == 0)
765 return false;
766 return true;
767 case SG_DXFER_UNKNOWN:
768 if ((!hp->dxferp && hp->dxfer_len) ||
769 (hp->dxferp && hp->dxfer_len == 0))
770 return false;
771 return true;
772 default:
773 return false;
774 }
775}
776
777static int 754static int
778sg_common_write(Sg_fd * sfp, Sg_request * srp, 755sg_common_write(Sg_fd * sfp, Sg_request * srp,
779 unsigned char *cmnd, int timeout, int blocking) 756 unsigned char *cmnd, int timeout, int blocking)
@@ -794,7 +771,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
794 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 771 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
795 (int) cmnd[0], (int) hp->cmd_len)); 772 (int) cmnd[0], (int) hp->cmd_len));
796 773
797 if (!sg_is_valid_dxfer(hp)) 774 if (hp->dxfer_len >= SZ_256M)
798 return -EINVAL; 775 return -EINVAL;
799 776
800 k = sg_start_req(srp, cmnd); 777 k = sg_start_req(srp, cmnd);
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 07ec8a8877de..e164ffade38a 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -690,7 +690,7 @@ struct pqi_config_table_heartbeat {
690 690
691#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0) 691#define PQI_MAX_OUTSTANDING_REQUESTS ((u32)~0)
692#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32 692#define PQI_MAX_OUTSTANDING_REQUESTS_KDUMP 32
693#define PQI_MAX_TRANSFER_SIZE (4 * 1024U * 1024U) 693#define PQI_MAX_TRANSFER_SIZE (1024U * 1024U)
694#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U) 694#define PQI_MAX_TRANSFER_SIZE_KDUMP (512 * 1024U)
695 695
696#define RAID_MAP_MAX_ENTRIES 1024 696#define RAID_MAP_MAX_ENTRIES 1024
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 8e5013d9cad4..94e402ed30f6 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4299,11 +4299,11 @@ static int st_probe(struct device *dev)
4299 kref_init(&tpnt->kref); 4299 kref_init(&tpnt->kref);
4300 tpnt->disk = disk; 4300 tpnt->disk = disk;
4301 disk->private_data = &tpnt->driver; 4301 disk->private_data = &tpnt->driver;
4302 disk->queue = SDp->request_queue;
4303 /* SCSI tape doesn't register this gendisk via add_disk(). Manually 4302 /* SCSI tape doesn't register this gendisk via add_disk(). Manually
4304 * take queue reference that release_disk() expects. */ 4303 * take queue reference that release_disk() expects. */
4305 if (!blk_get_queue(disk->queue)) 4304 if (!blk_get_queue(SDp->request_queue))
4306 goto out_put_disk; 4305 goto out_put_disk;
4306 disk->queue = SDp->request_queue;
4307 tpnt->driver = &st_template; 4307 tpnt->driver = &st_template;
4308 4308
4309 tpnt->device = SDp; 4309 tpnt->device = SDp;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8b93197daefe..9be211d68b15 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
837 .eh_abort_handler = virtscsi_abort, 837 .eh_abort_handler = virtscsi_abort,
838 .eh_device_reset_handler = virtscsi_device_reset, 838 .eh_device_reset_handler = virtscsi_device_reset,
839 .eh_timed_out = virtscsi_eh_timed_out, 839 .eh_timed_out = virtscsi_eh_timed_out,
840 .slave_alloc = virtscsi_device_alloc,
840 841
841 .can_queue = 1024, 842 .can_queue = 1024,
842 .dma_boundary = UINT_MAX, 843 .dma_boundary = UINT_MAX,
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 3039072911a5..afc7ecc3c187 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -200,16 +200,11 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev)
200 200
201 domain->dev = &pdev->dev; 201 domain->dev = &pdev->dev;
202 202
203 ret = pm_genpd_init(&domain->genpd, NULL, true);
204 if (ret) {
205 dev_err(domain->dev, "Failed to init power domain\n");
206 return ret;
207 }
208
209 domain->regulator = devm_regulator_get_optional(domain->dev, "power"); 203 domain->regulator = devm_regulator_get_optional(domain->dev, "power");
210 if (IS_ERR(domain->regulator)) { 204 if (IS_ERR(domain->regulator)) {
211 if (PTR_ERR(domain->regulator) != -ENODEV) { 205 if (PTR_ERR(domain->regulator) != -ENODEV) {
212 dev_err(domain->dev, "Failed to get domain's regulator\n"); 206 if (PTR_ERR(domain->regulator) != -EPROBE_DEFER)
207 dev_err(domain->dev, "Failed to get domain's regulator\n");
213 return PTR_ERR(domain->regulator); 208 return PTR_ERR(domain->regulator);
214 } 209 }
215 } else { 210 } else {
@@ -217,6 +212,12 @@ static int imx7_pgc_domain_probe(struct platform_device *pdev)
217 domain->voltage, domain->voltage); 212 domain->voltage, domain->voltage);
218 } 213 }
219 214
215 ret = pm_genpd_init(&domain->genpd, NULL, true);
216 if (ret) {
217 dev_err(domain->dev, "Failed to init power domain\n");
218 return ret;
219 }
220
220 ret = of_genpd_add_provider_simple(domain->dev->of_node, 221 ret = of_genpd_add_provider_simple(domain->dev->of_node,
221 &domain->genpd); 222 &domain->genpd);
222 if (ret) { 223 if (ret) {
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 279e7c5551dd..39225de9d7f1 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -745,6 +745,9 @@ void *knav_pool_create(const char *name,
745 bool slot_found; 745 bool slot_found;
746 int ret; 746 int ret;
747 747
748 if (!kdev)
749 return ERR_PTR(-EPROBE_DEFER);
750
748 if (!kdev->dev) 751 if (!kdev->dev)
749 return ERR_PTR(-ENODEV); 752 return ERR_PTR(-ENODEV);
750 753
diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c
index b0b283810e72..de31b9389e2e 100644
--- a/drivers/soc/ti/ti_sci_pm_domains.c
+++ b/drivers/soc/ti/ti_sci_pm_domains.c
@@ -176,6 +176,8 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
176 176
177 ti_sci_pd->dev = dev; 177 ti_sci_pd->dev = dev;
178 178
179 ti_sci_pd->pd.name = "ti_sci_pd";
180
179 ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev; 181 ti_sci_pd->pd.attach_dev = ti_sci_pd_attach_dev;
180 ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev; 182 ti_sci_pd->pd.detach_dev = ti_sci_pd_detach_dev;
181 183
diff --git a/drivers/soc/zte/Kconfig b/drivers/soc/zte/Kconfig
index 20bde38ce2f9..e9d750c510cd 100644
--- a/drivers/soc/zte/Kconfig
+++ b/drivers/soc/zte/Kconfig
@@ -2,6 +2,7 @@
2# ZTE SoC drivers 2# ZTE SoC drivers
3# 3#
4menuconfig SOC_ZTE 4menuconfig SOC_ZTE
5 depends on ARCH_ZX || COMPILE_TEST
5 bool "ZTE SoC driver support" 6 bool "ZTE SoC driver support"
6 7
7if SOC_ZTE 8if SOC_ZTE
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 2afe3597982e..f4b7a98a7913 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -134,7 +134,6 @@ struct apid_data {
134 * @spmic: SPMI controller object 134 * @spmic: SPMI controller object
135 * @ver_ops: version dependent operations. 135 * @ver_ops: version dependent operations.
136 * @ppid_to_apid in-memory copy of PPID -> channel (APID) mapping table. 136 * @ppid_to_apid in-memory copy of PPID -> channel (APID) mapping table.
137 * v2 only.
138 */ 137 */
139struct spmi_pmic_arb { 138struct spmi_pmic_arb {
140 void __iomem *rd_base; 139 void __iomem *rd_base;
@@ -1016,6 +1015,13 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
1016 goto err_put_ctrl; 1015 goto err_put_ctrl;
1017 } 1016 }
1018 1017
1018 pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID,
1019 sizeof(*pa->ppid_to_apid), GFP_KERNEL);
1020 if (!pa->ppid_to_apid) {
1021 err = -ENOMEM;
1022 goto err_put_ctrl;
1023 }
1024
1019 hw_ver = readl_relaxed(core + PMIC_ARB_VERSION); 1025 hw_ver = readl_relaxed(core + PMIC_ARB_VERSION);
1020 1026
1021 if (hw_ver < PMIC_ARB_VERSION_V2_MIN) { 1027 if (hw_ver < PMIC_ARB_VERSION_V2_MIN) {
@@ -1048,15 +1054,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
1048 err = PTR_ERR(pa->wr_base); 1054 err = PTR_ERR(pa->wr_base);
1049 goto err_put_ctrl; 1055 goto err_put_ctrl;
1050 } 1056 }
1051
1052 pa->ppid_to_apid = devm_kcalloc(&ctrl->dev,
1053 PMIC_ARB_MAX_PPID,
1054 sizeof(*pa->ppid_to_apid),
1055 GFP_KERNEL);
1056 if (!pa->ppid_to_apid) {
1057 err = -ENOMEM;
1058 goto err_put_ctrl;
1059 }
1060 } 1057 }
1061 1058
1062 dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n", 1059 dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n",
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 2b9b0941d9eb..6d23226e5f69 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device *dev)
365 return 0; 365 return 0;
366} 366}
367 367
368static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
369{
370 int ret;
371
372 ret = of_device_uevent_modalias(dev, env);
373 if (ret != -ENODEV)
374 return ret;
375
376 return 0;
377}
378
368static struct bus_type spmi_bus_type = { 379static struct bus_type spmi_bus_type = {
369 .name = "spmi", 380 .name = "spmi",
370 .match = spmi_device_match, 381 .match = spmi_device_match,
371 .probe = spmi_drv_probe, 382 .probe = spmi_drv_probe,
372 .remove = spmi_drv_remove, 383 .remove = spmi_drv_remove,
384 .uevent = spmi_drv_uevent,
373}; 385};
374 386
375/** 387/**
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 268d4e6ef48a..ef28a1cb64ae 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
110 110
111source "drivers/staging/typec/Kconfig" 111source "drivers/staging/typec/Kconfig"
112 112
113source "drivers/staging/vboxvideo/Kconfig"
114
113endif # STAGING 115endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b93e6f5f0f6e..2918580bdb9e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/
44obj-$(CONFIG_GREYBUS) += greybus/ 44obj-$(CONFIG_GREYBUS) += greybus/
45obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ 45obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
46obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ 46obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
47obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index ca11be21f64b..34ca7823255d 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2396,6 +2396,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
2396 continue; 2396 continue;
2397 } 2397 }
2398 2398
2399 set_current_state(TASK_RUNNING);
2399 wp = async->buf_write_ptr; 2400 wp = async->buf_write_ptr;
2400 n1 = min(n, async->prealloc_bufsz - wp); 2401 n1 = min(n, async->prealloc_bufsz - wp);
2401 n2 = n - n1; 2402 n2 = n - n1;
@@ -2528,6 +2529,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2528 } 2529 }
2529 continue; 2530 continue;
2530 } 2531 }
2532
2533 set_current_state(TASK_RUNNING);
2531 rp = async->buf_read_ptr; 2534 rp = async->buf_read_ptr;
2532 n1 = min(n, async->prealloc_bufsz - rp); 2535 n1 = min(n, async->prealloc_bufsz - rp);
2533 n2 = n - n1; 2536 n2 = n - n1;
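
Both comedi loops wait with set_current_state(TASK_INTERRUPTIBLE) and then, on the data-ready path, fall straight into copy_to_user()/copy_from_user(); the added set_current_state(TASK_RUNNING) restores the task state before those potentially sleeping calls, avoiding the "sleeping in state != TASK_RUNNING" class of bugs. The canonical loop shape (condition names illustrative):

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (data_ready(async))
                    break;
            if (signal_pending(current)) {
                    set_current_state(TASK_RUNNING);
                    return -ERESTARTSYS;
            }
            schedule();
    }
    set_current_state(TASK_RUNNING);        /* before copy_to_user() */
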
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b2e382888981..2f7bfc1c59e5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
3116 /* following line: 2-1 per STC */ 3116 /* following line: 2-1 per STC */
3117 ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); 3117 ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
3118 ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG); 3118 ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
3119 /* following line: N-1 per STC */ 3119 ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
3120 ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
3121 } else { /* TRIG_EXT */ 3120 } else { /* TRIG_EXT */
3122 /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ 3121 /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
3123 devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; 3122 devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
index b37a6f48225f..8ea3920400a0 100644
--- a/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/fsl-mc-allocator.c
@@ -16,9 +16,9 @@
16 16
17static bool __must_check fsl_mc_is_allocatable(const char *obj_type) 17static bool __must_check fsl_mc_is_allocatable(const char *obj_type)
18{ 18{
19 return strcmp(obj_type, "dpbp") || 19 return strcmp(obj_type, "dpbp") == 0 ||
20 strcmp(obj_type, "dpmcp") || 20 strcmp(obj_type, "dpmcp") == 0 ||
21 strcmp(obj_type, "dpcon"); 21 strcmp(obj_type, "dpcon") == 0;
22} 22}
23 23
24/** 24/**
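
The fsl-mc fix is worth dwelling on: strcmp() returns 0 on a match, so truth-testing it directly inverts the intent, and the old expression was true for every object type (any string differs from at least one of the literals). Comparing each result against 0 restores the meaning; a standalone demonstration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool buggy(const char *t)        /* old logic */
    {
            return strcmp(t, "dpbp") || strcmp(t, "dpmcp");
    }

    static bool fixed(const char *t)        /* new logic */
    {
            return strcmp(t, "dpbp") == 0 || strcmp(t, "dpmcp") == 0;
    }

    int main(void)
    {
            printf("%d %d\n", buggy("dpio"), fixed("dpio"));  /* 1 0 */
            return 0;
    }
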
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index a6a8393d6664..3e00df74b18c 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -472,7 +472,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
472 long m) 472 long m)
473{ 473{
474 struct ad2s1210_state *st = iio_priv(indio_dev); 474 struct ad2s1210_state *st = iio_priv(indio_dev);
475 bool negative; 475 u16 negative;
476 int ret = 0; 476 int ret = 0;
477 u16 pos; 477 u16 pos;
478 s16 vel; 478 s16 vel;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 85b242ec5f9b..8fc191d99927 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1640 ibmsg = tx->tx_msg; 1640 ibmsg = tx->tx_msg;
1641 ibmsg->ibm_u.immediate.ibim_hdr = *hdr; 1641 ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1642 1642
1643 copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE, 1643 rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
1644 &from); 1644 &from);
1645 if (rc != payload_nob) {
1646 kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
1647 return -EFAULT;
1648 }
1649
1645 nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); 1650 nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
1646 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); 1651 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1647 1652
@@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1741 break; 1746 break;
1742 } 1747 }
1743 1748
1744 copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, 1749 rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
1745 IBLND_MSG_SIZE, to); 1750 to);
1751 if (rc != rlen) {
1752 rc = -EFAULT;
1753 break;
1754 }
1755
1756 rc = 0;
1746 lnet_finalize(ni, lntmsg, 0); 1757 lnet_finalize(ni, lntmsg, 0);
1747 break; 1758 break;
1748 1759
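Both o2iblnd hunks fix two problems at once: the copies previously moved IBLND_MSG_SIZE bytes (the buffer capacity) rather than the actual payload size, and their return values were ignored. copy_from_iter()/copy_to_iter() return the number of bytes actually copied, so anything short of the requested size means the user buffer faulted. A minimal sketch (dst and want are illustrative names):

	size_t copied = copy_from_iter(dst, want, &from);
	if (copied != want)
		return -EFAULT;		/* short copy: treat as a faulting buffer */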
diff --git a/drivers/staging/media/atomisp/i2c/ap1302.h b/drivers/staging/media/atomisp/i2c/ap1302.h
index 9341232c580d..4d0b181a9671 100644
--- a/drivers/staging/media/atomisp/i2c/ap1302.h
+++ b/drivers/staging/media/atomisp/i2c/ap1302.h
@@ -158,8 +158,8 @@ struct ap1302_res_struct {
158}; 158};
159 159
160struct ap1302_context_res { 160struct ap1302_context_res {
161 s32 res_num; 161 u32 res_num;
162 s32 cur_res; 162 u32 cur_res;
163 struct ap1302_res_struct *res_table; 163 struct ap1302_res_struct *res_table;
164}; 164};
165 165
diff --git a/drivers/staging/media/atomisp/i2c/gc0310.h b/drivers/staging/media/atomisp/i2c/gc0310.h
index f31eb277f542..7d8a0aeecb6c 100644
--- a/drivers/staging/media/atomisp/i2c/gc0310.h
+++ b/drivers/staging/media/atomisp/i2c/gc0310.h
@@ -454,6 +454,6 @@ struct gc0310_resolution gc0310_res_video[] = {
454#define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video)) 454#define N_RES_VIDEO (ARRAY_SIZE(gc0310_res_video))
455 455
456static struct gc0310_resolution *gc0310_res = gc0310_res_preview; 456static struct gc0310_resolution *gc0310_res = gc0310_res_preview;
457static int N_RES = N_RES_PREVIEW; 457static unsigned long N_RES = N_RES_PREVIEW;
458#endif 458#endif
459 459
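This and the similar hunks in the other sensor headers below change the type because ARRAY_SIZE() expands to a sizeof expression of type size_t; storing it in a signed int provokes signed/unsigned comparison warnings in loops over the table. A sketch under that assumption (use_resolution() is illustrative):

	unsigned long i;		/* matches size_t width on kernel targets */

	for (i = 0; i < N_RES; i++)	/* both operands unsigned: no -Wsign-compare */
		use_resolution(&gc0310_res[i]);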
diff --git a/drivers/staging/media/atomisp/i2c/gc2235.h b/drivers/staging/media/atomisp/i2c/gc2235.h
index ccbc757045a5..7c3d994180cc 100644
--- a/drivers/staging/media/atomisp/i2c/gc2235.h
+++ b/drivers/staging/media/atomisp/i2c/gc2235.h
@@ -668,5 +668,5 @@ struct gc2235_resolution gc2235_res_video[] = {
668#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video)) 668#define N_RES_VIDEO (ARRAY_SIZE(gc2235_res_video))
669 669
670static struct gc2235_resolution *gc2235_res = gc2235_res_preview; 670static struct gc2235_resolution *gc2235_res = gc2235_res_preview;
671static int N_RES = N_RES_PREVIEW; 671static unsigned long N_RES = N_RES_PREVIEW;
672#endif 672#endif
diff --git a/drivers/staging/media/atomisp/i2c/imx/imx.h b/drivers/staging/media/atomisp/i2c/imx/imx.h
index 36b3f3a5a41f..41b4133ca995 100644
--- a/drivers/staging/media/atomisp/i2c/imx/imx.h
+++ b/drivers/staging/media/atomisp/i2c/imx/imx.h
@@ -480,7 +480,7 @@ struct imx_device {
480 struct imx_vcm *vcm_driver; 480 struct imx_vcm *vcm_driver;
481 struct imx_otp *otp_driver; 481 struct imx_otp *otp_driver;
482 const struct imx_resolution *curr_res_table; 482 const struct imx_resolution *curr_res_table;
483 int entries_curr_table; 483 unsigned long entries_curr_table;
484 const struct firmware *fw; 484 const struct firmware *fw;
485 struct imx_reg_addr *reg_addr; 485 struct imx_reg_addr *reg_addr;
486 const struct imx_reg *param_hold; 486 const struct imx_reg *param_hold;
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
index 944fe8e3bcbf..ab8907e6c9ef 100644
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ b/drivers/staging/media/atomisp/i2c/ov2680.h
@@ -934,7 +934,6 @@ static struct ov2680_resolution ov2680_res_video[] = {
934#define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video)) 934#define N_RES_VIDEO (ARRAY_SIZE(ov2680_res_video))
935 935
936static struct ov2680_resolution *ov2680_res = ov2680_res_preview; 936static struct ov2680_resolution *ov2680_res = ov2680_res_preview;
937static int N_RES = N_RES_PREVIEW; 937static unsigned long N_RES = N_RES_PREVIEW;
938
939 938
940#endif 939#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov2722.h b/drivers/staging/media/atomisp/i2c/ov2722.h
index b0d40965d89e..73ecb1679718 100644
--- a/drivers/staging/media/atomisp/i2c/ov2722.h
+++ b/drivers/staging/media/atomisp/i2c/ov2722.h
@@ -1263,5 +1263,5 @@ struct ov2722_resolution ov2722_res_video[] = {
1263#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video)) 1263#define N_RES_VIDEO (ARRAY_SIZE(ov2722_res_video))
1264 1264
1265static struct ov2722_resolution *ov2722_res = ov2722_res_preview; 1265static struct ov2722_resolution *ov2722_res = ov2722_res_preview;
1266static int N_RES = N_RES_PREVIEW; 1266static unsigned long N_RES = N_RES_PREVIEW;
1267#endif 1267#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
index d88ac1777d86..8c2e6794463b 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
+++ b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
@@ -1377,5 +1377,5 @@ struct ov5693_resolution ov5693_res_video[] = {
1377#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video)) 1377#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
1378 1378
1379static struct ov5693_resolution *ov5693_res = ov5693_res_preview; 1379static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
1380static int N_RES = N_RES_PREVIEW; 1380static unsigned long N_RES = N_RES_PREVIEW;
1381#endif 1381#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov8858.h b/drivers/staging/media/atomisp/i2c/ov8858.h
index 9be6a0e63861..d3fde200c013 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858.h
@@ -266,7 +266,7 @@ struct ov8858_device {
266 const struct ov8858_reg *regs; 266 const struct ov8858_reg *regs;
267 struct ov8858_vcm *vcm_driver; 267 struct ov8858_vcm *vcm_driver;
268 const struct ov8858_resolution *curr_res_table; 268 const struct ov8858_resolution *curr_res_table;
269 int entries_curr_table; 269 unsigned long entries_curr_table;
270 270
271 struct v4l2_ctrl_handler ctrl_handler; 271 struct v4l2_ctrl_handler ctrl_handler;
272 struct v4l2_ctrl *run_mode; 272 struct v4l2_ctrl *run_mode;
diff --git a/drivers/staging/media/atomisp/i2c/ov8858_btns.h b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
index 09e3cdc1a394..f9a3cf8fbf1a 100644
--- a/drivers/staging/media/atomisp/i2c/ov8858_btns.h
+++ b/drivers/staging/media/atomisp/i2c/ov8858_btns.h
@@ -266,7 +266,7 @@ struct ov8858_device {
266 const struct ov8858_reg *regs; 266 const struct ov8858_reg *regs;
267 struct ov8858_vcm *vcm_driver; 267 struct ov8858_vcm *vcm_driver;
268 const struct ov8858_resolution *curr_res_table; 268 const struct ov8858_resolution *curr_res_table;
269 int entries_curr_table; 269 unsigned long entries_curr_table;
270 270
271 struct v4l2_ctrl_handler ctrl_handler; 271 struct v4l2_ctrl_handler ctrl_handler;
272 struct v4l2_ctrl *run_mode; 272 struct v4l2_ctrl *run_mode;
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
index 726eaa293c55..2bd98f0667ec 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile
+++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile
@@ -354,7 +354,9 @@ ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
354 354
355# HACK! While this driver is in bad shape, don't enable several warnings 355# HACK! While this driver is in bad shape, don't enable several warnings
356# that would be otherwise enabled with W=1 356# that would be otherwise enabled with W=1
357ccflags-y += -Wno-unused-const-variable -Wno-missing-prototypes \ 357ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
358 -Wno-unused-but-set-variable -Wno-missing-declarations \ 358ccflags-y += $(call cc-disable-warning, missing-prototypes)
359 -Wno-suggest-attribute=format -Wno-missing-prototypes \ 359ccflags-y += $(call cc-disable-warning, missing-declarations)
360 -Wno-implicit-fallthrough 360ccflags-y += $(call cc-disable-warning, suggest-attribute=format)
361ccflags-y += $(call cc-disable-warning, unused-const-variable)
362ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
index d3667132851b..c8e0c4fe3717 100644
--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h
@@ -275,7 +275,7 @@ struct atomisp_device {
275 */ 275 */
276 struct mutex streamoff_mutex; 276 struct mutex streamoff_mutex;
277 277
278 int input_cnt; 278 unsigned int input_cnt;
279 struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS]; 279 struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS];
280 struct v4l2_subdev *flash; 280 struct v4l2_subdev *flash;
281 struct v4l2_subdev *motor; 281 struct v4l2_subdev *motor;
diff --git a/drivers/staging/media/cxd2099/cxd2099.c b/drivers/staging/media/cxd2099/cxd2099.c
index 370ecb959543..f28916ea69f1 100644
--- a/drivers/staging/media/cxd2099/cxd2099.c
+++ b/drivers/staging/media/cxd2099/cxd2099.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * cxd2099.c: Driver for the CXD2099AR Common Interface Controller 2 * cxd2099.c: Driver for the CXD2099AR Common Interface Controller
3 * 3 *
4 * Copyright (C) 2010-2011 Digital Devices GmbH 4 * Copyright (C) 2010-2013 Digital Devices GmbH
5 * 5 *
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
@@ -33,7 +33,10 @@
33 33
34#include "cxd2099.h" 34#include "cxd2099.h"
35 35
36#define MAX_BUFFER_SIZE 248 36/* comment this line to deactivate the cxd2099ar buffer mode */
37#define BUFFER_MODE 1
38
39static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount);
37 40
38struct cxd { 41struct cxd {
39 struct dvb_ca_en50221 en; 42 struct dvb_ca_en50221 en;
@@ -48,6 +51,7 @@ struct cxd {
48 int mode; 51 int mode;
49 int ready; 52 int ready;
50 int dr; 53 int dr;
54 int write_busy;
51 int slot_stat; 55 int slot_stat;
52 56
53 u8 amem[1024]; 57 u8 amem[1024];
@@ -55,6 +59,9 @@ struct cxd {
55 59
56 int cammode; 60 int cammode;
57 struct mutex lock; 61 struct mutex lock;
62
63 u8 rbuf[1028];
64 u8 wbuf[1028];
58}; 65};
59 66
60static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr, 67static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
@@ -73,7 +80,7 @@ static int i2c_write_reg(struct i2c_adapter *adapter, u8 adr,
73} 80}
74 81
75static int i2c_write(struct i2c_adapter *adapter, u8 adr, 82static int i2c_write(struct i2c_adapter *adapter, u8 adr,
76 u8 *data, u8 len) 83 u8 *data, u16 len)
77{ 84{
78 struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len}; 85 struct i2c_msg msg = {.addr = adr, .flags = 0, .buf = data, .len = len};
79 86
@@ -100,12 +107,12 @@ static int i2c_read_reg(struct i2c_adapter *adapter, u8 adr,
100} 107}
101 108
102static int i2c_read(struct i2c_adapter *adapter, u8 adr, 109static int i2c_read(struct i2c_adapter *adapter, u8 adr,
103 u8 reg, u8 *data, u8 n) 110 u8 reg, u8 *data, u16 n)
104{ 111{
105 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0, 112 struct i2c_msg msgs[2] = {{.addr = adr, .flags = 0,
106 .buf = &reg, .len = 1}, 113 .buf = &reg, .len = 1},
107 {.addr = adr, .flags = I2C_M_RD, 114 {.addr = adr, .flags = I2C_M_RD,
108 .buf = data, .len = n} }; 115 .buf = data, .len = n} };
109 116
110 if (i2c_transfer(adapter, msgs, 2) != 2) { 117 if (i2c_transfer(adapter, msgs, 2) != 2) {
111 dev_err(&adapter->dev, "error in i2c_read\n"); 118 dev_err(&adapter->dev, "error in i2c_read\n");
@@ -114,14 +121,26 @@ static int i2c_read(struct i2c_adapter *adapter, u8 adr,
114 return 0; 121 return 0;
115} 122}
116 123
117static int read_block(struct cxd *ci, u8 adr, u8 *data, u8 n) 124static int read_block(struct cxd *ci, u8 adr, u8 *data, u16 n)
118{ 125{
119 int status; 126 int status = 0;
120 127
121 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr); 128 if (ci->lastaddress != adr)
129 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
122 if (!status) { 130 if (!status) {
123 ci->lastaddress = adr; 131 ci->lastaddress = adr;
124 status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, n); 132
133 while (n) {
134 int len = n;
135
136 if (ci->cfg.max_i2c && (len > ci->cfg.max_i2c))
137 len = ci->cfg.max_i2c;
138 status = i2c_read(ci->i2c, ci->cfg.adr, 1, data, len);
139 if (status)
140 return status;
141 data += len;
142 n -= len;
143 }
125 } 144 }
126 return status; 145 return status;
127} 146}
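The loop above introduces the driver's chunking idiom: when the host adapter caps a single I2C transfer at cfg.max_i2c bytes, the block is moved in pieces, advancing the data pointer and shrinking the remaining count each pass. The same shape recurs in write_block() below. In isolation (do_transfer() stands in for i2c_read()):

	while (n) {
		u16 len = n;

		if (max_i2c && len > max_i2c)
			len = max_i2c;		/* clamp to the adapter limit */
		status = do_transfer(data, len);
		if (status)
			return status;		/* abort on the first failed chunk */
		data += len;
		n -= len;
	}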
@@ -182,16 +201,16 @@ static int write_io(struct cxd *ci, u16 address, u8 val)
182 201
183static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask) 202static int write_regm(struct cxd *ci, u8 reg, u8 val, u8 mask)
184{ 203{
185 int status; 204 int status = 0;
186 205
187 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg); 206 if (ci->lastaddress != reg)
207 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, reg);
188 if (!status && reg >= 6 && reg <= 8 && mask != 0xff) 208 if (!status && reg >= 6 && reg <= 8 && mask != 0xff)
189 status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]); 209 status = i2c_read_reg(ci->i2c, ci->cfg.adr, 1, &ci->regs[reg]);
210 ci->lastaddress = reg;
190 ci->regs[reg] = (ci->regs[reg] & (~mask)) | val; 211 ci->regs[reg] = (ci->regs[reg] & (~mask)) | val;
191 if (!status) { 212 if (!status)
192 ci->lastaddress = reg;
193 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]); 213 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 1, ci->regs[reg]);
194 }
195 if (reg == 0x20) 214 if (reg == 0x20)
196 ci->regs[reg] &= 0x7f; 215 ci->regs[reg] &= 0x7f;
197 return status; 216 return status;
@@ -203,16 +222,29 @@ static int write_reg(struct cxd *ci, u8 reg, u8 val)
203} 222}
204 223
205#ifdef BUFFER_MODE 224#ifdef BUFFER_MODE
206static int write_block(struct cxd *ci, u8 adr, u8 *data, int n) 225static int write_block(struct cxd *ci, u8 adr, u8 *data, u16 n)
207{ 226{
208 int status; 227 int status = 0;
209 u8 buf[256] = {1}; 228 u8 *buf = ci->wbuf;
210 229
211 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr); 230 if (ci->lastaddress != adr)
212 if (!status) { 231 status = i2c_write_reg(ci->i2c, ci->cfg.adr, 0, adr);
213 ci->lastaddress = adr; 232 if (status)
214 memcpy(buf + 1, data, n); 233 return status;
215 status = i2c_write(ci->i2c, ci->cfg.adr, buf, n + 1); 234
235 ci->lastaddress = adr;
236 buf[0] = 1;
237 while (n) {
238 int len = n;
239
240 if (ci->cfg.max_i2c && (len + 1 > ci->cfg.max_i2c))
241 len = ci->cfg.max_i2c - 1;
242 memcpy(buf + 1, data, len);
243 status = i2c_write(ci->i2c, ci->cfg.adr, buf, len + 1);
244 if (status)
245 return status;
246 n -= len;
247 data += len;
216 } 248 }
217 return status; 249 return status;
218} 250}
@@ -238,6 +270,8 @@ static void set_mode(struct cxd *ci, int mode)
238 270
239static void cam_mode(struct cxd *ci, int mode) 271static void cam_mode(struct cxd *ci, int mode)
240{ 272{
273 u8 dummy;
274
241 if (mode == ci->cammode) 275 if (mode == ci->cammode)
242 return; 276 return;
243 277
@@ -246,16 +280,15 @@ static void cam_mode(struct cxd *ci, int mode)
246 write_regm(ci, 0x20, 0x80, 0x80); 280 write_regm(ci, 0x20, 0x80, 0x80);
247 break; 281 break;
248 case 0x01: 282 case 0x01:
249#ifdef BUFFER_MODE
250 if (!ci->en.read_data) 283 if (!ci->en.read_data)
251 return; 284 return;
285 ci->write_busy = 0;
252 dev_info(&ci->i2c->dev, "enable cam buffer mode\n"); 286 dev_info(&ci->i2c->dev, "enable cam buffer mode\n");
253 /* write_reg(ci, 0x0d, 0x00); */ 287 write_reg(ci, 0x0d, 0x00);
254 /* write_reg(ci, 0x0e, 0x01); */ 288 write_reg(ci, 0x0e, 0x01);
255 write_regm(ci, 0x08, 0x40, 0x40); 289 write_regm(ci, 0x08, 0x40, 0x40);
256 /* read_reg(ci, 0x12, &dummy); */ 290 read_reg(ci, 0x12, &dummy);
257 write_regm(ci, 0x08, 0x80, 0x80); 291 write_regm(ci, 0x08, 0x80, 0x80);
258#endif
259 break; 292 break;
260 default: 293 default:
261 break; 294 break;
@@ -325,7 +358,10 @@ static int init(struct cxd *ci)
325 if (status < 0) 358 if (status < 0)
326 break; 359 break;
327 360
328 if (ci->cfg.clock_mode) { 361 if (ci->cfg.clock_mode == 2) {
362 /* bitrate*2^13/ 72000 */
363 u32 reg = ((ci->cfg.bitrate << 13) + 71999) / 72000;
364
329 if (ci->cfg.polarity) { 365 if (ci->cfg.polarity) {
330 status = write_reg(ci, 0x09, 0x6f); 366 status = write_reg(ci, 0x09, 0x6f);
331 if (status < 0) 367 if (status < 0)
@@ -335,6 +371,25 @@ static int init(struct cxd *ci)
335 if (status < 0) 371 if (status < 0)
336 break; 372 break;
337 } 373 }
374 status = write_reg(ci, 0x20, 0x08);
375 if (status < 0)
376 break;
377 status = write_reg(ci, 0x21, (reg >> 8) & 0xff);
378 if (status < 0)
379 break;
380 status = write_reg(ci, 0x22, reg & 0xff);
381 if (status < 0)
382 break;
383 } else if (ci->cfg.clock_mode == 1) {
384 if (ci->cfg.polarity) {
385 status = write_reg(ci, 0x09, 0x6f); /* D */
386 if (status < 0)
387 break;
388 } else {
389 status = write_reg(ci, 0x09, 0x6d);
390 if (status < 0)
391 break;
392 }
338 status = write_reg(ci, 0x20, 0x68); 393 status = write_reg(ci, 0x20, 0x68);
339 if (status < 0) 394 if (status < 0)
340 break; 395 break;
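The new clock_mode == 2 branch above programs a bitrate-derived divider. The "+ 71999" term makes the division round up, i.e. reg = DIV_ROUND_UP(bitrate << 13, 72000). A worked example, assuming a bitrate of 72000:

	u32 bitrate = 72000;				/* example value only */
	u32 reg = ((bitrate << 13) + 71999) / 72000;	/* (589824000 + 71999) / 72000 */
	/* reg == 8192 == 0x2000; register 0x21 gets 0x20, register 0x22 gets 0x00 */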
@@ -346,7 +401,7 @@ static int init(struct cxd *ci)
346 break; 401 break;
347 } else { 402 } else {
348 if (ci->cfg.polarity) { 403 if (ci->cfg.polarity) {
349 status = write_reg(ci, 0x09, 0x4f); 404 status = write_reg(ci, 0x09, 0x4f); /* C */
350 if (status < 0) 405 if (status < 0)
351 break; 406 break;
352 } else { 407 } else {
@@ -354,7 +409,6 @@ static int init(struct cxd *ci)
354 if (status < 0) 409 if (status < 0)
355 break; 410 break;
356 } 411 }
357
358 status = write_reg(ci, 0x20, 0x28); 412 status = write_reg(ci, 0x20, 0x28);
359 if (status < 0) 413 if (status < 0)
360 break; 414 break;
@@ -401,7 +455,6 @@ static int read_attribute_mem(struct dvb_ca_en50221 *ca,
401 set_mode(ci, 1); 455 set_mode(ci, 1);
402 read_pccard(ci, address, &val, 1); 456 read_pccard(ci, address, &val, 1);
403 mutex_unlock(&ci->lock); 457 mutex_unlock(&ci->lock);
404 /* printk(KERN_INFO "%02x:%02x\n", address,val); */
405 return val; 458 return val;
406} 459}
407 460
@@ -446,6 +499,9 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
446{ 499{
447 struct cxd *ci = ca->data; 500 struct cxd *ci = ca->data;
448 501
502 if (ci->cammode)
503 read_data(ca, slot, ci->rbuf, 0);
504
449 mutex_lock(&ci->lock); 505 mutex_lock(&ci->lock);
450 cam_mode(ci, 0); 506 cam_mode(ci, 0);
451 write_reg(ci, 0x00, 0x21); 507 write_reg(ci, 0x00, 0x21);
@@ -465,7 +521,6 @@ static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
465 } 521 }
466 } 522 }
467 mutex_unlock(&ci->lock); 523 mutex_unlock(&ci->lock);
468 /* msleep(500); */
469 return 0; 524 return 0;
470} 525}
471 526
@@ -474,11 +529,19 @@ static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
474 struct cxd *ci = ca->data; 529 struct cxd *ci = ca->data;
475 530
476 dev_info(&ci->i2c->dev, "%s\n", __func__); 531 dev_info(&ci->i2c->dev, "%s\n", __func__);
532 if (ci->cammode)
533 read_data(ca, slot, ci->rbuf, 0);
477 mutex_lock(&ci->lock); 534 mutex_lock(&ci->lock);
535 write_reg(ci, 0x00, 0x21);
536 write_reg(ci, 0x06, 0x1F);
537 msleep(300);
538
478 write_regm(ci, 0x09, 0x08, 0x08); 539 write_regm(ci, 0x09, 0x08, 0x08);
479 write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */ 540 write_regm(ci, 0x20, 0x80, 0x80); /* Reset CAM Mode */
480 write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */ 541 write_regm(ci, 0x06, 0x07, 0x07); /* Clear IO Mode */
542
481 ci->mode = -1; 543 ci->mode = -1;
544 ci->write_busy = 0;
482 mutex_unlock(&ci->lock); 545 mutex_unlock(&ci->lock);
483 return 0; 546 return 0;
484} 547}
@@ -490,9 +553,7 @@ static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
490 mutex_lock(&ci->lock); 553 mutex_lock(&ci->lock);
491 write_regm(ci, 0x09, 0x00, 0x08); 554 write_regm(ci, 0x09, 0x00, 0x08);
492 set_mode(ci, 0); 555 set_mode(ci, 0);
493#ifdef BUFFER_MODE
494 cam_mode(ci, 1); 556 cam_mode(ci, 1);
495#endif
496 mutex_unlock(&ci->lock); 557 mutex_unlock(&ci->lock);
497 return 0; 558 return 0;
498} 559}
@@ -506,12 +567,10 @@ static int campoll(struct cxd *ci)
506 return 0; 567 return 0;
507 write_reg(ci, 0x05, istat); 568 write_reg(ci, 0x05, istat);
508 569
509 if (istat & 0x40) { 570 if (istat & 0x40)
510 ci->dr = 1; 571 ci->dr = 1;
511 dev_info(&ci->i2c->dev, "DR\n");
512 }
513 if (istat & 0x20) 572 if (istat & 0x20)
514 dev_info(&ci->i2c->dev, "WC\n"); 573 ci->write_busy = 0;
515 574
516 if (istat & 2) { 575 if (istat & 2) {
517 u8 slotstat; 576 u8 slotstat;
@@ -519,7 +578,8 @@ static int campoll(struct cxd *ci)
519 read_reg(ci, 0x01, &slotstat); 578 read_reg(ci, 0x01, &slotstat);
520 if (!(2 & slotstat)) { 579 if (!(2 & slotstat)) {
521 if (!ci->slot_stat) { 580 if (!ci->slot_stat) {
522 ci->slot_stat = DVB_CA_EN50221_POLL_CAM_PRESENT; 581 ci->slot_stat |=
582 DVB_CA_EN50221_POLL_CAM_PRESENT;
523 write_regm(ci, 0x03, 0x08, 0x08); 583 write_regm(ci, 0x03, 0x08, 0x08);
524 } 584 }
525 585
@@ -531,8 +591,8 @@ static int campoll(struct cxd *ci)
531 ci->ready = 0; 591 ci->ready = 0;
532 } 592 }
533 } 593 }
534 if (istat & 8 && 594 if ((istat & 8) &&
535 ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT) { 595 (ci->slot_stat == DVB_CA_EN50221_POLL_CAM_PRESENT)) {
536 ci->ready = 1; 596 ci->ready = 1;
537 ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY; 597 ci->slot_stat |= DVB_CA_EN50221_POLL_CAM_READY;
538 } 598 }
@@ -553,7 +613,6 @@ static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
553 return ci->slot_stat; 613 return ci->slot_stat;
554} 614}
555 615
556#ifdef BUFFER_MODE
557static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) 616static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
558{ 617{
559 struct cxd *ci = ca->data; 618 struct cxd *ci = ca->data;
@@ -564,30 +623,38 @@ static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
564 campoll(ci); 623 campoll(ci);
565 mutex_unlock(&ci->lock); 624 mutex_unlock(&ci->lock);
566 625
567 dev_info(&ci->i2c->dev, "%s\n", __func__);
568 if (!ci->dr) 626 if (!ci->dr)
569 return 0; 627 return 0;
570 628
571 mutex_lock(&ci->lock); 629 mutex_lock(&ci->lock);
572 read_reg(ci, 0x0f, &msb); 630 read_reg(ci, 0x0f, &msb);
573 read_reg(ci, 0x10, &lsb); 631 read_reg(ci, 0x10, &lsb);
574 len = (msb << 8) | lsb; 632 len = ((u16)msb << 8) | lsb;
633 if (len > ecount || len < 2) {
634 /* read it anyway or cxd may hang */
635 read_block(ci, 0x12, ci->rbuf, len);
636 mutex_unlock(&ci->lock);
637 return -EIO;
638 }
575 read_block(ci, 0x12, ebuf, len); 639 read_block(ci, 0x12, ebuf, len);
576 ci->dr = 0; 640 ci->dr = 0;
577 mutex_unlock(&ci->lock); 641 mutex_unlock(&ci->lock);
578
579 return len; 642 return len;
580} 643}
581 644
645#ifdef BUFFER_MODE
646
582static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount) 647static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
583{ 648{
584 struct cxd *ci = ca->data; 649 struct cxd *ci = ca->data;
585 650
651 if (ci->write_busy)
652 return -EAGAIN;
586 mutex_lock(&ci->lock); 653 mutex_lock(&ci->lock);
587 dev_info(&ci->i2c->dev, "%s %d\n", __func__, ecount);
588 write_reg(ci, 0x0d, ecount >> 8); 654 write_reg(ci, 0x0d, ecount >> 8);
589 write_reg(ci, 0x0e, ecount & 0xff); 655 write_reg(ci, 0x0e, ecount & 0xff);
590 write_block(ci, 0x11, ebuf, ecount); 656 write_block(ci, 0x11, ebuf, ecount);
657 ci->write_busy = 1;
591 mutex_unlock(&ci->lock); 658 mutex_unlock(&ci->lock);
592 return ecount; 659 return ecount;
593} 660}
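Two defensive patterns appear in this hunk. read_data() now validates the length reported by the chip before using it, but still drains the FIFO into the scratch buffer on a bad length because the CXD2099 can otherwise wedge. write_data() gains simple flow control: a write marks the interface busy, and campoll() clears the flag when the write-complete status bit (0x20) fires, so overlapping writes are refused with -EAGAIN rather than queued. A compressed sketch of that handshake (field names as in struct cxd above):

	if (ci->write_busy)
		return -EAGAIN;		/* previous buffer still in flight */
	/* ... program the length registers and write the block ... */
	ci->write_busy = 1;		/* cleared in campoll() when istat & 0x20 */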
diff --git a/drivers/staging/media/cxd2099/cxd2099.h b/drivers/staging/media/cxd2099/cxd2099.h
index 0eb607c5b423..f4b29b1d6eb8 100644
--- a/drivers/staging/media/cxd2099/cxd2099.h
+++ b/drivers/staging/media/cxd2099/cxd2099.h
@@ -30,8 +30,10 @@
30struct cxd2099_cfg { 30struct cxd2099_cfg {
31 u32 bitrate; 31 u32 bitrate;
32 u8 adr; 32 u8 adr;
33 u8 polarity:1; 33 u8 polarity;
34 u8 clock_mode:1; 34 u8 clock_mode;
35
36 u32 max_i2c;
35}; 37};
36 38
37#if defined(CONFIG_DVB_CXD2099) || \ 39#if defined(CONFIG_DVB_CXD2099) || \
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 002d09159896..a69007ef77bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
132 kfree(pcmd->parmbuf); 132 kfree(pcmd->parmbuf);
133 } 133 }
134 134
135 if (!pcmd->rsp) { 135 if (pcmd->rsp) {
136 if (pcmd->rspsz != 0) { 136 if (pcmd->rspsz != 0) {
137 /* free rsp in cmd_obj */ 137 /* free rsp in cmd_obj */
138 kfree(pcmd->rsp); 138 kfree(pcmd->rsp);
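A one-character logic fix: kfree(NULL) is a no-op, so the old "!pcmd->rsp" test only ever "freed" a NULL pointer and every real response buffer leaked. The corrected shape:

	if (pcmd->rsp) {		/* was: if (!pcmd->rsp) — a dead branch */
		if (pcmd->rspsz != 0)
			kfree(pcmd->rsp);	/* now actually releases the buffer */
	}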
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 963235fd7292..56cd4e5e51b2 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,7 +43,9 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
43 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 43 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
44 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 44 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
45 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ 45 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
46 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
46 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 47 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
48 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
47 {} /* Terminating entry */ 49 {} /* Terminating entry */
48}; 50};
49 51
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 944dd25924be..4754f7a20684 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void)
40 40
41 pll_reg = peek32(MXCLK_PLL_CTRL); 41 pll_reg = peek32(MXCLK_PLL_CTRL);
42 M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT; 42 M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
43 N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT; 43 N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
44 OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT; 44 OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
45 POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT; 45 POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
46 46
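A copy-and-paste fix: each PLL field must be shifted down by its own offset after masking, and extracting N with M's shift returned a misaligned value, corrupting the computed clock. The general idiom, written with an illustrative helper macro (not a kernel API):

	#define PLL_FIELD(reg, name) \
		(((reg) & PLL_CTRL_##name##_MASK) >> PLL_CTRL_##name##_SHIFT)

	M  = PLL_FIELD(pll_reg, M);
	N  = PLL_FIELD(pll_reg, N);	/* previously shifted by M's offset */
	OD = PLL_FIELD(pll_reg, OD);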
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 3aa4128703d5..67207b0554cd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1053,6 +1053,26 @@ release_fb:
1053 return err; 1053 return err;
1054} 1054}
1055 1055
1056static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
1057{
1058 struct apertures_struct *ap;
1059 bool primary = false;
1060
1061 ap = alloc_apertures(1);
1062 if (!ap)
1063 return -ENOMEM;
1064
1065 ap->ranges[0].base = pci_resource_start(pdev, 0);
1066 ap->ranges[0].size = pci_resource_len(pdev, 0);
1067#ifdef CONFIG_X86
1068 primary = pdev->resource[PCI_ROM_RESOURCE].flags &
1069 IORESOURCE_ROM_SHADOW;
1070#endif
1071 remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
1072 kfree(ap);
1073 return 0;
1074}
1075
1056static int lynxfb_pci_probe(struct pci_dev *pdev, 1076static int lynxfb_pci_probe(struct pci_dev *pdev,
1057 const struct pci_device_id *ent) 1077 const struct pci_device_id *ent)
1058{ 1078{
@@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
1061 int fbidx; 1081 int fbidx;
1062 int err; 1082 int err;
1063 1083
1084 err = lynxfb_kick_out_firmware_fb(pdev);
1085 if (err)
1086 return err;
1087
1064 /* enable device */ 1088 /* enable device */
1065 err = pcim_enable_device(pdev); 1089 err = pcim_enable_device(pdev);
1066 if (err) 1090 if (err)
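The new lynxfb_kick_out_firmware_fb() follows the stock fbdev handover sequence: describe the PCI aperture the device owns, flag it as primary when the VGA ROM is shadowed (x86 only), and call remove_conflicting_framebuffers() so any generic firmware framebuffer (vesafb/efifb) sitting on the same memory is unbound first. Running it before pcim_enable_device() in the probe path keeps two drivers from scribbling on one scanout buffer.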
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 82e5de248947..67956e24779c 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void)
2314 mutex_lock(&spk_mutex); 2314 mutex_lock(&spk_mutex);
2315 synth_release(); 2315 synth_release();
2316 mutex_unlock(&spk_mutex); 2316 mutex_unlock(&spk_mutex);
2317 spk_ttyio_unregister_ldisc();
2317 2318
2318 speakup_kobj_exit(); 2319 speakup_kobj_exit();
2319 2320
@@ -2376,6 +2377,7 @@ static int __init speakup_init(void)
2376 if (err) 2377 if (err)
2377 goto error_kobjects; 2378 goto error_kobjects;
2378 2379
2380 spk_ttyio_register_ldisc();
2379 synth_init(synth_name); 2381 synth_init(synth_name);
2380 speakup_register_devsynth(); 2382 speakup_register_devsynth();
2381 /* 2383 /*
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 87b6a0a4c54d..046040ac074c 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void);
48int spk_wait_for_xmitr(struct spk_synth *in_synth); 48int spk_wait_for_xmitr(struct spk_synth *in_synth);
49void spk_serial_release(void); 49void spk_serial_release(void);
50void spk_ttyio_release(void); 50void spk_ttyio_release(void);
51void spk_ttyio_register_ldisc(void);
52void spk_ttyio_unregister_ldisc(void);
51 53
52void synth_buffer_skip_nonlatin1(void); 54void synth_buffer_skip_nonlatin1(void);
53u16 synth_buffer_getc(void); 55u16 synth_buffer_getc(void);
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index ed8e96b06ead..fe340b07c482 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
154 struct ktermios tmp_termios; 154 struct ktermios tmp_termios;
155 dev_t dev; 155 dev_t dev;
156 156
157 ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
158 if (ret) {
159 pr_err("Error registering line discipline.\n");
160 return ret;
161 }
162
163 ret = get_dev_to_use(synth, &dev); 157 ret = get_dev_to_use(synth, &dev);
164 if (ret) 158 if (ret)
165 return ret; 159 return ret;
@@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
196 tty_unlock(tty); 190 tty_unlock(tty);
197 191
198 ret = tty_set_ldisc(tty, N_SPEAKUP); 192 ret = tty_set_ldisc(tty, N_SPEAKUP);
193 if (ret)
194 pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
199 195
200 return ret; 196 return ret;
201} 197}
202 198
199void spk_ttyio_register_ldisc(void)
200{
201 if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
202 pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
203}
204
205void spk_ttyio_unregister_ldisc(void)
206{
207 if (tty_unregister_ldisc(N_SPEAKUP))
208 pr_warn("speakup: Couldn't unregister ldisc\n");
209}
210
203static int spk_ttyio_out(struct spk_synth *in_synth, const char ch) 211static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
204{ 212{
205 if (in_synth->alive && speakup_tty && speakup_tty->ops->write) { 213 if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
@@ -300,7 +308,7 @@ void spk_ttyio_release(void)
300 308
301 tty_ldisc_flush(speakup_tty); 309 tty_ldisc_flush(speakup_tty);
302 tty_unlock(speakup_tty); 310 tty_unlock(speakup_tty);
303 tty_ldisc_release(speakup_tty); 311 tty_release_struct(speakup_tty, speakup_tty->index);
304} 312}
305EXPORT_SYMBOL_GPL(spk_ttyio_release); 313EXPORT_SYMBOL_GPL(spk_ttyio_release);
306 314
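Taken together, the three speakup files move line-discipline registration out of the per-synth path: tty_register_ldisc(N_SPEAKUP, ...) now runs once at module init and is undone at exit, while spk_ttyio_initialise_ldisc() merely attaches the already-registered discipline with tty_set_ldisc(). A condensed view of the resulting lifetime pairing (unrelated init steps elided):

	static int __init speakup_init(void)
	{
		/* ... */
		spk_ttyio_register_ldisc();	/* once per module load */
		synth_init(synth_name);
		/* ... */
		return 0;
	}

	static void __exit speakup_exit(void)
	{
		/* ... */
		spk_ttyio_unregister_ldisc();	/* mirrors the init-time call */
	}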
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig
new file mode 100644
index 000000000000..a52746f9a670
--- /dev/null
+++ b/drivers/staging/vboxvideo/Kconfig
@@ -0,0 +1,12 @@
1config DRM_VBOXVIDEO
2 tristate "Virtual Box Graphics Card"
3 depends on DRM && X86 && PCI
4 select DRM_KMS_HELPER
5 help
6 This is a KMS driver for the virtual Graphics Card used in
7 Virtual Box virtual machines.
8
9 Although it is possible to build this driver into the kernel, it is
10 advised to build it as a module, so that it can be updated
11 independently of the kernel. Select M to build this driver as a
12 module and add support for these devices via drm/kms interfaces.
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile
new file mode 100644
index 000000000000..2d0b3bc7ad73
--- /dev/null
+++ b/drivers/staging/vboxvideo/Makefile
@@ -0,0 +1,7 @@
1ccflags-y := -Iinclude/drm
2
3vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
4 vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
5 vbox_mode.o vbox_prime.o vbox_ttm.o
6
7obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
new file mode 100644
index 000000000000..ce764309b079
--- /dev/null
+++ b/drivers/staging/vboxvideo/TODO
@@ -0,0 +1,9 @@
1TODO:
2-Move the driver over to the atomic API
3-Stop using old load / unload drm_driver hooks
4-Get a full review of this driver from the drm maintainers on dri-devel
5-Extend this TODO with the results of that review
6
7Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
8Hans de Goede <hdegoede@redhat.com> and
9Michael Thayer <michael.thayer@oracle.com>.
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/staging/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..15ff5f42e2cd
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_base.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vbox_drv.h"
24#include "vbox_err.h"
25#include "vboxvideo_guest.h"
26#include "vboxvideo_vbe.h"
27#include "hgsmi_channels.h"
28#include "hgsmi_ch_setup.h"
29
30/**
31 * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
32 * @param ctx the context of the guest heap to use.
33 * @param location the offset chosen for the flags within guest VRAM.
34 * @returns 0 on success, -errno on failure
35 */
36int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
37{
38 struct hgsmi_buffer_location *p;
39
40 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
41 HGSMI_CC_HOST_FLAGS_LOCATION);
42 if (!p)
43 return -ENOMEM;
44
45 p->buf_location = location;
46 p->buf_len = sizeof(struct hgsmi_host_flags);
47
48 hgsmi_buffer_submit(ctx, p);
49 hgsmi_buffer_free(ctx, p);
50
51 return 0;
52}
53
54/**
55 * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
56 * @param ctx the context of the guest heap to use.
57 * @param caps the capabilities to report, see vbva_caps.
58 * @returns 0 on success, -errno on failure
59 */
60int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
61{
62 struct vbva_caps *p;
63
64 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
65 if (!p)
66 return -ENOMEM;
67
68 p->rc = VERR_NOT_IMPLEMENTED;
69 p->caps = caps;
70
71 hgsmi_buffer_submit(ctx, p);
72
73 WARN_ON_ONCE(RT_FAILURE(p->rc));
74
75 hgsmi_buffer_free(ctx, p);
76
77 return 0;
78}
79
80int hgsmi_test_query_conf(struct gen_pool *ctx)
81{
82 u32 value = 0;
83 int ret;
84
85 ret = hgsmi_query_conf(ctx, U32_MAX, &value);
86 if (ret)
87 return ret;
88
89 return value == U32_MAX ? 0 : -EIO;
90}
91
92/**
93 * Query the host for an HGSMI configuration parameter via an HGSMI command.
94 * @param ctx the context containing the heap used
95 * @param index the index of the parameter to query,
96 * @see vbva_conf32::index
97 * @param value_ret where to store the value of the parameter on success
98 * @returns 0 on success, -errno on failure
99 */
100int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
101{
102 struct vbva_conf32 *p;
103
104 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
105 VBVA_QUERY_CONF32);
106 if (!p)
107 return -ENOMEM;
108
109 p->index = index;
110 p->value = U32_MAX;
111
112 hgsmi_buffer_submit(ctx, p);
113
114 *value_ret = p->value;
115
116 hgsmi_buffer_free(ctx, p);
117
118 return 0;
119}
120
121/**
122 * Pass the host a new mouse pointer shape via an HGSMI command.
123 *
124 * @param ctx the context containing the heap to be used
125 * @param flags cursor flags, @see VMMDevReqMousePointer::flags
126 * @param hot_x horizontal position of the hot spot
127 * @param hot_y vertical position of the hot spot
128 * @param width width in pixels of the cursor
129 * @param height height in pixels of the cursor
130 * @param pixels pixel data, @see VMMDevReqMousePointer for the format
131 * @param len size in bytes of the pixel data
132 * @returns 0 on success, -errno on failure
133 */
134int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
135 u32 hot_x, u32 hot_y, u32 width, u32 height,
136 u8 *pixels, u32 len)
137{
138 struct vbva_mouse_pointer_shape *p;
139 u32 pixel_len = 0;
140 int rc;
141
142 if (flags & VBOX_MOUSE_POINTER_SHAPE) {
143 /*
144 * Size of the pointer data:
145 * sizeof (AND mask) + sizeof (XOR_MASK)
146 */
147 pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
148 width * 4 * height;
149 if (pixel_len > len)
150 return -EINVAL;
151
152 /*
153 * If shape is supplied, then always create the pointer visible.
154 * See comments in 'vboxUpdatePointerShape'
155 */
156 flags |= VBOX_MOUSE_POINTER_VISIBLE;
157 }
158
159 p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
160 VBVA_MOUSE_POINTER_SHAPE);
161 if (!p)
162 return -ENOMEM;
163
164 p->result = VINF_SUCCESS;
165 p->flags = flags;
166 p->hot_X = hot_x;
167 p->hot_y = hot_y;
168 p->width = width;
169 p->height = height;
170 if (pixel_len)
171 memcpy(p->data, pixels, pixel_len);
172
173 hgsmi_buffer_submit(ctx, p);
174
175 switch (p->result) {
176 case VINF_SUCCESS:
177 rc = 0;
178 break;
179 case VERR_NO_MEMORY:
180 rc = -ENOMEM;
181 break;
182 case VERR_NOT_SUPPORTED:
183 rc = -EBUSY;
184 break;
185 default:
186 rc = -EINVAL;
187 }
188
189 hgsmi_buffer_free(ctx, p);
190
191 return rc;
192}
193
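Worked example for the pixel_len check above, for a hypothetical 32x32 cursor: the 1bpp AND mask needs ((32 + 7) / 8) * 32 = 128 bytes, which the "& ~3" term leaves untouched (it is already 4-byte aligned), and the 32bpp XOR bitmap needs 32 * 4 * 32 = 4096 bytes, so pixel_len is 4224 and any caller-supplied len shorter than that is rejected with -EINVAL.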
194/**
195 * Report the guest cursor position. The host may wish to use this information
196 * to re-position its own cursor (though this is currently unlikely). The
197 * current host cursor position is returned.
198 * @param ctx The context containing the heap used.
199 * @param report_position Are we reporting a position?
200 * @param x Guest cursor X position.
201 * @param y Guest cursor Y position.
202 * @param x_host Host cursor X position is stored here. Optional.
203 * @param y_host Host cursor Y position is stored here. Optional.
204 * @returns 0 on success, -errno on failure
205 */
206int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
207 u32 x, u32 y, u32 *x_host, u32 *y_host)
208{
209 struct vbva_cursor_position *p;
210
211 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
212 VBVA_CURSOR_POSITION);
213 if (!p)
214 return -ENOMEM;
215
216 p->report_position = report_position;
217 p->x = x;
218 p->y = y;
219
220 hgsmi_buffer_submit(ctx, p);
221
222 *x_host = p->x;
223 *y_host = p->y;
224
225 hgsmi_buffer_free(ctx, p);
226
227 return 0;
228}
229
230/**
231 * @todo Mouse pointer position to be read from VMMDev memory, address of the
232 * memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
233 * region will contain host information which is needed by the guest.
234 *
235 * Reading will not cause a switch to the host.
236 *
237 * Have to take into account:
238 * * synchronization: host must write to the memory only from EMT,
239 * large structures must be read under flag, which tells the host
240 * that the guest is currently reading the memory (OWNER flag?).
241 * guest writes: maybe allocate a page for the host info and make
242 * the page readonly for the guest.
243 * * the information should be available only for additions drivers.
244 * * VMMDev additions driver will inform the host which version of the info
245 * it expects, host must support all versions.
246 */
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..8e6d9e11a69c
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __HGSMI_CH_SETUP_H__
24#define __HGSMI_CH_SETUP_H__
25
26/*
27 * Tell the host the location of hgsmi_host_flags structure, where the host
28 * can write information about pending buffers, etc, and which can be quickly
29 * polled by the guest without needing port I/O.
30 */
31#define HGSMI_CC_HOST_FLAGS_LOCATION 0
32
33struct hgsmi_buffer_location {
34 u32 buf_location;
35 u32 buf_len;
36} __packed;
37
38/* HGSMI setup and configuration data structures. */
39/* host->guest commands pending, should be accessed under FIFO lock only */
40#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
41/* IRQ is fired, should be accessed under VGAState::lock only */
42#define HGSMIHOSTFLAGS_IRQ 0x02u
43/* vsync interrupt flag, should be accessed under VGAState::lock only */
44#define HGSMIHOSTFLAGS_VSYNC 0x10u
45/** monitor hotplug flag, should be accessed under VGAState::lock only */
46#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
47/**
48 * Cursor capability state change flag, should be accessed under
49 * VGAState::lock only. @see vbva_conf32.
50 */
51#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
52
53struct hgsmi_host_flags {
54 /*
55 * Host flags can be accessed and modified in multiple threads
56 * concurrently, e.g. CrOpenGL HGCM and GUI threads when completing
57 * HGSMI 3D and Video Accel respectively, EMT thread when dealing with
58 * HGSMI command processing, etc.
59 * Besides setting/clearing flags atomically, some flags have their
60 * own special sync restrictions, see comments for flags above.
61 */
62 u32 host_flags;
63 u32 reserved[3];
64} __packed;
65
66#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/staging/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..a2a34b2167b4
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __HGSMI_CHANNELS_H__
24#define __HGSMI_CHANNELS_H__
25
26/*
27 * Each channel has an 8 bit identifier. There are a number of predefined
28 * (hardcoded) channels.
29 *
30 * HGSMI_CH_HGSMI channel can be used to map a string channel identifier
31 * to a free 16 bit numerical value. Values are allocated in the range
32 * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
33 */
34
35/* A reserved channel value */
36#define HGSMI_CH_RESERVED 0x00
37/* HGCMI: setup and configuration */
38#define HGSMI_CH_HGSMI 0x01
39/* Graphics: VBVA */
40#define HGSMI_CH_VBVA 0x02
41/* Graphics: Seamless with a single guest region */
42#define HGSMI_CH_SEAMLESS 0x03
43/* Graphics: Seamless with separate host windows */
44#define HGSMI_CH_SEAMLESS2 0x04
45/* Graphics: OpenGL HW acceleration */
46#define HGSMI_CH_OPENGL 0x05
47
48/* The first channel index to be used for string mappings (inclusive) */
49#define HGSMI_CH_STRING_FIRST 0x20
50/* The last channel index for string mappings (inclusive) */
51#define HGSMI_CH_STRING_LAST 0xff
52
53#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/staging/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..5b21fb974d20
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __HGSMI_DEFS_H__
24#define __HGSMI_DEFS_H__
25
26/* Buffer sequence type mask. */
27#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
28/* Single buffer, not a part of a sequence. */
29#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
30/* The first buffer in a sequence. */
31#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
32/* A middle buffer in a sequence. */
33#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
34/* The last buffer in a sequence. */
35#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
36
37/* 16 bytes buffer header. */
38struct hgsmi_buffer_header {
39 u32 data_size; /* Size of data that follows the header. */
40 u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
41 u8 channel; /* The channel the data must be routed to. */
42 u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
43
44 union {
45 /* Opaque placeholder to make the union 8 bytes. */
46 u8 header_data[8];
47
48 /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
49 struct {
50 u32 reserved1; /* A reserved field, initialize to 0. */
51 u32 reserved2; /* A reserved field, initialize to 0. */
52 } buffer;
53
54 /* HGSMI_BUFFER_HEADER_F_SEQ_START */
55 struct {
56 /* Must be the same for all buffers in the sequence. */
57 u32 sequence_number;
58 /* The total size of the sequence. */
59 u32 sequence_size;
60 } sequence_start;
61
62 /*
63 * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
64 * HGSMI_BUFFER_HEADER_F_SEQ_END
65 */
66 struct {
67 /* Must be the same for all buffers in the sequence. */
68 u32 sequence_number;
69 /* Data offset in the entire sequence. */
70 u32 sequence_offset;
71 } sequence_continue;
72 } u;
73} __packed;
74
75/* 8 bytes buffer tail. */
76struct hgsmi_buffer_tail {
77 /* Reserved, must be initialized to 0. */
78 u32 reserved;
79 /*
80 * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
81 * Over the header, offset and for first 4 bytes of the tail.
82 */
83 u32 checksum;
84} __packed;
85
86/*
87 * The size of the array of channels. Array indexes are u8.
88 * Note: the value must not be changed.
89 */
90#define HGSMI_NUMBER_OF_CHANNELS 0x100
91
92#endif
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/staging/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7616b8aab23a
--- /dev/null
+++ b/drivers/staging/vboxvideo/modesetting.c
@@ -0,0 +1,142 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vbox_drv.h"
24#include "vbox_err.h"
25#include "vboxvideo_guest.h"
26#include "vboxvideo_vbe.h"
27#include "hgsmi_channels.h"
28
29/**
30 * Set a video mode via an HGSMI request. The views must have been
31 * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
32 * set on the first display then it must be set first using registers.
33 * @param ctx The context containing the heap to use
34 * @param display The screen number
35 * @param origin_x The horizontal displacement relative to the first screen
36 * @param origin_y The vertical displacement relative to the first screen
37 * @param start_offset The offset of the visible area of the framebuffer
38 * relative to the framebuffer start
39 * @param pitch The offset in bytes between the starts of two adjacent
40 * scan lines in video RAM
41 * @param width The mode width
42 * @param height The mode height
43 * @param bpp The colour depth of the mode
44 * @param flags Flags
45 */
46void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
47 s32 origin_x, s32 origin_y, u32 start_offset,
48 u32 pitch, u32 width, u32 height,
49 u16 bpp, u16 flags)
50{
51 struct vbva_infoscreen *p;
52
53 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
54 VBVA_INFO_SCREEN);
55 if (!p)
56 return;
57
58 p->view_index = display;
59 p->origin_x = origin_x;
60 p->origin_y = origin_y;
61 p->start_offset = start_offset;
62 p->line_size = pitch;
63 p->width = width;
64 p->height = height;
65 p->bits_per_pixel = bpp;
66 p->flags = flags;
67
68 hgsmi_buffer_submit(ctx, p);
69 hgsmi_buffer_free(ctx, p);
70}
71
72/**
73 * Report the rectangle relative to which absolute pointer events should be
74 * expressed. This information remains valid until the next VBVA resize event
75 * for any screen, at which time it is reset to the bounding rectangle of all
76 * virtual screens.
77 * @param ctx The context containing the heap to use.
78 * @param origin_x Upper left X co-ordinate relative to the first screen.
79 * @param origin_y Upper left Y co-ordinate relative to the first screen.
80 * @param width Rectangle width.
81 * @param height Rectangle height.
82 * @returns 0 on success, -errno on failure
83 */
84int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
85 u32 width, u32 height)
86{
87 struct vbva_report_input_mapping *p;
88
89 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
90 VBVA_REPORT_INPUT_MAPPING);
91 if (!p)
92 return -ENOMEM;
93
94 p->x = origin_x;
95 p->y = origin_y;
96 p->cx = width;
97 p->cy = height;
98
99 hgsmi_buffer_submit(ctx, p);
100 hgsmi_buffer_free(ctx, p);
101
102 return 0;
103}
104
105/**
106 * Get most recent video mode hints.
107 * @param ctx The context containing the heap to use.
108 * @param screens The number of screens to query hints for, starting at 0.
109 * @param hints Array of vbva_modehint structures for receiving the hints.
110 * @returns 0 on success, -errno on failure
111 */
112int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
113 struct vbva_modehint *hints)
114{
115 struct vbva_query_mode_hints *p;
116 size_t size;
117
118 if (WARN_ON(!hints))
119 return -EINVAL;
120
121 size = screens * sizeof(struct vbva_modehint);
122 p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
123 VBVA_QUERY_MODE_HINTS);
124 if (!p)
125 return -ENOMEM;
126
127 p->hints_queried_count = screens;
128 p->hint_structure_guest_size = sizeof(struct vbva_modehint);
129 p->rc = VERR_NOT_SUPPORTED;
130
131 hgsmi_buffer_submit(ctx, p);
132
133 if (RT_FAILURE(p->rc)) {
134 hgsmi_buffer_free(ctx, p);
135 return -EIO;
136 }
137
138 memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
139 hgsmi_buffer_free(ctx, p);
140
141 return 0;
142}
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..92ae1560a16d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -0,0 +1,286 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_drv.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 27 * Michael Thayer <michael.thayer@oracle.com>,
28 * Hans de Goede <hdegoede@redhat.com>
29 */
30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/vt_kern.h>
33
34#include <drm/drmP.h>
35#include <drm/drm_crtc_helper.h>
36
37#include "vbox_drv.h"
38
39int vbox_modeset = -1;
40
41MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
42module_param_named(modeset, vbox_modeset, int, 0400);
43
44static struct drm_driver driver;
45
46static const struct pci_device_id pciidlist[] = {
47 { 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
48 { 0, 0, 0},
49};
50MODULE_DEVICE_TABLE(pci, pciidlist);
51
52static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
53{
54 return drm_get_pci_dev(pdev, ent, &driver);
55}
56
57static void vbox_pci_remove(struct pci_dev *pdev)
58{
59 struct drm_device *dev = pci_get_drvdata(pdev);
60
61 drm_put_dev(dev);
62}
63
64static int vbox_drm_freeze(struct drm_device *dev)
65{
66 struct vbox_private *vbox = dev->dev_private;
67
68 drm_kms_helper_poll_disable(dev);
69
70 pci_save_state(dev->pdev);
71
72 drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);
73
74 return 0;
75}
76
77static int vbox_drm_thaw(struct drm_device *dev)
78{
79 struct vbox_private *vbox = dev->dev_private;
80
81 drm_mode_config_reset(dev);
82 drm_helper_resume_force_mode(dev);
83 drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);
84
85 return 0;
86}
87
88static int vbox_drm_resume(struct drm_device *dev)
89{
90 int ret;
91
92 if (pci_enable_device(dev->pdev))
93 return -EIO;
94
95 ret = vbox_drm_thaw(dev);
96 if (ret)
97 return ret;
98
99 drm_kms_helper_poll_enable(dev);
100
101 return 0;
102}
103
104static int vbox_pm_suspend(struct device *dev)
105{
106 struct pci_dev *pdev = to_pci_dev(dev);
107 struct drm_device *ddev = pci_get_drvdata(pdev);
108 int error;
109
110 error = vbox_drm_freeze(ddev);
111 if (error)
112 return error;
113
114 pci_disable_device(pdev);
115 pci_set_power_state(pdev, PCI_D3hot);
116
117 return 0;
118}
119
120static int vbox_pm_resume(struct device *dev)
121{
122 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
123
124 return vbox_drm_resume(ddev);
125}
126
127static int vbox_pm_freeze(struct device *dev)
128{
129 struct pci_dev *pdev = to_pci_dev(dev);
130 struct drm_device *ddev = pci_get_drvdata(pdev);
131
132 if (!ddev || !ddev->dev_private)
133 return -ENODEV;
134
135 return vbox_drm_freeze(ddev);
136}
137
138static int vbox_pm_thaw(struct device *dev)
139{
140 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
141
142 return vbox_drm_thaw(ddev);
143}
144
145static int vbox_pm_poweroff(struct device *dev)
146{
147 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
148
149 return vbox_drm_freeze(ddev);
150}
151
152static const struct dev_pm_ops vbox_pm_ops = {
153 .suspend = vbox_pm_suspend,
154 .resume = vbox_pm_resume,
155 .freeze = vbox_pm_freeze,
156 .thaw = vbox_pm_thaw,
157 .poweroff = vbox_pm_poweroff,
158 .restore = vbox_pm_resume,
159};
160
161static struct pci_driver vbox_pci_driver = {
162 .name = DRIVER_NAME,
163 .id_table = pciidlist,
164 .probe = vbox_pci_probe,
165 .remove = vbox_pci_remove,
166 .driver.pm = &vbox_pm_ops,
167};
168
169static const struct file_operations vbox_fops = {
170 .owner = THIS_MODULE,
171 .open = drm_open,
172 .release = drm_release,
173 .unlocked_ioctl = drm_ioctl,
174 .mmap = vbox_mmap,
175 .poll = drm_poll,
176#ifdef CONFIG_COMPAT
177 .compat_ioctl = drm_compat_ioctl,
178#endif
179 .read = drm_read,
180};
181
182static int vbox_master_set(struct drm_device *dev,
183 struct drm_file *file_priv, bool from_open)
184{
185 struct vbox_private *vbox = dev->dev_private;
186
187 /*
188 * We do not yet know whether the new owner can handle hotplug, so we
189 * do not advertise dynamic modes on the first query and send a
190 * tentative hotplug notification after that to see if they query again.
191 */
192 vbox->initial_mode_queried = false;
193
194 mutex_lock(&vbox->hw_mutex);
195 /*
196 * Disable VBVA when someone releases master in case the next person
 197 * tries to do VESA.
198 */
199 /** @todo work out if anyone is likely to and whether it will work. */
200 /*
201 * Update: we also disable it because if the new master does not do
202 * dirty rectangle reporting (e.g. old versions of Plymouth) then at
203 * least the first screen will still be updated. We enable it as soon
204 * as we receive a dirty rectangle report.
205 */
206 vbox_disable_accel(vbox);
207 mutex_unlock(&vbox->hw_mutex);
208
209 return 0;
210}
211
212static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
213{
214 struct vbox_private *vbox = dev->dev_private;
215
216 /* See vbox_master_set() */
217 vbox->initial_mode_queried = false;
218
219 mutex_lock(&vbox->hw_mutex);
220 vbox_disable_accel(vbox);
221 mutex_unlock(&vbox->hw_mutex);
222}
223
224static struct drm_driver driver = {
225 .driver_features =
226 DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
227 DRIVER_PRIME,
228 .dev_priv_size = 0,
229
230 .load = vbox_driver_load,
231 .unload = vbox_driver_unload,
232 .lastclose = vbox_driver_lastclose,
233 .master_set = vbox_master_set,
234 .master_drop = vbox_master_drop,
235 .set_busid = drm_pci_set_busid,
236
237 .fops = &vbox_fops,
238 .irq_handler = vbox_irq_handler,
239 .name = DRIVER_NAME,
240 .desc = DRIVER_DESC,
241 .date = DRIVER_DATE,
242 .major = DRIVER_MAJOR,
243 .minor = DRIVER_MINOR,
244 .patchlevel = DRIVER_PATCHLEVEL,
245
246 .gem_free_object = vbox_gem_free_object,
247 .dumb_create = vbox_dumb_create,
248 .dumb_map_offset = vbox_dumb_mmap_offset,
249 .dumb_destroy = drm_gem_dumb_destroy,
250 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
251 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
252 .gem_prime_export = drm_gem_prime_export,
253 .gem_prime_import = drm_gem_prime_import,
254 .gem_prime_pin = vbox_gem_prime_pin,
255 .gem_prime_unpin = vbox_gem_prime_unpin,
256 .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
257 .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
258 .gem_prime_vmap = vbox_gem_prime_vmap,
259 .gem_prime_vunmap = vbox_gem_prime_vunmap,
260 .gem_prime_mmap = vbox_gem_prime_mmap,
261};
262
263static int __init vbox_init(void)
264{
265#ifdef CONFIG_VGA_CONSOLE
266 if (vgacon_text_force() && vbox_modeset == -1)
267 return -EINVAL;
268#endif
269
270 if (vbox_modeset == 0)
271 return -EINVAL;
272
273 return drm_pci_init(&driver, &vbox_pci_driver);
274}
275
276static void __exit vbox_exit(void)
277{
278 drm_pci_exit(&driver, &vbox_pci_driver);
279}
280
281module_init(vbox_init);
282module_exit(vbox_exit);
283
284MODULE_AUTHOR("Oracle Corporation");
285MODULE_DESCRIPTION(DRIVER_DESC);
286MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..4b9302703b36
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -0,0 +1,296 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_drv.h
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 27 * Michael Thayer <michael.thayer@oracle.com>,
28 * Hans de Goede <hdegoede@redhat.com>
29 */
30#ifndef __VBOX_DRV_H__
31#define __VBOX_DRV_H__
32
33#include <linux/genalloc.h>
34#include <linux/io.h>
35#include <linux/string.h>
36#include <linux/version.h>
37
38#include <drm/drmP.h>
39#include <drm/drm_encoder.h>
40#include <drm/drm_fb_helper.h>
41#include <drm/drm_gem.h>
42
43#include <drm/ttm/ttm_bo_api.h>
44#include <drm/ttm/ttm_bo_driver.h>
45#include <drm/ttm/ttm_placement.h>
46#include <drm/ttm/ttm_memory.h>
47#include <drm/ttm/ttm_module.h>
48
49#include "vboxvideo_guest.h"
50#include "vboxvideo_vbe.h"
51#include "hgsmi_ch_setup.h"
52
53#define DRIVER_NAME "vboxvideo"
54#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
55#define DRIVER_DATE "20130823"
56
57#define DRIVER_MAJOR 1
58#define DRIVER_MINOR 0
59#define DRIVER_PATCHLEVEL 0
60
61#define VBOX_MAX_CURSOR_WIDTH 64
62#define VBOX_MAX_CURSOR_HEIGHT 64
63#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
64#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
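/*
 * Worked example: for the maximum 64x64 cursor this amounts to
 * 64 * 64 * 4 = 16384 bytes of 32bpp pixel data plus 64 * 64 / 8 = 512
 * bytes of 1 bit-per-pixel mask, i.e. 16896 bytes in total.
 */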
65
66#define VBOX_MAX_SCREENS 32
67
68#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
69 VBVA_ADAPTER_INFORMATION_SIZE)
70#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
71#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
72 sizeof(struct hgsmi_host_flags))
73#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
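/*
 * Resulting VRAM layout (sketch): the guest heap occupies the final
 * VBVA_ADAPTER_INFORMATION_SIZE bytes of VRAM, with the hgsmi_host_flags
 * structure placed at its very end; everything below GUEST_HEAP_OFFSET()
 * stays available to the TTM memory manager.
 */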
74
75struct vbox_fbdev;
76
77struct vbox_private {
78 struct drm_device *dev;
79
80 u8 __iomem *guest_heap;
81 u8 __iomem *vbva_buffers;
82 struct gen_pool *guest_pool;
83 struct vbva_buf_ctx *vbva_info;
84 bool any_pitch;
85 u32 num_crtcs;
86 /** Amount of available VRAM, including space used for buffers. */
87 u32 full_vram_size;
88 /** Amount of available VRAM, not including space used for buffers. */
89 u32 available_vram_size;
90 /** Array of structures for receiving mode hints. */
91 struct vbva_modehint *last_mode_hints;
92
93 struct vbox_fbdev *fbdev;
94
95 int fb_mtrr;
96
97 struct {
98 struct drm_global_reference mem_global_ref;
99 struct ttm_bo_global_ref bo_global_ref;
100 struct ttm_bo_device bdev;
101 } ttm;
102
103 struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
104 /**
105 * We decide whether or not user-space supports display hot-plug
106 * depending on whether they react to a hot-plug event after the initial
107 * mode query.
108 */
109 bool initial_mode_queried;
110 struct work_struct hotplug_work;
111 u32 input_mapping_width;
112 u32 input_mapping_height;
113 /**
114 * Is user-space using an X.Org-style layout of one large frame-buffer
 115 * encompassing all screens, or is the fbdev console active?
116 */
117 bool single_framebuffer;
118 u32 cursor_width;
119 u32 cursor_height;
120 u32 cursor_hot_x;
121 u32 cursor_hot_y;
122 size_t cursor_data_size;
123 u8 cursor_data[CURSOR_DATA_SIZE];
124};
125
126#undef CURSOR_PIXEL_COUNT
127#undef CURSOR_DATA_SIZE
128
129int vbox_driver_load(struct drm_device *dev, unsigned long flags);
130void vbox_driver_unload(struct drm_device *dev);
131void vbox_driver_lastclose(struct drm_device *dev);
132
133struct vbox_gem_object;
134
135struct vbox_connector {
136 struct drm_connector base;
137 char name[32];
138 struct vbox_crtc *vbox_crtc;
139 struct {
140 u16 width;
141 u16 height;
142 bool disconnected;
143 } mode_hint;
144};
145
146struct vbox_crtc {
147 struct drm_crtc base;
148 bool blanked;
149 bool disconnected;
150 unsigned int crtc_id;
151 u32 fb_offset;
152 bool cursor_enabled;
153 u16 x_hint;
154 u16 y_hint;
155};
156
157struct vbox_encoder {
158 struct drm_encoder base;
159};
160
161struct vbox_framebuffer {
162 struct drm_framebuffer base;
163 struct drm_gem_object *obj;
164};
165
166struct vbox_fbdev {
167 struct drm_fb_helper helper;
168 struct vbox_framebuffer afb;
169 int size;
170 struct ttm_bo_kmap_obj mapping;
171 int x1, y1, x2, y2; /* dirty rect */
172 spinlock_t dirty_lock;
173};
174
175#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
176#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
177#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
178#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
179
180int vbox_mode_init(struct drm_device *dev);
181void vbox_mode_fini(struct drm_device *dev);
182
183#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
184#define CRTC_FB(crtc) ((crtc)->primary->fb)
185
186void vbox_enable_accel(struct vbox_private *vbox);
187void vbox_disable_accel(struct vbox_private *vbox);
188void vbox_report_caps(struct vbox_private *vbox);
189
190void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
191 struct drm_clip_rect *rects,
192 unsigned int num_rects);
193
194int vbox_framebuffer_init(struct drm_device *dev,
195 struct vbox_framebuffer *vbox_fb,
196 const struct DRM_MODE_FB_CMD *mode_cmd,
197 struct drm_gem_object *obj);
198
199int vbox_fbdev_init(struct drm_device *dev);
200void vbox_fbdev_fini(struct drm_device *dev);
201void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
202
203struct vbox_bo {
204 struct ttm_buffer_object bo;
205 struct ttm_placement placement;
206 struct ttm_bo_kmap_obj kmap;
207 struct drm_gem_object gem;
208 struct ttm_place placements[3];
209 int pin_count;
210};
211
212#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
213
214static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
215{
216 return container_of(bo, struct vbox_bo, bo);
217}
218
219#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
220
221int vbox_dumb_create(struct drm_file *file,
222 struct drm_device *dev,
223 struct drm_mode_create_dumb *args);
224
225void vbox_gem_free_object(struct drm_gem_object *obj);
226int vbox_dumb_mmap_offset(struct drm_file *file,
227 struct drm_device *dev,
228 u32 handle, u64 *offset);
229
230#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
231
232int vbox_mm_init(struct vbox_private *vbox);
233void vbox_mm_fini(struct vbox_private *vbox);
234
235int vbox_bo_create(struct drm_device *dev, int size, int align,
236 u32 flags, struct vbox_bo **pvboxbo);
237
238int vbox_gem_create(struct drm_device *dev,
239 u32 size, bool iskernel, struct drm_gem_object **obj);
240
241int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
242int vbox_bo_unpin(struct vbox_bo *bo);
243
244static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
245{
246 int ret;
247
248 ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
249 if (ret) {
250 if (ret != -ERESTARTSYS && ret != -EBUSY)
251 DRM_ERROR("reserve failed %p\n", bo);
252 return ret;
253 }
254 return 0;
255}
256
257static inline void vbox_bo_unreserve(struct vbox_bo *bo)
258{
259 ttm_bo_unreserve(&bo->bo);
260}
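
/*
 * Illustrative sketch (not part of this patch): the usual
 * reserve / operate / unreserve pattern the pin and kmap callers follow.
 */
static inline int example_pin_locked(struct vbox_bo *bo, u32 pl_flag)
{
	int ret;

	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;

	ret = vbox_bo_pin(bo, pl_flag, NULL);
	vbox_bo_unreserve(bo);

	return ret;
}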
261
262void vbox_ttm_placement(struct vbox_bo *bo, int domain);
263int vbox_bo_push_sysram(struct vbox_bo *bo);
264int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
265
266/* vbox_prime.c */
267int vbox_gem_prime_pin(struct drm_gem_object *obj);
268void vbox_gem_prime_unpin(struct drm_gem_object *obj);
269struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
270struct drm_gem_object *vbox_gem_prime_import_sg_table(
271 struct drm_device *dev, struct dma_buf_attachment *attach,
272 struct sg_table *table);
273void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
274void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
275int vbox_gem_prime_mmap(struct drm_gem_object *obj,
276 struct vm_area_struct *area);
277
278/* vbox_irq.c */
279int vbox_irq_init(struct vbox_private *vbox);
280void vbox_irq_fini(struct vbox_private *vbox);
281void vbox_report_hotplug(struct vbox_private *vbox);
282irqreturn_t vbox_irq_handler(int irq, void *arg);
283
284/* vbox_hgsmi.c */
285void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
286 u8 channel, u16 channel_info);
287void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
288int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
289
290static inline void vbox_write_ioport(u16 index, u16 data)
291{
292 outw(index, VBE_DISPI_IOPORT_INDEX);
293 outw(data, VBE_DISPI_IOPORT_DATA);
294}
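
/*
 * Illustrative sketch (not part of this patch): writing an ID to the
 * DISPI index register and reading it back is how the driver probes for
 * interface support (see vbox_check_supported() in vbox_main.c).
 */
static inline bool example_dispi_id_supported(u16 id)
{
	vbox_write_ioport(VBE_DISPI_INDEX_ID, id);

	return inw(VBE_DISPI_IOPORT_DATA) == id;
}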
295
296#endif
diff --git a/drivers/staging/vboxvideo/vbox_err.h b/drivers/staging/vboxvideo/vbox_err.h
new file mode 100644
index 000000000000..562db8630eb0
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_err.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VBOX_ERR_H__
24#define __VBOX_ERR_H__
25
26/**
27 * @name VirtualBox virtual-hardware error macros
28 * @{
29 */
30
31#define VINF_SUCCESS 0
32#define VERR_INVALID_PARAMETER (-2)
33#define VERR_INVALID_POINTER (-6)
34#define VERR_NO_MEMORY (-8)
35#define VERR_NOT_IMPLEMENTED (-12)
36#define VERR_INVALID_FUNCTION (-36)
37#define VERR_NOT_SUPPORTED (-37)
38#define VERR_TOO_MUCH_DATA (-42)
39#define VERR_INVALID_STATE (-79)
40#define VERR_OUT_OF_RESOURCES (-80)
41#define VERR_ALREADY_EXISTS (-105)
42#define VERR_INTERNAL_ERROR (-225)
43
44#define RT_SUCCESS_NP(rc) ((int)(rc) >= VINF_SUCCESS)
45#define RT_SUCCESS(rc) (likely(RT_SUCCESS_NP(rc)))
46#define RT_FAILURE(rc) (unlikely(!RT_SUCCESS_NP(rc)))
47
48/** @} */
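
/*
 * Illustrative sketch (not part of this patch): mapping the common VBox
 * statuses onto -errno values, as the callers in this driver do ad hoc
 * (e.g. returning -EIO on RT_FAILURE()). Assumes <linux/errno.h> is
 * available through the including file.
 */
static inline int example_vbox_status_to_errno(int rc)
{
	if (RT_SUCCESS(rc))
		return 0;

	switch (rc) {
	case VERR_NO_MEMORY:
		return -ENOMEM;
	case VERR_INVALID_PARAMETER:
	case VERR_INVALID_POINTER:
		return -EINVAL;
	case VERR_NOT_IMPLEMENTED:
	case VERR_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}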
49
50#endif
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..35f6d9f8c203
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_fb.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 27 * Michael Thayer <michael.thayer@oracle.com>
28 */
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/mm.h>
34#include <linux/tty.h>
35#include <linux/sysrq.h>
36#include <linux/delay.h>
37#include <linux/fb.h>
38#include <linux/init.h>
39
40#include <drm/drmP.h>
41#include <drm/drm_crtc.h>
42#include <drm/drm_fb_helper.h>
43#include <drm/drm_crtc_helper.h>
44
45#include "vbox_drv.h"
46#include "vboxvideo.h"
47
48#define VBOX_DIRTY_DELAY (HZ / 30)
49/**
50 * Tell the host about dirty rectangles to update.
51 */
52static void vbox_dirty_update(struct vbox_fbdev *fbdev,
53 int x, int y, int width, int height)
54{
55 struct drm_gem_object *obj;
56 struct vbox_bo *bo;
57 int ret = -EBUSY;
58 bool store_for_later = false;
59 int x2, y2;
60 unsigned long flags;
61 struct drm_clip_rect rect;
62
63 obj = fbdev->afb.obj;
64 bo = gem_to_vbox_bo(obj);
65
66 /*
 67 * Try to reserve the BO; if we fail with -EBUSY then the
 68 * BO is being moved and we should store up the damage
 69 * until later.
70 */
71 if (drm_can_sleep())
72 ret = vbox_bo_reserve(bo, true);
73 if (ret) {
74 if (ret != -EBUSY)
75 return;
76
77 store_for_later = true;
78 }
79
80 x2 = x + width - 1;
81 y2 = y + height - 1;
82 spin_lock_irqsave(&fbdev->dirty_lock, flags);
83
84 if (fbdev->y1 < y)
85 y = fbdev->y1;
86 if (fbdev->y2 > y2)
87 y2 = fbdev->y2;
88 if (fbdev->x1 < x)
89 x = fbdev->x1;
90 if (fbdev->x2 > x2)
91 x2 = fbdev->x2;
92
93 if (store_for_later) {
94 fbdev->x1 = x;
95 fbdev->x2 = x2;
96 fbdev->y1 = y;
97 fbdev->y2 = y2;
98 spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
99 return;
100 }
101
102 fbdev->x1 = INT_MAX;
103 fbdev->y1 = INT_MAX;
104 fbdev->x2 = 0;
105 fbdev->y2 = 0;
106
107 spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
108
109 /*
110 * Not sure why the original code subtracted 1 here, but I will keep
111 * it that way to avoid unnecessary differences.
112 */
113 rect.x1 = x;
114 rect.x2 = x2 + 1;
115 rect.y1 = y;
116 rect.y2 = y2 + 1;
117 vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
118
119 vbox_bo_unreserve(bo);
120}
121
122#ifdef CONFIG_FB_DEFERRED_IO
123static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
124{
125 struct vbox_fbdev *fbdev = info->par;
126 unsigned long start, end, min, max;
127 struct page *page;
128 int y1, y2;
129
130 min = ULONG_MAX;
131 max = 0;
132 list_for_each_entry(page, pagelist, lru) {
133 start = page->index << PAGE_SHIFT;
134 end = start + PAGE_SIZE - 1;
135 min = min(min, start);
136 max = max(max, end);
137 }
138
139 if (min < max) {
140 y1 = min / info->fix.line_length;
141 y2 = (max / info->fix.line_length) + 1;
142 DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
143 __func__, y1, info->var.xres, y2 - y1 - 1);
144 vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
145 }
146}
147
148static struct fb_deferred_io vbox_defio = {
149 .delay = VBOX_DIRTY_DELAY,
150 .deferred_io = vbox_deferred_io,
151};
152#endif
153
154static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
155{
156 struct vbox_fbdev *fbdev = info->par;
157
158 sys_fillrect(info, rect);
159 vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
160}
161
162static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
163{
164 struct vbox_fbdev *fbdev = info->par;
165
166 sys_copyarea(info, area);
167 vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
168}
169
170static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
171{
172 struct vbox_fbdev *fbdev = info->par;
173
174 sys_imageblit(info, image);
175 vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
176 image->height);
177}
178
179static struct fb_ops vboxfb_ops = {
180 .owner = THIS_MODULE,
181 .fb_check_var = drm_fb_helper_check_var,
182 .fb_set_par = drm_fb_helper_set_par,
183 .fb_fillrect = vbox_fillrect,
184 .fb_copyarea = vbox_copyarea,
185 .fb_imageblit = vbox_imageblit,
186 .fb_pan_display = drm_fb_helper_pan_display,
187 .fb_blank = drm_fb_helper_blank,
188 .fb_setcmap = drm_fb_helper_setcmap,
189 .fb_debug_enter = drm_fb_helper_debug_enter,
190 .fb_debug_leave = drm_fb_helper_debug_leave,
191};
192
193static int vboxfb_create_object(struct vbox_fbdev *fbdev,
194 struct DRM_MODE_FB_CMD *mode_cmd,
195 struct drm_gem_object **gobj_p)
196{
197 struct drm_device *dev = fbdev->helper.dev;
198 u32 size;
199 struct drm_gem_object *gobj;
200 u32 pitch = mode_cmd->pitches[0];
201 int ret;
202
203 size = pitch * mode_cmd->height;
204 ret = vbox_gem_create(dev, size, true, &gobj);
205 if (ret)
206 return ret;
207
208 *gobj_p = gobj;
209
210 return 0;
211}
212
213static int vboxfb_create(struct drm_fb_helper *helper,
214 struct drm_fb_helper_surface_size *sizes)
215{
216 struct vbox_fbdev *fbdev =
217 container_of(helper, struct vbox_fbdev, helper);
218 struct drm_device *dev = fbdev->helper.dev;
219 struct DRM_MODE_FB_CMD mode_cmd;
220 struct drm_framebuffer *fb;
221 struct fb_info *info;
222 struct device *device = &dev->pdev->dev;
223 struct drm_gem_object *gobj;
224 struct vbox_bo *bo;
225 int size, ret;
226 u32 pitch;
227
228 mode_cmd.width = sizes->surface_width;
229 mode_cmd.height = sizes->surface_height;
230 pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
231 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
232 sizes->surface_depth);
233 mode_cmd.pitches[0] = pitch;
234
235 size = pitch * mode_cmd.height;
236
237 ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
238 if (ret) {
239 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
240 return ret;
241 }
242
243 ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
244 if (ret)
245 return ret;
246
247 bo = gem_to_vbox_bo(gobj);
248
249 ret = vbox_bo_reserve(bo, false);
250 if (ret)
251 return ret;
252
253 ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
254 if (ret) {
255 vbox_bo_unreserve(bo);
256 return ret;
257 }
258
259 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
260 vbox_bo_unreserve(bo);
261 if (ret) {
262 DRM_ERROR("failed to kmap fbcon\n");
263 return ret;
264 }
265
266 info = framebuffer_alloc(0, device);
267 if (!info)
268 return -ENOMEM;
269 info->par = fbdev;
270
271 fbdev->size = size;
272
273 fb = &fbdev->afb.base;
274 fbdev->helper.fb = fb;
275 fbdev->helper.fbdev = info;
276
277 strcpy(info->fix.id, "vboxdrmfb");
278
279 /*
280 * The last flag forces a mode set on VT switches even if the kernel
281 * does not think it is needed.
282 */
283 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
284 FBINFO_MISC_ALWAYS_SETPAR;
285 info->fbops = &vboxfb_ops;
286
287 ret = fb_alloc_cmap(&info->cmap, 256, 0);
288 if (ret)
289 return -ENOMEM;
290
291 /*
292 * This seems to be done for safety checking that the framebuffer
293 * is not registered twice by different drivers.
294 */
295 info->apertures = alloc_apertures(1);
296 if (!info->apertures)
297 return -ENOMEM;
298 info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
299 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
300
301 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
302 drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
303 sizes->fb_height);
304
305 info->screen_base = bo->kmap.virtual;
306 info->screen_size = size;
307
308#ifdef CONFIG_FB_DEFERRED_IO
309 info->fbdefio = &vbox_defio;
310 fb_deferred_io_init(info);
311#endif
312
313 info->pixmap.flags = FB_PIXMAP_SYSTEM;
314
315 DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
316
317 return 0;
318}
319
320static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
321 u16 blue, int regno)
322{
323}
324
325static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
326 u16 *blue, int regno)
327{
328 *red = regno;
329 *green = regno;
330 *blue = regno;
331}
332
333static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
334 .gamma_set = vbox_fb_gamma_set,
335 .gamma_get = vbox_fb_gamma_get,
336 .fb_probe = vboxfb_create,
337};
338
339void vbox_fbdev_fini(struct drm_device *dev)
340{
341 struct vbox_private *vbox = dev->dev_private;
342 struct vbox_fbdev *fbdev = vbox->fbdev;
343 struct vbox_framebuffer *afb = &fbdev->afb;
344
345 drm_fb_helper_unregister_fbi(&fbdev->helper);
346
347 if (afb->obj) {
348 struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
349
350 if (!vbox_bo_reserve(bo, false)) {
351 if (bo->kmap.virtual)
352 ttm_bo_kunmap(&bo->kmap);
353 /*
354 * QXL does this, but is it really needed before
355 * freeing?
356 */
357 if (bo->pin_count)
358 vbox_bo_unpin(bo);
359 vbox_bo_unreserve(bo);
360 }
361 drm_gem_object_unreference_unlocked(afb->obj);
362 afb->obj = NULL;
363 }
364 drm_fb_helper_fini(&fbdev->helper);
365
366 drm_framebuffer_unregister_private(&afb->base);
367 drm_framebuffer_cleanup(&afb->base);
368}
369
370int vbox_fbdev_init(struct drm_device *dev)
371{
372 struct vbox_private *vbox = dev->dev_private;
373 struct vbox_fbdev *fbdev;
374 int ret;
375
376 fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
377 if (!fbdev)
378 return -ENOMEM;
379
380 vbox->fbdev = fbdev;
381 spin_lock_init(&fbdev->dirty_lock);
382
383 drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
384 ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
385 if (ret)
386 return ret;
387
388 ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
389 if (ret)
390 goto err_fini;
391
392 /* disable all the possible outputs/crtcs before entering KMS mode */
393 drm_helper_disable_unused_functions(dev);
394
395 ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
396 if (ret)
397 goto err_fini;
398
399 return 0;
400
401err_fini:
402 drm_fb_helper_fini(&fbdev->helper);
403 return ret;
404}
405
406void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
407{
408 struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
409
410 fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
411 fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
412}
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/staging/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..822fd31121cb
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 * Authors: Hans de Goede <hdegoede@redhat.com>
25 */
26
27#include "vbox_drv.h"
28#include "vboxvideo_vbe.h"
29#include "hgsmi_defs.h"
30
31/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
32static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
33{
34 while (size--) {
35 hash += *data++;
36 hash += (hash << 10);
37 hash ^= (hash >> 6);
38 }
39
40 return hash;
41}
42
43static u32 hgsmi_hash_end(u32 hash)
44{
45 hash += (hash << 3);
46 hash ^= (hash >> 11);
47 hash += (hash << 15);
48
49 return hash;
50}
51
52/* Not really a checksum but that is the naming used in all vbox code */
53static u32 hgsmi_checksum(u32 offset,
54 const struct hgsmi_buffer_header *header,
55 const struct hgsmi_buffer_tail *tail)
56{
57 u32 checksum;
58
59 checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
60 checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
 61 /* 4 -> hash only the tail's reserved field, not the checksum itself */
62 checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
63
64 return hgsmi_hash_end(checksum);
65}
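
/*
 * Illustrative sketch (not part of this patch): a receiver can validate a
 * buffer by recomputing the hash over the same bytes and comparing it to
 * the checksum stored in the tail.
 */
static inline bool example_checksum_matches(u32 offset,
					    const struct hgsmi_buffer_header *h,
					    const struct hgsmi_buffer_tail *t)
{
	return hgsmi_checksum(offset, h, t) == t->checksum;
}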
66
67void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
68 u8 channel, u16 channel_info)
69{
70 struct hgsmi_buffer_header *h;
71 struct hgsmi_buffer_tail *t;
72 size_t total_size;
73 dma_addr_t offset;
74
75 total_size = size + sizeof(*h) + sizeof(*t);
76 h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
77 if (!h)
78 return NULL;
79
80 t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
81
82 h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
83 h->data_size = size;
84 h->channel = channel;
85 h->channel_info = channel_info;
86 memset(&h->u.header_data, 0, sizeof(h->u.header_data));
87
88 t->reserved = 0;
89 t->checksum = hgsmi_checksum(offset, h, t);
90
91 return (u8 *)h + sizeof(*h);
92}
93
94void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
95{
96 struct hgsmi_buffer_header *h =
97 (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
98 size_t total_size = h->data_size + sizeof(*h) +
99 sizeof(struct hgsmi_buffer_tail);
100
101 gen_pool_free(guest_pool, (unsigned long)h, total_size);
102}
103
104int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
105{
106 phys_addr_t offset;
107
108 offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
109 sizeof(struct hgsmi_buffer_header));
110 outl(offset, VGA_PORT_HGSMI_GUEST);
 111 /* Full barrier: the host may have changed memory during the submit. */
112 mb();
113
114 return 0;
115}
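
/*
 * Illustrative sketch (not part of this patch): the alloc / fill /
 * submit / free round trip that every HGSMI caller in this driver
 * follows; "payload_size", "channel" and "channel_info" stand in for a
 * concrete command such as VBVA_INFO_SCREEN.
 */
static inline int example_hgsmi_round_trip(struct gen_pool *guest_pool,
					   size_t payload_size, u8 channel,
					   u16 channel_info)
{
	void *p;

	p = hgsmi_buffer_alloc(guest_pool, payload_size, channel,
			       channel_info);
	if (!p)
		return -ENOMEM;

	/* ... fill in the command payload at p here ... */

	hgsmi_buffer_submit(guest_pool, p);
	hgsmi_buffer_free(guest_pool, p);

	return 0;
}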
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..3ca8bec62ac4
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (C) 2016-2017 Oracle Corporation
3 * This file is based on qxl_irq.c
4 * Copyright 2013 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alon Levy
 26 * Michael Thayer <michael.thayer@oracle.com>,
27 * Hans de Goede <hdegoede@redhat.com>
28 */
29
30#include <drm/drm_crtc_helper.h>
31
32#include "vbox_drv.h"
33#include "vboxvideo.h"
34
35static void vbox_clear_irq(void)
36{
37 outl((u32)~0, VGA_PORT_HGSMI_HOST);
38}
39
40static u32 vbox_get_flags(struct vbox_private *vbox)
41{
42 return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
43}
44
45void vbox_report_hotplug(struct vbox_private *vbox)
46{
47 schedule_work(&vbox->hotplug_work);
48}
49
50irqreturn_t vbox_irq_handler(int irq, void *arg)
51{
52 struct drm_device *dev = (struct drm_device *)arg;
53 struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
54 u32 host_flags = vbox_get_flags(vbox);
55
56 if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
57 return IRQ_NONE;
58
59 /*
60 * Due to a bug in the initial host implementation of hot-plug irqs,
61 * the hot-plug and cursor capability flags were never cleared.
62 * Fortunately we can tell when they would have been set by checking
63 * that the VSYNC flag is not set.
64 */
65 if (host_flags &
66 (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
67 !(host_flags & HGSMIHOSTFLAGS_VSYNC))
68 vbox_report_hotplug(vbox);
69
70 vbox_clear_irq();
71
72 return IRQ_HANDLED;
73}
74
75/**
76 * Check that the position hints provided by the host are suitable for GNOME
77 * shell (i.e. all screens disjoint and hints for all enabled screens) and if
78 * not replace them with default ones. Providing valid hints improves the
79 * chances that we will get a known screen layout for pointer mapping.
80 */
81static void validate_or_set_position_hints(struct vbox_private *vbox)
82{
83 struct vbva_modehint *hintsi, *hintsj;
84 bool valid = true;
85 u16 currentx = 0;
86 int i, j;
87
88 for (i = 0; i < vbox->num_crtcs; ++i) {
89 for (j = 0; j < i; ++j) {
90 hintsi = &vbox->last_mode_hints[i];
91 hintsj = &vbox->last_mode_hints[j];
92
93 if (hintsi->enabled && hintsj->enabled) {
94 if (hintsi->dx >= 0xffff ||
95 hintsi->dy >= 0xffff ||
96 hintsj->dx >= 0xffff ||
97 hintsj->dy >= 0xffff ||
98 (hintsi->dx <
99 hintsj->dx + (hintsj->cx & 0x8fff) &&
100 hintsi->dx + (hintsi->cx & 0x8fff) >
101 hintsj->dx) ||
102 (hintsi->dy <
103 hintsj->dy + (hintsj->cy & 0x8fff) &&
104 hintsi->dy + (hintsi->cy & 0x8fff) >
105 hintsj->dy))
106 valid = false;
107 }
108 }
109 }
110 if (!valid)
111 for (i = 0; i < vbox->num_crtcs; ++i) {
112 if (vbox->last_mode_hints[i].enabled) {
113 vbox->last_mode_hints[i].dx = currentx;
114 vbox->last_mode_hints[i].dy = 0;
115 currentx +=
116 vbox->last_mode_hints[i].cx & 0x8fff;
117 }
118 }
119}
120
121/**
122 * Query the host for the most recent video mode hints.
123 */
124static void vbox_update_mode_hints(struct vbox_private *vbox)
125{
126 struct drm_device *dev = vbox->dev;
127 struct drm_connector *connector;
128 struct vbox_connector *vbox_conn;
129 struct vbva_modehint *hints;
130 u16 flags;
131 bool disconnected;
132 unsigned int crtc_id;
133 int ret;
134
135 ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
136 vbox->last_mode_hints);
137 if (ret) {
138 DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
139 return;
140 }
141
142 validate_or_set_position_hints(vbox);
143 drm_modeset_lock_all(dev);
144 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
145 vbox_conn = to_vbox_connector(connector);
146
147 hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
148 if (hints->magic != VBVAMODEHINT_MAGIC)
149 continue;
150
151 disconnected = !(hints->enabled);
152 crtc_id = vbox_conn->vbox_crtc->crtc_id;
153 vbox_conn->mode_hint.width = hints->cx & 0x8fff;
154 vbox_conn->mode_hint.height = hints->cy & 0x8fff;
155 vbox_conn->vbox_crtc->x_hint = hints->dx;
156 vbox_conn->vbox_crtc->y_hint = hints->dy;
157 vbox_conn->mode_hint.disconnected = disconnected;
158
159 if (vbox_conn->vbox_crtc->disconnected == disconnected)
160 continue;
161
162 if (disconnected)
163 flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
164 else
165 flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
166
167 hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
168 hints->cx * 4, hints->cx,
169 hints->cy, 0, flags);
170
171 vbox_conn->vbox_crtc->disconnected = disconnected;
172 }
173 drm_modeset_unlock_all(dev);
174}
175
176static void vbox_hotplug_worker(struct work_struct *work)
177{
178 struct vbox_private *vbox = container_of(work, struct vbox_private,
179 hotplug_work);
180
181 vbox_update_mode_hints(vbox);
182 drm_kms_helper_hotplug_event(vbox->dev);
183}
184
185int vbox_irq_init(struct vbox_private *vbox)
186{
187 INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
188 vbox_update_mode_hints(vbox);
189
190 return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
191}
192
193void vbox_irq_fini(struct vbox_private *vbox)
194{
195 drm_irq_uninstall(vbox->dev);
196 flush_work(&vbox->hotplug_work);
197}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..d0c6ec75a3c7
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -0,0 +1,534 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_main.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>,
 27 * Michael Thayer <michael.thayer@oracle.com>,
28 * Hans de Goede <hdegoede@redhat.com>
29 */
30#include <drm/drm_fb_helper.h>
31#include <drm/drm_crtc_helper.h>
32
33#include "vbox_drv.h"
34#include "vbox_err.h"
35#include "vboxvideo_guest.h"
36#include "vboxvideo_vbe.h"
37
38static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
39{
40 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
41
42 if (vbox_fb->obj)
43 drm_gem_object_unreference_unlocked(vbox_fb->obj);
44
45 drm_framebuffer_cleanup(fb);
46 kfree(fb);
47}
48
49void vbox_enable_accel(struct vbox_private *vbox)
50{
51 unsigned int i;
52 struct vbva_buffer *vbva;
53
54 if (!vbox->vbva_info || !vbox->vbva_buffers) {
55 /* Should never happen... */
56 DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
57 return;
58 }
59
60 for (i = 0; i < vbox->num_crtcs; ++i) {
61 if (vbox->vbva_info[i].vbva)
62 continue;
63
64 vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
65 if (!vbva_enable(&vbox->vbva_info[i],
66 vbox->guest_pool, vbva, i)) {
67 /* very old host or driver error. */
68 DRM_ERROR("vboxvideo: vbva_enable failed\n");
69 return;
70 }
71 }
72}
73
74void vbox_disable_accel(struct vbox_private *vbox)
75{
76 unsigned int i;
77
78 for (i = 0; i < vbox->num_crtcs; ++i)
79 vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
80}
81
82void vbox_report_caps(struct vbox_private *vbox)
83{
84 u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
85 VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
86
87 if (vbox->initial_mode_queried)
88 caps |= VBVACAPS_VIDEO_MODE_HINTS;
89
90 hgsmi_send_caps_info(vbox->guest_pool, caps);
91}
92
93/**
94 * Send information about dirty rectangles to VBVA. If necessary we enable
95 * VBVA first, as this is normally disabled after a change of master in case
96 * the new master does not send dirty rectangle information (is this even
97 * allowed?)
98 */
99void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
100 struct drm_clip_rect *rects,
101 unsigned int num_rects)
102{
103 struct vbox_private *vbox = fb->dev->dev_private;
104 struct drm_crtc *crtc;
105 unsigned int i;
106
107 mutex_lock(&vbox->hw_mutex);
108 list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
109 if (CRTC_FB(crtc) != fb)
110 continue;
111
112 vbox_enable_accel(vbox);
113
114 for (i = 0; i < num_rects; ++i) {
115 struct vbva_cmd_hdr cmd_hdr;
116 unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
117
118 if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
119 (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
120 (rects[i].x2 < crtc->x) ||
121 (rects[i].y2 < crtc->y))
122 continue;
123
124 cmd_hdr.x = (s16)rects[i].x1;
125 cmd_hdr.y = (s16)rects[i].y1;
126 cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
127 cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
128
129 if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
130 vbox->guest_pool))
131 continue;
132
133 vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
134 &cmd_hdr, sizeof(cmd_hdr));
135 vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
136 }
137 }
138 mutex_unlock(&vbox->hw_mutex);
139}
140
141static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
142 struct drm_file *file_priv,
143 unsigned int flags, unsigned int color,
144 struct drm_clip_rect *rects,
145 unsigned int num_rects)
146{
147 vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
148
149 return 0;
150}
151
152static const struct drm_framebuffer_funcs vbox_fb_funcs = {
153 .destroy = vbox_user_framebuffer_destroy,
154 .dirty = vbox_user_framebuffer_dirty,
155};
156
157int vbox_framebuffer_init(struct drm_device *dev,
158 struct vbox_framebuffer *vbox_fb,
159 const struct DRM_MODE_FB_CMD *mode_cmd,
160 struct drm_gem_object *obj)
161{
162 int ret;
163
164 drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
165 vbox_fb->obj = obj;
166 ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
167 if (ret) {
168 DRM_ERROR("framebuffer init failed %d\n", ret);
169 return ret;
170 }
171
172 return 0;
173}
174
175static struct drm_framebuffer *vbox_user_framebuffer_create(
176 struct drm_device *dev,
177 struct drm_file *filp,
178 const struct drm_mode_fb_cmd2 *mode_cmd)
179{
180 struct drm_gem_object *obj;
181 struct vbox_framebuffer *vbox_fb;
182 int ret = -ENOMEM;
183
184 obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
185 if (!obj)
186 return ERR_PTR(-ENOENT);
187
188 vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
189 if (!vbox_fb)
190 goto err_unref_obj;
191
192 ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
193 if (ret)
194 goto err_free_vbox_fb;
195
196 return &vbox_fb->base;
197
198err_free_vbox_fb:
199 kfree(vbox_fb);
200err_unref_obj:
201 drm_gem_object_unreference_unlocked(obj);
202 return ERR_PTR(ret);
203}
204
205static const struct drm_mode_config_funcs vbox_mode_funcs = {
206 .fb_create = vbox_user_framebuffer_create,
207};
208
209static int vbox_accel_init(struct vbox_private *vbox)
210{
211 unsigned int i;
212
213 vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
214 sizeof(*vbox->vbva_info), GFP_KERNEL);
215 if (!vbox->vbva_info)
216 return -ENOMEM;
217
218 /* Take a command buffer for each screen from the end of usable VRAM. */
219 vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
220
221 vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
222 vbox->available_vram_size,
223 vbox->num_crtcs *
224 VBVA_MIN_BUFFER_SIZE);
225 if (!vbox->vbva_buffers)
226 return -ENOMEM;
227
228 for (i = 0; i < vbox->num_crtcs; ++i)
229 vbva_setup_buffer_context(&vbox->vbva_info[i],
230 vbox->available_vram_size +
231 i * VBVA_MIN_BUFFER_SIZE,
232 VBVA_MIN_BUFFER_SIZE);
233
234 return 0;
235}
236
237static void vbox_accel_fini(struct vbox_private *vbox)
238{
239 vbox_disable_accel(vbox);
240 pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
241}
242
243/** Do we support the 4.3 plus mode hint reporting interface? */
244static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
245{
246 u32 have_hints, have_cursor;
247 int ret;
248
249 ret = hgsmi_query_conf(vbox->guest_pool,
250 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
251 &have_hints);
252 if (ret)
253 return false;
254
255 ret = hgsmi_query_conf(vbox->guest_pool,
256 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
257 &have_cursor);
258 if (ret)
259 return false;
260
261 return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
262}
263
264static bool vbox_check_supported(u16 id)
265{
266 u16 dispi_id;
267
268 vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
269 dispi_id = inw(VBE_DISPI_IOPORT_DATA);
270
271 return dispi_id == id;
272}
273
274/**
275 * Set up our heaps and data exchange buffers in VRAM before handing the rest
276 * to the memory manager.
277 */
278static int vbox_hw_init(struct vbox_private *vbox)
279{
280 int ret = -ENOMEM;
281
282 vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
283 vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
284
285 DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
286
287 /* Map guest-heap at end of vram */
288 vbox->guest_heap =
289 pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
290 GUEST_HEAP_SIZE);
291 if (!vbox->guest_heap)
292 return -ENOMEM;
293
 294 /* Create the guest-heap mem-pool, using 2^4 = 16 byte chunks */
295 vbox->guest_pool = gen_pool_create(4, -1);
296 if (!vbox->guest_pool)
297 goto err_unmap_guest_heap;
298
299 ret = gen_pool_add_virt(vbox->guest_pool,
300 (unsigned long)vbox->guest_heap,
301 GUEST_HEAP_OFFSET(vbox),
302 GUEST_HEAP_USABLE_SIZE, -1);
303 if (ret)
304 goto err_destroy_guest_pool;
305
306 ret = hgsmi_test_query_conf(vbox->guest_pool);
307 if (ret) {
308 DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
309 goto err_destroy_guest_pool;
310 }
311
312 /* Reduce available VRAM size to reflect the guest heap. */
313 vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
 314 /* Linux drm can handle at most 32 monitors, hence the clamp below. */
315 hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
316 &vbox->num_crtcs);
317 vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
318
319 if (!have_hgsmi_mode_hints(vbox)) {
320 ret = -ENOTSUPP;
321 goto err_destroy_guest_pool;
322 }
323
324 vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
325 sizeof(struct vbva_modehint),
326 GFP_KERNEL);
327 if (!vbox->last_mode_hints) {
328 ret = -ENOMEM;
329 goto err_destroy_guest_pool;
330 }
331
332 ret = vbox_accel_init(vbox);
333 if (ret)
334 goto err_destroy_guest_pool;
335
336 return 0;
337
338err_destroy_guest_pool:
339 gen_pool_destroy(vbox->guest_pool);
340err_unmap_guest_heap:
341 pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
342 return ret;
343}
344
345static void vbox_hw_fini(struct vbox_private *vbox)
346{
347 vbox_accel_fini(vbox);
348 gen_pool_destroy(vbox->guest_pool);
349 pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
350}
351
352int vbox_driver_load(struct drm_device *dev, unsigned long flags)
353{
354 struct vbox_private *vbox;
355 int ret = 0;
356
357 if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
358 return -ENODEV;
359
360 vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
361 if (!vbox)
362 return -ENOMEM;
363
364 dev->dev_private = vbox;
365 vbox->dev = dev;
366
367 mutex_init(&vbox->hw_mutex);
368
369 ret = vbox_hw_init(vbox);
370 if (ret)
371 return ret;
372
373 ret = vbox_mm_init(vbox);
374 if (ret)
375 goto err_hw_fini;
376
377 drm_mode_config_init(dev);
378
379 dev->mode_config.funcs = (void *)&vbox_mode_funcs;
380 dev->mode_config.min_width = 64;
381 dev->mode_config.min_height = 64;
382 dev->mode_config.preferred_depth = 24;
383 dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
384 dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
385
386 ret = vbox_mode_init(dev);
387 if (ret)
388 goto err_drm_mode_cleanup;
389
390 ret = vbox_irq_init(vbox);
391 if (ret)
392 goto err_mode_fini;
393
394 ret = vbox_fbdev_init(dev);
395 if (ret)
396 goto err_irq_fini;
397
398 return 0;
399
400err_irq_fini:
401 vbox_irq_fini(vbox);
402err_mode_fini:
403 vbox_mode_fini(dev);
404err_drm_mode_cleanup:
405 drm_mode_config_cleanup(dev);
406 vbox_mm_fini(vbox);
407err_hw_fini:
408 vbox_hw_fini(vbox);
409 return ret;
410}
411
412void vbox_driver_unload(struct drm_device *dev)
413{
414 struct vbox_private *vbox = dev->dev_private;
415
416 vbox_fbdev_fini(dev);
417 vbox_irq_fini(vbox);
418 vbox_mode_fini(dev);
419 drm_mode_config_cleanup(dev);
420 vbox_mm_fini(vbox);
421 vbox_hw_fini(vbox);
422}
423
424/**
425 * @note this is described in the DRM framework documentation. AST does not
426 * have it, but we get an oops on driver unload if it is not present.
427 */
428void vbox_driver_lastclose(struct drm_device *dev)
429{
430 struct vbox_private *vbox = dev->dev_private;
431
432 if (vbox->fbdev)
433 drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
434}
435
436int vbox_gem_create(struct drm_device *dev,
437 u32 size, bool iskernel, struct drm_gem_object **obj)
438{
439 struct vbox_bo *vboxbo;
440 int ret;
441
442 *obj = NULL;
443
444 size = roundup(size, PAGE_SIZE);
445 if (size == 0)
446 return -EINVAL;
447
448 ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
449 if (ret) {
450 if (ret != -ERESTARTSYS)
451 DRM_ERROR("failed to allocate GEM object\n");
452 return ret;
453 }
454
455 *obj = &vboxbo->gem;
456
457 return 0;
458}
459
460int vbox_dumb_create(struct drm_file *file,
461 struct drm_device *dev, struct drm_mode_create_dumb *args)
462{
463 int ret;
464 struct drm_gem_object *gobj;
465 u32 handle;
466
467 args->pitch = args->width * ((args->bpp + 7) / 8);
468 args->size = args->pitch * args->height;
469
470 ret = vbox_gem_create(dev, args->size, false, &gobj);
471 if (ret)
472 return ret;
473
474 ret = drm_gem_handle_create(file, gobj, &handle);
475 drm_gem_object_unreference_unlocked(gobj);
476 if (ret)
477 return ret;
478
479 args->handle = handle;
480
481 return 0;
482}
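
/*
 * Worked example: a 1024x768 dumb buffer at 32 bpp yields
 * pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 = 3 MiB, which
 * vbox_gem_create() rounds up to a whole number of pages.
 */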
483
484static void vbox_bo_unref(struct vbox_bo **bo)
485{
486 struct ttm_buffer_object *tbo;
487
488 if ((*bo) == NULL)
489 return;
490
491 tbo = &((*bo)->bo);
492 ttm_bo_unref(&tbo);
493 if (!tbo)
494 *bo = NULL;
495}
496
497void vbox_gem_free_object(struct drm_gem_object *obj)
498{
499 struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
500
501 vbox_bo_unref(&vbox_bo);
502}
503
504static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
505{
506 return drm_vma_node_offset_addr(&bo->bo.vma_node);
507}
508
509int
510vbox_dumb_mmap_offset(struct drm_file *file,
511 struct drm_device *dev,
512 u32 handle, u64 *offset)
513{
514 struct drm_gem_object *obj;
515 int ret;
516 struct vbox_bo *bo;
517
518 mutex_lock(&dev->struct_mutex);
519 obj = drm_gem_object_lookup(file, handle);
520 if (!obj) {
521 ret = -ENOENT;
522 goto out_unlock;
523 }
524
525 bo = gem_to_vbox_bo(obj);
526 *offset = vbox_bo_mmap_offset(bo);
527
528 drm_gem_object_unreference(obj);
529 ret = 0;
530
531out_unlock:
532 mutex_unlock(&dev->struct_mutex);
533 return ret;
534}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..f2b85f3256fa
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -0,0 +1,877 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_mode.c
4 * Copyright 2012 Red Hat Inc.
5 * Parts based on xf86-video-ast
6 * Copyright (c) 2005 ASPEED Technology Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 */
29/*
30 * Authors: Dave Airlie <airlied@redhat.com>
31 * Michael Thayer <michael.thayer@oracle.com>,
32 * Hans de Goede <hdegoede@redhat.com>
33 */
34#include <linux/export.h>
35#include <drm/drm_crtc_helper.h>
36#include <drm/drm_plane_helper.h>
37
38#include "vbox_drv.h"
39#include "vboxvideo.h"
40#include "hgsmi_channels.h"
41
42static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
43 u32 handle, u32 width, u32 height,
44 s32 hot_x, s32 hot_y);
45static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
46
47/**
48 * Set a graphics mode. Poke any required values into registers, do an HGSMI
49 * mode set and tell the host we support advanced graphics functions.
50 */
51static void vbox_do_modeset(struct drm_crtc *crtc,
52 const struct drm_display_mode *mode)
53{
54 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
55 struct vbox_private *vbox;
56 int width, height, bpp, pitch;
57 unsigned int crtc_id;
58 u16 flags;
59 s32 x_offset, y_offset;
60
61 vbox = crtc->dev->dev_private;
62 width = mode->hdisplay ? mode->hdisplay : 640;
63 height = mode->vdisplay ? mode->vdisplay : 480;
64 crtc_id = vbox_crtc->crtc_id;
65 bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
66 pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
67 x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
68 y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
69
70 /*
71 * This is the old way of setting graphics modes. It assumed one screen
72 * and a frame-buffer at the start of video RAM. On older versions of
73 * VirtualBox, certain parts of the code still assume that the first
74 * screen is programmed this way, so try to fake it.
75 */
76 if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
77 vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
78 vbox_crtc->fb_offset % (bpp / 8) == 0) {
79 vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
80 vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
81 vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
82 vbox_write_ioport(VBE_DISPI_INDEX_BPP,
83 CRTC_FB(crtc)->format->cpp[0] * 8);
84 vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
85 vbox_write_ioport(
86 VBE_DISPI_INDEX_X_OFFSET,
87 vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
88 vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
89 vbox_crtc->fb_offset / pitch + crtc->y);
90 }
91
92 flags = VBVA_SCREEN_F_ACTIVE;
93 flags |= (crtc->enabled && !vbox_crtc->blanked) ?
94 0 : VBVA_SCREEN_F_BLANK;
95 flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
96 hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
97 x_offset, y_offset,
98 crtc->x * bpp / 8 + crtc->y * pitch,
99 pitch, width, height,
100 vbox_crtc->blanked ? 0 : bpp, flags);
101}
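
/*
 * Editor's worked example for the legacy VBE programming above, under
 * assumed values: with fb_offset = 3 MiB (0x300000), pitch = 4096 and
 * bpp = 32, VBE_DISPI_INDEX_VIRT_WIDTH is programmed with
 * pitch * 8 / bpp = 1024, the X offset becomes
 * fb_offset % pitch / bpp * 8 + crtc->x = crtc->x, and the Y offset
 * becomes fb_offset / pitch + crtc->y = 768 + crtc->y, i.e. this
 * frame-buffer starts 768 scanlines into the 1024-pixel-wide virtual
 * desktop.
 */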
102
103static int vbox_set_view(struct drm_crtc *crtc)
104{
105 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
106 struct vbox_private *vbox = crtc->dev->dev_private;
107 struct vbva_infoview *p;
108
109 /*
110 * Tell the host about the view. This design originally targeted the
111 * Windows XP driver architecture and assumed that each screen would
112 * have a dedicated frame buffer with the command buffer following it,
113 * the whole being a "view". The host works out which screen a command
114 * buffer belongs to by checking whether it is in the first view, then
115 * whether it is in the second and so on. The first match wins. We
116 * cheat around this by making the first view be the managed memory
117 * plus the first command buffer, the second the same plus the second
118 * buffer and so on.
119 */
120 p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
121 HGSMI_CH_VBVA, VBVA_INFO_VIEW);
122 if (!p)
123 return -ENOMEM;
124
125 p->view_index = vbox_crtc->crtc_id;
126 p->view_offset = vbox_crtc->fb_offset;
127 p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
128 vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
129 p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
130
131 hgsmi_buffer_submit(vbox->guest_pool, p);
132 hgsmi_buffer_free(vbox->guest_pool, p);
133
134 return 0;
135}
136
137static void vbox_crtc_load_lut(struct drm_crtc *crtc)
138{
139}
140
141static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
142{
143 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
144 struct vbox_private *vbox = crtc->dev->dev_private;
145
146 switch (mode) {
147 case DRM_MODE_DPMS_ON:
148 vbox_crtc->blanked = false;
149 break;
150 case DRM_MODE_DPMS_STANDBY:
151 case DRM_MODE_DPMS_SUSPEND:
152 case DRM_MODE_DPMS_OFF:
153 vbox_crtc->blanked = true;
154 break;
155 }
156
157 mutex_lock(&vbox->hw_mutex);
158 vbox_do_modeset(crtc, &crtc->hwmode);
159 mutex_unlock(&vbox->hw_mutex);
160}
161
162static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
163 const struct drm_display_mode *mode,
164 struct drm_display_mode *adjusted_mode)
165{
166 return true;
167}
168
169/*
170 * Try to map the layout of virtual screens to the range of the input device.
171 * Return true if we need to re-set the crtc modes due to screen offset
172 * changes.
173 */
174static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
175{
176 struct drm_crtc *crtci;
177 struct drm_connector *connectori;
178 struct drm_framebuffer *fb1 = NULL;
179 bool single_framebuffer = true;
180 bool old_single_framebuffer = vbox->single_framebuffer;
181 u16 width = 0, height = 0;
182
183 /*
184 * Are we using an X.Org-style single large frame-buffer for all crtcs?
185 * If so then screen layout can be deduced from the crtc offsets.
186 * Same fall-back if this is the fbdev frame-buffer.
187 */
188 list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
189 if (!fb1) {
190 fb1 = CRTC_FB(crtci);
191 if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
192 break;
193 } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
194 single_framebuffer = false;
195 }
196 }
197 if (single_framebuffer) {
198 list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
199 head) {
200 if (to_vbox_crtc(crtci)->crtc_id != 0)
201 continue;
202
203 vbox->single_framebuffer = true;
204 vbox->input_mapping_width = CRTC_FB(crtci)->width;
205 vbox->input_mapping_height = CRTC_FB(crtci)->height;
206 return old_single_framebuffer !=
207 vbox->single_framebuffer;
208 }
209 }
210 /* Otherwise calculate the total span of all screens. */
211 list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
212 head) {
213 struct vbox_connector *vbox_connector =
214 to_vbox_connector(connectori);
215 struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
216
217 width = max_t(u16, width, vbox_crtc->x_hint +
218 vbox_connector->mode_hint.width);
219 height = max_t(u16, height, vbox_crtc->y_hint +
220 vbox_connector->mode_hint.height);
221 }
222
223 vbox->single_framebuffer = false;
224 vbox->input_mapping_width = width;
225 vbox->input_mapping_height = height;
226
227 return old_single_framebuffer != vbox->single_framebuffer;
228}
229
230static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
231 struct drm_framebuffer *old_fb, int x, int y)
232{
233 struct vbox_private *vbox = crtc->dev->dev_private;
234 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
235 struct drm_gem_object *obj;
236 struct vbox_framebuffer *vbox_fb;
237 struct vbox_bo *bo;
238 int ret;
239 u64 gpu_addr;
240
241 /* Unpin the previous fb. */
242 if (old_fb) {
243 vbox_fb = to_vbox_framebuffer(old_fb);
244 obj = vbox_fb->obj;
245 bo = gem_to_vbox_bo(obj);
246 ret = vbox_bo_reserve(bo, false);
247 if (ret)
248 return ret;
249
250 vbox_bo_unpin(bo);
251 vbox_bo_unreserve(bo);
252 }
253
254 vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
255 obj = vbox_fb->obj;
256 bo = gem_to_vbox_bo(obj);
257
258 ret = vbox_bo_reserve(bo, false);
259 if (ret)
260 return ret;
261
262 ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
263 if (ret) {
264 vbox_bo_unreserve(bo);
265 return ret;
266 }
267
268 if (&vbox->fbdev->afb == vbox_fb)
269 vbox_fbdev_set_base(vbox, gpu_addr);
270 vbox_bo_unreserve(bo);
271
272 /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
273 vbox_crtc->fb_offset = gpu_addr;
274 if (vbox_set_up_input_mapping(vbox)) {
275 struct drm_crtc *crtci;
276
277 list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
278 head) {
279 vbox_set_view(crtc);
280 vbox_do_modeset(crtci, &crtci->mode);
281 }
282 }
283
284 return 0;
285}
286
287static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
288 struct drm_framebuffer *old_fb)
289{
290 return vbox_crtc_do_set_base(crtc, old_fb, x, y);
291}
292
293static int vbox_crtc_mode_set(struct drm_crtc *crtc,
294 struct drm_display_mode *mode,
295 struct drm_display_mode *adjusted_mode,
296 int x, int y, struct drm_framebuffer *old_fb)
297{
298 struct vbox_private *vbox = crtc->dev->dev_private;
299 int ret;
300
301 vbox_crtc_mode_set_base(crtc, x, y, old_fb);
302
303 mutex_lock(&vbox->hw_mutex);
304 ret = vbox_set_view(crtc);
305 if (!ret)
306 vbox_do_modeset(crtc, mode);
307 hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
308 vbox->input_mapping_width,
309 vbox->input_mapping_height);
310 mutex_unlock(&vbox->hw_mutex);
311
312 return ret;
313}
314
315static void vbox_crtc_disable(struct drm_crtc *crtc)
316{
317}
318
319static void vbox_crtc_prepare(struct drm_crtc *crtc)
320{
321}
322
323static void vbox_crtc_commit(struct drm_crtc *crtc)
324{
325}
326
327static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
328 .dpms = vbox_crtc_dpms,
329 .mode_fixup = vbox_crtc_mode_fixup,
330 .mode_set = vbox_crtc_mode_set,
331 /* .mode_set_base = vbox_crtc_mode_set_base, */
332 .disable = vbox_crtc_disable,
333 .load_lut = vbox_crtc_load_lut,
334 .prepare = vbox_crtc_prepare,
335 .commit = vbox_crtc_commit,
336};
337
338static void vbox_crtc_reset(struct drm_crtc *crtc)
339{
340}
341
342static void vbox_crtc_destroy(struct drm_crtc *crtc)
343{
344 drm_crtc_cleanup(crtc);
345 kfree(crtc);
346}
347
348static const struct drm_crtc_funcs vbox_crtc_funcs = {
349 .cursor_move = vbox_cursor_move,
350 .cursor_set2 = vbox_cursor_set2,
351 .reset = vbox_crtc_reset,
352 .set_config = drm_crtc_helper_set_config,
353 /* .gamma_set = vbox_crtc_gamma_set, */
354 .destroy = vbox_crtc_destroy,
355};
356
357static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
358{
359 struct vbox_crtc *vbox_crtc;
360
361 vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
362 if (!vbox_crtc)
363 return NULL;
364
365 vbox_crtc->crtc_id = i;
366
367 drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
368 drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
369 drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
370
371 return vbox_crtc;
372}
373
374static void vbox_encoder_destroy(struct drm_encoder *encoder)
375{
376 drm_encoder_cleanup(encoder);
377 kfree(encoder);
378}
379
380static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
381 *connector)
382{
383 int enc_id = connector->encoder_ids[0];
384
385 /* pick the first encoder id */
386 if (enc_id)
387 return drm_encoder_find(connector->dev, enc_id);
388
389 return NULL;
390}
391
392static const struct drm_encoder_funcs vbox_enc_funcs = {
393 .destroy = vbox_encoder_destroy,
394};
395
396static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
397{
398}
399
400static bool vbox_mode_fixup(struct drm_encoder *encoder,
401 const struct drm_display_mode *mode,
402 struct drm_display_mode *adjusted_mode)
403{
404 return true;
405}
406
407static void vbox_encoder_mode_set(struct drm_encoder *encoder,
408 struct drm_display_mode *mode,
409 struct drm_display_mode *adjusted_mode)
410{
411}
412
413static void vbox_encoder_prepare(struct drm_encoder *encoder)
414{
415}
416
417static void vbox_encoder_commit(struct drm_encoder *encoder)
418{
419}
420
421static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
422 .dpms = vbox_encoder_dpms,
423 .mode_fixup = vbox_mode_fixup,
424 .prepare = vbox_encoder_prepare,
425 .commit = vbox_encoder_commit,
426 .mode_set = vbox_encoder_mode_set,
427};
428
429static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
430 unsigned int i)
431{
432 struct vbox_encoder *vbox_encoder;
433
434 vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
435 if (!vbox_encoder)
436 return NULL;
437
438 drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
439 DRM_MODE_ENCODER_DAC, NULL);
440 drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
441
442 vbox_encoder->base.possible_crtcs = 1 << i;
443 return &vbox_encoder->base;
444}
445
446/**
447 * Generate EDID data with a mode-unique serial number for the virtual
448 * monitor to try to persuade Unity that different modes correspond to
449 * different monitors and it should not try to force the same resolution on
450 * them.
451 */
452static void vbox_set_edid(struct drm_connector *connector, int width,
453 int height)
454{
455 enum { EDID_SIZE = 128 };
456 unsigned char edid[EDID_SIZE] = {
457 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
458 0x58, 0x58, /* manufacturer (VBX) */
459 0x00, 0x00, /* product code */
460 0x00, 0x00, 0x00, 0x00, /* serial number goes here */
461 0x01, /* week of manufacture */
462 0x00, /* year of manufacture */
463 0x01, 0x03, /* EDID version */
464 0x80, /* capabilities - digital */
465 0x00, /* horiz. res in cm, zero for projectors */
466 0x00, /* vert. res in cm */
467 0x78, /* display gamma (120 == 2.2). */
468 0xEE, /* features (standby, suspend, off, RGB, std */
469 /* colour space, preferred timing mode) */
470 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
471 /* chromaticity for standard colour space. */
472 0x00, 0x00, 0x00, /* no default timings */
473 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
474 0x01, 0x01,
475 0x01, 0x01, 0x01, 0x01, /* no standard timings */
476 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
477 0x02, 0x02,
478 /* descriptor block 1 goes below */
479 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
480 /* descriptor block 2, monitor ranges */
481 0x00, 0x00, 0x00, 0xFD, 0x00,
482 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
483 0x20, 0x20,
484 /* 0-200 Hz vertical, 0-200 kHz horizontal, 1000 MHz pixel clock */
485 0x20,
486 /* descriptor block 3, monitor name */
487 0x00, 0x00, 0x00, 0xFC, 0x00,
488 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
489 '\n',
490 /* descriptor block 4: dummy data */
491 0x00, 0x00, 0x00, 0x10, 0x00,
492 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
493 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
494 0x20,
495 0x00, /* number of extensions */
496 0x00 /* checksum goes here */
497 };
498 int clock = (width + 6) * (height + 6) * 60 / 10000;
499 unsigned int i, sum = 0;
500
501 edid[12] = width & 0xff;
502 edid[13] = width >> 8;
503 edid[14] = height & 0xff;
504 edid[15] = height >> 8;
505 edid[54] = clock & 0xff;
506 edid[55] = clock >> 8;
507 edid[56] = width & 0xff;
508 edid[58] = (width >> 4) & 0xf0;
509 edid[59] = height & 0xff;
510 edid[61] = (height >> 4) & 0xf0;
511 for (i = 0; i < EDID_SIZE - 1; ++i)
512 sum += edid[i];
513 edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
514 drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
515}
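
/*
 * Editor's note: EDID detailed timings store the pixel clock in 10 kHz
 * units, which is why the computation above divides by 10000.  For a
 * 1024x768 hint: (1024 + 6) * (768 + 6) * 60 / 10000 = 4783, i.e. a
 * nominal 47.83 MHz clock for 60 Hz refresh with a small blanking margin.
 */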
516
517static int vbox_get_modes(struct drm_connector *connector)
518{
519 struct vbox_connector *vbox_connector = NULL;
520 struct drm_display_mode *mode = NULL;
521 struct vbox_private *vbox = NULL;
522 unsigned int num_modes = 0;
523 int preferred_width, preferred_height;
524
525 vbox_connector = to_vbox_connector(connector);
526 vbox = connector->dev->dev_private;
527 /*
528 * Heuristic: we do not want to tell the host that we support dynamic
529 * resizing unless we feel confident that the user space client using
530 * the video driver can handle hot-plug events. So the first time modes
531 * are queried after a "master" switch we tell the host that we do not,
532 * and immediately after we send the client a hot-plug notification as
533 * a test to see if they will respond and query again.
534 * That is also the reason why capabilities are reported to the host at
535 * this place in the code rather than elsewhere.
536 * We need to report the flags location before reporting the IRQ
537 * capability.
538 */
539 hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
540 HOST_FLAGS_OFFSET);
541 if (vbox_connector->vbox_crtc->crtc_id == 0)
542 vbox_report_caps(vbox);
543 if (!vbox->initial_mode_queried) {
544 if (vbox_connector->vbox_crtc->crtc_id == 0) {
545 vbox->initial_mode_queried = true;
546 vbox_report_hotplug(vbox);
547 }
548 return drm_add_modes_noedid(connector, 800, 600);
549 }
550 num_modes = drm_add_modes_noedid(connector, 2560, 1600);
551 preferred_width = vbox_connector->mode_hint.width ?
552 vbox_connector->mode_hint.width : 1024;
553 preferred_height = vbox_connector->mode_hint.height ?
554 vbox_connector->mode_hint.height : 768;
555 mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
556 60, false, false, false);
557 if (mode) {
558 mode->type |= DRM_MODE_TYPE_PREFERRED;
559 drm_mode_probed_add(connector, mode);
560 ++num_modes;
561 }
562 vbox_set_edid(connector, preferred_width, preferred_height);
563 drm_object_property_set_value(
564 &connector->base, vbox->dev->mode_config.suggested_x_property,
565 vbox_connector->vbox_crtc->x_hint);
566 drm_object_property_set_value(
567 &connector->base, vbox->dev->mode_config.suggested_y_property,
568 vbox_connector->vbox_crtc->y_hint);
569
570 return num_modes;
571}
572
573static int vbox_mode_valid(struct drm_connector *connector,
574 struct drm_display_mode *mode)
575{
576 return MODE_OK;
577}
578
579static void vbox_connector_destroy(struct drm_connector *connector)
580{
581 struct vbox_connector *vbox_connector;
582
583 vbox_connector = to_vbox_connector(connector);
584 drm_connector_unregister(connector);
585 drm_connector_cleanup(connector);
586 kfree(connector);
587}
588
589static enum drm_connector_status
590vbox_connector_detect(struct drm_connector *connector, bool force)
591{
592 struct vbox_connector *vbox_connector;
593
594 vbox_connector = to_vbox_connector(connector);
595
596 return vbox_connector->mode_hint.disconnected ?
597 connector_status_disconnected : connector_status_connected;
598}
599
600static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
601 u32 max_y)
602{
603 struct vbox_connector *vbox_connector;
604 struct drm_device *dev;
605 struct drm_display_mode *mode, *iterator;
606
607 vbox_connector = to_vbox_connector(connector);
608 dev = vbox_connector->base.dev;
609 list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
610 list_del(&mode->head);
611 drm_mode_destroy(dev, mode);
612 }
613
614 return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
615}
616
617static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
618 .mode_valid = vbox_mode_valid,
619 .get_modes = vbox_get_modes,
620 .best_encoder = vbox_best_single_encoder,
621};
622
623static const struct drm_connector_funcs vbox_connector_funcs = {
624 .dpms = drm_helper_connector_dpms,
625 .detect = vbox_connector_detect,
626 .fill_modes = vbox_fill_modes,
627 .destroy = vbox_connector_destroy,
628};
629
630static int vbox_connector_init(struct drm_device *dev,
631 struct vbox_crtc *vbox_crtc,
632 struct drm_encoder *encoder)
633{
634 struct vbox_connector *vbox_connector;
635 struct drm_connector *connector;
636
637 vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
638 if (!vbox_connector)
639 return -ENOMEM;
640
641 connector = &vbox_connector->base;
642 vbox_connector->vbox_crtc = vbox_crtc;
643
644 drm_connector_init(dev, connector, &vbox_connector_funcs,
645 DRM_MODE_CONNECTOR_VGA);
646 drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
647
648 connector->interlace_allowed = 0;
649 connector->doublescan_allowed = 0;
650
651 drm_mode_create_suggested_offset_properties(dev);
652 drm_object_attach_property(&connector->base,
653 dev->mode_config.suggested_x_property, -1);
654 drm_object_attach_property(&connector->base,
655 dev->mode_config.suggested_y_property, -1);
656 drm_connector_register(connector);
657
658 drm_mode_connector_attach_encoder(connector, encoder);
659
660 return 0;
661}
662
663int vbox_mode_init(struct drm_device *dev)
664{
665 struct vbox_private *vbox = dev->dev_private;
666 struct drm_encoder *encoder;
667 struct vbox_crtc *vbox_crtc;
668 unsigned int i;
669 int ret;
670
671 /* vbox_cursor_init(dev); */
672 for (i = 0; i < vbox->num_crtcs; ++i) {
673 vbox_crtc = vbox_crtc_init(dev, i);
674 if (!vbox_crtc)
675 return -ENOMEM;
676 encoder = vbox_encoder_init(dev, i);
677 if (!encoder)
678 return -ENOMEM;
679 ret = vbox_connector_init(dev, vbox_crtc, encoder);
680 if (ret)
681 return ret;
682 }
683
684 return 0;
685}
686
687void vbox_mode_fini(struct drm_device *dev)
688{
689 /* vbox_cursor_fini(dev); */
690}
691
692/**
693 * Copy the ARGB image and generate the mask, which is needed in case the host
694 * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
695 * if the corresponding alpha value in the ARGB image is greater than 0xF0.
696 */
697static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
698 size_t mask_size)
699{
700 size_t line_size = (width + 7) / 8;
701 u32 i, j;
702
703 memcpy(dst + mask_size, src, width * height * 4);
704 for (i = 0; i < height; ++i)
705 for (j = 0; j < width; ++j)
706 if (((u32 *)src)[i * width + j] > 0xf0000000)
707 dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
708}
709
710static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
711 u32 handle, u32 width, u32 height,
712 s32 hot_x, s32 hot_y)
713{
714 struct vbox_private *vbox = crtc->dev->dev_private;
715 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
716 struct ttm_bo_kmap_obj uobj_map;
717 size_t data_size, mask_size;
718 struct drm_gem_object *obj;
719 u32 flags, caps = 0;
720 struct vbox_bo *bo;
721 bool src_isiomem;
722 u8 *dst = NULL;
723 u8 *src;
724 int ret;
725
726 /*
727 * Re-set this regularly as in 5.0.20 and earlier the information was
728 * lost on save and restore.
729 */
730 hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
731 vbox->input_mapping_width,
732 vbox->input_mapping_height);
733 if (!handle) {
734 bool cursor_enabled = false;
735 struct drm_crtc *crtci;
736
737 /* Hide cursor. */
738 vbox_crtc->cursor_enabled = false;
739 list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
740 head) {
741 if (to_vbox_crtc(crtci)->cursor_enabled)
742 cursor_enabled = true;
743 }
744
745 if (!cursor_enabled)
746 hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
747 0, 0, NULL, 0);
748 return 0;
749 }
750
751 vbox_crtc->cursor_enabled = true;
752
753 if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
754 width == 0 || height == 0)
755 return -EINVAL;
756
757 ret = hgsmi_query_conf(vbox->guest_pool,
758 VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
759 if (ret)
760 return ret;
761
762 if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
763 /*
764 * -EINVAL means cursor_set2() not supported, -EAGAIN means
765 * retry at once.
766 */
767 return -EBUSY;
768 }
769
770 obj = drm_gem_object_lookup(file_priv, handle);
771 if (!obj) {
772 DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
773 return -ENOENT;
774 }
775
776 bo = gem_to_vbox_bo(obj);
777 ret = vbox_bo_reserve(bo, false);
778 if (ret)
779 goto out_unref_obj;
780
781 /*
782 * The mask must be calculated based on the alpha
783 * channel, one bit per ARGB word, and must be 32-bit
784 * padded.
785 */
786 mask_size = ((width + 7) / 8 * height + 3) & ~3;
787 data_size = width * height * 4 + mask_size;
788 vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
789 vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
790 vbox->cursor_width = width;
791 vbox->cursor_height = height;
792 vbox->cursor_data_size = data_size;
793 dst = vbox->cursor_data;
794
795 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
796 if (ret) {
797 vbox->cursor_data_size = 0;
798 goto out_unreserve_bo;
799 }
800
801 src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
802 if (src_isiomem) {
803 DRM_ERROR("src cursor bo not in main memory\n");
804 ret = -EIO;
805 goto out_unmap_bo;
806 }
807
808 copy_cursor_image(src, dst, width, height, mask_size);
809
810 flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
811 VBOX_MOUSE_POINTER_ALPHA;
812 ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
813 vbox->cursor_hot_x, vbox->cursor_hot_y,
814 width, height, dst, data_size);
815out_unmap_bo:
816 ttm_bo_kunmap(&uobj_map);
817out_unreserve_bo:
818 vbox_bo_unreserve(bo);
819out_unref_obj:
820 drm_gem_object_unreference_unlocked(obj);
821
822 return ret;
823}
824
825static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
826{
827 struct vbox_private *vbox = crtc->dev->dev_private;
828 u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
829 VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
830 s32 crtc_x =
831 vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
832 s32 crtc_y =
833 vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
834 u32 host_x, host_y;
835 u32 hot_x = 0;
836 u32 hot_y = 0;
837 int ret;
838
839 /*
840 * We compare these to unsigned later and don't
841 * need to handle negative.
842 */
843 if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
844 return 0;
845
846 ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
847 y + crtc_y, &host_x, &host_y);
848
849 /*
850 * The only reason we have vbox_cursor_move() is that some older clients
851 * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
852 * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
853 *
854 * However, VirtualBox 5.0.20 and earlier have a bug causing them to return
855 * 0,0 as the host cursor location after a save and restore.
856 *
857 * To work around this we ignore a 0, 0 return, since missing the odd
858 * time when it legitimately happens is not going to hurt much.
859 */
860 if (ret || (host_x == 0 && host_y == 0))
861 return ret;
862
863 if (x + crtc_x < host_x)
864 hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
865 if (y + crtc_y < host_y)
866 hot_y = min(host_y - y - crtc_y, vbox->cursor_height);
867
868 if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
869 return 0;
870
871 vbox->cursor_hot_x = hot_x;
872 vbox->cursor_hot_y = hot_y;
873
874 return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
875 hot_x, hot_y, vbox->cursor_width, vbox->cursor_height,
876 vbox->cursor_data, vbox->cursor_data_size);
877}
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/staging/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..b7453e427a1d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_prime.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright (C) 2017 Oracle Corporation
3 * Copyright 2017 Canonical
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Andreas Pokorny
24 */
25
26#include "vbox_drv.h"
27
28/*
29 * Based on qxl_prime.c:
30 * Empty implementations, as there should not be any other driver for a
31 * virtual device that might share buffers with vboxvideo.
32 */
33
34int vbox_gem_prime_pin(struct drm_gem_object *obj)
35{
36 WARN_ONCE(1, "not implemented");
37 return -ENOSYS;
38}
39
40void vbox_gem_prime_unpin(struct drm_gem_object *obj)
41{
42 WARN_ONCE(1, "not implemented");
43}
44
45struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
46{
47 WARN_ONCE(1, "not implemented");
48 return ERR_PTR(-ENOSYS);
49}
50
51struct drm_gem_object *vbox_gem_prime_import_sg_table(
52 struct drm_device *dev, struct dma_buf_attachment *attach,
53 struct sg_table *table)
54{
55 WARN_ONCE(1, "not implemented");
56 return ERR_PTR(-ENOSYS);
57}
58
59void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
60{
61 WARN_ONCE(1, "not implemented");
62 return ERR_PTR(-ENOSYS);
63}
64
65void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
66{
67 WARN_ONCE(1, "not implemented");
68}
69
70int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
71{
72 WARN_ONCE(1, "not implemented");
73 return -ENOSYS;
74}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..34a905d40735
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -0,0 +1,472 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_ttm.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 *
27 * Authors: Dave Airlie <airlied@redhat.com>
28 * Michael Thayer <michael.thayer@oracle.com>
29 */
30#include "vbox_drv.h"
31#include <ttm/ttm_page_alloc.h>
32
33static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
34{
35 return container_of(bd, struct vbox_private, ttm.bdev);
36}
37
38static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
39{
40 return ttm_mem_global_init(ref->object);
41}
42
43static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
44{
45 ttm_mem_global_release(ref->object);
46}
47
48/**
49 * Adds the vbox memory manager object/structures to the global memory manager.
50 */
51static int vbox_ttm_global_init(struct vbox_private *vbox)
52{
53 struct drm_global_reference *global_ref;
54 int ret;
55
56 global_ref = &vbox->ttm.mem_global_ref;
57 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
58 global_ref->size = sizeof(struct ttm_mem_global);
59 global_ref->init = &vbox_ttm_mem_global_init;
60 global_ref->release = &vbox_ttm_mem_global_release;
61 ret = drm_global_item_ref(global_ref);
62 if (ret) {
63 DRM_ERROR("Failed setting up TTM memory subsystem.\n");
64 return ret;
65 }
66
67 vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
68 global_ref = &vbox->ttm.bo_global_ref.ref;
69 global_ref->global_type = DRM_GLOBAL_TTM_BO;
70 global_ref->size = sizeof(struct ttm_bo_global);
71 global_ref->init = &ttm_bo_global_init;
72 global_ref->release = &ttm_bo_global_release;
73
74 ret = drm_global_item_ref(global_ref);
75 if (ret) {
76 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
77 drm_global_item_unref(&vbox->ttm.mem_global_ref);
78 return ret;
79 }
80
81 return 0;
82}
83
84/**
85 * Removes the vbox memory manager object from the global memory manager.
86 */
87static void vbox_ttm_global_release(struct vbox_private *vbox)
88{
89 drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
90 drm_global_item_unref(&vbox->ttm.mem_global_ref);
91}
92
93static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
94{
95 struct vbox_bo *bo;
96
97 bo = container_of(tbo, struct vbox_bo, bo);
98
99 drm_gem_object_release(&bo->gem);
100 kfree(bo);
101}
102
103static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
104{
105 if (bo->destroy == &vbox_bo_ttm_destroy)
106 return true;
107
108 return false;
109}
110
111static int
112vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
113 struct ttm_mem_type_manager *man)
114{
115 switch (type) {
116 case TTM_PL_SYSTEM:
117 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
118 man->available_caching = TTM_PL_MASK_CACHING;
119 man->default_caching = TTM_PL_FLAG_CACHED;
120 break;
121 case TTM_PL_VRAM:
122 man->func = &ttm_bo_manager_func;
123 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
124 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
125 man->default_caching = TTM_PL_FLAG_WC;
126 break;
127 default:
128 DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
129 return -EINVAL;
130 }
131
132 return 0;
133}
134
135static void
136vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
137{
138 struct vbox_bo *vboxbo = vbox_bo(bo);
139
140 if (!vbox_ttm_bo_is_vbox_bo(bo))
141 return;
142
143 vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
144 *pl = vboxbo->placement;
145}
146
147static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
148 struct file *filp)
149{
150 return 0;
151}
152
153static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
154 struct ttm_mem_reg *mem)
155{
156 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
157 struct vbox_private *vbox = vbox_bdev(bdev);
158
159 mem->bus.addr = NULL;
160 mem->bus.offset = 0;
161 mem->bus.size = mem->num_pages << PAGE_SHIFT;
162 mem->bus.base = 0;
163 mem->bus.is_iomem = false;
164 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
165 return -EINVAL;
166 switch (mem->mem_type) {
167 case TTM_PL_SYSTEM:
168 /* system memory */
169 return 0;
170 case TTM_PL_VRAM:
171 mem->bus.offset = mem->start << PAGE_SHIFT;
172 mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
173 mem->bus.is_iomem = true;
174 break;
175 default:
176 return -EINVAL;
177 }
178 return 0;
179}
180
181static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
182 struct ttm_mem_reg *mem)
183{
184}
185
186static int vbox_bo_move(struct ttm_buffer_object *bo,
187 bool evict, bool interruptible,
188 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
189{
190 return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
191}
192
193static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
194{
195 ttm_tt_fini(tt);
196 kfree(tt);
197}
198
199static struct ttm_backend_func vbox_tt_backend_func = {
200 .destroy = &vbox_ttm_backend_destroy,
201};
202
203static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
204 unsigned long size,
205 u32 page_flags,
206 struct page *dummy_read_page)
207{
208 struct ttm_tt *tt;
209
210 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
211 if (!tt)
212 return NULL;
213
214 tt->func = &vbox_tt_backend_func;
215 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
216 kfree(tt);
217 return NULL;
218 }
219
220 return tt;
221}
222
223static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
224{
225 return ttm_pool_populate(ttm);
226}
227
228static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
229{
230 ttm_pool_unpopulate(ttm);
231}
232
233struct ttm_bo_driver vbox_bo_driver = {
234 .ttm_tt_create = vbox_ttm_tt_create,
235 .ttm_tt_populate = vbox_ttm_tt_populate,
236 .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
237 .init_mem_type = vbox_bo_init_mem_type,
238 .eviction_valuable = ttm_bo_eviction_valuable,
239 .evict_flags = vbox_bo_evict_flags,
240 .move = vbox_bo_move,
241 .verify_access = vbox_bo_verify_access,
242 .io_mem_reserve = &vbox_ttm_io_mem_reserve,
243 .io_mem_free = &vbox_ttm_io_mem_free,
244 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
245};
246
247int vbox_mm_init(struct vbox_private *vbox)
248{
249 int ret;
250 struct drm_device *dev = vbox->dev;
251 struct ttm_bo_device *bdev = &vbox->ttm.bdev;
252
253 ret = vbox_ttm_global_init(vbox);
254 if (ret)
255 return ret;
256
257 ret = ttm_bo_device_init(&vbox->ttm.bdev,
258 vbox->ttm.bo_global_ref.ref.object,
259 &vbox_bo_driver,
260 dev->anon_inode->i_mapping,
261 DRM_FILE_PAGE_OFFSET, true);
262 if (ret) {
263 DRM_ERROR("Error initialising bo driver; %d\n", ret);
264 goto err_ttm_global_release;
265 }
266
267 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
268 vbox->available_vram_size >> PAGE_SHIFT);
269 if (ret) {
270 DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
271 goto err_device_release;
272 }
273
274#ifdef DRM_MTRR_WC
275 vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
276 pci_resource_len(dev->pdev, 0),
277 DRM_MTRR_WC);
278#else
279 vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
280 pci_resource_len(dev->pdev, 0));
281#endif
282 return 0;
283
284err_device_release:
285 ttm_bo_device_release(&vbox->ttm.bdev);
286err_ttm_global_release:
287 vbox_ttm_global_release(vbox);
288 return ret;
289}
290
291void vbox_mm_fini(struct vbox_private *vbox)
292{
293#ifdef DRM_MTRR_WC
294 drm_mtrr_del(vbox->fb_mtrr,
295 pci_resource_start(vbox->dev->pdev, 0),
296 pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
297#else
298 arch_phys_wc_del(vbox->fb_mtrr);
299#endif
300 ttm_bo_device_release(&vbox->ttm.bdev);
301 vbox_ttm_global_release(vbox);
302}
303
304void vbox_ttm_placement(struct vbox_bo *bo, int domain)
305{
306 unsigned int i;
307 u32 c = 0;
308
309 bo->placement.placement = bo->placements;
310 bo->placement.busy_placement = bo->placements;
311
312 if (domain & TTM_PL_FLAG_VRAM)
313 bo->placements[c++].flags =
314 TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
315 if (domain & TTM_PL_FLAG_SYSTEM)
316 bo->placements[c++].flags =
317 TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
318 if (!c)
319 bo->placements[c++].flags =
320 TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
321
322 bo->placement.num_placement = c;
323 bo->placement.num_busy_placement = c;
324
325 for (i = 0; i < c; ++i) {
326 bo->placements[i].fpfn = 0;
327 bo->placements[i].lpfn = 0;
328 }
329}
330
331int vbox_bo_create(struct drm_device *dev, int size, int align,
332 u32 flags, struct vbox_bo **pvboxbo)
333{
334 struct vbox_private *vbox = dev->dev_private;
335 struct vbox_bo *vboxbo;
336 size_t acc_size;
337 int ret;
338
339 vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
340 if (!vboxbo)
341 return -ENOMEM;
342
343 ret = drm_gem_object_init(dev, &vboxbo->gem, size);
344 if (ret)
345 goto err_free_vboxbo;
346
347 vboxbo->bo.bdev = &vbox->ttm.bdev;
348
349 vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
350
351 acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
352 sizeof(struct vbox_bo));
353
354 ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
355 ttm_bo_type_device, &vboxbo->placement,
356 align >> PAGE_SHIFT, false, NULL, acc_size,
357 NULL, NULL, vbox_bo_ttm_destroy);
358 if (ret)
359 goto err_free_vboxbo;
360
361 *pvboxbo = vboxbo;
362
363 return 0;
364
365err_free_vboxbo:
366 kfree(vboxbo);
367 return ret;
368}
369
370static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
371{
372 return bo->bo.offset;
373}
374
375int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
376{
377 int i, ret;
378
379 if (bo->pin_count) {
380 bo->pin_count++;
381 if (gpu_addr)
382 *gpu_addr = vbox_bo_gpu_offset(bo);
383
384 return 0;
385 }
386
387 vbox_ttm_placement(bo, pl_flag);
388
389 for (i = 0; i < bo->placement.num_placement; i++)
390 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
391
392 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
393 if (ret)
394 return ret;
395
396 bo->pin_count = 1;
397
398 if (gpu_addr)
399 *gpu_addr = vbox_bo_gpu_offset(bo);
400
401 return 0;
402}
403
404int vbox_bo_unpin(struct vbox_bo *bo)
405{
406 int i, ret;
407
408 if (!bo->pin_count) {
409 DRM_ERROR("unpin bad %p\n", bo);
410 return 0;
411 }
412 bo->pin_count--;
413 if (bo->pin_count)
414 return 0;
415
416 for (i = 0; i < bo->placement.num_placement; i++)
417 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
418
419 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
420 if (ret)
421 return ret;
422
423 return 0;
424}
425
426/*
427 * Move a vbox-owned buffer object to system memory if no one else has it
428 * pinned. The caller must have pinned it previously, and this call will
429 * release the caller's pin.
430 */
431int vbox_bo_push_sysram(struct vbox_bo *bo)
432{
433 int i, ret;
434
435 if (!bo->pin_count) {
436 DRM_ERROR("unpin bad %p\n", bo);
437 return 0;
438 }
439 bo->pin_count--;
440 if (bo->pin_count)
441 return 0;
442
443 if (bo->kmap.virtual)
444 ttm_bo_kunmap(&bo->kmap);
445
446 vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
447
448 for (i = 0; i < bo->placement.num_placement; i++)
449 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
450
451 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
452 if (ret) {
453 DRM_ERROR("pushing to VRAM failed\n");
454 return ret;
455 }
456
457 return 0;
458}
459
460int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
461{
462 struct drm_file *file_priv;
463 struct vbox_private *vbox;
464
465 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
466 return -EINVAL;
467
468 file_priv = filp->private_data;
469 vbox = file_priv->minor->dev->dev_private;
470
471 return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
472}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/staging/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..d835d75d761c
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo.h
@@ -0,0 +1,491 @@
1/*
2 * Copyright (C) 2006-2016 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 */
23
24#ifndef __VBOXVIDEO_H__
25#define __VBOXVIDEO_H__
26
27/*
28 * This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in
29 * src/VBox/Main/xml/VirtualBox-settings-common.xsd
30 */
31#define VBOX_VIDEO_MAX_SCREENS 64
32
33/*
34 * The last 4096 bytes of the guest VRAM contains the generic info for all
35 * DualView chunks: sizes and offsets of chunks. This is filled by miniport.
36 *
37 * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
38 * etc. This is used exclusively by the corresponding instance of a display
39 * driver.
40 *
41 * The VRAM layout:
42 * Last 4096 bytes - Adapter information area.
43 * 4096 bytes aligned miniport heap (value specified in the config rounded up).
44 * Slack - what is left after dividing the VRAM.
45 * 4096 bytes aligned framebuffers:
46 * last 4096 bytes of each framebuffer is the display information area.
47 *
48 * The Virtual Graphics Adapter information in the guest VRAM is stored by the
49 * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
50 *
51 * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
52 * the host starts to process the info. The first element at the start of
53 * the 4096 bytes region should normally be a LINK that points to the
54 * actual information chain. That way the guest driver can have a
55 * fixed layout of the information memory block and just rewrite
56 * the link to point to the relevant memory chain.
57 *
58 * The processing stops at the END element.
59 *
60 * The host can access the memory only when the port IO is processed.
61 * All data that will be needed later must be copied from these 4096 bytes.
62 * But other VRAM can be used by host until the mode is disabled.
63 *
64 * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
65 * to disable the mode.
66 *
67 * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
68 * from the host and issue commands to the host.
69 *
70 * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
71 * following operations with the VBE data register can be performed:
72 *
73 * Operation Result
74 * write 16 bit value NOP
75 * read 16 bit value count of monitors
76 * write 32 bit value set the vbox cmd value and the cmd processed by the host
77 * read 32 bit value result of the last vbox command is returned
78 */
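
/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * operation table above expressed as code.  The port numbers follow the
 * usual Bochs VBE convention (index 0x01CE, data 0x01CF) and, like the
 * use of raw outw()/inw() from <linux/io.h>, are assumptions here;
 * VBE_DISPI_INDEX_VBOX_VIDEO comes from the driver's VBE header and the
 * driver itself goes through its vbox_write_ioport() helper instead.
 */
static inline u16 vbox_video_monitor_count_sketch(void)
{
	/* Select the VBox video register ... */
	outw(VBE_DISPI_INDEX_VBOX_VIDEO, 0x01CE);
	/* ... then a 16-bit read of the data register returns the count. */
	return inw(0x01CF);
}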
79
80/**
81 * VBVA command header.
82 *
83 * @todo Where does this fit in?
84 */
85struct vbva_cmd_hdr {
86 /** Coordinates of affected rectangle. */
87 s16 x;
88 s16 y;
89 u16 w;
90 u16 h;
91} __packed;
92
93/** @name VBVA ring defines.
94 *
95 * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
96 * data, for example big bitmaps which do not fit into the buffer.
97 *
98 * Guest starts writing to the buffer by initializing a record entry in the
99 * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
100 * written. As data is written to the ring buffer, the guest increases
101 * free_offset.
102 *
103 * The host reads the records on flushes and processes all completed records.
104 * When the host encounters a situation where only a partial record is
105 * present and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
106 * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data written so far
107 * and updates data_offset. After that, on each flush the host continues
108 * fetching the data until the record is completed.
109 *
110 */
111#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
112#define VBVA_RING_BUFFER_THRESHOLD (4096)
113
114#define VBVA_MAX_RECORDS (64)
115
116#define VBVA_F_MODE_ENABLED 0x00000001u
117#define VBVA_F_MODE_VRDP 0x00000002u
118#define VBVA_F_MODE_VRDP_RESET 0x00000004u
119#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
120
121#define VBVA_F_STATE_PROCESSING 0x00010000u
122
123#define VBVA_F_RECORD_PARTIAL 0x80000000u
124
125/**
126 * VBVA record.
127 */
128struct vbva_record {
129 /** The length of the record. Changed by guest. */
130 u32 len_and_flags;
131} __packed;
132
133/*
134 * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
135 * the runtime heapsimple API. Use a minimum of 2 pages here, because the info
136 * area may also contain other data (for example the hgsmi_host_flags structure).
137 */
138#define VBVA_ADAPTER_INFORMATION_SIZE 65536
139#define VBVA_MIN_BUFFER_SIZE 65536
140
141/* The value for port IO to make the adapter stop interpreting the adapter memory. */
142#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
143
144/* The value for port IO to let the adapter interpret the adapter memory. */
145#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
146
147/* The value for port IO to let the adapter interpret the display memory.
148 * The display number is encoded in the low 16 bits.
149 */
150#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
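
/*
 * Editor's worked example: to have the adapter interpret display 2's
 * memory, the guest writes VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE | 2,
 * i.e. 0x00010002, as the 32-bit command value.
 */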
151
152struct vbva_host_flags {
153 u32 host_events;
154 u32 supported_orders;
155} __packed;
156
157struct vbva_buffer {
158 struct vbva_host_flags host_flags;
159
160 /* The offset where the data start in the buffer. */
161 u32 data_offset;
162 /* The offset where next data must be placed in the buffer. */
163 u32 free_offset;
164
165 /* The queue of record descriptions. */
166 struct vbva_record records[VBVA_MAX_RECORDS];
167 u32 record_first_index;
168 u32 record_free_index;
169
170 /* Space to leave free when large partial records are transferred. */
171 u32 partial_write_tresh;
172
173 u32 data_len;
174 /* variable size for the rest of the vbva_buffer area in VRAM. */
175 u8 data[0];
176} __packed;
177
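/*
 * Illustrative sketch (editor's addition): appending one record to the
 * ring described in the "VBVA ring defines" comment above.  Wrap-around
 * bookkeeping and the flush that tells the host to look are omitted, and
 * "vbva" is assumed to point at the mapped vbva_buffer in VRAM.
 */
static inline void vbva_write_record_sketch(struct vbva_buffer *vbva,
					    const u8 *src, u32 len)
{
	struct vbva_record *rec =
		&vbva->records[vbva->record_free_index % VBVA_MAX_RECORDS];
	u32 i;

	/* Mark the record as partial before any payload is written. */
	rec->len_and_flags = VBVA_F_RECORD_PARTIAL;

	/* Copy the payload, advancing free_offset modulo the data area. */
	for (i = 0; i < len; i++) {
		vbva->data[vbva->free_offset] = src[i];
		vbva->free_offset = (vbva->free_offset + 1) % vbva->data_len;
	}

	/* Publish the final length; clearing the partial bit completes it. */
	rec->len_and_flags = len;
	vbva->record_free_index++;
}
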
178#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
179
180/* guest->host commands */
181#define VBVA_QUERY_CONF32 1
182#define VBVA_SET_CONF32 2
183#define VBVA_INFO_VIEW 3
184#define VBVA_INFO_HEAP 4
185#define VBVA_FLUSH 5
186#define VBVA_INFO_SCREEN 6
187#define VBVA_ENABLE 7
188#define VBVA_MOUSE_POINTER_SHAPE 8
189/* informs host about HGSMI caps. see vbva_caps below */
190#define VBVA_INFO_CAPS 12
191/* configures scanline, see VBVASCANLINECFG below */
192#define VBVA_SCANLINE_CFG 13
193/* requests scanline info, see VBVASCANLINEINFO below */
194#define VBVA_SCANLINE_INFO 14
195/* inform host about VBVA Command submission */
196#define VBVA_CMDVBVA_SUBMIT 16
197/* inform host about VBVA command flush */
198#define VBVA_CMDVBVA_FLUSH 17
199/* G->H DMA command */
200#define VBVA_CMDVBVA_CTL 18
201/* Query most recent mode hints sent */
202#define VBVA_QUERY_MODE_HINTS 19
203/**
204 * Report the guest virtual desktop position and size for mapping host and
205 * guest pointer positions.
206 */
207#define VBVA_REPORT_INPUT_MAPPING 20
208/** Report the guest cursor position and query the host position. */
209#define VBVA_CURSOR_POSITION 21
210
211/* host->guest commands */
212#define VBVAHG_EVENT 1
213#define VBVAHG_DISPLAY_CUSTOM 2
214
215/* vbva_conf32::index */
216#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
217#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
218/**
219 * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
220 * Set value to VERR_NOT_SUPPORTED before calling.
221 */
222#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
223/**
224 * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
225 * VBVA. Set value to VERR_NOT_SUPPORTED before calling.
226 */
227#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
228/**
229 * Returns the currently available host cursor capabilities. Available if
230 * vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
231 * @see VMMDevReqMouseStatus::mouseFeatures.
232 */
233#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
234/** Returns the supported flags in vbva_infoscreen::flags. */
235#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
236/** Returns the max size of VBVA record. */
237#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
238
239struct vbva_conf32 {
240 u32 index;
241 u32 value;
242} __packed;
243
244/** Reserved for historical reasons. */
245#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
246/**
247 * Guest cursor capability: can the host show a hardware cursor at the host
248 * pointer location?
249 */
250#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
251/** Reserved for historical reasons. */
252#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
253/** Reserved for historical reasons. Must always be unset. */
254#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
255/** Reserved for historical reasons. */
256#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
257/** Reserved for historical reasons. */
258#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
259
260struct vbva_infoview {
261 /* Index of the screen, assigned by the guest. */
262 u32 view_index;
263
264 /* The screen offset in VRAM, the framebuffer starts here. */
265 u32 view_offset;
266
267 /* The size of the VRAM memory that can be used for the view. */
268 u32 view_size;
269
270 /* The recommended maximum size of the VRAM memory for the screen. */
271 u32 max_screen_size;
272} __packed;
273
274struct vbva_flush {
275 u32 reserved;
276} __packed;
277
278/* vbva_infoscreen::flags */
279#define VBVA_SCREEN_F_NONE 0x0000
280#define VBVA_SCREEN_F_ACTIVE 0x0001
281/**
282 * The virtual monitor has been disabled by the guest and should be removed
283 * by the host and ignored for purposes of pointer position calculation.
284 */
285#define VBVA_SCREEN_F_DISABLED 0x0002
286/**
287 * The virtual monitor has been blanked by the guest and should be blacked
288 * out by the host using width, height, etc values from the vbva_infoscreen
289 * request.
290 */
291#define VBVA_SCREEN_F_BLANK 0x0004
292/**
293 * The virtual monitor has been blanked by the guest and should be blacked
294 * out by the host using the previous mode values for width, height, etc.
295 */
296#define VBVA_SCREEN_F_BLANK2 0x0008
297
298struct vbva_infoscreen {
299 /* Which view contains the screen. */
300 u32 view_index;
301
302 /* Physical X origin relative to the primary screen. */
303 s32 origin_x;
304
305 /* Physical Y origin relative to the primary screen. */
306 s32 origin_y;
307
308 /* Offset of visible framebuffer relative to the framebuffer start. */
309 u32 start_offset;
310
311 /* The scan line size in bytes. */
312 u32 line_size;
313
314 /* Width of the screen. */
315 u32 width;
316
317 /* Height of the screen. */
318 u32 height;
319
320 /* Color depth. */
321 u16 bits_per_pixel;
322
323 /* VBVA_SCREEN_F_* */
324 u16 flags;
325} __packed;
326
327/* vbva_enable::flags */
328#define VBVA_F_NONE 0x00000000
329#define VBVA_F_ENABLE 0x00000001
330#define VBVA_F_DISABLE 0x00000002
331/* extended VBVA to be used with WDDM */
332#define VBVA_F_EXTENDED 0x00000004
333/* vbva offset is absolute VRAM offset */
334#define VBVA_F_ABSOFFSET 0x00000008
335
336struct vbva_enable {
337 u32 flags;
338 u32 offset;
339 s32 result;
340} __packed;
341
342struct vbva_enable_ex {
343 struct vbva_enable base;
344 u32 screen_id;
345} __packed;
346
347struct vbva_mouse_pointer_shape {
348 /* The host result. */
349 s32 result;
350
351 /* VBOX_MOUSE_POINTER_* bit flags. */
352 u32 flags;
353
354 /* X coordinate of the hot spot. */
355 u32 hot_x;
356
357 /* Y coordinate of the hot spot. */
358 u32 hot_y;
359
360 /* Width of the pointer in pixels. */
361 u32 width;
362
363 /* Height of the pointer in scanlines. */
364 u32 height;
365
366 /* Pointer data.
367 *
368 ****
369 * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
370 * mask.
371 *
372 * For pointers without alpha channel the XOR mask pixels are 32 bit
373 * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
374 * consists of (lsb)BGRA(msb) 32 bit values.
375 *
376 * The guest driver must create the AND mask for pointers with an alpha
377 * channel, so that if the host does not support alpha the pointer can be
378 * displayed as a normal color pointer. The AND mask can be constructed from
379 * the alpha values: for example an alpha value >= 0xf0 means bit 0 in the AND mask.
380 *
381 * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
382 * mask, therefore, is and_len = (width + 7) / 8 * height. The padding
383 * bits at the end of any scanline are undefined.
384 *
385 * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
386 * u8 *xor = and + ((and_len + 3) & ~3)
387 * Bytes in the gap between the AND and the XOR mask are undefined.
388 * XOR mask scanlines have no gap between them and size of XOR mask is:
389 * xor_len = width * 4 * height.
390 ****
391 *
392 * Preallocate 4 bytes for accessing actual data as p->data.
393 */
394 u8 data[4];
395} __packed;
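
The mask layout in the comment above reduces to two formulas. A minimal
sketch of that arithmetic; the helper name and standalone context are
illustrative, not part of the driver:

	/* Sketch only: offsets within vbva_mouse_pointer_shape::data. */
	static inline u32 vbva_pointer_xor_offset(u32 width, u32 height)
	{
		/* 1 bpp AND mask with byte-aligned scanlines */
		u32 and_len = ((width + 7) / 8) * height;

		/* XOR mask starts at the next 4-byte aligned offset */
		return (and_len + 3) & ~3U;
	}
	/* The XOR mask then occupies width * 4 * height bytes (BGR0/BGRA). */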
396
397/**
398 * @name vbva_mouse_pointer_shape::flags
399 * @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver;
400 * the values must be <= 0x8000 and must not be changed. (Try to make more
401 * sense of this, please.)
402 * @{
403 */
404
405/** pointer is visible */
406#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
407/** pointer has alpha channel */
408#define VBOX_MOUSE_POINTER_ALPHA 0x0002
409/** pointerData contains new pointer shape */
410#define VBOX_MOUSE_POINTER_SHAPE 0x0004
411
412/** @} */
413
414/*
415 * The guest driver can handle asynchronous guest command completion by
416 * reading the command offset from an I/O port.
417 */
418#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
419/* the guest driver can handle video adapter IRQs */
420#define VBVACAPS_IRQ 0x00000002
421/** The guest can read video mode hints sent via VBVA. */
422#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
423/** The guest can switch to a software cursor on demand. */
424#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
425/** The guest does not depend on host handling the VBE registers. */
426#define VBVACAPS_USE_VBVA_ONLY 0x00000010
427
428struct vbva_caps {
429 s32 rc;
430 u32 caps;
431} __packed;
432
433/** Query the most recent mode hints received from the host. */
434struct vbva_query_mode_hints {
435 /** The maximum number of screens to return hints for. */
436 u16 hints_queried_count;
437 /** The size of the mode hint structures directly following this one. */
438 u16 hint_structure_guest_size;
439 /** Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
440 s32 rc;
441} __packed;
442
443/**
444 * Structure in which a mode hint is returned. The guest allocates an array
445 * of these immediately after the vbva_query_mode_hints structure.
446 * To accommodate future extensions, the vbva_query_mode_hints structure
447 * specifies the size of the vbva_modehint structures allocated by the guest,
448 * and the host only fills out structure elements which fit into that size. The
449 * host should fill any unused members (e.g. dx, dy) or structure space at the
450 * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
451 */
452struct vbva_modehint {
453 u32 magic;
454 u32 cx;
455 u32 cy;
456 u32 bpp; /* Which has never been used... */
457 u32 display;
458 u32 dx; /**< X offset into the virtual frame-buffer. */
459 u32 dy; /**< Y offset into the virtual frame-buffer. */
460 u32 enabled; /* Not flags. Add new members for new flags. */
461} __packed;
462
463#define VBVAMODEHINT_MAGIC 0x0801add9u
464
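Per the layout just described, the guest sizes a single buffer for the
header plus the hint array. A hedged sketch of that sizing and
initialisation; the function name and the plain kzalloc() are assumptions
for illustration (the driver actually allocates through its HGSMI buffer
helpers):

	/* Illustrative only: request header followed by the hint array. */
	static struct vbva_query_mode_hints *alloc_mode_hints(unsigned int screens)
	{
		size_t len = sizeof(struct vbva_query_mode_hints) +
			     screens * sizeof(struct vbva_modehint);
		struct vbva_query_mode_hints *req = kzalloc(len, GFP_KERNEL);

		if (!req)
			return NULL;
		req->hints_queried_count = screens;
		req->hint_structure_guest_size = sizeof(struct vbva_modehint);
		req->rc = VERR_NOT_SUPPORTED;
		/* After submission, a hint of all ~0 marks a skipped screen. */
		return req;
	}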
465/**
466 * Report the rectangle relative to which absolute pointer events should be
467 * expressed. This information remains valid until the next VBVA resize event
468 * for any screen, at which time it is reset to the bounding rectangle of all
469 * virtual screens and must be re-set.
470 * @see VBVA_REPORT_INPUT_MAPPING.
471 */
472struct vbva_report_input_mapping {
473 s32 x; /**< Upper left X co-ordinate relative to the first screen. */
474 s32 y; /**< Upper left Y co-ordinate relative to the first screen. */
475 u32 cx; /**< Rectangle width. */
476 u32 cy; /**< Rectangle height. */
477} __packed;
478
479/**
480 * Report the guest cursor position and query the host one. The host may wish
481 * to use the guest information to re-position its own cursor (though this is
482 * currently unlikely).
483 * @see VBVA_CURSOR_POSITION
484 */
485struct vbva_cursor_position {
486 u32 report_position; /**< Are we reporting a position? */
487 u32 x; /**< Guest cursor X position */
488 u32 y; /**< Guest cursor Y position */
489} __packed;
490
491#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/staging/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..d09da841711a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VBOXVIDEO_GUEST_H__
24#define __VBOXVIDEO_GUEST_H__
25
26#include <linux/genalloc.h>
27#include "vboxvideo.h"
28
29/**
30 * Structure grouping the context needed for sending graphics acceleration
31 * information to the host via VBVA. Each screen has its own VBVA buffer.
32 */
33struct vbva_buf_ctx {
34 /** Offset of the buffer in the VRAM section for the screen */
35 u32 buffer_offset;
36 /** Length of the buffer in bytes */
37 u32 buffer_length;
38 /** Set if we wrote to the buffer faster than the host could read it */
39 bool buffer_overflow;
40 /** VBVA record that we are currently preparing for the host, or NULL */
41 struct vbva_record *record;
42 /**
43 * Pointer to the VBVA buffer mapped into the current address space.
44 * Will be NULL if VBVA is not enabled.
45 */
46 struct vbva_buffer *vbva;
47};
48
49/**
50 * @name Base HGSMI APIs
51 * @{
52 */
53int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
54int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
55int hgsmi_test_query_conf(struct gen_pool *ctx);
56int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
57int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
58 u32 hot_x, u32 hot_y, u32 width, u32 height,
59 u8 *pixels, u32 len);
60int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
61 u32 x, u32 y, u32 *x_host, u32 *y_host);
62/** @} */
63
64/**
65 * @name VBVA APIs
66 * @{
67 */
68bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
69 struct vbva_buffer *vbva, s32 screen);
70void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
71 s32 screen);
72bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
73 struct gen_pool *ctx);
74void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
75bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
76 const void *p, u32 len);
77void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
78 u32 buffer_offset, u32 buffer_length);
79/** @} */
80
81/**
82 * @name Modesetting APIs
83 * @{
84 */
85void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
86 s32 origin_x, s32 origin_y, u32 start_offset,
87 u32 pitch, u32 width, u32 height,
88 u16 bpp, u16 flags);
89int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
90 u32 width, u32 height);
91int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
92 struct vbva_modehint *hints);
93/** @} */
94
95#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/staging/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..f842f4d9c80a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VBOXVIDEO_VBE_H__
24#define __VBOXVIDEO_VBE_H__
25
26/* GUEST <-> HOST Communication API */
27
28/**
29 * @todo FIXME: Either dynamically ask the host for this or put it somewhere
30 * high in physical memory, like 0xE0000000.
31 */
32
33#define VBE_DISPI_BANK_ADDRESS 0xA0000
34#define VBE_DISPI_BANK_SIZE_KB 64
35
36#define VBE_DISPI_MAX_XRES 16384
37#define VBE_DISPI_MAX_YRES 16384
38#define VBE_DISPI_MAX_BPP 32
39
40#define VBE_DISPI_IOPORT_INDEX 0x01CE
41#define VBE_DISPI_IOPORT_DATA 0x01CF
42
43#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
44#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
45
46#define VBE_DISPI_INDEX_ID 0x0
47#define VBE_DISPI_INDEX_XRES 0x1
48#define VBE_DISPI_INDEX_YRES 0x2
49#define VBE_DISPI_INDEX_BPP 0x3
50#define VBE_DISPI_INDEX_ENABLE 0x4
51#define VBE_DISPI_INDEX_BANK 0x5
52#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
53#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
54#define VBE_DISPI_INDEX_X_OFFSET 0x8
55#define VBE_DISPI_INDEX_Y_OFFSET 0x9
56#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
57#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
58
59#define VBE_DISPI_ID0 0xB0C0
60#define VBE_DISPI_ID1 0xB0C1
61#define VBE_DISPI_ID2 0xB0C2
62#define VBE_DISPI_ID3 0xB0C3
63#define VBE_DISPI_ID4 0xB0C4
64
65#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
66/* The VBox interface ID. Indicates support for the VBVA shared memory interface. */
67#define VBE_DISPI_ID_HGSMI 0xBE01
68#define VBE_DISPI_ID_ANYX 0xBE02
69
70#define VBE_DISPI_DISABLED 0x00
71#define VBE_DISPI_ENABLED 0x01
72#define VBE_DISPI_GETCAPS 0x02
73#define VBE_DISPI_8BIT_DAC 0x20
74/**
75 * @note This definition is a Bochs legacy, used only in the video BIOS
76 * code and ignored by the emulated hardware.
77 */
78#define VBE_DISPI_LFB_ENABLED 0x40
79#define VBE_DISPI_NOCLEARMEM 0x80
80
81#define VGA_PORT_HGSMI_HOST 0x3b0
82#define VGA_PORT_HGSMI_GUEST 0x3d0
83
84#endif
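
The INDEX/DATA pair above implies the usual Bochs DISPI access pattern:
write a register index to VBE_DISPI_IOPORT_INDEX, then read or write the
16-bit value through VBE_DISPI_IOPORT_DATA. A minimal sketch, with helper
names that are assumptions rather than driver code:

	/* Assumes outw()/inw() from <asm/io.h>. */
	static void vbe_dispi_write(u16 index, u16 value)
	{
		outw(index, VBE_DISPI_IOPORT_INDEX);
		outw(value, VBE_DISPI_IOPORT_DATA);
	}

	static u16 vbe_dispi_read(u16 index)
	{
		outw(index, VBE_DISPI_IOPORT_INDEX);
		return inw(VBE_DISPI_IOPORT_DATA);
	}

For example, writing VBE_DISPI_ID_HGSMI to VBE_DISPI_INDEX_ID and reading
it back is one way a guest can probe whether the host supports the HGSMI
interface.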
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/staging/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..c10c782f94e1
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbva_base.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vbox_drv.h"
24#include "vbox_err.h"
25#include "vboxvideo_guest.h"
26#include "hgsmi_channels.h"
27
28/*
29 * There is a hardware ring buffer in the graphics device video RAM, formerly
30 * in the VBox VMMDev PCI memory space.
31 * All graphics commands go there, serialized by vbva_buffer_begin_update
32 * and vbva_buffer_end_update.
33 *
34 * free_offset is the write position and data_offset is the read position;
35 * free_offset == data_offset means the buffer is empty.
36 * There must always be a gap between data_offset and free_offset when
37 * data are in the buffer.
38 * The guest only changes free_offset; the host only changes data_offset.
39 */
40
41static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
42{
43 s32 diff = vbva->data_offset - vbva->free_offset;
44
45 return diff > 0 ? diff : vbva->data_len + diff;
46}
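
A worked example of the wrap-around arithmetic, using invented numbers:

	/*
	 * Illustration: data_len = 100, data_offset = 10, free_offset = 90
	 * gives diff = -80, so 100 + (-80) = 20 bytes are free: offsets
	 * 90..99 plus 0..9, leaving data_offset itself unwritten as the
	 * mandatory gap.
	 */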
47
48static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
49 const void *p, u32 len, u32 offset)
50{
51 struct vbva_buffer *vbva = vbva_ctx->vbva;
52 u32 bytes_till_boundary = vbva->data_len - offset;
53 u8 *dst = &vbva->data[offset];
54 s32 diff = len - bytes_till_boundary;
55
56 if (diff <= 0) {
57 /* Chunk will not cross buffer boundary. */
58 memcpy(dst, p, len);
59 } else {
60 /* Chunk crosses buffer boundary. */
61 memcpy(dst, p, bytes_till_boundary);
62 memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
63 }
64}
65
66static void vbva_buffer_flush(struct gen_pool *ctx)
67{
68 struct vbva_flush *p;
69
70 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
71 if (!p)
72 return;
73
74 p->reserved = 0;
75
76 hgsmi_buffer_submit(ctx, p);
77 hgsmi_buffer_free(ctx, p);
78}
79
80bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
81 const void *p, u32 len)
82{
83 struct vbva_record *record;
84 struct vbva_buffer *vbva;
85 u32 available;
86
87 vbva = vbva_ctx->vbva;
88 record = vbva_ctx->record;
89
90 if (!vbva || vbva_ctx->buffer_overflow ||
91 !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
92 return false;
93
94 available = vbva_buffer_available(vbva);
95
96 while (len > 0) {
97 u32 chunk = len;
98
99 if (chunk >= available) {
100 vbva_buffer_flush(ctx);
101 available = vbva_buffer_available(vbva);
102 }
103
104 if (chunk >= available) {
105 if (WARN_ON(available <= vbva->partial_write_tresh)) {
106 vbva_ctx->buffer_overflow = true;
107 return false;
108 }
109 chunk = available - vbva->partial_write_tresh;
110 }
111
112 vbva_buffer_place_data_at(vbva_ctx, p, chunk,
113 vbva->free_offset);
114
115 vbva->free_offset = (vbva->free_offset + chunk) %
116 vbva->data_len;
117 record->len_and_flags += chunk;
118 available -= chunk;
119 len -= chunk;
120 p += chunk;
121 }
122
123 return true;
124}
125
126static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
127 struct gen_pool *ctx, s32 screen, bool enable)
128{
129 struct vbva_enable_ex *p;
130 bool ret;
131
132 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
133 if (!p)
134 return false;
135
136 p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
137 p->base.offset = vbva_ctx->buffer_offset;
138 p->base.result = VERR_NOT_SUPPORTED;
139 if (screen >= 0) {
140 p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
141 p->screen_id = screen;
142 }
143
144 hgsmi_buffer_submit(ctx, p);
145
146 if (enable)
147 ret = RT_SUCCESS(p->base.result);
148 else
149 ret = true;
150
151 hgsmi_buffer_free(ctx, p);
152
153 return ret;
154}
155
156bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
157 struct vbva_buffer *vbva, s32 screen)
158{
159 bool ret = false;
160
161 memset(vbva, 0, sizeof(*vbva));
162 vbva->partial_write_tresh = 256;
163 vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
164 vbva_ctx->vbva = vbva;
165
166 ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
167 if (!ret)
168 vbva_disable(vbva_ctx, ctx, screen);
169
170 return ret;
171}
172
173void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
174 s32 screen)
175{
176 vbva_ctx->buffer_overflow = false;
177 vbva_ctx->record = NULL;
178 vbva_ctx->vbva = NULL;
179
180 vbva_inform_host(vbva_ctx, ctx, screen, false);
181}
182
183bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
184 struct gen_pool *ctx)
185{
186 struct vbva_record *record;
187 u32 next;
188
189 if (!vbva_ctx->vbva ||
190 !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
191 return false;
192
193 WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
194
195 next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
196
197 /* Flush if all slots in the records queue are used */
198 if (next == vbva_ctx->vbva->record_first_index)
199 vbva_buffer_flush(ctx);
200
201 /* If even after flush there is no place then fail the request */
202 if (next == vbva_ctx->vbva->record_first_index)
203 return false;
204
205 record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
206 record->len_and_flags = VBVA_F_RECORD_PARTIAL;
207 vbva_ctx->vbva->record_free_index = next;
208 /* Remember which record we are using. */
209 vbva_ctx->record = record;
210
211 return true;
212}
213
214void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
215{
216 struct vbva_record *record = vbva_ctx->record;
217
218 WARN_ON(!vbva_ctx->vbva || !record ||
219 !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
220
221 /* Mark the record completed. */
222 record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
223
224 vbva_ctx->buffer_overflow = false;
225 vbva_ctx->record = NULL;
226}
227
228void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
229 u32 buffer_offset, u32 buffer_length)
230{
231 vbva_ctx->buffer_offset = buffer_offset;
232 vbva_ctx->buffer_length = buffer_length;
233}
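
Taken together, the exported functions above follow a begin/write/end
pattern. A hedged sketch of a caller; the function and parameter names are
illustrative, not actual call sites in the driver:

	static void example_submit(struct vbva_buf_ctx *vbva_ctx,
				   struct gen_pool *guest_pool,
				   const void *cmd, u32 len)
	{
		/* Fails if VBVA is disabled or no record slot is free. */
		if (!vbva_buffer_begin_update(vbva_ctx, guest_pool))
			return;

		/* May flush to the host and wrap around internally. */
		vbva_write(vbva_ctx, guest_pool, cmd, len);

		vbva_buffer_end_update(vbva_ctx);
	}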
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 030bec855d86..314ffac50bb8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
3391 struct device_node *fw_node; 3391 struct device_node *fw_node;
3392 struct rpi_firmware *fw; 3392 struct rpi_firmware *fw;
3393 int err; 3393 int err;
3394 void *ptr_err;
3395 3394
3396 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0); 3395 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
3397 if (!fw_node) { 3396 if (!fw_node) {
@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
3427 3426
3428 /* create sysfs entries */ 3427 /* create sysfs entries */
3429 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME); 3428 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3430 ptr_err = vchiq_class; 3429 err = PTR_ERR(vchiq_class);
3431 if (IS_ERR(ptr_err)) 3430 if (IS_ERR(vchiq_class))
3432 goto failed_class_create; 3431 goto failed_class_create;
3433 3432
3434 vchiq_dev = device_create(vchiq_class, NULL, 3433 vchiq_dev = device_create(vchiq_class, NULL,
3435 vchiq_devid, NULL, "vchiq"); 3434 vchiq_devid, NULL, "vchiq");
3436 ptr_err = vchiq_dev; 3435 err = PTR_ERR(vchiq_dev);
3437 if (IS_ERR(ptr_err)) 3436 if (IS_ERR(vchiq_dev))
3438 goto failed_device_create; 3437 goto failed_device_create;
3439 3438
3440 /* create debugfs entries */ 3439 /* create debugfs entries */
@@ -3455,7 +3454,6 @@ failed_device_create:
3455 class_destroy(vchiq_class); 3454 class_destroy(vchiq_class);
3456failed_class_create: 3455failed_class_create:
3457 cdev_del(&vchiq_cdev); 3456 cdev_del(&vchiq_cdev);
3458 err = PTR_ERR(ptr_err);
3459failed_cdev_add: 3457failed_cdev_add:
3460 unregister_chrdev_region(vchiq_devid, 1); 3458 unregister_chrdev_region(vchiq_devid, 1);
3461failed_platform_init: 3459failed_platform_init:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index e583dd8a418b..d4fa41be80f9 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1510 1510
1511 if (!cnp) { 1511 if (!cnp) {
1512 pr_info("%s stid %d lookup failure\n", __func__, stid); 1512 pr_info("%s stid %d lookup failure\n", __func__, stid);
1513 return; 1513 goto rel_skb;
1514 } 1514 }
1515 1515
1516 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); 1516 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1517 cxgbit_put_cnp(cnp); 1517 cxgbit_put_cnp(cnp);
1518rel_skb:
1519 __kfree_skb(skb);
1518} 1520}
1519 1521
1520static void 1522static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1530 1532
1531 if (!cnp) { 1533 if (!cnp) {
1532 pr_info("%s stid %d lookup failure\n", __func__, stid); 1534 pr_info("%s stid %d lookup failure\n", __func__, stid);
1533 return; 1535 goto rel_skb;
1534 } 1536 }
1535 1537
1536 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status); 1538 cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1537 cxgbit_put_cnp(cnp); 1539 cxgbit_put_cnp(cnp);
1540rel_skb:
1541 __kfree_skb(skb);
1538} 1542}
1539 1543
1540static void 1544static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1819 struct tid_info *t = lldi->tids; 1823 struct tid_info *t = lldi->tids;
1820 1824
1821 csk = lookup_tid(t, tid); 1825 csk = lookup_tid(t, tid);
1822 if (unlikely(!csk)) 1826 if (unlikely(!csk)) {
1823 pr_err("can't find connection for tid %u.\n", tid); 1827 pr_err("can't find connection for tid %u.\n", tid);
1824 else 1828 goto rel_skb;
1829 } else {
1825 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status); 1830 cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1831 }
1826 1832
1827 cxgbit_put_csk(csk); 1833 cxgbit_put_csk(csk);
1834rel_skb:
1835 __kfree_skb(skb);
1828} 1836}
1829 1837
1830static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb) 1838static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index dda13f1af38e..514986b57c2d 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
827 827
828static void 828static void
829cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg, 829cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
830 unsigned int nents) 830 unsigned int nents, u32 skip)
831{ 831{
832 struct skb_seq_state st; 832 struct skb_seq_state st;
833 const u8 *buf; 833 const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
846 } 846 }
847 847
848 consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf, 848 consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
849 buf_len, consumed); 849 buf_len, skip + consumed);
850 } 850 }
851} 851}
852 852
@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
912 struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0]; 912 struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
913 u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE)); 913 u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
914 914
915 cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents); 915 cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
916 } 916 }
917 917
918 cmd->write_data_done += pdu_cb->dlen; 918 cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
1069 cmd->se_cmd.data_length); 1069 cmd->se_cmd.data_length);
1070 1070
1071 if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) { 1071 if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
1072 u32 skip = data_offset % PAGE_SIZE;
1073
1072 sg_off = data_offset / PAGE_SIZE; 1074 sg_off = data_offset / PAGE_SIZE;
1073 sg_start = &cmd->se_cmd.t_data_sg[sg_off]; 1075 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1074 sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE)); 1076 sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));
1075 1077
1076 cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents); 1078 cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
1077 } 1079 }
1078 1080
1079check_payload: 1081check_payload:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 74e4975dd1b1..5001261f5d69 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
418 return 0; 418 return 0;
419 } 419 }
420 np->np_thread_state = ISCSI_NP_THREAD_RESET; 420 np->np_thread_state = ISCSI_NP_THREAD_RESET;
421 atomic_inc(&np->np_reset_count);
421 422
422 if (np->np_thread) { 423 if (np->np_thread) {
423 spin_unlock_bh(&np->np_thread_lock); 424 spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2167 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); 2168 cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
2168 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 2169 cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
2169 cmd->data_direction = DMA_NONE; 2170 cmd->data_direction = DMA_NONE;
2171 kfree(cmd->text_in_ptr);
2170 cmd->text_in_ptr = NULL; 2172 cmd->text_in_ptr = NULL;
2171 2173
2172 return 0; 2174 return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
3487 return text_length; 3489 return text_length;
3488 3490
3489 if (completed) { 3491 if (completed) {
3490 hdr->flags |= ISCSI_FLAG_CMD_FINAL; 3492 hdr->flags = ISCSI_FLAG_CMD_FINAL;
3491 } else { 3493 } else {
3492 hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE; 3494 hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
3493 cmd->read_data_done += text_length; 3495 cmd->read_data_done += text_length;
3494 if (cmd->targ_xfer_tag == 0xFFFFFFFF) 3496 if (cmd->targ_xfer_tag == 0xFFFFFFFF)
3495 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); 3497 cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index e9bdc8b86e7d..dc13afbd4c88 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1243 flush_signals(current); 1243 flush_signals(current);
1244 1244
1245 spin_lock_bh(&np->np_thread_lock); 1245 spin_lock_bh(&np->np_thread_lock);
1246 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1246 if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
1247 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1247 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1248 spin_unlock_bh(&np->np_thread_lock);
1248 complete(&np->np_restart_comp); 1249 complete(&np->np_restart_comp);
1250 return 1;
1249 } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) { 1251 } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
1250 spin_unlock_bh(&np->np_thread_lock); 1252 spin_unlock_bh(&np->np_thread_lock);
1251 goto exit; 1253 goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1278 goto exit; 1280 goto exit;
1279 } else if (rc < 0) { 1281 } else if (rc < 0) {
1280 spin_lock_bh(&np->np_thread_lock); 1282 spin_lock_bh(&np->np_thread_lock);
1281 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1283 if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
1284 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1282 spin_unlock_bh(&np->np_thread_lock); 1285 spin_unlock_bh(&np->np_thread_lock);
1283 complete(&np->np_restart_comp); 1286 complete(&np->np_restart_comp);
1284 iscsit_put_transport(conn->conn_transport); 1287 iscsit_put_transport(conn->conn_transport);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 36913734c6bc..02e8a5d86658 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
364 mutex_lock(&tpg->acl_node_mutex); 364 mutex_lock(&tpg->acl_node_mutex);
365 if (acl->dynamic_node_acl) 365 if (acl->dynamic_node_acl)
366 acl->dynamic_node_acl = 0; 366 acl->dynamic_node_acl = 0;
367 list_del(&acl->acl_list); 367 list_del_init(&acl->acl_list);
368 mutex_unlock(&tpg->acl_node_mutex); 368 mutex_unlock(&tpg->acl_node_mutex);
369 369
370 target_shutdown_sessions(acl); 370 target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
548 * in transport_deregister_session(). 548 * in transport_deregister_session().
549 */ 549 */
550 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) { 550 list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
551 list_del(&nacl->acl_list); 551 list_del_init(&nacl->acl_list);
552 552
553 core_tpg_wait_for_nacl_pr_ref(nacl); 553 core_tpg_wait_for_nacl_pr_ref(nacl);
554 core_free_device_list_for_node(nacl, se_tpg); 554 core_free_device_list_for_node(nacl, se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 97fed9a298bd..836d552b0385 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
466 } 466 }
467 467
468 mutex_lock(&se_tpg->acl_node_mutex); 468 mutex_lock(&se_tpg->acl_node_mutex);
469 list_del(&nacl->acl_list); 469 list_del_init(&nacl->acl_list);
470 mutex_unlock(&se_tpg->acl_node_mutex); 470 mutex_unlock(&se_tpg->acl_node_mutex);
471 471
472 core_tpg_wait_for_nacl_pr_ref(nacl); 472 core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
538 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); 538 spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
539 539
540 if (se_nacl->dynamic_stop) 540 if (se_nacl->dynamic_stop)
541 list_del(&se_nacl->acl_list); 541 list_del_init(&se_nacl->acl_list);
542 } 542 }
543 mutex_unlock(&se_tpg->acl_node_mutex); 543 mutex_unlock(&se_tpg->acl_node_mutex);
544 544
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 80ee130f8253..942d094269fb 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
563 block_remaining); 563 block_remaining);
564 to_offset = get_block_offset_user(udev, dbi, 564 to_offset = get_block_offset_user(udev, dbi,
565 block_remaining); 565 block_remaining);
566 offset = DATA_BLOCK_SIZE - block_remaining;
567 to += offset;
568 566
569 if (*iov_cnt != 0 && 567 if (*iov_cnt != 0 &&
570 to_offset == iov_tail(*iov)) { 568 to_offset == iov_tail(*iov)) {
@@ -575,8 +573,10 @@ static int scatter_data_area(struct tcmu_dev *udev,
575 (*iov)->iov_len = copy_bytes; 573 (*iov)->iov_len = copy_bytes;
576 } 574 }
577 if (copy_data) { 575 if (copy_data) {
578 memcpy(to, from + sg->length - sg_remaining, 576 offset = DATA_BLOCK_SIZE - block_remaining;
579 copy_bytes); 577 memcpy(to + offset,
578 from + sg->length - sg_remaining,
579 copy_bytes);
580 tcmu_flush_dcache_range(to, copy_bytes); 580 tcmu_flush_dcache_range(to, copy_bytes);
581 } 581 }
582 sg_remaining -= copy_bytes; 582 sg_remaining -= copy_bytes;
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
637 copy_bytes = min_t(size_t, sg_remaining, 637 copy_bytes = min_t(size_t, sg_remaining,
638 block_remaining); 638 block_remaining);
639 offset = DATA_BLOCK_SIZE - block_remaining; 639 offset = DATA_BLOCK_SIZE - block_remaining;
640 from += offset;
641 tcmu_flush_dcache_range(from, copy_bytes); 640 tcmu_flush_dcache_range(from, copy_bytes);
642 memcpy(to + sg->length - sg_remaining, from, 641 memcpy(to + sg->length - sg_remaining, from + offset,
643 copy_bytes); 642 copy_bytes);
644 643
645 sg_remaining -= copy_bytes; 644 sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
1433 if (udev->dev_config[0]) 1432 if (udev->dev_config[0])
1434 snprintf(str + used, size - used, "/%s", udev->dev_config); 1433 snprintf(str + used, size - used, "/%s", udev->dev_config);
1435 1434
1435 /* If the old string exists, free it */
1436 kfree(info->name);
1436 info->name = str; 1437 info->name = str;
1437 1438
1438 return 0; 1439 return 0;
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 308b6e17c88a..fe2f00ceafc5 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -333,6 +333,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw,
333 int res; 333 int res;
334 enum tb_port_type type; 334 enum tb_port_type type;
335 335
336 /*
337 * Some DROMs list more ports than the controller actually has
338 * so we skip those but allow the parser to continue.
339 */
340 if (header->index > sw->config.max_port_number) {
341 dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
342 return 0;
343 }
344
336 port = &sw->ports[header->index]; 345 port = &sw->ports[header->index];
337 port->disabled = header->port_disabled; 346 port->disabled = header->port_disabled;
338 if (port->disabled) 347 if (port->disabled)
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8ee340290219..bdaac1ff00a5 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -904,7 +904,14 @@ static int icm_driver_ready(struct tb *tb)
904 904
905static int icm_suspend(struct tb *tb) 905static int icm_suspend(struct tb *tb)
906{ 906{
907 return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); 907 int ret;
908
909 ret = nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
910 if (ret)
911 tb_info(tb, "Ignoring mailbox command error (%d) in %s\n",
912 ret, __func__);
913
914 return 0;
908} 915}
909 916
910/* 917/*
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ab3e8f410444..e9391bbd4036 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -30,7 +30,7 @@ static DEFINE_IDA(nvm_ida);
30 30
31struct nvm_auth_status { 31struct nvm_auth_status {
32 struct list_head list; 32 struct list_head list;
33 uuid_be uuid; 33 uuid_t uuid;
34 u32 status; 34 u32 status;
35}; 35};
36 36
@@ -47,7 +47,7 @@ static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
47 struct nvm_auth_status *st; 47 struct nvm_auth_status *st;
48 48
49 list_for_each_entry(st, &nvm_auth_status_cache, list) { 49 list_for_each_entry(st, &nvm_auth_status_cache, list) {
50 if (!uuid_be_cmp(st->uuid, *sw->uuid)) 50 if (uuid_equal(&st->uuid, sw->uuid))
51 return st; 51 return st;
52 } 52 }
53 53
@@ -281,9 +281,11 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
281 if (active) { 281 if (active) {
282 config.name = "nvm_active"; 282 config.name = "nvm_active";
283 config.reg_read = tb_switch_nvm_read; 283 config.reg_read = tb_switch_nvm_read;
284 config.read_only = true;
284 } else { 285 } else {
285 config.name = "nvm_non_active"; 286 config.name = "nvm_non_active";
286 config.reg_write = tb_switch_nvm_write; 287 config.reg_write = tb_switch_nvm_write;
288 config.root_only = true;
287 } 289 }
288 290
289 config.id = id; 291 config.id = id;
@@ -292,7 +294,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
292 config.size = size; 294 config.size = size;
293 config.dev = &sw->dev; 295 config.dev = &sw->dev;
294 config.owner = THIS_MODULE; 296 config.owner = THIS_MODULE;
295 config.root_only = true;
296 config.priv = sw; 297 config.priv = sw;
297 298
298 return nvmem_register(&config); 299 return nvmem_register(&config);
@@ -1460,7 +1461,7 @@ struct tb_sw_lookup {
1460 struct tb *tb; 1461 struct tb *tb;
1461 u8 link; 1462 u8 link;
1462 u8 depth; 1463 u8 depth;
1463 const uuid_be *uuid; 1464 const uuid_t *uuid;
1464}; 1465};
1465 1466
1466static int tb_switch_match(struct device *dev, void *data) 1467static int tb_switch_match(struct device *dev, void *data)
@@ -1517,7 +1518,7 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
1517 * Returned switch has reference count increased so the caller needs to 1518 * Returned switch has reference count increased so the caller needs to
1518 * call tb_switch_put() when done with the switch. 1519 * call tb_switch_put() when done with the switch.
1519 */ 1520 */
1520struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid) 1521struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
1521{ 1522{
1522 struct tb_sw_lookup lookup; 1523 struct tb_sw_lookup lookup;
1523 struct device *dev; 1524 struct device *dev;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 3d9f64676e58..e0deee4f1eb0 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -101,7 +101,7 @@ struct tb_switch {
101 struct tb_dma_port *dma_port; 101 struct tb_dma_port *dma_port;
102 struct tb *tb; 102 struct tb *tb;
103 u64 uid; 103 u64 uid;
104 uuid_be *uuid; 104 uuid_t *uuid;
105 u16 vendor; 105 u16 vendor;
106 u16 device; 106 u16 device;
107 const char *vendor_name; 107 const char *vendor_name;
@@ -407,7 +407,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw);
407struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); 407struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
408struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, 408struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
409 u8 depth); 409 u8 depth);
410struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid); 410struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
411 411
412static inline unsigned int tb_switch_phy_port_from_link(unsigned int link) 412static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
413{ 413{
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 85b6d33c0919..de6441e4a060 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -179,7 +179,7 @@ struct icm_fr_pkg_get_topology_response {
179 179
180struct icm_fr_event_device_connected { 180struct icm_fr_event_device_connected {
181 struct icm_pkg_header hdr; 181 struct icm_pkg_header hdr;
182 uuid_be ep_uuid; 182 uuid_t ep_uuid;
183 u8 connection_key; 183 u8 connection_key;
184 u8 connection_id; 184 u8 connection_id;
185 u16 link_info; 185 u16 link_info;
@@ -193,7 +193,7 @@ struct icm_fr_event_device_connected {
193 193
194struct icm_fr_pkg_approve_device { 194struct icm_fr_pkg_approve_device {
195 struct icm_pkg_header hdr; 195 struct icm_pkg_header hdr;
196 uuid_be ep_uuid; 196 uuid_t ep_uuid;
197 u8 connection_key; 197 u8 connection_key;
198 u8 connection_id; 198 u8 connection_id;
199 u16 reserved; 199 u16 reserved;
@@ -207,7 +207,7 @@ struct icm_fr_event_device_disconnected {
207 207
208struct icm_fr_pkg_add_device_key { 208struct icm_fr_pkg_add_device_key {
209 struct icm_pkg_header hdr; 209 struct icm_pkg_header hdr;
210 uuid_be ep_uuid; 210 uuid_t ep_uuid;
211 u8 connection_key; 211 u8 connection_key;
212 u8 connection_id; 212 u8 connection_id;
213 u16 reserved; 213 u16 reserved;
@@ -216,7 +216,7 @@ struct icm_fr_pkg_add_device_key {
216 216
217struct icm_fr_pkg_add_device_key_response { 217struct icm_fr_pkg_add_device_key_response {
218 struct icm_pkg_header hdr; 218 struct icm_pkg_header hdr;
219 uuid_be ep_uuid; 219 uuid_t ep_uuid;
220 u8 connection_key; 220 u8 connection_key;
221 u8 connection_id; 221 u8 connection_id;
222 u16 reserved; 222 u16 reserved;
@@ -224,7 +224,7 @@ struct icm_fr_pkg_add_device_key_response {
224 224
225struct icm_fr_pkg_challenge_device { 225struct icm_fr_pkg_challenge_device {
226 struct icm_pkg_header hdr; 226 struct icm_pkg_header hdr;
227 uuid_be ep_uuid; 227 uuid_t ep_uuid;
228 u8 connection_key; 228 u8 connection_key;
229 u8 connection_id; 229 u8 connection_id;
230 u16 reserved; 230 u16 reserved;
@@ -233,7 +233,7 @@ struct icm_fr_pkg_challenge_device {
233 233
234struct icm_fr_pkg_challenge_device_response { 234struct icm_fr_pkg_challenge_device_response {
235 struct icm_pkg_header hdr; 235 struct icm_pkg_header hdr;
236 uuid_be ep_uuid; 236 uuid_t ep_uuid;
237 u8 connection_key; 237 u8 connection_key;
238 u8 connection_id; 238 u8 connection_id;
239 u16 reserved; 239 u16 reserved;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index d1399aac05a1..a6d5164c33a9 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -69,13 +69,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
69#ifdef CONFIG_UNIX98_PTYS 69#ifdef CONFIG_UNIX98_PTYS
70 if (tty->driver == ptm_driver) { 70 if (tty->driver == ptm_driver) {
71 mutex_lock(&devpts_mutex); 71 mutex_lock(&devpts_mutex);
72 if (tty->link->driver_data) { 72 if (tty->link->driver_data)
73 struct path *path = tty->link->driver_data; 73 devpts_pty_kill(tty->link->driver_data);
74
75 devpts_pty_kill(path->dentry);
76 path_put(path);
77 kfree(path);
78 }
79 mutex_unlock(&devpts_mutex); 74 mutex_unlock(&devpts_mutex);
80 } 75 }
81#endif 76#endif
@@ -448,48 +443,6 @@ err:
448 return retval; 443 return retval;
449} 444}
450 445
451/**
452 * pty_open_peer - open the peer of a pty
453 * @tty: the peer of the pty being opened
454 *
455 * Open the cached dentry in tty->link, providing a safe way for userspace
456 * to get the slave end of a pty (where they have the master fd and cannot
457 * access or trust the mount namespace /dev/pts was mounted inside).
458 */
459static struct file *pty_open_peer(struct tty_struct *tty, int flags)
460{
461 if (tty->driver->subtype != PTY_TYPE_MASTER)
462 return ERR_PTR(-EIO);
463 return dentry_open(tty->link->driver_data, flags, current_cred());
464}
465
466static int pty_get_peer(struct tty_struct *tty, int flags)
467{
468 int fd = -1;
469 struct file *filp = NULL;
470 int retval = -EINVAL;
471
472 fd = get_unused_fd_flags(0);
473 if (fd < 0) {
474 retval = fd;
475 goto err;
476 }
477
478 filp = pty_open_peer(tty, flags);
479 if (IS_ERR(filp)) {
480 retval = PTR_ERR(filp);
481 goto err_put;
482 }
483
484 fd_install(fd, filp);
485 return fd;
486
487err_put:
488 put_unused_fd(fd);
489err:
490 return retval;
491}
492
493static void pty_cleanup(struct tty_struct *tty) 446static void pty_cleanup(struct tty_struct *tty)
494{ 447{
495 tty_port_put(tty->port); 448 tty_port_put(tty->port);
@@ -646,9 +599,58 @@ static inline void legacy_pty_init(void) { }
646 599
647/* Unix98 devices */ 600/* Unix98 devices */
648#ifdef CONFIG_UNIX98_PTYS 601#ifdef CONFIG_UNIX98_PTYS
649
650static struct cdev ptmx_cdev; 602static struct cdev ptmx_cdev;
651 603
604/**
605 * ptm_open_peer - open the peer of a pty
606 * @master: the open struct file of the ptmx device node
607 * @tty: the master of the pty being opened
608 * @flags: the flags for open
609 *
610 * Provide a race free way for userspace to open the slave end of a pty
611 * (where they have the master fd and cannot access or trust the mount
612 * namespace /dev/pts was mounted inside).
613 */
614int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
615{
616 int fd = -1;
617 struct file *filp;
618 int retval = -EINVAL;
619 struct path path;
620
621 if (tty->driver != ptm_driver)
622 return -EIO;
623
624 fd = get_unused_fd_flags(0);
625 if (fd < 0) {
626 retval = fd;
627 goto err;
628 }
629
630 /* Compute the slave's path */
631 path.mnt = devpts_mntget(master, tty->driver_data);
632 if (IS_ERR(path.mnt)) {
633 retval = PTR_ERR(path.mnt);
634 goto err_put;
635 }
636 path.dentry = tty->link->driver_data;
637
638 filp = dentry_open(&path, flags, current_cred());
639 mntput(path.mnt);
640 if (IS_ERR(filp)) {
641 retval = PTR_ERR(filp);
642 goto err_put;
643 }
644
645 fd_install(fd, filp);
646 return fd;
647
648err_put:
649 put_unused_fd(fd);
650err:
651 return retval;
652}
653
652static int pty_unix98_ioctl(struct tty_struct *tty, 654static int pty_unix98_ioctl(struct tty_struct *tty,
653 unsigned int cmd, unsigned long arg) 655 unsigned int cmd, unsigned long arg)
654{ 656{
@@ -663,8 +665,6 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
663 return pty_get_pktmode(tty, (int __user *)arg); 665 return pty_get_pktmode(tty, (int __user *)arg);
664 case TIOCGPTN: /* Get PT Number */ 666 case TIOCGPTN: /* Get PT Number */
665 return put_user(tty->index, (unsigned int __user *)arg); 667 return put_user(tty->index, (unsigned int __user *)arg);
666 case TIOCGPTPEER: /* Open the other end */
667 return pty_get_peer(tty, (int) arg);
668 case TIOCSIG: /* Send signal to other side of pty */ 668 case TIOCSIG: /* Send signal to other side of pty */
669 return pty_signal(tty, (int) arg); 669 return pty_signal(tty, (int) arg);
670 } 670 }
@@ -792,7 +792,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
792{ 792{
793 struct pts_fs_info *fsi; 793 struct pts_fs_info *fsi;
794 struct tty_struct *tty; 794 struct tty_struct *tty;
795 struct path *pts_path;
796 struct dentry *dentry; 795 struct dentry *dentry;
797 int retval; 796 int retval;
798 int index; 797 int index;
@@ -846,26 +845,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
846 retval = PTR_ERR(dentry); 845 retval = PTR_ERR(dentry);
847 goto err_release; 846 goto err_release;
848 } 847 }
849 /* We need to cache a fake path for TIOCGPTPEER. */ 848 tty->link->driver_data = dentry;
850 pts_path = kmalloc(sizeof(struct path), GFP_KERNEL);
851 if (!pts_path)
852 goto err_release;
853 pts_path->mnt = filp->f_path.mnt;
854 pts_path->dentry = dentry;
855 path_get(pts_path);
856 tty->link->driver_data = pts_path;
857 849
858 retval = ptm_driver->ops->open(tty, filp); 850 retval = ptm_driver->ops->open(tty, filp);
859 if (retval) 851 if (retval)
860 goto err_path_put; 852 goto err_release;
861 853
862 tty_debug_hangup(tty, "opening (count=%d)\n", tty->count); 854 tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
863 855
864 tty_unlock(tty); 856 tty_unlock(tty);
865 return 0; 857 return 0;
866err_path_put:
867 path_put(pts_path);
868 kfree(pts_path);
869err_release: 858err_release:
870 tty_unlock(tty); 859 tty_unlock(tty);
871 // This will also put-ref the fsi 860 // This will also put-ref the fsi
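
From userspace, the new ptm_open_peer() entry point is reached through the
TIOCGPTPEER ioctl on the master fd (the case is now handled in the generic
tty ioctl path, which passes the struct file through). A minimal userspace
sketch, assuming headers that define TIOCGPTPEER:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		int master = open("/dev/ptmx", O_RDWR | O_NOCTTY);

		if (master < 0 || grantpt(master) || unlockpt(master))
			return 1;
		/* No /dev/pts path lookup: immune to mount-namespace games. */
		int slave = ioctl(master, TIOCGPTPEER, O_RDWR | O_NOCTTY);

		if (slave < 0)
			perror("TIOCGPTPEER");
		return 0;
	}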
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index b5def356af63..1aab3010fbfa 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1043,13 +1043,24 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
1043 if (up->dl_write) 1043 if (up->dl_write)
1044 uart->dl_write = up->dl_write; 1044 uart->dl_write = up->dl_write;
1045 1045
1046 if (serial8250_isa_config != NULL) 1046 if (uart->port.type != PORT_8250_CIR) {
1047 serial8250_isa_config(0, &uart->port, 1047 if (serial8250_isa_config != NULL)
1048 &uart->capabilities); 1048 serial8250_isa_config(0, &uart->port,
1049 &uart->capabilities);
1050
1051 ret = uart_add_one_port(&serial8250_reg,
1052 &uart->port);
1053 if (ret == 0)
1054 ret = uart->port.line;
1055 } else {
1056 dev_info(uart->port.dev,
1057 "skipping CIR port at 0x%lx / 0x%llx, IRQ %d\n",
1058 uart->port.iobase,
1059 (unsigned long long)uart->port.mapbase,
1060 uart->port.irq);
1049 1061
1050 ret = uart_add_one_port(&serial8250_reg, &uart->port); 1062 ret = 0;
1051 if (ret == 0) 1063 }
1052 ret = uart->port.line;
1053 } 1064 }
1054 mutex_unlock(&serial_mutex); 1065 mutex_unlock(&serial_mutex);
1055 1066
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index b5c98e5bf524..c6360fbdf808 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -261,7 +261,7 @@ __xr17v35x_register_gpio(struct pci_dev *pcidev,
261} 261}
262 262
263static const struct property_entry exar_gpio_properties[] = { 263static const struct property_entry exar_gpio_properties[] = {
264 PROPERTY_ENTRY_U32("linux,first-pin", 0), 264 PROPERTY_ENTRY_U32("exar,first-pin", 0),
265 PROPERTY_ENTRY_U32("ngpios", 16), 265 PROPERTY_ENTRY_U32("ngpios", 16),
266 { } 266 { }
267}; 267};
@@ -326,7 +326,7 @@ static int iot2040_rs485_config(struct uart_port *port,
326} 326}
327 327
328static const struct property_entry iot2040_gpio_properties[] = { 328static const struct property_entry iot2040_gpio_properties[] = {
329 PROPERTY_ENTRY_U32("linux,first-pin", 10), 329 PROPERTY_ENTRY_U32("exar,first-pin", 10),
330 PROPERTY_ENTRY_U32("ngpios", 1), 330 PROPERTY_ENTRY_U32("ngpios", 1),
331 { } 331 { }
332}; 332};
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8a857bb34fbb..1888d168a41c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -142,15 +142,7 @@ static struct vendor_data vendor_sbsa = {
142 .fixed_options = true, 142 .fixed_options = true,
143}; 143};
144 144
145/* 145#ifdef CONFIG_ACPI_SPCR_TABLE
146 * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
147 * occasionally getting stuck as 1. To avoid the potential for a hang, check
148 * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
149 * implementations, so only do so if an affected platform is detected in
150 * parse_spcr().
151 */
152static bool qdf2400_e44_present = false;
153
154static struct vendor_data vendor_qdt_qdf2400_e44 = { 146static struct vendor_data vendor_qdt_qdf2400_e44 = {
155 .reg_offset = pl011_std_offsets, 147 .reg_offset = pl011_std_offsets,
156 .fr_busy = UART011_FR_TXFE, 148 .fr_busy = UART011_FR_TXFE,
@@ -165,6 +157,7 @@ static struct vendor_data vendor_qdt_qdf2400_e44 = {
165 .always_enabled = true, 157 .always_enabled = true,
166 .fixed_options = true, 158 .fixed_options = true,
167}; 159};
160#endif
168 161
169static u16 pl011_st_offsets[REG_ARRAY_SIZE] = { 162static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
170 [REG_DR] = UART01x_DR, 163 [REG_DR] = UART01x_DR,
@@ -2375,12 +2368,14 @@ static int __init pl011_console_match(struct console *co, char *name, int idx,
2375 resource_size_t addr; 2368 resource_size_t addr;
2376 int i; 2369 int i;
2377 2370
2378 if (strcmp(name, "qdf2400_e44") == 0) { 2371 /*
2379 pr_info_once("UART: Working around QDF2400 SoC erratum 44"); 2372 * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum
2380 qdf2400_e44_present = true; 2373 * have a distinct console name, so make sure we check for that.
2381 } else if (strcmp(name, "pl011") != 0) { 2374 * The actual implementation of the erratum occurs in the probe
2375 * function.
2376 */
2377 if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0))
2382 return -ENODEV; 2378 return -ENODEV;
2383 }
2384 2379
2385 if (uart_parse_earlycon(options, &iotype, &addr, &options)) 2380 if (uart_parse_earlycon(options, &iotype, &addr, &options))
2386 return -ENODEV; 2381 return -ENODEV;
@@ -2734,11 +2729,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
2734 } 2729 }
2735 uap->port.irq = ret; 2730 uap->port.irq = ret;
2736 2731
2737 uap->reg_offset = vendor_sbsa.reg_offset; 2732#ifdef CONFIG_ACPI_SPCR_TABLE
2738 uap->vendor = qdf2400_e44_present ? 2733 if (qdf2400_e44_present) {
2739 &vendor_qdt_qdf2400_e44 : &vendor_sbsa; 2734 dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n");
2735 uap->vendor = &vendor_qdt_qdf2400_e44;
2736 } else
2737#endif
2738 uap->vendor = &vendor_sbsa;
2739
2740 uap->reg_offset = uap->vendor->reg_offset;
2740 uap->fifosize = 32; 2741 uap->fifosize = 32;
2741 uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM; 2742 uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
2742 uap->port.ops = &sbsa_uart_pops; 2743 uap->port.ops = &sbsa_uart_pops;
2743 uap->fixed_baud = baudrate; 2744 uap->fixed_baud = baudrate;
2744 2745
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 343de8c384b0..898dcb091a27 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -619,6 +619,12 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port)
619 TIOCSER_TEMT : 0; 619 TIOCSER_TEMT : 0;
620} 620}
621 621
622static bool lpuart_is_32(struct lpuart_port *sport)
623{
624 return sport->port.iotype == UPIO_MEM32 ||
625 sport->port.iotype == UPIO_MEM32BE;
626}
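
The equality form is the substance of this helper: UPIO_* iotype values
are an enumeration, not flag bits. Assuming the serial_core.h values of
this era (UPIO_MEM = 2, UPIO_MEM32 = 3, UPIO_MEM32BE = 6):

	/*
	 * Old test: iotype & (UPIO_MEM32 | UPIO_MEM32BE) is iotype & 7,
	 * which is non-zero even for plain UPIO_MEM (2 & 7 != 0), so
	 * 8-bit ports could wrongly take the 32-bit register paths.
	 */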
627
622static irqreturn_t lpuart_txint(int irq, void *dev_id) 628static irqreturn_t lpuart_txint(int irq, void *dev_id)
623{ 629{
624 struct lpuart_port *sport = dev_id; 630 struct lpuart_port *sport = dev_id;
@@ -627,7 +633,7 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
627 633
628 spin_lock_irqsave(&sport->port.lock, flags); 634 spin_lock_irqsave(&sport->port.lock, flags);
629 if (sport->port.x_char) { 635 if (sport->port.x_char) {
630 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 636 if (lpuart_is_32(sport))
631 lpuart32_write(&sport->port, sport->port.x_char, UARTDATA); 637 lpuart32_write(&sport->port, sport->port.x_char, UARTDATA);
632 else 638 else
633 writeb(sport->port.x_char, sport->port.membase + UARTDR); 639 writeb(sport->port.x_char, sport->port.membase + UARTDR);
@@ -635,14 +641,14 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id)
635 } 641 }
636 642
637 if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { 643 if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
638 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 644 if (lpuart_is_32(sport))
639 lpuart32_stop_tx(&sport->port); 645 lpuart32_stop_tx(&sport->port);
640 else 646 else
641 lpuart_stop_tx(&sport->port); 647 lpuart_stop_tx(&sport->port);
642 goto out; 648 goto out;
643 } 649 }
644 650
645 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 651 if (lpuart_is_32(sport))
646 lpuart32_transmit_buffer(sport); 652 lpuart32_transmit_buffer(sport);
647 else 653 else
648 lpuart_transmit_buffer(sport); 654 lpuart_transmit_buffer(sport);
@@ -1978,12 +1984,12 @@ static int __init lpuart_console_setup(struct console *co, char *options)
1978 if (options) 1984 if (options)
1979 uart_parse_options(options, &baud, &parity, &bits, &flow); 1985 uart_parse_options(options, &baud, &parity, &bits, &flow);
1980 else 1986 else
1981 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 1987 if (lpuart_is_32(sport))
1982 lpuart32_console_get_options(sport, &baud, &parity, &bits); 1988 lpuart32_console_get_options(sport, &baud, &parity, &bits);
1983 else 1989 else
1984 lpuart_console_get_options(sport, &baud, &parity, &bits); 1990 lpuart_console_get_options(sport, &baud, &parity, &bits);
1985 1991
1986 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 1992 if (lpuart_is_32(sport))
1987 lpuart32_setup_watermark(sport); 1993 lpuart32_setup_watermark(sport);
1988 else 1994 else
1989 lpuart_setup_watermark(sport); 1995 lpuart_setup_watermark(sport);
@@ -2118,7 +2124,7 @@ static int lpuart_probe(struct platform_device *pdev)
2118 } 2124 }
2119 sport->port.irq = ret; 2125 sport->port.irq = ret;
2120 sport->port.iotype = sdata->iotype; 2126 sport->port.iotype = sdata->iotype;
2121 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 2127 if (lpuart_is_32(sport))
2122 sport->port.ops = &lpuart32_pops; 2128 sport->port.ops = &lpuart32_pops;
2123 else 2129 else
2124 sport->port.ops = &lpuart_pops; 2130 sport->port.ops = &lpuart_pops;
@@ -2145,7 +2151,7 @@ static int lpuart_probe(struct platform_device *pdev)
2145 2151
2146 platform_set_drvdata(pdev, &sport->port); 2152 platform_set_drvdata(pdev, &sport->port);
2147 2153
2148 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) 2154 if (lpuart_is_32(sport))
2149 lpuart_reg.cons = LPUART32_CONSOLE; 2155 lpuart_reg.cons = LPUART32_CONSOLE;
2150 else 2156 else
2151 lpuart_reg.cons = LPUART_CONSOLE; 2157 lpuart_reg.cons = LPUART_CONSOLE;
@@ -2198,7 +2204,7 @@ static int lpuart_suspend(struct device *dev)
2198 struct lpuart_port *sport = dev_get_drvdata(dev); 2204 struct lpuart_port *sport = dev_get_drvdata(dev);
2199 unsigned long temp; 2205 unsigned long temp;
2200 2206
2201 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) { 2207 if (lpuart_is_32(sport)) {
2202 /* disable Rx/Tx and interrupts */ 2208 /* disable Rx/Tx and interrupts */
2203 temp = lpuart32_read(&sport->port, UARTCTRL); 2209 temp = lpuart32_read(&sport->port, UARTCTRL);
2204 temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE); 2210 temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE);
@@ -2249,7 +2255,7 @@ static int lpuart_resume(struct device *dev)
2249 if (sport->port.suspended && !sport->port.irq_wake) 2255 if (sport->port.suspended && !sport->port.irq_wake)
2250 clk_prepare_enable(sport->clk); 2256 clk_prepare_enable(sport->clk);
2251 2257
2252 if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) { 2258 if (lpuart_is_32(sport)) {
2253 lpuart32_setup_watermark(sport); 2259 lpuart32_setup_watermark(sport);
2254 temp = lpuart32_read(&sport->port, UARTCTRL); 2260 temp = lpuart32_read(&sport->port, UARTCTRL);
2255 temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | 2261 temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE |
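
The fsl_lpuart hunks above fold the repeated `sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)` test into a single predicate. The helper's definition falls outside this excerpt; a minimal sketch, assuming it simply wraps the test it replaces (sketch_is_32 is an illustrative name, the patch's real helper is lpuart_is_32() and takes the driver-local struct lpuart_port):

#include <linux/serial_core.h>

/* Hedged sketch; the helper answers: does this port use 32-bit
 * register access (either endianness)? */
static inline bool sketch_is_32(struct uart_port *port)
{
        return port->iotype & (UPIO_MEM32 | UPIO_MEM32BE);
}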
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 9e3162bf3bd1..80934e7bd67f 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -186,11 +186,6 @@
186 186
187#define UART_NR 8 187#define UART_NR 8
188 188
189/* RX DMA buffer periods */
190#define RX_DMA_PERIODS 4
191#define RX_BUF_SIZE (PAGE_SIZE)
192
193
194/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */ 189/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
195enum imx_uart_type { 190enum imx_uart_type {
196 IMX1_UART, 191 IMX1_UART,
@@ -226,7 +221,6 @@ struct imx_port {
226 struct dma_chan *dma_chan_rx, *dma_chan_tx; 221 struct dma_chan *dma_chan_rx, *dma_chan_tx;
227 struct scatterlist rx_sgl, tx_sgl[2]; 222 struct scatterlist rx_sgl, tx_sgl[2];
228 void *rx_buf; 223 void *rx_buf;
229 unsigned int rx_buf_size;
230 struct circ_buf rx_ring; 224 struct circ_buf rx_ring;
231 unsigned int rx_periods; 225 unsigned int rx_periods;
232 dma_cookie_t rx_cookie; 226 dma_cookie_t rx_cookie;
@@ -464,7 +458,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
464 } 458 }
465 } 459 }
466 460
467 while (!uart_circ_empty(xmit) && 461 while (!uart_circ_empty(xmit) && !sport->dma_is_txing &&
468 !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) { 462 !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) {
469 /* send xmit->buf[xmit->tail] 463 /* send xmit->buf[xmit->tail]
470 * out the port here */ 464 * out the port here */
@@ -967,6 +961,8 @@ static void imx_timeout(unsigned long data)
967 } 961 }
968} 962}
969 963
964#define RX_BUF_SIZE (PAGE_SIZE)
965
970/* 966/*
971 * There are two kinds of RX DMA interrupts (such as in the MX6Q): 967
972 * [1] the RX DMA buffer is full. 968 * [1] the RX DMA buffer is full.
@@ -1049,6 +1045,9 @@ static void dma_rx_callback(void *data)
1049 } 1045 }
1050} 1046}
1051 1047
1048/* RX DMA buffer periods */
1049#define RX_DMA_PERIODS 4
1050
1052static int start_rx_dma(struct imx_port *sport) 1051static int start_rx_dma(struct imx_port *sport)
1053{ 1052{
1054 struct scatterlist *sgl = &sport->rx_sgl; 1053 struct scatterlist *sgl = &sport->rx_sgl;
@@ -1059,8 +1058,9 @@ static int start_rx_dma(struct imx_port *sport)
1059 1058
1060 sport->rx_ring.head = 0; 1059 sport->rx_ring.head = 0;
1061 sport->rx_ring.tail = 0; 1060 sport->rx_ring.tail = 0;
1061 sport->rx_periods = RX_DMA_PERIODS;
1062 1062
1063 sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size); 1063 sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
1064 ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE); 1064 ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
1065 if (ret == 0) { 1065 if (ret == 0) {
1066 dev_err(dev, "DMA mapping error for RX.\n"); 1066 dev_err(dev, "DMA mapping error for RX.\n");
@@ -1171,7 +1171,7 @@ static int imx_uart_dma_init(struct imx_port *sport)
1171 goto err; 1171 goto err;
1172 } 1172 }
1173 1173
1174 sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL); 1174 sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1175 if (!sport->rx_buf) { 1175 if (!sport->rx_buf) {
1176 ret = -ENOMEM; 1176 ret = -ENOMEM;
1177 goto err; 1177 goto err;
@@ -2036,7 +2036,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
2036{ 2036{
2037 struct device_node *np = pdev->dev.of_node; 2037 struct device_node *np = pdev->dev.of_node;
2038 int ret; 2038 int ret;
2039 u32 dma_buf_size[2];
2040 2039
2041 sport->devdata = of_device_get_match_data(&pdev->dev); 2040 sport->devdata = of_device_get_match_data(&pdev->dev);
2042 if (!sport->devdata) 2041 if (!sport->devdata)
@@ -2060,14 +2059,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
2060 if (of_get_property(np, "rts-gpios", NULL)) 2059 if (of_get_property(np, "rts-gpios", NULL))
2061 sport->have_rtsgpio = 1; 2060 sport->have_rtsgpio = 1;
2062 2061
2063 if (!of_property_read_u32_array(np, "fsl,dma-size", dma_buf_size, 2)) {
2064 sport->rx_buf_size = dma_buf_size[0] * dma_buf_size[1];
2065 sport->rx_periods = dma_buf_size[1];
2066 } else {
2067 sport->rx_buf_size = RX_BUF_SIZE;
2068 sport->rx_periods = RX_DMA_PERIODS;
2069 }
2070
2071 return 0; 2062 return 0;
2072} 2063}
2073#else 2064#else
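
The imx.c hunks above drop the device-tree-tunable "fsl,dma-size" geometry and return to fixed constants, moving RX_BUF_SIZE and RX_DMA_PERIODS next to their only users. For orientation, a hedged sketch of the cyclic RX descriptor such a geometry typically feeds (chan and buf stand in for the driver's RX channel and mapped buffer; the driver's actual prep call may differ):

#include <linux/dmaengine.h>
#include <linux/mm.h>

#define RX_BUF_SIZE     PAGE_SIZE
#define RX_DMA_PERIODS  4

/* One RX_BUF_SIZE ring split into RX_DMA_PERIODS periods; the DMA
 * engine raises the completion callback after each period. */
static struct dma_async_tx_descriptor *
sketch_prep_rx(struct dma_chan *chan, dma_addr_t buf)
{
        return dmaengine_prep_dma_cyclic(chan, buf, RX_BUF_SIZE,
                                         RX_BUF_SIZE / RX_DMA_PERIODS,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT);
}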
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index da5ddfc14778..e08b16b070c0 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1085,10 +1085,12 @@ static ssize_t rx_trigger_store(struct device *dev,
1085{ 1085{
1086 struct uart_port *port = dev_get_drvdata(dev); 1086 struct uart_port *port = dev_get_drvdata(dev);
1087 struct sci_port *sci = to_sci_port(port); 1087 struct sci_port *sci = to_sci_port(port);
1088 int ret;
1088 long r; 1089 long r;
1089 1090
1090 if (kstrtol(buf, 0, &r) == -EINVAL) 1091 ret = kstrtol(buf, 0, &r);
1091 return -EINVAL; 1092 if (ret)
1093 return ret;
1092 1094
1093 sci->rx_trigger = scif_set_rtrg(port, r); 1095 sci->rx_trigger = scif_set_rtrg(port, r);
1094 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) 1096 if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -1116,10 +1118,12 @@ static ssize_t rx_fifo_timeout_store(struct device *dev,
1116{ 1118{
1117 struct uart_port *port = dev_get_drvdata(dev); 1119 struct uart_port *port = dev_get_drvdata(dev);
1118 struct sci_port *sci = to_sci_port(port); 1120 struct sci_port *sci = to_sci_port(port);
1121 int ret;
1119 long r; 1122 long r;
1120 1123
1121 if (kstrtol(buf, 0, &r) == -EINVAL) 1124 ret = kstrtol(buf, 0, &r);
1122 return -EINVAL; 1125 if (ret)
1126 return ret;
1123 sci->rx_fifo_timeout = r; 1127 sci->rx_fifo_timeout = r;
1124 scif_set_rtrg(port, 1); 1128 scif_set_rtrg(port, 1);
1125 if (r > 0) 1129 if (r > 0)
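
The sh-sci hunks above fix a classic kstrto* misuse: kstrtol() returns 0 on success or a negative errno (-EINVAL for bad syntax, -ERANGE for overflow), so comparing the result against -EINVAL alone let overflow errors fall through to the success path. A minimal sketch of the corrected pattern in a sysfs store callback (example_store and the surrounding names are illustrative only):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        long val;
        int ret;

        ret = kstrtol(buf, 0, &val);
        if (ret)
                return ret;     /* propagates -EINVAL and -ERANGE alike */

        /* ... apply val to the hardware ... */
        return count;
}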
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index f5335be344f6..6b0ca65027d0 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -758,6 +758,7 @@ static int asc_init_port(struct asc_port *ascport,
758 if (IS_ERR(ascport->pinctrl)) { 758 if (IS_ERR(ascport->pinctrl)) {
759 ret = PTR_ERR(ascport->pinctrl); 759 ret = PTR_ERR(ascport->pinctrl);
760 dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret); 760 dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret);
761 return ret;
761 } 762 }
762 763
763 ascport->states[DEFAULT] = 764 ascport->states[DEFAULT] =
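
The st-asc hunk above adds the early return the error branch was missing: after PTR_ERR() decodes an ERR_PTR-encoded pointer, execution must not continue using that pointer. A minimal sketch of the full pattern (sketch_get_pinctrl is illustrative):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int sketch_get_pinctrl(struct device *dev)
{
        struct pinctrl *p = devm_pinctrl_get(dev);

        if (IS_ERR(p))
                return PTR_ERR(p);      /* never fall through and use p */

        return 0;
}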
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 974b13d24401..10c4038c0e8d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2518,6 +2518,9 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2518 case TIOCSSERIAL: 2518 case TIOCSSERIAL:
2519 tty_warn_deprecated_flags(p); 2519 tty_warn_deprecated_flags(p);
2520 break; 2520 break;
2521 case TIOCGPTPEER:
2522 /* Special because the struct file is needed */
2523 return ptm_open_peer(file, tty, (int)arg);
2521 default: 2524 default:
2522 retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg); 2525 retval = tty_jobctrl_ioctl(tty, real_tty, file, cmd, arg);
2523 if (retval != -ENOIOCTLCMD) 2526 if (retval != -ENOIOCTLCMD)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5357d83bbda2..5e056064259c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1829,6 +1829,9 @@ static const struct usb_device_id acm_ids[] = {
1829 { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */ 1829 { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
1830 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ 1830 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1831 }, 1831 },
1832 { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
1833 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1834 },
1832 1835
1833 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1836 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
1834 .driver_info = CLEAR_HALT_CONDITIONS, 1837 .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index ab1bb3b538ac..7f277b092b5b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1888,7 +1888,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
1888 /* No more submits can occur */ 1888 /* No more submits can occur */
1889 spin_lock_irq(&hcd_urb_list_lock); 1889 spin_lock_irq(&hcd_urb_list_lock);
1890rescan: 1890rescan:
1891 list_for_each_entry (urb, &ep->urb_list, urb_list) { 1891 list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
1892 int is_in; 1892 int is_in;
1893 1893
1894 if (urb->unlinked) 1894 if (urb->unlinked)
@@ -2485,6 +2485,8 @@ void usb_hc_died (struct usb_hcd *hcd)
2485 } 2485 }
2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) { 2486 if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
2487 hcd = hcd->shared_hcd; 2487 hcd = hcd->shared_hcd;
2488 clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
2489 set_bit(HCD_FLAG_DEAD, &hcd->flags);
2488 if (hcd->rh_registered) { 2490 if (hcd->rh_registered) {
2489 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2491 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2490 2492
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 6e6797d145dd..822f8c50e423 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4725,7 +4725,8 @@ hub_power_remaining(struct usb_hub *hub)
4725static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, 4725static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4726 u16 portchange) 4726 u16 portchange)
4727{ 4727{
4728 int status, i; 4728 int status = -ENODEV;
4729 int i;
4729 unsigned unit_load; 4730 unsigned unit_load;
4730 struct usb_device *hdev = hub->hdev; 4731 struct usb_device *hdev = hub->hdev;
4731 struct usb_hcd *hcd = bus_to_hcd(hdev->bus); 4732 struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4929,9 +4930,10 @@ loop:
4929 4930
4930done: 4931done:
4931 hub_port_disable(hub, port1, 1); 4932 hub_port_disable(hub, port1, 1);
4932 if (hcd->driver->relinquish_port && !hub->hdev->parent) 4933 if (hcd->driver->relinquish_port && !hub->hdev->parent) {
4933 hcd->driver->relinquish_port(hcd, port1); 4934 if (status != -ENOTCONN && status != -ENODEV)
4934 4935 hcd->driver->relinquish_port(hcd, port1);
4936 }
4935} 4937}
4936 4938
4937/* Handle physical or logical connection change events. 4939/* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3116edfcdc18..574da2b4529c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
150 /* appletouch */ 150 /* appletouch */
151 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 151 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
152 152
153 /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
154 { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
155
153 /* Avision AV600U */ 156 /* Avision AV600U */
154 { USB_DEVICE(0x0638, 0x0a13), .driver_info = 157 { USB_DEVICE(0x0638, 0x0a13), .driver_info =
155 USB_QUIRK_STRING_FETCH_255 }, 158 USB_QUIRK_STRING_FETCH_255 },
@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
249 { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME }, 252 { USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
250 { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME }, 253 { USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
251 { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME }, 254 { USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
255 { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
252 256
253 /* Logitech Optical Mouse M90/M100 */ 257 /* Logitech Optical Mouse M90/M100 */
254 { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME }, 258 { USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
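
The quirks.c hunks above are pure table additions; matching is by vendor/product ID and the quirk flags ride in driver_info. A hedged sketch of the entry shape (the 0x1234/0x5678 device is made up purely for illustration):

#include <linux/usb.h>
#include <linux/usb/quirks.h>

static const struct usb_device_id sketch_quirk_list[] = {
        /* hypothetical device, for illustration only */
        { USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_NO_LPM },
        { }     /* terminating entry */
};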
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index bc3b3fda5000..c4066cd77e47 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3573,6 +3573,9 @@ irq_retry:
3573 /* Report disconnection if it is not already done. */ 3573 /* Report disconnection if it is not already done. */
3574 dwc2_hsotg_disconnect(hsotg); 3574 dwc2_hsotg_disconnect(hsotg);
3575 3575
3576 /* Reset device address to zero */
3577 __bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK);
3578
3576 if (usb_status & GOTGCTL_BSESVLD && connected) 3579 if (usb_status & GOTGCTL_BSESVLD && connected)
3577 dwc2_hsotg_core_init_disconnected(hsotg, true); 3580 dwc2_hsotg_core_init_disconnected(hsotg, true);
3578 } 3581 }
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 326b302fc440..03474d3575ab 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -766,15 +766,15 @@ static int dwc3_core_init(struct dwc3 *dwc)
766 dwc->maximum_speed = USB_SPEED_HIGH; 766 dwc->maximum_speed = USB_SPEED_HIGH;
767 } 767 }
768 768
769 ret = dwc3_core_soft_reset(dwc); 769 ret = dwc3_core_get_phy(dwc);
770 if (ret) 770 if (ret)
771 goto err0; 771 goto err0;
772 772
773 ret = dwc3_phy_setup(dwc); 773 ret = dwc3_core_soft_reset(dwc);
774 if (ret) 774 if (ret)
775 goto err0; 775 goto err0;
776 776
777 ret = dwc3_core_get_phy(dwc); 777 ret = dwc3_phy_setup(dwc);
778 if (ret) 778 if (ret)
779 goto err0; 779 goto err0;
780 780
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 98926504b55b..f5aaa0cf3873 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -512,15 +512,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
512 512
513 /* check the DMA Status */ 513 /* check the DMA Status */
514 reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG); 514 reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
515 irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
516 ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
517 dwc3_omap_interrupt_thread, IRQF_SHARED,
518 "dwc3-omap", omap);
519 if (ret) {
520 dev_err(dev, "failed to request IRQ #%d --> %d\n",
521 omap->irq, ret);
522 goto err1;
523 }
524 515
525 ret = dwc3_omap_extcon_register(omap); 516 ret = dwc3_omap_extcon_register(omap);
526 if (ret < 0) 517 if (ret < 0)
@@ -532,8 +523,15 @@ static int dwc3_omap_probe(struct platform_device *pdev)
532 goto err1; 523 goto err1;
533 } 524 }
534 525
526 ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
527 dwc3_omap_interrupt_thread, IRQF_SHARED,
528 "dwc3-omap", omap);
529 if (ret) {
530 dev_err(dev, "failed to request IRQ #%d --> %d\n",
531 omap->irq, ret);
532 goto err1;
533 }
535 dwc3_omap_enable_irqs(omap); 534 dwc3_omap_enable_irqs(omap);
536 enable_irq(omap->irq);
537 return 0; 535 return 0;
538 536
539err1: 537err1:
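
The dwc3-omap reorder above drops the IRQ_NOAUTOEN/enable_irq() dance: by requesting the (auto-enabled) IRQ only after extcon registration, the handler cannot fire before the data it touches exists. A minimal sketch of that ordering rule (names are illustrative):

#include <linux/interrupt.h>

/* Request the IRQ last: once devm_request_threaded_irq() returns, the
 * handler may run immediately, so everything it dereferences must
 * already be set up. */
static int sketch_probe_order(struct device *dev, int irq,
                              irq_handler_t hardirq, irq_handler_t thread,
                              void *ctx, int (*setup_resources)(void *ctx))
{
        int ret = setup_resources(ctx);  /* everything the ISR needs */

        if (ret)
                return ret;

        return devm_request_threaded_irq(dev, irq, hardirq, thread,
                                         IRQF_SHARED, "sketch", ctx);
}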
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9e41605a276b..f064f1549333 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -191,14 +191,16 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
191 191
192 req->started = false; 192 req->started = false;
193 list_del(&req->list); 193 list_del(&req->list);
194 req->trb = NULL;
195 req->remaining = 0; 194 req->remaining = 0;
196 195
197 if (req->request.status == -EINPROGRESS) 196 if (req->request.status == -EINPROGRESS)
198 req->request.status = status; 197 req->request.status = status;
199 198
200 usb_gadget_unmap_request_by_dev(dwc->sysdev, 199 if (req->trb)
201 &req->request, req->direction); 200 usb_gadget_unmap_request_by_dev(dwc->sysdev,
201 &req->request, req->direction);
202
203 req->trb = NULL;
202 204
203 trace_dwc3_gadget_giveback(req); 205 trace_dwc3_gadget_giveback(req);
204 206
@@ -894,9 +896,40 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
894 if (!node) { 896 if (!node) {
895 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST; 897 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
896 898
899 /*
900 * USB Specification 2.0 Section 5.9.2 states that: "If
901 * there is only a single transaction in the microframe,
902 * only a DATA0 data packet PID is used. If there are
903 * two transactions per microframe, DATA1 is used for
904 * the first transaction data packet and DATA0 is used
905 * for the second transaction data packet. If there are
906 * three transactions per microframe, DATA2 is used for
907 * the first transaction data packet, DATA1 is used for
908 * the second, and DATA0 is used for the third."
909 *
910 * IOW, we should satisfy the following cases:
911 *
912 * 1) length <= maxpacket
913 * - DATA0
914 *
915 * 2) maxpacket < length <= (2 * maxpacket)
916 * - DATA1, DATA0
917 *
918 * 3) (2 * maxpacket) < length <= (3 * maxpacket)
919 * - DATA2, DATA1, DATA0
920 */
897 if (speed == USB_SPEED_HIGH) { 921 if (speed == USB_SPEED_HIGH) {
898 struct usb_ep *ep = &dep->endpoint; 922 struct usb_ep *ep = &dep->endpoint;
899 trb->size |= DWC3_TRB_SIZE_PCM1(ep->mult - 1); 923 unsigned int mult = ep->mult - 1;
924 unsigned int maxp = usb_endpoint_maxp(ep->desc);
925
926 if (length <= (2 * maxp))
927 mult--;
928
929 if (length <= maxp)
930 mult--;
931
932 trb->size |= DWC3_TRB_SIZE_PCM1(mult);
900 } 933 }
901 } else { 934 } else {
902 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; 935 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
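
The long comment above maps request length to the number of high-speed transactions per microframe. An equivalent closed form of the same arithmetic, assuming length is at most 3 × maxpacket (hs_isoc_pcm1 is an illustrative name, not the driver's):

#include <linux/kernel.h>

/* packets = ceil(length / maxp); PCM1 = packets - 1, so
 * 0 -> DATA0, 1 -> DATA1+DATA0, 2 -> DATA2+DATA1+DATA0. */
static unsigned int hs_isoc_pcm1(unsigned int length, unsigned int maxp)
{
        unsigned int packets = DIV_ROUND_UP(length, maxp);

        return packets ? packets - 1 : 0;
}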
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e80b9c123a9d..f95bddd6513f 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2490,7 +2490,7 @@ static int fsg_main_thread(void *common_)
2490 int i; 2490 int i;
2491 2491
2492 down_write(&common->filesem); 2492 down_write(&common->filesem);
2493 for (i = 0; i < ARRAY_SIZE(common->luns); --i) { 2493 for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
2494 struct fsg_lun *curlun = common->luns[i]; 2494 struct fsg_lun *curlun = common->luns[i];
2495 if (!curlun || !fsg_lun_is_open(curlun)) 2495 if (!curlun || !fsg_lun_is_open(curlun))
2496 continue; 2496 continue;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 8656f84e17d9..29efbedc91f9 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -92,9 +92,9 @@ static struct uac_input_terminal_descriptor usb_out_it_desc = {
92 .bDescriptorType = USB_DT_CS_INTERFACE, 92 .bDescriptorType = USB_DT_CS_INTERFACE,
93 .bDescriptorSubtype = UAC_INPUT_TERMINAL, 93 .bDescriptorSubtype = UAC_INPUT_TERMINAL,
94 .bTerminalID = USB_OUT_IT_ID, 94 .bTerminalID = USB_OUT_IT_ID,
95 .wTerminalType = UAC_TERMINAL_STREAMING, 95 .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
96 .bAssocTerminal = 0, 96 .bAssocTerminal = 0,
97 .wChannelConfig = 0x3, 97 .wChannelConfig = cpu_to_le16(0x3),
98}; 98};
99 99
100#define IO_OUT_OT_ID 2 100#define IO_OUT_OT_ID 2
@@ -103,7 +103,7 @@ static struct uac1_output_terminal_descriptor io_out_ot_desc = {
103 .bDescriptorType = USB_DT_CS_INTERFACE, 103 .bDescriptorType = USB_DT_CS_INTERFACE,
104 .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, 104 .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
105 .bTerminalID = IO_OUT_OT_ID, 105 .bTerminalID = IO_OUT_OT_ID,
106 .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER, 106 .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
107 .bAssocTerminal = 0, 107 .bAssocTerminal = 0,
108 .bSourceID = USB_OUT_IT_ID, 108 .bSourceID = USB_OUT_IT_ID,
109}; 109};
@@ -114,9 +114,9 @@ static struct uac_input_terminal_descriptor io_in_it_desc = {
114 .bDescriptorType = USB_DT_CS_INTERFACE, 114 .bDescriptorType = USB_DT_CS_INTERFACE,
115 .bDescriptorSubtype = UAC_INPUT_TERMINAL, 115 .bDescriptorSubtype = UAC_INPUT_TERMINAL,
116 .bTerminalID = IO_IN_IT_ID, 116 .bTerminalID = IO_IN_IT_ID,
117 .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE, 117 .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
118 .bAssocTerminal = 0, 118 .bAssocTerminal = 0,
119 .wChannelConfig = 0x3, 119 .wChannelConfig = cpu_to_le16(0x3),
120}; 120};
121 121
122#define USB_IN_OT_ID 4 122#define USB_IN_OT_ID 4
@@ -125,7 +125,7 @@ static struct uac1_output_terminal_descriptor usb_in_ot_desc = {
125 .bDescriptorType = USB_DT_CS_INTERFACE, 125 .bDescriptorType = USB_DT_CS_INTERFACE,
126 .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, 126 .bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
127 .bTerminalID = USB_IN_OT_ID, 127 .bTerminalID = USB_IN_OT_ID,
128 .wTerminalType = UAC_TERMINAL_STREAMING, 128 .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
129 .bAssocTerminal = 0, 129 .bAssocTerminal = 0,
130 .bSourceID = IO_IN_IT_ID, 130 .bSourceID = IO_IN_IT_ID,
131}; 131};
@@ -174,7 +174,7 @@ static struct uac1_as_header_descriptor as_out_header_desc = {
174 .bDescriptorSubtype = UAC_AS_GENERAL, 174 .bDescriptorSubtype = UAC_AS_GENERAL,
175 .bTerminalLink = USB_OUT_IT_ID, 175 .bTerminalLink = USB_OUT_IT_ID,
176 .bDelay = 1, 176 .bDelay = 1,
177 .wFormatTag = UAC_FORMAT_TYPE_I_PCM, 177 .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
178}; 178};
179 179
180static struct uac1_as_header_descriptor as_in_header_desc = { 180static struct uac1_as_header_descriptor as_in_header_desc = {
@@ -183,7 +183,7 @@ static struct uac1_as_header_descriptor as_in_header_desc = {
183 .bDescriptorSubtype = UAC_AS_GENERAL, 183 .bDescriptorSubtype = UAC_AS_GENERAL,
184 .bTerminalLink = USB_IN_OT_ID, 184 .bTerminalLink = USB_IN_OT_ID,
185 .bDelay = 1, 185 .bDelay = 1,
186 .wFormatTag = UAC_FORMAT_TYPE_I_PCM, 186 .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM),
187}; 187};
188 188
189DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1); 189DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
@@ -606,8 +606,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
606 if (status) 606 if (status)
607 goto fail; 607 goto fail;
608 608
609 audio->out_ep_maxpsize = as_out_ep_desc.wMaxPacketSize; 609 audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize);
610 audio->in_ep_maxpsize = as_in_ep_desc.wMaxPacketSize; 610 audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize);
611 audio->params.c_chmask = audio_opts->c_chmask; 611 audio->params.c_chmask = audio_opts->c_chmask;
612 audio->params.c_srate = audio_opts->c_srate; 612 audio->params.c_srate = audio_opts->c_srate;
613 audio->params.c_ssize = audio_opts->c_ssize; 613 audio->params.c_ssize = audio_opts->c_ssize;
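
The f_uac1 hunks above are endianness fixes: USB descriptor fields are little-endian on the wire, so 16-bit values must be stored via cpu_to_le16() and read back via le16_to_cpu(). Both are no-ops on little-endian hosts, which is why the missing conversions only misbehave on big-endian machines. A minimal sketch (sketch_roundtrip is illustrative):

#include <linux/usb/ch9.h>

static u16 sketch_roundtrip(struct usb_endpoint_descriptor *ep, u16 maxp)
{
        ep->wMaxPacketSize = cpu_to_le16(maxp); /* host -> wire order */
        return le16_to_cpu(ep->wMaxPacketSize); /* wire -> host order */
}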
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 9082ce261e70..f05c3f3e6103 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -168,7 +168,7 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
168 .bAssocTerminal = 0, 168 .bAssocTerminal = 0,
169 .bCSourceID = USB_OUT_CLK_ID, 169 .bCSourceID = USB_OUT_CLK_ID,
170 .iChannelNames = 0, 170 .iChannelNames = 0,
171 .bmControls = (CONTROL_RDWR << COPY_CTRL), 171 .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
172}; 172};
173 173
174/* Input Terminal for I/O-In */ 174/* Input Terminal for I/O-In */
@@ -182,7 +182,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
182 .bAssocTerminal = 0, 182 .bAssocTerminal = 0,
183 .bCSourceID = USB_IN_CLK_ID, 183 .bCSourceID = USB_IN_CLK_ID,
184 .iChannelNames = 0, 184 .iChannelNames = 0,
185 .bmControls = (CONTROL_RDWR << COPY_CTRL), 185 .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
186}; 186};
187 187
188/* Output Terminal for USB_IN */ 188
@@ -196,7 +196,7 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
196 .bAssocTerminal = 0, 196 .bAssocTerminal = 0,
197 .bSourceID = IO_IN_IT_ID, 197 .bSourceID = IO_IN_IT_ID,
198 .bCSourceID = USB_IN_CLK_ID, 198 .bCSourceID = USB_IN_CLK_ID,
199 .bmControls = (CONTROL_RDWR << COPY_CTRL), 199 .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
200}; 200};
201 201
202/* Output Terminal for I/O-Out */ 202
@@ -210,7 +210,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
210 .bAssocTerminal = 0, 210 .bAssocTerminal = 0,
211 .bSourceID = USB_OUT_IT_ID, 211 .bSourceID = USB_OUT_IT_ID,
212 .bCSourceID = USB_OUT_CLK_ID, 212 .bCSourceID = USB_OUT_CLK_ID,
213 .bmControls = (CONTROL_RDWR << COPY_CTRL), 213 .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
214}; 214};
215 215
216static struct uac2_ac_header_descriptor ac_hdr_desc = { 216static struct uac2_ac_header_descriptor ac_hdr_desc = {
@@ -220,9 +220,10 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = {
220 .bDescriptorSubtype = UAC_MS_HEADER, 220 .bDescriptorSubtype = UAC_MS_HEADER,
221 .bcdADC = cpu_to_le16(0x200), 221 .bcdADC = cpu_to_le16(0x200),
222 .bCategory = UAC2_FUNCTION_IO_BOX, 222 .bCategory = UAC2_FUNCTION_IO_BOX,
223 .wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc 223 .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc
224 + sizeof usb_out_it_desc + sizeof io_in_it_desc 224 + sizeof out_clk_src_desc + sizeof usb_out_it_desc
225 + sizeof usb_in_ot_desc + sizeof io_out_ot_desc, 225 + sizeof io_in_it_desc + sizeof usb_in_ot_desc
226 + sizeof io_out_ot_desc),
226 .bmControls = 0, 227 .bmControls = 0,
227}; 228};
228 229
@@ -569,10 +570,12 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
569 return ret; 570 return ret;
570 } 571 }
571 572
572 agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize, 573 agdev->in_ep_maxpsize = max_t(u16,
573 hs_epin_desc.wMaxPacketSize); 574 le16_to_cpu(fs_epin_desc.wMaxPacketSize),
574 agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize, 575 le16_to_cpu(hs_epin_desc.wMaxPacketSize));
575 hs_epout_desc.wMaxPacketSize); 576 agdev->out_ep_maxpsize = max_t(u16,
577 le16_to_cpu(fs_epout_desc.wMaxPacketSize),
578 le16_to_cpu(hs_epout_desc.wMaxPacketSize));
576 579
577 hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; 580 hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
578 hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; 581 hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
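
The f_uac2 hunks continue the same theme and add one subtlety: comparing raw __le16 values with max() gives the wrong answer on big-endian hosts, so both operands are converted first and compared as u16 via max_t(). A one-line sketch:

#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>

static u16 sketch_max_maxp(__le16 fs_maxp, __le16 hs_maxp)
{
        /* convert to host order before any numeric comparison */
        return max_t(u16, le16_to_cpu(fs_maxp), le16_to_cpu(hs_maxp));
}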
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 9ffb11ec9ed9..7cd5c969fcbe 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -192,7 +192,7 @@ config USB_RENESAS_USBHS_UDC
192config USB_RENESAS_USB3 192config USB_RENESAS_USB3
193 tristate 'Renesas USB3.0 Peripheral controller' 193 tristate 'Renesas USB3.0 Peripheral controller'
194 depends on ARCH_RENESAS || COMPILE_TEST 194 depends on ARCH_RENESAS || COMPILE_TEST
195 depends on EXTCON 195 depends on EXTCON && HAS_DMA
196 help 196 help
197 Renesas USB3.0 Peripheral controller is a USB peripheral controller 197 Renesas USB3.0 Peripheral controller is a USB peripheral controller
198 that supports super, high, and full speed USB 3.0 data transfers. 198 that supports super, high, and full speed USB 3.0 data transfers.
@@ -257,6 +257,7 @@ config USB_MV_U3D
257 257
258config USB_SNP_CORE 258config USB_SNP_CORE
259 depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT) 259 depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT)
260 depends on HAS_DMA
260 tristate 261 tristate
261 help 262 help
262 This enables core driver support for Synopsys USB 2.0 Device 263 This enables core driver support for Synopsys USB 2.0 Device
@@ -271,7 +272,7 @@ config USB_SNP_CORE
271 272
272config USB_SNP_UDC_PLAT 273config USB_SNP_UDC_PLAT
273 tristate "Synopsys USB 2.0 Device controller" 274 tristate "Synopsys USB 2.0 Device controller"
274 depends on (USB_GADGET && OF) 275 depends on USB_GADGET && OF && HAS_DMA
275 select USB_GADGET_DUALSPEED 276 select USB_GADGET_DUALSPEED
276 select USB_SNP_CORE 277 select USB_SNP_CORE
277 default ARCH_BCM_IPROC 278 default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index d8278322d5ac..e1de8fe599a3 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -89,6 +89,9 @@
89 89
90/* USB_COM_CON */ 90/* USB_COM_CON */
91#define USB_COM_CON_CONF BIT(24) 91#define USB_COM_CON_CONF BIT(24)
92#define USB_COM_CON_PN_WDATAIF_NL BIT(23)
93#define USB_COM_CON_PN_RDATAIF_NL BIT(22)
94#define USB_COM_CON_PN_LSTTR_PP BIT(21)
92#define USB_COM_CON_SPD_MODE BIT(17) 95#define USB_COM_CON_SPD_MODE BIT(17)
93#define USB_COM_CON_EP0_EN BIT(16) 96#define USB_COM_CON_EP0_EN BIT(16)
94#define USB_COM_CON_DEV_ADDR_SHIFT 8 97#define USB_COM_CON_DEV_ADDR_SHIFT 8
@@ -686,6 +689,9 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
686{ 689{
687 usb3_init_axi_bridge(usb3); 690 usb3_init_axi_bridge(usb3);
688 usb3_init_epc_registers(usb3); 691 usb3_init_epc_registers(usb3);
692 usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
693 USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
694 USB3_USB_COM_CON);
689 usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA); 695 usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA);
690 usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA); 696 usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA);
691 697
@@ -832,21 +838,32 @@ static struct renesas_usb3_request *usb3_get_request(struct renesas_usb3_ep
832 return usb3_req; 838 return usb3_req;
833} 839}
834 840
835static void usb3_request_done(struct renesas_usb3_ep *usb3_ep, 841static void __usb3_request_done(struct renesas_usb3_ep *usb3_ep,
836 struct renesas_usb3_request *usb3_req, int status) 842 struct renesas_usb3_request *usb3_req,
843 int status)
837{ 844{
838 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); 845 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
839 unsigned long flags;
840 846
841 dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n", 847 dev_dbg(usb3_to_dev(usb3), "giveback: ep%2d, %u, %u, %d\n",
842 usb3_ep->num, usb3_req->req.length, usb3_req->req.actual, 848 usb3_ep->num, usb3_req->req.length, usb3_req->req.actual,
843 status); 849 status);
844 usb3_req->req.status = status; 850 usb3_req->req.status = status;
845 spin_lock_irqsave(&usb3->lock, flags);
846 usb3_ep->started = false; 851 usb3_ep->started = false;
847 list_del_init(&usb3_req->queue); 852 list_del_init(&usb3_req->queue);
848 spin_unlock_irqrestore(&usb3->lock, flags); 853 spin_unlock(&usb3->lock);
849 usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req); 854 usb_gadget_giveback_request(&usb3_ep->ep, &usb3_req->req);
855 spin_lock(&usb3->lock);
856}
857
858static void usb3_request_done(struct renesas_usb3_ep *usb3_ep,
859 struct renesas_usb3_request *usb3_req, int status)
860{
861 struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep);
862 unsigned long flags;
863
864 spin_lock_irqsave(&usb3->lock, flags);
865 __usb3_request_done(usb3_ep, usb3_req, status);
866 spin_unlock_irqrestore(&usb3->lock, flags);
850} 867}
851 868
852static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3) 869static void usb3_irq_epc_pipe0_status_end(struct renesas_usb3 *usb3)
@@ -1369,7 +1386,7 @@ static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3,
1369 1386
1370 usb3_for_each_dma(usb3, dma, i) { 1387 usb3_for_each_dma(usb3, dma, i) {
1371 if (dma->prd) { 1388 if (dma->prd) {
1372 dma_free_coherent(dev, USB3_DMA_MAX_XFER_SIZE, 1389 dma_free_coherent(dev, USB3_DMA_PRD_SIZE,
1373 dma->prd, dma->prd_dma); 1390 dma->prd, dma->prd_dma);
1374 dma->prd = NULL; 1391 dma->prd = NULL;
1375 } 1392 }
@@ -1409,12 +1426,12 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep,
1409 int ret = -EAGAIN; 1426 int ret = -EAGAIN;
1410 u32 enable_bits = 0; 1427 u32 enable_bits = 0;
1411 1428
1429 spin_lock_irqsave(&usb3->lock, flags);
1412 if (usb3_ep->halt || usb3_ep->started) 1430 if (usb3_ep->halt || usb3_ep->started)
1413 return; 1431 goto out;
1414 if (usb3_req != usb3_req_first) 1432 if (usb3_req != usb3_req_first)
1415 return; 1433 goto out;
1416 1434
1417 spin_lock_irqsave(&usb3->lock, flags);
1418 if (usb3_pn_change(usb3, usb3_ep->num) < 0) 1435 if (usb3_pn_change(usb3, usb3_ep->num) < 0)
1419 goto out; 1436 goto out;
1420 1437
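
The renesas_usb3 refactor above exists so that usb_gadget_giveback_request() runs without the controller lock held: the gadget's completion callback may immediately re-queue a request and take the same lock. The generic shape, as a hedged sketch (sketch_giveback and its parameters are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Called with *lock held; drops it only around the callback. */
static void sketch_giveback(spinlock_t *lock, struct list_head *entry,
                            void (*complete)(void *ctx), void *ctx)
{
        list_del_init(entry);
        spin_unlock(lock);
        complete(ctx);          /* may re-enter and take *lock */
        spin_lock(lock);
}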
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 2e11f19e07ae..f7b4d0f159e4 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -28,7 +28,7 @@
28/* description */ 28/* description */
29#define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver" 29#define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver"
30 30
31void start_udc(struct udc *udc) 31static void start_udc(struct udc *udc)
32{ 32{
33 if (udc->driver) { 33 if (udc->driver) {
34 dev_info(udc->dev, "Connecting...\n"); 34 dev_info(udc->dev, "Connecting...\n");
@@ -38,7 +38,7 @@ void start_udc(struct udc *udc)
38 } 38 }
39} 39}
40 40
41void stop_udc(struct udc *udc) 41static void stop_udc(struct udc *udc)
42{ 42{
43 int tmp; 43 int tmp;
44 u32 reg; 44 u32 reg;
@@ -76,7 +76,7 @@ void stop_udc(struct udc *udc)
76 dev_info(udc->dev, "Device disconnected\n"); 76 dev_info(udc->dev, "Device disconnected\n");
77} 77}
78 78
79void udc_drd_work(struct work_struct *work) 79static void udc_drd_work(struct work_struct *work)
80{ 80{
81 struct udc *udc; 81 struct udc *udc;
82 82
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index a9a1e4c40480..c8f38649f749 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -77,6 +77,16 @@
77#define USB_INTEL_USB3_PSSEN 0xD8 77#define USB_INTEL_USB3_PSSEN 0xD8
78#define USB_INTEL_USB3PRM 0xDC 78#define USB_INTEL_USB3PRM 0xDC
79 79
80/* ASMEDIA quirk use */
81#define ASMT_DATA_WRITE0_REG 0xF8
82#define ASMT_DATA_WRITE1_REG 0xFC
83#define ASMT_CONTROL_REG 0xE0
84#define ASMT_CONTROL_WRITE_BIT 0x02
85#define ASMT_WRITEREG_CMD 0x10423
86#define ASMT_FLOWCTL_ADDR 0xFA30
87#define ASMT_FLOWCTL_DATA 0xBA
88#define ASMT_PSEUDO_DATA 0
89
80/* 90/*
81 * amd_chipset_gen values represent different AMD chipset generations 91
82 */ 92 */
@@ -88,6 +98,7 @@ enum amd_chipset_gen {
88 AMD_CHIPSET_HUDSON2, 98 AMD_CHIPSET_HUDSON2,
89 AMD_CHIPSET_BOLTON, 99 AMD_CHIPSET_BOLTON,
90 AMD_CHIPSET_YANGTZE, 100 AMD_CHIPSET_YANGTZE,
101 AMD_CHIPSET_TAISHAN,
91 AMD_CHIPSET_UNKNOWN, 102 AMD_CHIPSET_UNKNOWN,
92}; 103};
93 104
@@ -131,6 +142,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
131 pinfo->sb_type.gen = AMD_CHIPSET_SB700; 142 pinfo->sb_type.gen = AMD_CHIPSET_SB700;
132 else if (rev >= 0x40 && rev <= 0x4f) 143 else if (rev >= 0x40 && rev <= 0x4f)
133 pinfo->sb_type.gen = AMD_CHIPSET_SB800; 144 pinfo->sb_type.gen = AMD_CHIPSET_SB800;
145 }
146 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
147 0x145c, NULL);
148 if (pinfo->smbus_dev) {
149 pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
134 } else { 150 } else {
135 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 151 pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
136 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); 152 PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
@@ -250,11 +266,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
250{ 266{
251 /* Make sure amd chipset type has already been initialized */ 267 /* Make sure amd chipset type has already been initialized */
252 usb_amd_find_chipset_info(); 268 usb_amd_find_chipset_info();
253 if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE) 269 if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
254 return 0; 270 amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
255 271 dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
256 dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); 272 return 1;
257 return 1; 273 }
274 return 0;
258} 275}
259EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); 276EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
260 277
@@ -412,6 +429,50 @@ void usb_amd_quirk_pll_disable(void)
412} 429}
413EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable); 430EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
414 431
432static int usb_asmedia_wait_write(struct pci_dev *pdev)
433{
434 unsigned long retry_count;
435 unsigned char value;
436
437 for (retry_count = 1000; retry_count > 0; --retry_count) {
438
439 pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value);
440
441 if (value == 0xff) {
442 dev_err(&pdev->dev, "%s: check_ready ERROR", __func__);
443 return -EIO;
444 }
445
446 if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
447 return 0;
448
449 usleep_range(40, 60);
450 }
451
452 dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
453 return -ETIMEDOUT;
454}
455
456void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev)
457{
458 if (usb_asmedia_wait_write(pdev) != 0)
459 return;
460
461 /* send command and address to device */
462 pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD);
463 pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR);
464 pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
465
466 if (usb_asmedia_wait_write(pdev) != 0)
467 return;
468
469 /* send data to device */
470 pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA);
471 pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA);
472 pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT);
473}
474EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol);
475
415void usb_amd_quirk_pll_enable(void) 476void usb_amd_quirk_pll_enable(void)
416{ 477{
417 usb_amd_quirk_pll(0); 478 usb_amd_quirk_pll(0);
@@ -1096,3 +1157,23 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev)
1096} 1157}
1097DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, 1158DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
1098 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); 1159 PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
1160
1161bool usb_xhci_needs_pci_reset(struct pci_dev *pdev)
1162{
1163 /*
1164 * Our dear uPD72020{1,2} friend only partially resets when
1165 * asked to via the XHCI interface, and may end up doing DMA
1166 * at the wrong addresses, as it keeps the top 32bit of some
1167 * addresses from its previous programming under obscure
1168 * circumstances.
1169 * Give it a good whack at probe time. Unfortunately, this
1170 * needs to happen before we've had a chance to discover any
1171 * quirk, or the system will be in a rather bad state.
1172 */
1173 if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1174 (pdev->device == 0x0014 || pdev->device == 0x0015))
1175 return true;
1176
1177 return false;
1178}
1179EXPORT_SYMBOL_GPL(usb_xhci_needs_pci_reset);
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 0222195bd5b0..5582cbafecd4 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -11,13 +11,16 @@ bool usb_amd_prefetch_quirk(void);
11void usb_amd_dev_put(void); 11void usb_amd_dev_put(void);
12void usb_amd_quirk_pll_disable(void); 12void usb_amd_quirk_pll_disable(void);
13void usb_amd_quirk_pll_enable(void); 13void usb_amd_quirk_pll_enable(void);
14void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
14void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); 15void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
15void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); 16void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
16void sb800_prefetch(struct device *dev, int on); 17void sb800_prefetch(struct device *dev, int on);
18bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
17#else 19#else
18struct pci_dev; 20struct pci_dev;
19static inline void usb_amd_quirk_pll_disable(void) {} 21static inline void usb_amd_quirk_pll_disable(void) {}
20static inline void usb_amd_quirk_pll_enable(void) {} 22static inline void usb_amd_quirk_pll_enable(void) {}
23static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
21static inline void usb_amd_dev_put(void) {} 24static inline void usb_amd_dev_put(void) {}
22static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} 25static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
23static inline void sb800_prefetch(struct device *dev, int on) {} 26static inline void sb800_prefetch(struct device *dev, int on) {}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1adae9eab831..00721e8807ab 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -398,14 +398,21 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
398 spin_lock_irqsave(&xhci->lock, flags); 398 spin_lock_irqsave(&xhci->lock, flags);
399 for (i = LAST_EP_INDEX; i > 0; i--) { 399 for (i = LAST_EP_INDEX; i > 0; i--) {
400 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) { 400 if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
401 struct xhci_ep_ctx *ep_ctx;
401 struct xhci_command *command; 402 struct xhci_command *command;
403
404 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
405
406 /* Check ep is running, required by AMD SNPS 3.1 xHC */
407 if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING)
408 continue;
409
402 command = xhci_alloc_command(xhci, false, false, 410 command = xhci_alloc_command(xhci, false, false,
403 GFP_NOWAIT); 411 GFP_NOWAIT);
404 if (!command) { 412 if (!command) {
405 spin_unlock_irqrestore(&xhci->lock, flags); 413 spin_unlock_irqrestore(&xhci->lock, flags);
406 xhci_free_command(xhci, cmd); 414 xhci_free_command(xhci, cmd);
407 return -ENOMEM; 415 return -ENOMEM;
408
409 } 416 }
410 xhci_queue_stop_endpoint(xhci, command, slot_id, i, 417 xhci_queue_stop_endpoint(xhci, command, slot_id, i,
411 suspend); 418 suspend);
@@ -603,12 +610,14 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
603 610
604 /* Disable all Device Slots */ 611 /* Disable all Device Slots */
605 xhci_dbg(xhci, "Disable all slots\n"); 612 xhci_dbg(xhci, "Disable all slots\n");
613 spin_unlock_irqrestore(&xhci->lock, *flags);
606 for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { 614 for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
607 retval = xhci_disable_slot(xhci, NULL, i); 615 retval = xhci_disable_slot(xhci, NULL, i);
608 if (retval) 616 if (retval)
609 xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", 617 xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n",
610 i, retval); 618 i, retval);
611 } 619 }
620 spin_lock_irqsave(&xhci->lock, *flags);
612 /* Put all ports to the Disable state by clear PP */ 621 /* Put all ports to the Disable state by clear PP */
613 xhci_dbg(xhci, "Disable all port (PP = 0)\n"); 622 xhci_dbg(xhci, "Disable all port (PP = 0)\n");
614 /* Power off USB3 ports*/ 623 /* Power off USB3 ports*/
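
The xhci-hub hunk above releases xhci->lock around the slot-disable loop because xhci_disable_slot() issues a command and may block; the saved IRQ flags travel by pointer so the caller's lock state survives the round trip. A minimal sketch of that shape (names are illustrative):

#include <linux/spinlock.h>

static void sketch_unlock_around(spinlock_t *lock, unsigned long *flags,
                                 void (*blocking_op)(void))
{
        spin_unlock_irqrestore(lock, *flags);
        blocking_op();                  /* may sleep or allocate */
        spin_lock_irqsave(lock, *flags);
}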
@@ -897,6 +906,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
897 clear_bit(wIndex, &bus_state->resuming_ports); 906 clear_bit(wIndex, &bus_state->resuming_ports);
898 907
899 set_bit(wIndex, &bus_state->rexit_ports); 908 set_bit(wIndex, &bus_state->rexit_ports);
909
910 xhci_test_and_clear_bit(xhci, port_array, wIndex,
911 PORT_PLC);
900 xhci_set_link_state(xhci, port_array, wIndex, 912 xhci_set_link_state(xhci, port_array, wIndex,
901 XDEV_U0); 913 XDEV_U0);
902 914
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 53882e2babbb..8071c8fdd15e 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -59,6 +59,8 @@
59#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb 59#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
60#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc 60#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
61 61
62#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
63
62static const char hcd_name[] = "xhci_hcd"; 64static const char hcd_name[] = "xhci_hcd";
63 65
64static struct hc_driver __read_mostly xhci_pci_hc_driver; 66static struct hc_driver __read_mostly xhci_pci_hc_driver;
@@ -217,6 +219,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
217 pdev->device == 0x1142) 219 pdev->device == 0x1142)
218 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 220 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
219 221
222 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
223 pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI)
224 xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL;
225
220 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) 226 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
221 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; 227 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
222 228
@@ -278,6 +284,13 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
278 284
279 driver = (struct hc_driver *)id->driver_data; 285 driver = (struct hc_driver *)id->driver_data;
280 286
287 /* For some HW implementations, an XHCI reset is just not enough... */
288 if (usb_xhci_needs_pci_reset(dev)) {
289 dev_info(&dev->dev, "Resetting\n");
290 if (pci_reset_function_locked(dev))
291 dev_warn(&dev->dev, "Reset failed");
292 }
293
281 /* Prevent runtime suspending between USB-2 and USB-3 initialization */ 294 /* Prevent runtime suspending between USB-2 and USB-3 initialization */
282 pm_runtime_get_noresume(&dev->dev); 295 pm_runtime_get_noresume(&dev->dev);
283 296
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index c50c902d009e..cc368ad2b51e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -864,13 +864,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
864 (ep->ep_state & EP_GETTING_NO_STREAMS)) { 864 (ep->ep_state & EP_GETTING_NO_STREAMS)) {
865 int stream_id; 865 int stream_id;
866 866
867 for (stream_id = 0; stream_id < ep->stream_info->num_streams; 867 for (stream_id = 1; stream_id < ep->stream_info->num_streams;
868 stream_id++) { 868 stream_id++) {
869 ring = ep->stream_info->stream_rings[stream_id];
870 if (!ring)
871 continue;
872
869 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 873 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
870 "Killing URBs for slot ID %u, ep index %u, stream %u", 874 "Killing URBs for slot ID %u, ep index %u, stream %u",
871 slot_id, ep_index, stream_id + 1); 875 slot_id, ep_index, stream_id);
872 xhci_kill_ring_urbs(xhci, 876 xhci_kill_ring_urbs(xhci, ring);
873 ep->stream_info->stream_rings[stream_id]);
874 } 877 }
875 } else { 878 } else {
876 ring = ep->ring; 879 ring = ep->ring;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 56f85df013db..b2ff1ff1a02f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -198,6 +198,9 @@ int xhci_reset(struct xhci_hcd *xhci)
198 if (ret) 198 if (ret)
199 return ret; 199 return ret;
200 200
201 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
202 usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));
203
201 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 204 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
202 "Wait for controller to be ready for doorbell rings"); 205 "Wait for controller to be ready for doorbell rings");
203 /* 206 /*
@@ -622,8 +625,10 @@ int xhci_run(struct usb_hcd *hcd)
622 if (!command) 625 if (!command)
623 return -ENOMEM; 626 return -ENOMEM;
624 627
625 xhci_queue_vendor_command(xhci, command, 0, 0, 0, 628 ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
626 TRB_TYPE(TRB_NEC_GET_FW)); 629 TRB_TYPE(TRB_NEC_GET_FW));
630 if (ret)
631 xhci_free_command(xhci, command);
627 } 632 }
628 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 633 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
629 "Finished xhci_run for USB2 roothub"); 634 "Finished xhci_run for USB2 roothub");
@@ -1085,6 +1090,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1085 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) 1090 if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
1086 compliance_mode_recovery_timer_init(xhci); 1091 compliance_mode_recovery_timer_init(xhci);
1087 1092
1093 if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
1094 usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
1095
1088 /* Re-enable port polling. */ 1096 /* Re-enable port polling. */
1089 xhci_dbg(xhci, "%s: starting port polling.\n", __func__); 1097 xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1090 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 1098 set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c6da1f93c84..e3e935291ed6 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1820,6 +1820,7 @@ struct xhci_hcd {
1820#define XHCI_BROKEN_PORT_PED (1 << 25) 1820#define XHCI_BROKEN_PORT_PED (1 << 25)
1821#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) 1821#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
1822#define XHCI_U2_DISABLE_WAKE (1 << 27) 1822#define XHCI_U2_DISABLE_WAKE (1 << 27)
1823#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
1823 1824
1824 unsigned int num_active_eps; 1825 unsigned int num_active_eps;
1825 unsigned int limit_active_eps; 1826 unsigned int limit_active_eps;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 76decb8011eb..3344ffd5bb13 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -139,6 +139,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
139 "Could not flush host TX%d fifo: csr: %04x\n", 139 "Could not flush host TX%d fifo: csr: %04x\n",
140 ep->epnum, csr)) 140 ep->epnum, csr))
141 return; 141 return;
142 mdelay(1);
142 } 143 }
143} 144}
144 145
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8fb86a5f458e..3d0dd2f97415 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -197,6 +197,7 @@ struct msm_otg {
197 struct regulator *v3p3; 197 struct regulator *v3p3;
198 struct regulator *v1p8; 198 struct regulator *v1p8;
199 struct regulator *vddcx; 199 struct regulator *vddcx;
200 struct regulator_bulk_data supplies[3];
200 201
201 struct reset_control *phy_rst; 202 struct reset_control *phy_rst;
202 struct reset_control *link_rst; 203 struct reset_control *link_rst;
@@ -1731,7 +1732,6 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
1731 1732
1732static int msm_otg_probe(struct platform_device *pdev) 1733static int msm_otg_probe(struct platform_device *pdev)
1733{ 1734{
1734 struct regulator_bulk_data regs[3];
1735 int ret = 0; 1735 int ret = 0;
1736 struct device_node *np = pdev->dev.of_node; 1736 struct device_node *np = pdev->dev.of_node;
1737 struct msm_otg_platform_data *pdata; 1737 struct msm_otg_platform_data *pdata;
@@ -1817,17 +1817,18 @@ static int msm_otg_probe(struct platform_device *pdev)
1817 return motg->irq; 1817 return motg->irq;
1818 } 1818 }
1819 1819
1820 regs[0].supply = "vddcx"; 1820 motg->supplies[0].supply = "vddcx";
1821 regs[1].supply = "v3p3"; 1821 motg->supplies[1].supply = "v3p3";
1822 regs[2].supply = "v1p8"; 1822 motg->supplies[2].supply = "v1p8";
1823 1823
1824 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); 1824 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(motg->supplies),
1825 motg->supplies);
1825 if (ret) 1826 if (ret)
1826 return ret; 1827 return ret;
1827 1828
1828 motg->vddcx = regs[0].consumer; 1829 motg->vddcx = motg->supplies[0].consumer;
1829 motg->v3p3 = regs[1].consumer; 1830 motg->v3p3 = motg->supplies[1].consumer;
1830 motg->v1p8 = regs[2].consumer; 1831 motg->v1p8 = motg->supplies[2].consumer;
1831 1832
1832 clk_set_rate(motg->clk, 60000000); 1833 clk_set_rate(motg->clk, 60000000);
1833 1834
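
The phy-msm-usb hunks above move the regulator_bulk_data array off the probe stack and into the driver state: devm_regulator_bulk_get() fills in consumer handles, and the array is referenced again after probe() returns. A minimal sketch of the corrected ownership (sketch_state and sketch_get_supplies are illustrative names):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

struct sketch_state {
        struct regulator_bulk_data supplies[3]; /* outlives probe() */
};

static int sketch_get_supplies(struct device *dev, struct sketch_state *st)
{
        st->supplies[0].supply = "vddcx";
        st->supplies[1].supply = "v3p3";
        st->supplies[2].supply = "v1p8";

        return devm_regulator_bulk_get(dev, ARRAY_SIZE(st->supplies),
                                       st->supplies);
}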
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 623c51300393..f0ce304c5aaf 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -752,8 +752,10 @@ static int usbhsc_resume(struct device *dev)
752 struct usbhs_priv *priv = dev_get_drvdata(dev); 752 struct usbhs_priv *priv = dev_get_drvdata(dev);
753 struct platform_device *pdev = usbhs_priv_to_pdev(priv); 753 struct platform_device *pdev = usbhs_priv_to_pdev(priv);
754 754
755 if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) 755 if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
756 usbhsc_power_ctrl(priv, 1); 756 usbhsc_power_ctrl(priv, 1);
757 usbhs_mod_autonomy_mode(priv);
758 }
757 759
758 usbhs_platform_call(priv, phy_reset, pdev); 760 usbhs_platform_call(priv, phy_reset, pdev);
759 761
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 5bc7a6138855..2c8161bcf5b5 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@ struct usbhsg_gpriv;
37struct usbhsg_uep { 37struct usbhsg_uep {
38 struct usb_ep ep; 38 struct usb_ep ep;
39 struct usbhs_pipe *pipe; 39 struct usbhs_pipe *pipe;
40 spinlock_t lock; /* protect the pipe */
40 41
41 char ep_name[EP_NAME_SIZE]; 42 char ep_name[EP_NAME_SIZE];
42 43
@@ -636,10 +637,13 @@ usbhsg_ep_enable_end:
636static int usbhsg_ep_disable(struct usb_ep *ep) 637static int usbhsg_ep_disable(struct usb_ep *ep)
637{ 638{
638 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); 639 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
639 struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); 640 struct usbhs_pipe *pipe;
641 unsigned long flags;
640 642
643 spin_lock_irqsave(&uep->lock, flags);
644 pipe = usbhsg_uep_to_pipe(uep);
641 if (!pipe) 645 if (!pipe)
642 return -EINVAL; 646 goto out;
643 647
644 usbhsg_pipe_disable(uep); 648 usbhsg_pipe_disable(uep);
645 usbhs_pipe_free(pipe); 649 usbhs_pipe_free(pipe);
@@ -647,6 +651,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
647 uep->pipe->mod_private = NULL; 651 uep->pipe->mod_private = NULL;
648 uep->pipe = NULL; 652 uep->pipe = NULL;
649 653
654out:
655 spin_unlock_irqrestore(&uep->lock, flags);
656
650 return 0; 657 return 0;
651} 658}
652 659
@@ -696,8 +703,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
696{ 703{
697 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); 704 struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
698 struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); 705 struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
699 struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); 706 struct usbhs_pipe *pipe;
707 unsigned long flags;
700 708
709 spin_lock_irqsave(&uep->lock, flags);
710 pipe = usbhsg_uep_to_pipe(uep);
701 if (pipe) 711 if (pipe)
702 usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); 712 usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
703 713
@@ -706,6 +716,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
706 * even if the pipe is NULL. 716 * even if the pipe is NULL.
707 */ 717 */
708 usbhsg_queue_pop(uep, ureq, -ECONNRESET); 718 usbhsg_queue_pop(uep, ureq, -ECONNRESET);
719 spin_unlock_irqrestore(&uep->lock, flags);
709 720
710 return 0; 721 return 0;
711} 722}
@@ -852,10 +863,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
852{ 863{
853 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); 864 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
854 struct usbhs_mod *mod = usbhs_mod_get_current(priv); 865 struct usbhs_mod *mod = usbhs_mod_get_current(priv);
855 struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); 866 struct usbhsg_uep *uep;
856 struct device *dev = usbhs_priv_to_dev(priv); 867 struct device *dev = usbhs_priv_to_dev(priv);
857 unsigned long flags; 868 unsigned long flags;
858 int ret = 0; 869 int ret = 0, i;
859 870
860 /******************** spin lock ********************/ 871 /******************** spin lock ********************/
861 usbhs_lock(priv, flags); 872 usbhs_lock(priv, flags);
@@ -887,7 +898,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
887 usbhs_sys_set_test_mode(priv, 0); 898 usbhs_sys_set_test_mode(priv, 0);
888 usbhs_sys_function_ctrl(priv, 0); 899 usbhs_sys_function_ctrl(priv, 0);
889 900
890 usbhsg_ep_disable(&dcp->ep); 901 /* disable all eps */
902 usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
903 usbhsg_ep_disable(&uep->ep);
891 904
892 dev_dbg(dev, "stop gadget\n"); 905 dev_dbg(dev, "stop gadget\n");
893 906
@@ -1069,6 +1082,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
1069 ret = -ENOMEM; 1082 ret = -ENOMEM;
1070 goto usbhs_mod_gadget_probe_err_gpriv; 1083 goto usbhs_mod_gadget_probe_err_gpriv;
1071 } 1084 }
1085 spin_lock_init(&uep->lock);
1072 1086
1073 gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); 1087 gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
1074 dev_info(dev, "%stransceiver found\n", 1088 dev_info(dev, "%stransceiver found\n",
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d544b331c9f2..02b67abfc2a1 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -20,9 +20,13 @@
 /* Low Power Status register (LPSTS) */
 #define LPSTS_SUSPM		0x4000

-/* USB General control register 2 (UGCTRL2), bit[31:6] should be 0 */
+/*
+ * USB General control register 2 (UGCTRL2)
+ * Remarks: bit[31:11] and bit[9:6] should be 0
+ */
 #define UGCTRL2_RESERVED_3	0x00000001	/* bit[3:0] should be B'0001 */
 #define UGCTRL2_USB0SEL_OTG	0x00000030
+#define UGCTRL2_VBUSSEL		0x00000400

 static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
 {
@@ -34,7 +38,8 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
 {
	struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);

-	usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
+	usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG |
+		      UGCTRL2_VBUSSEL);

 	if (enable) {
 		usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index f64e914a8985..2d945c9f975c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -142,6 +142,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ebe51f11105d..fe123153b1a5 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2025,6 +2025,8 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },	/* D-Link DWM-158 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),	/* D-Link DWM-221 B1 */
 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),	/* D-Link DWM-222 */
+	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index c9ebefd8f35f..a585b477415d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -52,6 +52,8 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID),
 	  .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
+	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485),
+	  .driver_info = PL2303_QUIRK_ENDPOINT_HACK },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be88209e..3b5a15d1dc0d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2	0x0547
 #define ATEN_PRODUCT_ID	0x2008
+#define ATEN_PRODUCT_UC485	0x2021
 #define ATEN_PRODUCT_ID2	0x2118

 #define IODATA_VENDOR_ID	0x04bb
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index fba4005dd737..6a7720e66595 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1529,8 +1529,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)

 	/* Make sure driver was initialized */

-	if (us->extra == NULL)
+	if (us->extra == NULL) {
 		usb_stor_dbg(us, "ERROR Driver not initialized\n");
+		srb->result = DID_ERROR << 16;
+		return;
+	}

 	scsi_set_resid(srb, 0);
 	/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index cbea9f329e71..cde115359793 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -124,9 +124,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
 		"Initio Corporation",
-		"",
+		"INIC-3069",
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-		US_FL_NO_ATA_1X),
+		US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),

 /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
 UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 06615934fed1..0dceb9fa3a06 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -315,6 +315,7 @@ static int usb_stor_control_thread(void * __us)
 {
	struct us_data *us = (struct us_data *)__us;
	struct Scsi_Host *host = us_to_host(us);
+	struct scsi_cmnd *srb;

 	for (;;) {
 		usb_stor_dbg(us, "*** thread sleeping\n");
@@ -330,6 +331,7 @@ static int usb_stor_control_thread(void * __us)
 		scsi_lock(host);

 		/* When we are called with no command pending, we're done */
+		srb = us->srb;
 		if (us->srb == NULL) {
 			scsi_unlock(host);
 			mutex_unlock(&us->dev_mutex);
@@ -398,14 +400,11 @@ static int usb_stor_control_thread(void * __us)
 		/* lock access to the state */
 		scsi_lock(host);

-		/* indicate that the command is done */
-		if (us->srb->result != DID_ABORT << 16) {
-			usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
-					us->srb->result);
-			us->srb->scsi_done(us->srb);
-		} else {
+		/* was the command aborted? */
+		if (us->srb->result == DID_ABORT << 16) {
 SkipForAbort:
 			usb_stor_dbg(us, "scsi command aborted\n");
+			srb = NULL;	/* Don't call srb->scsi_done() */
 		}

 		/*
@@ -429,6 +428,13 @@ SkipForAbort:

 		/* unlock the device pointers */
 		mutex_unlock(&us->dev_mutex);
+
+		/* now that the locks are released, notify the SCSI core */
+		if (srb) {
+			usb_stor_dbg(us, "scsi cmd done, result=0x%x\n",
+					srb->result);
+			srb->scsi_done(srb);
+		}
 	} /* for (;;) */

 	/* Wait until we are told to stop */
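
Note: the usb.c change above snapshots the command pointer while the host lock is held and calls ->scsi_done() only after both the host lock and the device mutex are dropped, so the completion never runs with driver locks held. A rough model of the shape, with placeholder types:

    /* Placeholder types; only the locking shape mirrors the change. */
    #include <linux/spinlock.h>

    struct cmd {
            bool aborted;
            void (*done)(struct cmd *);
    };

    static void finish_cmd(spinlock_t *lock, struct cmd *cmd)
    {
            struct cmd *to_complete;

            spin_lock(lock);
            to_complete = cmd->aborted ? NULL : cmd; /* decide under lock */
            spin_unlock(lock);

            if (to_complete)
                    to_complete->done(to_complete); /* no locks held here */
    }
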
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6b0d2f0918c6..8a88f45822e3 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -3,6 +3,7 @@
 #define __DRIVER_USB_TYPEC_UCSI_H

 #include <linux/bitops.h>
+#include <linux/device.h>
 #include <linux/types.h>

 /* -------------------------------------------------------------------------- */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 063c1ce6fa42..f041b1a6cf66 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -226,7 +226,14 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 	if (ret)
 		return ret;

-	vdev->reset_works = (pci_reset_function(pdev) == 0);
+	/* If reset fails because of the device lock, fail this path entirely */
+	ret = pci_try_reset_function(pdev);
+	if (ret == -EAGAIN) {
+		pci_disable_device(pdev);
+		return ret;
+	}
+
+	vdev->reset_works = !ret;
 	pci_save_state(pdev);
 	vdev->pci_saved_state = pci_store_saved_state(pdev);
 	if (!vdev->pci_saved_state)
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 330a57024cbc..5628fe114347 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -839,7 +839,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
 /* Permissions for PCI Express capability */
 static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
 {
-	/* Alloc larger of two possible sizes */
+	/* Alloc largest of possible sizes */
 	if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
 		return -ENOMEM;

@@ -1243,11 +1243,16 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
 		vdev->extended_caps = (dword != 0);
 	}

-	/* length based on version */
-	if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
+	/* length based on version and type */
+	if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1) {
+		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+			return 0xc; /* "All Devices" only, no link */
 		return PCI_CAP_EXP_ENDPOINT_SIZEOF_V1;
-	else
+	} else {
+		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END)
+			return 0x2c; /* No link */
 		return PCI_CAP_EXP_ENDPOINT_SIZEOF_V2;
+	}
 	case PCI_CAP_ID_HT:
 		ret = pci_read_config_byte(pdev, pos + 3, &byte);
 		if (ret)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e4613a3c362d..9cb3f722dce1 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -308,7 +308,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->avail = NULL;
 	vq->used = NULL;
 	vq->last_avail_idx = 0;
-	vq->last_used_event = 0;
 	vq->avail_idx = 0;
 	vq->last_used_idx = 0;
 	vq->signalled_used = 0;
@@ -1402,7 +1401,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 			r = -EINVAL;
 			break;
 		}
-		vq->last_avail_idx = vq->last_used_event = s.num;
+		vq->last_avail_idx = s.num;
 		/* Forget the cached index value. */
 		vq->avail_idx = vq->last_avail_idx;
 		break;
@@ -2241,6 +2240,10 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	__u16 old, new;
 	__virtio16 event;
 	bool v;
+	/* Flush out used index updates. This is paired
+	 * with the barrier that the Guest executes when enabling
+	 * interrupts. */
+	smp_mb();

 	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
 	    unlikely(vq->avail_idx == vq->last_avail_idx))
@@ -2248,10 +2251,6 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)

 	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
 		__virtio16 flags;
-		/* Flush out used index updates. This is paired
-		 * with the barrier that the Guest executes when enabling
-		 * interrupts. */
-		smp_mb();
 		if (vhost_get_avail(vq, flags, &vq->avail->flags)) {
 			vq_err(vq, "Failed to get flags");
 			return true;
@@ -2266,26 +2265,11 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	if (unlikely(!v))
 		return true;

-	/* We're sure if the following conditions are met, there's no
-	 * need to notify guest:
-	 * 1) cached used event is ahead of new
-	 * 2) old to new updating does not cross cached used event. */
-	if (vring_need_event(vq->last_used_event, new + vq->num, new) &&
-	    !vring_need_event(vq->last_used_event, new, old))
-		return false;
-
-	/* Flush out used index updates. This is paired
-	 * with the barrier that the Guest executes when enabling
-	 * interrupts. */
-	smp_mb();
-
 	if (vhost_get_avail(vq, event, vhost_used_event(vq))) {
 		vq_err(vq, "Failed to get used event idx");
 		return true;
 	}
-	vq->last_used_event = vhost16_to_cpu(vq, event);
-
-	return vring_need_event(vq->last_used_event, new, old);
+	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
 }

 /* This actually signals the guest, using eventfd. */
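
Note: the vhost.c hunks above back out the cached last_used_event optimization, which could suppress a notification the guest still expected; vhost_notify() now issues the barrier first and re-reads the guest's used_event every time. For reference, the standard event-index predicate it relies on (as defined for virtio rings) is:

    /* Nonzero when the used index moved from old to new_idx across
     * event_idx + 1, i.e. the guest asked to be notified at that point. */
    static inline int vring_need_event(__u16 event_idx, __u16 new_idx,
                                       __u16 old)
    {
            return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
    }
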
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f72095868b93..bb7c29b8b9fc 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -115,9 +115,6 @@ struct vhost_virtqueue {
 	/* Last index we used. */
 	u16 last_used_idx;

-	/* Last used evet we've seen */
-	u16 last_used_event;
-
 	/* Used flags */
 	u16 used_flags;

diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index ff01bed7112f..1e784adb89b1 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -17,6 +17,7 @@
 #include <asm/efi.h>

 static bool request_mem_succeeded = false;
+static bool nowc = false;

 static struct fb_var_screeninfo efifb_defined = {
 	.activate		= FB_ACTIVATE_NOW,
@@ -99,6 +100,8 @@ static int efifb_setup(char *options)
 			screen_info.lfb_height = simple_strtoul(this_opt+7, NULL, 0);
 		else if (!strncmp(this_opt, "width:", 6))
 			screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
+		else if (!strcmp(this_opt, "nowc"))
+			nowc = true;
 		}
 	}

@@ -255,7 +258,10 @@ static int efifb_probe(struct platform_device *dev)
 	info->apertures->ranges[0].base = efifb_fix.smem_start;
 	info->apertures->ranges[0].size = size_remap;

-	info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+	if (nowc)
+		info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
+	else
+		info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
 	if (!info->screen_base) {
 		pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
 			efifb_fix.smem_len, efifb_fix.smem_start);
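
Note: with the efifb change above the framebuffer can be mapped uncached instead of write-combined. Assuming the usual video= option routing for efifb, booting with

    video=efifb:nowc

selects plain ioremap() for platforms where write combining misbehaves; ioremap_wc() remains the default.
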
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index c166e0725be5..ba82f97fb42b 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -1073,20 +1073,16 @@ static int imxfb_remove(struct platform_device *pdev)
 	imxfb_disable_controller(fbi);

 	unregister_framebuffer(info);
-
+	fb_dealloc_cmap(&info->cmap);
 	pdata = dev_get_platdata(&pdev->dev);
 	if (pdata && pdata->exit)
 		pdata->exit(fbi->pdev);
-
-	fb_dealloc_cmap(&info->cmap);
-	kfree(info->pseudo_palette);
-	framebuffer_release(info);
-
 	dma_free_wc(&pdev->dev, fbi->map_size, info->screen_base,
 		    fbi->map_dma);
-
 	iounmap(fbi->regs);
 	release_mem_region(res->start, resource_size(res));
+	kfree(info->pseudo_palette);
+	framebuffer_release(info);

 	return 0;
 }
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index eecf695c16f4..09e5bb013d28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -193,7 +193,6 @@ static struct notifier_block omap_dss_pm_notif_block = {

 static int __init omap_dss_probe(struct platform_device *pdev)
 {
-	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
 	int r;

 	core.pdev = pdev;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 22caf808bfab..f0b3a0b9d42f 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -104,12 +104,6 @@ static u32 page_to_balloon_pfn(struct page *page)
 	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
 }

-static struct page *balloon_pfn_to_page(u32 pfn)
-{
-	BUG_ON(pfn % VIRTIO_BALLOON_PAGES_PER_PAGE);
-	return pfn_to_page(pfn / VIRTIO_BALLOON_PAGES_PER_PAGE);
-}
-
 static void balloon_ack(struct virtqueue *vq)
 {
	struct virtio_balloon *vb = vq->vdev->priv;
@@ -138,8 +132,10 @@ static void set_page_pfns(struct virtio_balloon *vb,
 {
 	unsigned int i;

-	/* Set balloon pfns pointing at this page.
-	 * Note that the first pfn points at start of the page. */
+	/*
+	 * Set balloon pfns pointing at this page.
+	 * Note that the first pfn points at start of the page.
+	 */
 	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
 		pfns[i] = cpu_to_virtio32(vb->vdev,
 					  page_to_balloon_pfn(page) + i);
@@ -182,18 +178,16 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
 	return num_allocated_pages;
 }

-static void release_pages_balloon(struct virtio_balloon *vb)
+static void release_pages_balloon(struct virtio_balloon *vb,
+				 struct list_head *pages)
 {
-	unsigned int i;
-	struct page *page;
+	struct page *page, *next;

-	/* Find pfns pointing at start of each page, get pages and free them. */
-	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-		page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
-							   vb->pfns[i]));
+	list_for_each_entry_safe(page, next, pages, lru) {
 		if (!virtio_has_feature(vb->vdev,
 					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
 			adjust_managed_page_count(page, 1);
+		list_del(&page->lru);
 		put_page(page); /* balloon reference */
 	}
 }
@@ -203,6 +197,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 	unsigned num_freed_pages;
 	struct page *page;
	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
+	LIST_HEAD(pages);

 	/* We can only do one array worth at a time. */
 	num = min(num, ARRAY_SIZE(vb->pfns));
@@ -216,6 +211,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 		if (!page)
 			break;
 		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+		list_add(&page->lru, &pages);
 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
 	}

@@ -227,7 +223,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
 	 */
 	if (vb->num_pfns != 0)
 		tell_host(vb, vb->deflate_vq);
-	release_pages_balloon(vb);
+	release_pages_balloon(vb, &pages);
 	mutex_unlock(&vb->balloon_lock);
 	return num_freed_pages;
 }
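
Note: the virtio_balloon.c rework above stops translating PFNs back to struct page on deflate; pages are threaded onto a caller-local list while they are pulled out of the balloon and released from that list afterwards. The generic shape of the pattern, as a sketch:

    /* Collect items on a local list, then free them once the device has
     * been told; the _safe iterator allows deletion inside the loop. */
    #include <linux/list.h>
    #include <linux/mm.h>

    static void release_all(struct list_head *pages)
    {
            struct page *page, *next;

            list_for_each_entry_safe(page, next, pages, lru) {
                    list_del(&page->lru);
                    put_page(page);
            }
    }
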
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 007a4f366086..1c4797e53f68 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -107,6 +107,7 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 {
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
+	unsigned flags = PCI_IRQ_MSIX;
 	unsigned i, v;
 	int err = -ENOMEM;

@@ -126,10 +127,13 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 			GFP_KERNEL))
 		goto error;

+	if (desc) {
+		flags |= PCI_IRQ_AFFINITY;
+		desc->pre_vectors++; /* virtio config vector */
+	}
+
 	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-			nvectors, PCI_IRQ_MSIX |
-			(desc ? PCI_IRQ_AFFINITY : 0),
-			desc);
+			nvectors, flags, desc);
 	if (err < 0)
 		goto error;
 	vp_dev->msix_enabled = 1;
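
Note: besides folding the flags, the virtio_pci_common.c hunk bumps desc->pre_vectors so the config-change vector is excluded from automatic affinity spreading and only the queue vectors get distributed across CPUs. A hedged sketch of such an allocation (standalone, not the driver's exact code):

    /* Assumes the kernel's struct irq_affinity and
     * pci_alloc_irq_vectors_affinity(); vector 0 stays unspread. */
    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int alloc_queue_vectors(struct pci_dev *pdev, int nvectors)
    {
            struct irq_affinity desc = { .pre_vectors = 1 };

            return pci_alloc_irq_vectors_affinity(pdev, nvectors, nvectors,
                                                  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                                  &desc);
    }
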
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3612542b6044..83fc9aab34e8 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -704,7 +704,8 @@ static int omap_hdq_probe(struct platform_device *pdev)

 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		ret = -ENXIO;
+		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+		ret = irq;
 		goto err_irq;
 	}

diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 95ea7e6b1d99..74471e7aa5cc 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -728,6 +728,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 	memcpy(&sl->reg_num, rn, sizeof(sl->reg_num));
 	atomic_set(&sl->refcnt, 1);
 	atomic_inc(&sl->master->refcnt);
+	dev->slave_count++;

 	/* slave modules need to be loaded in a context with unlocked mutex */
 	mutex_unlock(&dev->mutex);
@@ -747,11 +748,11 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)

 	sl->family = f;

-
 	err = __w1_attach_slave_device(sl);
 	if (err < 0) {
 		dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__,
 			sl->name);
+		dev->slave_count--;
 		w1_family_put(sl->family);
 		atomic_dec(&sl->master->refcnt);
 		kfree(sl);
@@ -759,7 +760,6 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 	}

 	sl->ttl = dev->slave_ttl;
-	dev->slave_count++;

 	memcpy(msg.id.id, rn, sizeof(msg.id));
 	msg.type = W1_SLAVE_ADD;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 8feab810aed9..7f188b8d0c67 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -7,9 +7,6 @@ obj-y += xenbus/
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o := $(nostackp)

-CFLAGS_efi.o += -fshort-wchar
-LDFLAGS += $(call ld-option, --no-wchar-size-warning)
-
 dom0-$(CONFIG_ARM64) += arm-device.o
 dom0-$(CONFIG_PCI) += pci.o
 dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 50dcb68d8070..ab609255a0f3 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -780,6 +780,9 @@ static int __init balloon_init(void)
 	}
 #endif

+	/* Init the xen-balloon driver. */
+	xen_balloon_init();
+
 	return 0;
 }
 subsys_initcall(balloon_init);
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 4da69dbf7dca..1bdd02a6d6ac 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
 	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));

-	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+	return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
 	/*
 	 * XXX: Add support for merging bio_vec when using different page
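
Note: the biomerge.c change replaces the loose same-or-next-frame test with an exact adjacency check in backend-frame space: vec2 may merge only if its first backend frame immediately follows the last frame covered by vec1. A small worked check with made-up frame numbers:

    /* If vec1 starts at backend frame 10 and bv_offset + bv_len spans
     * exactly one frame, merging requires vec2 to start at frame 11. */
    #include <linux/pfn.h>

    static bool frames_adjacent(unsigned long bfn1,
                                unsigned int off_plus_len,
                                unsigned long bfn2)
    {
            return bfn1 + PFN_DOWN(off_plus_len) == bfn2;
    }
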
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index b241bfa529ce..2d43118077e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -343,14 +343,6 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	info->cpu = cpu;
 }

-static void xen_evtchn_mask_all(void)
-{
-	unsigned int evtchn;
-
-	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
-		mask_evtchn(evtchn);
-}
-
 /**
  * notify_remote_via_irq - send event to remote end of event channel via irq
  * @irq: irq of event channel to send event to
@@ -582,7 +574,7 @@ static void shutdown_pirq(struct irq_data *data)

 static void enable_pirq(struct irq_data *data)
 {
-	startup_pirq(data);
+	enable_dynirq(data);
 }

 static void disable_pirq(struct irq_data *data)
@@ -1573,7 +1565,6 @@ void xen_irq_resume(void)
 	struct irq_info *info;

 	/* New event-channel space is not 'live' yet. */
-	xen_evtchn_mask_all();
 	xen_evtchn_resume();

 	/* No IRQ <-> event-channel mappings. */
@@ -1681,6 +1672,7 @@ module_param(fifo_events, bool, 0);
 void __init xen_init_IRQ(void)
 {
 	int ret = -EINVAL;
+	unsigned int evtchn;

 	if (fifo_events)
 		ret = xen_evtchn_fifo_init();
@@ -1692,7 +1684,8 @@ void __init xen_init_IRQ(void)
 	BUG_ON(!evtchn_to_irq);

 	/* No event channels are 'live' right now. */
-	xen_evtchn_mask_all();
+	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
+		mask_evtchn(evtchn);

 	pirq_needs_eoi = pirq_needs_eoi_flag;

diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d6786b87e13b..2c6a9114d332 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
 #include <linux/delay.h>
 #include <linux/hardirq.h>
 #include <linux/workqueue.h>
+#include <linux/ratelimit.h>

 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -1072,8 +1073,14 @@ static int gnttab_expand(unsigned int req_entries)
 	cur = nr_grant_frames;
 	extra = ((req_entries + (grefs_per_grant_frame-1)) /
 		 grefs_per_grant_frame);
-	if (cur + extra > gnttab_max_grant_frames())
+	if (cur + extra > gnttab_max_grant_frames()) {
+		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
+				    " cur=%u extra=%u limit=%u"
+				    " gnttab_free_count=%u req_entries=%u\n",
+				    cur, extra, gnttab_max_grant_frames(),
+				    gnttab_free_count, req_entries);
 		return -ENOSPC;
+	}

 	rc = gnttab_map(cur, cur + extra - 1);
 	if (rc == 0)
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e7715cb62eef..e89136ab851e 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -59,6 +59,8 @@ static void watch_target(struct xenbus_watch *watch,
 {
 	unsigned long long new_target;
 	int err;
+	static bool watch_fired;
+	static long target_diff;

 	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
 	if (err != 1) {
@@ -69,7 +71,14 @@ static void watch_target(struct xenbus_watch *watch,
 	/* The given memory/target value is in KiB, so it needs converting to
 	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
 	 */
-	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+	new_target >>= PAGE_SHIFT - 10;
+	if (watch_fired) {
+		balloon_set_new_target(new_target - target_diff);
+		return;
+	}
+
+	watch_fired = true;
+	target_diff = new_target - balloon_stats.target_pages;
 }
 static struct xenbus_watch target_watch = {
 	.node = "memory/target",
@@ -94,22 +103,15 @@ static struct notifier_block xenstore_notifier = {
 	.notifier_call = balloon_init_watcher,
 };

-static int __init balloon_init(void)
+void xen_balloon_init(void)
 {
-	if (!xen_domain())
-		return -ENODEV;
-
-	pr_info("Initialising balloon driver\n");
-
 	register_balloon(&balloon_dev);

 	register_xen_selfballooning(&balloon_dev);

 	register_xenstore_notifier(&xenstore_notifier);
-
-	return 0;
 }
-subsys_initcall(balloon_init);
+EXPORT_SYMBOL_GPL(xen_balloon_init);

 #define BALLOON_SHOW(name, format, args...)		\
 	static ssize_t show_##name(struct device *dev,	\
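
Note: the watch_target() logic above makes the first xenstore event record only the offset between the toolstack's target and the driver's current one; every later event applies that fixed offset. A standalone model with made-up numbers (first event: toolstack says 1100 pages while the driver targets 1000, so the offset becomes 100 and nothing is resized yet; a later event of 1200 then yields a driver target of 1100):

    /* Illustrative model of the two-phase target bookkeeping. */
    static bool watch_fired;
    static long target_diff;

    static long effective_target(long new_target, long current_target)
    {
            if (!watch_fired) {
                    watch_fired = true;
                    target_diff = new_target - current_target;
                    return current_target;  /* first event: record offset only */
            }
            return new_target - target_diff;
    }
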
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 66620713242a..a67e955cacd1 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -151,8 +151,8 @@ static unsigned long frontswap_inertia_counter;
 static void frontswap_selfshrink(void)
 {
 	static unsigned long cur_frontswap_pages;
-	static unsigned long last_frontswap_pages;
-	static unsigned long tgt_frontswap_pages;
+	unsigned long last_frontswap_pages;
+	unsigned long tgt_frontswap_pages;

 	last_frontswap_pages = cur_frontswap_pages;
 	cur_frontswap_pages = frontswap_curr_pages();
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index e46080214955..3e59590c7254 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -857,6 +857,8 @@ static int xenwatch_thread(void *unused)
 	struct list_head *ent;
 	struct xs_watch_event *event;

+	xenwatch_pid = current->pid;
+
 	for (;;) {
 		wait_event_interruptible(watch_events_waitq,
 					 !list_empty(&watch_events));
@@ -925,7 +927,6 @@ int xs_init(void)
 	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
 	if (IS_ERR(task))
 		return PTR_ERR(task);
-	xenwatch_pid = task->pid;

 	/* shutdown watches for kexec boot */
 	xs_reset_watches();
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 967f069385d0..71ddfb4cf61c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -87,7 +87,6 @@ static int __init xenfs_init(void)
 	if (xen_domain())
 		return register_filesystem(&xenfs_type);

-	pr_info("not registering filesystem on non-xen platform\n");
 	return 0;
 }
